diff --git "a/1979.jsonl" "b/1979.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1979.jsonl"
@@ -0,0 +1,660 @@
+{"seq_id":"184528270","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nfrom urllib.parse import parse_qs\n\n\nclass MessageHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n content_length = self.headers.get('content-length', 0)\n length = int(content_length)\n\n request_body = self.rfile.read(length)\n data = request_body.decode()\n\n msg = parse_qs(data)[\"message\"][0]\n\n self.send_response(200)\n self.send_header('Content-type', 'text/plain; charset=utf-8')\n self.end_headers()\n self.wfile.write(msg.encode())\n\nif __name__ == '__main__':\n server_address = ('', 8000)\n httpd = HTTPServer(server_address, MessageHandler)\n httpd.serve_forever()\n","sub_path":"Lesson-2/3_MessageboardPartOne/MessageboardPartOne.py","file_name":"MessageboardPartOne.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"491892287","text":"import tensorflow as tf\nfrom tensorflow.contrib import slim\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport numpy as np\n\nnum_keep_radio = 0.73 #0.7\n\n\n# define prelu\ndef prelu(inputs):\n alphas = tf.get_variable(\"alphas\", shape=inputs.get_shape()[-1], dtype=tf.float32, initializer=tf.constant_initializer(0.25))\n pos = tf.nn.relu(inputs)\n neg = alphas * (inputs-abs(inputs))*0.5\n return pos + neg\n\n\n# cls_prob:batch*2\n# label:batch\ndef cls_ohem(cls_prob, label, cls_num):\n zeros = tf.zeros_like(label)\n # label=-1 --> label=0\n # pos -> 1, neg -> 0, others -> 0\n # label_filter_invalid = tf.where(tf.less(label, 0), zeros, label)\n num_cls_prob = tf.size(cls_prob)\n cls_prob_reshape = tf.reshape(cls_prob, [num_cls_prob, -1])\n label_int = tf.cast(label, tf.int32)\n\n # get the number of rows of class_prob\n num_row = tf.to_int32(cls_prob.get_shape()[0])\n\n #row = [0,2,4.....]\n # row = tf.range(num_row)*2\n row = tf.range(num_row) * cls_num # cls_num: number of classes\n indices_ = row + label_int\n label_prob = tf.squeeze(tf.gather(cls_prob_reshape, indices_))\n loss = -tf.log(label_prob+1e-10)\n zeros = tf.zeros_like(label_prob, dtype=tf.float32)\n ones = tf.ones_like(label_prob, dtype=tf.float32)\n\n # set pos and neg to be 1, rest to be 0\n valid_inds = tf.where(label < zeros, zeros, ones)\n # get the number of POS and NEG examples\n num_valid = tf.reduce_sum(valid_inds)\n keep_num = tf.cast(num_valid*num_keep_radio, dtype=tf.int32)\n # FILTER OUT PART AND LANDMARK DATA\n loss = loss * valid_inds\n loss, _ = tf.nn.top_k(loss, k=keep_num)\n return tf.reduce_mean(loss)\n\n\ndef cal_accuracy(cls_prob, label):\n\n '''\n :param cls_prob:\n :param label:\n :return:calculate classification accuracy for pos and neg examples only\n '''\n # get the index of maximum value along axis one from cls_prob\n # 0 for negative 1 for positive\n pred = tf.argmax(cls_prob, axis=1)\n label_int = tf.cast(label, tf.int64)\n # return the index of pos and neg examples\n cond = tf.where(tf.greater_equal(label_int, 0))\n picked = tf.squeeze(cond)\n # gather the label of pos and neg examples\n label_picked = tf.gather(label_int, picked)\n pred_picked = tf.gather(pred, picked)\n # calculate the mean value of rnet vector contains 1 and 0, 1 for correct classification, 0 for incorrect\n # ACC = (TP+TN)/total population\n accuracy_op = tf.reduce_mean(tf.cast(tf.equal(label_picked,pred_picked), tf.float32))\n return accuracy_op\n\n\ndef 
R_Net(inputs, label_hat=None, label_mask=None, label_block=None, label_blur=None, label_bow=None, label_illumination=None, training=True):\n with slim.arg_scope([slim.conv2d],\n activation_fn=prelu,\n weights_initializer=slim.xavier_initializer(),\n biases_initializer=tf.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(0.0005),\n padding='valid'):\n print(inputs.get_shape())\n net = slim.conv2d(inputs, num_outputs=28, kernel_size=[3, 3], stride=1, scope=\"conv1\")\n print(net.get_shape())\n net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope=\"pool1\", padding='SAME')\n print(net.get_shape())\n net = slim.conv2d(net, num_outputs=48, kernel_size=[3, 3], stride=1, scope=\"conv2\")\n print(net.get_shape())\n net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope=\"pool2\")\n print(net.get_shape())\n net = slim.conv2d(net, num_outputs=64, kernel_size=[2, 2], stride=1, scope=\"conv3\")\n print(net.get_shape())\n fc_flatten = slim.flatten(net)\n print(fc_flatten.get_shape())\n fc1 = slim.fully_connected(fc_flatten, num_outputs=128, scope=\"fc1\")\n print(fc1.get_shape())\n # batch*2\n cls_hat_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_hat\", activation_fn=tf.nn.softmax)\n print(cls_hat_prob.get_shape())\n cls_mask_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_mask\", activation_fn=tf.nn.softmax)\n print(cls_mask_prob.get_shape())\n cls_block_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_block\", activation_fn=tf.nn.softmax)\n print(cls_block_prob.get_shape())\n cls_blur_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_blur\", activation_fn=tf.nn.softmax)\n print(cls_blur_prob.get_shape())\n cls_bow_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_bow\", activation_fn=tf.nn.softmax)\n print(cls_bow_prob.get_shape())\n cls_illumination_prob = slim.fully_connected(fc1, num_outputs=2, scope=\"cls_fc_illumination\", activation_fn=tf.nn.softmax)\n print(cls_illumination_prob.get_shape())\n\n #train\n if training:\n loss_hat = cls_ohem(cls_hat_prob, label_hat, 2)\n accuracy_hat = cal_accuracy(cls_hat_prob, label_hat)\n\n loss_mask = cls_ohem(cls_mask_prob, label_mask, 2)\n accuracy_mask = cal_accuracy(cls_mask_prob, label_mask)\n\n loss_block = cls_ohem(cls_block_prob, label_block, 2)\n accuracy_block = cal_accuracy(cls_block_prob, label_block)\n\n loss_blur = cls_ohem(cls_blur_prob, label_blur, 2)\n accuracy_blur = cal_accuracy(cls_blur_prob, label_blur)\n\n loss_bow = cls_ohem(cls_bow_prob, label_bow, 2)\n accuracy_bow = cal_accuracy(cls_bow_prob, label_bow)\n\n loss_illumination = cls_ohem(cls_illumination_prob, label_illumination, 2)\n accuracy_illumination = cal_accuracy(cls_illumination_prob, label_illumination)\n\n L2_loss = tf.add_n(slim.losses.get_regularization_losses())\n return loss_hat, accuracy_hat, loss_mask, accuracy_mask, loss_block, accuracy_block, loss_blur, \\\n accuracy_blur, loss_bow, accuracy_bow, loss_illumination, accuracy_illumination, L2_loss\n else:\n return cls_hat_prob, cls_mask_prob, cls_block_prob, cls_blur_prob, cls_bow_prob, cls_illumination_prob\n","sub_path":"rnet/RNet_models.py","file_name":"RNet_models.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551246630","text":"from django.shortcuts import render\nfrom topic.models import Topic\nfrom news.API import NewscredApi\n\n# Create your views here.\n\ndef render(request, page=1):\n ## galleris = to get 
the all galleries by using Gallery model class . .query\n try:\n topic = Topic.objects.filter(page_id=page)[0]\n except IndexError:\n return False\n options = {}\n\n query = topic.query\n pagesize = topic.pagesize\n\n if query:\n options['query'] = query\n if pagesize:\n options['pagesize'] = pagesize\n\n article_obj = NewscredApi('topics', options)\n return article_obj.response()","sub_path":"Projects/Django/news/topic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"376841762","text":"import numpy as np\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import deque\nfrom ..dataset import Transition, TransitionMiniBatch, trace_back_and_clear\nfrom .utility import get_action_size_from_env\n\n\nclass TransitionQueue:\n \"\"\" A queue for transition objects.\n\n This class is a replacement for deque for Transition objects.\n When the last transition of an episode is removed from the buffer,\n the all links between the transition in the same episode will be cleared to\n make GC properly free transitions objects.\n\n Args:\n maxlen (int): the maximum size of buffer.\n\n Attributes:\n maxlen (int): the maximum size of buffer.\n buffer (list): buffer for transitions.\n cursor (int): the current cursor pointing to the position to insert.\n\n \"\"\"\n def __init__(self, maxlen=None):\n self.maxlen = maxlen\n self.buffer = []\n self.cursor = 0\n\n def append(self, transition):\n \"\"\" Appends a transition to buffer.\n\n Args:\n transition (d3rlpy.dataset.Transition): transition.\n\n \"\"\"\n assert isinstance(transition, Transition)\n if self.maxlen is None or self.size() < self.maxlen:\n self.buffer.append(transition)\n else:\n if self.buffer[self.cursor].terminal:\n # clear links to correctly free memories\n trace_back_and_clear(self.buffer[self.cursor])\n self.buffer[self.cursor] = transition\n self.cursor += 1\n if self.cursor == self.maxlen:\n self.cursor = 0\n\n def __len__(self):\n return self.size()\n\n def __getitem__(self, index):\n return self.buffer[index]\n\n def __iter__(self):\n return iter(self.buffer)\n\n def size(self):\n \"\"\" Returns the size of buffer.\n\n Returns:\n int: the size of buffer.\n\n \"\"\"\n return len(self.buffer)\n\n\nclass Buffer(metaclass=ABCMeta):\n @abstractmethod\n def append(self, observation, action, reward, terminal):\n \"\"\" Append observation, action, reward and terminal flag to buffer.\n\n If the terminal flag is True, Monte-Carlo returns will be computed with\n an entire episode and the whole transitions will be appended.\n\n Args:\n observation (numpy.ndarray): observation.\n action (numpy.ndarray or int): action.\n reward (float): reward.\n terminal (bool or float): terminal flag.\n\n \"\"\"\n pass\n\n @abstractmethod\n def append_episode(self, episode):\n \"\"\" Append Episode object to buffer.\n\n Args:\n episode (d3rlpy.dataset.Episode): episode.\n\n \"\"\"\n pass\n\n @abstractmethod\n def sample(self, batch_size, n_frames=1):\n \"\"\" Returns sampled mini-batch of transitions.\n\n If observation is image, you can stack arbitrary frames via\n ``n_frames``.\n\n .. 
code-block:: python\n\n buffer.observation_shape == (3, 84, 84)\n\n # stack 4 frames\n batch = buffer.sample(batch_size=32, n_frames=4)\n\n batch.observations.shape == (32, 12, 84, 84)\n\n Args:\n batch_size (int): mini-batch size.\n n_frames (int):\n the number of frames to stack for image observation.\n\n Returns:\n d3rlpy.dataset.TransitionMiniBatch: mini-batch.\n\n \"\"\"\n pass\n\n @abstractmethod\n def size(self):\n \"\"\" Returns the number of appended elements in buffer.\n\n Returns:\n int: the number of elements in buffer.\n\n \"\"\"\n pass\n\n\nclass ReplayBuffer(Buffer):\n \"\"\" Standard Replay Buffer.\n\n Args:\n maxlen (int): the maximum number of data length.\n env (gym.Env): gym-like environment to extract shape information.\n episodes (list(d3rlpy.dataset.Episode)): list of episodes to\n initialize buffer\n\n Attributes:\n prev_observation (numpy.ndarray): previously appended observation.\n prev_action (numpy.ndarray or int): previously appended action.\n prev_reward (float): previously appended reward.\n prev_transition (d3rlpy.dataset.Transition):\n previously appended transition.\n transitions (d3rlpy.online.buffers.TransitionQueue):\n queue of transitions.\n observation_shape (tuple): observation shape.\n action_size (int): action size.\n\n \"\"\"\n def __init__(self, maxlen, env, episodes=None):\n # temporary cache to hold transitions for an entire episode\n self.prev_observation = None\n self.prev_action = None\n self.prev_reward = None\n self.prev_transition = None\n\n self.transitions = TransitionQueue(maxlen=maxlen)\n\n # extract shape information\n self.observation_shape = env.observation_space.shape\n self.action_size = get_action_size_from_env(env)\n\n # add initial transitions\n if episodes:\n for episode in episodes:\n self.append_episode(episode)\n\n def append(self, observation, action, reward, terminal):\n # validation\n assert observation.shape == self.observation_shape\n if isinstance(action, np.ndarray):\n assert action.shape[0] == self.action_size\n else:\n action = int(action)\n assert action < self.action_size\n\n # create Transition object\n if self.prev_observation is not None:\n if isinstance(terminal, bool):\n terminal = 1.0 if terminal else 0.0\n\n transition = Transition(observation_shape=self.observation_shape,\n action_size=self.action_size,\n observation=self.prev_observation,\n action=self.prev_action,\n reward=self.prev_reward,\n next_observation=observation,\n next_action=action,\n next_reward=reward,\n terminal=terminal,\n prev_transition=self.prev_transition)\n\n if self.prev_transition:\n self.prev_transition.next_transition = transition\n\n self.transitions.append(transition)\n\n self.prev_transition = transition\n\n self.prev_observation = observation\n self.prev_action = action\n self.prev_reward = reward\n\n if terminal:\n self.prev_observation = None\n self.prev_action = None\n self.prev_reward = None\n self.prev_transition = None\n\n def append_episode(self, episode):\n assert episode.get_observation_shape() == self.observation_shape\n assert episode.get_action_size() == self.action_size\n for transition in episode.transitions:\n self.transitions.append(transition)\n\n def sample(self, batch_size, n_frames=1):\n indices = np.random.randint(self.size(), size=batch_size)\n transitions = [self.transitions[index] for index in indices]\n return TransitionMiniBatch(transitions, n_frames)\n\n def size(self):\n return len(self.transitions)\n\n def __len__(self):\n return 
self.size()\n","sub_path":"d3rlpy/online/buffers.py","file_name":"buffers.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"341128061","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import fields, models, tools\r\n\r\nfrom ..models import crm_alima_don\r\nclass CrmAlimaReports(models.Model):\r\n \"\"\" CRM Dons Analysis \"\"\"\r\n #row_number() over () as id, pour ajouter un id sur la requete\r\n _name = \"crm.alima.dashboard.un\"\r\n _auto = False\r\n _description = \"Repartition annuelle des dons\"\r\n _rec_name = 'annee'\r\n\r\n annee = fields.Char(string='Annee', readonly=True)\r\n montant = fields.Float('Total dons prive', readonly=True, digits=(16, 2), group_operator=\"sum\", help=\"Total dons prive\")\r\n nombre = fields.Float('Nombre de dons', readonly=True, digits=(16, 0), group_operator=\"sum\", help=\"Nombre de dons\")\r\n moyen = fields.Float('Dons moyens', readonly=True, digits=(16, 2), group_operator=\"avg\", help=\"Dons moyenss\")\r\n\r\n def init(self):\r\n tools.drop_view_if_exists(self._cr, 'crm_alima_dashboard_un')\r\n self._cr.execute(\"\"\"\r\n CREATE VIEW crm_alima_dashboard_un AS (\r\n SELECT\r\n c.id,\r\n date_part('year', c.date) as annee,\r\n sum(c.\"montantEur\") as montant,\r\n count(c.\"montantEur\") as nombre,\r\n avg(c.\"montantEur\") as moyen\r\n FROM\r\n public.crm_alima_don c\r\n GROUP BY\r\n annee, c.id\r\n )\"\"\")\r\nclass CrmAlimaReportsDeux(models.Model):\r\n \"\"\" CRM Dons Analysis \"\"\"\r\n _name = \"crm.alima.dashboard.deux\"\r\n _auto = False\r\n _description = \"Montant collectés par type donnateur\"\r\n _rec_name = 'type'\r\n\r\n type = fields.Char(string='Type', readonly=True)\r\n anneedonateur = fields.Char(string='Annee premier don', readonly=True)\r\n annedon = fields.Char(string='Date du don', readonly=True)\r\n mode_versement = fields.Char(string='Mode de versement', readonly=True)\r\n type_de_personne = fields.Char(string='type de personne', readonly=True)\r\n nombre = fields.Float('Nombre de dons', readonly=True, digits=(16, 0), group_operator=\"sum\", help=\"Nombre de dons\")\r\n montant = fields.Float('Montant dons Euro', readonly=True, digits=(16, 2), group_operator=\"sum\", help=\"Total dons euro\")\r\n id = fields.Char(string='donateur', readonly=True)\r\n\r\n\r\n def init(self):\r\n tools.drop_view_if_exists(self._cr, 'crm_alima_dashboard_deux')\r\n self._cr.execute(\"\"\"\r\n CREATE VIEW crm_alima_dashboard_deux AS (\r\n SELECT\r\n public.crm_alima_donateur.id as id,\r\n 1 as nombre,\r\n public.crm_alima_don.\"montantEur\" as montant,\r\n date_part('year', crm_alima_donateur.\"datePremierDon\") as anneedonateur,\r\n date_part('year', crm_alima_don.\"date\") as annedon,\r\n (CASE WHEN date_part('year', crm_alima_don.\"date\") = date_part('year', crm_alima_donateur.\"datePremierDon\")\r\n THEN 'nouveaux donateurs' ELSE 'anciens donateurs' END) as type,\r\n crm_alima_don.mode_versement,\r\n crm_alima_donateur.type_de_personne\r\n FROM\r\n public.crm_alima_don,\r\n public.crm_alima_donateur\r\n where\r\n public.crm_alima_donateur.id = public.crm_alima_don.donateur\r\n ORDER BY\r\n date_part('year', crm_alima_don.\"date\")\r\n )\"\"\")\r\n","sub_path":"crm_alima/report/crm_alima_report.py","file_name":"crm_alima_report.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"427860561","text":"# File : Cube_of_n.py\n# Desc : This program 
calculates the sum of cube of the first n natural numbers\n# where the value of n is provided by the user.\n\ndef main():\n a = int(input(\"Enter the number of digits to be added: \"))\n\n for i in range(a):\n a = a + i\n\n print(\"The Sum of digits is\", a)\n\nmain()","sub_path":"Cube_of_n.py","file_name":"Cube_of_n.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150863127","text":"#Programa principal que llama a los demás archivos\n#Para correrlo es necesario ubicarse dentro de la carpeta /Funciones\n\nimport os\n\n\nclear = lambda: os.system('cls')\nclear()\n\nmarquesina ='》═══════════════════~◈~═══════════════════《'\nprint(marquesina)\ncentrar= (len(marquesina))\nlesgo = 'Bienvenido al sistema TecCel'\nprint (lesgo.center(len(marquesina)))\nprint('\\nOpciones disponibles')\nprint('\\n[1]-Mostrar el almacen actual\\n[2]-Información de los productos\\\n \\n[3]-Precio de los productos\\n[4]-Registrar de venta\\\n \\n[5]-Agregar nuevos productos al almacen\\n[6]-Informacion de empleados\\\n \\n[7]-Reporte de ventas\\n[8]-Salir' )\n\n\n\nopcion = int(input('\\nSeleccione la opcion que desee realizar: '))\n#print ('\\n'+marquesina)\nwhile opcion < 1 or opcion > 8:\n opcion = int(input('Ingrese un número válido: '))\nif opcion == 1:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_1.py')\nelif opcion == 2:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_2.py')\nelif opcion == 3:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_3.py')\nelif opcion == 4:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_4.py')\nelif opcion == 5:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_5.py')\nelif opcion == 6:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_6.py')\nelif opcion == 7:\n clear()\n print(marquesina)\n os.system('python Funcionalidad_7.py')\nelse:\n clear()","sub_path":"Funciones/Programa_Principal.py","file_name":"Programa_Principal.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461867730","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom home import views as home_views\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^cart/', include('cart.urls', namespace='cart')),\n url(r'^contact/$', home_views.contact, name=\"contact\"),\n url(r'^orders/', include('orders.urls', namespace='orders')),\n url(r'^payment/', include('payment.urls', namespace='payment')),\n url(r'^paypal/', include('paypal.standard.ipn.urls')),\n url(r'^products/', include('products.urls', namespace=\"products\")),\n url(r'^$', home_views.home, name=\"home\"),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","sub_path":"oscardeen/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432186366","text":"try:\n from itertools import izip as zip\nexcept ImportError:\n pass\nfrom unittest import TestCase\n\nfrom tests.testutils import iter_html_docs, get_html_doc\nfrom searchcmd.download import HtmlDocument\nfrom searchcmd.cmdextract import CommandExtractor, extract_commands\n\nTEST_DATA_DIR = 'cmdextract'\n\nCOMMANDS = 
{\n 'http://unixmantra.com': [\n (1254, u'find ./music -name \"*.mp3\" -print0 | xargs -0 ls'),\n (1258, u'find ./work -print | xargs grep \"profit\"'),\n (1267,\n 'find . -name \"*.sh\" -print0 | xargs -0 -I {} mv {} ~/back.scripts'),\n (1306, 'find . -name \"*.sh\" | xargs grep \"ksh\"'),\n (1328, 'find /tmp -name \"*.tmp\" | xargs rm'),\n (1335, 'find /tmp -name \"*.tmp\" -print0 | xargs -0 rm'),\n (1373, 'mv chap1 chap1.old'),\n (1374, 'mv chap2 chap2.old'),\n (1375, 'mv chap3 chap3.old'),\n (1385, 'ar r lib.a chap1 ?...'),\n (1386, 'ar r lib.a chap2 ?...'),\n (1387, 'ar r lib.a chap3 ?...')],\n 'http://brunolinux.com': [\n (62, \"sed -i 's/ugly/beautiful/g' /home/bruno/old-friends/sue.txt\"),\n (91, (\"find /home/bruno/old-friends -type f \"\n \"-exec sed -i 's/ugly/beautiful/g' {} \\\\;\")),\n (118, u'mv $fl $fl.old'),\n (119, u\"sed 's/FINDSTRING/REPLACESTRING/g' $fl.old > $fl\"),\n (120, u'rm -f $fl.old'),\n (150, ('perl -e \"s/old_string/new_string/g;\" '\n '-pi.save $(find DirectoryName -type f)'))],\n 'http://cyberciti.biz': [\n (118, 'hwclock -r'),\n (120, 'hwclock --show'),\n (122, 'hwclock --show --utc'),\n (126, 'date -s \"2 OCT 2006 18:00:00\"'),\n (128, 'date --set=\"2 OCT 2006 18:00:00\"'),\n (130, 'date +%Y%m%d -s \"20081128\"'),\n (131, 'date +%T -s \"10:13:13\"'),\n (133, 'date +%T%p -s \"6:10:30AM\" # date +%T%p -s \"12:10:30PM\"'),\n (135, 'hwclock --systohc'),\n (137, 'hwclock -w'),\n (158, 'date -s 2007.04.08-22:46+0000'),\n (167, u'date -s \\u201c2 OCT 2006 18:00:00\\u2033'),\n (217, u'export http_proxy=\\u2019http://10.10.1.2:3128\\u2032'),\n (234, u'hwclock \\u2013show'),\n (235, u'hwclock \\u2013systohc'),\n (286, 'date +%Y%m%d -s \"20080817\"'),\n (354, u'date set=\\u201d2 OCT 2006 18:00:00\\u2033'),\n (371, u'date -s \\u201d2 OCT 2006 18:00:00\\u2033'),\n (380, u'date \\u2013set=\\u201d2 sep 2011 11:27:20\\u2033'),\n (412, u'hwclock \\u2013systohc'),\n (479, 'date 07252208002009'),\n (623, u'date +%Y%m%d%T -s \\u201c20081225 10:05:00\\u2033'),\n (704, u'date \\u2013set=\\u201d2 OCT 2006 18:00:00\\u2033'),\n (832, 'for example 23/03/2011 to 23.03.2011'),\n (841, u'hwclock \\u2013show'),\n (843, 'ls -l /etc/localtime'),\n (845, 'date 041106232011'),\n (846, u'hwclock \\u2013systohc'),\n (847, 'ln -sf /usr/share/zoneinfo/ /etc/localtime'),\n (848, 'vi /etc/sysconfig/clock (update timezone if redhat)'),\n (850, u'hwclock \\u2013show'),\n (852, 'ls -l /etc/localtime'),\n (929, 'date 030613252012'),\n (1126, 'uname -a'),\n (1144, u'date -s \\u201cYYYY-MM-DD HH:MM:SS\\u201d'),\n (1161, 'uname -a'),\n (1183, 'date --set=\"4 JAN 2014 13:25:00\"'),\n (1239, (u'python -c \\u2018import platform ; '\n u'print platform.dist()[0]\\u2019')),\n (1260, u'su \\u2013'),\n (1261, 'passwd root'),\n (1301, u'date +%T -s \\u201c10:13:13\\u2033'),\n (1310, u'date -s \\u201cYYYY-MM-DD HH:MM:SS\\u201d')],\n 'http://stackoverflow.com': [\n (5, 'du -h --max-depth=1'),\n (24, 'du -h -s *')]\n}\n\nNR_TEXTS = {'http://unixmantra.com': 225,\n 'http://brunolinux.com': 47,\n 'http://cyberciti.biz': 705,\n 'http://stackoverflow.com': 49}\n\nMERGED_COMMANDS = set([\n u'find ./music -name \"*.mp3\" -print0 | xargs -0 ls',\n 'hwclock --show',\n \"sed -i 's/ugly/beautiful/g' /home/bruno/old-friends/sue.txt\",\n 'date --set=\"2 OCT 2006 18:00:00\"',\n 'mv chap1 chap1.old',\n u'date +%T -s \\u201c10:13:13\\u2033',\n 'ar r lib.a chap1 ?...',\n 'find . 
-name \"*.sh\" -print0 | xargs -0 -I {} mv {} ~/back.scripts',\n u'mv $fl $fl.old',\n 'mv chap2 chap2.old',\n 'date 07252208002009',\n 'date +%T%p -s \"6:10:30AM\" # date +%T%p -s \"12:10:30PM\"',\n 'find /tmp -name \"*.tmp\" | xargs rm',\n 'du -h --max-depth=1',\n 'mv chap3 chap3.old',\n u'date \\u2013set=\\u201d2 sep 2011 11:27:20\\u2033',\n u'date -s \\u201d2 OCT 2006 18:00:00\\u2033',\n 'date +%Y%m%d -s \"20081128\"',\n 'find . -name \"*.sh\" | xargs grep \"ksh\"',\n 'date 041106232011',\n 'ar r lib.a chap3 ?...',\n u'find ./work -print | xargs grep \"profit\"',\n u'date +%Y%m%d%T -s \\u201c20081225 10:05:00\\u2033',\n 'date -s 2007.04.08-22:46+0000',\n 'find /tmp -name \"*.tmp\" -print0 | xargs -0 rm',\n 'date +%T -s \"10:13:13\"',\n 'date --set=\"4 JAN 2014 13:25:00\"',\n u'hwclock \\u2013systohc',\n 'hwclock --show --utc',\n u'date -s \\u201c2 OCT 2006 18:00:00\\u2033',\n 'hwclock --systohc',\n (\"find /home/bruno/old-friends -type f \"\n \"-exec sed -i 's/ugly/beautiful/g' {} \\\\;\"),\n 'date -s \"2 OCT 2006 18:00:00\"',\n u\"sed 's/FINDSTRING/REPLACESTRING/g' $fl.old > $fl\",\n 'date 030613252012',\n u'date set=\\u201d2 OCT 2006 18:00:00\\u2033',\n u'hwclock \\u2013show',\n 'du -h -s *',\n 'date +%Y%m%d -s \"20080817\"',\n 'ar r lib.a chap2 ?...',\n u'date \\u2013set=\\u201d2 OCT 2006 18:00:00\\u2033',\n 'hwclock -w',\n u'date -s \\u201cYYYY-MM-DD HH:MM:SS\\u201d',\n 'hwclock -r'])\n\n\nclass TestCommandExtract(TestCase):\n\n def test_extract_commands(self):\n cmds = extract_commands(iter_html_docs(TEST_DATA_DIR))\n self.assertEqual(set(cmds.commands.keys()), MERGED_COMMANDS)\n\n cmds = extract_commands(iter_html_docs(TEST_DATA_DIR), 'xargs')\n self.assertEqual(set(cmds.commands.keys()), set([\n 'find /tmp -name \"*.tmp\" | xargs rm',\n u'find ./music -name \"*.mp3\" -print0 | xargs -0 ls',\n 'find . -name \"*.sh\" | xargs grep \"ksh\"',\n 'find /tmp -name \"*.tmp\" -print0 | xargs -0 rm',\n 'find . -name \"*.sh\" -print0 | xargs -0 -I {} mv {} ~/back.scripts',\n u'find ./work -print | xargs grep \"profit\"']))\n\n cmds = extract_commands(\n get_html_doc(TEST_DATA_DIR, 'stackoverflow.com'), 'xargs')\n self.assertEqual(cmds.commands, {})\n\n doc = HtmlDocument('http://stackoverflow.com', b'')\n doc.body = None\n cmds = extract_commands(doc)\n self.assertEqual(cmds.nr_docs, 0)\n\n def test_iter_texts(self):\n extractor = CommandExtractor()\n for doc in iter_html_docs(TEST_DATA_DIR):\n print(doc.url.url)\n nr_txts = 0\n for line, txt in extractor.iter_text_lines(doc):\n nr_txts += 1\n self.assertEqual(NR_TEXTS[doc.url.url], nr_txts)\n\n def test_iter_commands(self):\n extractor = CommandExtractor()\n for doc in iter_html_docs(TEST_DATA_DIR):\n print(doc.url.url)\n for (line, cmd), correct in zip(extractor.iter_commands(doc),\n COMMANDS[doc.url.url]):\n self.assertEqual((line, cmd), correct)\n\n def test_get_command(self):\n ext = CommandExtractor('find')\n self.assertEqual(ext.get_command('$ find . -name \"*.mp3\"'),\n 'find . -name \"*.mp3\"')\n self.assertIsNone(ext.get_command('# ls -hl'))\n self.assertIsNone(ext.get_command('find a file'))\n self.assertIsNone(ext.get_command('Find . 
-name \"*.mp3\"'))\n\n def test_sudo(self):\n ext = CommandExtractor('ls')\n self.assertEqual(ext.get_command('# sudo ls -hl'),\n 'ls -hl')\n ext = CommandExtractor('sudo')\n self.assertEqual(\n ext.get_command('# sudo -u www vi ~www/htdocs/index.html'),\n 'sudo -u www vi ~www/htdocs/index.html')\n\n def test_has_wanted_command(self):\n ext = CommandExtractor('xargs')\n self.assertTrue(ext.has_wanted_command(\n u'find ./music -name \"*.mp3\" -print0 | xargs -0 ls'))\n self.assertFalse(ext.has_wanted_command(\n u'find ./music -name \"*.mp3\" -print0 | grep xargs'))\n\n ext = CommandExtractor(['du', 'mv'])\n self.assertTrue(ext.has_wanted_command(\n u'du -h -s *'))\n self.assertFalse(ext.has_wanted_command(\n u'ls -hl'))\n\n ext = CommandExtractor(['git commit'])\n self.assertTrue(ext.has_wanted_command(\n u'git commit --amend'))\n self.assertFalse(ext.has_wanted_command(\n u'git pull origin master'))\n\n def test_is_command_name(self):\n ext = CommandExtractor()\n\n self.assertTrue(ext.is_command_name('ls'))\n self.assertTrue(ext.is_command_name('l'))\n self.assertTrue(ext.is_command_name('7z'))\n self.assertTrue(ext.is_command_name('apt-get'))\n\n self.assertFalse(ext.is_command_name(''))\n self.assertFalse(ext.is_command_name('1'))\n self.assertFalse(ext.is_command_name('22'))\n self.assertFalse(ext.is_command_name('apt-'))\n self.assertFalse(ext.is_command_name('-'))\n self.assertFalse(ext.is_command_name('-ls'))\n self.assertFalse(ext.is_command_name('apt get'))\n self.assertFalse(ext.is_command_name('APT-GET'))\n\n def test_is_command_output(self):\n ext = CommandExtractor()\n self.assertTrue(ext.is_command_output(\n 'drwxr-xr-x 3 root root 4,0K maj 5 2014 home'))\n self.assertFalse(ext.is_command_output('total 0'))\n\n def test_is_command(self):\n ext = CommandExtractor()\n\n self.assertTrue(ext.is_command(\n u'date -s \\u201cYYYY-MM-DD HH:MM:SS\\u201d'))\n self.assertTrue(ext.is_command('uname -a'))\n self.assertTrue(ext.is_command('git log'))\n\n self.assertFalse(ext.is_command(''))\n self.assertFalse(ext.is_command('when I use:'))\n self.assertFalse(ext.is_command('is used.'))\n self.assertFalse(ext.is_command('thank you!!!!'))\n self.assertFalse(ext.is_command('thanks for sharing'))\n self.assertFalse(ext.is_command(\n 'drwxr-xr-x 3 root root 4,0K maj 5 2014 home'))\n self.assertFalse(ext.is_command(\n 'ls -al %s' % 'a'*ext.MAX_COMMAND_LENGTH))\n self.assertFalse(ext.is_command(\n (\"you can use '-c' & '-w' with wc to obtain number of characters\"\n \"and words repectively\")))\n self.assertFalse(ext.is_command('250 total'))\n\n # TODO\n self.assertTrue(ext.is_command('thanx :)'))\n","sub_path":"tests/test_cmdextract.py","file_name":"test_cmdextract.py","file_ext":"py","file_size_in_byte":10652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189898191","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\n\n\nclass DataAPI(models.Model):\n managed_by = models.ForeignKey(User, related_name=\"data_api\", on_delete=models.CASCADE,\n null=True)\n title = models.CharField(max_length=255)\n version = models.FloatField()\n usage_notes = models.TextField()\n location = models.CharField(max_length=255)\n type = models.CharField(max_length = 255)\n formats_supported = models.CharField(max_length = 255)\n\n def __str__(self):\n return self.title\n\n\nclass ChangeLog(models.Model):\n data_api = models.ForeignKey(DataAPI, on_delete=models.CASCADE, null = True, blank = True)\n 
change_details = models.TextField()\n change_date = models.DateField(default=datetime.now, blank=True)\n change_made_by = models.ForeignKey(User, related_name=\"change_log\", on_delete=models.CASCADE,\n null=True)\n","sub_path":"data_api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118881833","text":"#!/usr/bin/python3\r\n\r\nfrom xml.dom.minidom import parse\r\n\r\n# Open XML document using minidom parser\r\nDOMTree = parse(\"DROANADB.xml\")\r\nmap1 = DOMTree.documentElement\r\n\r\n\r\n\r\n# Get all the node elements in the document\r\nnodes = map1.getElementsByTagName(\"node\")\r\n# map each child node to its parent (minidom nodes expose children via .childNodes)\r\nparent_map = dict((c, p) for p in nodes for c in p.childNodes)\r\nprint(parent_map)\r\n\r\n'''# Print detail of each movie.\r\nfor node in nodes:\r\n print (\"*****Movie*****\")\r\n if movie.hasAttribute(\"title\"):\r\n print (\"Title: %s\" % movie.getAttribute(\"title\"))\r\n type = movie.getElementsByTagName('type')[0]\r\n print (\"Type: %s\" % type.childNodes[0].data)\r\n format = movie.getElementsByTagName('format')[0]\r\n print (\"Format: %s\" % format.childNodes[0].data)\r\n rating = movie.getElementsByTagName('rating')[0]\r\n print (\"Rating: %s\" % rating.childNodes[0].data)\r\n description = movie.getElementsByTagName('description')[0]\r\n print (\"Description: %s\" % description.childNodes[0].data)\r\n\r\n\r\nparent_map = dict((c, p) for p in tree.getiterator() for c in p)'''\r\n","sub_path":"Day3/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385102117","text":"import numpy as np\nfrom src.alg.simplex.config import SimplexConfig\n\n\nclass LinearProgramProblem:\n def __init__(self, c, extr, A, signs, b, var_signs):\n self.c = c # c - goal function coefficients\n self.extr = extr\n self.A = A # A - constraint matrix\n self.signs = signs # signs - signs between A and b\n self.b = b # b - right part\n self.var_signs = var_signs # limits on values\n self.lim_cnt, self.var_cnt = self.A.shape\n self.conf_X = list([i] for i in range(self.var_cnt))\n\n def convert_canon_type(self):\n self.convert_extr()\n self.convert_limit_sign()\n self.convert_var_sign()\n self.convert_b_sign()\n return self.A, self.b, self.c\n\n def convert_extr(self):\n if self.extr == SimplexConfig.maximum:\n self.c = (-1) * self.c\n self.extr = SimplexConfig.minimum\n\n def convert_limit_sign(self):\n for ind, sign in enumerate(self.signs):\n if sign == SimplexConfig.loose_less or sign == SimplexConfig.loose_more:\n self.var_signs.append(SimplexConfig.positive)\n self.var_cnt += 1\n self.c = np.append(self.c, 0)\n self.A = np.append(\n self.A, np.zeros((self.lim_cnt, 1), float), axis=1)\n if sign == SimplexConfig.loose_less:\n self.A[ind][self.var_cnt - 1] = 1\n if sign == SimplexConfig.loose_more:\n self.A[ind][self.var_cnt - 1] = -1\n self.signs[ind] = SimplexConfig.equal\n\n def convert_var_sign(self):\n for ind, var in enumerate(self.var_signs):\n if var == SimplexConfig.any:\n self.var_signs[ind] = SimplexConfig.positive\n self.var_signs.append(SimplexConfig.positive)\n self.conf_X[ind].append(self.var_cnt)\n self.var_cnt += 1\n self.c = np.append(self.c, self.c[ind] * (-1))\n self.A = np.append(\n self.A, np.array(\n [[(-1) * self.A[j, ind]] for j in range(\n self.lim_cnt)], float), axis=1)\n\n def convert_b_sign(self):\n for ind, var in enumerate(self.b):\n if var < 0:\n 
self.b[ind] *= (-1)\n self.A[ind] = (-1) * self.A[ind]\n\n def sort_conditions(self):\n for ind, sign in enumerate(self.signs):\n if (sign == SimplexConfig.loose_less and self.extr == SimplexConfig.minimum) or \\\n (sign == SimplexConfig.loose_more and self.extr == SimplexConfig.maximum):\n self.A[ind] *= (-1)\n self.b[ind] *= (-1)\n if sign == SimplexConfig.loose_less:\n self.signs[ind] = SimplexConfig.loose_more\n elif sign == SimplexConfig.loose_more:\n self.signs[ind] = SimplexConfig.loose_less\n\n def find_init_X(self, X):\n init_X = np.zeros(len(self.conf_X))\n for ind, nums in enumerate(self.conf_X):\n if len(nums) == 1:\n init_X[ind] = X[nums[0]]\n elif len(nums) == 2:\n init_X[ind] = X[nums[0]] - X[nums[1]]\n return init_X\n","sub_path":"src/alg/simplex/lin_prog_problem.py","file_name":"lin_prog_problem.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"212074283","text":"import tensorflow as tf\nimport numpy as np\nfrom os import listdir\nimport os\nfrom scipy.misc import imread, imresize\nfrom time import time\n\n# /scratch/jzm218/landmark/test\n# /scratch/jzm218/landmark/index\n\nnetid = 'jzm218'\n\ndata_dir_query = 'scratch/'+netid+'/landmark/test'\ndata_dir_database = 'scratch/'+netid+'/landmark/index'\n\nfeature_dir_query = 'feature_query_np'\nfeature_dir_database = 'feature_test_np'\n\n# The images \nimgs = tf.placeholder(tf.float32, [None, 224, 224, 3])\n\nparameters = []\n\n# - - - - Convolutional Layers - - - - \n\n# zero-mean input \nwith tf.name_scope('preprocess') as scope:\n\tmean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n\timages = imgs-mean\n\n# conv1_1\nwith tf.name_scope('conv1_1') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv1_1 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\t\n# conv1_2\nwith tf.name_scope('conv1_2') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv1_1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv1_2 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\t\n# pool1\npool1 = tf.nn.max_pool(conv1_2,\n\t\t ksize=[1, 2, 2, 1],\n\t\t strides=[1, 2, 2, 1],\n\t\t padding='SAME',\n\t\t name='pool1')\n\t\t\n# conv2_1\nwith tf.name_scope('conv2_1') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv2_1 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\t\n# conv2_2\nwith tf.name_scope('conv2_2') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = 
tf.nn.conv2d(conv2_1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv2_2 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\t\n# pool2\npool2 = tf.nn.max_pool(conv2_2,\n\t\t\t\t\t ksize=[1, 2, 2, 1],\n\t\t\t\t\t strides=[1, 2, 2, 1],\n\t\t\t\t\t padding='SAME',\n\t\t\t\t\t name='pool2')\n\n# conv3_1\nwith tf.name_scope('conv3_1') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv3_1 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv3_2\nwith tf.name_scope('conv3_2') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv3_1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv3_2 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv3_3\nwith tf.name_scope('conv3_3') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv3_2, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv3_3 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# pool3\npool3 = tf.nn.max_pool(conv3_3,\n\t\t\t\t\t ksize=[1, 2, 2, 1],\n\t\t\t\t\t strides=[1, 2, 2, 1],\n\t\t\t\t\t padding='SAME',\n\t\t\t\t\t name='pool3')\n\t\t\t\t\t\n# conv4_1\nwith tf.name_scope('conv4_1') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv4_1 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv4_2\nwith tf.name_scope('conv4_2') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv4_1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv4_2 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv4_3\nwith tf.name_scope('conv4_3') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv4_2, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv4_3 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, 
biases]\n\n# pool4\npool4 = tf.nn.max_pool(conv4_3,\n\t\t\t\t\t ksize=[1, 2, 2, 1],\n\t\t\t\t\t strides=[1, 2, 2, 1],\n\t\t\t\t\t padding='SAME',\n\t\t\t\t\t name='pool4')\n\t\t\t\t\t\n# conv5_1\nwith tf.name_scope('conv5_1') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv5_1 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv5_2\nwith tf.name_scope('conv5_2') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv5_1, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv5_2 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# conv5_3\nwith tf.name_scope('conv5_3') as scope:\n\tkernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tconv = tf.nn.conv2d(conv5_2, kernel, [1, 1, 1, 1], padding='SAME')\n\tbiases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tout = tf.nn.bias_add(conv, biases)\n\tconv5_3 = tf.nn.relu(out, name=scope)\n\tparameters += [kernel, biases]\n\n# pool5\npool5 = tf.nn.max_pool(conv5_3,\n\t\t\t\t\t ksize=[1, 2, 2, 1],\n\t\t\t\t\t strides=[1, 2, 2, 1],\n\t\t\t\t\t padding='SAME',\n\t\t\t\t\t name='pool4')\n\n# - - - - Fully Connected Layers - - - - \n\t\t\t\t\t\n# fc1\nwith tf.name_scope('fc1') as scope:\n\tshape = int(np.prod(pool5.get_shape()[1:]))\n\tfc1w = tf.Variable(tf.truncated_normal([shape, 4096],\n\t\t\t\t\t\t\t\t\t\t\t\t dtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t\t stddev=1e-1), name='weights')\n\tfc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),\n\t\t\t\t\t\t trainable=True, name='biases')\n\tpool5_flat = tf.reshape(pool5, [-1, shape])\n\tfc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)\n\tfc1 = tf.nn.relu(fc1l)\n\tparameters += [fc1w, fc1b]\n\n\n# fc2\nwith tf.name_scope('fc2') as scope:\n\tfc2w = tf.Variable(tf.truncated_normal([4096, 4096],\n\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t\t\tstddev=1e-1), name='weights')\n\tfc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),\n\t\t\t\t\t\t\t\t\ttrainable=True, name='biases')\n\tfc2l = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n\tfc2 = tf.nn.relu(fc2l)\n\tparameters += [fc2w, fc2b]\n\t\n# fc3\nwith tf.name_scope('fc3') as scope:\n\tfc3w = tf.Variable(tf.truncated_normal([4096, 1000],\n\t\t\t\t\t\t\t\t\t\t\t\t\tdtype=tf.float32,\n\t\t\t\t\t\t\t\t\t\t\t\t\tstddev=1e-1), name='weights')\n\tfc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),\n\t\t\t\t\t\t\t\t\ttrainable=True, name='biases')\n\tfc3l = tf.nn.bias_add(tf.matmul(fc2, fc3w), fc3b)\n\tfc3 = tf.nn.relu(fc3l)\n\tparameters += [fc3w, fc3b]\n\t\ndef min(s):\n\treturn \"%.2f min\" % (s/60)\n\t\ndef load_save_features(sess, data_dir, dirs, feature_dir):\n\t# Creating the directory for query images \n\tif not os.path.exists(feature_dir):\n\t\tos.mkdir(feature_dir)\n\tn_dir = len(dirs)\n\tstep_dir = n_dir // 100\n\tfor i, d 
in enumerate(dirs):\n\t\tpath = feature_dir+'/'+d[:-4]\n\t\tif os.path.exists(path+'.npy'):\n\t\t\tcontinue\n\t\tt0 = time()\n\t\t# Reading the image \n\t\timg = imread(data_dir+'/'+d)\n\t\timg = imresize(img, (224, 224))\n\t\t# Getting, saving the feature vector\n\t\tfeature = sess.run(fc3, feed_dict={imgs: [img]})\n\t\tnp.save(path, feature)\t\n\t\t# Show progress\n\t\tif i % step_dir == 0:\n\t\t\tt1 = time()\n\t\t\tprint(\"Progress: %.2f%% Time:\" % (100*i/n_dir),min(t1-t0))\t\n\t\t\n\t\nwith tf.Session() as sess:\n\t# Loading weights\n\tprint('Load weights...')\n\tt0 = time()\n\tweight_file = 'vgg16_weights.npz'\n\tweights = np.load(weight_file)\n\tkeys = sorted(weights.keys())\n\tfor i, k in enumerate(keys):\n\t\tsess.run(parameters[i].assign(weights[k]))\n\tt1 = time()\n\tprint('Load complete. Time:',min(t1-t0))\n\t\n\t# Loading images \n\tdirs_query = listdir(data_dir_query)\n\tdirs_database = listdir(data_dir_database)\n\t\n\tn_query = len(dirs_query)\n\tn_database = len(dirs_database)\n\t\n\tstep_query = n_query // 100\n\tstep_database = n_database // 100\n\t\n\tprint('Load/save query images...')\n\tload_save_features(sess, data_dir_query, dirs_query, feature_dir_query)\n\tprint(\"Query images complete.\")\t\n\n\tprint(\"Load/save database images...\")\n\tload_save_features(sess, data_dir_database, dirs_database, feature_dir_database)\n\tprint('Database images complete.')\n\tprint(\"Saving complete.\")","sub_path":"vgg_kmans/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":10241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"95919766","text":"# https://www.deeplearningwizard.com/deep_learning/deep_reinforcement_learning_pytorch/dynamic_programming_frozenlake/\n# -*- coding: utf-8 -*-\nimport time\n\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport collections\nimport os\n\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\nnp.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n\nENV_NAME = \"CartPole-v1\"\nMODEL_DIR = \"models\"\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\n\n\nclass Qnet(nn.Module):\n def __init__(self):\n super(Qnet, self).__init__()\n self.fc1 = nn.Linear(4, 128) # fully connected\n self.fc2 = nn.Linear(128, 128)\n self.fc3 = nn.Linear(128, 2)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def get_action(self, obs, epsilon):\n out = self.forward(obs)\n\n coin = random.random() # 0.0과 1.0사이의 임의의 값을 반환\n if coin < epsilon:\n return random.randint(0, 1)\n else:\n return out.argmax().item() # argmax: 더 큰 값에 대응되는 인덱스 반환\n\n\nclass ReplayMemory:\n def __init__(self, buffer_limit=50000):\n self.memory = collections.deque(maxlen=buffer_limit)\n\n def put(self, transition):\n self.memory.append(transition)\n\n def size(self):\n return len(self.memory)\n\n def sample(self, n):\n mini_batch = random.sample(self.memory, n)\n observation_lst, action_lst, reward_lst, next_observation_lst, done_mask_lst = [], [], [], [], []\n\n for transition in mini_batch:\n observation, action, reward, next_observation, done_mask = transition\n observation_lst.append(observation)\n action_lst.append([action])\n reward_lst.append([reward])\n next_observation_lst.append(next_observation)\n done_mask_lst.append([done_mask])\n\n return torch.tensor(observation_lst, dtype=torch.float), 
torch.tensor(action_lst), \\\n torch.tensor(reward_lst, dtype=torch.float), torch.tensor(next_observation_lst, dtype=torch.float), \\\n torch.tensor(done_mask_lst)\n\n\ndef play(q, num_episodes):\n env = gym.make(ENV_NAME)\n\n for i in range(num_episodes):\n episode_reward = 0 # cumulative_reward\n\n # Environment 초기화와 변수 초기화\n observation = env.reset()\n env.render()\n\n episode_steps = 0\n\n while True:\n episode_steps += 1\n action = q.get_action(torch.from_numpy(observation).float(), epsilon=0.0)\n\n # action을 통해서 next_state, reward, done, info를 받아온다\n next_observation, reward, done, _ = env.step(action)\n env.render()\n\n episode_reward += reward # episode_reward 를 산출하는 방법은 감가률 고려하지 않는 이 라인이 더 올바름.\n observation = next_observation\n\n if done:\n break\n\n print(\"[EPISODE: {0}] EPISODE_STEPS: {1:3d}, EPISODE REWARD: {2:4.1f}\".format(\n i, episode_steps, episode_reward\n ))\n\n\ndef main_q_play():\n q = Qnet()\n model_params = torch.load(\n os.path.join(\".\", MODEL_DIR, \"dqn_CartPole-v1_500.0_0.0.pth\")\n )\n q.load_state_dict(model_params)\n play(q, num_episodes=3)\n\n\nif __name__ == \"__main__\":\n main_q_play()\n","sub_path":"dqn_model_load_and_play.py","file_name":"dqn_model_load_and_play.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176551550","text":"import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport time\n\nnp.set_printoptions(suppress=True)\n\n\ndef get_now_time(nowBJTStr=datetime.datetime.today().strftime('%Y%m%d%H')):\n '''\n 按当天日期和时间判断指导报起报时间\n 获取当前时间,判断当前时间是否超过中午12点,如果超过则返回当天日期+'2000',如果未超过则返回当天日期+'0800'\n :return: 起报时间\n '''\n nowHour = int(nowBJTStr[-2:]) # 字符串中获取小时并转为整数\n # 若当前时间在下午13时之前,则设置起报时间为0800,否则设置为2000\n if nowHour < 13:\n qiBaoShiJian = nowBJTStr[:-2] + '0800'\n nowTimeStr = nowBJTStr[:-2] + '08'\n timeTemp = datetime.datetime.strptime(nowTimeStr, '%Y%m%d%H')\n previousTime = timeTemp - datetime.timedelta(days=1)\n previousBeforeTime = timeTemp - datetime.timedelta(days=2)\n previousTimeS = previousTime.strftime('%Y%m%d%H')\n previousBeforeTimeS = previousBeforeTime.strftime('%Y%m%d%H')\n return qiBaoShiJian, nowTimeStr, previousTimeS, previousBeforeTimeS\n else:\n qiBaoShiJian = nowBJTStr[:-2] + '2000'\n nowTimeStr = nowBJTStr[:-2] + '20'\n timeTemp = datetime.datetime.strptime(nowTimeStr, '%Y%m%d%H')\n previousTime = timeTemp - datetime.timedelta(days=1)\n previousBeforeTime = timeTemp - datetime.timedelta(days=2)\n previousTimeS = previousTime.strftime('%Y%m%d%H')\n previousBeforeTimeS = previousBeforeTime.strftime('%Y%m%d%H')\n return qiBaoShiJian, nowTimeStr, previousTimeS, previousBeforeTimeS\n\n\ndef combineData():\n global arDataofDA, arDataofMB, arDataofZD, eEle\n arDataofDA = np.loadtxt(pathMDA + eEle + '\\\\' + timeList[1][2:] + '.024', skiprows=3).reshape(44114)\n arDataofMB = np.loadtxt(pahtMMB + eEle + '\\\\' + timeList[1][2:] + '.024', skiprows=3).reshape(44114)\n arDataofZD = np.loadtxt(pathMZD + timeList[1] + '_' + eEle + '.txt').reshape(44114)\n # print(pathMDA + eEle + '\\\\' + timeList[1][2:] + '.024')\n # print(pahtMMB + eEle + '\\\\' + timeList[1][2:] + '.024')\n # print(pathMZD + timeList[1] + '_' + eEle + '.txt')\n arDataBase = np.array([arDataofZD, arDataofDA])\n arDataCombine = np.append(arDataBase, arDataofMB)\n dim0 = arDataBase.shape\n arDataCombineR = arDataCombine.reshape(dim0[0] + 1, dim0[1])\n return arDataCombineR\n\n\ndef combineVR():\n global eEle, listMode, pathVeriRes, str1, 
timeList\n listFileName = []\n for eMode in listMode:\n fileName = '{}{}_10days_{}_{}.txt'.format(str1, timeList[-1], eEle, eMode)\n listFileName.append(fileName)\n listFileName = sorted(listFileName)\n arVRofDA, arVRofMB = np.loadtxt(pathVeriRes + listFileName[0]), np.loadtxt(pathVeriRes + listFileName[1])\n arVRZero = np.zeros((161, 274))\n arVRBase = np.array([arVRZero, arVRofDA])\n arVRCombine = np.append(arVRBase, arVRofMB)\n dim0 = arVRBase.shape\n arVRCombineR = arVRCombine.reshape(dim0[0] + 1, dim0[1], dim0[2])\n return arVRCombineR\n\n\nif __name__ == '__main__':\n pathVeriRes = 'F:\\\\work\\\\2020Correct\\\\data\\\\verificationResultCheck\\\\'\n pathMZD = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_md_24h_Grid\\\\'\n pathMDA = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_Result_DA\\\\'\n pahtMMB = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_Result_Grid_20\\\\'\n pathRes = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_Result_Combine\\\\'\n listElement = ['TMAX', 'TMIN']\n listMode = ['DA', 'MB']\n str1 = 'skilleDayforePoint_'\n\n dataStart = '2020033109'\n duringDayS = 350\n for eDay in range(duringDayS):\n getTime1 = datetime.datetime.strptime(dataStart, '%Y%m%d%H')\n getTime1 = getTime1 + datetime.timedelta(hours=12 * eDay)\n getTimeStrF = getTime1.strftime('%Y%m%d%H')\n timeList = get_now_time(getTimeStrF)\n for eEle in listElement:\n print('【正在执行%s集成运算】' % eEle)\n arDataCombineR = combineData()\n arVRCombineR = combineVR()\n ##############################\n listCoordinate, listResult = [], []\n for i in range(161):\n for j in range(274):\n listCoordinate.append(np.argmax(arVRCombineR[:, i, j]))\n for ki, kv in enumerate(listCoordinate):\n listResult.append(arDataCombineR[kv][ki])\n arrResult = np.array(listResult).reshape(161, 274)\n np.savetxt(pathRes + eEle + '\\\\' + timeList[1][2:] + '.024', arrResult, fmt='%.2f')\n print('【%s已完成】'%timeList[0])\n\n#\n#\n# print(arDataCombineR)\n# print('************')\n# arVRofDA = pathVeriRes\n\n\n# path1 = 'C:\\\\Users\\\\ybtd2\\\\Desktop\\\\新建文件夹\\\\'\n# path2 = 'F:\\\\work\\\\2020Correct\\\\data\\\\TM_Result_DA\\\\TMIN\\\\'\n# f1 = 'skilleDayforePoint_2020091508_10days_TMIN_DA.txt'\n# f2 = 'skilleDayforePoint_2020091508_10days_TMIN_MB.txt'\n# arDA = path1 + f1, skiprows=3)\n# arMB = np.loadtxt(path1 + f2, skiprows=3)\n# arDA1 = arDA.copy()\n# ar3 = arDA - arMB\n# arZero = np.zeros((161, 274))\n# arBase0 = np.array([arZero, arDA])\n#\n# ar1 = np.append(arBase0, arMB)\n# dim0 = arBase0.shape\n# ar2 = ar1.reshape(dim0[0] + 1, dim0[1], dim0[2])\n#\n# for k in range(3):\n# for i in range(161):\n# for j in range(274):\n# print(np.nanmax(ar2[:, i, j]))\n# print(np.argmax(ar2[:, i, j]))\n","sub_path":"Combine.py","file_name":"Combine.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"76771570","text":"from CRABClient.UserUtilities import config, getUsernameFromSiteDB\n\n\nconfig = config()\n\n\ninputDataset = \"@1@\"\ncmsRunFileName = \"UserCode/DTDPGAnalysis/test/RunTree_collisions_cfg.py\"\njobName = \"@3@\" #inputDataset[inputDataset.find(\"/\", 0)+1: inputDataset.find(\"/\", 1)]\n\n\n# Will create crab directory /crab_\nconfig.General.requestName = jobName\nconfig.General.workArea = \"crabJobs\"\n\n\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\nconfig.JobType.pluginName = \"Analysis\"\n\n\nconfig.JobType.maxMemoryMB = 3000\n\n\n# python file for cmsRun\nconfig.JobType.psetName = 
cmsRunFileName\n\n\n#config.JobType.allowUndistributedCMSSW = True\n\n\n# Other input files/directories (for example, the directory containing JEC .txt files)\nconfig.JobType.inputFiles = [\"luminosityLists\"]\n\n\nconfig.Data.inputDataset = inputDataset\nconfig.Data.inputDBS = \"global\"\n\n\n# Preferable for DATA\n#config.Data.splitting = \"LumiBased\"\n#config.Data.unitsPerJob = 1\n\nconfig.Data.splitting = \"EventAwareLumiBased\"\nconfig.Data.unitsPerJob = 1000\n#config.Data.totalUnits = 10000000\n\n#config.Data.lumiMask = \"luminosityLists/Cert_271036-284044_13TeV_PromptReco_Collisions16_JSON.txt\"\nconfig.Data.lumiMask = \"luminosityLists/Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt\"\n\n\n\n# Preferable for MC\n#config.Data.splitting = \"FileBased\"\n#config.Data.unitsPerJob = 1\n#config.Data.totalUnits = 200\n\n\nconfig.Data.outLFNDirBase = \"/store/user/%s/\" % (getUsernameFromSiteDB())\nconfig.Data.publication = False\n#config.Data.publishDataName = \"May2015_Data_analysis\"\n\n\nconfig.Site.storageSite = \"T2_IN_TIFR\"\n#config.Site.whitelist = [\"T2_IN_TIFR\"]\n","sub_path":"CMSSW_9_2_11/src/crabConfig.py","file_name":"crabConfig.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225825122","text":"# encoding: UTF-8\nfrom llampexwidgets import LlItemView\nfrom PyQt4 import QtGui, QtCore, uic\nimport time\n\nclass RecordScript(object):\n def __init__(self, form):\n self.form = form\n self.rpc = self.form.prjconn\n \n self.db = self.rpc.qtdb\n self.table = self.form.actionobj.table\n self.ui = form.ui\n self.model = form.model\n self.tmd = form.tmd\n self.row = form.row\n \n \n","sub_path":"projects/erp/billing/warehouse/warehouse/record/scripts/articulos.py","file_name":"articulos.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"219834224","text":"from flask import request, abort, g\nfrom flask.ext.restful import Resource, Api, reqparse, marshal\nfrom flask import render_template\n\nfrom config import *\nfrom models.item import Item\nfrom models.whim import Whim\nfrom models.user import User\n\nfrom functools import wraps\nimport re\n\n\napi = Api(app)\n\nuser_parser = reqparse.RequestParser()\nuser_parser.add_argument('username', type=str, required=True)\nuser_parser.add_argument('password', type=str, required=True)\n\nitem_parser = reqparse.RequestParser()\nitem_parser.add_argument('title', type=str, required=True)\nitem_parser.add_argument('completed', type=bool, required=True)\n\nwhim_parser = reqparse.RequestParser()\nwhim_parser.add_argument('title', type=str, required=True)\nwhim_parser.add_argument('completed', type=bool, required=True)\n\nclass RegisterAPI(Resource):\n def post(self):\n args = user_parser.parse_args()\n username = args['username']\n password = args['password']\n # check if username already exists\n if User.get(username) is not None:\n abort(403)\n user = User(username,password)\n db.session.add(user)\n db.session.commit()\n token = user.generate_auth_token()\n return { 'name':user.name,'token': token }\n\nclass LoginAPI(Resource):\n def post(self):\n args = user_parser.parse_args()\n username = args['username']\n password = args['password']\n # get user\n user = User.get(username)\n if not user or not user.check_password(password):\n abort(403)\n # generate token\n token = user.generate_auth_token()\n return { 'name':user.name,'token': token 
}\n\ndef authenticate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if 'Authorization' not in request.headers:\n abort(401)\n user = User.verify_auth_token(request.headers['Authorization'])\n if not user:\n abort(401)\n g.user = user\n return func(*args, **kwargs)\n return wrapper\n\ndef current_user():\n return g.user\n\nclass AuthResource(Resource):\n method_decorators = [authenticate]\n \nclass UserProfileAPI(AuthResource):\n # this is private\n def get(self):\n # get the user\n user = g.user\n\n #get data from header, and return profile info\n\n return 200\n def post(self):\n # get the user\n user = g.user\n\n #get data from header, and save\n\n return 200\nclass WhimsAPI(AuthResource):\n # @marshal_with(Item.fields())\n def get(self):\n #use header parameters to know what whims to search for\n #example if createdBy:\"Luke Dickinson\",city:\"provo\" then we would use this\n #to filter for whims that were created by Luke Dickinson and in provo\n #possible parameters need to be defined though\n #request.headers['createdBy']\n user = g.user\n items = [i for i in user.items]\n if items:\n return {'whims': marshal(items,Item.fields())}\n return {'whims': []}\n \nclass JoinWhimAPI(AuthResource):\n # this is private\n def post(self, id):\n # get the user\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n\n #Join the whim, add me to the whim guest list \n\n return 200\n\nclass LeaveWhimAPI(AuthResource):\n # this is private\n def post(self, id):\n # get the user\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n\n #Leave the whim, remove me from the whim guest list \n\n return 200\n \nclass WhimAPI(AuthResource):\n # this is private\n\n def get(self, id):\n # get the user\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n\n return marshal(whim,Whim.fields())\n\n #create\n def post(self, id):\n args = whim_parser.parse_args()\n title = args['title']\n completed = args['completed']\n # get the user\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n if not whim.user == user:\n abort(403)\n whim.title = title\n whim.completed = completed\n db.session.add(whim)\n db.session.commit()\n return marshal(whim,Whim.fields())\n \n #update\n def put(self, id):\n args = whim_parser.parse_args()\n title = args['title']\n completed = args['completed']\n # get the user\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n if not whim.user == user:\n abort(403)\n whim.title = title\n whim.completed = completed\n db.session.commit()\n return marshal(whim,Whim.fields())\n \n def delete(self,id):\n user = g.user\n whim = Whim.get(id)\n if not whim:\n abort(403)\n if not whim.user == user:\n abort(403)\n db.session.delete(whim)\n db.session.commit()\n return 200\n \nclass ItemsAPI(AuthResource):\n # @marshal_with(Item.fields())\n def get(self):\n # get the user\n user = g.user\n items = [i for i in user.items]\n if items:\n return {'items': marshal(items,Item.fields())}\n return {'items': []}\n\n def post(self):\n # get the user\n user = g.user\n import json\n r = json.loads(request.data)\n title = r['item']['title']\n item = Item(title)\n item.user = user\n db.session.add(item)\n db.session.commit()\n return {'item': marshal(item,Item.fields())}\n\nclass ItemAPI(AuthResource):\n def get(self, 
id):\n # get the user\n user = g.user\n item = Item.get(id)\n if not item:\n abort(403)\n if not item.user == user:\n abort(403)\n return marshal(item,Item.fields())\n\n def put(self, id):\n args = item_parser.parse_args()\n title = args['title']\n completed = args['completed']\n # get the user\n user = g.user\n item = Item.get(id)\n if not item:\n abort(403)\n if not item.user == user:\n abort(403)\n item.title = title\n item.completed = completed\n db.session.add(item)\n db.session.commit()\n return marshal(item,Item.fields())\n\n def delete(self,id):\n user = g.user\n item = Item.get(id)\n if not item:\n abort(403)\n if not item.user == user:\n abort(403)\n db.session.delete(item)\n db.session.commit()\n return 200\n\n\n\nclass CommandTester(Resource):\n def get(self):\n return app.send_static_file('commandTester.html')\n\n\nclass CommandTesterJS(Resource):\n def get(self):\n return app.send_static_file('commandTester.js')\n\n\napi.add_resource(RegisterAPI, '/users/register')\napi.add_resource(LoginAPI, '/users/login')\napi.add_resource(UserProfileAPI, '/userProfile')\napi.add_resource(JoinWhimAPI, '/joinWhim/<int:id>')\napi.add_resource(LeaveWhimAPI, '/leaveWhim/<int:id>')\napi.add_resource(WhimsAPI, '/whims')\napi.add_resource(WhimAPI, '/whim/<int:id>',endpoint='whim')\napi.add_resource(ItemsAPI, '/items')\napi.add_resource(ItemAPI, '/items/<int:id>',endpoint='item')\napi.add_resource(CommandTester, '/CommandTester/',)\napi.add_resource(CommandTesterJS, '/CommandTester/CommandTester.js',)\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(host='0.0.0.0')\n","sub_path":"appold.py","file_name":"appold.py","file_ext":"py","file_size_in_byte":7806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177954739","text":"import requests as req\n\n\nasync def add_prediction_feedback(payload, message):\n\n if payload.emoji.name == '✔️':\n vote = 1\n elif payload.emoji.name == '❌':\n vote = -1\n else:\n #Thanks for the enthusiasm, but not what we're looking for.\n return\n\n prediction_data = extract_prediction(message)\n\n prediction = {\n \"discord_id\": payload.user_id,\n \"name\": prediction_data[0],\n \"prediction\": prediction_data[1],\n \"confidence\": prediction_data[2],\n \"vote\": vote\n }\n\n endpoint = \"https://www.osrsbotdetector.com/api/discord/predictionfeedback/\"\n\n request = req.post(endpoint, json=prediction)\n\n return\n\n\ndef extract_prediction(message):\n name_substring = \"+ Name: \"\n prediction_substring = \"Prediction: \"\n confidence_substring = \"Confidence: \"\n\n message_lines = message.content.splitlines()\n\n name_line = [i for i in message_lines if name_substring in i]\n prediction_line = [i for i in message_lines if prediction_substring in i]\n confidence_line = [i for i in message_lines if confidence_substring in i]\n\n name = name_line[0].split(name_substring)[1]\n prediction = prediction_line[0].split(prediction_substring)[1]\n confidence = confidence_line[0].split(confidence_substring)[1]\n\n return name, prediction, float(confidence)","sub_path":"reaction_commands.py","file_name":"reaction_commands.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330800710","text":"import os \nfrom ._django import BASE_DIR\nfrom django.utils.translation import gettext_lazy as _\nTIME_ZONE = 'UTC' #'Europe/Kiev'\nUSE_I18N = True\n# USE_L10N = False \nUSE_L10N = True \nUSE_TZ = True\nLANGUAGES = [\n ('uk', _('Українська')),\n ('en', 
_('Англійська')),\n ('ru', _('Російська')),\n # ('en-us', ('en')),\n]\nLANGUAGE_CODE = 'uk' \nMODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\nROSETTA_MESSAGES_PER_PAGE = 100\nROSETTA_ENABLE_TRANSLATION_SUGGESTIONS = True\n# YANDEX_TRANSLATE_KEY = 'trnsl.1.1.20200327T033955Z.7e48435c2547f277.fffb24ccd1d9ddde9374eb10b4d05a2157ef725c'\nAZURE_CLIENT_SECRET = \"fa4b9549dca24df88eeb6c58ada57bed\"\n\nDATE_FORMAT = \"d-m-Y H:M\"\n\n","sub_path":"core/default_settings/_translation.py","file_name":"_translation.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"94895550","text":"import os\r\nimport common.np as np\r\nfrom common.config import GPU\r\n\r\nif GPU:\r\n import cupy\r\nimport numpy\r\n\r\ndataset_dir = os.path.dirname(os.path.abspath(__file__))\r\ndata_path = os.path.join(dataset_dir, '../../1_data')\r\n\r\ndata_file = os.path.join(data_path, 'train_data.npy')\r\nlabel_file = os.path.join(data_path, 'train_label.npy')\r\n\r\ntrain_num = 6000\r\nimg_dim = (1, 28, 28)\r\nimg_size = 784\r\n\r\ndef load_katakana(normalize=True, flatten=False, shuffle=True, devide=5):\r\n\r\n \"\"\"Load the katakana dataset\r\n\r\n Parameters\r\n ----------\r\n normalize: normalize the image pixel values to the range 0.0~1.0\r\n flatten: whether to flatten each image into a one-dimensional array\r\n shuffle: whether to shuffle the data\r\n devide: how to split the data into training data; if 5, 4/5 becomes training data and 1/5 becomes test data\r\n\r\n Returns\r\n -------\r\n (train images, train labels), (test images, test labels)\r\n \"\"\"\r\n # load the data\r\n data = numpy.load(data_file)\r\n label = numpy.load(label_file)\r\n\r\n # shuffle if requested\r\n if shuffle:\r\n indexes = numpy.arange(len(data))\r\n numpy.random.shuffle(indexes)\r\n data = data[indexes]\r\n label = label[indexes]\r\n\r\n if GPU:\r\n # transfer to the GPU\r\n import cupy\r\n data = cupy.array(data)\r\n label = cupy.array(label)\r\n\r\n if normalize:\r\n data /= 255.0\r\n\r\n if flatten:\r\n data = data.reshape(-1, img_size)\r\n\r\n x_train = data\r\n t_train = label\r\n x_test = None\r\n t_test = None\r\n\r\n if devide != 0:\r\n size_of_test = len(data) // devide\r\n size_of_split = size_of_test * (devide-1)\r\n x_train = data[:size_of_split]\r\n t_train = label[:size_of_split]\r\n x_test = data[size_of_split:]\r\n t_test = label[size_of_split:]\r\n\r\n return (x_train, t_train), (x_test, t_test)\r\n\r\n\r\ndef t_main():\r\n (x_train, t_train), (x_test, t_test) = load_katakana()\r\n print('x_train', x_train.shape, 't_test', t_test.shape)\r\n print('x_test', x_test.shape, 't_test', t_test.shape)\r\n\r\n print('-------------- t_train-------------- ')\r\n print(t_train[:5])\r\n\r\n print('-------------- t_test-------------- ')\r\n print(t_test[:5])\r\n\r\n\r\nif __name__ == '__main__':\r\n t_main()\r\n\r\n","sub_path":"dataset/katakana.py","file_name":"katakana.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532134810","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\nALIAS = 'revision-if-match'\nIS_SHIM_EXTENSION = True\nIS_STANDARD_ATTR_EXTENSION = False\nNAME = 'If-Match constraints based on revision_number'\nAPI_PREFIX = ''\nDESCRIPTION = (\"Extension indicating that If-Match based on revision_number \"\n \"is supported.\")\nUPDATED_TIMESTAMP = '2016-12-11T00:00:00-00:00'\nRESOURCE_ATTRIBUTE_MAP = {}\nSUB_RESOURCE_ATTRIBUTE_MAP = {}\nACTION_MAP = {}\nREQUIRED_EXTENSIONS = ['standard-attr-revisions']\nOPTIONAL_EXTENSIONS = []\nACTION_STATUS = {}\n","sub_path":"neutron_lib/api/definitions/revisionifmatch.py","file_name":"revisionifmatch.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45818128","text":"#!/usr/bin/env python3\n\nimport myscript4\n\n# Test case 1\n# Testing basic step function\nprint(\"Test case 1\")\nmyscript4.thing1.setFilePath(\"/myscript2.py\")\nmyscript4.start()\n\nfor i in range(15):\n myscript4.step()\nmyscript4.quit()\nprint(\"should be at linenumber 22 --> e = 5\")","sub_path":"CSE_485_Programatic_Tracer5/CodeTogether/Testcase1.py","file_name":"Testcase1.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426717820","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 18 00:29:04 2016\r\n\r\n@author: Bismillah Jan\r\n\"\"\"\r\n\r\n\"\"\"\r\nreadData function: read data from the file \r\n\"\"\"\r\ndef readData():\r\n node1=[]\r\n node2=[]\r\n f=open(\"graph.txt\", 'r')\r\n #splitting the Data\r\n while True:\r\n line=f.readline()\r\n if not line: \r\n break\r\n else:\r\n b=line.split() \r\n if b!=[]:\r\n c,d=b\r\n node1.append(c)\r\n node2.append(d)\r\n return node1, node2\r\n \r\n\"\"\"\r\n**********************************************\r\n\"\"\"\r\ndef makeAdjList(node1, node2):\r\n adjList={}\r\n value=[]\r\n for i in range(len(node1)):\r\n key=node1[i]\r\n value.append([])\r\n for j in range(len(node2)):\r\n if(key==node1[j]):\r\n value[i].append(node2[j])\r\n if key not in adjList:\r\n adjList[key]=value[i] \r\n return adjList\r\n\r\n \r\n","sub_path":"A01_DFS and BFS rommania map/Adj_List_classes.py","file_name":"Adj_List_classes.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"589353620","text":"from django.shortcuts import render, redirect\nfrom .forms import AccountCreateForm, UserCreateForm\nfrom .models import Account\nfrom django.contrib.auth.decorators import login_required \nfrom django.contrib.auth import authenticate, login, logout\n\n\ndef index(request):\n return render(request,'account/index.html')\n\ndef register_user(request):\n user_form = UserCreateForm(request.POST or None)\n account_form = AccountCreateForm(request.POST or None)\n\n if request.method == \"POST\" and user_form.is_valid() and account_form.is_valid():\n # User model\n user = user_form.save(commit=False)\n user.is_active = True\n user.save()\n\n # Account model (tie it to the user created above)\n account = account_form.save(commit=False)\n # if already logged in, this would be request.user\n account.user = user\n # account.image = request.FILES.get('image', None)\n account.save()\n # needed when using commit=False with a many-to-many field\n # account_form.save_m2m()\n \n return redirect('login')\n\n\n context = {\n 'user_form':user_form,\n 'profile_form':account_form,\n }\n return render(request,'account/user_create.html', context)\n\ndef loginfunc(request):\n if request.method == 
'POST':\n username1 = request.POST['username']\n password1 = request.POST['password']\n user = authenticate(request, username=username1, password=password1)\n \n\n # if the user exists\n if user is not None:\n login(request, user)\n return redirect('account_detail', pk=user.pk)\n else:\n return render(request, 'account/login.html',{'error':'Login failed'})\n return render(request,'account/login.html')\n\n@login_required\ndef listfunc(request):\n object_list = Account.objects.all()\n return render(request,'account/list.html', {'object_list':object_list})\n\ndef logoutfunc(request):\n logout(request)\n return redirect('login')\n\ndef detailfunc(request, pk):\n object = Account.objects.get(pk=pk)\n return render(request, 'account/account_detail.html', {'object':object})","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"403321106","text":"import socket\n\nhost_name = \"localhost\"\nudp_port = 80\n\n#create socket object\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nclient.sendto(b\"aaaaabbbbbbb\", (host_name, udp_port))\n\n#receive some data\ndata, addr = client.recvfrom(4096)\nprint(data)\nprint(addr)\n","sub_path":"udp-client.py","file_name":"udp-client.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"139600033","text":"from rest_framework.decorators import api_view\nfrom dashboardviews.models import Order, Product, Customer\nfrom time import gmtime, strftime\nfrom random import randint\nfrom rest_framework.response import Response\n# b = Blog(name='Beatles Blog', tagline='All the latest Beatles news.')\n# b.save()\n@api_view(['POST'])\ndef GetOrder(request):\n data = request.data\n client = data[0][\"costumer\"]\n mycustumer = Customer(first_name=client[\"first_name\"], last_name=client[\"last_name\"], address=client[\"address\"], city=client[\"city\"], email=client[\"email\"], phone=client[\"phone\"], state=client[\"state\"])\n mycustumer.save()\n for singleproduct in data[0][\"cart\"]:\n myproduct = Product.objects.get(id=singleproduct[\"product_id\"])\n myid = '#'+str(randint(11,99)) + strftime(\" %a, %d %b %Y %H:%M:%S\", gmtime())\n myorder = Order(id = myid, customer = mycustumer, product = myproduct,quantity=singleproduct[\"quantity\"], price=myproduct.price , total= data[0][\"total\"])\n myorder.save()\n return Response(data)","sub_path":"online_store/api/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556547347","text":"import pygame\r\nfrom player import *\r\n\r\nclass Game:\r\n def __init__(self, id):\r\n self.id = id\r\n self.players = [Player(0), Player(1)]\r\n self.p1Play = False\r\n self.p2Play = False\r\n self.bothReady = False\r\n self.p1win = 0\r\n self.p2win = 0\r\n self.quit = False\r\n\r\n def play(self, p, move):\r\n self.players[int(p)].setMove(move)\r\n if p == 0:\r\n self.p1Play = True\r\n else:\r\n self.p2Play = True\r\n if self.p2Play and self.p1Play:\r\n self.checkWinner()\r\n else:\r\n print(\"Waiting for other player ....\")\r\n\r\n def getP1win(self):\r\n return str(self.p1win)\r\n\r\n def getP2win(self):\r\n return str(self.p2win)\r\n\r\n def checkWinner(self):\r\n winner = -1\r\n if self.players[0].move == \"R\" and self.players[1].move == \"S\":\r\n winner = 0\r\n elif 
self.players[0].move == \"S\" and self.players[1].move == \"R\":\r\n winner = 1\r\n elif self.players[0].move == \"P\" and self.players[1].move == \"R\":\r\n winner = 0\r\n elif self.players[0].move == \"R\" and self.players[1].move == \"P\":\r\n winner = 1\r\n elif self.players[0].move == \"S\" and self.players[1].move == \"P\":\r\n winner = 0\r\n elif self.players[0].move == \"P\" and self.players[1].move == \"S\":\r\n winner = 1\r\n\r\n if winner == 0:\r\n self.p1win += 1\r\n elif winner == 1:\r\n self.p2win += 1\r\n\r\n return winner\r\n\r\n def getPlayerMove(self, p):\r\n return self.players[p].move\r\n\r\n def bothPlayed(self):\r\n return self.p1Play and self.p2Play\r\n\r\n def reset(self):\r\n self.p1Play = False\r\n self.p2Play = False\r\n\r\n\r\n def __str__(self):\r\n return str(\"Game id:\" + str(self.id))","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578749088","text":"# Link: https://www.geeksforgeeks.org/find-winner-election-votes-represented-candidate-names/\n# IsDone: 0\ndef CheckWhoWinsTheElection(A):\n\tA.sort()\n\tcounter = maxCounter = 0\n\tcandidate = maxCandidate = 0\n\t\n\tfor i in range(0, len(A)):\n\t\tif(A[i] == candidate):\n\t\t\tcounter += 1\n\t\telse:\n\t\t\tcounter = 1\n\t\t\tcandidate = A[i]\n\n\t\tif(counter > maxCounter):\n\t\t\tmaxCandidate = A[i]\n\t\t\tmaxCounter = counter\n\n\tprint (maxCandidate, \"appeared \", maxCounter, \" times\")\n\n\t\t\nA = [2, 3, 2, 1, 2, 2, 3, 2, 2]\nCheckWhoWinsTheElection(A)\nA = [3, 3, 3, 2, 2, 3]\nCheckWhoWinsTheElection(A)\n","sub_path":"src/3.6Sorting/CheckWhoWinsTheElectionWithOutSpace.py","file_name":"CheckWhoWinsTheElectionWithOutSpace.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70412164","text":"from mercado_bitcoin.writers import DataWriter\nfrom mercado_bitcoin.ingestors import DataIngestor\nimport datetime\nimport pytest\nfrom unittest.mock import mock_open, patch\n\n@pytest.fixture\n@patch(\"mercado_bitcoin.ingestors.DataIngestor.__abstractmethods__\", set())\ndef data_ingestor_fixture():\n return DataIngestor(\n coins=[\"TEST\", \"VINI\"],\n writer=DataWriter,\n default_start_date=datetime.date(2021,6,21)\n )\n\n@patch(\"mercado_bitcoin.ingestors.DataIngestor.__abstractmethods__\", set())\nclass TestIngestors:\n\n def test_checkpoint_filename(self, data_ingestor_fixture):\n actual = data_ingestor_fixture._checkpoint_filename\n expected = \"DataIngestor.checkpoint\"\n assert actual == expected\n\n\n def test_load_checkpoint_no_checkpoint(self, data_ingestor_fixture):\n actual = data_ingestor_fixture._load_checkpoint()\n expected = datetime.date(2021,6,21)\n assert actual == expected\n \n @patch(\"builtins.open\", new_callable=mock_open, read_data=\"2021-06-25\")\n def test_load_checkpoint_existing_checkpoint(self, mock, data_ingestor_fixture):\n actual = data_ingestor_fixture._load_checkpoint()\n expected = datetime.date(2021,6,25)\n assert actual == expected\n\n @patch(\"mercado_bitcoin.ingestors.DataIngestor._write_checkpoint\", return_value = None)\n def test_update_checkpoint_updated(self, mock, data_ingestor_fixture):\n data_ingestor_fixture._update_checkpoint(value=datetime.date(2019,1,1))\n expected = datetime.date(2019,1,1)\n assert data_ingestor_fixture._checkpoint == expected\n \n @patch(\"mercado_bitcoin.ingestors.DataIngestor._write_checkpoint\", 
return_value = None)\n def test_update_checkpoint_written(self, mock, data_ingestor_fixture):\n data_ingestor_fixture._update_checkpoint(value=datetime.date(2019,1,1))\n mock.assert_called_once()\n \n @patch(\"builtins.open\", new_callable=mock_open, read_data=\"2021-06-25\")\n @patch(\"mercado_bitcoin.ingestors.DataIngestor._checkpoint_filename\", return_value = \"foobar.checkpoint\")\n def test_write_checkpoint(self, mock_checkpoint_filename, mock_open_file, data_ingestor_fixture):\n data_ingestor_fixture._write_checkpoint()\n mock_open_file.assert_called_with(mock_checkpoint_filename, 'w')","sub_path":"modulo5/tests/test_ingestores.py","file_name":"test_ingestores.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"451766905","text":"import argparse\nimport xml.etree.cElementTree as etree\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef processMedlineFolder(medlineFolder,outFolder):\n\t\"\"\"Basic function that iterates through abstracts in a medline file, do a basic word count and save to a file\n\n\tArgs:\n\t\tmedlineFolder (folder): Medline XML folder containing abstracts\n\t\toutFolder (folder): Folder to save output data to\n\tReturns:\n\t\tNothing\n\n\t\"\"\"\n\tabstractCount = 0\n\n\t# List of all files in the directory\n\tfiles = [ f for f in listdir(medlineFolder) if isfile(join(medlineFolder, f)) ]\n\t\n\t# Filter for only XML files\n\tfiles = sorted([ f for f in files if f.endswith('xml') ])\n\n\toutfile = join(outFolder,\"countWords.txt\")\n\twith open(outfile, \"a\") as result:\n\t\t# Iterate over all files\n\t\tfor f in files:\n\t\t\tprint(\"Processing %s\" % f)\n\t\t\tfullpath = join(medlineFolder,f)\n\t\t\t# Iterate through the XML file and stop on each MedlineCitation\n\t\t\tfor event, elem in etree.iterparse(fullpath, events=('start', 'end', 'start-ns', 'end-ns')):\n\t\t\t\tif (event=='end' and elem.tag=='MedlineCitation'):\n\n\t\t\t\t\t# Let's get the PMID and Abstract elements from the XML\n\t\t\t\t\tpmidElements = elem.findall('./PMID')\n\t\t\t\t\tabstractElements = elem.findall('./Article/Abstract/AbstractText')\n\n\t\t\t\t\tif len(pmidElements) != 1 or len(abstractElements) != 1:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t# Pull the values of the PMID and abstract elements\n\t\t\t\t\tpmid = pmidElements[0].text\n\t\t\t\t\tabstract = abstractElements[0].text\n\n\t\t\t\t\tif not abstract is None:\n\t\t\t\t\t\t# Do a very basic word count\n\t\t\t\t\t\twordCount = len(abstract.split())\n\n\t\t\t\t\t\t# Prepare and save output to file\n\t\t\t\t\t\tline = \"%s\\t%d\\n\" % (pmid,wordCount)\n\n\t\t\t\t\t\tresult.write(line)\n\n\t\t\t\t\t\tabstractCount += 1\n\n\tprint(\"%d abstracts processed\" % abstractCount)\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Little toy example to \"process\" a Medline abstract file and gives naive word counts for each abstract')\n\tparser.add_argument('-i',required=True,help='Medline folder to process')\n\tparser.add_argument('-o',required=True,help='Output folder for word-counts')\n\n\targs = parser.parse_args()\n\n\tprocessMedlineFolder(args.i,args.o)\n\n","sub_path":"server/tools/CountWords/0.1/CountWords.py","file_name":"CountWords.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"234958933","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\nimport 
os\nfrom pyimagesearch.nn.conv.lenet import LeNet\nfrom pyimagesearch.nn.utils.captchahelper import preprocess\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom keras.preprocessing.image import img_to_array\nfrom keras.optimizers import SGD\nfrom imutils import paths\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-d', '--dataset', required=True, help='path to input dataset')\n ap.add_argument('-m', '--model', required=True, help='path to output model')\n args = vars(ap.parse_args())\n\n data = []\n labels = []\n\n for imagePath in paths.list_images(args['dataset']):\n image = cv2.imread(imagePath)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = preprocess(image, 28, 28)\n image = img_to_array(image)\n data.append(image)\n labels.append(imagePath.split(os.path.sep)[-2])\n\n data = np.array(data, dtype='float') / 255.0\n labels = np.array(labels)\n\n (trainX, testX, trainY, testY) = train_test_split(data,\n labels,\n test_size=0.25,\n random_state=42)\n\n lb = LabelBinarizer()\n trainY = lb.fit_transform(trainY)\n testY = lb.fit_transform(testY)\n\n opt = SGD(lr=0.01)\n num_classes = np.unique(labels).shape[0]\n model = LeNet.build(width=28, height=28, depth=1, classes=num_classes)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=20, verbose=1)\n\n predictions = model.predict(testX, batch_size=32)\n\n print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1),\n target_names=[str(x) for x in lb.classes_]))\n\n model.save(args['model'])\n\n plt.style.use('ggplot')\n plt.figure()\n plt.plot(np.arange(0, 20), H.history['loss'], label='train_loss')\n plt.plot(np.arange(0, 20), H.history['val_loss'], label='val_loss')\n plt.plot(np.arange(0, 20), H.history['accuracy'], label='acc')\n plt.plot(np.arange(0, 20), H.history['val_accuracy'], label='val_acc')\n\n plt.title('Training Loss and Accuracy')\n plt.xlabel('Epoch #')\n plt.ylabel('Loss/Accuracy')\n plt.legend()\n plt.show()\n","sub_path":"chapter_21_captcha/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"428407180","text":"import genius_api\nimport requests\nfrom bs4 import BeautifulSoup\n\ntoken = genius_api.api['access_token']\ndef get_lyrics(song_title):\n \n import requests\n from bs4 import BeautifulSoup\n artist_name = \"Wu Tang Clan\"\n base_url = 'https://api.genius.com'\n headers = {'Authorization': 'Bearer ' + token}\n search_url = base_url + '/search'\n data = {'q': song_title + ' ' + artist_name}\n response = requests.get(search_url, data=data, headers=headers)\n\n json = response.json()\n # remote_song_info = None\n\n url_list = []\n\n for hit in json['response']['hits']:\n url_response = hit['result']['url']\n url_list.append(url_response)\n\n url = url_list[0]\n\n page = requests.get(url)\n html = BeautifulSoup(page.text, 'html.parser')\n lyrics = html.find('div', class_='lyrics').get_text()\n return lyrics","sub_path":"barchart/Python/lyrics.py","file_name":"lyrics.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325293141","text":"import os\nfrom glob import glob\n\nfrom run_main import 
functions\nimport apps.app as backapp\nfrom apps.database import Base, engine, Session, Eventnames\n\ndef create_new_database():\n for k in ['wav', 'vocal', 'inst']:\n audio_list = glob('./audio/{0}/[0-9]*.wav'.format(k))\n print(audio_list)\n for audio in audio_list:\n os.remove(audio)\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n backapp.init_db()\n\ndef create_eventnames():\n session = Session()\n # collect the existing event names as plain strings\n names = [n.event_name for n in session.query(Eventnames).all()]\n for func in functions:\n if func not in names:\n session.add(Eventnames(event_name=func))\n session.commit()\n session.close()\n\ncreate_new_database()\ncreate_eventnames()\n","sub_path":"database_init.py","file_name":"database_init.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391610104","text":"import numpy as np # linear algebra\nimport pandas as pd # \nfrom wordcloud import WordCloud, STOPWORDS\nimport datetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns # visualization tool\ndef get_month(x):\n try:\n return month_order[int(str(x).split('-')[1]) - 1]\n except:\n return np.nan\ndef get_day(x):\n try:\n year, month, day = (int(i) for i in x.split('-')) \n \n answer = datetime.date(year, month, day).weekday()\n return day_order[answer]\n except:\n return np.nan\ndef get_director(x):\n for i in x:\n if i['job'] == 'Director':\n return i['name']\n return np.nan\ndef get_actor(x):\n if(x!=[]):\n return x[0]['name']\n else:\n return np.nan\n #for acteur in x:\n # act.append(acteur['name'])\n #return act\ndef get_actorArray(x):\n ligne_acteur=np.zeros(len(actors))\n for acteur in x:\n ligne_acteur[actors[acteur]]=1\n return ligne_acteur\n\n\ndef count_word(df, ref_col, liste):\n keyword_count = dict()\n for s in liste: keyword_count[s] = 0\n for liste_keywords in df[ref_col].str.split('|'): \n if type(liste_keywords) == float and pd.isnull(liste_keywords): continue \n for s in [s for s in liste_keywords if s in liste]: \n if pd.notnull(s): keyword_count[s] += 1\n #______________________________________________________________________\n # convert the dictionary in a list to sort the keywords by frequency\n keyword_occurences = []\n for k,v in keyword_count.items():\n keyword_occurences.append([k,v])\n keyword_occurences.sort(key = lambda x:x[1], reverse = True)\n return keyword_occurences, keyword_count\n\ndef plotTage(keyocc):\n\ttrunc_occurences = keyocc[0:50]\n\tfig = plt.figure(1, figsize=(18,13))\n\tax2 = fig.add_subplot(2,1,2)\n\ty_axis = [i[1] for i in trunc_occurences]\n\tx_axis = [k for k,i in enumerate(trunc_occurences)]\n\tx_label = [i[0] for i in trunc_occurences]\n\tplt.xticks(rotation=85, fontsize = 15)\n\tplt.yticks(fontsize = 15)\n\tplt.xticks(x_axis, x_label)\n\tplt.ylabel(\"Nb. 
of occurences\", fontsize = 18, labelpad = 10)\n\tax2.bar(x_axis, y_axis, align = 'center', color='g')\n\t#_______________________\n\tplt.title(\"Tags\",bbox={'facecolor':'k', 'pad':5},color='w',fontsize = 25)\n\tplt.show()\n\n\ndef TopTen(theList):\n TopTen = list()\n\n for i in range(0, 10):\n TopTen.append(theList[i])\n \n return TopTen\ndef to_frequency_table(data):\n frequencytable = {}\n for key in data:\n if key in frequencytable:\n frequencytable[key] += 1\n else:\n frequencytable[key] = 1\n return frequencytable\n\n\ndef List10(df,col):\n\tdirector_dic = to_frequency_table(df[col])\n\tdirector_list = list(director_dic.items())\n\tdirector_list.sort(key=lambda tup: tup[1],reverse=True)\n\treturn TopTen([director_list[i][0] for i in range(len(director_list))])\n\n\ndef get_actor_first(x):\n return (x[0]['name'] if len(x)> 0else np.nan)\ndef get_actor_second(x):\n return (x[1]['name'] if len(x)>1 else np.nan)\ndef get_actor_third(x):\n return (x[2]['name'] if len(x)>2 else np.nan)\n","sub_path":"IADS/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"279383848","text":"from itertools import groupby\n\n\nclass Where:\n\n c = {\n '>': '$gt',\n '>=': '$gte',\n '<': '$lt',\n '<=': '$lte',\n '!=': '$ne'\n }\n\n def __init__(self,conditions=None):\n self.conditions = conditions\n\n def _filter(self,conditions):\n m = {}\n # filter\n while isinstance(conditions, list) and len(conditions) == 1:\n conditions = conditions[0]\n\n if isinstance(conditions, list):\n # `AND` has high priority,so we should split `OR` first\n if 'OR' in conditions:\n k, b = '$or', 'OR'\n else:\n k, b = '$and', 'AND'\n\n subconditions = self._split_list(conditions, b)\n for sub in subconditions:\n m.setdefault(k, []).append(self._filter(sub))\n else:\n name, value, comp = conditions['name'], conditions['value'], conditions['compare']\n if comp == '=':\n m[name] = value\n elif comp == 'LIKE':\n if not value.startswith('%'):\n value = '^' + value\n if not value.endswith('%'):\n value += '$'\n regex = value.strip('%').replace('%', '*')\n m[name] = {'$regex': regex}\n else:\n m[name] = {self.c[comp]: value}\n print(m)\n return m\n\n def _split_list(self,source,wd):\n return [list(g) for k, g in groupby(source, lambda x: x == wd) if not k]\n\n def find(self):\n _m = self._filter(self.conditions) if self.conditions else None\n return _m","sub_path":"msql/where.py","file_name":"where.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"353723551","text":"from mutagen.id3 import ID3, APIC, TIT2, TPE1, TALB\nimport os\n\n\ndef SetMp3Info(path, info):\n songFile = ID3(path)\n # songFile['APIC'] = APIC( # 插入封面\n # encoding=3,\n # mime='image/jpeg',\n # type=3,\n # desc=u'Cover',\n # data=info['picData']\n # )\n songFile['TIT2'] = TIT2( # 插入歌名\n encoding=3,\n text=info['title']\n )\n songFile['TPE1'] = TPE1( # 插入第一演奏家、歌手、等\n encoding=3,\n text=info['artist']\n )\n songFile['TALB'] = TALB( # 插入专辑名\n encoding=3,\n text=info['album']\n )\n songFile.save()\n\nif __name__ == '__main__':\n picPath = 'icon.png'\n with open(picPath, 'rb') as f:\n picData = f.read()\n info = {'picData': picData, 'title': '你的酒馆对我打了烊',\n 'artist': '陈雪凝', 'album': '绿色'}\n songPath = '赵雷 - 成都.mp3'\n SetMp3Info(songPath, 
info)","sub_path":"Netease/MP3Info.py","file_name":"MP3Info.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"430909208","text":"# 使用“队列”进行进程之间的通信\r\n\r\nfrom multiprocessing import Queue, Process\r\nimport os, time, random\r\n\r\n# 写数据的进程\r\ndef write(q1):\r\n print('写数据的子进程,进程号:%s' % os.getpid())\r\n for value in ['A', 'B', 'C']:\r\n print('将 %s 放入队列中' % value)\r\n # put()方法将数据插入到Queue中\r\n q1.put(value)\r\n time.sleep(random.random())\r\n\r\n# 读取数据的进程\r\ndef read(q2):\r\n print('读取数据的子进程,进程号:%s' % os.getpid())\r\n while True:\r\n # 这里实现了不同进程中的通信,write进程使用put()方法把数据写入Queue,然后read进程使用get()方法读取Queue中的数据\r\n value_1 = q2.get() # get()方法从Queue中读取并删除一个数据\r\n print('从队列中获取了数据:%s' % value_1)\r\n\r\nif __name__ == \"__main__\":\r\n # 父进程创建Queue,并传给各个子进程,这里设置了队列中最大并发数为3\r\n q = Queue(3)\r\n pw = Process(target=write, args=(q,))\r\n pr = Process(target=read, args=(q,))\r\n # 启动子进程pw,写入:\r\n pw.start()\r\n # 启动子进程pr,读取:\r\n pr.start()\r\n # 等待 pw进程结束:\r\n pw.join()\r\n # pr 进程里是死循环,无法等待其结束,只能强行终止\r\n pr.terminate()","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493593947","text":"# Time: O(n)\n# Space: O(n)\n\n# Given a string, find the first non-repeating character in it and\n# return it's index. If it doesn't exist, return -1.\n#\n# Examples:\n#\n# s = \"leetcode\"\n# return 0.\n#\n# s = \"loveleetcode\",\n# return 2.\n# Note: You may assume the string contain only lowercase letters.\n\n\nfrom collections import defaultdict\n\nclass Solution(object):\n def firstUniqChar(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n lookup = defaultdict(int)\n candidtates = set()\n for i, c in enumerate(s):\n if lookup[c]:\n candidtates.discard(lookup[c])\n else:\n lookup[c] = i+1\n candidtates.add(i+1)\n\n return min(candidtates)-1 if candidtates else -1\n\n def firstUniqChar_ming1(self, s): # USE THIS\n import collections\n d = collections.Counter(s)\n return next((i for i, c in enumerate(s) if d[c] == 1), -1)\n\n def firstUniqChar_ming2(self, s):\n import collections\n od = collections.OrderedDict()\n for i, c in enumerate(s):\n od[c] = i if c not in od else -1 # record index for uniq char, -1 for duplicate char\n\n # https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition\n return next((v for v in od.values() if v >= 0), -1)\n","sub_path":"Python/first-unique-character-in-a-string.py","file_name":"first-unique-character-in-a-string.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315145621","text":"\n\n#calss header\nclass _BULGE():\n\tdef __init__(self,): \n\t\tself.name = \"BULGE\"\n\t\tself.definitions = [u'a curved shape sticking out from the surface of something: ', u'a sudden increase that soon returns to the usual level: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_bulge.py","file_name":"_bulge.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209555721","text":"# TensorFlow and tf.keras\nimport tensorflow as 
tf\nfrom tensorflow import keras\nprint(tf.__version__)\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n# CIFAR-10 class labels:\n# 0: airplane 1: automobile\n# 2: bird 3: cat\n# 4: deer 5: dog\n# 6: frog 7: horse\n# 8: ship 9: truck\n\n(train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()\nprint(f'{train_images.shape}')\n\ntrain_images_input = train_images / 255.0\ntest_images_input = test_images / 255.0\n# print(f'{train_images[:1]}')\n# print(f'{train_images_input[:1]}')\n\nmodel = keras.Sequential([\n keras.layers.Conv2D(32, (3, 3), activation='relu',padding='same', input_shape=(32, 32, 3)),\n keras.layers.MaxPooling2D((2,2)),\n keras.layers.Conv2D(64, (3, 3), activation='relu',padding='same'),\n keras.layers.MaxPooling2D((2,2)),\n keras.layers.Conv2D(128, (3, 3), activation='relu',padding='same'),\n keras.layers.MaxPooling2D((2,2)),\n keras.layers.Dropout(0.1),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation='relu'),\n # keras.layers.Dense(256, activation='relu'),\n keras.layers.Dropout(0.1),\n keras.layers.Dense(10, activation='softmax')\n])\n# opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)\n# the final layer already applies softmax, so the loss must not expect logits\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),\n metrics=['accuracy'])\n\nmodel.fit(train_images_input, train_labels, epochs=20, batch_size=50)\nmodel.summary()\nmodel.save(\"./mycifar10.model\")\ntest_loss, test_acc = model.evaluate(test_images_input, test_labels, verbose=2)\n","sub_path":"src/imagetutorials/filters/cifar10-train.py","file_name":"cifar10-train.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611785966","text":"class Solution:\n def twoSum(self, nums, target):\n ''' solution 1\n Time complexity: O(n^2)\n Space complexity: O(1)\n '''\n result = []\n length = len(nums)\n for i in range(length - 1):\n for j in range(i + 1, length):\n if nums[i] + nums[j] == target:\n result.extend([i, j])\n return result\n\n def twoSum_1(self, nums, target):\n ''' solution 2\n Time complexity: O(n)\n Space complexity: O(n)\n '''\n result = []\n length = len(nums)\n map = {}\n for i in range(length):\n map[nums[i]] = i\n for i in range(length):\n complement = target - nums[i]\n if complement in map and map[complement] > i:\n result.extend([i, map[complement]])\n return result\n","sub_path":"001 Two Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373119898","text":"\"\"\"\nencode and decode the protocol as described in\nhttps://developer.telldus.com/doxygen/html/TellStickNet.html\n\"\"\"\nimport importlib\nimport logging\n_LOGGER = logging.getLogger(__name__)\n\n\nTAG_INTEGER = \"i\"\nTAG_DICT = \"h\"\nTAG_LIST = \"l\"\nTAG_END = \"s\"\nTAG_SEP = \":\"\n\n\nclass Protocol(object):\n \"\"\" main object for encoding and decoding devices \"\"\"\n def __init__(self, protocol=None):\n if protocol is not None:\n self._protocol = protocol\n self._methods = None\n modname = \"tellsticknet.protocols.%s\" % self._protocol\n self._module = importlib.import_module(modname)\n self._model = None\n self._method = None\n self._params = None\n\n def __str__(self):\n \"\"\" returns the protocol used as a string \"\"\"\n return self._protocol\n\n def setModel(self, model):\n \"\"\" sets model attribute \"\"\"\n self._model = 
model\n\n def setParameters(self, params):\n \"\"\" sets parameters \"\"\"\n self._params = params\n\n def setMethod(self, action):\n \"\"\" sets method \"\"\"\n self._method = self.varForMethod(action)\n\n def encode(self, command):\n \"\"\" encodes command with the selected protocol \"\"\"\n msg = {'protocol': self._protocol,\n 'method': self._method,\n 'model': self._model}\n msg = {**msg, **self._params}\n return encode_packet(command, **msg)\n\n def methods(self, model):\n \"\"\" returns the methods available in a protocol \"\"\"\n try:\n modname = \"tellsticknet.protocols.%s\" % self._protocol\n func = getattr(self._module, \"methods\")\n self._methods = func(model)\n return self._methods\n except ImportError:\n \"\"\" passes if protocol is incomplete \"\"\"\n _LOGGER.exception(\"Can not get methods for protocol %s \" +\n \"model <%s> modname, %s\",\n self._protocol, self._model, modname)\n\n def varForMethod(self, method):\n \"\"\" returns the int representation of a method \"\"\"\n try:\n modname = \"tellsticknet.protocols.%s\" % self._protocol\n func = getattr(self._module, \"method\")\n self._method = func(method)\n return self._method\n except ImportError:\n _LOGGER.exception(\"Can not get methods for protocol \" +\n \"%s, modname, %s\",\n self._protocol, modname)\n raise\n\n\ndef _expect(condition):\n if not condition:\n raise RuntimeError()\n\n\ndef _encode_string(s):\n \"\"\"\n encode a string\n\n >>> _encode_string(\"hello\")\n '5:hello'\n\n >>> _encode_string(\"hellothere\")\n 'A:hellothere'\n\n >>> _encode_string(\"\")\n '0:'\n\n >>> _encode_string(4711)\n Traceback (most recent call last):\n ...\n TypeError: object of type 'int' has no len()\n \"\"\"\n return \"%X%s%s\" % (len(s), TAG_SEP, s)\n\n\ndef _encode_integer(d):\n \"\"\"\n encode an integer\n\n >>> _encode_integer(42)\n 'i2as'\n\n >>> _encode_integer(-42)\n 'i-2as'\n\n >>> _encode_integer(0)\n 'i0s'\n\n >>> _encode_integer(3.3)\n 'i3s'\n \"\"\"\n return \"%s%x%s\" % (TAG_INTEGER, int(d), TAG_END)\n\n\ndef _encode_dict(d):\n \"\"\"\n encode a dict\n (keys will be put in sorted order)\n\n >>> _encode_dict({\"foo\": \"bar\", \"baz\": 42})\n 'h3:bazi2as3:foo3:bars'\n\n >>> _encode_dict({})\n 'hs'\n\n >>> _encode_dict([])\n Traceback (most recent call last):\n ...\n RuntimeError\n\n >>> _encode_dict(None)\n Traceback (most recent call last):\n ...\n RuntimeError\n \"\"\"\n _expect(isinstance(d, dict))\n\n return \"%s%s%s\" % (\n TAG_DICT,\n \"\".join(_encode_any(x)\n for keyval in sorted(d.items())\n for x in keyval),\n TAG_END)\n\n\ndef _encode_list(l):\n raise NotImplementedError()\n\n\ndef _encode_any(t):\n if isinstance(t, int):\n return _encode_integer(t)\n elif isinstance(t, str):\n return _encode_string(t)\n elif isinstance(t, dict):\n return _encode_dict(t)\n elif isinstance(t, list):\n return _encode_list(t)\n else:\n raise NotImplementedError()\n\n\ndef encode_packet(command, **args):\n \"\"\"\n encode a packet\n\n >>> encode_packet(\"hello\", foo=\"x\")\n b'5:helloh3:foo1:xs'\n\n >>> encode_packet(\"hello\", data=dict(number=7))\n b'5:helloh4:datah6:numberi7sss'\n \"\"\"\n res = _encode_string(command)\n if args:\n res += _encode_dict(args)\n return res.encode(\"ascii\")\n\n\ndef _decode_string(packet):\n \"\"\"\n decode a string\n returns tuple (decoded string, rest of packet not consumed)\n\n >>> _decode_string(\"5:hello\")\n ('hello', '')\n\n >>> _decode_string(\"5:hell\")\n Traceback (most recent call last):\n ...\n RuntimeError\n\n >>> _decode_string(\"hello\")\n Traceback (most recent call last):\n ...\n 
RuntimeError\n \"\"\"\n sep = packet.find(TAG_SEP)\n _expect(sep > 0)\n length = packet[:sep]\n length = int(length, 16)\n start = len(TAG_SEP) + sep\n end = start + length\n _expect(end <= len(packet))\n val = packet[start:end]\n return val, packet[end:]\n\n\ndef _decode_integer(packet):\n \"\"\"\n decode an integer\n returns tuple (decoded integer, rest of packet not consumed)\n\n >>> _decode_integer(\"i4711s\")\n (18193, '')\n\n >>> _decode_integer(\"i0s\")\n (0, '')\n\n >>> _decode_integer(\"i-3s\")\n (-3, '')\n\n >>> _decode_integer(\"i03s\") # invalid according to specification\n (3, '')\n\n #Traceback (most recent call last):\n # ...\n #RuntimeError\n\n >>> _decode_integer(\"i-0s\") # invalid according to specification\n Traceback (most recent call last):\n ...\n RuntimeError\n\n # this is invalid according to specification but seems to be\n # generated anyway\n >>> _decode_integer(\"i0000000000s\")\n (0, '')\n \"\"\"\n _expect(packet[0] == TAG_INTEGER)\n packet = packet[len(TAG_INTEGER):]\n end = packet.find(TAG_END)\n _expect(end > 0)\n val = packet[:end]\n # disabled check since i0000000000s seems to be present\n # but invalid according to specification\n # _expect(val[0] != \"0\" or len(val) == 1)\n _expect(val[0] != \"-\" or val[1] != \"0\")\n return int(val, 16), packet[end + len(TAG_END):]\n\n\ndef _decode_dict(packet):\n \"\"\"\n decode a dict\n returns tuple (decoded string, rest of packet not consumed)\n\n >>> _decode_dict(\"h3:foo3:bars\")\n ({'foo': 'bar'}, '')\n \"\"\"\n rest = packet[1:]\n d = {}\n\n while rest[0] != TAG_END:\n k, rest = _decode_string(rest)\n v, rest = _decode_any(rest)\n d[k] = v\n return d, rest[1:]\n\n\ndef _decode_list(packet):\n \"\"\"\n decode a list\n returns tuple (decoded list, rest of packet not consumed)\n\n \"\"\"\n raise NotImplementedError()\n\n\ndef _decode_any(packet):\n \"\"\"\n decode a token\n \"\"\"\n tag = packet[0]\n if tag == TAG_INTEGER:\n return _decode_integer(packet)\n elif tag == TAG_DICT:\n return _decode_dict(packet)\n elif tag == TAG_LIST:\n return _decode_list(packet)\n else:\n return _decode_string(packet)\n\n\ndef _fixup(d):\n \"\"\"\n Convenience method to let the protocol implementation use the key '_class'\n instead of 'class', which is a reserved word, as an argument to the dict\n constructor\n\n >>> _fixup(dict(a=1, _b=2)) == {'a': 1, 'b': 2}\n True\n \"\"\"\n return {(k[1:] if k.startswith('_') else k): v\n for k, v in d.items()} if d else None\n\n\ndef _decode(**packet):\n \"\"\"\n dynamic lookup of the protocol implementation\n \"\"\"\n\n protocol = packet[\"protocol\"]\n try:\n modname = \"tellsticknet.protocols.%s\" % protocol\n module = importlib.import_module(modname)\n func = getattr(module, \"decode\")\n\n # convert any _class=foo to class=foo\n packet = _fixup(func(packet.copy()))\n\n # convert data={temp=42, humidity=38} to\n # data=[{name=temp, value=42},{name=humidity, valye=38}]\n if packet is not None:\n if 'data' in packet:\n packet['data'] = [\n dict(name=name,\n value=value)\n for name, value\n in packet['data'].items()]\n\n return packet\n except ImportError:\n SRC_URL = (\"https://github.com/telldus/telldus/\"\n \"tree/master/telldus-core/service\")\n _LOGGER.exception(\"Can not decode protocol %s, packet <%s> \"\n \"Missing or broken _decode in %s \"\n \"Check %s for protocol implementation\",\n protocol, packet[\"data\"],\n modname, SRC_URL)\n raise\n\n\ndef _decode_command(packet):\n command, rest = _decode_any(packet)\n args, rest = _decode_any(rest)\n _expect(len(rest) == 0)\n 
_expect(isinstance(command, str))\n _expect(isinstance(args, dict))\n return command, args\n\n\ndef decode_packet(packet):\n \"\"\"\n decode a packet\n\n >>> packet = \"7:RawDatah5:class6:sensor8:protocol\\\n 8:mandolyn5:model13:temperaturehumidity4:dataiAF1D466Bss\"\n >>> len(decode_packet(packet)[\"data\"])\n 2\n\n >>> packet = \"7:RawDatah5:class6:sensor8:protocol\\\n A:fineoffset4:datai488029FF9Ass\"\n >>> len(decode_packet(packet)[\"data\"])\n 1\n\n >>> packet = \"7:RawDatah8:protocolC:everflourish4:dataiA1CC92ss\"\n \"\"\"\n try:\n command, args = _decode_command(packet)\n if command == 'zwaveinfo':\n _LOGGER.info('Got Z-Wave info packet')\n _LOGGER.debug('%s %s', command, args)\n elif command == \"RawData\":\n return _decode(**args)\n else:\n raise NotImplementedError()\n except NotImplementedError:\n _LOGGER.warning(\"failed to decode packet, skipping: %s\", packet)\n","sub_path":"tellsticknet/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":9794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"217261456","text":"import os\nimport unittest\nfrom parsers.meminfo import parse_meminfo\nfrom model import MemoryStats\n\nclass MeminfoParserTest(unittest.TestCase):\n\n def setUp(self):\n with open(os.path.join(os.path.dirname(__file__), 'meminfo.tail'), 'rb') as f:\n self.example = f.read()\n\n def test_parse_meminfo(self):\n print (self.example)\n stats = MemoryStats()\n parse_meminfo(stats, self.example)\n\n self.assertEqual(45, len(stats.meminfo))\n self.assertEqual(20507388 * 1024, stats.get('MemTotal'))\n self.assertEqual(8326068 * 1024, stats.get('MemFree'))\n self.assertEqual(20559872 * 1024, stats.get('DirectMap2M'))\n\n self.assertEqual(1, stats.get('HugePages_Total'))\n self.assertEqual(2, stats.get('HugePages_Free'))\n self.assertEqual(3, stats.get('HugePages_Rsvd'))\n self.assertEqual(4, stats.get('HugePages_Surp'))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_meminfo_parsing.py","file_name":"test_meminfo_parsing.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590523224","text":"from random import randrange\r\nfrom tkinter import *\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Lab №1\")\r\nLabel(root, text='№', font='Arial 10').grid(column=0, row=0)\r\nfor i in range(1, 9):\r\n Label(root, text=str(i), font='Arial 10').grid(column=0, row=i)\r\n\r\nLabel(root, text='Xo', font='Arial 10').grid(column=0, row=9)\r\nLabel(root, text='dx', font='Arial 10').grid(column=0, row=10)\r\nfor j in range(1, 4):\r\n Label(root, text='X{}'.format(j), font='Arial 10').grid(column=j, row=0)\r\n Label(root, text='Xн{}'.format(j), font='Arial 10').grid(column=j+4, row=0)\r\n\r\nLabel(root, text='Y', font='Arial 10').grid(column=4, row=0)\r\n\r\nM, N = 3, 8\r\nmatrix = [[randrange(0, 20) for y in range(M)] for x in range(N)]\r\na0, a1, a2, a3 = 1, 2, 3, 4\r\nlst_y = []\r\nlst_x0 = []\r\nlst_dx = []\r\nfor i in range(3):\r\n lst = []\r\n for j in range(8):\r\n lst.append(matrix[j][i])\r\n x_min = min(lst)\r\n x_max = max(lst)\r\n x_0 = (x_max + x_min) / 2\r\n dx = x_0 - x_min\r\n lst_x0.append(x_0)\r\n lst_dx.append(dx)\r\n lst.clear()\r\n\r\nmatrix.append(lst_x0)\r\nmatrix.append(lst_dx)\r\n\r\nfor i in range(10):\r\n lst = matrix[i]\r\n y = a0 + a1*lst[0] + a2*lst[1] + a3*lst[2]\r\n lst.append(y)\r\n lst_y.append(y)\r\n\r\nfor i in range(3):\r\n for j in range(9):\r\n x_n = (matrix[j][i] - 
lst_x0[i]) / lst_dx[i]\r\n matrix[j].append(x_n)\r\n\r\nfor i in range(3):\r\n matrix[9].append(0)\r\n\r\nfor i in range(10):\r\n for j in range(7):\r\n Button(root, text=str('{:.1f}'.format(matrix[i][j])), font='Arial 10',\r\n width=5, height=2).grid(column=j+1, row=i+1)\r\n\r\n\r\nlst_with_criterion = []\r\nfor i in range(8):\r\n lst_with_criterion.append((lst_y[i]-lst_y[8])**2)\r\n\r\nlst_with_factors = matrix[lst_with_criterion.index(max(lst_with_criterion))]\r\nLabel(root, text='Max (Y - Y(ref))**2: {}'.format(max(lst_with_criterion)), font='Arial 10',\r\n fg='red').grid(row=11, column=0, columnspan=8)\r\nLabel(root, text='Reference Y: {}'.format(lst_y[8]), font='Arial 10', fg='red').grid(row=12, column=0, columnspan=8)\r\nLabel(root, text='Factor value: {}, {}, {}'.format(lst_with_factors[0], lst_with_factors[1], lst_with_factors[2]),\r\n font='Arial 10', fg='red').grid(row=13, column=0, columnspan=8)\r\n\r\nroot.mainloop()\r\n","sub_path":"Lab1/Lab1.py","file_name":"Lab1.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146269991","text":"from time import gmtime, strftime\nfrom docx.shared import Inches\nfrom docx.shared import Pt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport seaborn.linearmodels as snsl\nimport tushare as ts\nfrom docx import Document\nfrom docx.enum.style import WD_STYLE_TYPE\nfrom timeit import default_timer as timer\nfrom datetime import datetime, timedelta\nimport matplotlib.dates as mdates\nfrom matplotlib.pyplot import subplots, draw\nfrom matplotlib.finance import _candlestick\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\n\ndef shiftFromToday(day, isBack): #int\n today = pd.datetime.today()\n if isBack:\n theday = today - BDay(day)\n print(theday)\n theday = '{}'.format(theday)\n theday = theday.split(' ')[0]\n return theday\n else:\n theday = today + BDay(day)\n print(theday)\n theday = '{}'.format(theday)\n theday = theday.split(' ')[0]\n return theday\ndef today():\n return '{}'.format(strftime(\"%Y-%m-%d\", gmtime())) # \"%Y-%m-%d %H:%M:%S\"\ndef yesterday():\n return '{}'.format(datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d'))\n\n\ndef templeateForming(): #with layout\n # TEMPLATE FORMING\n template = \"tushareUse/Files/input.docx\"\n document = ts.MailMerge(template)\n document.merge(\n today=today(),\n p1='Springfield',\n p2='800-555-5555')\n document.write('tushareUse/Files/output.docx')\n\n#Document editing\nwordDocument = Document('tushareUse/Files/output.docx')\n\ndef newDocument():\n wordDocument._body.clear_content()\n\ndef openDocument():\n p = wordDocument.add_paragraph()\n setParagraphStyle(p)\n r = p.add_run()\n return r\n\ndef saveDocument():\n wordDocument.save('tushareUse/Files/output.docx')\n\ndef addTableToDocument(rows, cols):\n # styles = [s for s in wordDocument.styles if s.type == WD_STYLE_TYPE.TABLE]\n # for style in styles:\n # print(style.name)\n table = wordDocument.add_table(rows = rows, cols = cols)\n table.style = 'Light Shading Accent 3'\n return table\n\ndef setParagraphStyle(p):\n style = wordDocument.styles['Normal']\n font = style.font\n font.name = 'Arial'\n font.size = Pt(15)\n p.style = wordDocument.styles['Normal']\n\ndef setPageMargin(inputfile): ## not tested\n # Open the document\n document = Document(outputfile)\n # changing the page margins\n sections = document.sections\n for section in sections:\n section.top_margin = Cm(0)\n section.bottom_margin = Cm(0)\n 
section.left_margin = Cm(0)\n section.right_margin = Cm(0)\n\n document.save(args.outputFile)\n\ndef drawCandlestick(df):\n #weekFormatter = DateFormatter('%b %d') # 如:Jan 12\n fig, ax = plt.subplots()\n fig.subplots_adjust(bottom=0.2)\n df['t'] = df['date']\n _candlestick(ax, df, width=0.6, colorup='r', colordown='g')\n\n ax.xaxis_date()\n ax.autoscale_view()\n plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')\n\n\n ax.grid(True)\n plt.title('000777', fontproperties=zhfont)\n plt.show()\n\ndef getCodeArray():\n allStocks = ts.get_today_all()\n codeArray = []\n for i in range(0, allStocks.shape[0]): # pandas dataframe row count, column count is stock.shape[1]\n codeArray.append(allStocks.iloc[i]['code'])\n return codeArray","sub_path":"GoGenjiWheel/src/GoGenji/tushareUse/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"5494461","text":"import os\nimport pandas as pd\nimport urllib.request\nfrom carto.datasets import DatasetManager\nfrom carto.auth import APIKeyAuthClient\nimport boto3\nfrom botocore.exceptions import NoCredentialsError\nfrom zipfile import ZipFile\n\n# name of table on Carto where you want to upload data\n# this should be a table name that is not currently in use\ndataset_name = 'ene_004_renewable_energy_share_of_total_energy_consumption' #check\n\n# first, set the directory that you are working in with the path variable\n# you can use an environmental variable, as we did, or directly enter the directory name as a string\n# example: path = '/home/ene_021a_renewable_energy_consumption'\npath = os.getenv('PROCESSING_DIR')+dataset_name\n#move to this directory\nos.chdir(path)\n\n# create a new sub-directory within your specified dir called 'data'\ndata_dir = 'data/'\nif not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n'''\nDownload data and save to your data directory\n'''\n# insert the url used to download the data from the source website\nurl = 'http://databank.worldbank.org/data/download/SE4ALL_csv.zip'#check\n\n# download the cata from the source\nraw_data_file = data_dir+os.path.basename(url)\nurllib.request.urlretrieve(url, raw_data_file)\n\n#unzip source data\nraw_data_file_unzipped = raw_data_file.split('.')[0]\nzip_ref = ZipFile(raw_data_file, 'r')\nzip_ref.extractall(raw_data_file_unzipped)\nzip_ref.close()\n\n'''\nProcess data\n'''\n# read in csv file as Dataframe \ndf = pd.read_csv(raw_data_file_unzipped+'/SE4ALLData.csv')\n\n# subset for renewable energy consumption data \ndf_subset = df[df['Indicator Name']=='Renewable energy share of TFEC (%)']\n\n#convert tables from wide form (each year is a column) to long form (a single column of years and a single column of values)\nyear_list = [str(year) for year in range(1990, 2016)] #check\ndf_long = pd.melt (df_subset, id_vars= ['Country Name' ,'Country Code'] ,\n value_vars = year_list,\n var_name = 'year',\n value_name = 'renewable_energy_share_of_total_energy_consumption_p')\n\n#convert year column from object to integer\ndf_long.year=df_long.year.astype('int64')\n\n#save processed dataset to csv\nprocessed_data_file = data_dir+dataset_name+'_edit.csv'\ndf_long.to_csv(processed_data_file, index=False)\n\n'''\nUpload processed data to Carto\n'''\nprint('Uploading processed data to Carto.')\n#set up carto authentication using local variables for username (CARTO_WRI_RW_USER) and API key (CARTO_WRI_RW_KEY)\nauth_client = 
APIKeyAuthClient(api_key=os.getenv('CARTO_WRI_RW_KEY'), base_url=\"https://{user}.carto.com/\".format(user=os.getenv('CARTO_WRI_RW_USER')))\n#set up dataset manager with authentication\ndataset_manager = DatasetManager(auth_client)\n#upload dataset to carto\ndataset = dataset_manager.create(processed_data_file)\nprint('Carto table created: {}'.format(os.path.basename(processed_data_file).split('.')[0]))\n#set dataset privacy to 'Public with link'\ndataset.privacy = 'LINK'\ndataset.save()\nprint('Privacy set to public with link.')\n\n'''\nUpload original data and processed data to Amazon S3 storage\n'''\ndef upload_to_aws(local_file, bucket, s3_file):\n s3 = boto3.client('s3', aws_access_key_id=os.getenv('aws_access_key_id'), aws_secret_access_key=os.getenv('aws_secret_access_key'))\n try:\n s3.upload_file(local_file, bucket, s3_file)\n print(\"Upload Successful\")\n print(\"http://{}.s3.amazonaws.com/{}\".format(bucket, s3_file))\n return True\n except FileNotFoundError:\n print(\"The file was not found\")\n return False\n except NoCredentialsError:\n print(\"Credentials not available\")\n return False\n\nprint('Uploading original data to S3.')\n# Copy the raw data into a zipped file to upload to S3\nraw_data_dir = data_dir+dataset_name+'.zip'\nwith ZipFile(raw_data_dir,'w') as zip:\n zip.write(raw_data_file, os.path.basename(raw_data_file))\n\n# Upload raw data file to S3\nuploaded = upload_to_aws(raw_data_dir, 'wri-public-data', 'resourcewatch/'+os.path.basename(raw_data_dir))\n\nprint('Uploading processed data to S3.')\n# Copy the processed data into a zipped file to upload to S3\nprocessed_data_dir = data_dir+dataset_name+'_edit'+'.zip'\nwith ZipFile(processed_data_dir,'w') as zip:\n zip.write(processed_data_file, os.path.basename(processed_data_file))\n\n# Upload processed data file to S3\nuploaded = upload_to_aws(processed_data_dir, 'wri-public-data', 'resourcewatch/'+os.path.basename(processed_data_dir))\n","sub_path":"archive/ene_004_renewable_energy_share_of_total_energy_consumption/ene_004_renewable_energy_share_of_total_energy_consumption_processing.py","file_name":"ene_004_renewable_energy_share_of_total_energy_consumption_processing.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"559722940","text":"\"\"\" utilities for making break nodes between normal nodes \"\"\"\nimport pandas as pd\nimport numpy as np\nimport math\nimport sys\n\nimport break_node as BN\nimport demand as Demand\n\ndef make_nodes(O,D,travel_time,starting_node,timelength=60):\n \"\"\"starting with O, ending with D, make a dummy node every timelength minutes\n arguments: O: origin node, integer\n D: destination node, integer\n travel_time: time from O to D, in minutes\n starting_node: starting point for new nodes, integer\n timelength: size of each segment, minutes, default 60\n returns: nested dict of travel times for the new nodes.\n The dict is one-directional, from O to D. New nodes\n are numbered sequentially from starting_node, and can be\n extracted from the keys of the dict (ignore O and D)\n \"\"\"\n\n # travel time is broken up into timelength minute chunks via\n # [i*timelength + timelength for i in range (0,math.floor(travel_time/timelength))]\n\n num_new_nodes = math.floor(travel_time/timelength)\n if num_new_nodes > 100:\n print('trying to make more than 100 nodes (',\n num_new_nodes,\n ' to be exact). With that many nodes, any reasonably sized network will never solve, so check for bugs. If you really want this behavior, then edit breaks.py')\n print(O,D,travel_time,starting_node)\n assert num_new_nodes < 100\n # if exactly some multiple of timelength minutes, drop that last node\n if travel_time % timelength == 0:\n num_new_nodes -= 1\n\n new_times = {}\n new_times[O] = {}\n new_times[D] = {}\n new_times[O][O] = 0\n new_times[O][D] = travel_time\n new_times[D][D] = 0\n\n for idx in range(0,num_new_nodes):\n node = idx+starting_node\n new_times[node] = {}\n # compute travel minutes: node = 0, timelength min; node = 1, 120, etc\n new_times[O][node] = timelength*idx + timelength\n new_times[node][node] = 0\n new_times[node][D] = travel_time - (timelength*idx + timelength)\n if node > starting_node:\n for pidx in range(0,idx):\n prev_node=pidx+starting_node\n new_times[prev_node][node] = (idx - pidx) * timelength\n\n # new nodes are stored in \"new_times\" as keys of second dimension\n # not symmetric, but rather, directional. Opposite way is impossible\n # so those values are NaN and easily set to infinity\n return new_times\n\ndef split_links(O,D,travel_time,starting_node):\n \"\"\"split the link from O to D in half\n arguments: O: origin node, integer\n D: destination node, integer\n travel_time: time from O to D, integer\n starting_node: starting point for new nodes, integer\n returns: nested dict of travel times for the new node.\n The dict is one-directional, from O to D. The single\n new node is numbered starting_node\n \"\"\"\n\n new_times = {}\n new_times[O] = {}\n new_times[D] = {}\n new_times[O][O] = 0\n new_times[O][D] = travel_time\n new_times[D][D] = 0\n\n node = starting_node\n new_times[node] = {}\n # compute travel minutes\n new_times[O][node] = math.floor(travel_time/2)\n new_times[node][node] = 0\n new_times[node][D] = travel_time - new_times[O][node]\n\n # new nodes are stored in \"new_times\" as keys of second dimension\n # not symmetric, but rather, directional. Opposite way is impossible\n # so those values are NaN and easily set to infinity\n return new_times\n\n\ndef split_links_break_nodes(O,D,travel_time,new_node,break_time,reset_time):\n \"\"\"insert a break node into the link from O to D\n arguments: O: origin node, integer\n D: destination node, integer\n travel_time: time from O to D, integer\n new_node: id to use for the created break node, integer\n break_time: duration of the break, in minutes\n reset_time: drive time allowed before the break, in minutes\n returns: tuple of (structured array of one-directional\n (x, y, t) travel time triplets, BreakNode object)\n \"\"\"\n bn = BN.BreakNode(O,D,travel_time,new_node,break_time,reset_time)\n new_times = Demand.zeroed_trip_triplets(3)\n # np.zeros(3,dtype=[('x', int), ('y', int),('t',float)])\n\n # # copy existing. this is redundant\n # new_times[0] = [O,O,0]\n # new_times[1] = [O,D,travel_time]\n # new_times[2] = [D,D,0]\n\n\n # compute travel minutes\n new_times[0] = (O,new_node,bn.tt_o)\n new_times[1] = (new_node,new_node, 0)\n new_times[2] = (new_node,D,bn.tt_d)\n\n # new_times holds one-directional (x, y, t) triplets from O through\n # the break node to D; the opposite direction is impossible\n return (new_times,bn)\n\n\n\"\"\"Code that gets used a lot, so split out into its own fn\"\"\"\ndef break_node_splitter(origin,destination,tt,min_start):\n \"\"\"Given an Origin and a Destination node, plus travel time between\n and the numbering of nodes (min_start is an integer for the first\n node that will be created), create necessary break nodes between O\n and D that will satisfy the break rules.\n\n Currently knows only about the 11hr drive, 10hr break rule.\n\n Going to make it work for 8hr drive, 0.5hr break.\n\n \"\"\"\n new_times = Demand.zeroed_trip_triplets(0)\n # np.zeros(0,dtype=[('x', int), ('y', int),('t',float)])\n new_nodes = []\n long_break_time = 60*10\n long_break_interval = 60*11\n short_break_time = 30\n short_break_interval = 60*8\n\n # for the 11 hour drive rule\n long_possible_breaks = math.ceil(tt/(11*60))\n if long_possible_breaks == 0:\n long_possible_breaks = 1\n\n # no real need to count up 8 hr breaks...at a minimum, can slot\n # one in between each 11 hr break\n segment_tt = tt\n for i in range(0,long_possible_breaks):\n # insert 11 hr break opportunity\n pair11 = split_links_break_nodes(origin,\n destination,\n segment_tt,\n min_start,\n long_break_time,\n long_break_interval\n )\n min_start += 1\n\n\n node11 = pair11[1]\n\n # possibly set up a dimension thing here?\n # node11.add_dimension_name('Drive') # or similar?\n\n # slot in an 8 hr break between origin and 11 hr\n pair8 = split_links_break_nodes(origin,\n node11.node,\n node11.tt_o,\n min_start,\n short_break_time,\n short_break_interval)\n node8=pair8[1]\n # need to correct the destination of the node8 because of the\n # way the demand object stores and retrieves breaks between an\n # OD pair\n node8.destination = destination\n min_start += 1\n\n # possibly set up a dimension thing here too?\n # pair8[1].add_dimension_name('halfhrbreak') # or similar?\n\n # if i==long_possible_breaks-1:\n # # closing in on destination, so include potential to get\n # # from short break to goal?\n # extra_link = np.array([node8.node,destination,node8.tt_d+node11.tt_d])\n # new_times = np.concatenate(new_times,\n # pair11[0],\n # pair8[0])\n new_times = np.concatenate((new_times,\n pair11[0],\n pair8[0]),axis=0)\n\n\n # but I want the 8hr break nodes coming before the 11 hr ones\n new_nodes.append(pair8[1])\n new_nodes.append(pair11[1])\n\n # set for next loop\n segment_tt = node11.tt_d\n origin=node11.node\n\n\n # end of loop. Consider boundary conditions\n node8 = new_nodes[-2]\n node11 = new_nodes[-1]\n # might not need a short break after long break prior to\n # arrival at destination, so make it so can just get to dest\n # from short break\n extra_connection = np.array([(node8.node,destination,node8.tt_d+node11.tt_d)],\n dtype=[('x', int), ('y', int),('t',float)])\n new_times = np.concatenate((new_times,extra_connection),axis=0)\n # print(tt,long_possible_breaks*long_break_interval,\n # tt - long_possible_breaks*long_break_interval)\n\n # might need another 8 hr break node, but\n # only put in another 8 hr node if need to do so\n if tt - ((long_possible_breaks-1)*long_break_interval)>short_break_interval :\n # in this case, might need a short break before long break,\n # then another short break prior to destination (worst case,\n # pickup, 0 time, long break, 8hrs, need short break,\n # dest). so make it here\n\n pair8 = split_links_break_nodes(node11.node,\n destination,\n node11.tt_d,\n min_start,\n short_break_time,\n short_break_interval)\n min_start += 1 # not necessary, but good habit\n\n new_times = np.concatenate((new_times,pair8[0]),axis=0)\n new_nodes.append(pair8[1])\n\n return (new_times,new_nodes,min_start)\n\n\"\"\"Yes, this seems redundant with above by the name, but it isn't\"\"\"\ndef split_break_node(record,travel_times,min_start=None):\n \"\"\"Pass in a demand record, and split out all the required break nodes\n to get to the origin from the depot, to the destination from the\n origin, and back to the depot from the destination\n\n This function knows about the break rules. Currently only one is\n implemented (drive 11, break 10). Working on drive 8 break 0.5,\n then will work on on-duty 14 break 10.\n\n \"\"\"\n\n if min_start is None:\n min_start = len(travel_times.index)\n new_times = Demand.zeroed_trip_triplets(0)\n # np.zeros(0,dtype=[('x', int), ('y', int),('t',float)])\n new_nodes = []\n tt = travel_times.loc[0,record.origin]\n if not np.isnan(tt):\n pair = break_node_splitter(0,record.origin,tt,min_start)\n new_times = np.concatenate((new_times,pair[0]),axis=0)\n new_nodes.extend(pair[1])\n min_start = pair[2]\n\n tt = travel_times.loc[record.origin,record.destination]\n if not np.isnan(tt):\n pair = break_node_splitter(record.origin,record.destination,tt,min_start)\n new_times = np.concatenate((new_times,pair[0]),axis=0)\n new_nodes.extend(pair[1])\n min_start = pair[2]\n\n tt = travel_times.loc[record.destination,0]\n if not np.isnan(tt):\n pair = break_node_splitter(record.destination,0,tt,min_start)\n new_times = np.concatenate((new_times,pair[0]),axis=0)\n new_nodes.extend(pair[1])\n min_start = pair[2]\n #print(new_times)\n\n return (new_times,new_nodes,min_start)\n\ndef aggregate_split_nodes(travel_time,newtimes):\n \"\"\"combine current time matrix with list of new times for each new node\"\"\"\n # at this time, I keep careful track of new nodes, so there should\n # be no need for the adjustment code.\n\n new_df=pd.DataFrame(data=newtimes)\n #new_df = pd.DataFrame(data=merged_nt,columns=['from','to','time'])\n new_df.drop_duplicates(inplace=True)\n df_new_times = new_df.pivot(index='x',columns='y',values='t')\n # print(df_new_times)\n\n df_new_times.update(travel_time)\n # print(df_new_times)\n for idx in travel_time.index:\n if not idx in df_new_times.index:\n print('problems ahead. missing',idx,'from',df_new_times.index,' Bailing out')\n assert idx in df_new_times.index\n # print(df_new_times.index)\n # print(travel_time.index)\n # print(df_new_times)\n travel_time = df_new_times\n return travel_time\n\n\n\n\n# def make_dummy_node(travel_times,pickups,dropoffs,start=-1):\n# \"\"\"create dummy node. 
Expand travel time matrix\"\"\"\n# # create a dummy node, only reachable from depot,\n# new_times = {}\n# # new node id\n# nn_id = start\n# if start < 0:\n# nn_id = int(travel_times.index.max()) + 1\n# new_times[0] = {0:0}\n# new_times[nn_id] = {0:0}\n# # now all set travel time from nn to all pickups equal to depot to pickups\n# # for p in pickups:\n# # new_times[nn_id][p]=travel_times.loc[0,p]\n# for p in dropoffs:\n# new_times[p]={}\n# new_times[p][nn_id]=travel_times.loc[p,0]\n# new_times[0][nn_id]=0\n# new_times[nn_id][nn_id]= 0\n\n# return new_times\n\n# def make_dummy_vehicle_nodes(vehicles,travel_times,pickups,dropoffs):\n# moretimes = []\n# start = travel_times.index.max()+1\n# new_times = make_dummy_vehicle_node(travel_times,pickups,dropoffs,start)\n# moretimes.append(new_times)\n# for v in range(1,len(vehicles.vehicles)):\n# # for now, just do it every time\n# # but eventually should figure out logic to copy in from new_times\n# start += 1\n# new_times = make_dummy_vehicle_node(travel_times,pickups,dropoffs,start)\n# moretimes.append(new_times)\n# return moretimes\n\n\n\n# def split_generator(travel_times,timelength=600):\n# def gen_breaks(record):\n# min_start = len(travel_times.index)\n# new_times = []\n# idx = 0\n# tt = travel_times.loc[0,record.origin]\n# if tt > timelength:\n# new_times.append( split_links(0,record.origin,\n# tt,\n# min_start))\n# min_start += 1\n# idx += 1\n# tt = travel_times.loc[record.origin,record.destination]\n# if tt > timelength:\n# new_times.append( split_links(record.origin,\n# record.destination,\n# tt,\n# min_start))\n# min_start += 1\n# idx += 1\n# tt = travel_times.loc[record.destination,0]\n# if tt > timelength:\n# new_times.append( split_links(record.destination,0,\n# tt,\n# min_start))\n# return new_times\n# return gen_breaks\n\n# def aggregate_time_matrix(travel_time,newtimes):\n# \"\"\"combine current time matrix with list of new times from gen_breaks, above\"\"\"\n\n# max_new_node = len(travel_time.index)\n# for nt in newtimes:\n# if len(nt) < 3:\n# # don't bother with no new nodes case\n# continue\n# new_df = pd.DataFrame.from_dict(data=nt,orient='index')\n# #new_df = new_df.fillna(sys.maxsize)\n# new_cols = [i for i in range(2,len(new_df))]\n# old_cols = [0,1]\n\n# # need to adjust the dataframe\n# offset = max_new_node - min(new_df.iloc[:,new_cols].columns)\n# # print(max_new_node,offset)\n\n# # first the columns\n# adjustment = [offset for i in range(0,len(new_df.columns))]\n# # if debug:\n# # print(adjustment)\n# adjustment[0] = 0\n# adjustment[1] = 0\n# # if debug:\n# # print(new_df.columns)\n# new_df.columns = [i + adj for (i,adj) in zip(new_df.columns,adjustment)]\n# # if debug:\n# # print(new_df.columns)\n# # then the rows (index)\n# new_df.index = [i + adj for (i,adj) in zip(new_df.index,adjustment)]\n# new_df = new_df.reindex()\n\n# max_new_node = new_df.columns.max()+1\n\n# # if debug:\n# #print(max_new_node,'<-\\n',new_df)\n# #assert 0\n# # first append the new destinations for existing columns\n# travel_time = travel_time.append(new_df.iloc[new_cols,old_cols])\n# #print(travel_time)\n\n# # if debug:\n# # print(travel_time)\n# # then join in the new rows and columns\n# reduced_df = new_df.iloc[:,new_cols]\n# reduced_df = reduced_df.reindex()\n# # if debug:\n# #print(reduced_df)\n# travel_time = travel_time.join(reduced_df\n# ,how='outer'\n# )\n# # if debug:\n# # print(travel_time)\n\n# # if debug:\n# # assert 0\n\n# # now replace NaN with infinity\n# # travel_time = travel_time.fillna(sys.maxsize)\n# # print(travel_time)\n# 
return travel_time\n\n# def aggregate_dummy_nodes(travel_time,newtimes):\n# \"\"\"combine current time matrix with list of new times for each new node\"\"\"\n\n# max_new_node = len(travel_time.index)\n# for nt in newtimes:\n# # print(nt)\n# new_df = pd.DataFrame.from_dict(data=nt,orient='index')\n# # print (new_df)\n# old_cols = [i for i in new_df.columns.view(int)]\n# old_cols.sort() # shift new node to last\n# new_cols = [old_cols.pop()]\n# # print(new_cols,old_cols)\n# #print(new_df.loc[new_cols,old_cols])\n# #print(new_df.loc[old_cols,new_cols])\n# # assert 0\n# # first append the new destinations for existing columns\n# travel_time = travel_time.append(new_df.loc[new_cols,old_cols])\n\n# # if debug:\n# # print(travel_time)\n# # then join in the new rows and columns\n# reduced_df = new_df.loc[:,new_cols]\n# reduced_df = reduced_df.reindex()\n# travel_time = travel_time.join(reduced_df\n# ,how='outer'\n# )\n# # now replace NaN with infinity\n# # travel_time = travel_time.fillna(sys.maxsize)\n# # print(travel_time)\n# return travel_time\n\n# def breaks_logic():\n# node_visit_transit = {}\n# for n in expanded_mm.index:\n# node_visit_transit[n] = int(d.get_service_time(n))\n\n# breaks = {}\n# constraints = {}\n# starts = []\n# slacks = []\n# ends = []\n# # grab ref to solver\n# solver = routing.solver()\n\n# # min_intervals = d.get_min_intervals(len(vehicles.vehicles))\n# first_breaks = d.get_first_break(len(vehicles.vehicles),mm)\n# print(first_breaks)\n\n# for i in range(0,len(vehicles.vehicles)):\n# print ( 'breaks for vehicle',i)\n# breaks[i] = []\n# constraints[i] = []\n# active_start = routing.ActiveVar(routing.Start(i))\n# active_end = routing.VehicleVar(routing.End(i)) == i\n# counting_end = active_end * count_dimension.CumulVar(routing.End(i))\n# end_count_okay = counting_end > 1\n# active_vehicle = end_count_okay\n# time_start = time_dimension.CumulVar(routing.Start(i))\n# slack_start = time_dimension.SlackVar(routing.Start(i))\n# time_end = time_dimension.CumulVar(routing.End(i))\n# must_start = active_vehicle*(time_start + slack_start + 11*60) # 11 hours later\n\n# for pickup_node in first_breaks.keys():\n# fb = first_breaks[pickup_node]\n# # set up the origin details for constraints\n# pickup_idx = manager.NodeToIndex(pickup_node)\n# active_node = routing.ActiveVar(pickup_idx)\n# same_vehicle_condition = active_node * routing.VehicleVar(pickup_idx) == i\n\n# for j in range(0,len(fb)):\n# pair = fb[j]\n# jth_10hr_break = solver.FixedDurationIntervalVar(\n# pair[0], # minimum start time\n# pair[1], # maximum start time\n# 10 * 60, # duration of break is 10 hours\n# True, # optional, condition on vehicle serving origin\n# '10hr break {} for vehicle {} serving {}'.format(j,i,pickup_node))\n# breaks[i].append(jth_10hr_break)\n# # first pickup constraint---breaks only relevant if first pickup\n# # due to split long nodes, count dimension might be 1 or 2\n# count_val = active_node * count_dimension.CumulVar(pickup_idx)\n# counted_visit = count_val >= 1\n# early_visit = count_val <= 2\n# break_active_condition = counted_visit*early_visit\n# # only use if this vehicle actually serves the intended node\n# cond_expr = solver.ConditionalExpression(\n# same_vehicle_condition,\n# jth_10hr_break.PerformedExpr() == break_active_condition,\n# 1)\n# solver.Add(cond_expr>=1)\n# print('break',len(breaks[i])-1,'for serving node',pair[2])\n\n# # now add additional breaks for whole of likely range\n# # break up full time (horizon) into 10+11 hour ranges (drive 11, break 10)\n# # not quite right, as 
the 14hr rule also comes into play\n\n# need_breaks = math.floor(args.horizon / 60 / (10 + 11))\n# #need_breaks -= min_intervals[i]\n# # need_breaks = 0\n# # follow_constraints = []\n# # don't need first break, as that is already specified above\n# # if i > 0:\n# # need_breaks = 2\n\n# # # start counting from?\n# # for intvl in range(len(fb),need_breaks):\n# # print(intvl)\n# # # break minimum start time is 0\n# # # break maximum start time is horizon - 10 hours\n\n# # min_start_time = (intvl)*(10 + 11)*60\n# # max_start_time = (intvl)*(10 + 11)*60 + 660\n\n# # if min_start_time > args.horizon - 660:\n# # break\n\n# # require_first_few = False\n# # #if intvl > 0:\n# # # require_first_few = True\n# # # key on first break, but only required if time hasn't run out\n# # # next_10hr_break = solver.FixedDurationStartSyncedOnEndIntervalVar(\n# # # breaks[i][-1], # keyed to prior\n# # # 600, # duration\n# # # 660 # offset\n# # # )\n# # next_10hr_break = solver.FixedDurationStartSyncedOnStartIntervalVar(\n# # breaks[i][0], # keyed to first\n# # 600, # duration\n# # min_start_time # offset\n# # )\n# # # next_10hr_break = solver.FixedDurationIntervalVar(\n# # # min_start_intvar, # maximum start time (11 hours after start)\n# # # 10 * 60, # duration of break is 10 hours\n# # # '10hr break {} for vehicle {}'.format(intvl,i))\n# # # next_10hr_break = solver.FixedDurationIntervalVar(\n# # # min_start_time, # minimum start time\n# # # max_start_time, # maximum start time (11 hours after start)\n# # # 10 * 60, # duration of break is 10 hours\n# # # optional, # optional?\n# # # '{}th 10hr break for vehicle {}'.format(intvl,i))\n\n# # breaks[i].append(next_10hr_break)\n# # # constraints:\n# # # bip = next_10hr_break.MustBePerformed()\n# # # solver.Add(next_10hr_break.PerformedExpr()==True)\n# # # print('break must be performed = ',bip)\n# # # sync with preceding break\n# # # this break starts 11h after end of prior\n# # # follow_after_constraint = next_10hr_break.StartsAfterEndWithDelay(\n# # # breaks[i][intvl-1],\n# # # 660) # 11 hours times 60 minutes = 660\n# # # solver.Add(follow_after_constraint)\n\n\n# # if require_first_few:\n# # # conditional constraint. If vehicle is done before start\n# # # time, then don't bother with this break\n\n# # # first, requirement that break is performed\n# # break_condition = next_10hr_break.PerformedExpr()==True\n\n# # # second, the timing. 
If route is over, don't need break\n# # break_start = time_start + intvl*(11+10)*60\n# # time_condition = break_start < time_end # break_start\n\n# # use conditional expression\n# # expression = solver.ConditionalExpression(time_condition,\n# # break_condition,\n# # 1)\n# # solver.AddConstraint(\n# # expression >= 1\n# # )\n\n# # # print(follow_after_constraint)\n# # # follow_constraints.append(follow_after_constraint)\n# print(breaks[i])\n# time_dimension.SetBreakIntervalsOfVehicle(\n# breaks[i], i, node_visit_transit)\n\n# # for follow_after_constraint in follow_constraints:\n# # solver.Add(follow_after_constraint)\n","sub_path":"src/breaks.py","file_name":"breaks.py","file_ext":"py","file_size_in_byte":24998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"105998142","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 1 15:16:46 2018\r\n\r\n@author: robin\r\n\"\"\"\r\n\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\n\r\nfname = 'HW2_Q2_data.txt'\r\n#load the data from the file\r\ndata = np.genfromtxt(fname, comments='#')\r\n\r\nvmag = data[:,0]\r\nimag = data[:,1]\r\ndx = data[:,2]\r\ndy = data[:,3]\r\n\r\nplt.figure(1)\r\nplt.clf() \r\nplt.scatter((vmag-imag),vmag,s=0.5,marker=\".\",color=\"red\")\r\nplt.xlabel('V-I')\r\nplt.ylabel('V')\r\nplt.title('Colour-Magnitude Diagram (CMD)')\r\nplt.gca().invert_yaxis()\r\n#This is an HR diagram\r\n\r\nplt.figure(2)\r\nplt.clf()\r\nalls = plt.scatter(dx,dy,s=0.5,marker=\".\",color=\"blue\")\r\nrangtuc = np.where((dx**2+dy**2)<0.3**2)\r\nrangcld = np.where(((dx+0.6)**2+(dy+0.2)**2)<0.15**2)\r\nsepr = plt.scatter(dx[rangtuc],dy[rangtuc],s=0.5,marker=\".\",color=\"red\")\r\nplt.xlabel('dx(Milli-arcseconds/year)')\r\nplt.ylabel('dy(Milli-arcseconds/year)')\r\nplt.title('Proper Motion of the Stars')\r\n\r\nlgnd = plt.legend((alls,sepr),('All Stars','47 Tuc Stars'),scatterpoints=1,fontsize=8)\r\nlgnd.legendHandles[0]._sizes=[6]\r\nlgnd.legendHandles[1]._sizes=[6]\r\n\r\nplt.figure(3)\r\nplt.clf()\r\nalls2 = plt.scatter(vmag[rangtuc]-imag[rangtuc],vmag[rangtuc],s=0.5,marker=\".\",color=\"red\")\r\nsepr2 = plt.scatter(vmag[rangcld]-imag[rangcld],vmag[rangcld],s=0.5,marker=\".\",color=\"blue\")\r\nplt.xlabel('V-I')\r\nplt.ylabel('V')\r\nplt.title('CMD of 47 Tuc stars and SMC')\r\nplt.gca().invert_yaxis()\r\n\r\nlgnd = plt.legend((alls2,sepr2),('47 Tuc Stars','SMC Stars'),scatterpoints=1,fontsize=8)\r\nlgnd.legendHandles[0]._sizes=[6]\r\nlgnd.legendHandles[1]._sizes=[6]","sub_path":"2nd Year/Astr 205/Assignment 2/assignment2_2.py","file_name":"assignment2_2.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"503230676","text":"# CSC 110 HW 5\n# Lucas Schwarz\n# 08/04/15\n\n# This program will convert a word to pig latin.\n# Currently it is set to check up to the first 3 letters for a vowel\n# I couldn't find any English words that start with more than 3 consonants\n# so I stopped there.\n\n# I followed the examples given in the assignment and follow-up forum questions; hopefully\n# it shows that I know what I'm doing despite the fact that I'm still a little confused about\n# whether or not I should be using 'way' or 'yay'\n\n# Global constants to easily adjust the 'way' or 'yay' later.\n# CONSONANT_FIRST for words that start with a consonant:\nCONSONANT_FIRST = 'ay'\n# VOWEL_FIRST for words that start with a vowel:\nVOWEL_FIRST = 'way'\n\n#Intro function to introduce program to the user, and display a pig emoji\ndef intro():\n\tprint('This program will prompt you for a word and then ')\n\tprint('return the Pig Latin equivalent. Have unfay! \\n')\n\tprint(\"( ఠ ⚇̭ ఠ )\\n\") \n\t#my wife saw where I said unfay and decided a pig emoji would be appropriate.\n\n# input function to return word from user, or exit sentinel.\ndef input_function():\n\tword = input('Please input word, or q to quit: ')\t#input word\n\treturn word\n\n# output function to print word user entered and its pig latin.\ndef output_function(word,pig_word):\n\tprint('The word you entered was: ', word)\n\tprint('in pig latin that would be: ', pig_word, '\\n')\n\n# processing function makes decision based on first characters of user word,\n# and then creates the pig latin equivalent.\n# argument word is word user entered.\n# argument vowel_location is index position of first vowel.\n# pig_word is the returned pig latin string.\ndef processing_function(word,vowel_location):\n\t#IF first letter a vowel\n\tif vowel_location == 0:\n\t\tpig_word = word + VOWEL_FIRST\n\n\telse:\n\t\t#first_chars is str slice of index prior to first vowel\n\t\tfirst_chars = word[0:vowel_location]\n\t\tpig_word = word[vowel_location:] + first_chars + CONSONANT_FIRST\n\n\treturn pig_word\n\n# vowel_locator function to determine index position of first vowel\n# argument word is word that the user entered.\n# vowel_location is index position of the first vowel\ndef vowel_locator(word):\n\tif word[0] in ('a','e','i','o','u','y'):\n\t\tvowel_location = 0\n\t\treturn vowel_location\n\telif word[1] in ('a','e','i','o','u','y'):\n\t\tvowel_location = 1\n\t\treturn vowel_location\n\telif word[2] in ('a','e','i','o','u','y'):\n\t\tvowel_location = 2\n\t\treturn vowel_location\n\telif word[3] in ('a','e','i','o','u','y'):\n\t\tvowel_location = 3\n\t\treturn vowel_location\n\n#main function shows logic and progression of how program runs.\ndef main():\n\t\n\tintro() #call intro\n\n\t# get first input, will also be checked for exit sentinel.\n\tword = input_function() \n\t\n\twhile word != 'q': # check exit sentinel\n\n\t\tvowel_location = vowel_locator(word) # return location of first vowel\n\t\t\n\t\tpig_word = processing_function(word, vowel_location) #process\n\n\t\toutput_function(word,pig_word) #output\n\n\t\tword = input_function() # get new input\n\nmain() #start program\n\n# Test 1\n# 'test' should return esttay, confirmed\n\n# Test 2\n# 'crisis' should return isiscray, confirmed\n\n# Also tested against all examples given in Assignment.\n","sub_path":"SchwarzHW5.py","file_name":"SchwarzHW5.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"419078234","text":"\n\nsquares = []\nsquares = [x**2 for x in range(10)]\n\nprint(squares)\n\nlist1 = [3, 4, 5]\n\n\nmultiplied = [item*3 for item in list1]\n\nprint(multiplied)\n\nlistOfWords = [\"this\", \"is\", \"a\", \"list\", \"of\", \"words\"]\n\nitems = [word[0] for word in listOfWords]\n\nprint(items)","sub_path":"Learning/comprehension.py","file_name":"comprehension.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"436936703","text":"import numpy as np\nwith open('input') as f:\n lines = [l for l in f.read().split('\\n') if l.strip() != '']\n state = np.array([[[list(l) for l in lines]]])\nprint(state)\n\n\ndef print_state(state):\n for w in range(state.shape[0]):\n for z in range(state.shape[1]):\n s = state[w, z]\n print('w=%d, z=%d' % (w, z))\n for i in range(s.shape[0]):\n print(''.join(list(s[i])))\n print()\n\n\ndef get_neighbors(state, w, z, i, j):\n neigh = []\n for dw in [-1, 0, 1]:\n for dz in [-1, 0, 1]:\n for di in [-1, 0, 1]:\n for dj in [-1, 0, 1]:\n if dw == dz == di == dj == 0:\n continue\n neigh.append(get_state(state, w + dw, z + dz, i + di, j + dj))\n return np.array(neigh)\n\n\ndef get_state(state, w, z, i, j):\n if w < 0 or w >= state.shape[0]:\n return '.'\n if z < 0 or z >= state.shape[1]:\n return '.'\n if i < 0 or i >= state.shape[2]:\n return '.'\n if j < 0 or j >= state.shape[3]:\n return '.'\n return state[w, z, i, j]\n\n\ndef step(state):\n new_state = np.zeros((\n state.shape[0] + 2,\n state.shape[1] + 2,\n state.shape[2] + 2,\n state.shape[3] + 2,\n ), dtype=state.dtype)\n\n for nw in range(new_state.shape[0]):\n for nz in range(new_state.shape[1]):\n for ni in range(new_state.shape[2]):\n for nj in range(new_state.shape[3]):\n w = nw - 1\n z = nz - 1\n i = ni - 1\n j = nj - 1\n curr = get_state(state, w, z, i, j)\n neighbors = get_neighbors(state, w, z, i, j)\n if curr == '#':\n num_neigh_active = np.count_nonzero(neighbors == '#')\n if num_neigh_active == 2 or num_neigh_active == 3:\n new_state[nw, nz, ni, nj] = '#'\n else:\n new_state[nw, nz, ni, nj] = '.'\n elif curr == '.':\n num_neigh_active = np.count_nonzero(neighbors == '#')\n if num_neigh_active == 3:\n new_state[nw, nz, ni, nj] = '#'\n else:\n new_state[nw, nz, ni, nj] = '.'\n\n return new_state\n\n\nfor cycle in range(6):\n print('After %d cycles' % cycle)\n print_state(state)\n state = step(state)\n\n print('%d active' % np.count_nonzero(state == '#'))\n","sub_path":"2020/17/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"579106168","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/4 22:06\n# @Author : WuxieYaYa\n\n\"\"\"\nGiven n non-negative integers representing an elevation map where the width of each bar is 1, compute how much rain water the bars can trap after raining.\n\nThe elevation map above is represented by the array [0,1,0,2,1,0,1,3,2,1,2,1]; in this case, 6 units of rain water (the blue sections) can be trapped. Thanks to Marcos for contributing the figure.\n\nExample:\n\nInput: [0,1,0,2,1,0,1,3,2,1,2,1]\nOutput: 6\n\nLink: https://leetcode-cn.com/problems/trapping-rain-water\n\"\"\"\ndef trap(height):\n # monotonic stack\n n = len(height)\n if n <= 2:\n return 0\n res, idx = 0, 0\n stack = []\n while idx < n:\n while len(stack) > 0 and height[idx] > height[stack[-1]]:\n top = stack.pop() # index of the last element in the stack\n if len(stack) == 0:\n break\n h = min(height[stack[-1]], height[idx]) - height[top]\n dist = idx - stack[-1] - 1\n res += (dist * h)\n stack.append(idx)\n idx += 1\n return res\n\n\n\nif __name__ == '__main__':\n a = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]\n print(trap(a))\n","sub_path":"42. 接雨水.py","file_name":"42. 接雨水.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"69477712","text":"import random\n\n\n# Ex. takes in 2d20 and outputs the string Rolling 2 d20\ndef roll_str(rolls):\n numDice = rolls.split('d')[0]\n diceVal = rolls.split('d')[1]\n if numDice == '':\n numDice = int(1)\n return \"Rolling %s d%s\" % (numDice, diceVal)\n\n\n# Ex. 
takes in 2d20 and outputs resultString = 11, 19 results = 30 numDice = 2\ndef roll(rolls):\n results = 0\n resultString = ''\n try:\n numDice = rolls.split('d')[0]\n except Exception as e:\n print(e)\n return \"Use proper format!\"\n rolls, limit = map(str, rolls.split('d'))\n if rolls == '':\n rolls = int(1)\n rolls = int(rolls)\n limit = int(limit)\n for r in range(rolls):\n number = random.randint(1, limit)\n results = results + number\n if resultString == '':\n resultString += str(number)\n else:\n resultString += ', ' + str(number)\n # Returns 3 variables, make sure to store in 3 variables\n return resultString, results, numDice\n","sub_path":"utilityFunction/CommandFunc.py","file_name":"CommandFunc.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489259442","text":"# Complexity\n# Time: O(n * log(n))\n# Space: O(n)\n\nimport math\n\nclass Solution:\n def countPrimes(self, n: int) -> int:\n primes = [0, 0] + [1] * (n - 2)\n \n for base in range(2, int(math.sqrt(n)) + 1):\n if primes[base]:\n for num in range(base * base, n, base):\n primes[num] = 0\n \n return sum(primes)\n\n","sub_path":"204. Count Primes/Python/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64680968","text":"import numpy as np\r\nfrom nltk import ngrams\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\n# Returns the normalized number of shared words in a question pair.\r\n# question1,question2 are lists of strings representing the words from respective question.\r\ndef get_shared_words(question1, question2):\r\n count = 0\r\n if len(question1) == 0 or len(question2) == 0:\r\n count = 0\r\n else:\r\n for word in question1:\r\n if question2.count(word) > 0:\r\n count += 1\r\n if count == 0:\r\n return count\r\n else:\r\n norm_count = count / (len(question1) + len(question2))\r\n return norm_count\r\n\r\n\r\n# Returns the numbers of shared ngrams in 2 questions\r\ndef get_shared_ngrams(q1, q2, n):\r\n count = 0\r\n q1_n = list(ngrams(q1, n))\r\n q2_n = list(ngrams(q2, n))\r\n\r\n for pair in q1_n:\r\n if pair in q2_n:\r\n count += 1\r\n return count\r\n\r\n\r\n# Returns a tuple of questions embeddings\r\ndef get_questions_embeddings(q1, q2, w2v_model):\r\n\r\n w2vec_list_q1 = []\r\n w2vec_list_q2 = []\r\n q1_vec = []\r\n q2_vec = []\r\n\r\n # Getting the embedings for each word in questions\r\n for word in q1:\r\n try:\r\n word_vec_q1 = w2v_model[word]\r\n w2vec_list_q1.append(word_vec_q1)\r\n except KeyError:\r\n print('The word: {}, is not in dictionary'.format(word))\r\n\r\n for word in q2:\r\n try:\r\n word_vec_q2 = w2v_model[word]\r\n w2vec_list_q2.append(word_vec_q2)\r\n except KeyError:\r\n print('The word is not in dictionary')\r\n\r\n if len(w2vec_list_q1) == 0 or len(w2vec_list_q2) == 0:\r\n no_word_embeddings = np.zeros(300).reshape(1, -1)\r\n question_embedings = (no_word_embeddings, no_word_embeddings)\r\n\r\n return question_embedings\r\n\r\n for j in range(len(w2vec_list_q1[0])):\r\n summ_q1 = 0\r\n for i in range(len(w2vec_list_q1)):\r\n\r\n summ_q1 += w2vec_list_q1[i][j]\r\n\r\n q1_vec.append(summ_q1 / len(w2vec_list_q1))\r\n\r\n for j in range(len(w2vec_list_q2[0])):\r\n summ_q2 = 0\r\n for i in range(len(w2vec_list_q2)):\r\n\r\n summ_q2 += w2vec_list_q2[i][j]\r\n\r\n q2_vec.append(summ_q2 / len(w2vec_list_q2))\r\n q1_vec = np.reshape(q1_vec, (1, -1))\r\n 
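# reshape the averaged embedding to a (1, n_features) row vector, matching the np.zeros(300).reshape(1, -1) fallback above\r\n 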
q2_vec = np.reshape(q2_vec, (1, -1))\r\n question_embedings = (q1_vec, q2_vec)\r\n\r\n return question_embedings\r\n\r\ndef words_difference(question1_tokenized, question2_tokenized):\r\n difference = len(question2_tokenized) - len(question1_tokenized)\r\n\r\n return abs(difference)\r\n\r\n\r\ndef chars_difference(question1, question2):\r\n q1_sentence = ''\r\n q2_sentence = ''\r\n for word in question1:\r\n q1_sentence += word\r\n for word in question2:\r\n q2_sentence += word\r\n difference = len(q1_sentence) - len(q2_sentence)\r\n return abs(difference)\r\n\r\n\r\n# Some features are not between 0 and 1, thus, for a better performance we should scale them in (0,1) range.\r\ndef feature_scaling(feature_list):\r\n feature_np = np.asarray(feature_list)\r\n scaler = MinMaxScaler()\r\n mms_feature = scaler.fit_transform(feature_np)\r\n mms_feature = mms_feature.flatten()\r\n\r\n return mms_feature\r\n","sub_path":"features_util.py","file_name":"features_util.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598341415","text":"from __future__ import annotations\n\nimport logging\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nimport skimage.io\nfrom PySide6.QtWidgets import QFileDialog, QMessageBox\n\nfrom bsmu.vision.plugins.windows.main import FileMenu\nfrom bsmu.vision.plugins.writers.base import FileWriterPlugin, FileWriter\nfrom bsmu.vision.widgets.viewers.image.layered.base import LayeredImageViewerHolder\n\nif TYPE_CHECKING:\n from bsmu.vision.core.image.base import Image\n from bsmu.vision.plugins.windows.main import MainWindowPlugin, MainWindow\n from bsmu.vision.plugins.doc_interfaces.mdi import MdiPlugin, Mdi\n\n\nclass GenericImageFileWriterPlugin(FileWriterPlugin):\n _DEFAULT_DEPENDENCY_PLUGIN_FULL_NAME_BY_KEY = {\n 'main_window_plugin': 'bsmu.vision.plugins.windows.main.MainWindowPlugin',\n 'mdi_plugin': 'bsmu.vision.plugins.doc_interfaces.mdi.MdiPlugin',\n }\n\n def __init__(\n self,\n main_window_plugin: MainWindowPlugin,\n mdi_plugin: MdiPlugin,\n ):\n super().__init__(GenericImageFileWriter)\n\n self._main_window_plugin = main_window_plugin\n self._main_window: MainWindow | None = None\n\n self._mdi_plugin = mdi_plugin\n self._mdi: Mdi | None = None\n\n self._last_saved_file_dir = None\n\n def _enable_gui(self):\n self._main_window = self._main_window_plugin.main_window\n self._main_window.add_menu_action(FileMenu, 'Save Mask', self._save_active_window_image)\n self._main_window.add_menu_action(FileMenu, 'Save Mask As...', self._select_path_and_save_active_window_image)\n\n self._mdi = self._mdi_plugin.mdi\n\n def _active_window_image(self) -> tuple[Image | None, Path | None]:\n \"\"\"\n :return: path is the active layer path (to use as default save directory)\n \"\"\"\n active_sub_window = self._mdi.activeSubWindow()\n if not isinstance(active_sub_window, LayeredImageViewerHolder):\n QMessageBox.warning(\n self._main_window,\n 'No Layered Image',\n 'The active window does not contain a layered image.')\n return None, None\n\n layer_name = 'masks'\n active_layered_image_viewer = active_sub_window.layered_image_viewer\n image_layer = active_layered_image_viewer.layer_by_name(layer_name)\n if not image_layer or not image_layer.image:\n QMessageBox.warning(\n self._main_window,\n 'No Image',\n f'The layered image does not contain an image in the <{layer_name}> layer.')\n return None, None\n\n return image_layer.image, 
active_layered_image_viewer.active_layer.path\n\n def _save_active_window_image(self):\n image, active_layer_path = self._active_window_image()\n if image is None:\n return\n\n if image.path is None:\n self._select_path_and_save_image(image, active_layer_path)\n else:\n self._save_image(image, image.path)\n\n def _select_path_and_save_active_window_image(self):\n image, active_layer_path = self._active_window_image()\n if image is None:\n return\n\n self._select_path_and_save_image(image, active_layer_path)\n\n def _select_path_and_save_image(self, image: Image, dialog_dir: Path = None):\n if image.path is not None:\n dialog_dir = image.path\n elif self._last_saved_file_dir is not None:\n dialog_dir = self._last_saved_file_dir\n dialog_dir_str = '' if dialog_dir is None else str(dialog_dir)\n file_name, selected_filter = QFileDialog.getSaveFileName(\n parent=self._main_window, caption='Save Mask', dir=dialog_dir_str, filter='PNG (*.png)')\n if not file_name:\n return\n self._last_saved_file_dir = Path(file_name).parent\n\n save_path = Path(file_name)\n if self._save_image(image, save_path):\n image.path = save_path\n\n def _save_image(self, image: Image, path: Path) -> bool:\n try:\n self._file_writer_cls().write_to_file(image, path)\n return True\n except Exception as e:\n QMessageBox.warning(\n self._main_window,\n 'Save Error',\n f'Cannot save the image.\\n{e}')\n return False\n\n\nclass GenericImageFileWriter(FileWriter):\n _FORMATS = ('png', 'jpg', 'jpeg', 'bmp', 'tif', 'tiff')\n\n def _write_to_file(self, data: Image, path: Path, **kwargs):\n logging.info('Write Generic Image')\n\n skimage.io.imsave(str(path), data.pixels, check_contrast=False)\n","sub_path":"vision-plugins/src/bsmu/vision/plugins/writers/image/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"405575387","text":"import network.HopfieldNetwork as Hopfield\nimport network.FunctionVisualizer as Drawer\nimport network.DBReader as DB\n\na = DB.select(\"./res/cells.db\", \"type1\")\nb = DB.select(\"./res/cells.db\", \"type2\")\nc = DB.select(\"./res/cells.db\", \"type3\")\naWaves = list(a[0].keys())\nbWaves = list(b[0].keys())\ncWaves = list(c[0].keys())\naIntensities = list(a[0].values())\nbIntensities = list(b[0].values())\ncIntensities = list(c[0].values())\nDB.sort(aWaves, aIntensities)\nDB.sort(bWaves, bIntensities)\nDB.sort(cWaves, cIntensities)\n\nDrawer.plt.plot(aWaves, aIntensities)\nDrawer.plt.plot(bWaves, bIntensities)\nDrawer.plt.plot(cWaves, cIntensities)\nDrawer.plt.show()\n\nimages = list()\nimages.append(aIntensities)\nimages.append(bIntensities)\nimages.append(cIntensities)\nprint(Hopfield.recognize(images, aIntensities, 10))\n","sub_path":"network/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"610015101","text":"import sys\nsys.path.append('src') \nfrom utils.text_utils import clean_text\n\n\nartists_source = {\n \"arashi\":{\"original\": \"yarukizero.livejournal.com\",\n \"transliteration\": \"yarukizero.livejournal.com\",\n \"translation\": \"yarukizero.livejournal.com\"},\n \"kpop\":{\"original\": \"melon.com/song\",\n \"translation\": \"popgasa.com\"},\n \"bollywood\":{\"original\": \"bollynook.com\",\n \"translation\": \"bollynook.com\"},\n \"family48\":{\"original\": \"stage48.net/studio48/\",\n \"transliteration\": 
\"stage48.net/studio48/\",\n \"translation\": \"stage48.net/studio48/\"},\n}\n\n\nartists_names = {\n \"arashi\": [\"Arashi\"],\n \"bollywood\": [\"Armaan Malik\", \"Arijit Singh\", \"Falak Shabir\"],\n \"family48\": [\"AKB48\", \"SDN48\"]\n}\n\n\ndef get_category(artist):\n category = \"kpop\"\n for key in artists_names:\n if clean_text(artist)[0] in clean_text(*artists_names[key]):\n category = key\n return category\n\n\ndef get_source_pages(artist):\n category = get_category(artist)\n return artists_source[category]\n\n\nif __name__ == \"__main__\":\n print(get_source_pages(\"beast\"))","sub_path":"src/search/add_search_sites.py","file_name":"add_search_sites.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292956164","text":"import numpy as np\nimport faiss \nimport copy\nfrom scipy.spatial.distance import *\nfrom sklearn.metrics.pairwise import pairwise_kernels as kernels\nfrom .sampling import Clustering, RowSampler\nfrom sklearn.naive_bayes import GaussianNB,BernoulliNB\nfrom sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier,\n GradientBoostingClassifier)\nfrom sklearn.linear_model import (LogisticRegression, SGDClassifier, \n Perceptron,PassiveAggressiveClassifier)\nfrom sklearn.svm import LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.linear_model import RidgeClassifier\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import (recall_score, f1_score,\n precision_score, accuracy_score)\n \nclass NearestCentroid:\n # Encontar el centroide más cercano\n def _find_nearest_centroid(self,sample):\n #Calcular las distancias con respecto a cada centroide\n dists=[(eval(self.distance)(c,sample),i) for i,c in self.centroids_.items()]\n #ordenar\n dists.sort()\n #regresar el indice del elemento con la menor distancia\n return dists[0][1] \n \n # Ejemplo del la formula del promedio\n def Average(self, **kwargs):\n lb=list(set(self.labels))\n self.centroids_={}\n for j in lb:\n Gj=self.data[np.where(self.labels==j)]\n #print(lb,j,Gj.shape)\n self.centroids_[j]=np.sum(Gj,axis=0)/len(Gj)\n return self\n \n # Predecir las etiquetas para un conjunto de datos\n def predict(self,unlabeled_samples):\n y=[self._find_nearest_centroid(sample) for sample in unlabeled_samples]\n return np.array(y)\n \n \n def Sum(self,**kwarg):\n print(\"su implementación para la formula de suma\")\n return self\n\n #Para Rocchio debería poder pasar los parametros beta y gamma (para eso lo **kwargs)\n def Rocchio(self,**kwargs):\n print(\"Su implentación para la formual de Rocchio\")\n \n \n def NormSum(self,**kwarg):\n print(\"su implementación para la formula la suma normalizada\") \n\n # Metodo para entrenar el modelo, solo recibe un numpy.array con los dato de n x N.\n # Donde n es el número de elmentos y N la dimensión \n def fit(self,data,labels,**kwargs):\n self.data=data\n self.labels=labels\n self.algorithm()\n return self\n\n #estructura propuesta para los algoritmos\n # La variable centroid_type es un string con el nombre de su función de calculo de centroides\n def __init__(self,distance='euclidean',centroid_type='Average', **kwargs):\n #Funcion de similitud/distancia, or defecto similitud coseno\n self.distance=distance\n self.algorithm=getattr(self, centroid_type) \n\n def __str__(self):\n conf={'distance':self.distance, 'centroid_type':self.centroid_type}\n return repr(conf)\n \nclass kNN:\n def 
_uniform(self,unlabeled_samples):\n samples=unlabeled_samples\n #print(\"shapes Xt, X\", samples.shape,self.data.shape)\n if self.distance=='cosine':\n vnorm=np.linalg.norm(samples,axis=1)\n samples=samples/vnorm.reshape(len(vnorm),1)\n #print(\"shapes Xt, X\", samples.shape,self.data.shape)\n dists,n_ids=self.index.search(samples,self.k)\n labels=[np.argmax(np.bincount(self.labels[n_id])) for n_id in n_ids]\n return np.array(labels)\n\n def _mean(self,unlabeled_samples):\n samples=unlabeled_samples\n if self.distance=='cosine':\n vnorm=np.linalg.norm(samples,axis=1)\n samples=samples/vnorm.reshape(len(vnorm),1)\n dists,n_ids=self.index.search(samples,self.k)\n nnlabels=np.array([self.labels[n_id] for n_id in n_ids])\n labels=[0 for i in range(len(samples))]\n for i,di,li in zip(range(len(dists)),dists,nnlabels):\n ulabels=np.unique(li)\n res={l:np.array([0.0,0]) for l in ulabels}\n for d,l in zip(di,li):\n res[l]=res[l]+np.array([d,1])\n res=sorted(res.items(), key=lambda item: item[1][0]/item[1][1])\n labels[i]=res[0][0]\n return np.array(labels)\n \n def predict(self,unlabeled_samples):\n return self.weight(unlabeled_samples.astype('float32'))\n \n def fit(self,data,labels):\n # faiss solo acepta float32\n self.data=data.astype('float32')\n self.labels=labels\n n,d=self.data.shape\n # si se utiliza distancia coseno deben normalizarse los vectores\n if self.distance==\"cosine\":\n vnorm=np.linalg.norm(self.data,axis=1)\n self.index=faiss.IndexFlatIP(d) # indice que utiliza el producto punto\n self.data=self.data/vnorm.reshape(len(vnorm),1)\n else:\n self.index= faiss.IndexFlatL2(d) # indice que utiliza L2\n self.index.add(self.data) \n return self\n \n def __init__(self,k=1,distance='cosine',weight_type='uniform'):\n #self.function=function #Funcion de similitud/distancia, por defecto similitud coseno\n self.weight=getattr(self, '_{}'.format(weight_type))\n self.distance=distance\n self.k=k\n\n def __str__(self):\n conf={'distance':self.distance,'number_of_neigbhors': self.k, 'weight_type':self.weight_type}\n return repr(conf)\n \nclass kCC:\n def fit(self, data, labels, lazy=False):\n if self.debug:\n print(\"Dataset (nxm)\", data.shape)\n print(\"class disrtibution\", np.bincount(labels))\n self.data=data\n self.labels=labels\n self.sampler_=RowSampler(algorithm=self.sampling,distance=self.distance, \n n_samples=self.n_samples,per_class=self.per_class\n ).fit(self.data,self.labels)\n if self.debug:\n print('%s sampling finishes' %self.sampling)\n print('Applying kernel: %s' %self.kernel)\n self.centroids_=np.array(list(self.sampler_.centroids_.values()))\n if self.sampling!='KMeans':\n self.centers_=np.array(list(self.sampler_.centers_.values()))\n else:\n self.centers_=self.centroids_\n self.references=getattr(self,'%s_' %self.reference_type)\n if not lazy: # lazy is used to prevent classiifier fit\n Xp=kernels(data,self.references, metric=self.kernel,**self.kwargs)\n self.classifier=self.classifier.fit(Xp,self.labels)\n return self\n\n def classifier_fit(self,data,labels):\n Xp=kernels(data,self.references, metric=self.kernel,**self.kwargs)\n self.classifier=self.classifier.fit(Xp,labels)\n\n def predict(self,unlabel_samples):\n Xt=kernels(unlabel_samples,self.references, metric=self.kernel,**self.kwargs)\n return self.classifier.predict(Xt)\n\n def transform(self,samples):\n Xt=kernels(samples,self.references, metric=self.kernel,**self.kwargs)\n return Xt\n \n def __init__(self,n_samples=2, distance='euclidean', kernel='laplacian', reference_type='centers',\n sampling='FFT', 
classifier=kNN(distance='cosine', k=11),debug=False, per_class=False, **kwargs):\n print(distance, kernel, sampling)\n self.n_samples=n_samples\n self.distance=distance\n self.kernel=kernel\n self.reference_type=reference_type\n self.sampling=sampling\n self.classifier=classifier\n self.per_class=per_class\n self.kwargs=kwargs\n self.debug=debug\n \n \nclass OkCC:\n #clf_list=[RandomForestClassifier(),kNN(distance='cosine', k=11), kNN(distance='euclidean', k=11),\n # GaussianNB(),LogisticRegression(), SGDClassifier(), Perceptron(),\n # PassiveAggressiveClassifier(), LinearSVC(), BernoulliNB(), DecisionTreeClassifier(), \n # ExtraTreesClassifier(), AdaBoostClassifier(), GradientBoostingClassifier()]\n #clf_list=[RandomForestClassifier(n_estimators=100),kNN(distance='cosine', k=11), \n # kNN(distance='euclidean', k=11),PassiveAggressiveClassifier()]\n clf_list=[kNN(distance='cosine', k=11), GaussianNB(),\n kNN(distance='euclidean', k=11),\n kNN(distance='cosine', k=5, weight_type='mean'), \n kNN(distance='euclidean', k=5, weight_type='mean')]\n #NearestCentroid(distance='euclidean'),NearestCentroid(distance='cosine')]\n \n def __init__(self,K=[4,8,16,32,64],distances=['euclidean','cosine'], \n kernels_list=['linear','poly','laplacian','sigmoid','rbf','cosine'],\n references_type=['centers','centroids'], samplings=['Random','FFT','DNet','KMeans'], \n classifiers=clf_list,op_function=f1_score,debug=False, sample_size=32, \n kfolds=3,**kwargs):\n self.K,self.distances,self.kernels_list=K,distances,kernels_list\n self.references_type,self.samplings=references_type,samplings\n self.classifiers,self.sample_size=classifiers,sample_size\n self.kwargs,self.debug=kwargs,debug\n self.kfolds,self.op_function=kfolds, op_function\n self.confs=np.array([(sampling!='KMeans' or distance!='cosine') \n and {'n_samples':k, 'distance': distance, 'kernel':kernel, \n 'reference_type':reference_type,'sampling':sampling, \n 'classifier':copy.deepcopy(classifier)} or None\n for k in K for distance in distances\n for kernel in kernels_list for reference_type in references_type\n for sampling in samplings for classifier in classifiers])\n self.confs=self.confs[self.confs!=None]\n self.confs=self.confs[np.random.permutation(len(self.confs))][:sample_size]\n \n def fit(self,data,labels):\n op_vals=[0 for i in range(self.sample_size)]\n self.kccs=[None for i in range(self.sample_size)]\n for i,conf in enumerate(self.confs):\n print(conf)\n skf = StratifiedKFold(n_splits=self.kfolds, random_state=33)\n skf.get_n_splits(data, labels)\n kcc=kCC(**conf,debug=True,per_class=True).fit(data,labels, lazy=True)\n avg_op_val=0\n for train_index, test_index in skf.split(data, labels):\n X_train, X_test = data[train_index], data[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n #print(\"yt,Xt\",y_test.shape,X_test.shape)\n kcc.classifier_fit(X_train,y_train)\n avg_op_val+=self.op_function(y_test,kcc.predict(X_test))\n op_vals[i]=avg_op_val/self.kfolds\n self.kccs[i]=kcc\n print(\"score value: \", op_vals[i])\n self.rank=np.argsort(-1*np.array(op_vals))\n self.op_vals=op_vals\n for kcc in self.kccs:\n kcc.classifier_fit(data,labels)\n \n def predict(self,unlabeled_samples, ensemble_size=1):\n kcc=self.kccs[self.rank[0]]\n y=kcc.predict(unlabeled_samples)\n if ensemble_size==1:\n return y\n h=y[:,None]\n for k in self.rank[1:ensemble_size]:\n kcc=self.kccs[k]\n h=np.concatenate((h,kcc.predict(unlabeled_samples)[:,None]),axis=1)\n r=[np.bincount(yi, minlength=len(np.unique(kcc.labels))) for yi in h]\n 
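# each row of r holds per-class vote counts across the top ensemble_size kCC members; the majority vote is taken below\n 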
#print(np.argmax(r,axis=1))\n return np.argmax(r,axis=1)\n \n","sub_path":"classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":11568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"203190353","text":"# coding: utf-8\n# !/usr/bin/python2.7\nfrom salt_api import remote_run_cmd\nimport json\nimport os\n\n#linux\nclass Run_Remote():\n def __init__(self,ip,os_type='Windows'):\n self.ip = ip\n self.os_type = os_type\n\n #def cd_file(self):\n # re_run = remote_run_cmd(ip=self.ip, os_type=self.os_type)\n # re_run.run_cmd('cd \"C:\Program Files\"')\n\n def windows_run(self,file_name):\n re_run = remote_run_cmd(ip=self.ip, os_type=self.os_type)\n copy=re_run.run_copy('remote/'+file_name)\n #file_name = 'oper_productid.ps1'\n if \".bat\" in file_name:\n _ret1=re_run.run_cmd('C:/'+file_name)\n\n return _ret1\n elif \".ps1\" in file_name:\n _ret1 = re_run.run_cmd('PowerShell.exe -file C:/'+file_name)\n return _ret1\n\n\n def search_pid(self,file_name):\n re_run = remote_run_cmd(ip=self.ip, os_type=self.os_type)\n if '.ps1' in file_name:\n _ret=re_run.run_cmd('wmic process | findstr \"%s\" | findstr \"powershell.exe\"'%file_name)\n elif '.bat' in file_name:\n _ret=re_run.run_cmd('wmic process | findstr \"%s\" |findstr \"cmd.exe\"'%file_name)\n else:\n _ret = ''\n res = _ret.splitlines()\n list_pid=[]\n for i,val in enumerate(res):\n _v = val.split()\n if '' not in _v:\n pid = _v[10]\n list_pid.append(pid)\n return list_pid\n\n def cmd_kill_pid(self,pid):\n re_run = remote_run_cmd(ip=self.ip, os_type=self.os_type)\n _ret = re_run.run_cmd('taskkill /F /PID '+pid)\n _ret1 = _ret.lower()\n if 'success' in _ret1:\n return _ret1\n else:\n pass\n\n def delete_file(self,file_name):\n re_run = remote_run_cmd(ip=self.ip, os_type=self.os_type)\n #re_run.run_cmd('cd C:\Program Files')\n _ret = re_run.run_cmd('del C:/'+file_name)\n\n return _ret\n\n def kill_pid(self,file_name):\n pid_list = self.search_pid(file_name)\n result = 'Failure' # default, so an empty pid_list cannot leave result unbound\n for x in pid_list:\n kill = self.cmd_kill_pid(x)\n if kill !=None:\n result = 'Success'\n break\n else:\n result = 'Failure'\n return result\n'''\n def bat_kill_del(self,file_name):\n pid_list = self.search_pid_bat(file_name)\n for x in pid_list:\n kill = self.cmd_kill_pid(x)\n if kill !=None:\n result = 'Success'\n break\n else:\n result = 'Failure'\n self.delete_file(file_name)\n return result\n\n\n\n\nif __name__ == \"__main__\":\n test = GetSysInfoW(ip='192.168.1.99')\n file_name='sd.bat'\n a = test.windows_run(file_name)\n print a\n '''","sub_path":"cmdb/data/module/salt_api/windows_run_remote.py","file_name":"windows_run_remote.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"202844096","text":"'''\nC 0 10 -- left (length does not change)\nC 1 10\nC 2 10 -- left (length does not change)\n\nposCarros [-1][1] = plate of the last car\nposCarros [-1][0] = id of the last car\n\n\nposCarros = [ [0,[placa]], [1,[placa]], [2,[placa]] ]\n\nif posCarros [-1][0] >= posCarros [-2][0] : remove normally\n ||||||||||||||||||||||||||||||||\n if ultimoPos >= verificadoPos: remove normally\n else: ignore it and add it to the accumulator of space that is free but\n unavailable\n\nthe checked one is smaller than the last one\n\nif verificadoPos <= ultimoPos: do not remove; add it to the accumulator\n\nEXCEPTION: if len(posCarros) == 1 the equality does not apply, and we do:\nlenghtDisponivel = acumulador + lenghtCarro\n\nTEST:\nC 0 10\nC 1 10\nC 2 10\n\nEARNINGS : 0\n\nposCarros = [ [0,[placa]], [1,[placa]], [2,[placa]] ]\ns 0\nacumulador += length[0] -> +10\n\nEARNINGS : 10\n\n\nposCarros = [ [1,[placa]], [2,[placa]] ]\ns 2\nacumulador += length[2] -> +10\n\n\nEARNINGS : 20\n\nposCarros = [ [1,[placa]] ] ----> EXCEPTION\ns 1\nlenghtDisponivel = 20 + 10\n\nEARNINGS : 30\n\nC 4 20\nposCarros = [ [3,[placa]] ]\n\nEARNINGS : 40\ns 4\n\nc 5 30\n\nEARNINGS : 50\n\n\nsortedID = [ [2,[placa]], [1,[placa]], [0,[placa]] ]\n'''\nwhile True:\n parkingInfo = input().split()\n avaibleLength = int(parkingInfo[0])\n nAcctions = int(parkingInfo[1])\n if avaibleLength == -1 and nAcctions == -1: break\n income = 0\n\n\n # define variables before use\n parkedCars = {}\n vehiclePosition = []\n\n count = 0\n countID = 0\n cumulativeLength = 0\n\n while count < nAcctions:\n count += 1\n\n vehicleInfo = input().split()\n acction = vehicleInfo[0].lower()\n plaque = int(vehicleInfo[1])\n\n # if the car is coming into the parking lot\n if acction == \"c\":\n\n vehicleLength = int(vehicleInfo[2])\n\n if avaibleLength > 0 and ((avaibleLength - vehicleLength) >= 0):\n if plaque not in parkedCars:\n vehiclePosition.append([countID, [plaque]])\n avaibleLength -= vehicleLength\n parkedCars[plaque] = vehicleLength\n income += 10\n\n #print(vehiclePosition, \"registered\")\n countID += 1\n\n else:\n #print(plaque,\"not registered\")\n pass\n\n #print(\"avaibleLength:\", avaibleLength)\n print(vehiclePosition)\n elif acction == \"s\":\n try:\n #print(vehiclePosition)\n #avaibleLength += parkedCars[plaque]\n lastPos = vehiclePosition[-1][0]\n\n for i in range(len(vehiclePosition)):\n if vehiclePosition[i][1][0] == plaque:\n removeAfter = i\n verifiedPos = vehiclePosition[i][0]\n print(verifiedPos, lastPos)\n if (verifiedPos < lastPos) and len(vehiclePosition) > 1:\n print(\"verifiedPos < lastPos\")\n cumulativeLength += parkedCars[plaque]\n\n elif verifiedPos == lastPos:\n if len(vehiclePosition) > 1:\n avaibleLength += parkedCars[plaque]\n print(\"verifiedPos == lastPos and len(vehiclePosition) > 1\")\n elif len(vehiclePosition) == 1:\n print(\"len(vehiclePosition) == 1\")\n avaibleLength += cumulativeLength + parkedCars[plaque]\n\n\n\n print(\"avaibleLength after:\",avaibleLength)\n\n del parkedCars[plaque]\n del vehiclePosition[removeAfter]\n\n\n except KeyError:\n pass\n\n #print(vehiclePosition)\n\n print(parkedCars)\n print(income)\n income = 0\n parkedCars = {}","sub_path":"Estacionamento.py","file_name":"Estacionamento.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"467812950","text":"import threading\r\nimport socket\r\nimport sys\r\nimport ast\r\nfrom datetime import datetime\r\nimport time\r\nimport hashlib\r\nimport struct\r\n\r\n# if len(sys.argv) != 3:\r\n# print(\"ERROR: Usage: python3 Seed.py \")\r\n# exit(1)\r\nIPAddr = \"0.0.0.0\"\r\npeer_list = []\r\nsocket_list = []\r\nport = int(10001)\r\nmsg_list = []\r\nLARGE_NUM=5665665665665665665665665665665665665665665665665665665665\r\nlock = threading.Lock()\r\n\r\nprint(\"Welcome to seed node : \" + IPAddr+\" : \"+str(port))\r\n\r\ndef generate(x):\r\n i=0\r\n while(i<1000000):\r\n k=x*x*x*x*x\r\n i+=1\r\n\r\n# def use(mode):\r\n# i=0\r\n# while(i<100):\r\n# generate(LARGE_NUM)\r\n# i+=1\r\n\r\n\r\n\r\ndef accept_connections():\r\n global peer_list, port, IPAddr\r\n print(\"Accepting connections from peers...\")\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.bind((IPAddr, port))\r\n sock.listen(5) #max no of queued connections = 5 here\r\n while 
True: \r\n conn, addr = sock.accept() \r\n unpacker = struct.Struct('4s I')\r\n data = conn.recv(unpacker.size)\r\n data = unpacker.unpack(data)\r\n print(\"In accept, connected to:\", addr[0], \":\", data[1], \"Got data:\", data)\r\n if data[0] == b'peer':\r\n generate(LARGE_NUM)\r\n conn.send(str.encode(\"This is a response from server 1\"))\r\n addr = (addr[0], data[1])\r\n # peer_list.append(addr)\r\n # peer_list = list(set(peer_list))\r\n print(\"Got to connect in low load mode\")\r\n conn.close()\r\n\t\t\t\r\nt1 = threading.Thread(target=accept_connections)\r\nt1.start()\r\nt1.join()\r\n","sub_path":"Assign3/server_1.py","file_name":"server_1.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108392018","text":"from lttl import ffmpeg_concat\nimport argparse\nimport time\nimport logging\nimport os\nimport tempfile\nimport shutil\n\nlogging.basicConfig(level=logging.INFO)\n\nstart = time.time()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dest\", type=str,\n help=\"destination file name\")\nparser.add_argument(\"input\", type=str, nargs=\"+\",\n help=\"input files to append to dest\")\n\nargs = parser.parse_args()\n\n# ordering of filenames implies order by time\nargs.input = sorted(args.input)\n\n# if dest doesn't exist yet, copy first file to it\nif not os.path.exists(args.dest):\n logging.info(\"Output video not found, copying\")\n shutil.copyfile(args.input.pop(0), args.dest)\n\nif args.input:\n _, fname = tempfile.mkstemp(suffix=\".mp4\")\n try:\n input = [args.dest] + args.input\n ffmpeg_concat(input, fname, copy=True)\n logging.info(\"Replacing %s\", args.dest)\n os.replace(fname, args.dest)\n except:\n os.unlink(fname)\n raise\n\nlogging.info(\"Completed in %d sec\", time.time() - start)\n\n","sub_path":"lttl_concat.py","file_name":"lttl_concat.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342532264","text":"'''\nRequired input settings for web service: \n\n'''\nservice_id = \"ws_6\"\nservice_endpoint = \"http://phylo.cs.nmsu.edu:5004/phylotastic_ws/ts\"\n\ninput_settings = [{'method': \"GET\", 'path': \"/all_species\", 'weight': 0.3, 'input_data': {'taxon': \"Panthera\"} }, \n\t\t{'method': \"GET\", 'path': \"/all_species\", 'weight': 0.7, 'input_data': {'taxon': \"Felidae\"} }\n\t\t]\n\n","sub_path":"QoS/input_configs/ws6_config.py","file_name":"ws6_config.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167407826","text":"# Copyright 2012 OpenStack LLC.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\nimport logging\n\nfrom cliff import lister\nfrom cliff import show\n\nfrom quantumclient.common import exceptions\nfrom quantumclient.common import utils\nfrom quantumclient import port_filters_v11_opt\nfrom quantumclient.quantum.v1_1 import QuantumPortCommand\n\n\nclass ListPort(QuantumPortCommand, lister.Lister):\n \"\"\"List ports that belong to a given tenant's network\"\"\"\n\n api = 'network'\n log = logging.getLogger(__name__ + '.ListPort')\n\n def get_parser(self, prog_name):\n parser = super(ListPort, self).get_parser(prog_name)\n\n parser.add_argument(\n '--show-details',\n help='show detailed info of networks',\n action='store_true',\n default=False, )\n for item in port_filters_v11_opt:\n option_key = item.keys()[0]\n option_defs = item.get(option_key)\n parser.add_argument(option_key, **option_defs)\n return parser\n\n def get_data(self, parsed_args):\n self.log.debug('get_data(%s)' % parsed_args)\n quantum_client = self.app.client_manager.quantum\n quantum_client.tenant = parsed_args.tenant_id\n quantum_client.format = parsed_args.request_format\n search_opts = {\n 'tenant': parsed_args.tenant_id, }\n for item in port_filters_v11_opt:\n option_key = item.keys()[0]\n arg = option_key[2:]\n arg = arg.replace('-', '_')\n arg_value = getattr(parsed_args, arg, None)\n if arg_value is not None:\n search_opts.update({option_key[2:]: arg_value, })\n\n self.log.debug('search options: %s', search_opts)\n\n columns = ('ID', )\n data = None\n if parsed_args.show_details:\n data = quantum_client.list_ports_details(\n parsed_args.net_id, **search_opts)\n # dict:dict: {u'ports': [{\n # u'op-status': u'DOWN',\n # u'state': u'ACTIVE',\n # u'id': u'479ba2b7-042f-44b9-aefb-b1550e114454'}, ]}\n columns = ('ID', 'op-status', 'state')\n else:\n data = quantum_client.list_ports(parsed_args.net_id, **search_opts)\n # {u'ports': [{u'id': u'7a068b68-c736-42ab-9e43-c9d83c57627e'}]}\n ports = []\n if 'ports' in data:\n ports = data['ports']\n\n return (columns,\n (utils.get_item_properties(\n s, columns, formatters={}, ) for s in ports), )\n\n\nclass ShowPort(QuantumPortCommand, show.ShowOne):\n \"\"\"Show information of a given port\"\"\"\n\n api = 'network'\n log = logging.getLogger(__name__ + '.ShowPort')\n\n def get_parser(self, prog_name):\n parser = super(ShowPort, self).get_parser(prog_name)\n parser.add_argument(\n 'port_id', metavar='port-id',\n help='ID of the port to show', )\n parser.add_argument(\n '--show-details',\n help='show detailed info',\n action='store_true',\n default=False, )\n return parser\n\n def get_data(self, parsed_args):\n self.log.debug('get_data(%s)' % parsed_args)\n quantum_client = self.app.client_manager.quantum\n quantum_client.tenant = parsed_args.tenant_id\n quantum_client.format = parsed_args.request_format\n data = None\n if parsed_args.show_details:\n data = quantum_client.show_port_details(\n parsed_args.net_id, parsed_args.port_id)\n # {u'port': {u'op-status': u'DOWN', u'state': u'ACTIVE',\n # u'id': u'479ba2b7-042f-44b9-aefb-\n # b1550e114454', u'attachment': {u'id': u'gw-7a068b68-c7'}}}\n else:\n data = quantum_client.show_port(\n parsed_args.net_id, parsed_args.port_id)\n # {u'port': {u'op-status': u'DOWN', u'state': u'ACTIVE',\n # u'id': u'479ba2b7-042f-44b9-aefb-b1550e114454'}}\n\n port = 'port' in data and data['port'] or None\n if port:\n attachment = 'attachment' in port and port['attachment'] or 
None\n if attachment:\n interface = attachment['id']\n port.update({'attachment': interface})\n return zip(*sorted(port.iteritems()))\n return ('', [])\n\n\nclass CreatePort(QuantumPortCommand, show.ShowOne):\n \"\"\"Create port for a given network\"\"\"\n\n api = 'network'\n log = logging.getLogger(__name__ + '.CreatePort')\n\n def get_parser(self, prog_name):\n parser = super(CreatePort, self).get_parser(prog_name)\n return parser\n\n def get_data(self, parsed_args):\n self.log.debug('get_data(%s)' % parsed_args)\n quantum_client = self.app.client_manager.quantum\n quantum_client.tenant = parsed_args.tenant_id\n quantum_client.format = parsed_args.request_format\n data = quantum_client.create_port(parsed_args.net_id)\n # {u'network': {u'id': u'e9424a76-6db4-4c93-97b6-ec311cd51f19'}}\n info = 'port' in data and data['port'] or None\n if info:\n print >>self.app.stdout, _('Created a new Logical Port:')\n else:\n info = {'': ''}\n return zip(*sorted(info.iteritems()))\n\n\nclass DeletePort(QuantumPortCommand):\n \"\"\"Delete a given port\"\"\"\n\n api = 'network'\n log = logging.getLogger(__name__ + '.DeletePort')\n\n def get_parser(self, prog_name):\n parser = super(DeletePort, self).get_parser(prog_name)\n parser.add_argument(\n 'port_id', metavar='port-id',\n help='ID of the port to delete', )\n return parser\n\n def run(self, parsed_args):\n self.log.debug('run(%s)' % parsed_args)\n quantum_client = self.app.client_manager.quantum\n quantum_client.tenant = parsed_args.tenant_id\n quantum_client.format = parsed_args.request_format\n quantum_client.delete_port(parsed_args.net_id, parsed_args.port_id)\n print >>self.app.stdout, (_('Deleted Logical Port: %(portid)s') %\n {'portid': parsed_args.port_id})\n return\n\n\nclass UpdatePort(QuantumPortCommand):\n \"\"\"Update information of a given port\"\"\"\n\n api = 'network'\n log = logging.getLogger(__name__ + '.UpdatePort')\n\n def get_parser(self, prog_name):\n parser = super(UpdatePort, self).get_parser(prog_name)\n parser.add_argument(\n 'port_id', metavar='port-id',\n help='ID of the port to update', )\n\n parser.add_argument(\n 'newvalues', metavar='field=newvalue[,field2=newvalue2]',\n help='new values for the Port')\n\n return parser\n\n def run(self, parsed_args):\n self.log.debug('run(%s)' % parsed_args)\n quantum_client = self.app.client_manager.quantum\n quantum_client.tenant = parsed_args.tenant_id\n quantum_client.format = parsed_args.request_format\n field_values = parsed_args.newvalues\n data = {'port': {}}\n for kv in field_values.split(\",\"):\n try:\n k, v = kv.split(\"=\")\n data['port'][k] = v\n except ValueError:\n raise exceptions.CommandError(\n \"malformed new values (field=newvalue): %s\" % kv)\n data['network_id'] = parsed_args.net_id\n data['port']['id'] = parsed_args.port_id\n\n quantum_client.update_port(\n parsed_args.net_id, parsed_args.port_id, data)\n print >>self.app.stdout, (_('Updated Logical Port: %(portid)s') %\n {'portid': parsed_args.port_id})\n return\n","sub_path":"quantumclient/quantum/v1_1/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":8221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148176209","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.db.models import Q\nfrom django.template.loader import render_to_string\n\nfrom sanpham.models import SimTheoGia ,NhaMang, SimTheoLoai , SimNamSinh ,SanPham\nfrom hoadon.models import HoaDon\n\ndef timkiem_nangcao(request):\n sp = 
SanPham.objects.filter(DaBan=False)\n    nm = NhaMang.objects.all()\n ns = SimNamSinh.objects.all()\n stl = SimTheoLoai.objects.all()\n stg = SimTheoGia.objects.all()\n# filter by carrier\n nhamang = request.GET.get('nhamang')\n if nhamang:\n sp = sp.filter(\n Q(Mang__title__icontains=nhamang)\n\n )\n# filter by birth year\n namsinh = request.GET.get('namsinh')\n if namsinh:\n sp = sp.filter(\n Q(NamSinh__title__icontains=namsinh)\n\n )\n# filter by SIM type\n loai = request.GET.get('loai')\n if loai:\n sp = sp.filter(\n Q(LoaiSims__title__icontains=loai)\n\n )\n# filter by price range\n mucgia = request.GET.get('mucgia')\n if mucgia:\n sp = sp.filter(\n Q(LoaiGia__title__icontains=mucgia)\n\n )\n# filter by SIM number\n sosim = request.GET.get('so')\n if sosim:\n if '*' in sosim:\n sosim = sosim.split('*')\n if sosim[0] != '':\n sp = sp.filter(Q(SoSim__startswith=sosim[0]))\n if sosim[1] != '':\n sp = sp.filter(Q(SoSim__endswith=sosim[1]))\n else:\n if sosim:\n sp = sp.filter(Q(SoSim__icontains=sosim))\n\n context = {\n \"sanpham\": sp,\n \"ns\": ns,\n \"nhamang\": nhamang,\n \"loai\": loai,\n \"mucgia\": mucgia,\n \"stl\": stl,\n \"stg\": stg,\n \"nm\": nm,\n }\n data = dict()\n data['html'] = render_to_string('includes/timkiem/showtimkiem.html', context, request=request)\n return JsonResponse(data,status=200)\n # return render(request, 'includes/timkiem/ketqua-timkiem.html', context)\n\ndef timkiem(request):\n stl = SimTheoLoai.objects.all()\n sns = SimNamSinh.objects.all()\n nm = NhaMang.objects.all()\n stg = SimTheoGia.objects.all()\n hd = HoaDon.objects.order_by('-NgayDatHang')[0:5]\n sp = SanPham.objects.filter(DaBan=False)\n# filter by SIM number\n sosim = request.GET.get('so')\n if sosim:\n if '*' in sosim:\n sosim = sosim.split('*')\n if sosim[0] != '':\n sp = sp.filter(Q(SoSim__startswith=sosim[0]))\n if sosim[1] != '':\n sp = sp.filter(Q(SoSim__endswith=sosim[1]))\n else:\n if sosim:\n sp = sp.filter(Q(SoSim__icontains=sosim))\n\n context = {\n \"sp\": sp,\n \"stl\": stl,\n \"sns\": sns,\n \"nm\": nm,\n \"stg\": stg,\n \"hd\": hd,\n }\n return render(request, 'includes/timkiem/ketqua-timkiem.html', context)\n","sub_path":"PythonWeb/timkiem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"480226552","text":"'''\r\nA simple Python logger\r\n\r\n@author: Erik Norvelle\r\n@since: Oct 4, 2013\r\n'''\r\n\r\nimport sys, codecs, os, time\r\n \r\nclass Logger():\r\n \"\"\"\r\n A simple logger that can direct its output either to a log file or to standard error\r\n \"\"\"\r\n \r\n start_time = None\r\n logfile = None # so the guard in log() sees an unconfigured logger instead of raising AttributeError\r\n log_dest = None\r\n\r\n @classmethod\r\n def log_to_stderr(cls):\r\n Logger.logfile = sys.stderr\r\n Logger.log_dest = \"stderr\"\r\n\r\n @classmethod\r\n def log_to_file(cls, filename):\r\n Logger.logfile = codecs.open(filename, 'w', 'utf-8')\r\n Logger.log_dest = \"file\"\r\n \r\n @classmethod\r\n def log(cls, message):\r\n if not Logger.logfile:\r\n raise Exception(\"Logger class not configured with a log destination\")\r\n Logger.logfile.write(message + \"\\n\")\r\n Logger.logfile.flush()\r\n if Logger.log_dest == \"file\":\r\n os.fsync(Logger.logfile.fileno()) # os.fsync expects a file descriptor, not a file object\r\n \r\n @classmethod\r\n def start_timer(cls):\r\n Logger.start_time = time.time()\r\n \r\n @classmethod\r\n def get_elapsed(cls):\r\n if Logger.start_time is None:\r\n raise Exception(\"Timer has not been started\")\r\n elapsed = time.time() - Logger.start_time\r\n minutes, seconds = divmod(elapsed, 60)\r\n return \"%d:%02d\" 
% (minutes, seconds)\r\n \r\n @classmethod\r\n def log_with_elapsed(cls, message):\r\n elapsed = Logger.get_elapsed()\r\n message = \"%s (%s elapsed)\" % (message, elapsed)\r\n Logger.log(message)\r\n \r\n","sub_path":"NorvLogger/norvlogger/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"313279413","text":"#------------------------------------------------------------------------------\n# Venkata Karthik Thota\n# InsertionSort.py\n# Sorts list of numbers in increasing order\n#------------------------------------------------------------------------------\nimport random\n\nnumbers = []\nfor num in range(1,11):\n numbers.append(random.randrange(1,100))\n\nfor j in range(len(numbers)-1):\n key = numbers[j+1]\n i = j\n while i > -1 and numbers[i] > key:\n numbers[i+1] = numbers[i]\n i -= 1\n\n numbers[i+1] = key\n\nprint (numbers)\n","sub_path":"Examples/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"534632475","text":"frutas = open('frutas.txt', 'r')\nnumeros = open('numeros.txt', 'r')\nlista_numeros = []\nfor i in numeros:\n lista_numeros.append(i)\ndef eliminar_un_caracter(lista, elemento):\n auxilar = []\n for i in lista:\n a = i.replace(elemento, \"\")\n auxilar.append(a)\n return auxilar\ndef numeros_pares(lista):\n aux = []\n for i in lista:\n if(float(i) % 2 == 0):\n aux.append(i)\n return aux\nif __name__ == \"__main__\":\n nueva = eliminar_un_caracter(lista_numeros, \"\\n\")\n nueva_fin = numeros_pares(nueva)\n print(nueva_fin)","sub_path":"taller_de_funciones/ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"541107450","text":"from .debugger import Debugger\n\n\nclass HackingOperator(object):\n def __init__(self):\n self.debugger = Debugger()\n self.proc_name = None\n self.as_alloc_addr = None\n\n self.vac_alloc_addr = None\n self.cc_vac_alloc_addr = {\n \"begin\": None,\n \"old_data\": None, # key must match the \"old_data\" lookups in toggle_cc_vac\n \"pointer\": None,\n \"bool\": None\n }\n\n self.fullmap_attack_alloc_addr = None\n\n # Original Damage\n self.original_dmg = {\n 0x008ECB38: \"00 00 00 00 00 00 24 40\",\n 0x008ED758: \"00 00 00 00 00 00 10 40\",\n 0x008ECB30: \"00 00 00 00 00 00 14 40\",\n 0x008ED778: \"33 33 33 33 33 33 0b 40\"\n }\n\n def toggle_full_god_mode(self, active=None):\n data = {\n \"addr\": 0x007AD377,\n \"data_hack\": \"0F 84\",\n \"data\": \"0F 85\"\n }\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_miss_god_mode(self, active=None):\n data = {\n \"miss_addr\": 0x007AD487,\n \"data\": \"89 06 83 c6 04 ff 4d c0\",\n \"data_hack\": \"c7 06 00 00 00 00 90 90\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"miss_addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"miss_addr\"], data[\"data\"])\n\n def toggle_accuracy_hack(self, active=None):\n \"\"\"\n alloc(fix,8)\n\n fix:\n db 66 66 66 66 66 66 E6 3F\n\n 00424D22: //DC 0D C8 F1 8E 00 DD 5D 34 74 58 FF B6 84 00 00\n fmul qword ptr [fix]\n\n 008ED6F8: //66 66 66 66 66 66 E6 3F 33 33 33 33 33 33 D3 3F\n db 00 00 00 E0 CF 12 63 41\n\n 005DE247: //0F 85 9A 00 00 00 8B 7D 08 6B F6 1C 8B 
03 6B FF\n db 90 90 90 90 90 90\n \"\"\"\n\n data1 = {\n \"acc_addr\": 0x00424D22,\n \"data\": \"DC 0D C8 89 8E 00\",\n \"data_hack\": \"DC 0D 00 00 BD 0F\"\n }\n\n data2 = {\n \"acc_addr\": 0x008ED6F8,\n \"data\": \"66 66 66 66 66 66 E6 3F\",\n \"data_hack\": \"00 00 00 E0 CF 12 63 41\"\n }\n\n data3 = {\n \"acc_addr\": 0x005DE247,\n \"data\": \"0F 85 9A 00 00 00\",\n \"data_hack\": \"90 90 90 90 90 90\"\n }\n if active:\n self.debugger.write_process_memory(data1[\"acc_addr\"], data1[\"data_hack\"])\n self.debugger.write_process_memory(data2[\"acc_addr\"], data2[\"data_hack\"])\n self.debugger.write_process_memory(data3[\"acc_addr\"], data3[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data1[\"acc_addr\"], data1[\"data\"])\n self.debugger.write_process_memory(data2[\"acc_addr\"], data2[\"data\"])\n self.debugger.write_process_memory(data3[\"acc_addr\"], data3[\"data\"])\n\n def toggle_defense_hack(self, active=None):\n data = {\n \"addr\": 0x00670090,\n \"data\": \"72 04\",\n \"data_hack\": \"77 04\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_no_knock_back(self, active=None):\n data = {\n \"addr\": 0x007ADB78,\n \"data\": \"7C 03\",\n \"data_hack\": \"7D 03\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_unlimited_attack(self, active=None):\n data1 = {\n \"addr\": 0x007937C5,\n \"data\": \"39 9F\",\n \"data_hack\": \"89 9F\"\n }\n\n data2 = {\n \"addr\": 0x007A7A47,\n \"data\": \"89 08\",\n \"data_hack\": \"29 08\"\n }\n\n if active:\n self.debugger.write_process_memory(data1[\"addr\"], data1[\"data_hack\"])\n self.debugger.write_process_memory(data2[\"addr\"], data2[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data1[\"addr\"], data1[\"data\"])\n self.debugger.write_process_memory(data2[\"addr\"], data2[\"data\"])\n\n def toggle_speed_attack(self, active=None):\n \"\"\"\n alloc(Hack,32)\n label(return)\n\n Hack:\n mov eax, -10000\n cmp eax, 02\n jg 00442de4\n jmp return\n\n 00442DDF:\n jmp Hack\n db 90 90 90\n return:\n \"\"\"\n if active:\n if self.as_alloc_addr:\n addr_alloc = self.as_alloc_addr\n else:\n addr_alloc = self.debugger.allocate_mem(size=32)\n self.as_alloc_addr = addr_alloc\n if not addr_alloc:\n return False\n\n code1 = 0x100442DE4 - (addr_alloc + 0xE)\n code1 = self.debugger.reverse_code(hex(code1)[2:])\n\n code2 = (0x100442DE4 + 0x3) - (addr_alloc + 0x13)\n code2 = self.debugger.reverse_code(hex(code2)[2:])\n\n code3 = addr_alloc - 0x00442DE4\n code3 = self.debugger.reverse_code(hex(code3)[2:])\n\n sub_assembly_function = [\n [\"base_addr\", addr_alloc],\n [\"mov eax,FFFF0000\", \"B8 00 00 FF FF\"],\n [\"cmp eax,02\", \"83 F8 02\"],\n [\"jg NamLun.exe+42DE4\", \"0F 8F\" + code1],\n [\"jmp NamLun.exe+42DE7\", \"E9\" + code2]\n ]\n\n main_assembly_func_hack = [\n [\"base_addr\", 0x00442DDF],\n [\"jmp \", \"E9\" + code3]\n ]\n\n sub_data_hack = \"\"\n main_data_hack = \"\"\n\n for data1 in sub_assembly_function[1:]:\n sub_data_hack += data1[1]\n for data2 in main_assembly_func_hack[1:]:\n main_data_hack += data2[1]\n\n self.debugger.write_process_memory(sub_assembly_function[0][1], sub_data_hack)\n self.debugger.write_process_memory(main_assembly_func_hack[0][1], main_data_hack)\n\n else:\n main_assembly_func = [\n [\"base_addr\", 0x00442DDF],\n [\"jg 
NamLun.exe+42DE4\", \"7f 03 6a 02 58\"],\n ]\n\n self.debugger.write_process_memory(main_assembly_func[0][1], main_assembly_func[1][1])\n\n def toggle_movement_speed_hack(self, active=None):\n data = {\n \"addr\": 0x007F246D,\n \"data\": \"0F 84 82 00 00 00\",\n \"data_hack\": \"90 90 90 90 90 90\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_air_swim(self, active=None):\n data = {\n \"addr\": 0x00614CC7,\n \"data\": \"75 04\",\n \"data_hack\": \"74 04\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_tubi(self, active=None):\n data = {\n \"addr\": 0x004BECC6,\n \"data\": \"75 36\",\n \"data_hack\": \"90 90\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_mana_regen(self, active=None):\n data = {\n \"addr\": 0x00830420,\n \"data\": \"81 FB 10 27\",\n \"data_hack\": \"81 FB 01 00\"\n }\n\n if active:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data_hack\"])\n else:\n self.debugger.write_process_memory(data[\"addr\"], data[\"data\"])\n\n def toggle_hp_hack(self, active=None):\n # Not yet\n pass\n\n def toggle_full_map_attack(self, active=None):\n \"\"\"\n [ENABLE]\n alloc(FMA,64)\n\n FMA:\n mov edx,[00978358]\n lea edx,[edx+D5C]\n lea eax,[edx]\n jmp 005C97AD\n\n 005C979E:\n jmp FMA\n nop\n nop\n nop\n nop\n\n [DISABLE]\n dealloc(FMA)\n\n 005C979E:\n mov ecx,[ebx+00000480]\n lea eax,[ebx+00000480]\n mov eax,[eax+04]\n \"\"\"\n if active:\n if self.fullmap_attack_alloc_addr:\n fullmap_att_addr = self.fullmap_attack_alloc_addr\n\n else:\n fullmap_att_addr = self.debugger.allocate_mem(size=64)\n self.fullmap_attack_alloc_addr = fullmap_att_addr\n\n fullmap_att_data = {\n \"addr\": fullmap_att_addr,\n \"FMA\": \"8B 15 58 83 97 00\" + \"8D 92 5C 0D 00 00\" + \"8D 02\"\n + \"E9\" + self.debugger.reverse_code(hex(0x1005C97AD - (fullmap_att_addr + 0x13))[2:])\n }\n\n main_data_hack = {\n 0x005C979E: \"E9\" + self.debugger.reverse_code(hex(fullmap_att_addr - 0x005C97A3)[2:])\n }\n\n self.debugger.write_process_memory(fullmap_att_data[\"addr\"],\n fullmap_att_data[\"FMA\"])\n for addr in main_data_hack:\n self.debugger.write_process_memory(addr, main_data_hack[addr])\n\n else:\n main_data = {\n 0x005C979E: \"8B 8B 80 04 00 00\" + \"8D 83 80 04 00 00\" + \"8B 40 04\"\n }\n for addr in main_data:\n self.debugger.write_process_memory(addr, main_data[addr])\n if self.fullmap_attack_alloc_addr:\n self.debugger.free_mem(address=self.fullmap_attack_alloc_addr)\n self.fullmap_attack_alloc_addr = None\n\n def toggle_cc_vac(self, active=None):\n \"\"\"\n [ENABLE]\n alloc(begin,2048)\n alloc(olddata,32)\n alloc(pointer,4)\n alloc(bool,4)\n registersymbol(bool)\n registersymbol(olddata)\n label(set)\n label(ret)\n label(end)\n\n begin:\n cmp [bool],1\n je set\n ret:\n mov esi,olddata\n movsd\n movsd\n movsd\n movsd\n pop edi\n jmp end\n\n set:\n mov esi,[00978358]\n mov esi,[esi+24]//left wall\n mov [pointer], esi\n mov esi,[pointer]\n mov [olddata],esi\n mov esi,[00978358]\n mov esi,[esi+28]//top wall\n mov [pointer], esi\n mov esi,[pointer]\n mov [olddata+04],esi\n mov esi,[00978358]\n mov esi,[esi+2C]//right wall\n mov [pointer], esi\n mov esi,[pointer]\n mov [olddata+08],esi\n mov 
esi,[00978358]\n mov esi,[esi+30]//bottom wall\n mov [pointer], esi\n mov esi,[pointer]\n mov [olddata+2c],esi\n mov [bool],0\n jmp ret\n\n 007F1156:\n jmp begin\n end:\n\n olddata:\n DB 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n pointer:\n DB 00 00 00 00\n bool:\n DB 01 00 00 00\n\n [DISABLE]\n dealloc(begin)\n dealloc(olddata)\n dealloc(pointer)\n dealloc(bool)\n\n 007F1156:\n DB A5 A5 A5 A5 5F\n \"\"\"\n if active:\n if self.cc_vac_alloc_addr[\"begin\"]:\n begin = self.cc_vac_alloc_addr[\"begin\"]\n old_data = self.cc_vac_alloc_addr[\"old_data\"]\n pointer = self.cc_vac_alloc_addr[\"pointer\"]\n booL = self.cc_vac_alloc_addr[\"bool\"]\n else:\n begin = self.debugger.allocate_mem(size=2048)\n old_data = self.debugger.allocate_mem(size=32)\n pointer = self.debugger.allocate_mem(size=4)\n booL = self.debugger.allocate_mem(size=4)\n self.cc_vac_alloc_addr[\"begin\"] = begin\n self.cc_vac_alloc_addr[\"old_data\"] = old_data\n self.cc_vac_alloc_addr[\"pointer\"] = pointer\n self.cc_vac_alloc_addr[\"bool\"] = booL\n\n begin_data = {\n \"addr\": begin,\n \"begin\": \"83 3D\" + self.debugger.reverse_code(hex(booL)[2:]) + \"01\"\n + \"0F 84 0F 00 00 00\" + \"BE\" + self.debugger.reverse_code(hex(old_data)[2:])\n + \"A5\" * 4 + \"5F\" + \"E9\" + self.debugger.reverse_code(hex(0x1007F115B - (begin + 0x1C))[2:])[\n :11],\n\n \"set\": \"8B 35 58 83 97 00\" + \"8B 76 24\" + \"89 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"8B 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"89 35\" + self.debugger.reverse_code(hex(old_data)[2:])\n + \"8B 35 58 83 97 00\" + \"8B 76 28\"\n + \"89 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"8B 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"89 35\" + self.debugger.reverse_code(hex(old_data + 0x4)[2:])\n + \"8B 35 58 83 97 00\" + \"8B 76 2C\"\n + \"89 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"8B 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"89 35\" + self.debugger.reverse_code(hex(old_data + 0x8)[2:])\n + \"8B 35 58 83 97 00\" + \"8B 76 30\"\n + \"89 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"8B 35\" + self.debugger.reverse_code(hex(pointer)[2:])\n + \"89 35\" + self.debugger.reverse_code(hex(old_data + 0x2c)[2:])\n + \"C7 05\" + self.debugger.reverse_code(hex(booL)[2:]) + \"00\" * 4\n + \"E9 76 FF FF FF\"\n }\n\n bool_data = {\n \"addr\": booL,\n \"bool\": \"01\"\n }\n\n main_data_hack = {\n 0x007F1156: \"E9\" + self.debugger.reverse_code(hex((begin + 0x100000000) - 0x007F115B)[2:])[:11]\n }\n print(hex(begin), hex(old_data), hex(pointer), hex(booL))\n print(self.debugger.reverse_code(hex(0x1007F115B - (begin + 0x1C))[2:])[:11],\n self.debugger.reverse_code(hex((begin + 0x100000000) - 0x007F115B)[2:])[:11])\n self.debugger.write_process_memory(bool_data[\"addr\"], bool_data[\"bool\"])\n self.debugger.write_process_memory(begin_data[\"addr\"], begin_data[\"begin\"]\n + begin_data[\"set\"])\n for addr in main_data_hack:\n self.debugger.write_process_memory(addr, main_data_hack[addr])\n\n else:\n main_data = {\n 0x007F1156: \"A5\" * 4 + \"5F\"\n }\n for addr in main_data:\n self.debugger.write_process_memory(addr, main_data[addr])\n if self.cc_vac_alloc_addr[\"begin\"]:\n for item in self.cc_vac_alloc_addr:\n self.debugger.free_mem(address=self.cc_vac_alloc_addr[item])\n self.cc_vac_alloc_addr[item] = None\n\n def toggle_mobs_vac(self, active=None):\n \"\"\"\n [enable]\n alloc(newmem,64)\n label(retnewmem)\n label(end)\n\n newmem:\n cmp edi,0\n je end\n cmp [ebx+00000178],esi\n 
je 007f187d\n\n end:\n call 007f1fec\n jmp retnewmem\n\n 007f1867:\n jmp newmem\n nop\n nop\n retnewmem:\n\n [disable]\n 007f1867:\n je 007f186e\n call 007f1fec\n\n dealloc(newmem)\n \"\"\"\n if active == \"left\" or active == \"right\":\n if self.vac_alloc_addr:\n vac = self.vac_alloc_addr\n else:\n vac = self.debugger.allocate_mem(size=64)\n self.vac_alloc_addr = vac\n\n vac_data = {\n \"addr\": vac,\n \"vac\": \"83 FF 00\" + \"0F 84 0C 00 00 00\" + \"39 B3 78 01 00 00\"\n + \"0F 84\" + self.debugger.reverse_code(hex(0x1007F187D - (vac + 0x15))[2:]),\n \"end\": \"E8\" + self.debugger.reverse_code(hex(0x1007F1FEC - (vac + 0x1A))[2:])\n + \"E9\" + self.debugger.reverse_code(hex(0x1007F186E - (vac + 0x1F))[2:])\n }\n self.debugger.write_process_memory(vac_data[\"addr\"], vac_data[\"vac\"]\n + vac_data[\"end\"])\n\n main_data_hack = {\n 0x007f1867: \"E9\" + self.debugger.reverse_code(hex(vac - 0x007F186C)[2:])\n + \"90\" * 2\n }\n for addr in main_data_hack:\n self.debugger.write_process_memory(addr, main_data_hack[addr])\n\n left = {\n \"addr\": 0x007F4055,\n \"data_hack\": \"74 53\",\n \"data\": \"73 53\"\n }\n right = {\n \"addr\": 0x007F40C4,\n \"data_hack\": \"77 72\",\n \"data\": \"76 72\"\n }\n\n if \"left\" in active:\n\n self.debugger.write_process_memory(left[\"addr\"], left[\"data_hack\"])\n self.debugger.write_process_memory(right[\"addr\"], right[\"data\"])\n else:\n self.debugger.write_process_memory(right[\"addr\"], right[\"data_hack\"])\n self.debugger.write_process_memory(left[\"addr\"], left[\"data\"])\n\n else:\n main_data = {\n 0x007f1867: \"74 05\" + \"E8 7E 07 00 00\",\n 0x007F4055: \"73 53\", # left\n 0x007F40C4: \"76 72\" # right\n }\n for addr in main_data:\n self.debugger.write_process_memory(addr, main_data[addr])\n if self.vac_alloc_addr:\n self.debugger.free_mem(address=self.vac_alloc_addr)\n self.vac_alloc_addr = None\n\n def adjust_damage(self, active):\n if active == \"reset\":\n current_dmg = {\n 0x008ECB38: self.debugger.read_process_memory(0x008ECB38, 8),\n 0x008ED758: self.debugger.read_process_memory(0x008ED758, 8),\n 0x008ECB30: self.debugger.read_process_memory(0x008ECB30, 8),\n 0x008ED778: self.debugger.read_process_memory(0x008ED778, 8)\n }\n\n if current_dmg != self.original_dmg:\n for dmg in self.original_dmg:\n self.debugger.write_process_memory(dmg, self.original_dmg[dmg])\n print(\"Reset Dmg!\")\n else:\n dmg_data = dict(\n min={\"addr\": 0x008ECB38, \"current_dmg\": \"\"},\n max1={\"addr\": 0x008ED758, \"current_dmg\": \"\"},\n max2={\"addr\": 0x008ECB30, \"current_dmg\": \"\"},\n max3={\"addr\": 0x008ED778, \"current_dmg\": \"\"}\n )\n\n for dmg in dmg_data:\n dmg_data[dmg][\"current_dmg\"] = self.debugger.read_process_memory(dmg_data[dmg][\"addr\"], 8)\n\n for dmg in dmg_data:\n dmg_data[dmg][\"current_dmg\"] = self.debugger.reverse_code(dmg_data[dmg][\"current_dmg\"][18:])\n dmg_data[dmg][\"current_dmg\"] = int(dmg_data[dmg][\"current_dmg\"].replace(\" \", \"\"), 16)\n\n if \"min\" in active.lower():\n if \"d\" in active.lower():\n for dmg in dmg_data:\n if \"min\" in dmg:\n new_dmin = dmg_data[dmg][\"current_dmg\"] - 5\n data_new_dmin = \"00 \" * 6 + self.debugger.reverse_code(hex(new_dmin)[2:])\n self.debugger.write_process_memory(dmg_data[dmg][\"addr\"], data_new_dmin)\n else:\n for dmg in dmg_data:\n if \"min\" in dmg:\n new_dmin = dmg_data[dmg][\"current_dmg\"] + 5\n data_new_dmin = \"00 \" * 6 + self.debugger.reverse_code(hex(new_dmin)[2:])\n self.debugger.write_process_memory(dmg_data[dmg][\"addr\"], data_new_dmin)\n\n else:\n if \"d\" 
in active.lower():\n for dmg in dmg_data:\n if \"max\" in dmg:\n new_dmin = dmg_data[dmg][\"current_dmg\"] - 5\n data_new_dmax = \"00 \" * 6 + self.debugger.reverse_code(hex(new_dmin)[2:])\n self.debugger.write_process_memory(dmg_data[dmg][\"addr\"], data_new_dmax)\n\n else:\n for dmg in dmg_data:\n if \"max\" in dmg:\n new_dmin = dmg_data[dmg][\"current_dmg\"] + 5\n data_new_dmax = \"00 \" * 6 + self.debugger.reverse_code(hex(new_dmin)[2:])\n self.debugger.write_process_memory(dmg_data[dmg][\"addr\"], data_new_dmax)\n\n def get_statistic(self):\n statistics = {}\n\n player_pointer = 0x00978140\n player_offset = 0x18\n\n mobs_pointer = 0x0097813C\n mobs_offset = 0x10\n\n map_id_pointer = 0x00979268\n map_id_offset = 0x62C\n\n pl_pointer_data = self.debugger.read_process_memory(player_pointer, 4)\n if not pl_pointer_data:\n statistics[\"player_count\"] = None\n else:\n players_addr = int(self.debugger.reverse_code(pl_pointer_data).replace(\" \", \"\"), 16) + player_offset\n players_number = self.debugger.read_process_memory(players_addr, 4)\n players_number = int(self.debugger.reverse_code(players_number).replace(\" \", \"\"), 16)\n statistics[\"player_count\"] = str(players_number + 1)\n\n mo_pointer_data = self.debugger.read_process_memory(mobs_pointer, 4)\n if not mo_pointer_data:\n statistics[\"monster_count\"] = None\n else:\n mobs_addr = int(self.debugger.reverse_code(mo_pointer_data).replace(\" \", \"\"), 16) + mobs_offset\n mobs_number = self.debugger.read_process_memory(mobs_addr, 4)\n mobs_number = int(self.debugger.reverse_code(mobs_number).replace(\" \", \"\"), 16)\n statistics[\"monster_count\"] = str(mobs_number)\n\n map_pointer_data = self.debugger.read_process_memory(map_id_pointer, 4)\n if not map_pointer_data:\n statistics[\"map_id\"] = None\n else:\n map_id_addr = int(self.debugger.reverse_code(map_pointer_data).replace(\" \", \"\"), 16) + map_id_offset\n map_id = self.debugger.read_process_memory(map_id_addr, 4)\n map_id = int(self.debugger.reverse_code(map_id).replace(\" \", \"\"), 16)\n statistics[\"map_id\"] = str(map_id)\n\n dmg_cap_addr = 0x008ED798\n dmg_cap = self.debugger.read_process_memory(dmg_cap_addr, 4)\n if not dmg_cap:\n statistics[\"damage_cap\"] = None\n else:\n dmg_cap = int(self.debugger.reverse_code(dmg_cap).replace(\" \", \"\"), 16)\n statistics[\"damage_cap\"] = str(dmg_cap)\n\n magic_att_cap_addr = 0x006642A7\n magic_att_cap = self.debugger.read_process_memory(magic_att_cap_addr, 4)\n if not magic_att_cap:\n statistics[\"magic_att_cap\"] = None\n else:\n magic_att_cap = int(self.debugger.reverse_code(magic_att_cap).replace(\" \", \"\"), 16)\n statistics[\"magic_att_cap\"] = str(magic_att_cap)\n\n meso_drop_cap_addr = 0x006C150B\n meso_drop_cap = self.debugger.read_process_memory(meso_drop_cap_addr, 4)\n if not meso_drop_cap:\n statistics[\"meso_drop_cap\"] = None\n else:\n meso_drop_cap = int(self.debugger.reverse_code(meso_drop_cap).replace(\" \", \"\"), 16)\n statistics[\"meso_drop_cap\"] = str(meso_drop_cap)\n\n return statistics\n","sub_path":"trainer/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":23662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274524166","text":"#1\nimport requests, webbrowser\nfrom bs4 import BeautifulSoup\nimport os \nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nfrom pathlib import Path\nimport math\nfrom sklearn.feature_extraction import text\nimport random\n\n#2\n\n# collect the folder names 
(topics)\n# open the folders and recording all the file names\nfolder_names = []\nall_files = []\ncount = 0\npath = os.getcwd() + \"/Datasets\"\nfor dirpath, dirnames, filenames in os.walk(path):\n if count == 0:\n folder_names = dirnames\n else:\n all_files.append(filenames)\n count+=1\n\n#3\n\n# reading all the files and uploading the content in an array\ntext_seperate = [] #<- contains all the text seperated by topic\ntext_all = [] #<- contains all the text in all 3 folders\n\nfor i in range(len(all_files)):\n text_folder = [] #<- contains all text in the particular folder\n text_documents = \"\"\n for file_name in all_files[i]:\n txt = Path(path+\"/\"+folder_names[i]+\"/\"+file_name).read_text()\n txt = txt.replace(\"\\n\", \" \")\n txt = txt.replace(\"\\t\", \" \")\n text_documents += txt\n text_folder.append(txt)\n text_all.append(text_documents)\n text_seperate.append(text_folder)\n\n#4\n\n# PRE-PROCESSING\n# making all words of the same case (all lowercase)\n# counting all the occurences of the word in the folder\n# counting how many documents the word appears in \nnew_text_all = []\nfor i in range(len(text_all)):\n txt = text_all[i].lower().split()\n new_text = \"\"\n for word in txt:\n occurence = txt.count(word)\n doc_with_word = 0\n for doc in text_seperate[i]:\n if word in doc:\n doc_with_word += 1\n\n # if the word occurs at a minimum 30 times\n # appears in at least 10 documents\n # it is appended in the possible text to find feature vectors\n if occurence >= 30 and doc_with_word >=8:\n new_text += word + \" \"\n new_text_all.append(new_text)\n\n#5\n\n# finding features for topic 1\nvectorizer_1 = TfidfVectorizer(stop_words = text.ENGLISH_STOP_WORDS)\nvectors = vectorizer_1.fit_transform([new_text_all[0]])\nfeature_names_1 = vectorizer_1.get_feature_names()\n\n#6\n\n# finding features for topic 2\nvectorizer_2 = TfidfVectorizer(stop_words = text.ENGLISH_STOP_WORDS)\nvectors = vectorizer_2.fit_transform([new_text_all[1]])\nfeature_names_2 = vectorizer_2.get_feature_names()\n\n#7\n\n# finding features for topic 3\nvectorizer_3 = TfidfVectorizer(stop_words = text.ENGLISH_STOP_WORDS)\nvectors = vectorizer_3.fit_transform([new_text_all[2]])\nfeature_names_3 = vectorizer_3.get_feature_names()\n\n#8\n\n# combining all features in 1 2D array\nfeature_names = []\nfeature_names.append(feature_names_1)\nfeature_names.append(feature_names_2)\nfeature_names.append(feature_names_3)\n\n#9\n\n# further filtering out that the feature vector is not a digit\nfeatures_all = []\n\nfor i in range(len(feature_names)):\n feature_topic = []\n for feature in feature_names[i]:\n counter = False\n if not feature.isdigit():\n for char in feature:\n if char.isdigit():\n counter = True\n break\n if not counter:\n feature_topic.append(feature)\n features_all.append(feature_topic)\n\n#10\n\n# randomly selects 15 features from the list of possible features\nselected_feature_all = []\nselected_num_all = []\n\nfor i in range(len(features_all)):\n \n count = 0\n feature_list = features_all[i]\n max_num = len(feature_list)-1\n \n selected_feature = []\n selected_num = []\n \n while count < 15:\n num = random.randint(0,max_num)\n if num not in selected_num:\n selected_feature.append(feature_list[num])\n selected_num.append(num)\n count += 1\n selected_feature_all.append(selected_feature)\n selected_num_all.append(selected_num)\n\n#11\n\n# finding the tfidf of each possible feature in topic 1\n# tfidf formula is = tf * idf\n# tf = number of occurences of the word / total number of words (frequency)\n# idf = 
log(10/# of documents with that feature word)\ntfidf_all_1 = []\noccurences_all_1 = []\nfor data in text_seperate[0]:\n doc_tfidf = []\n doc_occurences = []\n for feature in selected_feature_all[0]:\n occurences = data.count(feature)\n doc_occurences.append(occurences)\n num_words = len(data.split())\n tf = occurences/num_words\n doc_with_feature = 0\n idf = 0\n for doc in text_seperate[0]:\n if feature in doc:\n doc_with_feature += 1\n if doc_with_feature != 0:\n idf = math.log((10/doc_with_feature),2)\n tfidf = tf*idf\n doc_tfidf.append(tfidf)\n tfidf_all_1.append(doc_tfidf)\n occurences_all_1.append(doc_occurences)\n\n#12\n\n# displays the dataframe containing the feature names and its respective tfidf in each folder\n# the closet tfidf is to 0, the more the word occurs in the text\ndf_1 = pd.DataFrame(tfidf_all_1)\ndf_1.columns = selected_feature_all[0]\nprint(df_1)\n\n#13\n\n# finding the tfidf of each possible feature in topic 2\n# tfidf formula is = tf * idf\n# tf = number of occurences of the word / total number of words (frequency)\n# idf = log(10/# of documents with that feature word)\ntfidf_all_2 = []\noccurences_all_2 = []\nfor data in text_seperate[1]:\n doc_tfidf = []\n doc_occurences = []\n for feature in selected_feature_all[1]:\n occurences = data.count(feature)\n doc_occurences.append(occurences)\n num_words = len(data.split())\n tf = occurences/num_words\n doc_with_feature = 0\n idf = 0\n \n for doc in text_seperate[1]:\n if feature in doc:\n doc_with_feature += 1\n if doc_with_feature != 0:\n idf = math.log((10/doc_with_feature),2)\n tfidf = tf*idf\n doc_tfidf.append(tfidf)\n tfidf_all_2.append(doc_tfidf)\n occurences_all_2.append(doc_occurences)\n\n#14\n\n# displays the dataframe containing the feature names and its respective tfidf in each folder\n# the closet tfidf is to 0, the more the word occurs in the text\ndf_2 = pd.DataFrame(tfidf_all_2)\ndf_2.columns = selected_feature_all[1]\nprint(df_2)\n\n#15\n\n# finding the tfidf of each possible feature in topic 3\n# tfidf formula is = tf * idf\n# tf = number of occurences of the word / total number of words (frequency)\n# idf = log(10/# of documents with that feature word)\ntfidf_all_3 = []\noccurences_all_3 = []\nfor data in text_seperate[2]:\n doc_tfidf = []\n doc_occurences = []\n for feature in selected_feature_all[2]:\n occurences = data.count(feature)\n doc_occurences.append(occurences)\n num_words = len(data.split())\n tf = occurences/num_words\n doc_with_feature = 0\n idf = 0\n \n for doc in text_seperate[2]:\n if feature in doc:\n doc_with_feature += 1\n if doc_with_feature != 0:\n idf = math.log((10/doc_with_feature),2)\n tfidf = tf*idf\n doc_tfidf.append(tfidf)\n tfidf_all_3.append(doc_tfidf)\n occurences_all_3.append(doc_occurences)\n\n#16\n\n# displays the dataframe containing the feature names and its respective tfidf in each folder\n# the closet tfidf is to 0, the more the word occurs in the text\ndf_3 = pd.DataFrame(tfidf_all_3)\ndf_3.columns = selected_feature_all[2]\nprint(df_3)\n\n#17\n\n# save the list of feature vectors in a csv file names feature_extraction.csv\ndf = pd.DataFrame(selected_feature_all)\ndf = df.T\ndf.columns = folder_names\nprint(df)\n\ndf.to_csv (r'feature_extraction.csv', index = False, header=True)\n\n\n#18\n\n# save the features with their corresponding frequencies to csv\n\nvertical_stack = pd.concat([df_1, df_2, df_3], axis=1)\nvertical_stack.to_csv(r'feature_extration_with_frequency.csv', index = False, header = True)\n\n\n\n\n\n\n","sub_path":"Phase 2.py","file_name":"Phase 
2.py","file_ext":"py","file_size_in_byte":7799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61174683","text":"from flask import Flask, render_template, url_for, request, redirect, flash\nfrom caption import *\nfrom werkzeug import secure_filename\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\napp = Flask(__name__)\nUPLOAD_FOLDER = 'D:\\\\uploads'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/image', methods = ['POST'])\ndef upload_image():\n\tif request.method == 'POST':\n\t\timg = request.files['image']\n\t\tprint(img)\n\t\tprint(img.filename)\n\t\timg.save(\"static/\"+img.filename)\n\t\tcaption = caption_this_image(\"static/\"+img.filename)\n\t\tresult_dic = {\n\t\t\t'image' : \"static/\" + img.filename,\n\t\t\t'description' : caption\n\t\t}\n\treturn render_template('index.html', image_results = result_dic)\n\n@app.route('/video', methods = ['POST'])\ndef upload_video():\n\tif request.method == 'POST':\n\t\tkeyword = request.form['keyword']\n\t\tfile = request.files['video']\n\t\tfilename = secure_filename(file.filename)\n\t\tpath = os.path.join(app.config['UPLOAD_FOLDER'], filename).replace(\"\\\\\", \"/\")\n\t\tfile.save(path)\n\t\tvideo_id = caption_this_video(path, 0.6)\n\t\ttime = get_timeline(keyword, video_id)\n\t\ttimeline = []\n\t\tfor e in time:\n\t\t\tif len(e) > 1:\n\t\t\t\ti = [0,1]\n\t\t\t\ti[0] = e[0]\n\t\t\t\ti[1] = e[-1]\n\t\t\t\ttimeline.append(i)\n\t\tlink_subclips = process_video(path, timeline, keyword)\n\t\tresult_dic = {\n\t\t\t'timeline' : timeline,\n\t\t\t'link_subclips' : link_subclips\n\t\t}\n\t\t\n\treturn render_template('index.html', video_results = result_dic)\n\n\n\n\nif __name__ == '__main__':\n\tapp.run(debug = True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291761352","text":"class Solution:\n def minimumDeleteSum(self, s1, s2):\n \"\"\"\n :type s1: str\n :type s2: str\n :rtype: int\n \"\"\"\n l1 = len(s1)\n l2 = len(s2)\n dp = [[0]*(l2+1)]*(l1+1)\n dp = [[0 for _ in range(l2 + 1)] for _ in range(l1 + 1)]\n for i in range(1, l1+1):\n dp[i][0] = ord(s1[i-1])+dp[i-1][0]\n for j in range(1, l2+1):\n dp[0][j] = ord(s2[j-1])+dp[0][j-1]\n for i in range(1, l1+1):\n for j in range(1, l2+1):\n if s1[i-1] == s2[j-1]:\n dp[i][j] = dp[i-1][j-1]\n else:\n dp[i][j] = min(dp[i][j - 1] + ord(s2[j - 1]), dp[i - 1][j] + ord(s1[i - 1]))\n return dp[l1][l2]\n\n# sl=Solution()\n# print(sl.minimumDeleteSum('sea','eat'))\n","sub_path":"712. Minimum ASCII Delete Sum for Two Strings.py","file_name":"712. Minimum ASCII Delete Sum for Two Strings.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469362461","text":"\"\"\"\nThis is a PoC of frequent pattern mining for MP3, so no testing and exception handling will be\nimplemented. 
Tested on python 3.6.\n\"\"\"\n\nimport subprocess\n\nfrom core.step2 import (\n import_data, generate_vocab, generate_title\n)\nfrom core.step3 import separate_by_terms\nfrom core.step4 import apriori\nfrom core.step5 import generate_max_or_closed_pattern_output\nfrom core.step6 import rank_by_purity\nfrom core.step7 import rank_by_complete_coverage_purity_and_phrase\n\nlda_location = r'/Users/sthk/lda_binary'\n\n\ndef index_to_word(pattern_to_word_dict, data):\n\n tmp = []\n\n for line in data:\n support, *patterns = line.split()\n\n map_words = ' '.join(\n pattern_to_word_dict[int(pattern)]\n for pattern in patterns\n )\n\n tmp.append(' '.join([support, map_words]))\n\n output = '\\n'.join(tmp)\n\n return output\n\n\ndef convert_pattern_index_to_word(index_to_word_dict):\n\n for topic_index in range(5):\n\n # read base data to convert\n with open(f'combined_ranking/pattern-{str(topic_index)}.txt') as f:\n combined_data = f.readlines()\n\n with open(f'closed/closed-{str(topic_index)}.txt') as f:\n closed_data = f.readlines()\n\n with open(f'max/max-{str(topic_index)}.txt') as f:\n max_data = f.readlines()\n\n with open(f'purity/purity-{str(topic_index)}.txt') as f:\n purity_data = f.readlines()\n\n with open(f'patterns/pattern-{str(topic_index)}.txt') as f:\n patterns_data = f.readlines()\n\n # convert and write output\n output_str = index_to_word(index_to_word_dict, combined_data)\n with open(f'combined_ranking/pattern-{str(topic_index)}.txt.phrase',\n 'w') as f:\n f.write(output_str)\n\n output_str = index_to_word(index_to_word_dict, closed_data)\n with open(f'closed/closed-{str(topic_index)}.txt.phrase', 'w') as f:\n f.write(output_str)\n\n output_str = index_to_word(index_to_word_dict, max_data)\n with open(f'max/max-{str(topic_index)}.txt.phrase', 'w') as f:\n f.write(output_str)\n\n output_str = index_to_word(index_to_word_dict, purity_data)\n with open(f'purity/purity-{str(topic_index)}.txt.phrase', 'w') as f:\n f.write(output_str)\n\n output_str = index_to_word(index_to_word_dict, patterns_data)\n with open(f'patterns/pattern-{str(topic_index)}.txt.phrase', 'w') as f:\n f.write(output_str)\n\n\ndef main():\n # Step 2.1\n data_list, data_dict = import_data('paper.txt')\n generate_vocab(data_list)\n\n # Step 2.2\n title_map = generate_title(data_list, data_dict)\n\n # Step 3.1\n # Uncomment to call the lda binary\n # subprocess.call(\n # [lda_location, 'est', '0.001', '5', 'settings.txt', 'title.txt', 'random', 'result']\n # )\n #\n # Step 3.2\n separate_by_terms()\n\n # Step 4\n apriori(0.005) # relative minimum support ~ 0.5%\n\n # Step 5\n generate_max_or_closed_pattern_output(is_closed_mining=False)\n generate_max_or_closed_pattern_output(is_closed_mining=True)\n\n # Step 6\n rank_by_purity()\n\n # Step 7\n rank_by_complete_coverage_purity_and_phrase(0.5, 0.5)\n\n # Create readable output from index\n convert_pattern_index_to_word(title_map)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mp3-frequent-pattern-mining/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"224015768","text":"from tornado.httpclient import HTTPRequest, AsyncHTTPClient\nfrom mod.BaseHandler import BaseHandler\nfrom mod.databases.tables import User\nfrom mod.databases.tables import Article\nfrom mod.Auth.SessionHelper import SessionHelper\nimport tornado.web\nimport tornado.gen\nimport urllib\nimport hashlib\nclass BlogHandler(BaseHandler):\n def get(self):\n sessionhelper 
= SessionHelper(self,self.db)\n correct_user = sessionhelper.checkSession()\n blog_user_id = self.get_argument(\"id\")\n blog_user = self.db.query(User).filter(User.user_id == blog_user_id).first()\n if blog_user == None:\n self.render(\"homepage/404error.html\",\n correct_user=correct_user)\n else:\n articles = self.db.query(Article).filter(Article.user_id == blog_user_id).all()\n gravatar_url =GravatarHelper(blog_user.user_email,240).getUrl()\n self.render(\"blog.html\",\n correct_user=correct_user,\n articles=articles,\n blog_user=blog_user,\n gravatar_url=gravatar_url)\n def post(self):\n \tpass\n\nclass GravatarHelper(object):\n \"\"\"docstring for GravatarHelper\"\"\"\n def __init__(self, email,size):\n super(GravatarHelper, self).__init__()\n self.email = email\n self.size = size\n\n def getUrl(self):\n default = \"http://www.example.com/default.jpg\"\n size = self.size\n gravatar_url = \"http://secure.gravatar.com/avatar/\" + hashlib.md5(self.email.lower()).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d':\"retro\", 's':str(size)})\n return gravatar_url\n ","sub_path":"mod/BlogHandler/BlogHandler.py","file_name":"BlogHandler.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166523817","text":"import torch\nimport numpy as np\nimport time\nimport json\n\n# Source Files\nimport sys\nsys.path.append(\"..\")\nimport grnn\nimport exp_utils\nimport controller\nimport env.dlqr\nimport gcnn\n\n# Savedir\n# filename = 'exp1.data'\n\n# # Environment Parameters\n# N = 20\n# degree = 5 + 1\n# T = 50\n# p = 1\n# q = 1\n# h = 5\n# A_norm = 0.995\n# B_norm = 1\n# \n# # Training Parameters\n# num_epoch = 100\n# batch_size = 20\n# ensemble_size = 2\n# val_size = 50\n# grnn_hidden_dim = 5\n# device = 'cuda' if torch.cuda.is_available() else 'cpu'\n# \n# # Experiment Parameters\n# num_topologies = 10\n# num_x0s = 100\n# verbose = True\n\n# Training losses for different setups\ndef grnn_criterion(x_traj, u_traj, env, model):\n norms = torch.norm(model.S_()) + torch.norm(model.A) + torch.norm(model.B)\n return env.cost(x_traj, u_traj) + 2 * norms\n\ndef gcnn_criterion(x_traj, u_traj, env, model):\n return env.cost(x_traj, u_traj)\n\n\n##################### SCRIPT ##############################\ndef run(filename, N, degree, T, p, q, h, A_norm, B_norm, num_epoch, batch_size,\n ensemble_size, val_size, grnn_hidden_dim, num_topologies, num_x0s,\n verbose, device, grnn_criterion, gcnn_criterion):\n\n num_controllers = 6\n\n # Group parameters that are reused\n model_params = {\n 'N':N,\n 'T':T,\n 'p':p,\n 'q':p,\n 'h':grnn_hidden_dim\n }\n training_params = {\n 'T': T,\n 'device': device,\n 'num_epoch': num_epoch,\n 'batch_size': batch_size,\n 'ensemble_size': ensemble_size,\n 'val_size': val_size\n }\n\n # Create arrays to store results\n total_rel_costs = torch.zeros(num_controllers, device=device)\n rel_costs_table = torch.zeros((num_topologies, num_controllers), device=device)\n envs = []\n\n for counter in range(num_topologies):\n\n # Generate environment\n dlqrenv, G = env.dlqr.generate_lq_env(\n N, degree, device, A_norm=A_norm, B_norm=B_norm)\n\n # Controller 0: Zero control (i.e. 
autonomous system)\n controllers = [controller.ZeroController(N, q)]\n\n # Controller 1: grnn with untrainable S\n model_params['S_trainable'] = False\n grnn_fixed_S = exp_utils.generate_model(\n model_params, dlqrenv, use_given_support=True,\n S=dlqrenv.S.clone(), criterion=grnn_criterion,\n **training_params)\n controllers.append( grnn_fixed_S.get_controller(num_x0s) )\n\n # Controller 2: grnn with trainable S\n model_params['S_trainable'] = True\n grnn_support_S = exp_utils.generate_model(\n model_params, dlqrenv, use_given_support=True,\n S=dlqrenv.S.clone(), criterion=grnn_criterion,\n **training_params)\n controllers.append( grnn_support_S.get_controller(num_x0s) )\n\n # Controller 3: gcnn as in [GS20]\n gcnn_model = exp_utils.generate_gcnn_model(\n S=dlqrenv.S.clone(), N=N, env=dlqrenv,\n criterion=gcnn_criterion, **training_params)\n controllers.append( gcnn.get_gcnn_controller(gcnn_model, N) )\n\n # Controller 4: grnn with dense S\n grnn_dense_S = exp_utils.generate_model(\n model_params, dlqrenv, use_given_support=False,\n S=None, criterion=grnn_criterion,\n **training_params)\n controllers.append( grnn_dense_S.get_controller(num_x0s) )\n\n # Test the performance of GRNN on this env\n rel_costs = exp_utils.estimate_controller_cost(\n dlqrenv, T, controllers, num_x0s=num_x0s)\n total_rel_costs += rel_costs\n\n rel_costs_table[counter] = rel_costs\n envs.append(dlqrenv)\n\n # Print progress\n if verbose:\n print('Iteration: {}'.format(counter+1))\n print(rel_costs.detach().cpu().numpy())\n print(total_rel_costs.data.detach().cpu().numpy() / (counter+1))\n\n # Print result\n print(total_rel_costs / num_topologies)\n\n with open(filename, 'w') as f:\n f.write(json.dumps(rel_costs_table.tolist()))\n#############################################################\n","sub_path":"experiments/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"344596680","text":"import openpyxl\r\nimport requests\r\nimport bs4\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.utils import get_column_letter, column_index_from_string\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\nimport phonenumbers\r\nfrom phonenumbers import carrier, timezone, geocoder\r\n\r\n#******* AVOID BEING BLOCKED *******\r\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\r\n\r\n\r\n#******* WORKING WITH EXCEL *******\r\nchooseExcel_File = \"C:\\\\Users\\\\rosenberg\\\\Desktop\\\\withPython\\\\PracticeBS4\\\\p1\\\\src\\\\myPhonenumbers.xlsx\"\r\nchoose_SHEET_Of_Your_Excel_File = 'Hoja1'\r\n\r\nwb = load_workbook(chooseExcel_File, data_only=True)\r\nsh = wb[choose_SHEET_Of_Your_Excel_File]\r\n\r\nrowM = sh.max_row\r\n\r\n\r\nfor r in range(2, rowM+1):\r\n try:\r\n #READER OF EXCEL FILE\r\n letter_Of_Column_With_Name_Of_Companies = \"C\"\r\n number_Of_Row_With_Name_Of_Companies = r\r\n string_Of_number_Of_Row_With_Name_Of_Companies = str(r)\r\n concatenater_Column_With_Number = letter_Of_Column_With_Name_Of_Companies + string_Of_number_Of_Row_With_Name_Of_Companies\r\n companyName = sh[concatenater_Column_With_Number].value \r\n\r\n print(companyName)\r\n\r\n #CREATING URL FOR GOOGLE SEARCHING\r\n text = (\"address of {0} headquarters \".format(companyName))\r\n myCurrentURL = 'https://google.com/search?q=' + text\r\n\r\n #ACCESSING THE URL's INFO\r\n response = 
requests.get(myCurrentURL, headers=headers)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n \r\n #Look for this tag\r\n tagSearched = soup.find_all('div', attrs={'class': 'cXedhc'})\r\n if tagSearched:\r\n print('tag exists')\r\n for tag in tagSearched:\r\n companyAddress = tag.text.strip()\r\n copyOfCompanyAddress = companyAddress\r\n print(copyOfCompanyAddress)\r\n else:\r\n print(\"tag does not exist\") \r\n\r\n # #Where to put that info\r\n # letter_Of_Column_To_Put_Addres = \"D\"\r\n # concatenater_Row_With_Column = letter_Of_Column_To_Put_Addres + string_Of_number_Of_Row_With_Name_Of_Companies\r\n # sh[concatenater_Row_With_Column] = companyAddress\r\n # wb.save(filename = chooseExcel_File)\r\n\r\n # for match in phonenumbers.PhoneNumberMatcher(copyOfCompanyAddress, \"GB\"):\r\n # print(match)\r\n \r\n\r\n \r\n except:\r\n print(\"Something went wrong\")","sub_path":"src/addressRating.py","file_name":"addressRating.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"139273968","text":"#!/usr/bin/env python\nimport argparse\nfrom duplicate import handle, rename_images\nfrom pickle_utils import handle_pickle\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('directory', help=\"Enter directory to scan for duplicates\")\n parser.add_argument(\"-a\", help=\"Analyze directory and build look-up table\", action=\"store_true\")\n parser.add_argument(\"-r\", help=\"Rename images with a hash\", action=\"store_true\")\n parser.add_argument(\"-d\", help=\"Delete duplicates of files\", action=\"store_true\")\n parser.add_argument(\"-t\", help=\"Transfer duplicate files\", action=\"store_true\")\n args = parser.parse_args()\n directory = args.directory.rstrip('\\\\')\n\n if args.r:\n rename_images(directory)\n if args.a:\n handle_pickle(directory)\n if args.t and not args.d:\n handle(directory, transfer=True, delete=False)\n if args.d and not args.t:\n handle(directory, transfer=False, delete=True)\n","sub_path":"core/dedupe.py","file_name":"dedupe.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"539865295","text":"#!/usr/bin/python\n#\n# Licensed to the Software Freedom Conservancy (SFC) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The SFC licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom selenium import selenium\nimport unittest\nimport time\n\nclass TestPrompts(unittest.TestCase):\n def setUp(self):\n self.selenium = selenium(\"localhost\", \\\n 4444, \"*firefoxproxy\", \"http://www.w3schools.com\")\n self.selenium.start()\n\n def test_alert(self):\n sel = self.selenium\n sel.open(\"/js/tryit.asp?filename=tryjs_alert\")\n sel.select_frame(\"view\")\n sel.click(\"css=input[value='Show alert box']\")\n self.assertEqual(sel.get_alert(), \"Hello! I am an alert box!\")\n\n def test_confirm_accept(self):\n sel = self.selenium\n sel.open(\"/js/tryit.asp?filename=tryjs_confirm\")\n sel.select_frame(\"view\")\n sel.choose_ok_on_next_confirmation()\n sel.click(\"css=input[value='Show a confirm box']\")\n self.assertEqual(sel.get_alert(), \"You pressed OK!\")\n\n def test_confirm_cancel(self):\n sel = self.selenium\n sel.open(\"/js/tryit.asp?filename=tryjs_confirm\")\n sel.select_frame(\"view\")\n sel.choose_cancel_on_next_confirmation()\n sel.click(\"css=input[value='Show a confirm box']\")\n self.assertEqual(sel.get_alert(), \"You pressed Cancel!\")\n\n def test_prompt(self):\n sel = self.selenium\n sel.open(\"/js/tryit.asp?filename=tryjs_prompt\")\n sel.select_frame(\"view\")\n sel.answer_on_next_prompt('Flying Monkey')\n sel.click(\"css=input[value='Show prompt box']\")\n self.assertEqual(sel.get_html_source(), 'Hello Flying Monkey! How are you today?')\n\n def tearDown(self):\n self.selenium.stop()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"py/test/selenium/test_prompts.py","file_name":"test_prompts.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"384433997","text":"\nimport os\nimport sys\nsys.path.append(os.getcwd())\n\nfrom lib.utilities import settings\nfrom lib.models.chatterbot_model import Language\n\nlang = Language(\"language\", \"ARCA\")\n\ndef test_model():\n while True:\n x = input(\"> \")\n if x == \"stop\":\n break\n data = {\"text\": x}\n ans = lang.chat(data)\n print(ans)\n\n return\n\nif __name__ == \"__main__\":\n test_model()\n","sub_path":"code/lib/utilities/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"529135406","text":"import random\r\n\r\nlist=[0,0,0,0,0,0]\r\nfor i in range(600):\r\n number=random.randint(1, 6)\r\n for j in range(6):\r\n if number==j+1:\r\n list[j]=list[j]+1\r\n\r\nfor k in range(6):\r\n print(\"die value %d came up %d times\"%(k+1,list[k]))\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"6장 연습문제6.py","file_name":"6장 연습문제6.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"649567757","text":"# pec\nimport pandas as pd\ndf = pd.read_csv('literacy_birth_rate.csv')\ndf = df.dropna()\ndf['fertility'] = pd.to_numeric(df['fertility'])\ndf['population'] = pd.to_numeric(df['population'])\ndf['female_literacy'] = pd.to_numeric(df['female literacy'])\ndf = df.drop('female literacy', axis=1)\ngroup = df.groupby('Continent')\nfrom bokeh.plotting import ColumnDataSource\nlatin_america = ColumnDataSource(group.get_group('LAT'))\nafrica = ColumnDataSource(group.get_group('AF'))\n\nfrom bokeh.plotting import figure\nfrom bokeh.io import output_file, show\np = figure(x_axis_label='fertility (children per woman)', 
y_axis_label='female_literacy (% population)')","sub_path":"Jupyter/14. Visualización interactiva de datos con Bokeh/Bokeh_11.py","file_name":"Bokeh_11.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421198712","text":"\nclass Solution(object):\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n for i in range(9):\n row, col, square = set(), set(), set()\n for j in range(9):\n if board[i][j] != \".\":\n if board[i][j] in row:\n return False\n else:\n row.add(board[i][j])\n if board[j][i] != \".\":\n if board[j][i] in col:\n return False\n else:\n col.add(board[j][i])\n ii = 3 * (i // 3) + j // 3\n jj = 3 * (i % 3) + j % 3\n if board[ii][jj] != \".\":\n if board[ii][jj] in square:\n return False\n else:\n square.add(board[ii][jj])\n return True\n\n def isValidSudokuV1(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n def _isValidRectangle(ilo, ihi, jlo, jhi):\n s = set()\n for i in range(ilo, ihi):\n for j in range(jlo, jhi):\n if board[i][j] == \".\":\n continue\n elif board[i][j] in s:\n return False\n else:\n s.add(board[i][j])\n return True\n for i in range(9):\n if not _isValidRectangle(i, i + 1, 0, 9):\n return False\n for j in range(9):\n if not _isValidRectangle(0, 9, j, j + 1):\n return False\n for i in range(3):\n for j in range(3):\n if not _isValidRectangle(i*3, (i+1)*3, j*3, (j+1)*3):\n return False\n return True\n\n\n\nif __name__ == '__main__':\n s = Solution()\n board = [\n [\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"],\n ]\n print(s.isValidSudoku(board))","sub_path":"036_valid_sudoku.py","file_name":"036_valid_sudoku.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638375280","text":"'''\nAuthors: Donnie Marino, Kostas Stamatiou\nContact: dmarino@digitalglobe.com\n\nUnit tests for the gbdxtools.Catalog class\n'''\n\nfrom gbdxtools import Interface\nfrom gbdxtools.catalog import Catalog\nimport vcr\nfrom auth_mock import get_mock_gbdx_session\n\n# How to use the mock_gbdx_session and vcr to create unit tests:\n# 1. Add a new test that is dependent upon actually hitting GBDX APIs.\n# 2. Decorate the test with @vcr appropriately\n# 3. replace \"dummytoken\" with a real gbdx token\n# 4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a \"cassette\".\n# 5. replace the real gbdx token with \"dummytoken\" again\n# 6. 
Edit the cassette to remove any possibly sensitive information (s3 creds for example)\nmock_gbdx_session = get_mock_gbdx_session(token=\"dummytoken\")\ngbdx = Interface(gbdx_connection = mock_gbdx_session)\n\ndef test_init():\n c = Catalog(gbdx)\n assert isinstance(c, Catalog)\n\n@vcr.use_cassette('tests/unit/cassettes/test_catalog_get_address_coords.yaml',filter_headers=['authorization'])\ndef test_catalog_get_address_coords():\n\tc = Catalog(gbdx)\n\tlat, lng = c.get_address_coords('Boulder, CO')\n\tassert lat == 40.0149856\n\tassert lng == -105.2705456\n\n@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_point.yaml',filter_headers=['authorization'])\ndef test_catalog_search_point():\n\tc = Catalog(gbdx)\n\tlat = 40.0149856\n\tlng = -105.2705456\n\tresults = c.search_point(lat,lng)\n\n\tassert results['stats']['totalRecords'] == 310\n\n@vcr.use_cassette('tests/unit/cassettes/test_catalog_search_address.yaml',filter_headers=['authorization'])\ndef test_catalog_search_address():\n\tc = Catalog(gbdx)\n\tresults = c.search_address('Boulder, CO')\n\n\tassert results['stats']['totalRecords'] == 310\n\t\n\n","sub_path":"tests/unit/test_catalog.py","file_name":"test_catalog.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80169511","text":"import sys\n\ninputfilename = sys.argv[1]\noutputfilename = sys.argv[2]\n\nfile = open(inputfilename,'r')\noutput = open(outputfilename,'w')\n\nlines = file.readlines()\n\n#skip over whatever was put in the header stuff\ncounter = 0\nwhile lines[counter] != \"\\n\":\n counter += 1\n\n#get labels\ncounter2 = counter+1\nwhile lines[counter2] != \"\\n\":\n line = lines[counter2]\n if \" = \" in line:\n line = line.strip()\n ls = line.split(\" = \")\n label = ls[0]\n output.write(label+\",\")\n counter2 += 1\noutput.write(\"\\n\")\n\nfor i in range(counter+1,len(lines)):\n line = lines[i]\n if \" = \" in line:\n line = line.strip()\n ls = line.split(\" = \")\n val = ls[1]\n output.write(val+\",\")\n if line == \"\\n\":\n output.write(\"\\n\")\nfile.close()\noutput.close()\n \n","sub_path":"benchmarkSuite/blogOutputToCSV.py","file_name":"blogOutputToCSV.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4821599","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 10:34:28 2020\n\n@author: rubenandrebarreiro\n\"\"\"\n\n# Definition of the necessary Python Libraries\n\n# a) General Libraries:\n\n# Import NumPy Python's Library as np\nimport numpy as np\n\n# Import SciKit-Learn as skl\nimport sklearn as skl\n\n# Import Tree.DecisionTreeClassifier Sub-Module,\n# from SciKit-Learn Python's Library as decision_tree_classifier\nfrom sklearn.tree import DecisionTreeClassifier as decision_tree_classifier\n\n# Import PyPlot Sub-Module, from Matplotlib Python's Library as plt\nimport matplotlib.pyplot as plt\n\n# Import System Python's Library\nimport sys\n\n# Append the Path \"../\" to the System's Path\nsys.path.append('../')\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# The file of the Dataset\ndataset_file = \"../files/data/T6-data.txt\"\n\n# Load the Data of the Dataset with NumPy function loadtxt\ndataset_not_random = np.loadtxt(dataset_file, delimiter=\"\\t\") \n\n# Shuffle the Dataset, not randomized\ndataset_random = 
skl.utils.shuffle(dataset_not_random)\n\n# Select the Classes of the Dataset, randomized\nys_classes = dataset_random[:, -1]\n\n# Select the Features of the Dataset, randomized\nxs_features = dataset_random[:, 0:-1] \n\n# The size of the Dataset, randomized\ndataset_size = len(xs_features)\n\n# Compute the Means of the Dataset, randomized\ndataset_means = np.mean(xs_features, axis=0)\n\n# Compute the Standard Deviations of the Dataset, randomized\ndataset_stdevs = np.std(xs_features, axis=0)\n\n# Standardize the Dataset, randomized\nxs_features_std = ( ( xs_features - dataset_means ) / dataset_stdevs )\n\n# Update the Classes of the Dataset,\n# to have the value of 1 or -1\nys_classes = ( ( ys_classes * 2 ) - 1 )\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# The Decision Stumps of the Hypothesis for the Models, for Boosting\nhypothesis_decision_stumps = []\n\n# The Weights of the Hypothesis for the Models, for Boosting\nhypothesis_weights = []\n\n# The Weights for the Points, initialized with the same Weight of 1/N,\n# for all the Data Points, where N is the number of Points in the Dataset\npoint_weights = ( np.ones(xs_features_std.shape[0]) / float(xs_features_std.shape[0]) )\n\n\n# The maximum number of Hypothesis, for Boosting\nmax_hypothesis = 20\n\n# The Number of Prediction Errors, for each Hypothesis Model/Cycle\nboosting_errors_predictions = np.zeros((20, 2))\n\n# Iterate the Loop for a number of a maximum of 20 Hypothesis, for Boosting\nfor ix in range(max_hypothesis):\n \n # Initialise the Decision Stump, from the Decision Tree Classifier,\n # with a Maximum Depth, equal to 1\n decision_tree_classifier_stump = decision_tree_classifier(max_depth=1)\n \n # Fit the Decision Tree, with the Features and Classes of the Dataset,\n # as also, with the Weights pre-computed previously\n decision_tree_classifier_stump.fit(xs_features_std, ys_classes, sample_weight = point_weights)\n \n # Predict the Classes, for the Features of the Dataset,\n # using the current Hypothesis\n pred = decision_tree_classifier_stump.predict(xs_features_std)\n \n \n # Compute the Errors of the Predictions, against the Real Classes,\n # y_m(x^n) != t^n\n errors_pred = (pred != ys_classes).astype(int)\n \n # Compute the Sum of the Errors Weighted \n error_sum_weighted = np.sum(errors_pred * point_weights)\n \n \n # Compute the Alpha value, alpha_m = ln( (1-e_m) / e_m )\n alpha = np.log( (1 - error_sum_weighted) / error_sum_weighted )\n \n \n # Update the Weights of the Points, for the next loop cycle,\n # w[^n;_m+1] = w[^n;_m] * exp( alpha_m * I( y_m(x^n) != t^n ) )\n point_weights = ( point_weights * np.exp(alpha * errors_pred) )\n \n # Normalize the Weights, after computing new Weights of the Points,\n # dividing them by the sum of the Weights of the Points\n point_weights = ( point_weights / np.sum(point_weights) )\n \n \n # Append the Decision Stump, for the current Hypothesis,\n # to the Decision Stumps of the Hypothesis for the Models, for Boosting\n hypothesis_decision_stumps.append(decision_tree_classifier_stump)\n \n # Append the Alpha value,\n # to the Weights of the Hypothesis for the Models, for Boosting\n hypothesis_weights.append(alpha)\n \n \n preds = np.zeros(dataset_size)\n weighted_preds = np.zeros(dataset_size)\n \n for ix_final_prediction in range(len(hypothesis_decision_stumps)):\n preds = hypothesis_decision_stumps[ix_final_prediction].predict(xs_features_std)\n weighted_preds = ( weighted_preds + 
( preds * hypothesis_weights[ix_final_prediction] ) )\n \n weighted_preds[weighted_preds < 0] = -1\n weighted_preds[weighted_preds >= 0] = 1\n \n boosting_errors_predictions[ix, 0] = ( ix + 1 )\n boosting_errors_predictions[ix, 1] = np.sum((weighted_preds != ys_classes).astype(int))\n \n\n# The Function to plot the Prediction Errors, for AdaBoost\ndef plot_prediction_errors(boosting_pred_errors):\n \n # Initialise the Plot\n plt.figure(figsize=(10, 8), frameon=True)\n\n # Set the line representing the continuous values,\n # for the Functions of the Single/Ensemble Bagging Training and Validation Errors\n plt.plot(boosting_pred_errors[:,0], boosting_pred_errors[:,1], '-', color=\"blue\")\n \n # Set the axis for the Plot\n plt.axis([0, 21, 0.0, max(boosting_pred_errors[:,1])])\n \n # Set the label for the X axis of the Plot\n plt.xlabel(\"Number of Hypothesis/Cycles\")\n \n # Set the label for the Y axis of the Plot\n plt.ylabel(\"Prediction Errors, for AdaBoost Ensemble Method\")\n \n # Set the Title of the Plot\n plt.title('Prediction Errors, for AdaBoost Ensemble Method,\\nwith 20 Hypothesis/Cycles')\n \n # Save the Plot, as a figure/image\n plt.savefig('imgs/adaboost-prediction-errors.png', dpi=600)\n \n # Show the Plot\n plt.show()\n \n # Close the Plot\n plt.close()\n \nprint(\"\\nPerforming the Adaboost (Adaptive Boost Process)...\\n\\n\")\nprint(\"Number of Errors of the Predictions for the Adaboost Ensemble Method,\\nfor all the Hypothesis/Cycles:\")\nprint(boosting_errors_predictions)\nprint(\"\\nConclusion:\\n- Even with a Bad Classifier, using the Adaboost,\\n we ensure a minimization of the number of\\n the Errors of the Predictions...\\n\\n\")\n\nplot_prediction_errors(boosting_errors_predictions)\n","sub_path":"tutorials/tutorial-6/6.2-adaboost/6.2-adaboost.py","file_name":"6.2-adaboost.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"638935830","text":"def get_alphabet_dictionary():\n alphabet = list('abcdefghijklmnopqrstuvwxyz')\n numbers = range(1,27)\n return dict(zip(alphabet, numbers))\n\ndef value_from_letters(value_dict, word):\n value = 0\n for letter in word:\n if letter in value_dict.keys():\n value += value_dict.get(letter.lower())\n return value\n\ndef import_list_from_txt(filename):\n source_data = open(filename,'r',encoding='utf8')\n line_list = source_data.readlines()\n trimmed_list = [line.strip('\\n').split(' ')[0] for line in line_list]\n source_data.close()\n return trimmed_list\n\ndef get_values_for_list(source_list):\n value_dict = {}\n alphabet_dictionary = get_alphabet_dictionary()\n for item in source_list:\n value_dict.update({item : value_from_letters(alphabet_dictionary, item)})\n return value_dict\n\ndef highest_value():\n roster = import_list_from_txt('roster.txt')\n roster_with_values = get_values_for_list(roster)\n return max(roster_with_values, key=roster_with_values.get)\n\ndef words_with_same_value(word):\n word_list = import_list_from_txt('positive-words.txt')\n word_dict_with_values = get_values_for_list(word_list)\n alphabet_dictionary = get_alphabet_dictionary()\n word_value = value_from_letters(alphabet_dictionary, word)\n words = [word for word, value in word_dict_with_values.items() if value == word_value]\n return words if len(words) > 0 else None\n\ndef main():\n\n print(\"The most valuable person in the class is \" + str(highest_value()))\n print(\"These words have the same value as the word Aaron: \" + 
str(words_with_same_value('Aaron')))\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"exam_p1.py","file_name":"exam_p1.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"93972431","text":"from django.conf import settings\nimport requests\nfrom django.core.cache import cache\n\n\ndef solve(a=1, b=2, c=1):\n key = '{}_a={}&b={}&c={}'.format(settings.EQUATION_PREFIX, a, b, c)\n if settings.CACHE_ACTIVE:\n value = cache.get(key)\n if value:\n return value\n url = settings.PI_URL + '/calculator/solve?a={}&b={}&c={}'.format(a, b, c)\n response = requests.get(url=url)\n solutions = response.json()\n cache.set(key, solutions, settings.EQUATION_TTL)\n return solutions\n\n\ndef clear_cache():\n cache.clear()\n","sub_path":"front/pi.py","file_name":"pi.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"91333966","text":"from matplotlib import pyplot as plt\r\n\r\nmonths=['june','july','aug','sep','oct','nov','dec']\r\nmumbai=[82.5,83.06,83.61,85.6,90.75,85.24,84.06]\r\nplt.plot(months,mumbai,label='Mumbai',linewidth=3,marker='o',markerfacecolor='blue',markersize=12)\r\nplt.title('Mumbai Petrol Prices')\r\nplt.xlabel('months')\r\nplt.ylabel('Prices')\r\nplt.legend()\r\nplt.grid()\r\nplt.savefig('im1.png')\r\nplt.savefig('im1.pdf')\r\nplt.show()","sub_path":"irshad dir/programming/python_self/dataScience/grap1.py","file_name":"grap1.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"256712171","text":"class LinearSearch():\n def search(self, haystack, needle):\n length = len(haystack)\n # go through all values\n for i in range(length):\n # check if current value is search value\n if haystack[i] == needle:\n return i\n return -1 \n\nls = LinearSearch()\nhaystack = [2, 3, 5, 7, 11, 13, 17, 19]\n\nprint( ls.search(haystack, 7) )\n\n","sub_path":"Algorithms/Searching Algorithms/Linear Search/Python/LinearSearch.py","file_name":"LinearSearch.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"25036501","text":"#!/usr/bin/python3\n\nfor i in range(10):\n for j in range(i, 10):\n if j > i:\n print(\"{:d}{:d}\".format(i, j), end='')\n if i * 10 + j == 89:\n print('')\n else:\n print(', ', end='')\n","sub_path":"0x01-python-if_else_loops_functions/6-print_comb3.py","file_name":"6-print_comb3.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"109205622","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@__Create Time__ = 18-2-2 10:10 PM\n@__Description__ = \" \"\n\"\"\"\n\nfrom django import forms\nfrom ..models.group import AssetGroup\n\nclass CreateAssetGroupForm(forms.ModelForm):\n class Meta:\n model = AssetGroup\n fields = ['name','comment']\n widgets = {\n 'name': forms.TextInput(\n attrs={\n 'class':'form-control',\n 'placeholder':'Asset group name'\n }\n ),\n 'comment': forms.Textarea(\n attrs={\n 'class':'form-control',\n 'placeholder':'Remarks'\n }\n )\n }\n\nclass EditAssetGroupForm(CreateAssetGroupForm):\n pass\n","sub_path":"asset/forms/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"96377013","text":"# ----------\n# User Instructions:\n#\n# Create a function compute_value which returns\n# a grid of values. The value of a cell is the minimum\n# number of moves required to get from the cell to the goal.\n#\n# If a cell is a wall or it is impossible to reach the goal from a cell,\n# assign that cell a value of 99.\n# ----------\n\ngrid = [[0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0]]\ngoal = [len(grid) - 1, len(grid[0]) - 1]\ncost = 1 # the cost associated with moving from a cell to an adjacent one\n\ndelta = [[-1, 0], # go up\n [0, -1], # go left\n [1, 0], # go down\n [0, 1]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\n\ndef compute_value(grid, goal, cost):\n\n valueMatrix = [[99 for col in range(len(grid[0]))] for row in range(len(grid))]\n isExpandable = True\n\n while isExpandable:\n isExpandable = False\n for x in range(len(grid)):\n for y in range(len(grid[0])):\n if goal[0] == x and goal[1] == y:\n if valueMatrix[x][y] > 0:\n valueMatrix[x][y] = 0\n isExpandable = True\n elif grid[x][y] == 0:\n for k in range(len(delta)):\n currentX = x + delta[k][0]\n currentY = y + delta[k][1]\n if (0 <= currentX < len(grid) and 0 <= currentY < len(grid[0])\n and grid[currentX][currentY] == 0):\n currentV = valueMatrix[currentX][currentY] + cost\n if currentV < valueMatrix[x][y]:\n isExpandable = True\n valueMatrix[x][y] = currentV\n return (valueMatrix)\n\n\n\nvalueMatrix = compute_value(grid, goal, cost)\n\nfor i in range(len(valueMatrix)):\n print(valueMatrix[i])","sub_path":"Search/Value Prog.py","file_name":"Value Prog.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"131302141","text":"import shlex\nimport subprocess\n\nfrom pkgcore.ebuild.eapi import EAPI\nfrom pkgcore.ebuild.eclass import EclassDoc\nfrom snakeoil.contexts import patch\nfrom snakeoil.strings import pluralism\n\nfrom .. import base, results, sources\nfrom ..eclass import EclassAddon\nfrom . import Check, EclassCacheCheck\n\n\nclass DeprecatedEclass(results.VersionResult, results.Warning):\n \"\"\"Package uses an eclass that is deprecated/abandoned.\"\"\"\n\n def __init__(self, eclass, replacement, **kwargs):\n super().__init__(**kwargs)\n self.eclass = eclass\n self.replacement = replacement\n\n @property\n def desc(self):\n if self.replacement is not None:\n replacement = f'migrate to {self.replacement}'\n else:\n replacement = 'no replacement'\n return f'uses deprecated eclass: {self.eclass} ({replacement})'\n\n\nclass DuplicateEclassInherits(results.VersionResult, results.Warning):\n \"\"\"An ebuild directly inherits the same eclass multiple times.\n\n Note that this will flag ebuilds that conditionalize global metadata by\n package version (or some other fashion) while inheriting the same eclass\n under both branches, e.g. conditional live ebuilds. 
In this case, shared\n eclasses should be loaded in a separate, unconditional inherit call.\n \"\"\"\n\n def __init__(self, eclasses, **kwargs):\n super().__init__(**kwargs)\n self.eclasses = tuple(eclasses)\n\n @property\n def desc(self):\n eclasses = ', '.join(self.eclasses)\n es = pluralism(self.eclasses, plural='es')\n return f'duplicate inherits for eclass{es}: {eclasses}'\n\n\nclass EclassUsageCheck(Check):\n \"\"\"Scan packages for various eclass-related issues.\"\"\"\n\n known_results = frozenset([DeprecatedEclass, DuplicateEclassInherits])\n required_addons = (EclassAddon,)\n\n def __init__(self, *args, eclass_addon):\n super().__init__(*args)\n self.deprecated_eclasses = eclass_addon.deprecated\n\n def feed(self, pkg):\n duplicates = set()\n inherited = set()\n\n for eclass in pkg.inherit:\n if eclass not in inherited:\n inherited.add(eclass)\n else:\n duplicates.add(eclass)\n\n if duplicates:\n yield DuplicateEclassInherits(sorted(duplicates), pkg=pkg)\n\n for eclass in inherited.intersection(self.deprecated_eclasses):\n replacement = self.deprecated_eclasses[eclass]\n yield DeprecatedEclass(eclass, replacement, pkg=pkg)\n\n\nclass EclassBashSyntaxError(results.EclassResult, results.Error):\n \"\"\"Bash syntax error in the related eclass.\"\"\"\n\n def __init__(self, lineno, error, **kwargs):\n super().__init__(**kwargs)\n self.lineno = lineno\n self.error = error\n\n @property\n def desc(self):\n return f'{self.eclass}: bash syntax error, line {self.lineno}: {self.error}'\n\n\nclass EclassDocError(results.EclassResult, results.Warning):\n \"\"\"Error when parsing docs for the related eclass.\n\n Eclass docs are parsed as specified by the devmanual [#]_.\n\n .. [#] https://devmanual.gentoo.org/eclass-writing/#documenting-eclasses\n \"\"\"\n\n def __init__(self, error, **kwargs):\n super().__init__(**kwargs)\n self.error = error\n\n @property\n def desc(self):\n return f'{self.eclass}: failed parsing eclass docs: {self.error}'\n\n\nclass EclassDocMissingFunc(results.EclassResult, results.Warning):\n \"\"\"Undocumented function(s) in the related eclass.\"\"\"\n\n def __init__(self, functions, **kwargs):\n super().__init__(**kwargs)\n self.functions = tuple(functions)\n\n @property\n def desc(self):\n s = pluralism(self.functions)\n funcs = ', '.join(self.functions)\n return f'{self.eclass}: undocumented function{s}: {funcs}'\n\n\nclass EclassDocMissingVar(results.EclassResult, results.Warning):\n \"\"\"Undocumented variable(s) in the related eclass.\n\n All exported variables in an eclass should be documented using eclass doc\n tags. 
Temporary variables should be unset after use so they aren't\n exported.\n \"\"\"\n\n def __init__(self, variables, **kwargs):\n super().__init__(**kwargs)\n self.variables = tuple(variables)\n\n @property\n def desc(self):\n s = pluralism(self.variables)\n variables = ', '.join(self.variables)\n return f'{self.eclass}: undocumented variable{s}: {variables}'\n\n\nclass EclassCheck(EclassCacheCheck):\n \"\"\"Scan eclasses for various issues.\"\"\"\n\n scope = base.eclass_scope\n _source = sources.EclassRepoSource\n known_results = frozenset([\n EclassBashSyntaxError, EclassDocError, EclassDocMissingFunc, EclassDocMissingVar])\n\n def __init__(self, *args):\n super().__init__(*args)\n latest_eapi = EAPI.known_eapis[sorted(EAPI.known_eapis)[-1]]\n self.known_phases = set(latest_eapi.phases_rev)\n self.eclass_keys = latest_eapi.eclass_keys\n\n def feed(self, eclass):\n # check for eclass bash syntax errors\n p = subprocess.run(\n ['bash', '-n', shlex.quote(eclass.path)],\n stderr=subprocess.PIPE, stdout=subprocess.DEVNULL, encoding='utf8')\n if p.returncode != 0 and p.stderr:\n lineno = 0\n error = []\n for line in p.stderr.splitlines():\n path, line, msg = line.split(': ', 2)\n lineno = line[5:]\n error.append(msg.strip('\\n'))\n error = ': '.join(error)\n yield EclassBashSyntaxError(lineno, error, eclass=eclass)\n\n doc_errors = []\n parsing_error = lambda exc: doc_errors.append(EclassDocError(str(exc), eclass=eclass))\n with patch('pkgcore.ebuild.eclass._parsing_error', parsing_error):\n eclass_obj = EclassDoc(eclass.path, sourced=True)\n yield from doc_errors\n\n phase_funcs = {f'{eclass}_{phase}' for phase in self.known_phases}\n # TODO: ignore overridden funcs from other eclasses?\n # ignore phase funcs\n funcs_missing_docs = eclass_obj.exported_functions - phase_funcs - eclass_obj.functions\n if funcs_missing_docs:\n missing = tuple(sorted(funcs_missing_docs))\n yield EclassDocMissingFunc(missing, eclass=eclass)\n # TODO: ignore overridden vars from other eclasses?\n # ignore exported metadata variables, e.g. SRC_URI\n vars_missing_docs = (\n eclass_obj.exported_variables - eclass_obj.variables - self.eclass_keys)\n if vars_missing_docs:\n missing = tuple(sorted(vars_missing_docs))\n yield EclassDocMissingVar(missing, eclass=eclass)\n","sub_path":"src/pkgcheck/checks/eclass.py","file_name":"eclass.py","file_ext":"py","file_size_in_byte":6611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88877367","text":"#!/usr/bin/env python3\n\nimport argparse\nimport global_settings\nimport config\nimport core\n\n\nclass ConfigAction(argparse.Action):\n \"\"\"\n This class is called when a config option is passed to via command line.\n\n It sets the global_settings config_file option to whatever was passed in the argument\n \"\"\"\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n \"\"\"\n Boilerplate for an argparse Action\n\n :param option_strings: A list of command-line option strings which\n should be associated with this action.\n :param dest: The name of the attribute to hold the created object(s)\n :param nargs: The number of command-line arguments that should be\n consumed. See argparse docs for details\n :param kwargs: All other options. 
See argparse docs for details\n \"\"\"\n if nargs is not None:\n raise ValueError(\"nargs not allowed\")\n super(ConfigAction, self).__init__(option_strings, dest, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n \"\"\"\n Gets executed when the argument is included from the command line\n\n :param parser: Not used.\n :param namespace: Not used.\n :param values: The value passed via the command line. The global config_file variable will be set to this.\n :param option_string: not used\n :return: None\n \"\"\"\n global_settings.config_file = values\n\n\n# Set up an argument parser. We are using defaults to stay consistent with other software.\n# description gets added to the usage statements\nargument_parser = argparse.ArgumentParser(description='This program parses sequencing runs and uploads them to IRIDA.')\n# Add the version argument\nargument_parser.add_argument('-v', '--version',\n action='version', version='IRIDA Uploader {}'.format(global_settings.UPLOADER_VERSION))\n# Our main argument. It is required or else an error will be thrown when the program is run\nargument_parser.add_argument('directory',\n help='Location of sequencing run to upload')\n# Optional argument, for using an alternative config file.\nargument_parser.add_argument('-c', '--config',\n action=ConfigAction,\n help='Path to an alternative configuration file. '\n 'This overrides the default config file in the config directory')\n# Optional argument, Force uploading a run even if a non new status file exists\nargument_parser.add_argument('-f', '--force',\n action='store_true', # This line makes it not parse a variable\n help='Uploader will ignore the status file, '\n 'and try to upload even when a run is in non new status.')\ndef main():\n # Parse the arguments passed from the command line and start the upload\n args = argument_parser.parse_args()\n upload(args.directory, args.force)\n\n\ndef upload(run_directory, force_upload):\n \"\"\"\n start upload on a single run directory\n :param run_directory:\n :param force_upload:\n :return:\n \"\"\"\n config.setup()\n core.cli_entry.validate_and_upload_single_entry(run_directory, force_upload)\n\n\n# This is called when the program is run for the first time\nif __name__ == \"__main__\":\n main()\n","sub_path":"upload_run.py","file_name":"upload_run.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"21131701","text":"#Implementation of the branch and bound median search algorithm\n#Refer to Jones & Pevzner \"An Introduction to Bioinformatics Algorithms\", Chapter 4\n\n#Introducing helper subroutine nucleotides() for converting (s1, s2, ..., si) to respective nucleotide string\n# s : list of numbers representing different nucleotides\n# i : number of those first elements of the list s that we want to convert into nucleotides letters\nnucleotides_dict = {1: 'A', 2: 'T', 3: 'G', 4: 'C'}\ndef nucleotides( s, i ):\n return ''.join( [ nucleotides_dict[ s[ n ] ] for n in range( 0 , i ) ] )\n\n#Hamming distance between two strings is the number of differing positions\n# s, t : two strings to compare\ndef HammingDistance(s, t):\n countDiffs = 0\n for i in range( 0, len(s) ):\n if s[i] != t[i]:\n countDiffs += 1\n return countDiffs\n\n#TotalDistance() computes the best possible match between a given l-mer v and\n#the list of DNA sequences, by going through each sequence and finding the\n#starting position with least possible HammingDistance between l-mer 
starting there and v\n# v : the given l-mer\n# DNA : given list of DNA sequences\ndef TotalDistance(v, DNA):\n l = len(v)\n totalDistance = 0\n for sequence in DNA:\n n = len(sequence)\n minDistance = float(\"inf\")\n #minPos = 0 #to remember position of the optimum match as well, if needed for output\n for i in range(0, n - l + 1):\n if HammingDistance(v, sequence[ i : i + l ]) < minDistance:\n minDistance = HammingDistance(v, sequence[ i : i + l ])\n #minPos = i\n #Uncomment for debug printing for each sequence:\n #print( ''.join( sequence[ minPos : minPos + l ] ), \"-> distance \", minDistance )\n totalDistance += minDistance\n return totalDistance\n\n#NextVertex() returns the next node, it is needed for traversing the tree\n# a : current node\n# i : current level in the tree\n# L : length of the l-mer\n# k : size of the alphabet\ndef NextVertex( a, i, L, k ):\n if i < L:\n a[ i ] = 1\n return a, i + 1\n else:\n for j in reversed(range(0, L)):\n if a[ j ] < k:\n a[ j ] += 1\n return a, j + 1\n return a, 0\n\n#Bypass skips a given node a, immediately returning the next sibling node, without going down to visit children nodes.\n#This is needed for pruning the search tree, in cases when we know for sure that a particular subtree will not yield the optimal node.\n# a : current node\n# i : current level in the tree\n# L : length of the l-mer\n# k : size of the alphabet\ndef Bypass( a, i, L, k ):\n for j in reversed(range(0, i)):\n if a[ j ] < k:\n a[ j ] += 1\n return a, j + 1\n return a, 0\n\n#Size of the alphabet is four nucleotides (A, T, G, C)\nk = 4\n\n#The core of the program is\n#The Branch and bound median search algorithm which traverses through all possible l-mers using the tree,\n#and looking for l-mer that gives the minimum TotalDistance,\n#i.e. 
the l-mer that matches the given DNA sequences in the best possible way\n# DNA : given list of DNA sequences where we are searching for motif\n# l : length of the motif\ndef BranchAndBoundMedianSearch(DNA, l):\n #We start at the root node\n s = [ 1 ] * l\n #Best distance yet unknown, thus assign infinity to it\n bestDistance = float(\"inf\")\n #i will denote current level in the tree, from 1 to l\n i = 1\n #i (level) will become 0 only when the traversal is complete and \n #we are back at the root node\n #thus, until i == 0, proceed traversing the tree\n while i > 0:\n #If we are not yet at the leaf node, but rather at some internal node,\n if i < l:\n #our l-mer so far, based on the tree route, is this:\n prefix = nucleotides( s, i )\n #and we check whether it makes sense to go down:\n #optimistic distance is the best distance that the down nodes can possibly yield,\n #in the ideal case if the remaining part of the l-mer will turn out to be \"perfect\" match\n optimisticDistance = TotalDistance( prefix, DNA )\n #And if optimal l-mer cannot possibly be below current node, \n if optimisticDistance > bestDistance:\n #we can safely skip the node and go straight to the right to the next sibling node\n ( s, i ) = Bypass( s, i, l, k )\n #Otherwise (if optimal l-mer may be down), we have to go down (or to the right, if we are finished with this subtree)\n else:\n ( s, i ) = NextVertex( s, i, l, k )\n #And if we are at the leaf node\n else:\n #we check if the l-mer represented by the current node s is better than the known optimum,\n #and if it is better, we overwrite the optimum and remember which l-mer was this\n word = nucleotides( s, i )\n if TotalDistance( word, DNA ) < bestDistance:\n bestDistance = TotalDistance( word, DNA )\n bestWord = word\n #and proceed to the next vertex\n ( s, i ) = NextVertex( s, i, l, k )\n return bestWord\n\ndef main():\n #Test input:\n l = 8 #length of the motif\n DNA = [ list(\"CGGGGCTACGCAACTGGGTCGTCACATTCCCCTTTCGATA\"),\n list(\"TTTGAGGGTGCCCAATAAATGCGGCTCCAAAGCGGACAAA\"),\n list(\"GGATGCAGCTGATGCCGTTTGACGACCTAAATCAACGGCC\"),\n list(\"AAGGATGGTTCTCCAGGAGCGCCTTTGCTGGTTCTACCTG\"),\n list(\"AATTTTCTAAAAAGATTATAATGTCGGTCCATGCTACTTC\"),\n list(\"CTGCTGTACAACTGAGATCATGCTGCATGCTACTTTCAAC\"),\n list(\"TACATGATCTTTTGGTGCTACTTGGATGAGGGAATGATGC\") ]\n \n #Go and find motif\n print( ''.join(BranchAndBoundMedianSearch( DNA, l ) ) )\n \nif __name__ == \"__main__\":\n # stuff only to run when not called via 'import' here\n main()\n\n","sub_path":"algo-bi/branch_and_bound_median_search.py","file_name":"branch_and_bound_median_search.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"122887438","text":"import cv2\nimport numpy as np\nimport time\nimport xlwt\n\nbook = xlwt.Workbook()\nsheet = book.add_sheet(u'time1')\n\ncamera = cv2.VideoCapture(0)\ngray = None\nfirstframe = None\ni = 0\npixel_width = 400\npixel_height = 240\nthreshold = 50 # threshold on the frame difference used to detect moving objects; the larger it is, the weaker the detection, i.e. moving objects are harder to detect\n\ncapture = cv2.VideoCapture(\"video/VIRAT400x240.mp4\")\n\nif capture.isOpened():\n while True:\n ret, frame = camera.read()# to read the live video, change the capture here to camera\n if not ret:\n break\n\n start = time.time()\n\n firstframe = gray # with live camera data, diff each frame against the previous frame; with a video dataset, diff each frame against the first frame\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n if firstframe is None:\n firstframe = gray\n continue\n\n frameDelta = cv2.absdiff(firstframe, gray)# write the absolute difference of the two frames into another image\n thresh = 
cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)[1]\n # threshold the difference between the two frames\n thresh = cv2.dilate(thresh, None, iterations=2)\n # dilate performs dilation on the input image with a given structuring element; the element defines the neighbourhood shape, and each pixel is replaced by the maximum value over that neighbourhood\n # cnts= cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n x, y, w, h = cv2.boundingRect(thresh)\n frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n end = time.time()\n area_time = end - start\n\n ratio = w * h / (pixel_height * pixel_width ) * 100\n print('time is '+ str(area_time)+' area ratio is '+ str(ratio)+'%')\n\n if ratio and area_time:\n\n sheet.write(i, 0, area_time)\n sheet.write(i, 1, ratio/100)\n i += 1\n\n cv2.imshow(\"frame\", frame)\n cv2.imshow(\"Thresh\", thresh)\n cv2.imshow(\"frame2\", frameDelta)\n\n key = cv2.waitKey(1) & 0xFF\n\n if key == ord(\"q\"):\n break\n\n\nbook.save('area_time.xls')\n\ncapture.release()\ncv2.destroyAllWindows()","sub_path":"area_Extraction.py","file_name":"area_Extraction.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"12019686","text":"import random\r\nimport math\r\n\r\n\r\n# Compute the distance between two points\r\ndef distance(point1, point2):\r\n x1, y1 = point1[0], point1[1]\r\n x2, y2 = point2[0], point2[1]\r\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\r\n\r\n\r\n# Function for computing the angle between two points\r\ndef get_angle(point1, point2):\r\n cos = round((point1[0] * point2[0] + point1[1] * point2[1]) / (\r\n (point1[0] ** 2 + point1[1] ** 2) ** 0.5 * (point2[0] ** 2 + point2[1] ** 2) ** 0.5), 9)\r\n csa = math.acos(cos)\r\n return csa * 180 / math.pi\r\n\r\n\r\ndef get_random_list_point():\r\n checklist = list()\r\n # Generate points with random coordinates\r\n for i in range(n):\r\n checklist.append((random.randint(-100, 100), random.randint(-100, 100)))\r\n return checklist\r\n\r\n\r\ndef get_minimum_distance(checklist, start):\r\n if (0, 0) in checklist:\r\n return 0.0\r\n else:\r\n # Maximum possible distance for points randomized between -100 and 100\r\n minimum = (100 ** 2 + 100 ** 2) ** 0.5\r\n for i in checklist:\r\n if distance(i, start) < minimum:\r\n minimum = distance(i, start)\r\n return minimum\r\n\r\n\r\ndef get_maximum_distance(checklist, start):\r\n # Minimum possible distance\r\n maximum = 0.0\r\n for i in checklist:\r\n if distance(i, start) > maximum:\r\n maximum = distance(i, start)\r\n return maximum\r\n\r\n\r\ndef get_mean_distance(checklist, start):\r\n summa = 0\r\n for i in checklist:\r\n summa += distance(i, start)\r\n return summa / len(checklist)\r\n\r\n\r\n# Select the points of quadrant 1\r\ndef get_first_period(checklist):\r\n period = list()\r\n for i in checklist:\r\n if i[0] >= 0 and i[1] > 0:\r\n period.append(i)\r\n return period\r\n\r\n\r\n# Select the points of quadrant 2\r\ndef get_second_period(checklist):\r\n period = list()\r\n for i in checklist:\r\n if i[0] < 0 and i[1] >= 0:\r\n period.append(i)\r\n return period\r\n\r\n\r\n# Select the points of quadrant 3\r\ndef get_third_period(checklist):\r\n period = list()\r\n for i in checklist:\r\n if i[0] <= 0 and i[1] < 0:\r\n period.append(i)\r\n return period\r\n\r\n\r\n# Select the points of quadrant 4\r\ndef get_fourth_period(checklist):\r\n period = list()\r\n for i in checklist:\r\n if i[0] > 0 and i[1] <= 0:\r\n period.append(i)\r\n return period\r\n\r\n\r\n# Sort differently depending on whether the point (0,0) is in the list\r\ndef sort_by_angle(checklist):\r\n # Lists for each quadrant\r\n period1 = 
get_first_period(checklist)\r\n period2 = get_second_period(checklist)\r\n period3 = get_third_period(checklist)\r\n period4 = get_fourth_period(checklist)\r\n new_checklist = list()\r\n if (0, 0) not in checklist and period1 != list():\r\n period1.sort(reverse=True, key=lambda point: (point[0], point[1]))\r\n new_checklist.append(period1.pop(-1))\r\n elif (0, 0) in checklist:\r\n while (0, 0) in checklist:\r\n new_checklist.append(checklist.pop(checklist.index((0, 0))))\r\n period1.sort(key=lambda point: get_angle((5, 0), point))\r\n period2.sort(key=lambda point: get_angle((0, 5), point))\r\n period3.sort(key=lambda point: get_angle((-5, 0), point))\r\n period4.sort(key=lambda point: get_angle((0, -5), point))\r\n new_checklist += period2 + period3 + period4 + period1\r\n return new_checklist\r\n\r\n\r\n# Read the number of points\r\nn = int(input())\r\nif n > 0:\r\n checklist = get_random_list_point()\r\n print(f\"List in its initial state: {checklist}\")\r\n\r\n # Compute the minimum, maximum and mean distances from the points to the centre\r\n start = (0, 0)\r\n minimum = get_minimum_distance(checklist, start)\r\n maximum = get_maximum_distance(checklist, start)\r\n mean = get_mean_distance(checklist, start)\r\n print(\"Maximum distance from the centre to a point->\", maximum, \"\\n\", \"Minimum distance from the centre to a point->\",\r\n minimum, \"\\n\", \"Mean distance from the centre to the points->\",\r\n mean, sep='')\r\n\r\n checklist = sort_by_angle(checklist)\r\n print(f\"Sorted list in counter-clockwise traversal order: {checklist}\")\r\nelse:\r\n print(\"Invalid input\")\r\n","sub_path":"test_tsk1.py","file_name":"test_tsk1.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"544550597","text":"# -*- coding: UTF-8 -*-\n\nimport time\nimport requests\nimport MySQLdb\nfrom bs4 import BeautifulSoup\nimport jieba\nimport jieba.analyse\n\ndef remove_values_from_list(the_list, val):\n return [value for value in the_list if value != val]\n\ndef get_web_page(url): # original URL\n time.sleep(0.5) # pause 0.5 s before each crawl\n resp = requests.get(\n url=url,\n cookies={'over18': '1'}\n )\n return resp.text\n\njieba.set_dictionary('dict.txt.big')\n\nconn = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"\", db=\"python\",charset='utf8')# connect to the database\ncur = conn.cursor()\ncur.execute(\"SELECT search_href,id,search_title FROM mobile01 WHERE title_analyst='-1'\")\nresults = cur.fetchall()\n\nfor record in results: \n db_url = record[0]\n mobile01_id = record[1]\n title=record[2]\n title_list=jieba.lcut(title,cut_all=False)\n\n res=requests.get(db_url)\n res.encoding='utf-8'\n soup = BeautifulSoup (res.text, \"html5lib\")\n # article body\n main_article=soup.select('.single-post-content')\n if len(main_article):\n content=main_article[0].text.strip()\n sentence=content.split(\"\\n\")\n sentence=remove_values_from_list(sentence,'')\n\n\n total_count=0\n total_article_count=0\n for line in sentence:\n\n line2=line.strip()\n words=jieba.lcut(line2, cut_all=False)\n\n s1 = set(title_list)\n s2 = set(words)\n article_count=len(s2)\n total_article_count=total_article_count+article_count\n intersection=s1.intersection(s2)\n count=len(intersection)\n total_count=total_count+count\n \n\n # print(format(total_count/total_article_count,'0.1%'))\n if total_article_count==0:\n Title_Analyst = \"0\"\n\n else:\n Title_Analyst = format(total_count/total_article_count*100 , '0.2f')\n\n cur.execute (\"UPDATE 
mobile01 SET title_analyst=%s WHERE id='%s'\" % (Title_Analyst,mobile01_id))\n conn.commit()\n else :\n cur.execute (\"DELETE FROM mobile01 WHERE id='%s'\" % (mobile01_id))\n conn.commit()\n time.sleep(1)\n\ncur.close()\nconn.close()","sub_path":"python/TitleAnalystMobile01.py","file_name":"TitleAnalystMobile01.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"488387483","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot\n\nprint(\"hello world\")\nSRC_IMG_PATH = \"res/ori_1.jpg\"\nSRC_IMG_COPY_PATH = \"out/ori_copy.jpg\"\nOUT_DIR = \"out/\"\nZOOM_SCALE = 0.2\n\n\ndef split2yuv(img):\n b, g, r = cv2.split(img)\n # show_img(\"b channel\", b)\n # show_img(\"g channel\", g)\n # show_img(\"r channel\", r)\n\n img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\n y, u, v = cv2.split(img_yuv)\n cv2.imwrite(OUT_DIR + \"yuv.jpg\", img_yuv)\n cv2.imwrite(OUT_DIR + \"y_channel.jpg\", y)\n cv2.imwrite(OUT_DIR + \"u_channel.jpg\", u)\n cv2.imwrite(OUT_DIR + \"v_channel.jpg\", v)\n # show_img(\"y channel\", y)\n # show_img(\"u channel\", u)\n # show_img(\"v channel\", v)\n # return y channel img(gray image)\n return y\n\n\ndef hist(img):\n hist1 = cv2.calcHist([img], [0], None, [256], [0.0, 255.0])\n pyplot.plot(range(256), hist1, 'r')\n pyplot.show()\n pyplot.title(\"hist\")\n\n\ndef show_img(title, img):\n cv2.namedWindow(title)\n cv2.imshow(title, img)\n # cv2.waitKey(0)\n\n\ndef get_edge(img):\n blur_img = cv2.blur(img, ksize=(3, 3))\n canny_edge_op = cv2.Canny(blur_img, 50, 200)\n # show_img(\"gray\", img)\n # show_img(\"blur\", blur_img)\n # show_img(\"canny_optimize\", canny_edge_op)\n cv2.imwrite(OUT_DIR + \"gray.jpg\", img)\n cv2.imwrite(OUT_DIR + \"blur.jpg\", blur_img)\n cv2.imwrite(OUT_DIR + \"canny_optimize.jpg\", canny_edge_op)\n dst_img = np.zeros(img.shape, np.uint8)\n cv2.copyTo(img, canny_edge_op, dst=dst_img)\n show_img(\"dst_img\", dst_img)\n cv2.imwrite(OUT_DIR + \"dst_img.jpg\", dst_img)\n cv2.waitKey(0)\n return canny_edge_op, dst_img\n\n\ndef get_contour(img, gray_img):\n ret, thresh = cv2.threshold(gray_img, 100, 255, 0)\n show_img(\"thresh\", thresh)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # draw a single contour, e.g. the fourth one\n # imag = cv2.drawContour(img,contours,-1,(0,255,0),3)\n # but most of the time, the method below is more useful\n imag = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)\n\n cv2.imshow('img', img)\n # cv2.imshow('gray_img', gray_img)\n # show_img(\"image\", imag)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n# 2.a\nsrcImg = cv2.imread(SRC_IMG_PATH)\n# show_img(\"srcImg\", srcImg)\n# 2.b\nwidth, height, channel = srcImg.shape\nprint(\"width:\", width, \", height:\", height, \",channel:\", channel)\nprint(\"src mat size:\", srcImg.size)\n# 2.c\n# cv2.imwrite(SRC_IMG_COPY_PATH, srcImg)\n# hist(srcImg)\n\n# 2.e\nimg_scaled = cv2.resize(srcImg, dsize=None, fx=ZOOM_SCALE, fy=ZOOM_SCALE)\nshow_img(\"scaledImg\", img_scaled)\ncv2.imwrite(OUT_DIR + \"scald_\" + str(ZOOM_SCALE) + \".jpg\", img_scaled)\n# 2.d\nimg_gray = split2yuv(img_scaled)\n\n# 3.a\nimg_edge, img_edge_on_src = get_edge(img_gray)\n\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\nclosed = cv2.morphologyEx(img_edge_on_src, cv2.MORPH_CLOSE, kernel)\n# closed = cv2.morphologyEx(closed, cv2.MORPH_DILATE, kernel)\n# closed = cv2.morphologyEx(closed, cv2.MORPH_DILATE, kernel)\ncv2.imshow(\"Close\", closed)\n\n# opened = cv2.morphologyEx(img_edge_on_src, cv2.MORPH_OPEN, 
kernel)\n# cv2.imshow(\"Open\", opened)\n# 3.b\nget_contour(img_scaled, closed)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391994747","text":"# ---------------------------------------------------------------------\n# 3Com.4500.get_interface_status\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2018 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus\n\n\nclass Script(BaseScript):\n name = \"3Com.4500.get_interface_status\"\n interface = IGetInterfaceStatus\n\n rx_interface_status = re.compile(\n r\"^(?P\\S+)\\s+(?PUP|DOWN)\\s+\\S+\\s+\\S+\\s+\\S+\\s+\\d+\\s*$\", re.MULTILINE\n )\n\n def execute(self, interface=None):\n r = []\n # Try SNMP first\n \"\"\"\n if self.has_snmp():\n try:\n for n, s in self.snmp.join_tables(\"1.3.6.1.2.1.31.1.1.1.1\",\n \"1.3.6.1.2.1.2.2.1.8\", bulk=True): # IF-MIB\n if 'Ethernet' in n:\n if interface:\n if n == interface.replace('Gi ', 'GigabitEthernet'):\n r.append({\n \"interface\": n,\n \"status\": int(s) == 1\n })\n else:\n r.append({\n \"interface\": n,\n \"status\": int(s) == 1\n })\n return r\n except self.snmp.TimeOutError:\n pass\n\n \"\"\"\n # Fallback to CLI\n if interface:\n cmd = \"display interface %s\" % interface\n else:\n cmd = \"display interface\"\n for match in self.rx_interface_status.finditer(self.cli(cmd)):\n r += [{\"interface\": match.group(\"interface\"), \"status\": match.group(\"status\") == \"UP\"}]\n if not r:\n if interface:\n cmd = \"display brief interface %s\" % interface\n else:\n cmd = \"display brief interface\"\n for match in self.rx_interface_status.finditer(self.cli(cmd)):\n r += [\n {\"interface\": match.group(\"interface\"), \"status\": match.group(\"status\") == \"UP\"}\n ]\n return r\n","sub_path":"sa/profiles/3Com/4500/get_interface_status.py","file_name":"get_interface_status.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"603069538","text":"#!/usr/bin/env python3\n#coding:utf-8\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nbase_url = 'https://movie.douban.com/'\nurl = 'https://movie.douban.com/typerank?type_name=%E5%96%9C%E5%89%A7&type=24&interval_id=100:90&action='\n\nheader = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/'\n '537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n\n\nresponse = requests.get(url, header).text\nbaseObj = BeautifulSoup(response, 'lxml')\nmovie = baseObj.select('.movie-list-item')\n\nprint(movie)","sub_path":"day10_requests/movie2.py","file_name":"movie2.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"407534302","text":"# %matplotlib inline\n\nfrom __future__ import division,print_function\n\nimport os, json\nfrom glob import glob\nimport numpy as np\nimport sys\n# np.set_printoptions(precision=4, linewidth=100)\n# from matplotlib import pyplot as plt\n\n# Import our class, and instantiate\nfrom vgg16 import Vgg16\n\ndef emptyyy():\n return \"foo foo foo!\"\n\ndef 
local_pet_id(path):\n vgg = Vgg16()\n \n # could have a line here saying if path is null then\n # path='./webdata/'\n # batches = vgg.get_batches(path+'onepic', batch_size=1)\n \n batches = vgg.get_batches(path, batch_size=1)\n imgs,labels = next(batches)\n\n #all_preds = vgg.model.predict(imgs)\n all_preds=vgg.model.predict(imgs)\n \n Beaker_like=[253,225,173,211,193,151,273]\n Baron_like=range(236,240)\n Mildred_like=range(281,289)\n \n #i want Beaker_ary to be a 2d list.\n Beaker_ary=[]\n Baron_ary=[]\n Mildred_ary=[]\n \n for i in range(0,len(imgs)):\n Beaker_ary.append([all_preds[i][x] for x in Beaker_like])\n Baron_ary.append([all_preds[i][x] for x in Baron_like])\n Mildred_ary.append([all_preds[i][x] for x in Mildred_like])\n \n naive_guesses=[]\n \n for i in range(0,len(imgs)):\n Be=np.max(Beaker_ary[i])\n Ba=np.max(Baron_ary[i])\n Mi=np.max(Mildred_ary[i])\n if(Be>Ba):\n if(Be>Mi):\n naive_guesses.append(\"Beaker\")\n else: naive_guesses.append(\"Mildred\")\n else:\n if (Ba>Mi):\n naive_guesses.append(\"Baron\")\n else: naive_guesses.append(\"Mildred\")\n\n return(naive_guesses[0])\n","sub_path":"pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394543623","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 16 11:43:01 2019\n\n@author: Vijay\n\"\"\"\n\"\"\"\nCode Challenge 2\nPerform similar steps as in the above code challenge but store the contents in \nan online mongo atlas database\n\"\"\"\n\nimport pymongo\n#import dns # required for connecting with SRV\n#client = pymongo.MongoClient(\"mongodb://K_Vaid:123chandu30%26@cluster0-shard-00-00-tofyu.mongodb.net:27017,cluster0-shard-00-01-tofyu.mongodb.net:27017,cluster0-shard-00-02-tofyu.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true\")\nclient = pymongo.MongoClient(\"mongodb://vijay1997:vijay%401997@vijay-shard-00-00-obffw.gcp.mongodb.net:27017,vijay-shard-00-01-obffw.gcp.mongodb.net:27017,vijay-shard-00-02-obffw.gcp.mongodb.net:27017/test?ssl=true&replicaSet=vijay-shard-0&authSource=admin&retryWrites=true\")\n\nmydb = client.vijayshersiyadb\n\ndef add_stu(student_name,student_age,student_roll_no,student_branch):\n #unique_employee = mydb.employees.find_one({\"id\":idd})\n #if unique_employee:\n # return \"Employee already exists\"\n #else:\n mydb.vijay.insert_one(\n {\n \"student_name\" :student_name ,\n \"student_age\" :student_age, \n \"student_roll_no\" : student_roll_no, \n \"student_branch \" : student_branch\n })\n return \"stu added successfully\"\n\n\ndef fetch_all_stu():\n user = mydb.vijay.find()\n for i in user:\n print (i)\n\nadd_stu ('vijay',22, 45, 'cs')\nadd_stu ('mohit',21, 46, 'cs')\nadd_stu ('dig',23, 47, 'cs')\nadd_stu ('viku',24, 48, 'cs')\nadd_stu ('anupam',22, 49, 'cs')\n\nfetch_all_stu()","sub_path":"day 9/untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"629778261","text":"class Stack:\n def __init__(self):\n self._number = 1\n self._attack = 1\n self._defence = 1\n self._damage = 1\n self._max_health = 1\n self._health = 1\n self._retaliation = True\n self._is_dead = False\n\n def get_damage(self, damage: int):\n all_healthpoints = (self._number - 1) * self._max_health + self._health\n all_healthpoints -= damage\n if all_healthpoints <= 0:\n self._is_dead = True\n else:\n self._number = 
all_healthpoints // self._max_health\n            self._health = all_healthpoints % self._max_health\n            if self._health == 0:\n                # an exact multiple of max health means the last unit is at full health\n                self._health = self._max_health\n            else:\n                # a nonzero remainder means one extra, wounded unit survives\n                self._number += 1\n\n\n","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"282470889","text":"import cv2\nimport numpy as np\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\ndef helperBlendImages(I1, I2):\n\n    # Identify the image regions in the two images by masking out the black regions.\n    mask1 = I1.sum(axis=2)\n    mask1 = (mask1 > 0).astype(int)\n\n    mask2 = I2.sum(axis=2)\n    mask2 = (mask2 > 0).astype(int)\n\n    maskc = ((mask1 + mask2) // 2).astype(bool)\n\n    # Compute alpha values that are proportional to the center seam of the two images.\n    alpha1 = (np.ones(mask1.shape, dtype=np.uint8))\n    alpha2 = (np.ones(mask2.shape, dtype=np.uint8))\n    \n    edge1 = edge(mask1)\n    \n    if np.amin(edge1) == 0 and np.amax(edge1) == 0:\n        dist1 = np.ones(edge1.shape)*np.inf\n\n    else:\n        dist1 = ndimage.distance_transform_edt(1 - edge1)\n\n    edge2 = edge(mask2)\n    dist2 = ndimage.distance_transform_edt(1 - edge2)\n\n    alpha1[maskc] = dist1[maskc] > dist2[maskc]\n    alpha2[maskc] = dist1[maskc] <= dist2[maskc]\n\n    alpha1 = alpha1[..., np.newaxis]\n    alpha2 = alpha2[..., np.newaxis]\n\n    outputImage = (alpha1*I1 + alpha2*I2).astype(np.uint8)\n\n    return outputImage\n\ndef edge(I):\n    kernel = np.ones((7,7), np.uint8)\n    edge_map = I - cv2.erode(I.astype(np.uint8), kernel=kernel, iterations=1)\n    return edge_map\n","sub_path":"blend.py","file_name":"blend.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"644580990","text":"import functools\nimport inspect\n\nfrom doctor._schema import Schema\nfrom doctor._util import UNSET, get_wrapped, make_schema_dict, with_wraps\n\n\nclass Annotation(object):\n\n    \"\"\"An annotation contains metadata about a Doctor decorated function.\n\n    When the :func:`annotate` decorator is used, it creates an annotation for\n    the decorated method and attaches it there. 
The annotation object contains\n lots of metadata used by Doctor, like the arguments accepted by the\n function and the schema that should be used to validate things.\n\n :param callable annotated_func:\n :param function func:\n :param list[str] arg_names:\n :param str or None args_name:\n :param str or None kwargs_name:\n :param tuple or None default_values:\n :param Schema schema:\n :param dict args_schema:\n :param dict results_schema:\n \"\"\"\n\n _iterable_properties = ('annotated_func', 'func', 'is_method', 'arg_names',\n 'args_name', 'kwargs_name', 'default_values',\n 'schema', 'args_schema', 'result_schema')\n\n def __init__(self, annotated_func, func, is_method, arg_names, args_name,\n kwargs_name, default_values, schema, args_schema=None,\n result_schema=None):\n self.annotated_func = annotated_func\n self.func = func\n self.is_method = is_method\n self.arg_names = arg_names\n self.args_name = args_name\n self.kwargs_name = kwargs_name\n self.default_values = default_values\n self.schema = schema\n self.args_schema = args_schema\n self.result_schema = result_schema\n\n def __iter__(self):\n for attr in self._iterable_properties:\n yield attr, getattr(self, attr)\n\n def __eq__(self, other):\n if not isinstance(other, Annotation):\n return False\n for key in self._iterable_properties:\n if getattr(self, key) != getattr(other, key):\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def collect_properties(self, call_args, call_kwargs):\n \"\"\"Return a dict of properties for validation.\n\n :param tuple call_args: Positional arguments from the function call.\n :param dict call_kwargs: Keyword arguments from the function call.\n :returns: dict\n \"\"\"\n properties = {}\n for i, name in enumerate(self.arg_names):\n if name not in self.args_schema['properties']:\n continue\n if i < len(call_args):\n properties[name] = call_args[i]\n elif name in call_kwargs:\n properties[name] = call_kwargs[name]\n return properties\n\n @classmethod\n def create_args_schema(cls, schema, arg_names, default_values, is_method):\n \"\"\"Create a schema using the annotated function's arguments.\n\n :param Schema schema: Source schema.\n :param list[str] arg_names: The names of the function's arguments.\n :param tuple or None default_values: Any default values for the\n function's arguments.\n :param bool is_method: If True, the function will be treated as a\n method and the first argument (self) will be ignored.\n :returns: dict or None\n \"\"\"\n if is_method:\n # Don't validate the \"self\" parameter for methods.\n arg_names = arg_names[1:]\n if not arg_names:\n # No parameters, nothing to validate.\n return None\n\n # Require any positional arguments.\n required_arg_names = arg_names\n if default_values:\n required_arg_names = required_arg_names[:-len(default_values)]\n return make_schema_dict(schema, 'args', arg_names, required_arg_names)\n\n @classmethod\n def create(cls, _callable, schema, args_schema=UNSET, result_schema=None,\n is_method=False):\n \"\"\"Create a new Annotation object for the given callable.\n\n :param callable _callable:\n :param Schema schema:\n :param dict args_schema:\n :param dict result_schema:\n :param bool is_method:\n :returns: Annotation\n \"\"\"\n if not callable(_callable):\n raise TypeError('{!r} must be a callable (was {!s})'.format(\n _callable, type(_callable)))\n if not isinstance(schema, Schema):\n raise TypeError(('schema should be an instance of Schema '\n '(was {!r})').format(schema))\n\n func = _callable\n if not 
inspect.isfunction(func):\n is_method = True\n func = func.__call__\n func = getattr(func, '__func__', func)\n\n # Use reflection to get details about the function, so we can use\n # that to generate a schema for it.\n arg_names, args_name, kwargs_name, default_values = (\n inspect.getargspec(func))\n\n # If they haven't passed an args schema, assume they want to validate\n # all the arguments and create a schema on the fly.\n if args_schema is UNSET:\n args_schema = cls.create_args_schema(schema, arg_names,\n default_values, is_method)\n\n return Annotation(_callable, func, is_method, arg_names, args_name,\n kwargs_name, default_values, schema,\n args_schema=args_schema, result_schema=result_schema)\n\n\n@with_wraps(arguments=True)\ndef annotate(schema, args=UNSET, required_args=None, result=None,\n is_method=False):\n \"\"\"Annotate schema metadata for a method.\n\n The method's arguments and result will be validated using the schema when\n the method is called, if args or result are specified.\n\n :param Schema schema: Schema that should be used for validation.\n :param args: If defined, this specifies how to validate the arguments\n to the function.\n :type args: str, dict, list[str], or None\n :param list[str] required_args: If args is a list of strings, these\n arguments will be marked as required.\n :param result: If defined, this specifies how to validate the result of\n the function.\n :type result: str, dict, list[str], or None\n :param bool is_method: If True, treat the annotated function as a method.\n This will ignore the initial argument (self) for validation.\n \"\"\"\n args_schema = make_schema_dict(schema, 'args', args, required_args)\n result_schema = make_schema_dict(schema, 'result', result)\n\n def decorator(func):\n annotation = Annotation.create(\n get_wrapped(func), schema, args_schema=args_schema,\n result_schema=result_schema, is_method=is_method)\n func._doctor_annotation = annotation\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if annotation.args_schema is not None:\n properties = annotation.collect_properties(args, kwargs)\n schema.validator.validate(properties, annotation.args_schema)\n result = func(*args, **kwargs)\n if annotation.result_schema is not None:\n schema.validator.validate(result, annotation.result_schema)\n return result\n wrapper._decorated = func\n return wrapper\n\n return decorator\n","sub_path":"doctor/_annotation.py","file_name":"_annotation.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574646324","text":"#!/usr/bin/python\nimport socket\n\nprint(\"\\033[1m\\033[32m[+] TCP Client v0.1\")\nprint(\"\\033[1m\\033[32m[+] Made by: c7dm4n\\n\")\n\nip = raw_input(\"\\033[1m\\033[0;0m[?] Type an ip: \")\nport = input(\"\\033[1m\\033[0;0m[?] Type an port: \")\nmysockt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nif mysockt.connect_ex((ip, port)):\n print(\"\\n\\033[1m\\033[31m[!] Port closed\\n\")\nelse:\n mysockt.sendall(b\"Hello, world\")\n data = mysockt.recv(4098)\n print(\"\\n\\033[1m\\033[32m[!] 
Key:\\n\")\n print(data)\n","sub_path":"tcpclient.py","file_name":"tcpclient.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646800063","text":"from openmdao.api import Component, Group, IndepVarComp\nimport numpy as np\nimport pyframe3dd.frame3dd as frame3dd\nfrom commonse.utilities import nodal2sectional\n\nfrom commonse import gravity, eps, Tube\nimport commonse.UtilizationSupplement as util\nfrom commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag\nfrom commonse.environment import WaveBase, PowerWind\nfrom commonse.vertical_cylinder import CylinderDiscretization, CylinderMass\n\n\ndef find_nearest(array,value):\n return (np.abs(array-value)).argmin() \n\n\nclass FloatingFrame(Component):\n \"\"\"\n OpenMDAO Component class for semisubmersible pontoon / truss structure for floating offshore wind turbines.\n Should be tightly coupled with Semi and Mooring classes for full system representation.\n \"\"\"\n\n def __init__(self, nFull):\n super(FloatingFrame,self).__init__()\n\n # Environment\n self.add_param('water_density', val=0.0, units='kg/m**3', desc='density of water')\n\n # Material properties\n self.add_param('material_density', val=0., units='kg/m**3', desc='density of material')\n self.add_param('E', val=0.0, units='Pa', desc='Modulus of elasticity (Youngs) of material')\n self.add_param('G', val=0.0, units='Pa', desc='Shear modulus of material')\n self.add_param('yield_stress', val=0.0, units='Pa', desc='yield stress of material')\n\n # Base column\n self.add_param('base_z_full', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')\n self.add_param('base_d_full', val=np.zeros((nFull,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')\n self.add_param('base_t_full', val=np.zeros((nFull,)), units='m', desc='shell wall thickness at each section node bottom to top (length = nsection + 1)')\n self.add_param('base_column_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of base column by section')\n self.add_param('base_column_displaced_volume', val=np.zeros((nFull-1,)), units='m**3', desc='column volume of water displaced by section')\n self.add_param('base_column_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')\n self.add_param('base_column_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')\n self.add_param('base_column_Px', np.zeros(nFull), units='N/m', desc='force per unit length in x-direction on base')\n self.add_param('base_column_Py', np.zeros(nFull), units='N/m', desc='force per unit length in y-direction on base')\n self.add_param('base_column_Pz', np.zeros(nFull), units='N/m', desc='force per unit length in z-direction on base')\n self.add_param('base_column_qdyn', np.zeros(nFull), units='N/m**2', desc='dynamic pressure on base')\n\n self.add_param('base_pontoon_attach_upper', val=0.0, units='m', desc='z-value of upper truss attachment on base column')\n self.add_param('base_pontoon_attach_lower', val=0.0, units='m', desc='z-value of lower truss attachment on base column')\n\n # Ballast columns\n self.add_param('auxiliary_z_full', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')\n self.add_param('auxiliary_d_full', val=np.zeros((nFull,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')\n 
self.add_param('auxiliary_t_full', val=np.zeros((nFull,)), units='m', desc='shell wall thickness at each section node bottom to top (length = nsection + 1)')\n self.add_param('auxiliary_column_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of ballast column by section')\n self.add_param('auxiliary_column_displaced_volume', val=np.zeros((nFull-1,)), units='m**3', desc='column volume of water displaced by section')\n self.add_param('auxiliary_column_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')\n self.add_param('auxiliary_column_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')\n self.add_param('auxiliary_column_Px', np.zeros(nFull), units='N/m', desc='force per unit length in x-direction on ballast')\n self.add_param('auxiliary_column_Py', np.zeros(nFull), units='N/m', desc='force per unit length in y-direction on ballast')\n self.add_param('auxiliary_column_Pz', np.zeros(nFull), units='N/m', desc='force per unit length in z-direction on ballast')\n self.add_param('auxiliary_column_qdyn', np.zeros(nFull), units='N/m**2', desc='dynamic pressure on ballast')\n\n self.add_param('fairlead', val=0.0, units='m', desc='Depth below water for mooring line attachment')\n\n # Tower\n self.add_param('tower_z_full', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')\n self.add_param('tower_d_full', val=np.zeros((nFull,)), units='m', desc='outer radius at each section node bottom to top (length = nsection + 1)')\n self.add_param('tower_t_full', val=np.zeros((nFull,)), units='m', desc='shell wall thickness at each section node bottom to top (length = nsection + 1)')\n self.add_param('tower_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of tower column by section')\n self.add_param('tower_buckling_length', 0.0, units='m', desc='buckling length')\n self.add_param('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of tower mass')\n self.add_param('tower_Px', np.zeros(nFull), units='N/m', desc='force per unit length in x-direction on tower')\n self.add_param('tower_Py', np.zeros(nFull), units='N/m', desc='force per unit length in y-direction on tower')\n self.add_param('tower_Pz', np.zeros(nFull), units='N/m', desc='force per unit length in z-direction on tower')\n self.add_param('tower_qdyn', np.zeros(nFull), units='N/m**2', desc='dynamic pressure on tower')\n \n # Semi geometry\n self.add_param('radius_to_auxiliary_column', val=0.0, units='m',desc='Distance from base column centerpoint to ballast column centerpoint')\n self.add_param('number_of_auxiliary_columns', val=3, desc='Number of ballast columns evenly spaced around base column')\n\n # Pontoon properties\n self.add_param('pontoon_outer_diameter', val=0.0, units='m',desc='Outer radius of tubular pontoon that connects ballast or base columns')\n self.add_param('pontoon_wall_thickness', val=0.0, units='m',desc='Inner radius of tubular pontoon that connects ballast or base columns')\n self.add_param('cross_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the bottom of the central base to the tops of the outer ballast columns', pass_by_obj=True)\n self.add_param('lower_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their bottoms', pass_by_obj=True)\n self.add_param('upper_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their 
tops', pass_by_obj=True)\n self.add_param('lower_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their bottoms', pass_by_obj=True)\n self.add_param('upper_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their tops', pass_by_obj=True)\n self.add_param('outer_cross_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their tops', pass_by_obj=True)\n \n # Turbine parameters\n self.add_param('rna_mass', val=0.0, units='kg', desc='mass of tower')\n self.add_param('rna_cg', val=np.zeros(3), units='m', desc='Location of RNA center of mass relative to tower top')\n self.add_param('rna_force', val=np.zeros(3), units='N', desc='Force in xyz-direction on turbine')\n self.add_param('rna_moment', val=np.zeros(3), units='N*m', desc='Moments about turbine base')\n self.add_param('rna_I', val=np.zeros(6), units='kg*m**2', desc='Moments about turbine base')\n\n # safety factors\n self.add_param('gamma_f', 0.0, desc='safety factor on loads')\n self.add_param('gamma_m', 0.0, desc='safety factor on materials')\n self.add_param('gamma_n', 0.0, desc='safety factor on consequence of failure')\n self.add_param('gamma_b', 0.0, desc='buckling safety factor')\n self.add_param('gamma_fatigue', 0.0, desc='total safety factor for fatigue')\n\n # Manufacturing\n self.add_param('connection_ratio_max', val=0.0, desc='Maximum ratio of pontoon outer diameter to base/ballast outer diameter')\n \n # Costing\n self.add_param('pontoon_cost_rate', val=6.250, units='USD/kg', desc='Finished cost rate of truss components')\n \n # Outputs\n self.add_output('pontoon_cost', val=0.0, units='USD', desc='Cost of pontoon elements and connecting truss')\n self.add_output('pontoon_mass', val=0.0, units='kg', desc='Mass of pontoon elements and connecting truss')\n self.add_output('pontoon_displacement', val=0.0, units='m**3', desc='Buoyancy force of submerged pontoon elements')\n self.add_output('pontoon_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of pontoon buoyancy force')\n self.add_output('pontoon_center_of_mass', val=0.0, units='m', desc='z-position of center of pontoon mass')\n\n self.add_output('pontoon_stress', val=np.zeros((60,)), desc='Utilization (<1) of von Mises stress by yield stress and safety factor for all pontoon elements')\n self.add_output('tower_stress', np.zeros(nFull-1), desc='Von Mises stress utilization along tower at specified locations. incudes safety factor.')\n self.add_output('tower_shell_buckling', np.zeros(nFull-1), desc='Shell buckling constraint. Should be < 1 for feasibility. Includes safety factors')\n self.add_output('tower_global_buckling', np.zeros(nFull-1), desc='Global buckling constraint. Should be < 1 for feasibility. 
Includes safety factors')\n self.add_output('top_deflection', 0.0, units='m', desc='Deflection of tower top in yaw-aligned +x direction')\n\n self.add_output('plot_matrix', val=np.array([]), desc='Ratio of shear stress to yield stress for all pontoon elements', pass_by_obj=True)\n self.add_output('base_connection_ratio', val=np.zeros((nFull,)), desc='Ratio of pontoon outer diameter to base outer diameter')\n self.add_output('auxiliary_connection_ratio', val=np.zeros((nFull,)), desc='Ratio of pontoon outer diameter to base outer diameter')\n self.add_output('pontoon_base_attach_upper', val=0.0, desc='Fractional distance along base column for upper truss attachment')\n self.add_output('pontoon_base_attach_lower', val=0.0, desc='Fractional distance along base column for lower truss attachment')\n\n self.add_output('structural_frequencies', np.zeros(6), units='Hz', desc='First six natural frequencies')\n self.add_output('substructure_mass', val=0.0, units='kg', desc='Mass of substructure elements and connecting truss')\n self.add_output('structural_mass', val=0.0, units='kg', desc='Mass of whole turbine except for mooring lines')\n self.add_output('total_displacement', val=0.0, units='m**3', desc='Total volume of water displaced by floating turbine (except for mooring lines)')\n self.add_output('z_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of buoyancy of whole turbine')\n self.add_output('substructure_center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity of substructure only')\n self.add_output('center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity of whole turbine')\n self.add_output('total_force', val=np.zeros(3), units='N', desc='Net forces on turbine')\n self.add_output('total_moment', val=np.zeros(3), units='N*m', desc='Moments on whole turbine')\n \n # Derivatives\n self.deriv_options['type'] = 'fd'\n self.deriv_options['form'] = 'central'\n self.deriv_options['check_form'] = 'central'\n self.deriv_options['step_calc'] = 'relative'\n self.deriv_options['step_size'] = 1e-5\n \n def solve_nonlinear(self, params, unknowns, resids):\n # If something fails, we have to tell the optimizer this design is no good\n def bad_input():\n unknowns['structural_frequencies'] = 1e30 * np.ones(6)\n unknowns['top_deflection'] = 1e30\n unknowns['substructure_mass'] = 1e30\n unknowns['structural_mass'] = 1e30\n unknowns['total_displacement'] = 1e30\n unknowns['z_center_of_buoyancy'] = 0.0\n unknowns['substructure_center_of_mass'] = 1e30 * np.ones(3)\n unknowns['center_of_mass'] = 1e30 * np.ones(3)\n unknowns['total_force'] = 1e30 * np.ones(3)\n unknowns['total_moment'] = 1e30 * np.ones(3)\n unknowns['tower_stress'] = 1e30 * np.ones(m_base.shape)\n unknowns['tower_shell_buckling'] = 1e30 * np.ones(m_base.shape)\n unknowns['tower_global_buckling'] = 1e30 * np.ones(m_base.shape)\n return\n \n # Unpack variables\n crossAttachFlag = params['cross_attachment_pontoons']\n lowerAttachFlag = params['lower_attachment_pontoons']\n upperAttachFlag = params['upper_attachment_pontoons']\n lowerRingFlag = params['lower_ring_pontoons']\n upperRingFlag = params['upper_ring_pontoons']\n outerCrossFlag = params['outer_cross_pontoons']\n \n R_semi = params['radius_to_auxiliary_column']\n R_od_pontoon = 0.5*params['pontoon_outer_diameter']\n R_od_base = 0.5*params['base_d_full']\n R_od_ballast = 0.5*params['auxiliary_d_full']\n R_od_tower = 0.5*params['tower_d_full']\n\n t_wall_base = params['base_t_full']\n t_wall_ballast = 
params['auxiliary_t_full']\n        t_wall_pontoon = params['pontoon_wall_thickness']\n        t_wall_tower = params['tower_t_full']\n\n        E = params['E']\n        G = params['G']\n        rho = params['material_density']\n        sigma_y = params['yield_stress']\n        \n        ncolumn = int(params['number_of_auxiliary_columns'])\n        z_base = params['base_z_full']\n        z_ballast = params['auxiliary_z_full']\n        z_tower = params['tower_z_full']\n        z_attach_upper = params['base_pontoon_attach_upper']\n        z_attach_lower = params['base_pontoon_attach_lower']\n        z_fairlead = -params['fairlead']\n        \n        m_base = params['base_column_mass']\n        m_ballast = params['auxiliary_column_mass']\n        m_tower = params['tower_mass']\n        \n        m_rna = params['rna_mass']\n        F_rna = params['rna_force']\n        M_rna = params['rna_moment']\n        I_rna = params['rna_I']\n        cg_rna = params['rna_cg']\n        \n        rhoWater = params['water_density']\n        \n        V_base = params['base_column_displaced_volume']\n        V_ballast = params['auxiliary_column_displaced_volume']\n\n        z_cb_base = params['base_column_center_of_buoyancy']\n        z_cb_ballast = params['auxiliary_column_center_of_buoyancy']\n        \n        cg_base = np.r_[0.0, 0.0, params['base_column_center_of_mass']]\n        cg_ballast = np.r_[0.0, 0.0, params['auxiliary_column_center_of_mass']]\n        cg_tower = np.r_[0.0, 0.0, params['tower_center_of_mass']]\n        \n        coeff = params['pontoon_cost_rate']\n        \n        gamma_f = params['gamma_f']\n        gamma_m = params['gamma_m']\n        gamma_n = params['gamma_n']\n        gamma_b = params['gamma_b']\n        gamma_fatigue = params['gamma_fatigue']\n\n        # Quick ratio for unknowns\n        unknowns['base_connection_ratio'] = params['connection_ratio_max'] - R_od_pontoon/R_od_base\n        unknowns['auxiliary_connection_ratio'] = params['connection_ratio_max'] - R_od_pontoon/R_od_ballast\n        unknowns['pontoon_base_attach_upper'] = (z_attach_upper - z_base[0]) / (z_base[-1] - z_base[0]) #0.5<x<1.0\n        unknowns['pontoon_base_attach_lower'] = (z_attach_lower - z_base[0]) / (z_base[-1] - z_base[0]) #0.0<x<0.5\n\n        # Sanity check on the number of auxiliary columns\n        if ncolumn > 7:\n            bad_input()\n            return\n\n        # If there are auxiliary columns, must have attachment pontoons (only have ring pontoons doesn't make sense)\n        if (ncolumn > 0) and (not crossAttachFlag) and (not lowerAttachFlag) and (not upperAttachFlag):\n            bad_input()\n            return\n        \n        # ---NODES---\n        # Senu TODO: Should tower and rna have nodes at their CGs?\n        # Senu TODO: Mooring tension on column nodes?\n\n        # Add nodes for base column: Using 4 nodes/3 elements per section\n        # Make sure there is a node at upper and lower attachment points\n        baseBeginID = 0 + 1\n        if ncolumn > 0:\n            idx = find_nearest(z_base, z_attach_lower)\n            z_base[idx] = z_attach_lower\n            baseLowerID = idx + 1\n            \n            idx = find_nearest(z_base, z_attach_upper)\n            z_base[idx] = z_attach_upper\n            baseUpperID = idx + 1\n            \n        baseEndID = z_base.size\n        freeboard = z_base[-1]\n\n        fairleadID = []\n        # Need reaction attachment point if just running a spar\n        if ncolumn == 0:\n            idx = find_nearest(z_base, z_fairlead)\n            z_base[idx] = z_fairlead\n            fairleadID.append( idx + 1 )\n        \n        znode = np.copy( z_base )\n        xnode = np.zeros(znode.shape)\n        ynode = np.zeros(znode.shape)\n\n        towerBeginID = baseEndID\n        myz = np.zeros(len(z_tower)-1)\n        xnode = np.append(xnode, myz)\n        ynode = np.append(ynode, myz)\n        znode = np.append(znode, z_tower[1:] + freeboard )\n        towerEndID = xnode.size\n\n        # Create dummy node so that the tower isn't the last in a chain.\n        # This avoids a Frame3DD bug\n        dummyID = xnode.size + 1\n        xnode = np.append(xnode, 0.0)\n        ynode = np.append(ynode, 0.0)\n        znode = np.append(znode, znode[-1]+1.0 )\n        \n        # Get x and y positions of surrounding ballast columns\n        ballastLowerID = []\n        ballastUpperID = []\n        ballastx = R_semi * np.cos( np.linspace(0, 2*np.pi, ncolumn+1) )\n        ballasty = 
R_semi * np.sin( np.linspace(0, 2*np.pi, ncolumn+1) )\n ballastx = ballastx[:-1]\n ballasty = ballasty[:-1]\n\n # Add in ballast column nodes around the circle, make sure there is a node at the fairlead\n idx = find_nearest(z_ballast, z_fairlead)\n myones = np.ones(z_ballast.shape)\n for k in xrange(ncolumn):\n ballastLowerID.append( xnode.size + 1 )\n fairleadID.append( xnode.size + idx + 1 )\n xnode = np.append(xnode, ballastx[k]*myones)\n ynode = np.append(ynode, ballasty[k]*myones)\n znode = np.append(znode, z_ballast )\n ballastUpperID.append( xnode.size )\n\n # Add nodes midway around outer ring for cross bracing\n if outerCrossFlag and ncolumn > 0:\n crossx = 0.5*(ballastx + np.roll(ballastx,1))\n crossy = 0.5*(ballasty + np.roll(ballasty,1))\n\n crossOuterLowerID = xnode.size + np.arange(ncolumn) + 1\n xnode = np.append(xnode, crossx)\n ynode = np.append(ynode, crossy)\n znode = np.append(znode, z_ballast[0]*np.ones(ncolumn))\n\n #crossOuterUpperID = xnode.size + np.arange(ncolumn) + 1\n #xnode = np.append(xnode, crossx)\n #ynode = np.append(ynode, crossy)\n #znode = np.append(znode, z_ballast[-1]*np.ones(ncolumn))\n\n # Create Node Data object\n nnode = 1 + np.arange(xnode.size)\n rnode = np.zeros(xnode.shape)\n nodes = frame3dd.NodeData(nnode, xnode, ynode, znode, rnode)\n\n \n # ---REACTIONS---\n # Pin (3DOF) the nodes at the mooring connections. Otherwise free\n # Free=0, Rigid=1\n rid = np.array(fairleadID)\n Rx = Ry = Rz = Rxx = Ryy = Rzz = np.ones(rid.shape)\n #if ncolumn > 0:\n # Rxx[1:] = Ryy[1:] = Rzz[1:] = 0.0\n # First approach\n # Pinned windward column lower node (first ballastLowerID)\n #rid = ballastLowerID[0]\n #Rx = Ry = Rz = Rxx = Ryy = Rzz = 1\n # Rollers for other lower column nodes, restrict motion\n #rid = ballastLowerID[1:]\n #Rz = Rxx = Ryy = Rzz = 1\n\n # Get reactions object from frame3dd\n reactions = frame3dd.ReactionData(rid, Rx, Ry, Rz, Rxx, Ryy, Rzz, rigid=1)\n\n\n # ---ELEMENTS / EDGES---\n N1 = np.array([], dtype=np.int32)\n N2 = np.array([], dtype=np.int32)\n # Lower connection from central base column to ballast columns\n if lowerAttachFlag:\n lowerAttachEID = N1.size + 1\n for k in xrange(ncolumn):\n N1 = np.append(N1, baseLowerID )\n N2 = np.append(N2, ballastLowerID[k] )\n # Upper connection from central base column to ballast columns\n if upperAttachFlag:\n upperAttachEID = N1.size + 1\n for k in xrange(ncolumn):\n N1 = np.append(N1, baseUpperID )\n N2 = np.append(N2, ballastUpperID[k] )\n # Cross braces from lower central base column to upper ballast columns\n if crossAttachFlag:\n crossAttachEID = N1.size + 1\n for k in xrange(ncolumn):\n N1 = np.append(N1, baseLowerID )\n N2 = np.append(N2, ballastUpperID[k] )\n # Will be used later to convert from local member c.s. 
to global\n            cross_angle = np.arctan( (z_attach_upper - z_attach_lower) / R_semi )\n        # Lower ring around ballast columns\n        if lowerRingFlag:\n            lowerRingEID = N1.size + 1\n            for k in xrange(ncolumn-1):\n                N1 = np.append(N1, ballastLowerID[k] )\n                N2 = np.append(N2, ballastLowerID[k+1] )\n            N1 = np.append(N1, ballastLowerID[0] )\n            N2 = np.append(N2, ballastLowerID[-1] )\n        # Upper ring around ballast columns\n        if upperRingFlag:\n            upperRingEID = N1.size + 1\n            for k in xrange(ncolumn-1):\n                N1 = np.append(N1, ballastUpperID[k] )\n                N2 = np.append(N2, ballastUpperID[k+1] )\n            N1 = np.append(N1, ballastUpperID[0] )\n            N2 = np.append(N2, ballastUpperID[-1] )\n        # Outer cross braces\n        if outerCrossFlag:\n            outerCrossEID = N1.size + 1\n            for k in xrange(ncolumn-1):\n                N1 = np.append(N1, crossOuterLowerID[k] )\n                N2 = np.append(N2, ballastUpperID[k] )\n                N1 = np.append(N1, crossOuterLowerID[k+1] )\n                N2 = np.append(N2, ballastUpperID[k] )\n            N1 = np.append(N1, crossOuterLowerID[-1] )\n            N2 = np.append(N2, ballastUpperID[-1] )\n            N1 = np.append(N1, crossOuterLowerID[0] )\n            N2 = np.append(N2, ballastUpperID[-1] )\n        # TODO: Parameterize these for upper, lower, cross connections\n        # Properties for the inner connectors\n        mytube = Tube(2.0*R_od_pontoon, t_wall_pontoon)\n        Ax = mytube.Area * np.ones(N1.shape)\n        As = mytube.Asx * np.ones(N1.shape)\n        Jx = mytube.J0 * np.ones(N1.shape)\n        I = mytube.Jxx * np.ones(N1.shape)\n        S = mytube.S * np.ones(N1.shape)\n        C = mytube.C * np.ones(N1.shape)\n        modE = E * np.ones(N1.shape)\n        modG = G * np.ones(N1.shape)\n        roll = 0.0 * np.ones(N1.shape)\n        dens = rho * np.ones(N1.shape)\n\n        # Now mock up cylindrical columns as truss members even though long, slender assumption breaks down\n        # Will set density = 0.0 so that we don't double count the mass\n        # First get geometry in each of the elements\n        R_od_base,_ = nodal2sectional( R_od_base )\n        t_wall_base,_ = nodal2sectional( t_wall_base )\n        R_od_ballast,_ = nodal2sectional( R_od_ballast )\n        t_wall_ballast,_ = nodal2sectional( t_wall_ballast )\n        R_od_tower,_ = nodal2sectional( R_od_tower )\n        t_wall_tower,_ = nodal2sectional( t_wall_tower )\n        # Senu TODO: Make artificially more stiff?\n        baseEID = N1.size + 1\n        mytube = Tube(2.0*R_od_base, t_wall_base)\n        myrange = np.arange(R_od_base.size)\n        myones = np.ones(myrange.shape)\n        mydens = m_base / mytube.Area / np.diff(z_base) + eps\n        N1 = np.append(N1 , myrange + baseBeginID )\n        N2 = np.append(N2 , myrange + baseBeginID + 1)\n        Ax = np.append(Ax , mytube.Area )\n        As = np.append(As , mytube.Asx )\n        Jx = np.append(Jx , mytube.J0 )\n        I = np.append(I , mytube.Jxx )\n        S = np.append(S , mytube.S )\n        C = np.append(C , mytube.C )\n        modE = np.append(modE, E*myones )\n        modG = np.append(modG, G*myones )\n        roll = np.append(roll, np.zeros(myones.shape) )\n        dens = np.append(dens, mydens )\n\n        # Rest of tower\n        towerEID = N1.size + 1\n        mytube = Tube(2.0*R_od_tower, t_wall_tower)  # use the tower geometry here, not the base column tube\n        myrange = np.arange(R_od_tower.size)\n        myones = np.ones(myrange.shape)\n        mydens = m_tower / mytube.Area / np.diff(z_tower) + eps\n        N1 = np.append(N1 , myrange + towerBeginID )\n        N2 = np.append(N2 , myrange + towerBeginID + 1)\n        Ax = np.append(Ax , mytube.Area )\n        As = np.append(As , mytube.Asx )\n        Jx = np.append(Jx , mytube.J0 )\n        I = np.append(I , mytube.Jxx )\n        S = np.append(S , mytube.S )\n        C = np.append(C , mytube.C )\n        modE = np.append(modE, E*myones )\n        modG = np.append(modG, G*myones )\n        roll = np.append(roll, np.zeros(myones.shape) )\n        dens = np.append(dens, mydens ) \n\n        # Dummy element\n        dummyEID = N1.size + 1\n        N1 = np.append(N1 , towerEndID )\n        N2 = np.append(N2 , dummyID )\n        
Ax = np.append(Ax , Ax[-1] )\n As = np.append(As , As[-1] )\n Jx = np.append(Jx , Jx[-1] )\n I = np.append(I , I[-1] )\n S = np.append(S , S[-1] )\n C = np.append(C , C[-1] )\n modE = np.append(modE, 1e20 )\n modG = np.append(modG, 1e20 )\n roll = np.append(roll, 0.0 )\n dens = np.append(dens, 1e-6 ) \n \n ballastEID = []\n mytube = Tube(2.0*R_od_ballast, t_wall_ballast)\n myrange = np.arange(R_od_ballast.size)\n myones = np.ones(myrange.shape)\n mydens = m_ballast / mytube.Area / np.diff(z_ballast) + eps\n for k in xrange(ncolumn):\n ballastEID.append( N1.size + 1 )\n \n N1 = np.append(N1 , myrange + ballastLowerID[k] )\n N2 = np.append(N2 , myrange + ballastLowerID[k] + 1)\n Ax = np.append(Ax , mytube.Area )\n As = np.append(As , mytube.Asx )\n Jx = np.append(Jx , mytube.J0 )\n I = np.append(I , mytube.Jxx )\n S = np.append(S , mytube.S )\n C = np.append(C , mytube.C )\n modE = np.append(modE, E*myones )\n modG = np.append(modG, G*myones )\n roll = np.append(roll, np.zeros(myones.shape) )\n dens = np.append(dens, mydens ) # Mass added below\n\n\n # ---Get element object from frame3dd---\n nelem = 1 + np.arange(N1.size)\n elements = frame3dd.ElementData(nelem, N1, N2, Ax, As, As, Jx, I, I, modE, modG, roll, dens)\n\n # Store data for plotting, also handy for operations below\n plotMat = np.zeros((nelem.size, 3, 2))\n plotMat[:,:,0] = np.c_[xnode[N1-1], ynode[N1-1], znode[N1-1]]\n plotMat[:,:,1] = np.c_[xnode[N2-1], ynode[N2-1], znode[N2-1]]\n \n # Compute length and center of gravity for each element for use below\n elemL = np.sqrt( np.sum( np.diff(plotMat, axis=2)**2.0, axis=1) ).flatten()\n elemCoG = 0.5*np.sum(plotMat, axis=2)\n\n # ---Options object---\n shear = True # 1: include shear deformation\n geom = False # 1: include geometric stiffness\n dx = -1 # x-axis increment for internal forces, -1 to skip\n other = frame3dd.Options(shear, geom, dx)\n\n # Initialize frame3dd object\n myframe = frame3dd.Frame(nodes, reactions, elements, other)\n\n # Add in extra mass of rna\n inode = np.array([towerEndID], dtype=np.int32) # rna\n m_extra = np.array([m_rna])\n Ixx = np.array([ I_rna[0] ])\n Iyy = np.array([ I_rna[1] ])\n Izz = np.array([ I_rna[2] ])\n Ixy = np.array([ I_rna[3] ])\n Ixz = np.array([ I_rna[4] ])\n Iyz = np.array([ I_rna[5] ])\n rhox = np.array([ cg_rna[0] ])\n rhoy = np.array([ cg_rna[1] ])\n rhoz = np.array([ cg_rna[2] ])\n myframe.changeExtraNodeMass(inode, m_extra, Ixx, Iyy, Izz, Ixy, Ixz, Iyz, rhox, rhoy, rhoz, True)\n\n # ---LOAD CASES---\n # Extreme loading\n gx = 0.0\n gy = 0.0\n gz = -gravity\n load = frame3dd.StaticLoadCase(gx, gy, gz)\n\n # Wind + Wave loading in local base / ballast / tower c.s.\n Px_base, Py_base, Pz_base = params['base_column_Pz'], params['base_column_Py'], -params['base_column_Px'] # switch to local c.s.\n Px_ballast, Py_ballast, Pz_ballast = params['auxiliary_column_Pz'], params['auxiliary_column_Py'], -params['auxiliary_column_Px'] # switch to local c.s.\n Px_tower, Py_tower, Pz_tower = params['tower_Pz'], params['tower_Py'], -params['tower_Px'] # switch to local c.s.\n epsOff = 1e-5\n # Get mass right- ballasts, stiffeners, tower, rna, etc.\n # Also account for buoyancy loads\n # Also apply wind/wave loading as trapezoidal on each element\n # NOTE: Loading is in local element coordinates 0-L, x is along element\n # Base\n nrange = np.arange(R_od_base.size, dtype=np.int32)\n EL = baseEID + nrange\n Ux = V_base * rhoWater * gravity / np.diff(z_base)\n x1 = np.zeros(nrange.shape)\n x2 = np.diff(z_base) - epsOff # subtract small number b.c. 
of precision\n wx1, wx2 = Px_base[:-1], Px_base[1:]\n wy1, wy2 = Py_base[:-1], Py_base[1:]\n wz1, wz2 = Pz_base[:-1], Pz_base[1:]\n # Tower\n nrange = np.arange(R_od_tower.size, dtype=np.int32)\n EL = np.append(EL, towerEID + nrange)\n Ux = np.append(Ux, np.zeros(nrange.shape))\n x1 = np.append(x1, np.zeros(nrange.shape))\n x2 = np.append(x2, np.diff(z_tower) - epsOff)\n wx1 = np.append(wx1, Px_tower[:-1])\n wx2 = np.append(wx2, Px_tower[1:])\n wy1 = np.append(wy1, Py_tower[:-1])\n wy2 = np.append(wy2, Py_tower[1:])\n wz1 = np.append(wz1, Pz_tower[:-1])\n wz2 = np.append(wz2, Pz_tower[1:])\n # Buoyancy- ballast columns\n nrange = np.arange(R_od_ballast.size, dtype=np.int32)\n for k in xrange(ncolumn):\n EL = np.append(EL, ballastEID[k] + nrange)\n Ux = np.append(Ux, V_ballast * rhoWater * gravity / np.diff(z_ballast) )\n x1 = np.append(x1, np.zeros(nrange.shape))\n x2 = np.append(x2, np.diff(z_ballast) - epsOff)\n wx1 = np.append(wx1, Px_ballast[:-1])\n wx2 = np.append(wx2, Px_ballast[1:])\n wy1 = np.append(wy1, Py_ballast[:-1])\n wy2 = np.append(wy2, Py_ballast[1:])\n wz1 = np.append(wz1, Pz_ballast[:-1])\n wz2 = np.append(wz2, Pz_ballast[1:])\n \n # Add mass of base and ballast columns while we've already done the element enumeration\n Uz = Uy = np.zeros(Ux.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n xx1 = xy1 = xz1 = x1\n xx2 = xy2 = xz2 = x2\n load.changeTrapezoidalLoads(EL, xx1, xx2, wx1, wx2, xy1, xy2, wy1, wy2, xz1, xz2, wz1, wz2)\n\n # Buoyancy for fully submerged members\n # Note indices to elemL and elemCoG could include -1, but since there is assumed to be more than 1 column, this is not necessary\n nrange = np.arange(ncolumn, dtype=np.int32)\n Frange = np.pi * R_od_pontoon**2 * rhoWater * gravity\n F_truss = 0.0\n z_cb = np.zeros((3,))\n if ncolumn > 0 and znode[ballastLowerID[0]-1] < 0.0:\n if lowerAttachFlag:\n EL = lowerAttachEID + nrange\n Uz = Frange * np.ones(nrange.shape)\n F_truss += Frange * elemL[lowerAttachEID-1] * ncolumn\n z_cb += Frange * elemL[lowerAttachEID-1] * ncolumn * elemCoG[lowerAttachEID-1,:]\n Ux = Uy = np.zeros(Uz.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n if lowerRingFlag:\n EL = lowerRingEID + nrange\n Uz = Frange * np.ones(nrange.shape)\n F_truss += Frange * elemL[lowerRingEID-1] * ncolumn\n z_cb += Frange * elemL[lowerRingEID-1] * ncolumn * elemCoG[lowerRingEID-1]\n Ux = Uy = np.zeros(Uz.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n if crossAttachFlag:\n factor = np.minimum(1.0, (0.0 - z_attach_lower) / (znode[ballastUpperID[0]-1] - z_attach_lower) )\n EL = crossAttachEID + nrange\n Ux = factor * Frange * np.sin(cross_angle) * np.ones(nrange.shape)\n Uz = factor * Frange * np.cos(cross_angle) * np.ones(nrange.shape)\n F_truss += factor * Frange * elemL[crossAttachEID-1] * ncolumn\n z_cb += factor * Frange * elemL[crossAttachEID-1] * ncolumn * elemCoG[crossAttachEID-1,:]\n Uy = np.zeros(Uz.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n if outerCrossFlag:\n factor = np.minimum(1.0, (0.0 - znode[baseLowerID-1]) / (znode[ballastUpperID[0]-1] - znode[baseLowerID-1]) )\n # TODO: This one will take a little more math\n #EL = outerCrossEID + np.arange(2*ncolumn, dtype=np.int32) \n #Uz = factor * Frange * np.ones(nrange.shape)\n F_truss += factor * Frange * elemL[outerCrossEID-1] * ncolumn\n z_cb += factor * Frange * elemL[outerCrossEID-1] * ncolumn * elemCoG[outerCrossEID-1,:]\n #Ux = Uy = np.zeros(Uz.shape)\n #load.changeUniformLoads(EL, Ux, Uy, Uz)\n if ncolumn > 0 and znode[ballastUpperID[0]-1] < 0.0:\n if upperAttachFlag:\n EL = 
upperAttachEID + nrange\n Uz = Frange * np.ones(nrange.shape)\n F_truss += Frange * elemL[upperAttachEID-1] * ncolumn\n z_cb += Frange * elemL[upperAttachEID-1] * ncolumn * elemCoG[upperAttachEID-1,:]\n Ux = Uy = np.zeros(Uz.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n if upperRingFlag:\n EL = upperRingEID + nrange\n Uz = Frange * np.ones(nrange.shape)\n F_truss += Frange * elemL[upperRingEID-1] * ncolumn\n z_cb += Frange * elemL[upperRingEID-1] * ncolumn * elemCoG[upperRingEID-1,:]\n Ux = Uy = np.zeros(Uz.shape)\n load.changeUniformLoads(EL, Ux, Uy, Uz)\n\n # Point loading for rotor thrust and wind loads at CG\n # Note: extra momemt from mass accounted for below\n nF = np.array([ baseEndID ], dtype=np.int32)\n Fx = np.array([ F_rna[0] ])\n Fy = np.array([ F_rna[1] ])\n Fz = np.array([ F_rna[2] ])\n Mxx = np.array([ M_rna[0] ])\n Myy = np.array([ M_rna[1] ])\n Mzz = np.array([ M_rna[2] ])\n load.changePointLoads(nF, Fx, Fy, Fz, Mxx, Myy, Mzz)\n\n # Store load case into frame 3dd object\n myframe.addLoadCase(load)\n\n\n # ---DYNAMIC ANALYSIS---\n nM = 6 # number of desired dynamic modes of vibration\n Mmethod = 1 # 1: subspace Jacobi 2: Stodola\n lump = 0 # 0: consistent mass ... 1: lumped mass matrix\n tol = 1e-5 # mode shape tolerance\n shift = 0.0 # shift value ... for unrestrained structures\n \n myframe.enableDynamics(nM, Mmethod, lump, tol, shift)\n\n # ---DEBUGGING---\n #myframe.write('debug.3dd') # For debugging\n\n # ---RUN ANALYSIS---\n try:\n displacements, forces, reactions, internalForces, mass, modal = myframe.run()\n except:\n bad_input()\n return\n \n # --OUTPUTS--\n nE = nelem.size\n iCase = 0\n unknowns['plot_matrix'] = plotMat\n \n if ncolumn > 0:\n # Buoyancy assembly from incremental calculations above\n V_pontoon = F_truss/rhoWater/gravity\n z_cb = z_cb[-1] / F_truss if F_truss > 0.0 else 0.0\n unknowns['pontoon_displacement'] = V_pontoon\n unknowns['pontoon_center_of_buoyancy'] = z_cb\n\n # Sum up mass and compute CofG. 
Frame3DD does mass, but not CG\n # TODO: Subtract out extra pontoon length that overlaps with column radii\n ind = baseEID-1\n m_total = Ax[:ind] * rho * elemL[:ind]\n m_pontoon = m_total.sum() #mass.struct_mass\n cg_pontoon = np.sum( m_total[:,np.newaxis] * elemCoG[:ind,:], axis=0 ) / m_total.sum()\n unknowns['pontoon_mass'] = m_pontoon\n unknowns['pontoon_cost'] = coeff * m_pontoon\n unknowns['pontoon_center_of_mass'] = cg_pontoon[-1]\n else:\n V_pontoon = z_cb = m_pontoon = 0.0\n cg_pontoon = np.zeros(3)\n \n # natural frequncies\n unknowns['structural_frequencies'] = np.array( modal.freq )\n\n # deflections due to loading (from cylinder top and wind/wave loads)\n unknowns['top_deflection'] = displacements.dx[iCase, towerEndID-1] # in yaw-aligned direction\n\n # Summary of mass and volumes\n unknowns['substructure_mass'] = m_pontoon + m_base.sum() + ncolumn*m_ballast.sum()\n unknowns['structural_mass'] = mass.total_mass\n unknowns['total_displacement'] = V_base.sum() + ncolumn*V_ballast.sum() + V_pontoon\n\n # Find cb (center of buoyancy) for whole system\n z_cb = (V_base.sum()*z_cb_base + ncolumn*V_ballast.sum()*z_cb_ballast + V_pontoon*z_cb) / unknowns['total_displacement']\n unknowns['z_center_of_buoyancy'] = z_cb\n\n # Find cg (center of gravity) for whole system\n unknowns['substructure_center_of_mass'] = (ncolumn*m_ballast.sum()*cg_ballast + m_base.sum()*cg_base +\n m_pontoon*cg_pontoon) / unknowns['substructure_mass']\n unknowns['center_of_mass'] = (m_rna*cg_rna + m_tower.sum()*cg_tower +\n unknowns['substructure_mass']*unknowns['substructure_center_of_mass']) / mass.total_mass\n Fsum = np.zeros(3)\n Msum = np.zeros(3)\n for k in xrange(len(rid)):\n idx = reactions.node[iCase, k] - 1\n pk = np.array([xnode[idx], ynode[idx], znode[idx]])\n rk = pk - unknowns['center_of_mass']\n F = -1*np.array([reactions.Fx[iCase, k], reactions.Fy[iCase, k], reactions.Fz[iCase, k]])\n M = -1*np.array([reactions.Mxx[iCase, k], reactions.Myy[iCase, k], reactions.Mzz[iCase, k]])\n Fsum += F\n Msum += M + np.cross(rk,F)\n unknowns['total_force'] = -1.0 * np.array([reactions.Fx.sum(), reactions.Fy.sum(), reactions.Fz.sum()])\n unknowns['total_moment'] = -1.0 * np.array([reactions.Mxx.sum(), reactions.Myy.sum(), reactions.Mzz.sum()])\n\n # shear and bending (convert from local to global c.s.)\n Nx = forces.Nx[iCase, 1::2]\n Vy = forces.Vy[iCase, 1::2]\n Vz = forces.Vz[iCase, 1::2]\n\n Tx = forces.Txx[iCase, 1::2]\n My = forces.Myy[iCase, 1::2]\n Mz = forces.Mzz[iCase, 1::2]\n\n # Compute axial and shear stresses in elements given Frame3DD outputs and some geomtry data\n # Method comes from Section 7.14 of Frame3DD documentation\n # http://svn.code.sourceforge.net/p/frame3dd/code/trunk/doc/Frame3DD-manual.html#structuralmodeling\n M = np.sqrt(My*My + Mz*Mz)\n sigma_ax = Nx/Ax - M/S\n sigma_sh = np.sqrt(Vy*Vy + Vz*Vz)/As + Tx/C\n\n # Extract pontoon for stress check\n idx = range(baseEID-1)\n npon = len(idx)\n if len(idx) > 0:\n qdyn_pontoon = np.max( np.abs( np.r_[params['base_column_qdyn'], params['auxiliary_column_qdyn']] ) )\n sigma_ax_pon = sigma_ax[idx]\n sigma_sh_pon = sigma_sh[idx]\n sigma_h_pon = util.hoopStress(2*R_od_pontoon, t_wall_pontoon, qdyn_pontoon) * np.ones(sigma_ax_pon.shape)\n\n unknowns['pontoon_stress'][:npon] = util.vonMisesStressUtilization(sigma_ax_pon, sigma_h_pon, sigma_sh_pon,\n gamma_f*gamma_m*gamma_n, sigma_y)\n \n # Extract tower for Eurocode checks\n idx = towerEID-1 + np.arange(R_od_tower.size, dtype=np.int32)\n L_reinforced = params['tower_buckling_length'] * 
np.ones(idx.shape)\n sigma_ax_tower = sigma_ax[idx]\n sigma_sh_tower = sigma_sh[idx]\n qdyn_tower,_ = nodal2sectional( params['tower_qdyn'] )\n sigma_h_tower = util.hoopStressEurocode(z_tower, 2*R_od_tower, t_wall_tower, L_reinforced, qdyn_tower)\n\n unknowns['tower_stress'] = util.vonMisesStressUtilization(sigma_ax_tower, sigma_h_tower, sigma_sh_tower,\n gamma_f*gamma_m*gamma_n, sigma_y)\n\n sigma_y = sigma_y * np.ones(idx.shape)\n unknowns['tower_shell_buckling'] = util.shellBucklingEurocode(2*R_od_tower, t_wall_tower, sigma_ax_tower, sigma_h_tower, sigma_sh_tower,\n L_reinforced, modE[idx], sigma_y, gamma_f, gamma_b)\n\n tower_height = z_tower[-1] - z_tower[0]\n unknowns['tower_global_buckling'] = util.bucklingGL(2*R_od_tower, t_wall_tower, Nx[idx], M[idx], tower_height, modE[idx], sigma_y, gamma_f, gamma_b)\n # TODO: FATIGUE\n # Base and ballast columns get API stress/buckling checked in Column Group because that takes into account stiffeners\n\n\n\n\nclass TrussIntegerToBoolean(Component):\n def __init__(self):\n super(TrussIntegerToBoolean,self).__init__()\n self.add_param('cross_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the bottom of the central base to the tops of the outer ballast columns')\n self.add_param('lower_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their bottoms')\n self.add_param('upper_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their tops')\n self.add_param('lower_ring_pontoons_int', val=1, desc='Inclusion of pontoons that ring around outer ballast columns at their bottoms')\n self.add_param('upper_ring_pontoons_int', val=1, desc='Inclusion of pontoons that ring around outer ballast columns at their tops')\n self.add_param('outer_cross_pontoons_int', val=1, desc='Inclusion of pontoons that ring around outer ballast columns at their tops')\n\n self.add_output('cross_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the bottom of the central base to the tops of the outer ballast columns', pass_by_obj=True)\n self.add_output('lower_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their bottoms', pass_by_obj=True)\n self.add_output('upper_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central base to the outer ballast columns at their tops', pass_by_obj=True)\n self.add_output('lower_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their bottoms', pass_by_obj=True)\n self.add_output('upper_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their tops', pass_by_obj=True)\n self.add_output('outer_cross_pontoons', val=True, desc='Inclusion of pontoons that ring around outer ballast columns at their tops', pass_by_obj=True)\n\n def solve_nonlinear(self, params, unknowns, resids):\n unknowns['cross_attachment_pontoons'] = (int(params['cross_attachment_pontoons_int']) == 1)\n unknowns['lower_attachment_pontoons'] = (int(params['lower_attachment_pontoons_int']) == 1)\n unknowns['upper_attachment_pontoons'] = (int(params['upper_attachment_pontoons_int']) == 1)\n unknowns['lower_ring_pontoons'] = (int(params['lower_ring_pontoons_int']) == 1)\n unknowns['upper_ring_pontoons'] = (int(params['upper_ring_pontoons_int']) == 1)\n unknowns['outer_cross_pontoons'] = 
(int(params['outer_cross_pontoons_int']) == 1)\n\n \n# -----------------\n# Assembly\n# -----------------\n\nclass FloatingLoading(Group):\n\n def __init__(self, nSection, nFull):\n super(FloatingLoading, self).__init__()\n \n # Independent variables that are unique to TowerSE\n self.add('base_pontoon_attach_lower', IndepVarComp('base_pontoon_attach_lower', 0.0), promotes=['*'])\n self.add('base_pontoon_attach_upper', IndepVarComp('base_pontoon_attach_upper', 0.0), promotes=['*'])\n self.add('pontoon_outer_diameter', IndepVarComp('pontoon_outer_diameter', 0.0), promotes=['*'])\n self.add('pontoon_wall_thickness', IndepVarComp('pontoon_wall_thickness', 0.0), promotes=['*'])\n self.add('outer_cross_pontoons_int', IndepVarComp('outer_cross_pontoons_int', 1), promotes=['*'])\n self.add('cross_attachment_pontoons_int', IndepVarComp('cross_attachment_pontoons_int', 1), promotes=['*'])\n self.add('lower_attachment_pontoons_int', IndepVarComp('lower_attachment_pontoons_int', 1), promotes=['*'])\n self.add('upper_attachment_pontoons_int', IndepVarComp('upper_attachment_pontoons_int', 1), promotes=['*'])\n self.add('lower_ring_pontoons_int', IndepVarComp('lower_ring_pontoons_int', 1), promotes=['*'])\n self.add('upper_ring_pontoons_int', IndepVarComp('upper_ring_pontoons_int', 1), promotes=['*'])\n self.add('pontoon_cost_rate', IndepVarComp('pontoon_cost_rate', 0.0), promotes=['*'])\n self.add('connection_ratio_max', IndepVarComp('connection_ratio_max', 0.0), promotes=['*'])\n\n # All the components\n self.add('wind', PowerWind(nFull), promotes=['z0','Uref','shearExp','zref'])\n self.add('windLoads', CylinderWindDrag(nFull), promotes=['cd_usr','beta'])\n self.add('intbool', TrussIntegerToBoolean(), promotes=['*'])\n self.add('frame', FloatingFrame(nFull), promotes=['*'])\n \n # Connections for geometry and mass\n self.connect('wind.z', ['windLoads.z', 'tower_z_full'])\n self.connect('windLoads.d', ['tower_d_full'])\n self.connect('wind.U', 'windLoads.U')\n\n # connections to distLoads1\n self.connect('windLoads.windLoads:Px', 'tower_Px')\n self.connect('windLoads.windLoads:Py', 'tower_Py')\n self.connect('windLoads.windLoads:Pz', 'tower_Pz')\n self.connect('windLoads.windLoads:qdyn', 'tower_qdyn')\n\n","sub_path":"src/floatingse/floating_loading.py","file_name":"floating_loading.py","file_ext":"py","file_size_in_byte":48443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568530916","text":"#!/usr/bin/env python\n\nfrom mpi4py import MPI\nimport os\nimport gzip\nimport numpy\nimport sys\n\nfinished = 0\nidList = []\nfileList = []\nmainList = []\n#taskList = []\nmain_dict = {}\nESPNTop20 = []\nAPTop20 = []\ntopJobs = []\n\n\ndef insertAPTop20(data):\n if len(APTop20) < 20:\n APTop20.append(data)\n APTop20.sort(key=lambda l:l[1], reverse=True)\n else: \n if data[1] > APTop20[19][1]:\n del APTop20[19]\n APTop20.append(data)\n APTop20.sort(key=lambda l:l[1], reverse=True)\n else: pass\n \n \n\ndef parse_tarfile(fname):\n#for filename in os.listdir(path):\n with gzip.open(fname, 'rt') as fin:\n for line in fin:\n fields = line.strip().split(',')\n #print(fields)\n if fields[2] not in idList:\n idList.append(fields[2])\n mainList.append([fields[2],fields[0], int(fields[3])])\n\ndef parse_taskFile(fname):\n with gzip.open(fname, 'rt') as fin:\n for line in fin:\n fields = line.strip().split(',')\n #print(fields)\n if fields[2] in topJobs:\n for entry in ESPNTop20:\n if entry[0] == fields[2]:\n entry[3] = entry[3] + 1\n insertAPTop20([fields[2], float( 
fields[5]) * 300.0, 0])\n                    #taskList.append( [fields[2], float( fields[5]) * 300.0 ] )\n    \n    \ndef unique_list(somelist):\n    temp = set()\n    for item in somelist:\n        if item in temp:\n            return False\n        temp.add(item)\n    return True\n\ndef merge_list(somelist):\n    temp = set()\n    for item in somelist:\n        if item not in temp:\n            temp.add(item)\n    return(temp)\n\n\ndef create_main_dict(mainList):\n\n    for i in mainList:\n        if i[0] not in main_dict:\n            main_dict[i[0]] = {}\n            main_dict[i[0]][i[2]] = [i[1]]\n        elif i[2] not in main_dict[i[0]]:\n            main_dict[i[0]][i[2]] = [i[1]]\n        else:\n            main_dict[i[0]][i[2]].append(i[1])\n\n\ndef insertTop20(data):\n    if len(ESPNTop20) < 20:\n        ESPNTop20.append(data)\n        ESPNTop20.sort(key=lambda l:l[1], reverse=True)\n    else: \n        if data[1] > ESPNTop20[19][1]:\n            del ESPNTop20[19]\n            ESPNTop20.append(data)\n            ESPNTop20.sort(key=lambda l:l[1], reverse=True)\n        else: pass\n\n    \n    \ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\nroot = 0\n\n\ndata = []\npath = \"/scratch3/ggettin/asg4/job_events\"\nfor filename in os.listdir(path):\n    data.append(os.path.join(path,filename))\n    \nfor i,gzfile in enumerate(data):\n    if i%size!=rank: continue\n    parse_tarfile(gzfile)\n\n\nl = comm.gather(idList, root=0)\nif l is not None:\n    for n in l:\n        for x in n:\n            idList.append(x)\nidList = merge_list(idList)\n\nll = comm.gather(mainList, root=0)\nif ll is not None:\n    for nn in ll:\n        for xx in nn:\n            mainList.append(xx)\n\n\nif rank == 0:\n    pass\nelse:\n    idList = []\n\ncomm.barrier()\n    \nif rank == 0:\n    create_main_dict(mainList) \n    \n\"\"\"\n    Array of size 20 contains jobID, runtime and eventType\n    \n    \n    \n    For each unique job id, (idList)\n    calculate the time (using dictionary indexed by idList):\n        if 0 or 300, ignore\n        if among 20 longest, throw all data in array\n        \n    \n\"\"\"\nl = comm.gather(idList, root=0)\nll = comm.gather(mainList, root=0)\nlll = comm.gather(main_dict, root=0)\nop = -1\nop2 = -1\nfor job in idList:\n    for eventType in main_dict[job]:\n        if eventType == 0:\n            op = int(main_dict[job][eventType][0])\n            #print(op)\n        elif eventType >= 2 and eventType <= 6:\n            op2 = int(main_dict[job][eventType][0])\n            #print(op2)\n            newEvent = eventType\n    if op == 0 or op == sys.maxsize or op2 == 0 or op2 == sys.maxsize:\n        continue\n    else:\n        convTime = op2 - op\n        insertTop20([job, convTime, newEvent, 0])\n        continue\n    \n\ncomm.barrier()\n\nv = comm.gather(ESPNTop20, root=0)\n\nif rank == 0:\n    for entry in ESPNTop20:\n        topJobs.append(entry[0])\n\n\nvl = comm.gather(topJobs, root = 0)\n    \n    \n#path2 = \"/scratch3/ggettin/asg4/task_usage\"\n#data2 = []\n\n#for filename in os.listdir(path2):\n#    data2.append(os.path.join(path2,filename))\n    \n#for i,gzfile in enumerate(data2):\n#    if i%size!=rank: continue\n#    parse_taskFile(gzfile)\n\n\n#lv = comm.gather(APTop20, root=0) \n\n\n#if rank == 0:\n    \n#    APTop20.sort(key=lambda l:l[1], reverse=True)\n    \n#    for i in range(0, 20):\n#        for eventType in main_dict[APTop20[i][0]]:\n#            if eventType >= 2 and eventType <= 6:\n#                if eventType > APTop20[i][2]:\n#                    APTop20[i][2] = eventType\n#\n\nif rank == 0: \n    print(\"\\n\")\n    \n    print(\"JobID\\t\\tTime\\t\\t\\tEvent\\tCount\") \n    for i in range(0,20):\n        print(\"%s\\t%s\\t\\t%s\\t%s\"%(ESPNTop20[i][0], ESPNTop20[i][1], ESPNTop20[i][2], ESPNTop20[i][3]))\n    \n\n    print(\"\\n\")\n    \n#    print(\"JobID\\t\\tCPUTime\\t\\tFinalStat\") \n#    for i in range(0,20):\n#        if APTop20[i][2] == 0:\n#            print(\"%s\\t%.1f\\t\\t%s\"%(APTop20[i][0], APTop20[i][1], \"None\"))\n#        else:\n#            print(\"%s\\t%.1f\\t\\t%s\"%(APTop20[i][0], APTop20[i][1], 
APTop20[i][2]))\n#\ncomm.barrier()\n\n#if rank == 0:\n#    print(\"\\nExecution Time:\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"167969017","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\n\r\ndef process_data(ske, ske_v, t, ai, si ,ri):\r\n    # t: length of the local segment\r\n    c=0\r\n    segments_train=np.zeros([0,6],np.int)\r\n    #segments=np.zeros([1,6],np.int)\r\n    for a in ai:\r\n        for s in si:\r\n            for r in ri:\r\n                if ske_v[a,s,r]==1:\r\n                    ske_tmp=ske[a,s,r]\r\n                    length=ske_tmp.shape[2]\r\n                    for step in range(0,1):\r\n                        for k in range(0,length):\r\n                            if (step+1)*(t-1)+k < length:\r\n                                # record one segment as [c, a, s, r, step, k]\r\n                                segments_train=np.append(segments_train, np.array([[c,a,s,r,step,k]]), axis=0)\r\n                                c=c+1\r\n    return segments_train\r\n\r\n# getS --> use segment_index to index into ske and get the clipped skeleton features\r\ndef getS(t,segment_index,ske):\r\n    # number of rows of segment_index, i.e. the number of samples\r\n    segment_index_num=len(segment_index) \r\n    # 3 and 20 follow the ske data format: 3 for the xyz coordinates, 20 for the joints\r\n    action_seg=np.zeros([segment_index_num,3,20,t])\r\n    # loop over each sample i\r\n    for i in range(0,segment_index_num):\r\n        # time indices of the segment to be extracted\r\n        seg_index=np.zeros([t],np.int)\r\n        for w in range(0,t):\r\n            seg_index[w]=segment_index[i,5]+segment_index[i,4]*w+w\r\n        action_seg[i,:,:,:]=ske[segment_index[i,1],segment_index[i,2],segment_index[i,3]][:,:,seg_index] \r\n    return action_seg\r\n\r\n# getImages --> turn the action_seg obtained from getS into image representations\r\ndef getImages(action_seg, t):\r\n    n_a=action_seg.shape[0]\r\n    #imgs=np.zeros([n,224,224])\r\n    imgs=np.zeros([n_a,112,112])\r\n    for i in range(0,n_a):\r\n        imgs[i,:,:]=getImage(action_seg[i,:,:,:], t)\r\n    return imgs\r\n    \r\n# getImage --> turn one action_seg sample obtained from getS into an image representation\r\ndef getImage(action_seg, t):\r\n    import math\r\n    from scipy.misc import imresize\r\n    joint_pairs=[[3,6],[2,3],[19,2],[7,0],[8,1],[9,7],[10,8],[13,4],[14,5],[15,13],[16,14]]\r\n    n=len(joint_pairs)\r\n    jnts_angle=np.zeros([np.int(n*(n-1)/2),t])\r\n    for s in range(0,t):\r\n        k=0\r\n        for i in range(0,n):\r\n            for j in range(i,n):\r\n                if i!=j: \r\n                    bone_child=action_seg[:,joint_pairs[i][0],s]-action_seg[:,joint_pairs[i][1],s]\r\n                    bone_child=bone_child/np.sqrt(np.dot(bone_child,bone_child))\r\n                    bone_parent=action_seg[:,joint_pairs[j][0],s]-action_seg[:,joint_pairs[j][1],s]\r\n                    bone_parent=bone_parent/np.sqrt(np.dot(bone_parent,bone_parent))\r\n                    joint_dot=np.dot(bone_child,bone_parent)\r\n                    jnts_angle[k,s]=math.acos(joint_dot)/math.pi # convert to an angle and normalize\r\n                    k=k+1\r\n#    img=jnts_angle/255\r\n#    img=imresize(jnts_angle,(2*self.n,2*self.t))/255\r\n    img=imresize(jnts_angle,(112,112))/255\r\n    #print(img)\r\n    return img\r\n","sub_path":"utilize_functions2.py","file_name":"utilize_functions2.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"302485485","text":"\"\"\"\nProblem: 82 Remove Duplicates from Sorted List 2\nLevel: Medium\nTags: \nTechnique:\nStatus:\n\nProblem Description: \n    Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\nFor example,\nGiven 1->2->3->3->4->4->5, return 1->2->5.\nGiven 1->1->1->2->3, return 2->3. 
\n\nLesson:\n\"\"\"\n\n\nclass Solution(object):\n\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n # trivial\n if (head == None) or (head.next == None):\n return head\n \n head0 = ListNode(None)\n head0.next = head\n prev = head0\n curr = prev.next\n next = prev.next.next\n print(curr.val)\n print(next.val)\n\n while next != None:\n if curr.val == next.val:\n count = 0\n while curr.val == next.val:\n count = count + 1\n next = next.next\n prev.next = next\n curr = prev \n else:\n prev = prev.next\n curr = curr.next\n next = next.next\n return head0.next\n\n\n\n\n#\n# test code for terminal runs\n#\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef addNode_head(node_h, val):\n \"\"\"\n adds node to head\n :type node_h: ListNode (original head)\n :type val: ListNode (new head)\n :rtype : ListNode\n :method calls: NONE\n \"\"\"\n nn = ListNode(val)\n nn.next = node_h\n return nn\n\nif __name__== \"__main__\":\n \"\"\"\n test code \n \"\"\" \n\n def print_test(ans_e,ans_o,name):\n \"\"\"\n prints tests in standardized format\n :type ans_e: expected answer in printable format\n :type ans_o: observed answer in printable format\n \"\"\"\n print('~'*40)\n if ans_o != ans_e:\n error = 1\n print(\"########## FAIL ##########\")\n print(\"TEST: {} :: Status: FAIL\".format(name))\n else:\n error = 0\n print(\"TEST: {} :: Status: PASS\".format(name))\n print('TEST: {} :: Expected: {}'.format(method, ans_e))\n print('TEST: {} :: Observed: {}'.format(method, ans_o))\n return error\n\n\n err = 0\n sol = Solution()\n\n # test 1\n method = 'simple deletion; easy backend'\n vals = [1,1,1,2,3]\n head0 = ListNode(None)\n head = head0\n for v in vals:\n head.next = ListNode(v)\n head = head.next\n head = head0.next\n expected_answer = [2,3]\n output = sol.deleteDuplicates(head)\n observed_answer = []\n while output != None:\n observed_answer.append(output.val)\n output = output.next\n err = err + print_test(expected_answer, observed_answer,method)\n\n # test 2 \n method = 'delete all'\n vals = [1,1]\n head0 = ListNode(None)\n head = head0\n for v in vals:\n head.next = ListNode(v)\n head = head.next\n head = head0.next\n expected_answer = []\n output = sol.deleteDuplicates(head)\n observed_answer = []\n while output != None:\n observed_answer.append(output.val)\n output = output.next\n err = err + print_test(expected_answer, observed_answer,method)\n\n\n\n # Final pass/fail readout\n print('')\n if err == 0:\n print('PASSED ALL TESTS')\n else:\n print('FAILED A TEST: DEBUG!!!')\n \n","sub_path":"Algorithms_LinkedLists/Code/82_RemoveDuplicatedFromSortedList/v0_save2.py","file_name":"v0_save2.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"589478237","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Musician, MusicianComment, Event, EventComment\nfrom .forms import MusicianForm, EventForm, DonationForm, MusicianCommentForm, EventCommentForm\nfrom users.models import User\nfrom django.views import View\nfrom django.contrib.auth.decorators import login_required \nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.http import JsonResponse\nfrom django.contrib.postgres.search import SearchVector\nfrom random import shuffle\nimport json\nimport requests\nimport time\nimport 
datetime\nimport os\nfrom django import forms\nfrom django.views.decorators.csrf import csrf_exempt\nfrom geopy.geocoders import MapBox\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom project.settings import EMAIL_HOST_USER\nfrom django.utils.translation import gettext\nfrom datetime import datetime, timedelta\n\n\n\n# Create your views here.\nclass About(View):\n def get(self, request):\n return render(request, \"core/about.html\")\n\n\nclass Homepage(View):\n def get(self, request):\n events = Event.objects.all().order_by(\"date_time\")\n live_events = Event.objects.all().filter(in_progress=True)\n try:\n code = request.GET['code']\n if code:\n state = json.loads(request.GET['state'])\n eventId = state['eventId']\n filtered = Event.objects.filter(pk=eventId)\n if filtered.exists():\n event = filtered.first()\n start = {\n \"dateTime\": time.mktime(event.date_time.timetuple())\n }\n requests.post('http://localhost:3000/calendar', json={\n \"code\": code,\n \"event\": {\n \"summary\": event.title,\n \"start\": start,\n \"end\": start\n }\n })\n return redirect(to='event', pk=eventId)\n except:\n pass\n return render(request, 'core/homepage.html', {'events': events, 'live_events': live_events})\n\nclass HomepageRandom(View):\n def get(self, request):\n events = list(Event.objects.all())\n shuffle(events)\n return render(request, 'core/homepage_search.html', {'events': events, 'page_header': \"Random Order\"})\n\nclass HomepageInProgress(View):\n def get(self, request):\n events = Event.objects.all().filter(in_progress=True)\n return render(request, 'core/homepage_search.html', {'events': events, 'page_header': \"Live Now\"})\n\nclass HomepagePastEvents(View):\n def get(self, request):\n events = Event.objects.all().order_by(\"-date_time\")\n return render(request, 'core/homepage_search.html', {'events': events, 'page_header': \"Past Events\", \"past_events\" : True})\n\n\n\nclass EventPage(View):\n def get(self, request, pk):\n event = get_object_or_404(Event, pk=pk)\n if request.user.is_authenticated:\n user_saved = request.user.is_save_event(event)\n else: \n user_saved = None\n comment_form = EventCommentForm()\n musician = event.owner\n # Passing data through to react via json. 
MUST USE DOUBLE QUOTES\n return render(request, 'core/event.html', {\n 'data': json.dumps({\n \"eventId\": pk,\n \"ownerId\": event.owner.user.id,\n \"userId\": request.user.id,\n \"in_progress\": event.in_progress,\n \"port\": os.getenv('PORT') if os.getenv('PORT') else 3000\n }), \n \"event\": event,\n 'comment_form': comment_form,\n 'musician': musician,\n 'user_saved': user_saved,\n })\n\n def post(self, request, pk): \n event = get_object_or_404(Event, pk=pk)\n user_saved = request.user.is_save_event(event)\n events = Event.objects.all()\n comment_form = EventCommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.event = event\n new_comment.author = request.user\n new_comment.save()\n return redirect(to='event', pk=pk)\n else:\n comment_form = EventCommentForm()\n \n \n return render(request, 'core/event.html', {'event': event, 'comment_form': comment_form})\n\n\nclass AddEvent(View):\n\n def get(self, request, musician_pk):\n form_title = gettext(\"Add an Event:\")\n musician = get_object_or_404(Musician, pk=musician_pk)\n if musician.user == request.user:\n form = EventForm()\n return render(request, 'core/event_add_edit.html', {\"form\": form, \"musician\": musician, \"form_title\": form_title, \"edit\": False})\n return redirect(to=\"show-musician\", musician_pk=musician_pk)\n\n def post(self, request, musician_pk):\n musician = get_object_or_404(Musician, pk=musician_pk)\n if musician.user == request.user:\n form = EventForm(data=request.POST, files=request.FILES)\n print(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.owner = musician\n event.save()\n return redirect(to=\"event\", pk=event.pk)\n return redirect(to=\"show-musician\", musician_pk=musician_pk)\n return redirect(to=\"show-musician\", musician_pk=musician_pk)\n\n\ndef edit_event(request, event_pk):\n form_title = \"Edit Event:\"\n event = get_object_or_404(Event, pk=event_pk)\n musician = event.owner\n if request.user == musician.user:\n if request.method == \"POST\":\n form = EventForm(instance=event, data=request.POST, files=request.FILES)\n if form.is_valid():\n event = form.save(commit=False)\n event.owner = musician\n event = form.save()\n return redirect(to=\"event\", pk=event.pk)\n else:\n form = EventForm(instance=event)\n return render(\n request,\n \"core/event_add_edit.html\",\n {\"form\": form, \"event\": event, \"musician\": musician, \"form_title\": form_title, \"edit\": True} \n )\n return redirect(to=\"show-musician\", musician_pk=event.owner.pk)\n\n\nclass SearchEvents(View):\n def get(self, request):\n query = request.GET.get('query')\n if query is not None:\n events = Event.objects.annotate(search=SearchVector(\"title\", \"description\", \"owner__name\", \"genre\")).filter(search=query).distinct(\"id\").order_by(\"-pk\")\n else:\n events = None\n return render(request, 'core/homepage.html', {\"events\": events, \"query\": query or \"\"})\n\n\nclass AddMusicianInfo(View):\n title = \"Add Musician Info:\"\n\n def get(self, request, user_pk):\n if get_object_or_404(User, pk=user_pk) == request.user:\n form = MusicianForm()\n return render(request, 'core/musician_form.html', {\"form\": form, \"form_title\": self.title, \"edit\": False})\n return redirect(to=\"homepage\")\n\n def post(self, request, user_pk):\n if get_object_or_404(User, pk=user_pk) == request.user:\n form = MusicianForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n mapbox_client = MapBox(settings.MAPBOX_API_KEY)\n 
musician = form.save(commit=False)\n result = mapbox_client.geocode(musician.city) \n musician.latitude = result.latitude\n musician.longitude = result.longitude\n musician.user = request.user\n musician.save()\n return redirect(to='show-musician', musician_pk=musician.pk)\n return redirect(to=\"homepage\")\n return redirect(to=\"homepage\")\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\nclass getGeocode(View):\n def post(self, request):\n mapbox_client = MapBox(settings.MAPBOX_API_KEY)\n location_json = json.loads(request.body)\n location = location_json[\"address\"]\n result = mapbox_client.geocode(location)\n if result is None: \n return JsonResponse({\"valid\": False})\n else: \n latitude = result.latitude\n longitude = result.longitude\n return JsonResponse({\"valid\": True, \"latitude\": latitude, \"longitude\": longitude})\n\n\nclass ShowMusician(View):\n def get(self, request, musician_pk):\n musician = get_object_or_404(Musician, pk=musician_pk)\n events = list(musician.events.all().order_by(\"date_time\"))\n empty_list = []\n for event in events:\n adjusted_time = event.date_time - timedelta(hours=4)\n empty_list.append({\"start\": adjusted_time.strftime('%Y-%m-%dT%H:%M'), \"url\": f'/event/{event.pk}'})\n if request.user.is_authenticated:\n user_favorite = request.user.is_favorite_musician(musician)\n else: \n user_favorite = None\n comment_form = MusicianCommentForm()\n return render(request, 'core/show_musician.html', {\n 'events': json.dumps(empty_list), \n \"musician\": musician,'comment_form': comment_form, 'user_favorite': user_favorite})\n \n def post(self, request, musician_pk): \n musician = get_object_or_404(Musician, pk=musician_pk)\n user_favorite = request.user.is_favorite_musician(musician)\n comment_form = MusicianCommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.musician = musician\n new_comment.author = request.user\n new_comment.save()\n send_mail(\n 'You have received a new comment!',\n f'''{new_comment.author} has commented with the following:\\n\\n{new_comment.message}\n \\n\\nYou can view all of your comments here: http://127.0.0.1:3000/musician/{musician_pk}''',\n 'livemusiccomments@gmail.com',\n [new_comment.musician.user.email],\n fail_silently=False,\n )\n return redirect(to='show-musician', musician_pk= musician_pk)\n else:\n comment_form = MusicianCommentForm()\n \n \n return render(request, 'core/show_musician.html', {'musician': musician, 'comment_form': comment_form, 'user_favorite': user_favorite})\n\n\nclass AddDonationInfo(View):\n def get(self, request, musician_pk):\n print(\"post attempt\") \n musician = get_object_or_404(Musician, pk=musician_pk)\n if musician.user == request.user:\n form = DonationForm(instance=musician)\n return render(request, 'core/donation_form.html', {\"form\": form , \"musician\": musician})\n return redirect(to=\"homepage\")\n\n def post(self, request, musician_pk):\n musician = get_object_or_404(Musician, pk=musician_pk)\n print(\"post attempt\")\n # if musician.user == request.user:\n form = DonationForm(instance=musician, data=request.POST, files=request.FILES)\n if form.is_valid():\n musician = form.save(commit=False)\n musician.user = request.user\n musician.save()\n return redirect(to='show-musician', musician_pk=musician_pk)\n # return redirect(to=\"homepage\")\n return redirect(to=\"homepage\")\n\n\ndef donation_tutorial (request):\n return render(request, 'core/donation_tutorial.html')\n\n\n@method_decorator(csrf_exempt, 
name=\"dispatch\")\nclass FavoriteMusician(View):\n def get(self,request):\n user = request.user\n favorites = user.favorite_musician.all()\n saved_events = user.save_event.all()\n events = list(saved_events)\n empty_list = []\n for event in events:\n adjusted_time = event.date_time - timedelta(hours=4)\n empty_list.append({\"start\": adjusted_time.strftime('%Y-%m-%dT%H:%M'), \"url\": f'/event/{event.pk}'})\n return render(request, \"core/favorite_musicians.html\", {\n 'events': json.dumps(empty_list),\n \"user\":user, \"favorites\":favorites, \"saved_events\":saved_events,})\n \n def post(self, request, musician_pk):\n musician = get_object_or_404(Musician, pk=musician_pk)\n user = request.user\n if musician in user.favorite_musician.all():\n user.favorite_musician.remove(musician)\n return JsonResponse({\"favorite\": False})\n\n else:\n user.favorite_musician.add(musician)\n return JsonResponse({\"favorite\": True})\n\n\ndef edit_musician(request, musician_pk):\n form_title = \"Edit Profile:\"\n musician = get_object_or_404(Musician, pk=musician_pk)\n if request.user == musician.user:\n if request.method == \"POST\":\n form = MusicianForm(instance=musician, data=request.POST, files=request.FILES)\n if form.is_valid():\n musician = form.save(commit=False)\n musician.owner = musician\n musician = form.save()\n return redirect(to=\"show-musician\", musician_pk=musician.user.pk)\n else:\n form = MusicianForm(instance=musician)\n return render(\n request,\n \"core/musician_form.html\",\n {\"form\": form, \"musician\": musician, \"form_title\": form_title, \"edit\": True} \n )\n return redirect(to=\"show-musician\", musician_pk=musician.user.pk)\n\n\ndef default_map(request):\n mapbox_access_token = 'pk.eyJ1IjoicmthcnVuYXJhdG5lIiwiYSI6ImNrZWFib21lYTAzYnkyc283YnQxNXcwNncifQ.sAFQ90D6ZledgFX1gaoNxw'\n musician_info = []\n musicians = Musician.objects.all()\n for musician in musicians:\n if musician.latitude:\n musician_info.append({\"name\": musician.name, \n \"latitude\": musician.latitude, \n \"longitude\": musician.longitude, \n \"pk\": musician.pk,\n \"hasUpcoming\": musician_has_upcoming(musician),\n \"thumb\": musician.thumbnail.url,\n \"bio\": musician.bio })\n return render(request, 'core/map.html', \n { 'mapbox_access_token': mapbox_access_token, \"musician_info\": musician_info })\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\nclass SaveEvent(View):\n def post(self, request, event_pk):\n event = get_object_or_404(Event, pk=event_pk)\n user = request.user\n if event in user.save_event.all():\n user.save_event.remove(event)\n return JsonResponse({\"saved\": False})\n\n else:\n user.save_event.add(event)\n return JsonResponse({\"saved\": True})\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\nclass SaveEventComment(View):\n def post(self, request, event_pk):\n event = get_object_or_404(Event, pk=event_pk)\n user = request.user\n message_json = json.loads(request.body)\n message = message_json[\"message\"]\n new_comment = EventComment(message=message, author=user, event=event)\n new_comment.save()\n html = f'
{message}' \\\n            f' by {user.username}
'\n return JsonResponse({\"html\": html})\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\nclass SaveMusicianComment(View):\n def post(self, request, musician_pk):\n musician = get_object_or_404(Musician, pk=musician_pk)\n user = request.user\n message_json = json.loads(request.body)\n message = message_json[\"message\"]\n new_comment = MusicianComment(message=message, author=user, musician=musician)\n new_comment.save()\n html = f'
{message}' \\\n            f' by {user.username}
'\n return JsonResponse({\"html\": html})\n\n\ndef musician_has_upcoming(musician):\n events = musician.events.all()\n for event in events:\n if event.is_upcoming:\n return True\n return False\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11046158","text":"#!/usr/bin/env python\n\n\n\"\"\"\nhttp://practice.geeksforgeeks.org/problems/check-binary-string/0\n\"\"\"\n\ndef check_string(binary_number):\n \"\"\"\n start | valid_no_1 | valid_1 | valid_0 | invalid\n start (0) -> valid_no_1\n start (1) -> valid_1\n valid_no_1 (0) -> valid_no_1\n valid_no_1 (1) -> valid_1\n valid_1 (1) -> valid_1\n valid_1 (0) -> valid_0\n valid_0 (0) -> valid_0\n valid_0 (1) -> invalid\n \n \"\"\"\n state = 'start'\n for digit in binary_number:\n if state == 'start' and digit == '0':\n state = 'valid_no_1'\n elif state == 'start' and digit == '1':\n state = 'valid_1'\n elif state == 'valid_no_1' and digit == '0':\n state = 'valid_no_1'\n elif state == 'valid_no_1' and digit == '1':\n state = 'valid_1'\n elif state == 'valid_1' and digit == '0':\n state = 'valid_0'\n elif state == 'valid_1' and digit == '1':\n state = 'valid_1'\n elif state == 'valid_0' and digit == '0':\n state = 'valid_0'\n elif state == 'valid_0' and digit == '1':\n state = 'invalid'\n break\n\n if state == 'invalid':\n return 'INVALID'\n else:\n return 'VALID'\n\n\n \nt = int(input())\nfor i in range(0, t):\n n = input().strip()\n print(check_string(n))\n\n\n","sub_path":"school/checkbinarystring.py","file_name":"checkbinarystring.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526904230","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2019 Scott Shawcroft and Melissa LeBlanc-Williams\n# for Adafruit Industries LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\n`adafruit_st7735`\n====================================================\n\nDisplayio driver for ST7735 based displays.\n\n* Author(s): Melissa LeBlanc-Williams\n\nImplementation Notes\n--------------------\n\n**Hardware:**\n\n**Software and Dependencies:**\n\n* Adafruit CircuitPython firmware for the supported boards:\n https://github.com/adafruit/circuitpython/releases\n\n\"\"\"\n\nimport displayio\n\n__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/adafruit/Adafruit_CircuitPython_ST7735.git\"\n\n_INIT_SEQUENCE = (\n b\"\\x01\\x80\\x32\" # _SWRESET and Delay 50ms\n b\"\\x11\\x80\\xFF\" # _SLPOUT\n b\"\\x3A\\x81\\x05\\x0A\" # _COLMOD\n b\"\\xB1\\x83\\x00\\x06\\x03\\x0A\" # _FRMCTR1\n b\"\\x36\\x01\\x08\" # _MADCTL\n b\"\\xB6\\x02\\x15\\x02\" # _DISSET5\n # 1 clk cycle nonoverlap, 2 cycle gate, rise, 3 cycle osc equalize, Fix on VTL\n b\"\\xB4\\x01\\x00\" # _INVCTR line inversion\n b\"\\xC0\\x82\\x02\\x70\\x0A\" # _PWCTR1 GVDD = 4.7V, 1.0uA, 10 ms delay\n b\"\\xC1\\x01\\x05\" # _PWCTR2 VGH = 14.7V, VGL = -7.35V\n b\"\\xC2\\x02\\x01\\x02\" # _PWCTR3 Opamp current small, Boost frequency\n b\"\\xC5\\x82\\x3C\\x38\\x0A\" # _VMCTR1\n b\"\\xFC\\x02\\x11\\x15\" # _PWCTR6\n b\"\\xE0\\x10\\x09\\x16\\x09\\x20\\x21\\x1B\\x13\\x19\\x17\\x15\\x1E\\x2B\\x04\\x05\\x02\\x0E\" # _GMCTRP1 Gamma\n b\"\\xE1\\x90\\x0B\\x14\\x08\\x1E\\x22\\x1D\\x18\\x1E\\x1B\\x1A\\x24\\x2B\\x06\\x06\\x02\\x0F\\x0A\" # _GMCTRN1\n b\"\\x13\\x80\\x0a\" # _NORON\n b\"\\x29\\x80\\xFF\" # _DISPON\n)\n\n# pylint: disable=too-few-public-methods\nclass ST7735(displayio.Display):\n \"\"\"ST7735 driver\"\"\"\n\n def __init__(self, bus, **kwargs):\n super().__init__(bus, _INIT_SEQUENCE, **kwargs)\n","sub_path":"adafruit_st7735.py","file_name":"adafruit_st7735.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"337561858","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 20 11:04:15 2017\n\n@author: Lisa\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.linalg as la\n\ndef plotVectors2D( vs, col='black') :\n \"\"\"\n Plot 2D vectors into fig. 
vs is expected contain the vectors in its columns\n \"\"\"\n ax = plt.gca()\n for i in range(vs.shape[0]) :\n ax.quiver( 0.0, 0.0, vs[i,0], vs[i,1], angles='xy', scale_units='xy', scale=1, color=col)\n maxaxis = 1.2 * np.max( np.abs(vs))\n ax.set_xlim( [-maxaxis, maxaxis])\n ax.set_ylim( [-maxaxis, maxaxis])\n plt.axes().set_aspect('equal')\n \n\n#1\n#u1 = np.random.randn(2) #2 zufällige Vektoren\n#u2 = np.random.randn(2)\n#\n##2\n#B = np.array([u1, u2]).transpose() #Basis mit u1,u2 als Spaltenvektoren(transpose)\n#Btilde = la.inv(B)\n#\n##3\n#v1 = np.random.randn(2) #zufälliger vektor in Orthogonalbasis\n#vcoeffs_u = np.dot(Btilde, v1) #vektor in basis u\n#vrec = np.dot(B, vcoeffs_u) #theoretisch rekonstruierter vektor\n##zum Vergleichen: entweder Skalarprodukt oder Differenz\n#err = la.norm(v1-vrec)\n#print(\"err= \", err) #Fehler sollte ungefähr Maschinengenauigkeit betragen\n#\n##4\n##die Basen können vertauscht werden, müssen dann nur transponiert werden\n#\n#\n##5\n#plotVectors2D(B.transpose()) #nochmal transpose, da plotvectors die zeilenvektoren darstellt\n#plotVectors2D(Btilde, 'blue')\n\n#1\ndetsB = []\nerrs = []\nfor alpha in (np.linspace(90.0, 180.0, 10) *np.pi) / 180 :\n u1 = np.array([1.0 ,0.0])\n rot = np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])\n u2 = np.dot(rot,u1)\n \n B = np.array([u1, u2]).transpose()\n Btilde = la.inv(B)\n \n #det wird nicht 0, wegen Maschinengenauigkeit\n #dualvektoren werden seeeehr groß bei det nahe 0\n detsB.append(la.det(B))\n print(\"detsB = \",detsB)\n \n #Fehler wird sehr gr0ß bei det nahe 0\n #warum ist orthogonale basis besser -> in großen Räumen numerischer Fehler kleiner\n v1 = np.random.randn(2) #zufälliger vektor in Orthogonalbasis\n vcoeffs_u = np.dot(Btilde, v1) #vektor in basis u\n vrec = np.dot(B, vcoeffs_u)\n errs.append(la.norm(v1-vrec))\n print(\"errs = \", errs)\n \n plotVectors2D(B.transpose()) \n plotVectors2D(Btilde, 'blue')\n plt.show()\n \n input(\"Press key\")\n\n\n\n\n\n\n\n\n","sub_path":"CGTools/Own Code/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3990746","text":"r\"\"\"\nMySQLinterface - Radiogaga.\n\nThis file belongs to Joachim Blom Hansen, Rasmus Jessen Aaskov and Soren\nTrads Steen.\n\"\"\"\nimport MySQLdb\n\n\nclass MySQLConnection:\n \n r\"\"\"Set up a connection to a MySQL sever.\n \n Args:\n address: the address to the mysql server.\n usr: the usename.\n psw: the password.\n dbname: the name of the database.\n \"\"\"\n \n def __init__(self, host, usr, psw, dbname):\n \"\"\"Initialize the MySQLConnection class.\"\"\"\n self.host = host\n self.usr = usr\n self.psw = psw\n self.dbname = dbname\n self.setup_connection()\n\n def setup_connection(self):\n \"\"\"Setting up a connection to a MySQL db.\"\"\"\n self.db = MySQLdb.connect(self.host, self.usr, self.psw, self.dbname)\n self.cursor = self.db.cursor()\n\n def end_connection(self):\n \"\"\"Closing the MySQL connection.\"\"\"\n self.db.close()\n\n\ndef radiogaga_db_get(MySQLConnection, table, element):\n r\"\"\"Get element from Radio gaga server interface.\n \n Args:\n MySQLConnection: element of type MySQLConnection\n table: name of the table in database\n element: dictionary of information to search for\n \"\"\"\n # DB connection\n conn = MySQLConnection\n\n # Write the mysql command to send to the server.\n s = \"SELECT * FROM {0} WHERE \".format(table)\n n = len(element)\n count = 1\n for option in 
element:\n s = s + \"\"\"{0} LIKE \"{1}\" \"\"\".format(option, element[option])\n count = count + 1\n if count <= n:\n s = s + \" AND \"\n\n # Commit the command\n conn.cursor.execute(s)\n answer = conn.cursor.fetchall()\n return(answer)\n\n\ndef radiogaga_db_insert(MySQLconnection, table, element):\n r\"\"\"Insert an element to the radiogaga database.\n\n Args:\n MySQLconnection: element of type MySQLConnection\n talbe: name of the table\n element: dictionary of information to insert\n \"\"\"\n # DB connection\n conn = MySQLconnection\n\n # Write the mysql command to send to the server\n # Note that this follows the new PEP 3101 and PEP 249 (DB-API)\n s = \"INSERT INTO {0} (\"\n s = s.format(table)\n count = 1\n n = len(element)\n for option in element:\n if count == n:\n d = \"{0}\"\n else:\n d = \"{0},\"\n d = d.format(option)\n s = s + d\n count = count + 1\n s = s + \") VALUES (\"\n count = 1\n for option in element:\n if count == n:\n d = \"\"\" \"{0}\" \"\"\"\n else:\n d = \"\"\" \"{0}\",\"\"\"\n d = d.format(element[option])\n s = s + d\n count = count + 1\n s = s + \")\"\n\n # Commit the command\n conn.cursor.execute(s)\n conn.db.commit()\n\n\ndef radiogaga_db_update(MySQLconnection, table, set_element, where_element):\n r\"\"\"Update existing track/artist info.\n \n Args:\n MySQLconnection: element of type MySQLConnection\n table: namem of table\n set_element: element with info to update\n where_element: element with info about which element to update\n \"\"\"\n conn = MySQLconnection\n \n # Write the mysql command to send to the server\n # Note this follows the new PEP 3101 and PEP 249 (DB-API)\n s = \"UPDATE {0} SET \"\n s = s.format(table)\n set_n = len(set_element)\n set_count = 1\n for option in set_element:\n if set_count == set_n:\n d = \"{0}='{1}'\"\n else:\n d = \"{0}='{1}', \"\n d = d.format(option, set_element[option])\n s = s + d\n set_count = set_count + 1\n s = s + \" WHERE \"\n where_n = len(where_element)\n where_count = 1\n for option in where_element:\n if where_count == where_n:\n d = \"\"\"{0}=\"{1}\" \"\"\"\n else:\n d = \"\"\"{0}=\"{1}\" AND \"\"\"\n d = d.format(option, where_element[option])\n s = s + d\n where_count = where_count + 1\n \n # Commit the command\n conn.cursor.execute(s)\n conn.db.commit()\n ","sub_path":"mysqlinterface.py","file_name":"mysqlinterface.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498620724","text":"from FeatureExtractorNet import *\nfrom nn_pipeline import nn_pipeline\nimport os\n\nstacking_enabled = False\n\n# Define an instance of stacking convolutional network\ndef main(stacking_enabled):\n\n # Change the current working directory to project's parent directory\n os.chdir(\"..\")\n\n if stacking_enabled:\n convNet = FeatureExtractorNet('config_stacking.cfg', config_section=\"DEFAULT\")\n\n # Train the stacking convolutional network\n convNet.fit()\n\n # Train the neural network classifier\n nn_pipeline(\"config_nn.yml\", stacking_enabled)\n\n\nif __name__ == \"__main__\":\n main(stacking_enabled)\n","sub_path":"src/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"555918857","text":"# PPHA 30535\n# Spring 2021\n# Homework 4\n\n# Bhargavi Thakur\n\n#bhargavithakur\n# bhargavithakur\n\n# Due date: Sunday May 2nd before midnight\n# Write your answers in the space between the questions, and commit/push 
only\n# this file to your repo. Note that there can be a difference between giving a\n# \"minimally\" right answer, and a really good answer, so it can pay to put\n# thought into your work.\n\n##################\n\n# To answer these questions, you will use the two csv documents included in\n# your repo. In nst-est2019-alldata.csv: SUMLEV is the level of aggregation,\n# where 10 is the whole US, 20 is a US region, and 40 is a US state. REGION\n# is the fips code for the US region. STATE is the fips code for the US state\n# The other values are as per the data dictionary at:\n# https://www2.census.gov/programs-surveys/popest/technical-documentation/file-layouts/2010-2019/nst-est2019-alldata.pdf\n# Note that each question will build on the modified dataframe from the\n# question before.\n\n# Question 1: Load the population estimates file into a dataframe. Specify\n# an absolute path using the Python os library to join filenames, so that\n# anyone who clones your homework repo only needs to update one for all\n# loading to work. Then show code doing some basic exploration of the\n# dataframe; imagine you are an intern and are handed a dataset that your\n# boss isn't familiar with, and asks you to summarize for them.\n\n\n\n\n#importing the pandas and os for ease of cloning the code \nimport pandas \nimport pandas as pd \nimport os\n\n#defining the base path file and opening the data file. \nbase_path = r'/Users/bhargavithakur/Documents/GitHub/homework-4-bhargavithakur'\npath = os.path.join(base_path, 'nst-est2019-alldata.csv')\n\n\n#Then show code doing some basic exploration of the\n# dataframe; imagine you are an intern and are handed a dataset that your\n# boss isn't familiar with, and asks you to summarize for them.\n\n\ndf_homework = pd.read_csv(path)\ndf_homework\ndf_homework.head()\ndf_homework.tail()\n\n\n#summarising data using describe and looking at specific rows and columns \ndf_homework.describe()\n\ndf_homework.iloc[0:5, 25:30]\n#In line 61, for example, we can see that at the US national level \n#and at the regional level, the deaths have been increased between the years 2010 and 2014. \n\n\n\n\n\n# Question 2: Your data only includes fips codes for states. Use the us\n# library to crosswalk fips codes to state abbreviations. 
Keep only the\n# state abbreviations in your data.\n\n#pip install us : installing the us package to display and look at states' fips code \n#and abbreviations \n\n\nimport us\nus.states.MD\ndf_9 = us.states.mapping('fips', 'abbr') \ndf_9\n\n#https://stackoverflow.com/questions/20250771/remap-values-in-pandas-column-with-a-dict\n\n#https://datatofish.com/integers-to-strings-dataframe/\n\n \n#converting the column state into a string variable \ndf_homework['STATE'] = df_homework['STATE'].astype(str)\n\n\n#defining the dictionary to be used for replacing the values in the state columnn \n\ndi = {\"1\": \"AL\", \"2\": \"AK\", \"4\": \"AZ\", \"5\": \"AR\", \"6\": \"CA\",\n \"8\": \"CO\", \"9\": \"CT\", \"10\": \"DE\", \"11\": \"DC\", \"12\": \"FL\",\n \"13\": \"GA\", \"15\": \"HI\", \"16\": \"ID\", \"17\": \"IL\", \"18\": \"IN\",\n \"19\": \"IA\", \"20\": \"KS\", \"21\": \"KY\", \"22\": \"LA\", \"23\": \"ME\",\n \"24\": \"MD\", \"25\": \"MA\", \"26\": \"MI\", \"27\": \"MN\", \"28\": \"MS\",\n \"29\": \"MO\", \"30\": \"MT\", \"31\": \"NE\", \"32\": \"NV\", \"33\": \"NH\",\n \"34\": \"NJ\", \"35\": \"NM\", \"36\": \"NY\", \"37\": \"NC\", \"38\": \"ND\",\n \"39\": \"OH\", \"40\": \"OK\", \"41\": \"OR\", \"42\": \"PA\", \"44\": \"RI\",\n \"45\": \"SC\", \"46\": \"SD\", \"47\": \"TN\", \"48\": \"TX\", \"49\": \"UT\",\n \"50\": \"VT\", \"51\": \"VA\", \"53\": \"WA\", \"54\": \"WV\", \"55\": \"WI\",\n \"56\": \"WY\" , \"72\" : \"PR\"}\n\n#replacing the column values with dictionary values defined above \ndf_homework = df_homework.replace({\"STATE\": di})\n \n\n# Question 3: Subset the data so that only observations for individual\n# US states remain, and only state names and data for the population\n# estimates in 2010-2019 remain.\n\n\n#subsetting the rows below, with aggregated accounting only where state variables are\n#defined \n\ndf_homework2 = df_homework[5:57]\ndf_homework2 \n#subsetting columns which give population estimates for every year \ncols = [c for c in df_homework2.columns if c.startswith('POP') or c == 'STATE']\ndf_homework2 = df_homework2[cols] \ndf_homework2 \n\n\n\n\n\n# Question 4: Reshape the data from wide to long, making sure you reset\n# the index to the default values if any of your data is located in the index.\n\n\n#The data is currently in the wide form, changing it to long form\ndf_homework3 = pd.wide_to_long(df_homework2 , stubnames= 'POPESTIMATE', i ='STATE', j='year')\ndf_homework3\n#because the state column became the index column,resetting the index values \ndf_homework3.reset_index(inplace=True)\n\n\n# Question 5: Open the state-visits.csv file, and fill in the VISITED column\n# with a dummy variable for whether you've visited a state or not. If you\n# haven't been to many states, then filling in a random selection of them\n# is fine too. Save your changes. Then load the file as a dataframe in\n# Python, and merge the visited column into your population dataframe, only\n# keeping values that appear in both dataframes. Are any observations\n# dropped from this? 
Show code where you investigate your merge, and\n# display any observations that weren't in both dataframes.\n\n\n#loading the second file \npath2 = os.path.join(base_path, 'state-visits.csv')\npath2\ndf_question5 =pd.read_csv(path2)\ndf_question5\n\n\n#merging the two files \ndf_homework4 = df_homework3.merge(df_question5, on ='STATE', how='inner')\ndf_homework4\n\n\n\ndf_merged = df_homework3.merge(df_question5, on ='STATE', how= 'outer', indicator = True)\ndf_merged\nlen(df_homework4)\nlen(df_merged)\ndf_merged[df_merged['_merge'] != 'both']\n\n\n\n\n\n#There's a difference of eleven values between the inner and the outer joins.\n# Peurto Rico occurs only in the census data while Guam exists only in the state_lists data. \n\n\n#end \n\n\n\n","sub_path":"homework 4.py","file_name":"homework 4.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166021045","text":"# Copyright 2010-2011 OpenStack Foundation\n# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ast\nimport cloudpulse\nfrom cloudpulse.operator.ansible.ansible_runner import ansible_runner\nfrom cloudpulse.operator.ansible import openstack_config_reader as os_cfg\nimport json\nimport os\nimport sys\n\n\nclass BaseLine(object):\n\n def base_line(self, os_baseline_cfg):\n try:\n oscfg_reader = os_cfg.os_cfg_reader(os_baseline_cfg)\n oscfg_reader.setOpenstackNodeIp()\n oscfg_reader.printHostList()\n openstack_host_list = oscfg_reader.get_host_list()\n baseline_data = {}\n for host in openstack_host_list:\n f = open('/var/sec_hc/dir_list', 'w+')\n for dir_name in host.getDirList():\n f.write(dir_name + '\\n')\n f.close()\n ans_runner = ansible_runner([host])\n # execute_cmd\n base_dir = os.path.dirname(cloudpulse.__file__)\n base_dir += '/scenario/plugins/security_pulse/testcase'\n flist = [base_dir + '/remote_baseline.py',\n base_dir + '/remote_filecredentials.py',\n '/var/sec_hc/dir_list'\n ]\n results = ans_runner.execute_cmd(\n \"python \" +\n '/var/sec_hc/' +\n \"remote_baseline.py \",\n file_list=flist)\n # for node in results['contacted'].keys():\n role = host.getRole()\n node = host.getIp()\n data = results['contacted'][node]['stdout']\n\n baseline_data.update({role: ast.literal_eval(data)})\n print (baseline_data)\n formated_data = json.dumps(baseline_data, indent=4)\n open('/var/sec_hc/os_allnode_baseline',\n 'w+').write(str(formated_data))\n except Exception as e:\n print (e)\n\nif __name__ == '__main__':\n os_cfg_file = sys.argv[1]\n sec = BaseLine()\n sec.base_line(os_cfg_file)\n","sub_path":"cloudpulse/scenario/plugins/security_pulse/testcase/generate_baseline.py","file_name":"generate_baseline.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
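The check_string routine in checkbinarystring.py above drives its finite-state machine through a long elif chain; the same automaton is easier to audit when the transitions are encoded as a lookup table. A minimal sketch of that variant follows (a hypothetical rewrite for illustration, not part of any dataset record):

# Transition-table variant of check_string from checkbinarystring.py above.
# The states and transitions are copied from the original elif chain; the
# accepted strings are exactly those of the form 0*1*0* (once a '1' is
# followed by a '0', no further '1' may appear).
TRANSITIONS = {
    ('start', '0'): 'valid_no_1',
    ('start', '1'): 'valid_1',
    ('valid_no_1', '0'): 'valid_no_1',
    ('valid_no_1', '1'): 'valid_1',
    ('valid_1', '0'): 'valid_0',
    ('valid_1', '1'): 'valid_1',
    ('valid_0', '0'): 'valid_0',
    ('valid_0', '1'): 'invalid',
}

def check_string(binary_number):
    state = 'start'
    for digit in binary_number:
        state = TRANSITIONS[(state, digit)]
        if state == 'invalid':
            return 'INVALID'
    return 'VALID'

# Same behavior as the original chain:
assert check_string('0011100') == 'VALID'
assert check_string('101') == 'INVALID'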
+{"seq_id":"436573686","text":"###############################################################################################################\r\n# A Python file that loads and preprocesses neonatal EEG data from CHOP_CICU for background classification\r\n# Written by Daniel Joongwon Kim\r\n# University of Pennsylvania, Department of Computer and Information Science\r\n###############################################################################################################\r\nimport numpy as np\r\nimport pickle\r\nfrom preprocess_data import clean_data, extract_features\r\nfrom wave_features import wavelet_image\r\n\r\n\r\n# A function that reads EEG recordings and labels from the CHOP_CICU dataset for neonatal\r\n# EEG background detection\r\n# Inputs: num_patients - total number of patients to incorporate within the dataset\r\n# squeeze_labels - a boolean indicating whether to squeeze the labels for improved background detection\r\n# Outputs: output_recordings - a list of EEG recordings for every patient. Each array has shape (C x Q), where\r\n# C is the number of channels and Q is the total number of datapoints within\r\n# the patient's EEG\r\n# output_labels - a list of EEG background annotations for every patient, with sampling rate of 1 Hz\r\n# output_idx - a list of EEG background index for every patient, with sampling rate of 1 Hz\r\n# (note that output_labels contains specific background information while output_idx merely distinguishes\r\n# different background stages within each patient's dataset by simple iterators)\r\n# sample_freq - a list of sampling frequencies of each patient\r\ndef load_data_backgrounds(num_patients=15, squeeze_labels=True):\r\n # Open the pickle file that contains the raw data\r\n with open('CHOP_CICU_InputData2.p', 'rb') as fp:\r\n input_dataset = pickle.load(fp)\r\n # Extract labels, recordings and sampling frequencies\r\n labels = input_dataset['labels']\r\n recordings = input_dataset['recordings']\r\n sample_freq = input_dataset['sample_freq']\r\n # Initialize output placeholders\r\n output_labels = []\r\n output_recordings = []\r\n output_idx = []\r\n # Iterate over all patients within the dataset\r\n for ii in range(num_patients):\r\n # Extract patient-specific labels, recordings and sampling frequency\r\n patient_labels = labels[ii]\r\n patient_rec = recordings[ii]\r\n patient_fs = sample_freq[ii]\r\n # Initialize patient-specific outputs\r\n patient_labels_new = np.empty(0)\r\n patient_rec_new = np.empty(0)\r\n patient_idx_new = np.empty(0)\r\n # Iterate over all background EEG patterns within each patient\r\n for jj in range(len(patient_rec)):\r\n time_length = int(np.size(patient_rec[jj], axis=1) / patient_fs)\r\n # Merge labels to retain only three classes (0 - NC, ND, CLV // 1 - ED, LVS)\r\n if squeeze_labels:\r\n if patient_labels[jj] == 1 or patient_labels[jj] == 2 or patient_labels[jj] == 3:\r\n patient_labels[jj] = 0\r\n else:\r\n patient_labels[jj] = 1\r\n # Concatenate labels, indices and recordings to patient dataset\r\n patient_labels_new = np.r_[patient_labels_new, np.repeat(patient_labels[jj], time_length)]\r\n patient_idx_new = np.r_[patient_idx_new, np.repeat(jj, time_length)]\r\n if jj == 0:\r\n patient_rec_new = patient_rec[jj]\r\n else:\r\n patient_rec_new = np.c_[patient_rec_new, patient_rec[jj]]\r\n # Add patient-specific labels and recordings to the output\r\n output_labels.append(patient_labels_new)\r\n output_recordings.append(patient_rec_new)\r\n output_idx.append(patient_idx_new)\r\n return output_recordings, 
output_labels, output_idx, sample_freq\r\n\r\n\r\n# A function that preprocesses the neonatal EEG background dataset and saves the preprocessed features and annotations\r\n# into .npy files for future use in classifiers\r\n# Inputs: num_patients - total number of patients to incorporate within the dataset\r\n# squeeze_labels - a boolean indicating whether to squeeze the labels for improved background detection\r\n# Outputs: None (saved files of annotations, features and images)\r\ndef preprocess_data_backgrounds(num_patients=15, squeeze_labels=True):\r\n all_recordings, all_labels, all_indices, all_freq = load_data_backgrounds(num_patients, squeeze_labels)\r\n s_length = 5\r\n # Iterate over each patient within the loaded dataset\r\n for idx, (data, label) in enumerate(zip(all_recordings, all_labels)):\r\n fs = all_freq[idx]\r\n patient_indices = all_indices[idx]\r\n # Obtain filtered data and labels\r\n new_data, new_labels, remove = clean_data(data, label, fs, s_length, use_default=True)\r\n np.save('patient%d_annot2b.npy' % (idx + 1), new_labels)\r\n print('Artifact removal complete')\r\n # Obtain filtered background indices\r\n new_indices = process_background_info(patient_indices, s_length, remove)\r\n np.save('patient%d_idx2.npy' % (idx + 1), new_indices)\r\n # Obtain statistical features\r\n feats = extract_features(new_data, fs, normalize='default')\r\n np.save('patient%d_feats2.npy' % (idx + 1), feats)\r\n print('Feature extraction complete')\r\n # Obtain image data using CWT\r\n images = wavelet_image(new_data, 80, downsample_factor=2, method='both')\r\n # Average over channels to account for inconsistent number of channels\r\n images_minmax = np.expand_dims(np.mean(images[0], axis=1), axis=1)\r\n images_mean = np.expand_dims(np.mean(images[1], axis=1), axis=1)\r\n # Merge two different feature maps together\r\n images = np.concatenate((images_minmax, images_mean), axis=1)\r\n np.save('patient%d_images2.npy' % (idx + 1), images)\r\n print('Image extraction complete')\r\n return None\r\n\r\n\r\n# A helper function that formats the specific background index of each processed EEG segment\r\n# Inputs: patient_indices - the EEG background indices of a specific patient, sampled at 1Hz\r\n# s_length - length of each EEG segment\r\n# verifier - a list indicating whether each segment was removed in the artifact rejection process\r\n# Outputs: output_indices - a list of processed EEG background indices for each EEG segment\r\ndef process_background_info(patient_indices, s_length, verifier):\r\n output_indices = []\r\n for ii in range(len(verifier)):\r\n window = patient_indices[ii * s_length: (ii + 1) * s_length]\r\n # Only record background indices of non-artifact segments\r\n if verifier[ii] == 0:\r\n output_indices.append(window[0])\r\n return output_indices\r\n","sub_path":"CHOP_CICU/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113090585","text":"\"\"\"关键字参数\n\n@see: https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments\n\n函数可以使用 kwarg=value 形式的关键字参数来调用。\n\"\"\"\n\nimport pytest\n\n\ndef parrot(voltage, state='a stiff', action='voom', parrot_type='Norwegian Blue'):\n \"\"\"���参数函数的示例\n\n 这个函数接受一个必选参数(voltage)和三个可选参数\n (state, action, and type).\n \"\"\"\n\n message = 'This parrot wouldn\\'t ' + action + ' '\n message += 'if you put ' + str(voltage) + ' volts through it. 
'\n message += 'Lovely plumage, the ' + parrot_type + '. '\n message += 'It\\'s ' + state + '!'\n\n return message\n\n\ndef test_function_keyword_arguments():\n \"\"\"测试使用指定关键字参数的调用函数\"\"\"\n\n # parrot函数接受一个必选参数 (voltage) 还有三个可选参数 (state, action, type).\n # 这个函数可以通过以下任何一种方式被调用:\n\n message = (\n \"This parrot wouldn't voom if you put 1000 volts through it. \"\n \"Lovely plumage, the Norwegian Blue. \"\n \"It's a stiff!\"\n )\n # 1 位置参数\n assert parrot(1000) == message\n # 1 关键字参数\n assert parrot(voltage=1000) == message\n\n message = (\n \"This parrot wouldn't VOOOOOM if you put 1000000 volts through it. \"\n \"Lovely plumage, the Norwegian Blue. \"\n \"It's a stiff!\"\n )\n # 2 关键字参数\n assert parrot(voltage=1000000, action='VOOOOOM') == message\n # 2 关键字参数\n assert parrot(action='VOOOOOM', voltage=1000000) == message\n\n # 3 位置参数\n message = (\n \"This parrot wouldn't jump if you put 1000000 volts through it. \"\n \"Lovely plumage, the Norwegian Blue. \"\n \"It's bereft of life!\"\n )\n assert parrot(1000000, 'bereft of life', 'jump') == message\n\n # 1 位置, 1 关键字\n message = (\n \"This parrot wouldn't voom if you put 1000 volts through it. \"\n \"Lovely plumage, the Norwegian Blue. \"\n \"It's pushing up the daisies!\"\n )\n assert parrot(1000, state='pushing up the daisies') == message\n\n # 但是下面所有的调用都是无效的。\n\n with pytest.raises(Exception):\n # 必须参数缺失.\n # pylint: disable=no-value-for-parameter\n parrot()\n\n # 在关键字参数之后的非关键字参数。\n # parrot(voltage=5.0, 'dead')\n\n with pytest.raises(Exception):\n # pylint: disable=redundant-keyword-arg\n parrot(110, voltage=220)\n\n with pytest.raises(Exception):\n # 未知的关键字参数\n # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n parrot(actor='John Cleese')\n\n # 在函数调用中,关键字参数必须跟在位置参数之后。传递的所有关键字参数必须与函数接受的其中一个参数匹配 (例如actor不是parrot函数的有效参数),\n # 它们的顺序并不重要。这也包括非可选参数(例如parrot(voltage=1000)也是有效的)。 参数不能接收一个值超过一次。\n # 下面是一个由于这种限制而失败的示例:\n def function_with_one_argument(number):\n return number\n\n with pytest.raises(Exception):\n # pylint: disable=redundant-keyword-arg\n function_with_one_argument(0, number=0)\n\n # 当出现 **name 的最后一个正式形参时,它将接收一个字典,其中包含除与正式形参对应的关键字参数外的所有关键字参数。\n # 这可以与 *name 的形式参数结合使用,该形式参数接收一个包含形式参数列表之外的位置参数的元组。\n # (*name 必须出现在 **name 之前。)\n # 例如,如果我们这样定义一个函数:\n def test_function(first_param, *arguments, **keywords):\n \"\"\"这个函数通过“arguments”元组和关键字字典接受它的参数。\"\"\"\n assert first_param == 'first param'\n assert arguments == ('second param', 'third param')\n assert keywords == {\n 'fourth_param_name': 'fourth named param',\n 'fifth_param_name': 'fifth named param'\n }\n\n test_function(\n 'first param',\n 'second param',\n 'third param',\n fourth_param_name='fourth named param',\n fifth_param_name='fifth named param',\n )\n","sub_path":"src/functions/test_function_keyword_arguments.py","file_name":"test_function_keyword_arguments.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"161062533","text":"\"\"\"Tests exercising the eia923 module for use with PyTest.\"\"\"\n\nimport pytest\nfrom pudl import eia923, constants\nfrom pandas import ExcelFile\n\n\ndef test_get_pages_eia923():\n \"\"\"Pull in all pages of EIA923 that we expect to work.\"\"\"\n eia923_xlsx = {}\n for yr in constants.eia923_working_years:\n print(\"Reading EIA 923 spreadsheet data for {}.\".format(yr))\n eia923_xlsx[yr] = ExcelFile(eia923.get_eia923_file(yr))\n\n eia923_dfs = {}\n for page in constants.tab_map_eia923.columns:\n if (page == 'plant_frame'):\n 
eia923_dfs[page] = \\\n eia923.get_eia923_plant_info(constants.eia923_working_years,\n eia923_xlsx)\n else:\n eia923_dfs[page] = \\\n eia923.get_eia923_page(page, eia923_xlsx,\n years=constants.eia923_working_years,\n verbose=True)\n","sub_path":"test/test_eia923.py","file_name":"test_eia923.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301073942","text":"import logging\nimport time\nimport argparse\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom models import create_model\n\n\ncfg = {\n \"name\": \"EDVR\",\n \"suffix\": None,\n \"model\": \"video_base\",\n \"distortion\": \"sr\",\n \"scale\": 4,\n \"crop_border\": None,\n \"gpu_ids\": [0],\n \"datasets\": {},\n \"network_G\": {\n \"which_model_G\": \"EDVR\",\n \"nf\": 128,\n \"nframes\": 3,\n \"groups\": 8,\n \"front_RBs\": 5,\n \"back_RBs\": 10,\n \"predeblur\": True,\n \"HR_in\": True,\n \"w_TSA\": False,\n \"center\": None,\n },\n \"path\": {\n \"strict_load\": False,\n },\n \"is_train\": False,\n \"train\": False,\n \"dist\": False,\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', required=True)\n parser.add_argument('--video', required=True)\n parser.add_argument('--output')\n parser.add_argument('--show', action='store_true')\n\n return parser.parse_args()\n\n\ndef normalize(img: np.ndarray):\n rank = len(img.shape)\n height_dim = 1 if rank == 4 else 0\n nearest_multiple_16 = img.shape[height_dim] // 16 * 16\n if nearest_multiple_16 != img.shape[height_dim]:\n # crop by height\n crop_need = img.shape[height_dim] - nearest_multiple_16\n if rank == 4:\n img = img[:, crop_need // 2:-crop_need // 2, :, :]\n else:\n img = img[crop_need // 2:-crop_need // 2, :, :]\n\n img = img.astype(np.float32) / 255.0\n img = img.transpose([2, 0, 1])\n return torch.from_numpy(img).float()\n\n\ndef denormalize(tensor):\n numpy = tensor.detach().cpu().numpy()\n img = (numpy * 255.0).clip(0, 255).astype(np.uint8)\n return img.transpose([1, 2, 0])\n\n\ndef main():\n args = parse_args()\n cfg['path']['pretrain_model_G'] = args.model\n\n print(f'Loading model from {args.model}...')\n model = create_model(cfg)\n print(f'Done.')\n\n vc = cv2.VideoCapture(args.video)\n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n n_frames = 3\n if args.output:\n fps = vc.get(cv2.CAP_PROP_FPS) / n_frames\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n # video_format = video.get(cv2.CAP_PROP_FORMAT)\n video_writer = cv2.VideoWriter(\n args.output, fourcc, fps,\n frameSize=(width, height // 16 * 16)\n )\n\n log_frames = 100\n frame_num = 0\n frame_processed = 0\n time_sum = 0\n imgs_in = []\n while True:\n ret, frame = vc.read()\n if not ret:\n break\n\n frame_num += 1\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img = normalize(frame)\n if len(imgs_in) <= n_frames:\n imgs_in.append(img)\n if len(imgs_in) < n_frames:\n continue\n if len(imgs_in) > n_frames:\n imgs_in = [imgs_in[-1]]\n continue\n\n data = {'LQs': torch.from_numpy(np.expand_dims(np.stack(imgs_in), axis=0))}\n model.feed_data(data, need_GT=False)\n\n t = time.time()\n model.test()\n time_sum += time.time() - t\n\n outputs = model.get_current_visuals(need_GT=False)\n output = outputs['rlt']\n output_frame = denormalize(output)\n\n frame_processed += 1\n if frame_processed % log_frames == 0:\n print(f'Processed {frame_processed} frames.')\n\n cv_frame = output_frame[:, :, ::-1]\n if args.output:\n 
video_writer.write(cv_frame)\n if args.show:\n cv2.imshow('Video', cv_frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n print(f'Average inference time: {time_sum / frame_processed * 1000:0.3f}ms')\n vc.release()\n if args.output:\n video_writer.release()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"codes/convert_video.py","file_name":"convert_video.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"579612722","text":"N = int(input())\ndp1 = []\n\nfor i in range(N):\n array1 = list(map(int,input().split()))\n dp1.append(array1)\n\ndp2 = [[0] * (N) for i in range(N-1)] # dp[N][K]\ndp2.insert(0,dp1[0])\n\n\nfor i in range(1,N):\n for j in range(i+1):\n c = 0 \n if i == j :\n dp2[i][j] = dp2[i-1][j-1] + dp1[i][j]\n c = 1\n\n if j == 0 :\n dp2[i][j] = dp2[i-1][j] + dp1[i][j]\n c = 1\n\n if ( c == 0 ) :\n dp2[i][j] = max(dp2[i-1][j-1] + dp1[i][j], dp2[i-1][j] + dp1[i][j])\n\nprint(max(dp2[N-1]))","sub_path":"BoJ/BoJ.1932(1).py","file_name":"BoJ.1932(1).py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"443823748","text":"from osbot_aws.Dependencies import load_dependency\n\n\ndef run(event, context):\n load_dependency('elastic')\n\n from gw_bot.elastic.Log_To_Elk import Log_To_Elk\n log_to_elk = Log_To_Elk()\n try:\n level = event.get('level' )\n category = event.get('category')\n message = event.get('message' )\n data = event.get('data' )\n index = event.get('index' )\n if message:\n if level == 'info':\n return log_to_elk.log_info (message=message, category=category, data=data, index=index)\n elif level == 'debug':\n return log_to_elk.log_debug(message=message, category=category, data=data, index=index)\n elif level == 'error':\n return log_to_elk.log_error(message=message, category=category, data=data, index=index)\n else:\n return log_to_elk.log_error(\"Error: not supported error level: {0} \\n\\nmessage: {1}\\ncategory: {2}\\ndata: {3}\".format(level, message, category, data))\n return message\n except Exception as error:\n return log_to_elk.log_error('Error: ' + str(error), 'Lambda.utils.log_to_elk')","sub_path":"gw_bot/lambdas/log_to_elk.py","file_name":"log_to_elk.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640024538","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n max_length = 0\n char_loc = dict()\n index1 = 0\n for i, c in enumerate(s):\n if c in char_loc:\n length = len(char_loc)\n if length > max_length:\n max_length = length\n index = char_loc[c]\n for j in range(index1, index + 1):\n if s[j] in char_loc and char_loc[s[j]] == j:\n char_loc.pop(s[j], None)\n index1 = index\n\n char_loc[c] = i\n length = len(char_loc)\n if length > max_length:\n max_length = length\n return max_length\n\n\nsol = Solution()\n# \"abcabcbb\"\n# \"`bpfbhmipx`\"\nx = sol.lengthOfLongestSubstring(\"abba\")\nprint(x)\n","sub_path":"longest_substring_without_repeat_char.py","file_name":"longest_substring_without_repeat_char.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"149573548","text":"import re\n\nfrom django.conf import settings\n\n\ndef is_webapps(request):\n if isinstance(request.path_info, basestring):\n is_match = re.match('/(developers/)?apps/', 
request.path_info)\n else:\n is_match = False\n return {'WEBAPPS': is_match or settings.APP_PREVIEW}\n","sub_path":"mkt/webapps/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545900781","text":"#!/usr/bin/python2.4\n# Copyright 2009 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"View handlers.\n\nThe views module does not get tested and as such only contains view handlers\nwith minimum logic in it.\n\nAny complex logic is to be delegated to views_impl module.\n\"\"\"\n\n# Suppress pylint invalid import order\n# pylint: disable-msg=C6203\n\n\nfrom django import http\nfrom django import shortcuts\nfrom django import template\nfrom django.core import urlresolvers\nfrom django.utils import translation\n\nfrom core import models\nfrom core import permissions\n#TODO(user): this import is needed because rules.GetRule() can not import\n#rules_impl otherwise\n# Suppress pylint const name warnings.\n# pylint: disable-msg=W0611\nfrom core import rules_impl\nfrom core import utils\nfrom core import views_impl\n\n_ = translation.ugettext\n\n\ndef Home(request):\n \"\"\"The landing home page view function.\"\"\"\n\n data = views_impl.Home(request.user)\n context = template.RequestContext(request, data)\n template_name = 'home.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef SystemStatus(request):\n \"\"\"Shows system status.\"\"\"\n\n data = views_impl.SystemStatus()\n context = template.RequestContext(request, data)\n template_name = 'system_status.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef ShowPrograms(request):\n \"\"\"Show all the programs that are present in the system on one page.\"\"\"\n # TODO(user): Required only for the alpha release only, remove later.\n\n program_query = models.Program.GetSearchableProgramsQuery().order('name')\n program_query = utils.QueryIterator(program_query, models.Program,\n prefetch_count=1000, next_count=1000)\n program_list = []\n for program in program_query:\n views_impl.EnrichDisplay(program)\n program_list.append(program)\n\n template_name = 'show_all_programs.html'\n context = template.RequestContext(request, {'program_list': program_list})\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef ShowOwned(request):\n \"\"\"Show the programs that the user has edit privileges for.\"\"\"\n\n data = views_impl.ShowOwned(request.user)\n context = template.RequestContext(request, data)\n template_name = 'show_owned.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef ShowLearning(request):\n \"\"\"Show the activities that the user is enrolled in.\"\"\"\n data = views_impl.ShowLearning(request.user)\n context = template.RequestContext(request, data)\n template_name = 'show_learning.html'\n return 
shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef ShowTeaching(request):\n \"\"\"Show the programs that the user is teaching.\"\"\"\n data = views_impl.ShowTeaching(request.user)\n context = template.RequestContext(request, data)\n template_name = 'show_teaching.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef ShowProgram(request, program_key):\n \"\"\"Display program, the corresponding sessions and registration options.\"\"\"\n\n data = views_impl.ShowProgram(program_key, request.user)\n context = template.RequestContext(request, data)\n template_name = 'program_detail.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\n@permissions.CourseCreator\ndef CreateProgram(request):\n \"\"\"View handler to create a new program.\"\"\"\n return views_impl.CreateOrUpdateProgram(request)\n\n\n@permissions.ProgramOwner\ndef UpdateProgram(request, program):\n \"\"\"View handler to update an existing program in the datastore.\n\n Args:\n request: The view request object. It contains the forms POST data for\n configuring the program properties.\n program: Program associated with the key.\n\n Returns:\n Renders the resultant page.\n \"\"\"\n return views_impl.CreateOrUpdateProgram(request, program)\n\n\n@permissions.ActivityOwner\ndef RosterEnroll(request, activity, program):\n \"\"\"View handler to enroll people from roster page.\"\"\"\n data = views_impl.RosterEnroll(request, program, activity)\n context = template.RequestContext(request, data)\n return shortcuts.render_to_response('show_roster_enroll.html', context)\n\n\n@permissions.ProgramOwner\ndef DeleteProgram(request, program):\n \"\"\"Delete a program on user request.\n\n Args:\n request: The view request object.\n program: models.Program to be deleted.\n\n Returns:\n Http response.\n \"\"\"\n\n views_impl.DeleteProgram(program.key(), request.user.appengine_user)\n return http.HttpResponseRedirect(urlresolvers.reverse(Home))\n\n\ndef UpdateSettings(request):\n \"\"\"Handles the user settings page.\"\"\"\n return views_impl.UpdateSettings(request)\n\n\ndef ShowRoster(request, activity_key, order_by='status'):\n \"\"\"Displays the roster page for a particular activity.\n\n Args:\n request: The view functions request object.\n activity_key: The key of the activity to show the roster page for.\n order_by: String. Sort order to be applied for the models.UserRegistration.\n\n Returns:\n The roster page html.\n \"\"\"\n data = views_impl.ShowRoster(request.user, activity_key, order_by)\n context = template.RequestContext(request, data)\n\n template_name = 'show_roster.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef PrintRoster(request, activity_key, order_by='user'):\n \"\"\"Displays the print roster page for a particular activity.\n\n Args:\n request: The view functions request object.\n activity_key: The key of the activity to show the roster page for.\n order_by: String. 
Sort order to be applied for the models.UserRegistration.\n\n Returns:\n The print roster page html.\n \"\"\"\n data = views_impl.ShowRoster(request.user, activity_key, order_by)\n context = template.RequestContext(request, data)\n\n template_name = 'print_roster.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\ndef UserUnregister(request, program_key, activity_key):\n \"\"\"Unregister the user initiating the request from the given activity.\"\"\"\n\n views_impl.ChangeUserStatus(\n [request.user.appengine_user.email()], activity_key,\n utils.RegistrationStatus.UNREGISTERED)\n\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowProgram, kwargs=dict(program_key=program_key)))\n\n\n@permissions.ActivityOwner\ndef UnregisterUsers(unused_request, activity, program=None, users=None):\n \"\"\"Unregister the users for a given activity.\n\n Args:\n activity: A models.Activity.\n program: A models.Program.\n users: A space separated list of user emails.\n\n Returns:\n A http Response.\n \"\"\"\n # The decorator provides the argument 'program'. Do not rename.\n # Suppress pylint invalid import order\n # pylint: disable-msg=W0613\n\n users = users.strip().split()\n views_impl.ChangeUserStatus(users, activity,\n utils.RegistrationStatus.UNREGISTERED,\n force_status=True)\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowRoster, kwargs=dict(activity_key=activity.key(),\n order_by='status')\n ))\n\n\n@permissions.ActivityOwner\ndef ChangeUserStatusToEnrolled(unused_request, activity,\n program=None, users=None):\n \"\"\"Changes the status of an already waitlisted user to enrolled.\n\n Args:\n activity: A models.Activity.\n program: A models.Program.\n users: A space separated list of user emails.\n\n Returns:\n A http Response.\n \"\"\"\n # The decorator provides the argument 'program'. Do not rename.\n # Suppress pylint invalid import order\n # pylint: disable-msg=W0613\n\n users = users.strip().split()\n views_impl.ChangeUserStatus(users, activity,\n utils.RegistrationStatus.ENROLLED,\n force_status=True)\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowRoster, kwargs=dict(activity_key=activity.key(),\n order_by='status')\n ))\n\n\ndef UserRegister(request):\n \"\"\"Registers the user in an activity with post choices.\n\n This function is called after the user completes and confirms her choices for\n registering to an activity. 
The post information should contain information on\n the activity, the schedules and the user choices of access points for each of\n them etc.\n\n Args:\n request: The request that contains user registration information.\n\n Returns:\n Redirects back to the show program page.\n \"\"\"\n\n if request.method == 'POST': # Only POST is supported for now.\n views_impl.UserRegister(request.POST, request.user)\n if request.POST['users']:\n # Registration for multiple users - admin action - redirect to roster\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowRoster, kwargs=dict(activity_key=request.POST['activity_id'],\n order_by='status')))\n else:\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowProgram, kwargs=dict(program_key=request.POST['program_id'])))\n\n return http.Http404\n\n\ndef ShowActivity(request, activity_key):\n \"\"\"Display program, with a specific session and registration options.\"\"\"\n\n data = views_impl.ShowActivity(activity_key, request.user)\n context = template.RequestContext(request, data)\n template_name = 'program_detail.html'\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n\n@permissions.ActivityCreation\ndef CreateActivity(request, program):\n \"\"\"View handler to create a new activity.\n\n Args:\n request: A request.\n program: Parent models.Program to created the activity under.\n\n Returns:\n http response.\n \"\"\"\n\n return views_impl.CreateOrUpdateActivity(request, program=program)\n\n\n@permissions.ActivityOwner\ndef UpdateActivity(request, activity, program):\n \"\"\"View handler to update an existing activity in the datastore.\n\n Args:\n request: The view request object.\n activity: The models.Activity that should be updated.\n program: models.Program under which the activity is present.\n\n Returns:\n Renders the resultant page.\n \"\"\"\n\n return views_impl.CreateOrUpdateActivity(request, activity=activity,\n program=program)\n\n\n@permissions.ActivityOwner\ndef DeleteActivity(request, activity, program):\n \"\"\"Delete an activity.\n\n Args:\n request: The view request object.\n activity: models.Activity which should be deleted.\n program: models.Program under which the activity is present.\n\n Returns:\n http response.\n \"\"\"\n\n views_impl.DeleteActivity(activity, request.user.appengine_user)\n return http.HttpResponseRedirect(urlresolvers.reverse(\n ShowProgram, kwargs=dict(program_key=program.key())))\n\n\n@permissions.Staff\ndef UpdateCalendarSessionToken(request):\n \"\"\"Updates session tokens for role account when needed or when forced.\n\n Args:\n request: The view request object.\n\n Returns:\n http response.\n \"\"\"\n redirect_path = urlresolvers.reverse(StoreCalendarSessionToken)\n return views_impl.UpdateCalendarSessionToken(request, redirect_path)\n\n\n@permissions.Staff\ndef StoreCalendarSessionToken(request):\n \"\"\"Stores the authentication tokens that are given as a redirect.\n\n The redirect comes from the google authentication server that provides the\n token for accessing calendar feeds using gdata. The redirect url contains the\n authentication tokens as one of the parameters. 
This method extracts the token\n and stores it in the datastore for future use.\n\n Args:\n request: The view request object.\n\n Returns:\n http response.\n \"\"\"\n\n views_impl.StoreCalendarSessionToken(request)\n admin_url = urlresolvers.reverse('admin:core_configuration_changelist')\n return http.HttpResponseRedirect(admin_url)\n\n\n@permissions.Staff\ndef ResetDatastoreSync(unused_request):\n \"\"\"Reset the external sync process to start over.\"\"\"\n views_impl.ResetDatastoreSync()\n admin_url = urlresolvers.reverse('admin:core_configuration_changelist')\n return http.HttpResponseRedirect(admin_url)\n\n\n@permissions.StaffOrCronOrTask\ndef BeginConferenceRoomsStorage(unused_request):\n \"\"\"Marks the beginning of a new conference rooms collection task.\"\"\"\n\n views_impl.BeginConferenceRoomsStorage()\n admin_url = urlresolvers.reverse('admin:core_accesspoint_changelist')\n return http.HttpResponseRedirect(admin_url)\n\n\ndef FetchAndStoreConferenceRooms(unused_request, query_offset, num_rooms):\n \"\"\"Queries RoomInfoService for rooms and stores them as access points.\n\n Args:\n query_offset: The str offset after which the rooms should be queried from.\n num_rooms: The str number of rooms to retrieve.\n\n Returns:\n A http response.\n \"\"\"\n\n views_impl.FetchAndStoreConferenceRooms(int(query_offset), int(num_rooms))\n admin_url = urlresolvers.reverse('admin:core_accesspoint_changelist')\n return http.HttpResponseRedirect(admin_url)\n\n\ndef ConstructAccessPointsInfo(unused_request):\n \"\"\"Loads access points info and saves it in datastore config object.\"\"\"\n views_impl.ConstructAccessPointsInfo()\n admin_url = urlresolvers.reverse('admin:core_accesspoint_changelist')\n return http.HttpResponseRedirect(admin_url)\n\n\n@permissions.StaffOrCronOrTask\ndef RunDeferred(request):\n \"\"\"Executes deferred tasks by invoking the deferred api handler.\"\"\"\n return views_impl.RunDeferred(request)\n\n\ndef ShowManagerApprovals(request):\n \"\"\"The page that shows pending approvals for a manager.\"\"\"\n return views_impl.ShowManagerApprovals(request)\n\n\ndef Search(request):\n \"\"\"Course search view handler.\"\"\"\n search_context = views_impl.Search(request)\n template_name = 'search_results.html'\n context = template.RequestContext(request, search_context)\n return shortcuts.render_to_response(template_name, context_instance=context)\n\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"413259541","text":"#!/usr/bin/env python\n\n# This software was developed at the National Institute of Standards\n# and Technology by employees of the Federal Government in the course\n# of their official duties. Pursuant to title 17 Section 105 of the\n# United States Code this software is not subject to copyright\n# protection and is in the public domain. 
NIST assumes no\n# responsibility whatsoever for its use by other parties, and makes\n# no guarantees, expressed or implied, about its quality,\n# reliability, or any other characteristic.\n#\n# We would appreciate acknowledgement if the software is used.\n\n__version__ = \"0.1.0\"\n\nimport logging\nimport os\n\n_logger = logging.getLogger(os.path.basename(__file__))\n\nimport Objects\n\ndef main():\n volumeobject_count = 0\n fileobject_count = 0\n for (event, obj) in Objects.iterparse(args.in_dfxml):\n if event != \"end\":\n continue\n if isinstance(obj, Objects.VolumeObject):\n volumeobject_count += 1\n if len(obj.annos) > 0:\n _logger.info(\"obj.annos: %r.\" % obj.annos)\n raise ValueError(\"Properties of the file system changed in the round trip.\")\n continue\n if not isinstance(obj, Objects.FileObject):\n continue\n fileobject_count += 1\n if \"new\" in obj.annos:\n raise ValueError(\"A new file was created in translation.\")\n if \"deleted\" in obj.annos:\n raise ValueError(\"A file was lost in translation.\")\n if len(obj.diffs) > 0:\n for diff in obj.diffs:\n _logger.info(\"%s: %r -> %r.\" % (diff, getattr(obj, diff), getattr(obj.original_fileobject, diff)))\n raise ValueError(\"Information changed translating between DFXML, CASE, and back.\")\n if volumeobject_count == 0:\n raise ValueError(\"No file systems emitted.\")\n if fileobject_count == 0:\n raise ValueError(\"No files emitted.\")\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\")\n parser.add_argument(\"in_dfxml\")\n args = parser.parse_args()\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)\n main()\n","sub_path":"tests/verify_single_filesystem_single_file.py","file_name":"verify_single_filesystem_single_file.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"74387541","text":"from ray.rllib.agents.trainer import with_common_config\nfrom ray.rllib.agents.trainer_template import build_trainer\nfrom ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches\nfrom ray.rllib.execution.train_ops import TrainOneStep\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\n\n# yapf: disable\n# __sphinx_doc_begin__\nDEFAULT_CONFIG = with_common_config({\n # No remote workers by default.\n \"num_workers\": 0,\n # Learning rate.\n \"lr\": 0.0004,\n})\n# __sphinx_doc_end__\n# yapf: enable\n\n\ndef get_policy_class(config):\n if config[\"use_pytorch\"]:\n from ray.rllib.agents.pg.pg_torch_policy import PGTorchPolicy\n return PGTorchPolicy\n else:\n return PGTFPolicy\n\n\n# Experimental distributed execution impl; enable with \"use_exec_api\": True.\ndef execution_plan(workers, config):\n # Collects experiences in parallel from multiple RolloutWorker actors.\n rollouts = ParallelRollouts(workers, mode=\"bulk_sync\")\n\n # Combine experiences batches until we hit `train_batch_size` in size.\n # Then, train the policy on those experiences and update the workers.\n train_op = rollouts \\\n .combine(ConcatBatches(\n min_batch_size=config[\"train_batch_size\"])) \\\n .for_each(TrainOneStep(workers))\n\n # Add on the standard episode reward, etc. metrics reporting. 
This returns\n    # a LocalIterator[metrics_dict] representing metrics for each train step.\n    return StandardMetricsReporting(train_op, workers, config)\n\n\nPGTrainer = build_trainer(\n    name=\"PG\",\n    default_config=DEFAULT_CONFIG,\n    default_policy=PGTFPolicy,\n    get_policy_class=get_policy_class,\n    execution_plan=execution_plan)\n","sub_path":"rllib/agents/pg/pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"626577006","text":"import sys \nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.append(os.path.abspath(\"/home/matt/Documents/3f8/python\"))\nfrom inference_functions import *\n\n# Check Arguments\nif len(sys.argv) != 2:\n    print(\"Usage: {} l\".format(sys.argv[0]))\n    sys.exit(1)\n\n# Read hyper parameter\nl = float(sys.argv[1])\n\n# Parameters\nif(l==0.01):\n    eta = 0.018 # Learning rate\nelif (l==1): # elif, so the l==0.01 setting above is not overwritten by the else branch\n    eta = 0.001\nelse:\n    eta = 0.01\nsteps = 1000 # Training steps\nw = np.ones(801) # Weights\nll = np.zeros(steps) # Average LL Evolution\n\n# Load training data\nX_train = np.loadtxt('X_train.txt')\ny_train = np.loadtxt('y_train.txt')\n\n#Load test data\nX_test = np.loadtxt('X_test.txt')\ny_test = np.loadtxt('y_test.txt')\n\nX = np.loadtxt('X.txt')\ny = np.loadtxt('y.txt')\n\n# Expand using RBFs\nX_train_expanded = expand_inputs(l, X_train, X_train)\nX_train_expanded = np.insert(X_train_expanded,800,1,1)\nX_test_expanded = expand_inputs(l, X_test, X_train)\nX_test_expanded = np.insert(X_test_expanded,800,1,1)\nX_expanded = expand_inputs(l, X, X_train)\nX_expanded = np.insert(X_expanded,800,1,1)\n\n# Train Weights\nfor i in range (0, steps):\n    \n    # Compute dL(w)/dw\n    dw = np.dot(np.transpose(X_train_expanded),y_train-logistic(np.dot(X_train_expanded,w)))\n    \n    # Perform Gradient Ascent\n    w = w + eta*dw\n    \n    # Update LL Evolution\n    ll[i] = compute_average_ll(X_train_expanded,y_train,w)\n    \n# Plot LL evolution\nprint(\"FINAL AVG TRAIN LL =\", ll[steps-1])\nplot_ll(ll)\n\n# Plot Classification Regions\nplot_expanded_predictive_distribution(X_train,X_train,y_train,w,l)\nplot_expanded_predictive_distribution(X_test,X_train,y_test,w,l)\nplot_expanded_predictive_distribution(X,X_train,y,w,l)\n\n","sub_path":"python/ex_g.py","file_name":"ex_g.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123499869","text":"import math\n\n\n# Currency value formatting\n\ndef formatar(valor):\n    decimal_ = valor - math.floor(valor)\n    if decimal_ < 0.5:\n        hundred = math.floor(decimal_ * 100)\n    else:\n        hundred = math.ceil(decimal_ * 100)\n    print(hundred)\n    if decimal_ == 0.0:\n        return f\"R${str(valor).replace('.', ',')}0\"\n    elif hundred % 10 == 0:\n        return f\"R${str(valor).replace('.', ',')}0\"\n    else:\n        return f\"R${str(valor).replace('.', ',')}\"\n","sub_path":"pacotes/utils/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549489146","text":"import cv2\nimport numpy as np\n\nf =open('temporalROI.txt','r') # open the file\nline = f.readline() \n# read the line\nroi_start, roi_end = line.split() \n# split the line into individual text fragments\nroi_start =int(roi_start) \n# convert to int\nroi_end =int(roi_end)\n\n# Counters\nTP = 0\nFN = 0\nFP = 0\n# TN = 0\n\n\nI = cv2.imread('input/in%06d.jpg' %300)\nIG_t = cv2.cvtColor(I, 
cv2.COLOR_BGR2GRAY)\nfor i in range(roi_start, roi_end):\n    I = cv2.imread('input/in%06d.jpg' % i)\n    IG_t2 = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n    B = cv2.threshold(cv2.absdiff(IG_t,IG_t2), 12, 255, cv2.THRESH_BINARY)\n    B = B[1]\n    kernel = np.ones((3, 3), np.uint8)\n    erosion = cv2.erode(B, kernel, iterations=1)\n    dilation = cv2.dilate(erosion, kernel, iterations=1)\n    cv2.imshow(\"I1\", dilation)\n    medianBlur_img = cv2.medianBlur(dilation,5)\n    cv2.imshow(\"I2\", medianBlur_img)\n    cv2.waitKey(1)\n    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(medianBlur_img)\n    cv2.imshow(\"Labels\", np.uint8(labels / stats.shape[0] * 255))\n    if (stats.shape[0] > 1): # are there any objects?\n        tab = stats[1:,4] # take column 4 (areas) without the first element\n        pi = np.argmax( tab )# find the index of the largest element\n        pi = pi + 1 # increment because we want the index in stats, not in tab\n        # draw the bbox\n        cv2.rectangle(I,(stats[pi,0],stats[pi,1]),(stats[pi,0]+stats[pi,2],stats[pi,1]+stats[pi,3]),(255,0,0),2)\n        # print the area and the number of the largest element\n        cv2.putText(I,\"%f\" % stats[pi,4],(stats[pi,0],stats[pi,1]),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,0,0))\n        cv2.putText(I,\"%d\" %pi,(np.int(centroids[pi,0]),np.int(centroids[pi,1])),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\n    cv2.imshow(\"I color\", I)\n    cv2.imshow(\"bin\", B)\n    IG_t = IG_t2\n\n    GTB = cv2.imread('groundtruth/gt%06d.png' %i)\n    GTB_G = cv2.cvtColor(GTB, cv2.COLOR_BGR2GRAY)\n    # element-wise logical AND of the corresponding matrix elements\n\n    TP_M = np.logical_and(( medianBlur_img == 255), (GTB_G == 255))\n    FN_M = np.logical_and((medianBlur_img == 0), (GTB_G == 255))\n    FP_M = np.logical_and((medianBlur_img == 255), (GTB_G != 255))\n\n    TP_S = np.sum(TP_M)\n    FN_S = np.sum(FN_M)\n    FP_S = np.sum(FP_M)\n    # sum of the elements in the matrix\n    TP = TP + TP_S\n    FN = FN + FN_S\n    FP = FP + FP_S\n# Computed metrics\nR = TP/(TP + FN)\nP = TP/(TP + FP)\nF1 = 2*P*R/(P + R)\n\nprint(R)\nprint(P)\nprint(F1)\n\n","sub_path":"lab2_Foreground_object_detection/pedestrians/lab2_2.py","file_name":"lab2_2.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223019178","text":"from dataset import test_data, test_labels\nfrom dataexplore import train_data, train_labels\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport numpy as np\n\n#################################################################\n# Model construction\n#################################################################\n\n# As expected, a neural network is built by stacking layers\n# Considerations\n# 1. How many layers should the model use?\n# 2. How many hidden units should each layer use?\n# Input data in this example = arrays of word indices\n# Labels = 0 or 1\n\n# The input size is the size of the vocabulary used for the movie review dataset (10000 words)\nvocab_size = 10000\n\nmodel = keras.Sequential()\nmodel.add(keras.layers.Embedding(vocab_size, 16, input_shape=(None, )))\nmodel.add(keras.layers.GlobalAveragePooling1D())\nmodel.add(keras.layers.Dense(16, activation='relu'))\nmodel.add(keras.layers.Dense(1, activation='sigmoid'))\n\nmodel.summary()\n\n# Configure the optimizer and loss function for the model\n\nmodel.compile(optimizer='adam',\n              loss='binary_crossentropy',\n              metrics=['accuracy'])\n\n# Building a validation set\n# Split 10000 samples off the original train_data to make a validation set\n# test_data is not used here because the goal is to develop and tune the model with train_data only\n\nx_val = train_data[:10000]\npartial_x_train = train_data[10000:]\n\ny_val = train_labels[:10000]\npartial_y_train = train_labels[10000:]\n\n#################################################################\n# Model training\n#################################################################\n\n# Train for 40 epochs with mini-batches of 512 samples\n# That means iterating 40 times over every sample in the x_train and y_train tensors\n\nhistory = model.fit(partial_x_train,\n                    partial_y_train,\n                    epochs=40,\n                    batch_size=512,\n                    validation_data=(x_val, y_val),\n                    verbose=1)\n\n#################################################################\n# Model evaluation\n#################################################################\n\n# Check the model's performance\n# Returns the loss value and accuracy (loss = error, so lower is better)\n\nresults = model.evaluate(test_data, test_labels, verbose=2)\n\nprint(results)\n","sub_path":"Text classification/python/model_build.py","file_name":"model_build.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"20307173","text":"import os\nimport time\n\n\ndef main():\n    target = os.sys.argv[1]\n    wait_time = int(os.sys.argv[2])\n    while True:\n        if os.path.exists(target):\n            print(f'Directory exists, it will be removed in: {wait_time} seconds')\n            time.sleep(wait_time)\n        else:\n            os.makedirs(target)\n            print(f'Creating directory {target}')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python_1/day_3/mknewdir.py","file_name":"mknewdir.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117654678","text":"\n\nfrom xai.brain.wordbase.nouns._carving import _CARVING\n\n#class header\nclass _CARVINGS(_CARVING, ):\n\tdef __init__(self,): \n\t\t_CARVING.__init__(self)\n\t\tself.name = \"CARVINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"carving\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_carvings.py","file_name":"_carvings.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634516962","text":"import re\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import TweetTokenizer\nimport string\n\ndef preprocess_tweet(tweet):\n    '''\n    tweet = string\n    returns a list with the tweet words preprocessed\n    '''\n\n    '''Cleaning tweet'''\n    tweet2 = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', tweet)\n    tweet2 = re.sub(r'#', '', tweet2)\n    tweet2 = re.sub(r'\\$\\w*', '', tweet2)\n    tweet2 = re.sub(r'^RT[\\s]+', '', tweet2)\n\n    '''Tokenize'''\n    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,reduce_len=True)\n    tweet_tokens = tokenizer.tokenize(tweet2)\n\n    '''Stop words'''\n    
stopwords_english = stopwords.words('english')\n tweet_clean = []\n for word in tweet_tokens:\n if word not in stopwords_english:\n tweet_clean.append(word)\n\n '''Stemming'''\n stemmer = PorterStemmer()\n tweet_stemmed = []\n for word in tweet_clean:\n stem_word = stemmer.stem(word)\n tweet_stemmed.append(stem_word)\n\n return tweet_stemmed\n \ndef get_freq(tweets, labels):\n '''\n tweets = list of tweets\n labels = list of tweets labels\n returns a dictionary with frequencies of each word for each label\n '''\n freq = {}\n labels = labels.tolist()\n \n for tweet, y in zip(tweets, labels):\n for word in preprocess_tweet(tweet):\n pair = (word, y)\n freq[pair] = freq.get(pair, 0) + 1\n \n return freq\n\ndef plot_vectors(M):\n \n rows,cols = M.T.shape\n maxes = 1.1*np.amax(abs(M), axis = 0)\n \n ax = plt.axes()\n\n for i,l in enumerate(range(0,cols)):\n ax.arrow(0,0,M[i,0],M[i,1],head_width=0.1,head_length=0.3,color = \"k\")\n ax.annotate(\"Vector {}\".format(i),(M[i,0],M[i,1]))\n\n plt.plot(0,0,'ok')\n plt.xlim([-maxes[0],maxes[0]])\n plt.ylim([-maxes[1],maxes[1]])\n plt.grid(b=True, which='major')\n plt.show()\n\ndef cosine(v,w):\n dot_product = np.dot(v,w)\n norm_v = np.linalg.norm(v)\n norm_w = np.linalg.norm(w)\n cos = dot_product/(norm_v*norm_w)\n return cos\n\ndef euclidean(v,w):\n distance = np.linalg.norm(v - w)\n return distance\n\ndef assign_unk(word):\n \n punct = set(string.punctuation)\n digit = set(string.digits)\n upper = set(string.ascii_uppercase)\n \n noun_suffix = [\"action\", \"age\", \"ance\", \"cy\", \"dom\", \"ee\", \"ence\", \"er\", \"hood\", \"ion\", \"ism\", \"ist\", \"ity\", \"ling\", \"ment\", \"ness\", \"or\", \"ry\", \"scape\", \"ship\", \"ty\"]\n verb_suffix = [\"ate\", \"ify\", \"ise\", \"ize\"]\n adj_suffix = [\"able\", \"ese\", \"ful\", \"i\", \"ian\", \"ible\", \"ic\", \"ish\", \"ive\", \"less\", \"ly\", \"ous\"]\n adv_suffix = [\"ward\", \"wards\", \"wise\"]\n \n for letter in word:\n if letter in punct:\n return \"--unk_punc--\"\n \n if letter in digit:\n return \"--unk_digit--\"\n \n if letter in upper:\n return \"--unk_upper--\"\n \n for noun in noun_suffix:\n if word.endswith(noun):\n return \"--unk_noun--\"\n \n for verb in verb_suffix:\n if word.endswith(verb):\n return \"--unk_verb--\"\n \n for adj in adj_suffix:\n if word.endswith(adj):\n return \"--unk_adj--\"\n \n for adv in adv_suffix:\n if word.endswith(adv):\n return \"--unk_adv--\"\n \n else:\n return \"--unk--\"\n \ndef get_word_tag(line, vocab):\n if not line.split():\n word = \"--n--\"\n tag = \"--s--\"\n else:\n word, tag = line.split()\n if word not in vocab:\n word = assign_unk(word)\n return word, tag\n\ndef preprocess(vocab, data_fp):\n \"\"\"\n Preprocess data\n \"\"\"\n orig = []\n prep = []\n\n # Read data\n with open(data_fp, \"r\") as data_file:\n\n for cnt, word in enumerate(data_file):\n\n # End of sentence\n if not word.split():\n orig.append(word.strip())\n word = \"--n--\"\n prep.append(word)\n continue\n\n # Handle unknown words\n elif word.strip() not in vocab:\n orig.append(word.strip())\n word = assign_unk(word)\n prep.append(word)\n continue\n\n else:\n orig.append(word.strip())\n prep.append(word.strip())\n\n assert(len(orig) == len(open(data_fp, \"r\").readlines()))\n assert(len(prep) == len(open(data_fp, \"r\").readlines()))\n\n return orig, prep","sub_path":"C3 - Natural Language Processing with Sequence Models/Week 
1/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539791959","text":"import random\nimport os\nimport logging\n\nlogging.basicConfig(filename = 'dgame.log', level = logging.DEBUG)\n\n\nCELLS = [(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),\n (0,1),(1,1),(2,1),(3,1),(4,1),(5,1),\n (0,2),(1,2),(2,2),(3,2),(4,2),(5,2),\n (0,3),(1,3),(2,3),(3,3),(4,3),(5,3),\n (0,4),(1,4),(2,4),(3,4),(4,4),(5,4),\n (0,5),(1,5),(2,5),(3,5),(4,5),(5,5)]\n\n\ndef clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_locations():\n return random.sample(CELLS,3)\n\ndef move_player(player,move):\n\n #bounds y == 5, y == 0, x == 5, x == 0\n x,y = player\n if move == 'LEFT':\n x -=1\n if move == 'RIGHT':\n x += 1\n if move == 'UP':\n y -= 1\n if move == 'DOWN':\n y += 1\n\n return x,y\n\n\ndef get_moves(player):\n moves = ['LEFT','RIGHT','UP','DOWN']\n\n x,y = player\n if x == 0:\n moves.remove('LEFT')\n if x == 5:\n moves.remove('RIGHT')\n if y == 0:\n moves.remove('UP')\n if y == 5:\n moves.remove('DOWN')\n return moves\n\ndef draw_map(player):\n print('_'*6)\n title = '|{}'\n\n for cell in CELLS:\n x,y = cell\n if x < 5:\n line_end = ''\n if cell == player:\n output = title.format('X')\n else:\n output = title.format('_')\n else:\n line_end = '\\n'\n if cell == player:\n output = title.format('X|')\n else:\n output = title.format('_|')\n print(output,end=line_end)\n\n\n\ndef game_loop():\n\n monster, door, player = get_locations()\n playing = True\n logging.info('monster: {}: door: {}: player: {}'.format(\n monster, door, player))\n\n while playing:\n clear_screen()\n draw_map(player)\n valid_moves = get_moves(player)\n print('You are currently in room {}'.format(player)) #fill player\n print('You can move {}'.format(', '.join(valid_moves))) # available moves\n print('QUITTERS QUIT')\n move = input('> ')\n move = move.upper()\n\n if move == 'QUIT':\n print('PCE')\n break\n if move in valid_moves:\n player = move_player(player,move)\n if player == monster:\n print('You are DEAD')\n playing = False\n if player == door:\n print('You Won MOFO')\n playing = False\n else:\n input('Walls hurt')\n else:\n if input('Play again [Y/N] ').lower() != 'n':\n game_loop()\n\n\n\nclear_screen()\nprint('WELCOME TO THE DUNGEON')\ninput('Press Return to Start')\nclear_screen()\ngame_loop()\n\n\n\n #good move?\n #bad move\n #winnger\n #monster\n #otherwise loop back\n","sub_path":"dungeon_game_logging.py","file_name":"dungeon_game_logging.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645862960","text":"from selenium import webdriver\nimport os\n\nclass ChromePage():\n def test(self):\n driverLocation = \"/Users/nkala/PycharmProjects/chromedriver.exe\"\n os.environ[\"webdriver.chrome.driver\"] = driverLocation\n driver = webdriver.Chrome(driverLocation)\n driver.get('http://google.com')\n\ncc = ChromePage()\ncc.test()\n\n\n\n\n\n\n","sub_path":"Learning/PracticeApps/Chromebrowser.py","file_name":"Chromebrowser.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173099016","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Extract yelp reviews from users which are in the given yelp user file\"\"\"\nimport json\nimport argparse\nfrom common_functions import get_user_ids\n\n\ndef 
copy_reviews_from_users(review_file, output_file, user_file):\n    \"\"\"\n    copies the reviews of the given users from a given review file to a given output file\n    :param review_file: the file where all reviews are stored\n    :param output_file: the file where the selected reviews of the given users should be stored\n    :param user_file: the file where the user data of the users whose reviews should be selected is stored\n    \"\"\"\n    user_ids = get_user_ids(user_file)\n    with open(review_file, \"rb\") as review_file, open(output_file, \"wb\") as review_sample_file:\n        for line in review_file:\n            review = json.loads(line)\n            if review[\"user_id\"] in user_ids:\n                review_sample_file.write(line)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Writes all reviews from users which are in a given users file to \"\n                                                 \"a given file\")\n    parser.add_argument(\"user_file\", type=str,\n                        help=\"path of the file where the user data is stored\")\n    parser.add_argument(\"review_file\", type=str,\n                        help=\"path of file which contains the reviews\")\n    parser.add_argument(\"output_file\", type=str,\n                        help=\"path of file where the sample of reviews will be stored\")\n    args = parser.parse_args()\n    copy_reviews_from_users(args.review_file, args.output_file, args.user_file)","sub_path":"src/extract_reviews.py","file_name":"extract_reviews.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433482032","text":"\n'''\nCreated on July 29, 2016\n@author: 林智敏\n'''\nimport datetime\n\nimport pandas as pd\nimport numpy as np\nclass LinearRegression:\n\n    def MultMat(self,matA,matB):\n        '''\n        Matrix multiplication\n        :param matA:\n        :param matB:\n        :return:\n        '''\n        res = [[0] * len(matB[0]) for i in range(len(matA))] # initialize the result matrix\n        for i in range(len(matA)):\n            for j in range(len(matB[0])):\n                for k in range(len(matB)):\n                    res[i][j] += matA[i][k] * matB[k][j] # as in the mathematical definition, multiply the matching elements of a and b and put the product in the corresponding cell of the result\n        return res\n\n    def loadDataset(self,file):\n        '''\n        Load the dataset and split it into a feature set and a label set\n        :return:\n        '''\n        dataset = []# feature set\n        labelset = []# label set\n\n        for line in file.readlines():\n            lineArr = line.strip().split(',')\n            res = [float(lineArr[i]) for i in range(1, len(lineArr)-1)]\n            dataset.append(res)\n            labelset.append(float(lineArr[-1]))\n        arrset = np.array(dataset)\n        return arrset, labelset\n\n    def normSet(self,dataset):\n        '''\n        Normalize the feature matrix\n        :param dataset:\n        :return:\n        '''\n        min = dataset.min(0) # minimum of each column\n        max = dataset.max(0) # maximum of each column\n        ranges = max - min # range of each column\n        normset = np.zeros(np.shape(dataset)) # initialize the normalized matrix\n        m = dataset.shape[0] # number of rows of the matrix\n        normset = dataset - np.tile(min, (m, 1)) # subtract each column's minimum from every element\n        normset = normset / np.tile(ranges, (m, 1)) # divide by each column's range\n        return normset\n\n    def trans(self,mat):\n        '''\n        Matrix transpose\n        :param mat:\n        :return:\n        '''\n        if type(mat)==list: # check whether it is a row vector\n            tranmat = [[] for i in mat] # initialize the matrix\n            for i in range(len(mat)):\n                tranmat[i].append(mat[i]) # perform the transpose\n        else:\n            tranmat = [[] for i in mat[0]] # initialize the transpose: its row count equals the column count of the original matrix\n            for i in mat: # iterate over every row of the original matrix\n                for j in range(len(i)):\n                    tranmat[j].append(i[j]) # perform the transpose\n        return tranmat\n\n    def least_square(self, dataset, label):\n        '''\n        Solve for the parameter w with the least squares method\n        :param dataset:\n        :param label:\n        :return:\n        '''\n        m = dataset.shape[0] # number of rows of the matrix\n        w = np.zeros((m, 1)) # initialize the parameter w\n        x0 = np.ones((m, 1)) # create a column vector of ones\n        x = np.c_[x0, dataset] # prepend the column of ones to the feature matrix\n        w = self.MultMat(np.linalg.inv(self.MultMat(self.trans(x), x)), self.MultMat(self.trans(x), self.trans(label)))# compute w from the closed-form formula\n        return w\n\n    def test(self, w, testset, testlabel):\n        '''\n        Test and evaluate\n        :param w:\n        :param testset:\n        :param testlabel:\n        :return:\n        '''\n        m = testset.shape[0] # number of rows of the matrix\n        x0 = np.ones((m, 1)) # create a column vector of ones\n        testset = np.c_[x0, testset] # prepend the column of ones to the feature matrix\n        h = self.MultMat(testset, w) # the hypothesized linear function\n        h = np.mat(h)# convert to a matrix\n        testlabel = np.mat(self.trans(testlabel)) # turn the transposed label set into a matrix\n        j = abs(h - testlabel) # absolute error matrix\n        error = j.sum()/len(testlabel) # mean absolute error\n        return error\n\n\n\nif __name__ == \"__main__\":\n    l = LinearRegression()\n    with open('C:\\\\Users\\\\chizhu\\\\Desktop\\\\7.28数据集\\\\OnlineNewsPopularity\\\\OnlineNewsPopularity.train') as train:\n        dataset,label = l.loadDataset(train) # load the training set\n        normset = l.normSet(dataset) # normalize the data\n        starttime1 = datetime.datetime.now()\n        w = l.least_square(normset, label) # train the parameter w\n        endtime1 = datetime.datetime.now()\n        print(\"Training time=%d s\" % ((endtime1 - starttime1).seconds))\n    with open('C:\\\\Users\\\\chizhu\\\\Desktop\\\\7.28数据集\\\\OnlineNewsPopularity\\\\OnlineNewsPopularity.test') as test:\n        testset, testlabel = l.loadDataset(test) # load the test set\n        normtestset = l.normSet(testset) # normalize\n        starttime2 = datetime.datetime.now()\n        error = l.test(w, normtestset, testlabel) # evaluate with the mean absolute error\n        endtime2 = datetime.datetime.now()\n        print(\"Test time=%d s\" % ((endtime2 - starttime2).seconds))\n        print('Error=%f'%(error))\n    '''\n    [out]:\n    ####################\n    The full dataset is used here\n    ####################\n    Training time=78 s\n    Test time=1 s\n    Error=8906183.157197\n    '''\n\n\n\n\n\n    \n\n","sub_path":"src/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643575722","text":"\r\nfrom validation.validation import TeacherException\r\nfrom domains.Lab import Lab\r\nclass Console:\r\n\r\n\r\n    def __init__(self,ctrl):\r\n\r\n        self.__ctrl=ctrl\r\n\r\n    def show_students(self):\r\n\r\n        students=self.__ctrl.get_all()\r\n\r\n        for student in students:\r\n\r\n            print(student)\r\n\r\n    def show_labs(self):\r\n\r\n        labs=self.__ctrl.get_allLabs()\r\n\r\n        for lab in labs:\r\n            print (lab)\r\n\r\n    def add(self):\r\n\r\n        try:\r\n            studentId=int(input(\"Introduce student ID:\"))\r\n            labNumber=int(input(\"Introduce Lab Number:\"))\r\n            problemNumber=input(\"Introduce problem number:\")\r\n\r\n            self.__ctrl.add(Lab(studentId,labNumber,problemNumber))\r\n\r\n        except ValueError:\r\n            print(\"You have not introduced an integer for id or lab number!!!\")\r\n        except TeacherException as e:\r\n            print(e)\r\n\r\n    def oneStud_allLabs(self):\r\n\r\n        try:\r\n            studId=int(input(\"Introduce student's id:\"))\r\n            labs=self.__ctrl.oneStud_allLabs(studId)\r\n            for lab in labs:\r\n                print(lab)\r\n        except ValueError:\r\n            print(\"You have not introduced an integer id!!!\")\r\n        except TeacherException as e :\r\n            print(e)\r\n\r\n    def allStud_oneLab(self):\r\n\r\n        try:\r\n\r\n            labNr=int(input(\"Introduce lab number(integer):\"))\r\n            students=self.__ctrl.allStud_oneLab(labNr)\r\n            for student in students:\r\n                print(student)\r\n        except ValueError:\r\n            print(\"You have not introduced an integer!!!\")\r\n\r\n\r\n    def run(self):\r\n\r\n        menu=\"\"\"\r\n\r\n        1---Show all students\r\n        2---Assign a lab activity\r\n        3---Show lab activities for a given student\r\n        4---Show all students with the lab assignment for a given lab number\r\n        5---Show all labs\r\n        0---Exit\r\n\r\n        \"\"\"\r\n\r\n        option = -1\r\n\r\n        while option != 0 :\r\n\r\n            print (menu)\r\n\r\n            option=input(\"Introduce your option:\")\r\n\r\n            if option.strip() == \"1\":\r\n                self.show_students()\r\n            elif option.strip() == 
\"2\" :\r\n self.add()\r\n elif option.strip() == \"5\" :\r\n self.show_labs()\r\n elif option.strip() == \"0\":\r\n self.__ctrl.save()\r\n exit()\r\n elif option.strip() == \"3\":\r\n self.oneStud_allLabs()\r\n elif option.strip() == \"4\":\r\n self.allStud_oneLab()\r\n else :\r\n print(\"Not valid option!!!\")\r\n continue\r\n\r\n\r\n\r\n\r\n","sub_path":"Old works of python/Manage Lab Activity/ui/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175155159","text":"# -*- coding: utf-8 -*-\nimport traceback\nimport sys\nfrom flask import make_response, jsonify, request, json\nfrom flask_restful import abort, Resource\nfrom flask_restful_swagger_2 import swagger\nimport msgpack\nimport base64\nfrom .definitions import UdfData, UdfCode, UdfRequest, ErrorResponse\nfrom ..api.run_code import run_json_user_code\n\n__license__ = \"Apache License, Version 2.0\"\n__author__ = \"Soeren Gebbert\"\n__copyright__ = \"Copyright 2018, Soeren Gebbert\"\n__maintainer__ = \"Soeren Gebbert\"\n__email__ = \"soerengebbert@googlemail.com\"\n\n\"\"\"\nThere are several different approached available in python that can be implemented:\n- UBJSON https://en.wikipedia.org/wiki/UBJSON\n- BSON https://en.wikipedia.org/wiki/BSON\n- MessagePack https://en.wikipedia.org/wiki/MessagePack\n\nImportant is the support for arrays with integer and floating point numbers that are used in\nxarray and numpy. Support for timestamps is required as well. For structured data is the support of lists\nand maps important.\n\n array integer float map time\n\nMessagePack y y y y y\nBJSON y y y y n\nUBJSON n y y n n\n\nBased on these requirement it seems that MessagePack is the most potent candidate to use\nfor serialization and supports many different languages.\nMessagePack is available here: https://msgpack.org/\n\nUsing Messagepack is quite easy:\n\nIn [1]: import msgpack\nIn [2]: import base64\nIn [3]: d = {1:[1,2,3,4,5,6], \"w\":\"fffff\", \"d\":{\"d\":\"d\"}}\nIn [4]: d\nOut[4]: {1: [1, 2, 3, 4, 5, 6], 'w': 'fffff', 'd': {'d': 'd'}}\nIn [5]: msgpack.packb(d)\nOut[5]: b'\\x83\\x01\\x96\\x01\\x02\\x03\\x04\\x05\\x06\\xa1w\\xa5fffff\\xa1d\\x81\\xa1d\\xa1d'\nIn [6]: p = msgpack.packb(d)\nIn [7]: base64.b64encode(p)\nOut[7]: b'gwGWAQIDBAUGoXelZmZmZmahZIGhZKFk'\nIn [8]: t = base64.b64encode(p)\nIn [9]: base64.b64decode(t)\nOut[9]: b'\\x83\\x01\\x96\\x01\\x02\\x03\\x04\\x05\\x06\\xa1w\\xa5fffff\\xa1d\\x81\\xa1d\\xa1d'\nIn [10]: msgpack.unpackb(base64.b64decode(t))\nOut[10]: {1: [1, 2, 3, 4, 5, 6], b'w': b'fffff', b'd': {b'd': b'd'}}\n\n\"\"\"\n\n\nPOST_JOBS_DOC_UDF = {\n \"description\": \"Run a Python user defined function (UDF) on the provided data\",\n \"tags\": [\"UDF\"],\n \"parameters\": [\n {\n \"name\": \"data\",\n \"in\": \"body\",\n 'required': True,\n \"description\": \"The UDF Python source code and data as JSON definition to process\",\n \"schema\": UdfRequest\n }\n ],\n 'consumes':['application/json'],\n 'produces':[\"application/json\"],\n \"responses\": {\n \"200\": {\n \"description\": \"The result of the UDF computation.\",\n \"schema\": UdfData\n },\n \"400\": {\n \"description\": \"The error message.\",\n \"schema\": ErrorResponse\n }\n }\n}\n\n\nclass Udf(Resource):\n @swagger.doc(POST_JOBS_DOC_UDF)\n def post(self):\n \"\"\"Execute the UDF code\n\n \"\"\"\n\n try:\n if request.is_json is False:\n raise Exception(\"Missing JSON in request\")\n\n json_data = request.get_json()\n result = 
run_json_user_code(dict_data=json_data)\n except Exception:\n e_type, e_value, e_tb = sys.exc_info()\n response = ErrorResponse(message=str(e_value), traceback=str(traceback.format_tb(e_tb)))\n return make_response(jsonify(response), 400)\n\n return make_response(jsonify(result), 200)\n\n\nPOST_JOBS_DOC_UDF_MESSAGE_PACK = {\n \"description\": \"Run a Python user defined function (UDF) on the provided data that are base64 encoded message pack\"\n \"binary data\",\n \"tags\": [\"UDF\"],\n \"parameters\": [\n {\n \"name\": \"data\",\n \"in\": \"body\",\n 'required': True,\n \"description\": \"The UDF Python source code and data as base64 encoded message pack\",\n \"schema\": UdfRequest\n }\n ],\n 'consumes':['application/base64'],\n 'produces':[\"application/base64\"],\n \"responses\": {\n \"200\": {\n \"description\": \"The result of the UDF computation as base64 encoded message pack.\",\n \"schema\": UdfData\n },\n \"400\": {\n \"description\": \"The error message.\",\n \"schema\": ErrorResponse\n }\n }\n}\n\n\nclass UdfMessagePack(Resource):\n @swagger.doc(POST_JOBS_DOC_UDF_MESSAGE_PACK)\n def post(self):\n \"\"\"Execute the UDF code that is encoded as base64 message pack binary format\n \"\"\"\n\n try:\n if request.is_json is True:\n raise Exception(\"JSON is not supported in request. A base64 encoded message pack blob is required.\")\n\n blob = base64.b64decode(request.data)\n dict_data = msgpack.unpackb(blob, raw=False)\n result = run_json_user_code(dict_data=dict_data)\n except Exception:\n e_type, e_value, e_tb = sys.exc_info()\n response = ErrorResponse(message=str(e_value), traceback=str(traceback.format_tb(e_tb)))\n return make_response(jsonify(response), 400)\n\n result = base64.b64encode(msgpack.packb(result))\n return make_response(result, 200)\n","sub_path":"src/openeo_udf/server/udf.py","file_name":"udf.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620688379","text":" \nfrom __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n# Importing the Kratos Library \nimport KratosMultiphysics \nimport KratosMultiphysics.SolidMechanicsApplication\nimport KratosMultiphysics.StructuralMechanicsApplication\nimport KratosMultiphysics.ContactStructuralMechanicsApplication\n\nKratosMultiphysics.CheckForPreviousImport()\n\ndef CalculateLastIdCondition(model_part):\n cond_id = 0\n for condition in model_part.Conditions:\n cond_id += 1\n\n return cond_id\n\ndef Factory(settings, Model):\n if(type(settings) != KratosMultiphysics.Parameters):\n raise Exception(\"Expected input shall be a Parameters object, encapsulating a json string\")\n return ContactProcess(Model, settings[\"Parameters\"])\n\nclass ContactProcess(KratosMultiphysics.Process):\n \n def __init__(self,model_part,params):\n \n ## Settings string in json format\n default_parameters = KratosMultiphysics.Parameters(\"\"\"\n {\n \"model_part_name\" : \"\",\n \"origin_model_part_name\" : \"\",\n \"destination_model_part_name\" : \"\",\n \"contact_type\" : \"MortarMethod\",\n \"search_factor\" : 1.5,\n \"active_check_factor\" : 0.01,\n \"max_number_results\" : 1000,\n \"augmentation_normal\" : 0.0e0,\n \"augmentation_tangent\" : 0.0e0,\n \"epsilon_DLM\" : 1.0e3,\n \"double_LM\" : false, \n \"simplify_geometry\" : false,\n \"type_search\" : \"InRadius\",\n \"integration_order\" : 5\n }\n \"\"\")\n\n ## Overwrite the default settings with user-provided 
parameters\n self.params = params\n self.params.ValidateAndAssignDefaults(default_parameters)\n \n self.main_model_part = model_part[self.params[\"model_part_name\"].GetString()]\n\n self.o_model_part = model_part[self.params[\"origin_model_part_name\"].GetString()]\n self.d_model_part = model_part[self.params[\"destination_model_part_name\"].GetString()]\n \n self.o_interface = self.o_model_part.GetSubModelPart(\"Interface\")\n self.d_interface = self.d_model_part.GetSubModelPart(\"Interface\")\n \n self.search_factor = self.params[\"search_factor\"].GetDouble() \n self.active_check_factor = self.params[\"active_check_factor\"].GetDouble() \n self.max_number_results = self.params[\"max_number_results\"].GetInt() \n self.augmentation_normal = self.params[\"augmentation_normal\"].GetDouble()\n self.augmentation_tangent = self.params[\"augmentation_tangent\"].GetDouble()\n self.epsilon_DLM = self.params[\"epsilon_DLM\"].GetDouble()\n self.consider_double_lm = self.params[\"double_LM\"].GetBool()\n self.simplify_geometry = self.params[\"simplify_geometry\"].GetBool()\n self.integration_order = self.params[\"integration_order\"].GetInt() \n if self.params[\"type_search\"].GetString() == \"InRadius\":\n self.type_search = 0\n \n def ExecuteInitialize(self):\n \n # Appending the conditions created to the computing_model_part\n computing_model_part = self.main_model_part.GetSubModelPart(\"computing_domain\")\n computing_model_part.CreateSubModelPart(\"Contact\")\n interface_computing_model_part = computing_model_part.GetSubModelPart(\"Contact\")\n if (self.consider_double_lm == True):\n interface_computing_model_part.Set(KratosMultiphysics.MODIFIED, True)\n \n for node in self.o_interface.Nodes:\n interface_computing_model_part.AddNode(node, 0) \n node.Set(KratosMultiphysics.INTERFACE,True)\n del(node)\n \n for node in self.d_interface.Nodes:\n interface_computing_model_part.AddNode(node, 0)\n node.Set(KratosMultiphysics.INTERFACE,True)\n del(node)\n \n self.Preprocess = KratosMultiphysics.ContactStructuralMechanicsApplication.InterfacePreprocessCondition()\n \n if self.params[\"contact_type\"].GetString() == \"MortarMethod\":\n condition_name = \"MortarContact\"\n elif self.params[\"contact_type\"].GetString() == \"NTN\":\n condition_name = \"NTNContact\"\n elif self.params[\"contact_type\"].GetString() == \"NTS\":\n condition_name = \"NTSContact\"\n \n #print(\"MODEL PART BEFORE CREATING INTERFACE\")\n #print(self.main_model_part) \n \n if (self.consider_double_lm == True):\n final_string = \"DLM\"\n else:\n final_string = \"\"\n \n # It should create the conditions automatically\n initial_id = CalculateLastIdCondition(self.main_model_part)\n self.Preprocess.GenerateInterfacePart(self.o_model_part, self.o_interface, condition_name, initial_id, final_string, self.simplify_geometry) \n initial_id = CalculateLastIdCondition(self.main_model_part)\n self.Preprocess.GenerateInterfacePart(self.d_model_part, self.d_interface, condition_name, initial_id, final_string, self.simplify_geometry) \n\n #print(\"MODEL PART AFTER CREATING INTERFACE\")\n #print(self.main_model_part)\n \n for cond in self.o_interface.Conditions:\n interface_computing_model_part.AddCondition(cond) \n del(cond)\n \n for cond in self.d_interface.Conditions:\n interface_computing_model_part.AddCondition(cond) \n del(cond)\n\n self.contact_search = KratosMultiphysics.ContactStructuralMechanicsApplication.TreeContactSearch(self.o_interface, self.d_interface, self.max_number_results)\n \n if self.params[\"contact_type\"].GetString() == 
\"MortarMethod\":\n ZeroVector = KratosMultiphysics.Vector(3) \n ZeroVector[0] = 0.0\n ZeroVector[1] = 0.0\n ZeroVector[2] = 0.0\n \n # Initilialize weighted variables and LM\n for node in self.d_interface.Nodes:\n node.SetValue(KratosMultiphysics.ContactStructuralMechanicsApplication.WEIGHTED_GAP, 0.0)\n node.SetValue(KratosMultiphysics.ContactStructuralMechanicsApplication.WEIGHTED_SLIP, 0.0)\n node.SetValue(KratosMultiphysics.ContactStructuralMechanicsApplication.WEIGHTED_FRICTION, 0.0)\n node.SetValue(KratosMultiphysics.ContactStructuralMechanicsApplication.AUXILIAR_ACTIVE, False)\n node.SetValue(KratosMultiphysics.ContactStructuralMechanicsApplication.AUXILIAR_SLIP, False)\n node.SetValue(KratosMultiphysics.NODAL_AREA, 0.0)\n node.SetValue(KratosMultiphysics.NORMAL, ZeroVector)\n node.SetValue(KratosMultiphysics.TANGENT_XI, ZeroVector)\n node.SetValue(KratosMultiphysics.TANGENT_ETA, ZeroVector)\n #node.Set(KratosMultiphysics.SLAVE, True)\n del node\n \n # Setting the master conditions \n for cond in self.o_interface.Nodes:\n cond.SetValue(KratosMultiphysics.NORMAL, ZeroVector) \n cond.SetValue(KratosMultiphysics.TANGENT_XI, ZeroVector) \n cond.SetValue(KratosMultiphysics.TANGENT_ETA, ZeroVector) \n #cond.Set(KratosMultiphysics.MASTER, True) # TODO: This is not supposed o be necessary\n del cond\n \n # Setting the slave conditions \n for cond in self.d_interface.Nodes:\n cond.SetValue(KratosMultiphysics.NORMAL, ZeroVector) \n cond.SetValue(KratosMultiphysics.TANGENT_XI, ZeroVector) \n cond.SetValue(KratosMultiphysics.TANGENT_ETA, ZeroVector) \n del cond\n \n self.contact_search.CreatePointListMortar()\n if (self.consider_double_lm == True):\n self.contact_search.InitializeMortarConditionsDLM(self.active_check_factor, self.epsilon_DLM, self.integration_order)\n else:\n self.contact_search.InitializeMortarConditions(self.active_check_factor, self.augmentation_normal, self.augmentation_tangent, self.integration_order)\n elif self.params[\"contact_type\"].GetString() == \"NTN\":\n self.contact_search.CreatePointListNTN()\n self.contact_search.InitializeNTNConditions()\n elif self.params[\"contact_type\"].GetString() == \"NTS\":\n self.contact_search.CreatePointListNTS()\n self.contact_search.InitializeNTSConditions()\n \n def ExecuteBeforeSolutionLoop(self):\n self.contact_search.TotalClearMortarConditions();\n \n def ExecuteInitializeSolutionStep(self):\n #for cond in self.d_interface.Conditions:\n #print(cond.Is(KratosMultiphysics.ACTIVE))\n \n if self.params[\"contact_type\"].GetString() == \"MortarMethod\": \n self.contact_search.UpdateMortarConditions(self.search_factor, self.type_search)\n #self.contact_search.CheckMortarConditions()\n elif self.params[\"contact_type\"].GetString() == \"NTN\":\n self.contact_search.CreateNTNConditions(self.search_factor, self.type_search)\n elif self.params[\"contact_type\"].GetString() == \"NTS\":\n self.contact_search.CreateNTSConditions(self.search_factor, self.type_search)\n \n #for cond in self.d_interface.Conditions:\n #print(cond.Is(KratosMultiphysics.ACTIVE))\n \n def ExecuteFinalizeSolutionStep(self):\n pass\n\n def ExecuteBeforeOutputStep(self):\n pass\n\n def ExecuteAfterOutputStep(self):\n if self.params[\"contact_type\"].GetString() == \"MortarMethod\":\n self.contact_search.UpdatePointListMortar()\n self.contact_search.PartialClearMortarConditions()\n \n #for cond in self.d_interface.Conditions:\n #print(cond.Is(KratosMultiphysics.ACTIVE))\n \n def ExecuteFinalize(self):\n 
pass\n","sub_path":"kratos/applications/ContactStructuralMechanicsApplication/python_scripts/contact_process.py","file_name":"contact_process.py","file_ext":"py","file_size_in_byte":10087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264271370","text":"from model import *\n\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nengine = create_engine('sqlite:///database.db')\nBase.metadata.create_all(engine)\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\ndef add_product(id,name,price,Picturelink,Description):\n\tproduct_object=Product(\n\t\tid=id,\t\n\t\tname=name , \n\t\tprice=price , \n\t\tPicturelink=Picturelink , \n\t\tDescription=Description)\n\tsession.add(product_object)\n\tsession.commit()\n\n#add_product(3,\"Bag\",99.9,\"bag.jpeg\",\"A white and black bag\")\t\n\"\"\"\ndef edit_product(name , Description ):\n\tproduct_object=session.query(Product).filter_by(name=name).first()\n\tproduct_object.Description = Description\n\tsession.commit()\nedit_product(\"something\" , \"be creative and describe it however you want\")\t\n\"\"\"\n\ndef delete_product(product_name):\n\tsession.query(Product).filter_by(name=product_name).delete()\n\tsession.commit()\n#delete_product(\"something\")\n\ndef query_all():\n\tproducts = session.query(Product).all() # renamed local: assigning to 'Product' shadowed the model class and raised UnboundLocalError\n\treturn products\n#print(query_all())\n\ndef query_by_id(id):\n\tproduct = session.query(Product).filter_by(id=id).first() # renamed local for the same reason\n\treturn product\n#print (query_by_id(\"something\"))\n\n\ndef Add_To_Cart(ProductID):\n\tcart_object = Cart(ProductID=ProductID\n\t                   )\n\tsession.add(cart_object)\n\tsession.commit()\n\n\n\t\t\t\n","sub_path":"templates/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"28370","text":"# @author : Himanshu Choudhary\n# @home : http://www.himanshuchoudhary.com\n# @git : https://bitbucket.org/himanshuchoudhary/\n\nfrom __future__ import print_function\nimport numpy as np\nimport pandas as pd\nfrom Utils import getIMDBTrainDataset, getIMDBTestDataset\n\nfrom nltk import word_tokenize\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom word2vec import W2V\n\nfrom sklearn.svm import LinearSVC\n\n# from keras.models import Sequential\n# from keras.layers import TimeDistributed, Dense, LSTM\n\nnp.random.seed(1)\n\nFILE_VOCAB = \"dataset/aclImdb/imdb.vocab\"\nFILE_W2V_MODEL = \"dataset/glove.6B.50d.txt\"\n# FILE_W2V_MODEL = \"output/imdb_trained.word2vec.model\"\nFILE_OUT = open(\"output/p3_e.txt\", 'a')\n\nDATASET_SIZE = 3000\nVECTOR_SIZE = 100\nMAX_REVIEW_LEN = 100\n\ndef tokenize(text):\n    return word_tokenize(text.decode('utf-8'))\n\ndef pad(tokens):\n    return np.array(tokens[:MAX_REVIEW_LEN] if len(tokens) > MAX_REVIEW_LEN else\n            np.concatenate((tokens, ['' for _ in range(MAX_REVIEW_LEN-len(tokens))])))\n\ndef weight(token):\n    if token in tfidf_weights:\n        return tfidf_weights[token]\n    return MAX_IDF\n\ndef weightVectorize(data):\n    return np.array([np.array([weight(token)*vectorizer.transform(token) for token in tokens]) for tokens in data])\n\ndataset_train = getIMDBTrainDataset(.8*DATASET_SIZE)\ndataset_test = getIMDBTestDataset(.2*DATASET_SIZE)\n\ntfidf_vectorizer = TfidfVectorizer(smooth_idf=True, min_df=1, max_df=.8)\ntfidf_vectorizer.fit(dataset_train.review)\n\nidf = tfidf_vectorizer.idf_\nMAX_IDF = max(idf)\nfeatures = tfidf_vectorizer.get_feature_names()\ntfidf_weights 
= dict(zip(tfidf_vectorizer.get_feature_names(), idf))\n\nreviews_train = dataset_train.review.map(tokenize)\nreviews_test = dataset_test.review.map(tokenize)\n\n# vectorizer = Word2Vec(pd.concat([reviews_train,reviews_test]),size=VECTOR_SIZE,window=2,min_count=1,workers=1)\n# vectorizer.train(reviews_train)\n\nvectorizer = W2V(vector_size=VECTOR_SIZE,file_vocab=FILE_VOCAB,file_model=FILE_W2V_MODEL)\n\nreviews_train = reviews_train.map(pad)\nreviews_test = reviews_test.map(pad)\n\nreviews_train = weightVectorize(reviews_train.values)\nreviews_test = weightVectorize(reviews_test.values)\n\nclassifier = LinearSVC(verbose=1)\nclassifier.fit(reviews_train.reshape(len(reviews_train),MAX_REVIEW_LEN*VECTOR_SIZE),dataset_train.polarity)\nscore = classifier.score(reviews_test.reshape((len(reviews_test),MAX_REVIEW_LEN*VECTOR_SIZE)),dataset_test.polarity)\n\n# model = Sequential()\n# model.add(LSTM(64,input_dim=VECTOR_SIZE,input_length=MAX_REVIEW_LEN,dropout_U=.2,dropout_W=.2))\n# model.add(Dense(1,activation='sigmoid'))\n# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n# model.fit(reviews_train,dataset_train.polarity,nb_epoch=20,batch_size=32)\n# score = model.evaluate(reviews_test,dataset_test.polarity)[1]\n\nprint(\"Total sentences in the dataset : {:d}\".format(DATASET_SIZE), file=FILE_OUT)\nprint(\"# of sentences used as training set : {:d}\".format(len(dataset_train)), file=FILE_OUT)\nprint(\"# of sentences used as testing set : {:d}\".format(len(dataset_test)), file=FILE_OUT)\nprint(\"Vector Size : {:d} Review Length : {:d}\".format(VECTOR_SIZE,MAX_REVIEW_LEN), file=FILE_OUT)\nprint(\"Accuracy : {:.2f}%\".format(score*100), file=FILE_OUT)\nprint(\"\\n\", file=FILE_OUT)\n","sub_path":"p3_e.py","file_name":"p3_e.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"157939440","text":"import time\nfrom tqdm import tqdm\n\nimport torch\n\n\ndef train(model, criterion, optimizer, dloader, args, verbose=False):\n    t_start = time.time()\n    device = next(model.parameters()).device # assumption: the model already lives on the target device\n    file_loss = {dloader.name: []} # per-loader loss history; was referenced below but never defined\n    epoch_loss = 0\n    for epoch in range(args.epochs):\n        tmp_file_loss = 0 # was accumulated below without ever being initialized\n        for batch in tqdm(dloader):\n            context_spec = batch[0][0]\n            bc_spec = batch[0][1]\n            reset = batch[1]\n            bc_target = batch[2]\n            if verbose:\n                print(type(batch))\n                print(len(batch))\n                print('context_spec:', context_spec.shape)\n                print('bc_spec: ', bc_spec.shape)\n                print('reset: ', reset)\n                print('bc_target: ', bc_target.shape)\n                input()\n            # Move to cuda device (if use_cuda)\n            context_spec = torch.tensor(context_spec).to(device)\n            bc_spec = torch.tensor(bc_spec).to(device)\n            bc_target = torch.tensor(bc_target, requires_grad=False).to(device)\n            output, hidden = model(context_spec, bc_spec)\n            loss = criterion(output, bc_target)\n            tmp_file_loss += loss.item()\n            # Update\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n        epoch_loss += tmp_file_loss\n        file_loss[dloader.name].append(tmp_file_loss)\n        print('{} Epoch: {}/{}, Loss: {}'.format(dloader.name,\n                                                 epoch,\n                                                 args.epochs,\n                                                 tmp_file_loss))\n\n    print('Epoch: {}/{}, Total Loss: {}, Avg Loss: {}'.format(epoch,\n                                                              args.epochs,\n                                                              epoch_loss,\n                                                              epoch_loss/len(dloader))) # 'dset' was undefined; average over the loader's batches\n    save_model(model, name='checkpoint/model_epoch_{}.pt'.format(epoch)) # save_model is assumed to be provided elsewhere in this project\n    t_end = time.time() - t_start\n    print('{} epochs took {} seconds'.format(args.epochs, 
t_end))\n","sub_path":"dialogAgent/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416590382","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Created By : Vítor Pereira\n# Created Date: 01-09-2021\n# version ='0.0.1'\n# ---------------------------------------------------------------------------\n\"\"\"Logistic regression module\"\"\"\n# ---------------------------------------------------------------------------\nfrom .model import Model\nfrom ..util import sigmoid, add_intersect\nimport numpy as np\n\n\nclass LogisticRegression(Model):\n\n def __init__(self,\n epochs:int=10000,\n lr:float=0.1,\n threshold:float=0.5,\n lbd:float=1\n ):\n \"\"\" Logistic regression model.\n\n :param bool gd: If True uses gradient descent (GD) to train the model\n otherwise uses closed form linear algebra. Default False.\n :param int epochs: Number of epochs for GD.\n :param float lr: Learning rate for GD. Default 0.1\n :param threshold: The decision threshold, a value in (0,1). Default 0.5\n :param float ldb: lambda for the regularization. Default 1.\n \"\"\"\n super(LogisticRegression, self).__init__()\n self.theta = None\n self.epochs = epochs\n self.lr = lr\n self.threshold=threshold\n self.lbd = lbd\n\n def fit(self, dataset):\n X, y = dataset.getXy()\n X = add_intersect(X)\n \n self.X = X\n self.y = y\n \n self.train(X, y)\n self.is_fitted = True\n\n def train(self, X, y):\n n = X.shape[1]\n m = X.shape[0]\n self.history = {}\n self.theta = np.zeros(n)\n\n for epoch in range(self.epochs):\n z = np.dot(X, self.theta)\n h = sigmoid(z)\n gradient = np.dot(X.T, (h - y)) / y.size\n if self.lbd>0:\n gradient[1:] = gradient[1:] + (self.lbd / m) * self.theta[1:]\n self.theta -= self.lr * gradient\n self.history[epoch] = [self.theta.copy(), self.cost()]\n\n def probability(self, x):\n assert self.is_fitted, 'Model must be fit before predicting'\n _x = np.hstack(([1], x))\n return sigmoid(np.dot(self.theta, _x))\n\n def predict(self, x):\n p = self.probability(x)\n res = 1 if p >= self.threshold else 0\n return res\n\n def cost(self, X=None, y=None, theta=None):\n X = add_intersect(X) if X is not None else self.X\n y = y if y is not None else self.y\n theta = theta if theta is not None else self.theta\n m = X.shape[0]\n\n h = sigmoid(np.dot(X, theta))\n cost = (-y * np.log(h) - (1-y) * np.log(1-h))\n if self.lbd>0:\n reg = np.dot(theta[1:], theta[1:]) * self.lbd / (2*m)\n res = (np.sum(cost) / m) + reg\n else: \n res = np.sum(cost) / m\n return res","sub_path":"src/si/supervised/logreg.py","file_name":"logreg.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624435960","text":"import sys\nfrom abjad import *\n\nimport common, sideman\n\n# This is a 5/4 pattern!\n\ndef get_pattern_n19(jazz_scale):\n pitches = jazz_scale.get_named_pitches([1, 3, 5, 7, 7])\n durations = [sideman.eighth] * 4 + [sideman.quarter] * 1\n notes = scoretools.make_notes(pitches, durations[0:5])\n rest = Rest(sideman.quarter)\n measure = Measure((4, 4))\n for note in notes: \n measure.append(note)\n measure.append(rest)\n tie = spannertools.Tie()\n attach(tie, measure[3:5])\n return measure\n\ndef get_pattern_n19_chord_measure(jazz_scale):\n pitches = jazz_scale.get_chord_as_named([1 ,3, 5, 7])\n measure = Measure((4, 4))\n chord = Chord(pitches, 
(4, 4))\n measure.append(chord)\n multiplier = Multiplier(measure.time_signature.duration)\n attach(multiplier, chord)\n return measure\n\ndef get_score():\n treble_pattern = Staff()\n chords = Staff(context_name='ChordNames')\n\n for key in sideman.keys_in_order():\n jazz_scale = sideman.JazzScale(key)\n treble_pattern.append( get_pattern_n19(jazz_scale) )\n chords.append( get_pattern_n19_chord_measure(jazz_scale) )\n\n score = Score([chords, treble_pattern])\n tempo = Tempo(Duration(1, 4), (80, 132))\n attach(tempo, treble_pattern)\n return score\n\ndef title():\n return \"Jazz Pattern 19\"\n\ndef composer():\n return \"Jerry Greene et al, Thiruvathukal\"\n\ndef pdf():\n return \"jazz019.pdf\"\n\ndef midi():\n return \"jazz019.midi\"\n\n\nif __name__ == '__main__':\n score = get_score()\n common.main( score, title(), composer(), pdf())\n common.main( score, title(), composer(), midi())\n","sub_path":"jazz019.py","file_name":"jazz019.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"138770535","text":"#%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nfrom time import time\nfrom IPython.display import display\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport visuals as vs\n\n# Load the Red Wines dataset\ndata = pd.read_csv(\"./dataNewSorted.csv\", sep=',')\n\n# Patient knowledge\ntotalPatient = data.shape[0]\nprint('Total number of patients:', totalPatient)\n\ntriage_at_1 = data.loc[(data['triage'] == 1)]\ntriage_1_patients = triage_at_1.shape[0]\n#print(triage_1_patients)\n\ntriage_at_2 = data.loc[(data['triage'] == 2)]\ntriage_2_patients = triage_at_2.shape[0]\n#print(triage_2_patients)\n\ntriage_at_3 = data.loc[(data['triage'] == 3)]\ntriage_3_patients = triage_at_3.shape[0]\n#print(triage_3_patients)\n\ntriage_at_4 = data.loc[(data['triage'] == 4)]\ntriage_4_patients = triage_at_4.shape[0]\n#print(triage_4_patients)\n\ntriage_at_5 = data.loc[(data['triage'] == 5)]\ntriage_5_patients = triage_at_5.shape[0]\n#print(triage_5_patients)\n\n# Patient percentages\npercent_triage_1 = triage_1_patients*100/totalPatient\nprint('Percentage triage 1:', percent_triage_1)\n\npercent_triage_2 = triage_2_patients*100/totalPatient\nprint('Percentage triage 2:', percent_triage_2)\n\npercent_triage_3 = triage_3_patients*100/totalPatient\nprint('Percentage triage 3:', percent_triage_3)\n\npercent_triage_4 = triage_4_patients*100/totalPatient\nprint('Percentage triage 4:', percent_triage_4)\n\npercent_triage_5 = triage_5_patients*100/totalPatient\nprint('Percentage triage 5:', percent_triage_5)\n\n\n# Day knowledge\nday_monday = data.loc[(data['weekDay'] == 0)]\ntotal_monday = day_monday.shape[0]\n\nday_tuesday = data.loc[(data['weekDay'] == 1)]\ntotal_tuesday = day_tuesday.shape[0]\n\nday_wednesday = data.loc[(data['weekDay'] == 2)]\ntotal_wednesday = day_wednesday.shape[0]\n\nday_thursday = data.loc[(data['weekDay'] == 3)]\ntotal_thursday = day_thursday.shape[0]\n\nday_friday = data.loc[(data['weekDay'] == 4)]\ntotal_friday = day_friday.shape[0]\n\nday_saturday = data.loc[(data['weekDay'] == 5)]\ntotal_saturday = day_saturday.shape[0]\n\nday_sunday = data.loc[(data['weekDay'] == 6)]\ntotal_sunday = day_sunday.shape[0]\n\nprint(48.952879581151834 + 6.399834665197024 + 34.451639570129515 + 5.731606503168917 + 4.464039680352714)\n\nprint()\n\n# Day percentages\nprint('Percentage monday:', total_monday*100/totalPatient)\n\nprint('Percentage tuesday:', 
total_tuesday*100/totalPatient)\n\nprint('Percentage wednesday:', total_wednesday*100/totalPatient)\n\nprint('Percentage thursday:', total_thursday*100/totalPatient)\n\nprint('Percentage friday:', total_friday*100/totalPatient)\n\nprint('Percentage saturday:', total_saturday*100/totalPatient)\n\nprint('Percentage sunday:', total_sunday*100/totalPatient)\n\nprint( 14.277349131992285 + 14.363461008542298 + 13.771011297878204 + 14.108569853954258 + 13.492008817856158 + 15.011022320198402 + 14.976577569578396)\n\n# Visualize skewed continuous features of original data\n#vs.distribution(data, \"weekDay\")\n\n# Day knowledge\nt_duration_1 = data.loc[(data['treatmentDuration'] == 0)]\ntotal_monday = day_monday.shape[0]\n\nprint(data['treatmentDuration'].max())\n\n\nfixedAcidity_citricAcid = data[['triage', 'treatmentDuration']]\ng = sns.JointGrid(x=\"triage\", y=\"treatmentDuration\", data=data, size=6)\ng = g.plot_joint(sns.regplot, scatter_kws={\"s\": 10})\ng = g.plot_marginals(sns.distplot)","sub_path":"dataKnowlege.py","file_name":"dataKnowlege.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"125739099","text":"import socket\nimport struct\nimport json\n\nfrom conf.settings import HOST, PORT\n\n\nclass Client:\n    def __init__(self):\n        self.client_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.client_server.connect((HOST, PORT))\n\n    def login(self):\n        while True:\n            username = input('Username: ')\n            if not username: break\n            self.client_server.send(username.encode('utf-8'))\n            password = input('Password: ')\n            if not password: break\n            self.client_server.send(password.encode('utf-8'))\n            login_status = self.client_server.recv(1024)\n\n            return login_status\n\n    def run(self):\n        login_status = self.login()\n        print(login_status)\n        if login_status.decode('utf-8') == 'True':\n            print('Login succeeded...')\n            while True:\n                cmd = input('Enter a command: ')\n                if not cmd: continue\n                self.client_server.send(cmd.encode('utf-8'))\n                if cmd.startswith('get'):\n                    self.get(self.client_server)\n                else:\n                    header_length = self.client_server.recv(4)\n                    header_json_length = struct.unpack('i', header_length)[0]\n\n                    header_info = json.loads(self.client_server.recv(header_json_length).decode('utf-8'))\n                    data_size = header_info['data_size']\n\n                    recv_size = 0\n                    recv_data = b''\n                    while recv_size < data_size:\n                        # count only the newly received chunk, not the whole buffer\n                        chunk = self.client_server.recv(1024)\n                        recv_data += chunk\n                        recv_size += len(chunk)\n\n                    print(recv_data.decode('gbk'))\n        else:\n            print('Login failed...')\n\n    def get(self, client_server):\n        download_dir = '/Users/eric/PycharmProjects/Learning-Python/模块三作业/FTP/ftp_client/download'\n        header_length = client_server.recv(4)\n        header_json_length = struct.unpack('i', header_length)[0]\n\n        header_info = json.loads(client_server.recv(header_json_length).decode('utf-8'))\n        data_size = header_info['file_size']\n        filename = header_info['filename']\n\n        file_path = '%s/%s' % (download_dir, filename)\n\n        with open(file_path, 'wb') as f:\n            recv_size = 0\n            while recv_size < data_size:\n                recv_data = client_server.recv(1024)\n                f.write(recv_data)\n                recv_size += len(recv_data)\n","sub_path":"模块三作业/FTP/ftp_client/bin/client_server.py","file_name":"client_server.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"517965192","text":"from flask import Flask, request, jsonify, render_template\nfrom time import time\nimport requests\nimport pandas as pd\nimport numpy as np\nimport ast\n\nfrom 
model import ModelAPI\n\nenv = 'http://52.47.62.31/'\napp = Flask(__name__)\nUSER_ID = '9G08LOYFU88BJ8GHNRU3'\n\n\nmodel = ModelAPI()\n\n\n@app.route(\"/\")\ndef hello():\n    return render_template('home.html')\n\n\n@app.route('/train', methods=['GET','POST'])\ndef train():\n    global model, env, USER_ID\n    data = request.get_json()\n    if data is None:\n        data = requests.get(url=env+'reset', params={'user_id': USER_ID}).json()\n    nb_users = int(data['nb_users'])\n    nb_items = int(data['nb_items'])\n    item_history = data['item_history']\n    user_history = data['user_history']\n    rating_history = data['rating_history']\n    start = time()\n    model.train(nb_users, nb_items, user_history, item_history, rating_history)\n    end = time()\n    return 'Training finished in {:.3f} seconds!'.format(end-start)\n\n\n@app.route('/train_ui', methods=['GET','POST'])\ndef train_ui():\n    global model, env, USER_ID\n    data = request.get_json()\n    if data is None:\n        data = requests.get(url=env+'reset', params={'user_id': USER_ID}).json()\n    nb_users = int(data['nb_users'])\n    nb_items = int(data['nb_items'])\n    item_history = data['item_history']\n    user_history = data['user_history']\n    rating_history = data['rating_history']\n    start = time()\n    model.train(nb_users, nb_items, user_history, item_history, rating_history)\n    end = time()\n    res = '{:.3f}'.format(float(end-start))\n    return render_template('train.html', time=res)\n\n\n@app.route(\"/predict\", methods=['GET'])\ndef predict():\n    global model\n    user_id = int(request.args.get('user_id'))\n    item_id = int(request.args.get('item_id'))\n    predicted_score = float(model.predict(user_id, item_id))\n    d = {'rating': predicted_score}\n    return jsonify(d)\n\n\n@app.route(\"/predict_ui\", methods=['GET'])\ndef predict_ui():\n    global model\n    user_id = int(request.args.get('user_id'))\n    item_id = int(request.args.get('item_id'))\n    predicted_score = float(model.predict(user_id, item_id))\n    if predicted_score == -1:\n        return render_template('result_error.html')\n    return render_template('result.html', score=float(predicted_score), user_id=user_id, item_id=item_id)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5000)\n","sub_path":"api/flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"297075722","text":"import logging\n\n\nfrom slipnet import slipnet\nfrom workspace import workspace\nfrom workspaceStructure import WorkspaceStructure\nfrom formulas import weightedAverage\n\n\nclass Rule(WorkspaceStructure):\n    def __init__(self, facet, descriptor, category, relation):\n        WorkspaceStructure.__init__(self)\n        self.facet = facet\n        self.descriptor = descriptor\n        self.category = category\n        self.relation = relation\n\n    def __str__(self):\n        if not self.facet:\n            return 'Empty rule'\n        return 'replace %s of %s %s by %s' % (\n            self.facet.name, self.descriptor.name,\n            self.category.name, self.relation.name)\n\n    def updateExternalStrength(self):\n        self.externalStrength = self.internalStrength\n\n    def updateInternalStrength(self):\n        if not (self.descriptor and self.relation):\n            self.internalStrength = 0.0\n            return\n        averageDepth = (self.descriptor.conceptualDepth +\n                        self.relation.conceptualDepth) / 2.0\n        averageDepth **= 1.1\n        # see if the changed object corresponds to an object in the target;\n        # if so, see if the descriptor is present (modulo slippages) in the\n        # corresponding object\n        changedObjects = [o for o in workspace.initial.objects if o.changed]\n        changed = changedObjects[0] if changedObjects else None\n        
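# bonus term: nonzero only when the changed object's descriptor survives the slippages in the target\n        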
sharedDescriptorTerm = 0.0\n if changed and changed.correspondence:\n targetObject = changed.correspondence.objectFromTarget\n slippages = workspace.slippages()\n slipnode = self.descriptor.applySlippages(slippages)\n if not targetObject.described(slipnode):\n self.internalStrength = 0.0\n return\n sharedDescriptorTerm = 100.0\n conceptual_height = (100.0 - self.descriptor.conceptualDepth) / 10.0\n sharedDescriptorWeight = conceptual_height ** 1.4\n depthDifference = 100.0 - abs(self.descriptor.conceptualDepth -\n self.relation.conceptualDepth)\n weights = ((depthDifference, 12),\n (averageDepth, 18),\n (sharedDescriptorTerm, sharedDescriptorWeight))\n self.internalStrength = weightedAverage(weights)\n if self.internalStrength > 100.0:\n self.internalStrength = 100.0\n\n def ruleEqual(self, other):\n if not other:\n return False\n if self.relation != other.relation:\n return False\n if self.facet != other.facet:\n return False\n if self.category != other.category:\n return False\n if self.descriptor != other.descriptor:\n return False\n return True\n\n def activateRuleDescriptions(self):\n if self.relation:\n self.relation.buffer = 100.0\n if self.facet:\n self.facet.buffer = 100.0\n if self.category:\n self.category.buffer = 100.0\n if self.descriptor:\n self.descriptor.buffer = 100.0\n\n def incompatibleRuleCorrespondence(self, correspondence):\n if not correspondence:\n return False\n # find changed object\n changeds = [o for o in workspace.initial.objects if o.changed]\n if not changeds:\n return False\n changed = changeds[0]\n if correspondence.objectFromInitial != changed:\n return False\n # it is incompatible if the rule descriptor is not in the mapping list\n return bool([m for m in correspondence.conceptMappings\n if m.initialDescriptor == self.descriptor])\n\n def __changeString(self, string):\n # applies the changes to self string ie. 
successor\n if self.facet == slipnet.length:\n if self.relation == slipnet.predecessor:\n return string[0:-1]\n if self.relation == slipnet.successor:\n return string + string[0:1]\n return string\n # apply character changes\n if self.relation == slipnet.predecessor:\n if 'a' in string:\n return None\n return ''.join([chr(ord(c) - 1) for c in string])\n elif self.relation == slipnet.successor:\n if 'z' in string:\n return None\n return ''.join([chr(ord(c) + 1) for c in string])\n else:\n return self.relation.name.lower()\n\n def buildTranslatedRule(self):\n slippages = workspace.slippages()\n self.category = self.category.applySlippages(slippages)\n self.facet = self.facet.applySlippages(slippages)\n self.descriptor = self.descriptor.applySlippages(slippages)\n self.relation = self.relation.applySlippages(slippages)\n # generate the final string\n self.finalAnswer = workspace.targetString\n changeds = [o for o in workspace.target.objects if\n o.described(self.descriptor) and\n o.described(self.category)]\n changed = changeds and changeds[0] or None\n logging.debug('changed object = %s', changed)\n if changed:\n left = changed.leftIndex\n startString = ''\n if left > 1:\n startString = self.finalAnswer[0: left - 1]\n right = changed.rightIndex\n middleString = self.__changeString(\n self.finalAnswer[left - 1: right])\n if not middleString:\n return False\n endString = ''\n if right < len(self.finalAnswer):\n endString = self.finalAnswer[right:]\n self.finalAnswer = startString + middleString + endString\n return True\n","sub_path":"copycat/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"384085208","text":"from data import phrases\nimport destiny\n\ndef run(full_command_text):\n if not full_command_text:\n return default('')\n command, _, params = full_command_text.partition(' ')\n function = COMMANDS.get(command)\n if function:\n return function(params)\n else:\n return default(params)\n\ndef default(params):\n return speak(params)\n\ndef speak(params):\n message = phrases.get_random_phrase()\n return message\n\ndef item_search(params):\n query = params\n result = destiny.search_item(query)\n if result:\n return '%s\\n%s %s\\n%s' % result\n else:\n return 'No results found for \"%s\"' % query\n\nCOMMANDS = {\n 'speak': speak,\n 'item': item_search,\n}\n","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522929085","text":"\"\"\" This module contains the class definition of this package's steady horseshoe\nvortex lattice solver.\n\nThis module contains the following classes:\n SteadyHorseshoeVortexLatticeMethodSolver: This is an aerodynamics solver that\n uses a steady horseshoe vortex lattice method.\n\nThis module contains the following exceptions:\n None\n\nThis module contains the following functions:\n None\n\"\"\"\nimport numpy as np\n\nfrom . 
import aerodynamics\n\n\nclass SteadyHorseshoeVortexLatticeMethodSolver:\n \"\"\"This is an aerodynamics solver that uses a steady horseshoe vortex lattice\n method.\n\n Citation:\n Adapted from: aerodynamics.vlm3.py in AeroSandbox\n Author: Peter Sharpe\n Date of Retrieval: 04/28/2020\n\n This class contains the following public methods:\n run: Run the solver on the steady problem.\n\n initialize_panel_vortices: This method calculates the locations of the vortex\n vertices, and then initializes the panels' vortices.\n\n collapse_geometry: This method converts attributes of the problem's geometry\n into 1D ndarrays. This facilitates vectorization, which speeds up the solver.\n\n calculate_wing_wing_influences: This method finds the matrix of wing-wing\n influence coefficients associated with this airplane's geometry.\n\n calculate_freestream_wing_influences: Find the normal velocity speed at every\n collocation points without the influence of the vortices.\n\n calculate_vortex_strengths: Solve for each panels' vortex strengths.\n\n calculate_near_field_forces_and_moments: Find the the forces and moments\n calculated from the near field.\n\n calculate_streamlines: Calculates the location of the streamlines coming off\n the back of the wings.\n\n This class contains the following class attributes:\n None\n\n Subclassing:\n This class is not meant to be subclassed.\n \"\"\"\n\n def __init__(self, steady_problem):\n \"\"\"This is the initialization method.\n\n :param steady_problem: SteadyProblem\n This is the steady problem to be solved.\n :return: None\n \"\"\"\n\n # Initialize this solution's attributes.\n self.airplane = steady_problem.airplane\n self.operating_point = steady_problem.operating_point\n\n # Initialize attributes to hold aerodynamic data that pertains to this problem.\n self.wing_wing_influences = np.zeros(\n (self.airplane.num_panels, self.airplane.num_panels)\n )\n self.freestream_velocity = (\n self.operating_point.calculate_freestream_velocity_geometry_axes()\n )\n self.freestream_wing_influences = np.zeros(self.airplane.num_panels)\n self.vortex_strengths = np.zeros(self.airplane.num_panels)\n self.panel_normal_directions = np.zeros((self.airplane.num_panels, 3))\n self.panel_areas = np.zeros(self.airplane.num_panels)\n self.panel_collocation_points = np.zeros((self.airplane.num_panels, 3))\n self.panel_vortex_strengths = np.zeros(self.airplane.num_panels)\n self.panel_back_right_vortex_vertices = np.zeros((self.airplane.num_panels, 3))\n self.panel_front_right_vortex_vertices = np.zeros((self.airplane.num_panels, 3))\n self.panel_front_left_vortex_vertices = np.zeros((self.airplane.num_panels, 3))\n self.panel_back_left_vortex_vertices = np.zeros((self.airplane.num_panels, 3))\n self.panels = np.empty(self.airplane.num_panels, dtype=object)\n self.panel_bound_vortex_centers = np.zeros((self.airplane.num_panels, 3))\n self.panel_bound_vortex_vectors = np.zeros((self.airplane.num_panels, 3))\n self.seed_points = np.empty((0, 3))\n self.streamline_points = None\n\n def run(self, verbose=True):\n \"\"\"Run the solver on the steady problem.\n\n :param verbose: Bool, optional\n This parameter determines if the solver prints output to the console.\n It's default value is True.\n :return: None\n \"\"\"\n\n # Initialize this problem's panels to have vortices congruent with this\n # solver type.\n if verbose:\n print(\"Initializing panel vortices.\")\n self.initialize_panel_vortices()\n\n # Collapse this problem's geometry matrices into 1D ndarrays of attributes.\n if verbose:\n 
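# flattening into 1D arrays is what lets the later influence and force steps run as vectorized numpy operations\n            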
print(\"Collapsing geometry.\")\n self.collapse_geometry()\n\n # Find the matrix of aerodynamic influence coefficients associated with this\n # problem's geometry.\n if verbose:\n print(\"\\nCalculating the wing-wing influences.\")\n self.calculate_wing_wing_influences()\n\n # Find the normal freestream speed at every collocation points without vortices.\n if verbose:\n print(\"\\nCalculating the freestream-wing influences.\")\n self.calculate_freestream_wing_influences()\n\n # Solve for each panel's vortex strengths.\n if verbose:\n print(\"\\nCalculating vortex strengths.\")\n self.calculate_vortex_strengths()\n\n # Solve for the near field forces and moments on each panel.\n if verbose:\n print(\"\\nCalculating near field forces.\")\n self.calculate_near_field_forces_and_moments()\n\n # Solve for the location of the streamlines coming off the back of the wings.\n if verbose:\n print(\"\\nCalculating streamlines.\")\n self.calculate_streamlines()\n\n # Print out the total forces.\n if verbose:\n print(\"\\n\\nForces in Wind Axes:\")\n print(\n \"\\tInduced Drag:\\t\\t\\t\",\n np.round(self.airplane.total_near_field_force_wind_axes[0], 3),\n \" N\",\n )\n print(\n \"\\tSide Force:\\t\\t\\t\\t\",\n np.round(self.airplane.total_near_field_force_wind_axes[1], 3),\n \" N\",\n )\n print(\n \"\\tLift:\\t\\t\\t\\t\\t\",\n np.round(self.airplane.total_near_field_force_wind_axes[2], 3),\n \" N\",\n )\n\n # Print out the total moments.\n if verbose:\n print(\"\\nMoments in Wind Axes:\")\n print(\n \"\\tRolling Moment:\\t\\t\\t\",\n np.round(self.airplane.total_near_field_moment_wind_axes[0], 3),\n \" Nm\",\n )\n print(\n \"\\tPitching Moment:\\t\\t\",\n np.round(self.airplane.total_near_field_moment_wind_axes[1], 3),\n \" Nm\",\n )\n print(\n \"\\tYawing Moment:\\t\\t\\t\",\n np.round(self.airplane.total_near_field_moment_wind_axes[2], 3),\n \" Nm\",\n )\n\n # Print out the coefficients.\n if verbose:\n print(\"\\nCoefficients in Wind Axes:\")\n print(\n \"\\tCDi:\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_force_coefficients_wind_axes[0], 3\n ),\n )\n print(\n \"\\tCY:\\t\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_force_coefficients_wind_axes[1], 3\n ),\n )\n print(\n \"\\tCL:\\t\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_force_coefficients_wind_axes[2], 3\n ),\n )\n print(\n \"\\tCl:\\t\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_moment_coefficients_wind_axes[0], 3\n ),\n )\n print(\n \"\\tCm:\\t\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_moment_coefficients_wind_axes[1], 3\n ),\n )\n print(\n \"\\tCn:\\t\\t\\t\\t\\t\\t\",\n np.round(\n self.airplane.total_near_field_moment_coefficients_wind_axes[2], 3\n ),\n )\n\n def initialize_panel_vortices(self):\n \"\"\"This method calculates the locations of the vortex vertices, and then\n initializes the panels' vortices.\n\n Every panel has a horseshoe vortex. The vortex's finite leg runs along the\n panel's quarter chord from right to\n left. It's infinite legs points backwards in the positive x direction.\n\n :return: None\n \"\"\"\n\n # Find the freestream direction in geometry axes.\n freestream_direction = (\n self.operating_point.calculate_freestream_direction_geometry_axes()\n )\n\n # Iterate through the current_airplane's wings.\n for wing in self.airplane.wings:\n\n # Find a suitable length for the \"infinite\" legs of the horseshoe\n # vortices on this wing. 
At twenty-times the\n # wing's span, these legs are essentially infinite.\n infinite_leg_length = wing.span * 20\n\n # Iterate through the wing's chordwise and spanwise panel positions.\n for chordwise_position in range(wing.num_chordwise_panels):\n for spanwise_position in range(wing.num_spanwise_panels):\n # Pull the panel object out of the wing's list of panels.\n panel = wing.panels[chordwise_position, spanwise_position]\n\n # Find the location of the panel's front and right vortex vertices.\n front_left_vortex_vertex = panel.front_left_vortex_vertex\n front_right_vortex_vertex = panel.front_right_vortex_vertex\n\n # Initialize the horseshoe vortex at this panel.\n panel.horseshoe_vortex = aerodynamics.HorseshoeVortex(\n finite_leg_origin=front_right_vortex_vertex,\n finite_leg_termination=front_left_vortex_vertex,\n strength=None,\n infinite_leg_direction=freestream_direction,\n infinite_leg_length=infinite_leg_length,\n )\n\n def collapse_geometry(self):\n \"\"\"This method converts attributes of the problem's geometry into 1D\n ndarrays. This facilitates vectorization,\n which speeds up the solver.\n\n :return: None\n \"\"\"\n\n # Initialize a variable to hold the global position of the panel as we\n # iterate through them.\n global_panel_position = 0\n\n # Iterate through the airplane's wings.\n for wing in self.airplane.wings:\n\n # Convert this wing's 2D array of panels into a 1D array.\n panels = np.ravel(wing.panels)\n\n # Iterate through the 1D array of this wing's panels.\n for panel in panels:\n\n # Update the solver's list of attributes with this panel's attributes.\n self.panels[global_panel_position] = panel\n self.panel_normal_directions[\n global_panel_position, :\n ] = panel.normal_direction\n self.panel_areas[global_panel_position] = panel.area\n self.panel_collocation_points[\n global_panel_position, :\n ] = panel.collocation_point\n self.panel_back_right_vortex_vertices[\n global_panel_position, :\n ] = panel.horseshoe_vortex.right_leg.origin\n self.panel_front_right_vortex_vertices[\n global_panel_position, :\n ] = panel.horseshoe_vortex.right_leg.termination\n self.panel_front_left_vortex_vertices[\n global_panel_position, :\n ] = panel.horseshoe_vortex.left_leg.origin\n self.panel_back_left_vortex_vertices[\n global_panel_position, :\n ] = panel.horseshoe_vortex.left_leg.termination\n self.panel_bound_vortex_centers[\n global_panel_position, :\n ] = panel.horseshoe_vortex.finite_leg.center\n self.panel_bound_vortex_vectors[\n global_panel_position, :\n ] = panel.horseshoe_vortex.finite_leg.vector\n\n # Check if this panel is on the trailing edge.\n if panel.is_trailing_edge:\n # If it is, calculate it's streamline seed point and add it to\n # the solver's array of seed points.\n self.seed_points = np.vstack(\n (\n self.seed_points,\n panel.back_left_vertex\n + 0.5 * (panel.back_right_vertex - panel.back_left_vertex),\n )\n )\n\n # Increment the global panel position.\n global_panel_position += 1\n\n def calculate_wing_wing_influences(self):\n \"\"\"This method finds the matrix of wing-wing influence coefficients\n associated with this airplane's geometry.\n\n :return: None\n \"\"\"\n\n # Find the matrix of normalized velocities induced at every panel's\n # collocation point by every panel's horseshoe\n # vortex.\n induced_velocities = (\n aerodynamics.calculate_velocity_induced_by_horseshoe_vortices(\n points=self.panel_collocation_points,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n 
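# unit strengths give normalized influences; collapse=False keeps each vortex's contribution separate\n                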
front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=np.ones(self.airplane.num_panels),\n collapse=False,\n )\n )\n\n # Take the batch dot product of the normalized velocities with each panel's\n # normal direction. This is now the\n # problem's matrix of wing-wing influence coefficients.\n self.wing_wing_influences = np.einsum(\n \"...k,...k->...\",\n induced_velocities,\n np.expand_dims(self.panel_normal_directions, axis=1),\n )\n\n def calculate_freestream_wing_influences(self):\n \"\"\"This method finds the vector of freestream-wing influence coefficients\n associated with this problem.\n\n :return: None\n \"\"\"\n\n # Take the batch dot product of the freestream velocity with each panel's\n # normal direction. This is now the\n # problem's 1D array of freestream-wing influence coefficients.\n self.freestream_wing_influences = np.einsum(\n \"ij,j->i\", self.panel_normal_directions, self.freestream_velocity\n )\n\n def calculate_vortex_strengths(self):\n \"\"\"Solve for each panel's vortex strengths.\n\n :return: None\n \"\"\"\n\n # Solve for the strength of each panel's vortex.\n self.vortex_strengths = np.linalg.solve(\n self.wing_wing_influences, -self.freestream_wing_influences\n )\n\n # Iterate through the panels and update their vortex strengths.\n for panel_num in range(self.panels.size):\n # Get the panel at this location.\n panel = self.panels[panel_num]\n\n # Update this panel's horseshoe vortex strength.\n panel.horseshoe_vortex.update_strength(self.vortex_strengths[panel_num])\n\n def calculate_near_field_forces_and_moments(self):\n \"\"\"Find the the forces and moments calculated from the near field.\n\n Note: The forces and moments calculated are in geometry axes. The moment is\n about the airplane's reference\n point, which should be at the center of gravity. 
The units are Newtons\n and Newton-meters.\n\n :return: None\n \"\"\"\n\n # Calculate the velocities induced at every panel's bound vortex center.\n induced_velocities = (\n aerodynamics.calculate_velocity_induced_by_horseshoe_vortices(\n points=self.panel_bound_vortex_centers,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=self.vortex_strengths,\n collapse=True,\n )\n )\n\n # Add the freestream velocity to the induced velocities to calculate the\n # total velocity at every panel's bound\n # vortex center.\n total_velocities = induced_velocities + self.freestream_velocity\n\n # Calculate the near field force, in geometry axes, on each panel's bound\n # vortex.\n near_field_forces_geometry_axes = (\n self.operating_point.density\n * np.expand_dims(self.vortex_strengths, axis=1)\n * np.cross(total_velocities, self.panel_bound_vortex_vectors, axis=-1)\n )\n\n # Calculate the near field moments, in geometry axes, on each panel's bound\n # vortex.\n near_field_moments_geometry_axes = np.cross(\n self.panel_bound_vortex_centers - self.airplane.xyz_ref,\n near_field_forces_geometry_axes,\n axis=-1,\n )\n\n # Initialize a variable to hold the global panel position.\n global_panel_position = 0\n\n # Iterate through this solver's panels.\n for panel in self.panels:\n # Update the force and moment on this panel.\n panel.near_field_force_geometry_axes = near_field_forces_geometry_axes[\n global_panel_position, :\n ]\n panel.near_field_moment_geometry_axes = near_field_moments_geometry_axes[\n global_panel_position, :\n ]\n\n # Update the pressure on this panel.\n panel.update_pressure()\n\n # Increment the global panel position.\n global_panel_position += 1\n\n # Sum up the near field forces and moments on every panel to find the total\n # force and moment on the geometry.\n total_near_field_force_geometry_axes = np.sum(\n near_field_forces_geometry_axes, axis=0\n )\n total_near_field_moment_geometry_axes = np.sum(\n near_field_moments_geometry_axes, axis=0\n )\n\n # Find the total near field force in wind axes from the rotation matrix and\n # the total near field force in\n # geometry axes.\n self.airplane.total_near_field_force_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_force_geometry_axes\n )\n\n # Find the total near field moment in wind axes from the rotation matrix and\n # the total near field moment in\n # geometry axes.\n self.airplane.total_near_field_moment_wind_axes = (\n np.transpose(\n self.operating_point.calculate_rotation_matrix_wind_axes_to_geometry_axes()\n )\n @ total_near_field_moment_geometry_axes\n )\n\n # Calculate the current_airplane's induced drag coefficient\n induced_drag_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's side force coefficient.\n side_force_coefficient = (\n self.airplane.total_near_field_force_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n )\n\n # Calculate the current_airplane's lift coefficient.\n lift_coefficient = (\n -self.airplane.total_near_field_force_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n 
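# normalize by dynamic pressure and reference area to make the force dimensionless\n        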
)\n\n # Calculate the current_airplane's rolling moment coefficient.\n rolling_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[0]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n # Calculate the current_airplane's pitching moment coefficient.\n pitching_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[1]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.c_ref\n )\n\n # Calculate the current_airplane's yawing moment coefficient.\n yawing_moment_coefficient = (\n self.airplane.total_near_field_moment_wind_axes[2]\n / self.operating_point.calculate_dynamic_pressure()\n / self.airplane.s_ref\n / self.airplane.b_ref\n )\n\n self.airplane.total_near_field_force_coefficients_wind_axes = np.array(\n [induced_drag_coefficient, side_force_coefficient, lift_coefficient]\n )\n self.airplane.total_near_field_moment_coefficients_wind_axes = np.array(\n [\n rolling_moment_coefficient,\n pitching_moment_coefficient,\n yawing_moment_coefficient,\n ]\n )\n\n def calculate_streamlines(self, num_steps=10, delta_time=0.1):\n \"\"\"Calculates the location of the streamlines coming off the back of the wings.\n\n :param num_steps: int, optional\n This is the integer number of points along each streamline (not including\n the initial points). It can be\n increased for higher fidelity visuals. The default value is 10.\n :param delta_time: float, optional\n This is the time in seconds between each time current_step It can be\n decreased for higher fidelity visuals\n or to make the streamlines shorter. It's default value is 0.1 seconds.\n :return: None\n \"\"\"\n\n # Initialize a array to hold this problem's matrix of streamline points.\n self.streamline_points = np.expand_dims(self.seed_points, axis=0)\n\n # Iterate through the streamline steps.\n for step in range(num_steps):\n # Get the last row of streamline points.\n last_row_streamline_points = self.streamline_points[-1, :, :]\n\n # Find the induced velocities at this row of points.\n induced_velocities = (\n aerodynamics.calculate_velocity_induced_by_horseshoe_vortices(\n points=last_row_streamline_points,\n back_right_vortex_vertices=self.panel_back_right_vortex_vertices,\n front_right_vortex_vertices=self.panel_front_right_vortex_vertices,\n front_left_vortex_vertices=self.panel_front_left_vortex_vertices,\n back_left_vortex_vertices=self.panel_back_left_vortex_vertices,\n strengths=self.vortex_strengths,\n collapse=True,\n )\n )\n\n # Add the freestream velocity to the induced velocity to get the total\n # velocity at each of the last row of\n # streamline points.\n total_velocities = induced_velocities + self.freestream_velocity\n\n # Interpolate the positions on a new row of streamline points.\n new_row_streamline_points = (\n last_row_streamline_points + total_velocities * delta_time\n )\n\n # Stack the new row of streamline points to the bottom of the matrix of\n # streamline points.\n self.streamline_points = np.vstack(\n (\n self.streamline_points,\n np.expand_dims(new_row_streamline_points, axis=0),\n )\n )\n","sub_path":"src/steady_horseshoe_vortex_lattice_method.py","file_name":"steady_horseshoe_vortex_lattice_method.py","file_ext":"py","file_size_in_byte":24018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"429681623","text":"import os\n\n\nclass Votes:\n\n def __init__(self, file_path):\n self.__path = file_path\n self.__file_object = 
None\n\n    def __enter__(self):\n        self.__file_object = open(self.__path)\n        return self\n\n    def __exit__(self, type, val, tb):\n        self.__file_object.close()\n\n    def countVotes(self):\n        m = {}\n\n        with open(self.__path, 'r') as f:\n            for line in f.readlines():\n                arr = line.split(',')\n                arr[1] = arr[1].replace(' ', '')\n                arr[1] = arr[1].replace('\\n', '')\n                print(int(arr[0]), int(arr[1]))\n                # collect each candidate's votes under their own key\n                if arr[1] not in m:\n                    m[arr[1]] = [arr[0]]\n                else:\n                    m[arr[1]].append(arr[0])\n\n        sorted_map = sorted(m.items(), key=lambda x: len(x[1]), reverse=True)\n        print(list(sorted_map))\n        sorted_list = list(sorted_map)\n        # return the three candidates with the most votes\n        return [sorted_list[0][0], sorted_list[1][0], sorted_list[2][0]]\n\n\nif __name__ == '__main__':\n    print('hello')\n    print(os.getcwd())\n    v = Votes('votes.txt')\n    print(v.countVotes())\n","sub_path":"datastructs/maps/votes.py","file_name":"votes.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"197565398","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\nfrom os.path import dirname, join, realpath\n\n\nSP = 7\n\nHLT = 0b00000001\nLDI = 0b10000010\nPRN = 0b01000111\nMUL = 0b10100010\nPUSH = 0b01000101\nPOP = 0b01000110\nNOP = 0b00000000\n\n#### SPRINT CHALLENGE ####\nCMP = 0b10100111\nJEQ = 0b01010101\nJMP = 0b01010100\nJNE = 0b01010110\n\nclass CPU:\n    \"\"\"Main CPU class.\"\"\"\n\n    def __init__(self):\n        \"\"\"Construct a new CPU.\"\"\"\n        # init 8-bit registers\n        self.reg = [0] * 8\n        # we increment pc: it's the index of the current instruction\n        self.pc = 0\n        # Init memory with 256 bytes\n        self.ram = [0b0] * 256\n        # Per our spec, reg 7 = 0xF4\n        self.reg[7] = 0xF4\n\n        self.fl = None\n\n        # Branch table: maps opcodes to their handler methods\n        self.branch_table = {\n            HLT : self.hlt,\n            LDI : self.ldi,\n            PRN : self.prn,\n            MUL : self.mul,\n            PUSH : self.push,\n            POP : self.pop,\n            NOP : self.nop,\n            CMP : self.cmp,\n\n            #### SPRINT CHALLENGE ####\n            JEQ : self.jeq,\n            JMP : self.jmp,\n            JNE : self.jne\n        }\n\n    ##### SPRINT CHALLENGE #####\n\n    def cmp(self, operand_a, operand_b):\n        # change flag depending on opA and opB\n        if self.reg[operand_a] == self.reg[operand_b]:\n            self.fl = \"E\"\n        elif self.reg[operand_a] < self.reg[operand_b]:\n            self.fl = \"LT\"\n        elif self.reg[operand_a] > self.reg[operand_b]:\n            self.fl = \"GT\"\n        else:\n            self.fl = 0\n\n        self.pc += 3\n\n    def jeq(self, operand_a, operand_b):\n        # if flag is E (equal) jump to address stored in given register\n        if self.fl == \"E\":\n            self.pc = self.reg[operand_a]\n        else:\n            self.pc += 2\n\n    def jmp(self, operand_a, operand_b):\n        '''performs an unconditional jump'''\n        self.pc = self.reg[operand_a]\n\n    def jne(self, operand_a, operand_b):\n        '''a conditional jump that follows a test'''\n        if self.fl != \"E\":\n            self.pc = self.reg[operand_a]\n        else:\n            self.pc += 2\n\n    ############################\n\n    # HLT, or Halt: exits the emulator\n    def hlt(self, operand_a, operand_b):\n        sys.exit(0)\n\n    # LDI, or Load Immediate; set specified register to specific value\n    def ldi(self, operand_a, operand_b):\n        self.reg[operand_a] = operand_b\n        # Increment program counter by 3 steps in RAM\n        self.pc += 3\n\n    # PRN, or print register: prints the current register\n    def prn(self, operand_a, operand_b):\n        
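# the operand byte that follows the opcode selects which register to print\n        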
print(self.reg[operand_a])\n        self.pc += 2\n\n    # MUL, or multiply: multiplies the values in the two given registers\n    def mul(self, operand_a, operand_b):\n        self.reg[operand_a] *= self.reg[operand_b]\n        self.pc += 3\n\n    # PUSH: push the value to the stack, decrement the pointer\n    def push(self, operand_a, operand_b):\n        # decrement SP\n        self.reg[SP] -= 1\n        # Get the value we want to store from the register and store it in ram\n        self.ram_write(self.reg[operand_a], self.reg[SP])\n        self.pc += 2\n\n    # POP: pop the top value off the stack\n    def pop(self, operand_a, operand_b):\n        # get the value of SP and overwrite next register\n        value = self.ram_read(self.reg[SP])\n        self.reg[operand_a] = value\n        # increment SP\n        self.reg[SP] += 1\n        self.pc += 2\n\n    # NOP, or No Operation: does nothing except advance the program counter\n    def nop(self, operand_a, operand_b):\n        self.pc += 1\n\n    # RET, or RETURN: returns back to where we called from\n    def ret(self, operand_a, operand_b):\n        self.pc = self.ram[self.reg[SP]]\n        self.reg[SP] += 1\n\n    # CALL: jump to any address we specify\n    def call(self, operand_a, operand_b):\n        self.reg[SP] -= 1\n        self.ram_write(self.pc + 2, self.reg[SP])\n        self.pc = self.reg[operand_a]\n\n    def load(self, filename=None):\n        \"\"\"Load a program into memory.\"\"\"\n\n        address = 0\n\n        # For now, we've just hardcoded a program:\n        cur_dir = dirname(realpath(__file__))\n        files = [\n            join(cur_dir, 'examples/stackoverflow.ls8'),\n            join(cur_dir, 'examples/addi.ls8'),\n            join(cur_dir, 'examples/print8.ls8'),\n            join(cur_dir, 'examples/mult.ls8'),\n            join(cur_dir, 'examples/stack.ls8'),\n            join(cur_dir, 'examples/call.ls8'),\n            join(cur_dir, 'examples/printstr.ls8'),\n            join(cur_dir, 'examples/sctest.ls8'),\n            join(cur_dir, 'examples/histogram.ls8'),\n            join(cur_dir, 'examples/keyboard.ls8'),\n            join(cur_dir, 'examples/interrupts.ls8'),\n        ]\n\n        SC = join(cur_dir, 'sctest.ls8')\n\n        if filename:\n            # open the requested program rather than always falling back to sctest\n            with open(filename) as f:\n                address = 0\n                for line in f:\n                    line = line.split(\"#\")[0].strip()\n                    if line == '':\n                        continue\n                    else:\n                        instruction = int(line, 2)  # parse the text as a base-2 number\n                        self.ram[address] = instruction\n                        address += 1\n\n        else:\n\n            program_sctest = [\n\n                0b10000010, # LDI R0,10\n                0b00000000,\n                0b0001010,\n                0b10000010, # LDI R1,20\n                0b0000001,\n                0b0010100,\n                0b10000010, # LDI R2,TEST1\n                0b0000010,\n                0b0010011,\n                0b10100111, # CMP R0,R1\n                0b00000000,\n                0b0000001,\n                0b1010101, # JEQ R2\n                0b0000010,\n                0b10000010, # LDI R3,1\n                0b0000011,\n                0b0000001,\n                0b1000111, # PRN R3\n                0b0000011,\n                # TEST1 (address 19):\n                0b10000010, # LDI R2,TEST2\n                0b0000010,\n                0b0100000,\n                0b10100111, # CMP R0,R1\n                0b00000000,\n                0b0000001,\n                0b1010110, # JNE R2\n                0b0000010,\n                0b10000010, # LDI R3,2\n                0b0000011,\n                0b0000010,\n                0b1000111, # PRN R3\n                0b0000011,\n                # TEST2 (address 32):\n                0b10000010, # LDI R1,10\n                0b0000001,\n                0b0001010,\n                0b10000010, # LDI R2,TEST3\n                0b0000010,\n                0b0110000,\n                0b10100111, # CMP R0,R1\n                0b00000000,\n                0b0000001,\n                0b1010101, # JEQ R2\n                0b0000010,\n                0b10000010, # LDI R3,3\n                0b0000011,\n                0b0000011,\n                0b1000111, # PRN R3\n                0b0000011,\n                # TEST3 (address 48):\n                0b10000010, # LDI R2,TEST4\n                0b0000010,\n                0b0111101,\n                0b10100111, # CMP R0,R1\n                0b00000000,\n                0b0000001,\n                0b1010110, # JNE R2\n                0b0000010,\n                0b10000010, # LDI R3,4\n                0b0000011,\n                0b0000100,\n                0b1000111, # PRN R3\n                0b0000011,\n                # TEST4 (address 61):\n                0b10000010, # LDI R3,5\n                0b0000011,\n                0b0000101,\n                0b1000111, # PRN R3\n                0b0000011,\n                0b10000010, # LDI R2,TEST5\n                0b0000010,\n                0b1001001,\n                0b1010100, # JMP R2\n                0b0000010,\n                0b1000111, # PRN R3\n                0b0000011,\n                # TEST5 (address 
73):\n 0b0000001, \n ]\n\n\n\n program = [\n # From print8.ls8\n 0b10000010, # LDI R0,8\n 0b00000000,\n 0b00001000,\n 0b01000111, # PRN R0\n 0b00000000,\n 0b00000001, # HLT\n ]\n\n for address, instruction in enumerate(program):\n self.ram[address] = instruction\n\n def alu(self, op, reg_a, reg_b):\n \"\"\"ALU operations.\"\"\"\n\n if op == \"ADD\":\n self.reg[reg_a] += self.reg[reg_b]\n elif op == \"SUB\": \n self.reg[reg_a] -= self.reg[reg_b]\n elif op == \"MUL\":\n self.reg[reg_a] *= self.reg[reg_b]\n else:\n raise Exception(\"Unsupported ALU operation\")\n\n def ram_read(self, address):\n \"\"\"prints what's stored in that specified address in RAM\"\"\"\n return self.ram[address]\n\n def ram_write(self, value, address):\n \"\"\"Overwrites the address in ram with the value\"\"\"\n self.ram[address] = value\n\n def trace(self):\n \"\"\"\n Handy function to print out the CPU state. You might want to call this\n from run() if you need help debugging.\n \"\"\"\n\n print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n self.pc,\n #self.fl,\n #self.ie,\n self.ram_read(self.pc),\n self.ram_read(self.pc + 1),\n self.ram_read(self.pc + 2)\n ), end='')\n\n for i in range(8):\n print(\" %02X\" % self.reg[i], end='')\n\n print()\n\n def run(self):\n \"\"\"Run the CPU.\"\"\"\n\n while True:\n # self.trace()\n op_code = self.ram[self.pc]\n operand_a, operand_b = self.ram[self.pc + 1], self.ram[self.pc + 2]\n if op_code in self.branch_table:\n self.branch_table[op_code](operand_a, operand_b)\n else: \n self.pc += 1\n\nif __name__ == '__main__':\n LS8 = CPU()\n LS8.load()\n for i in range(9):\n print(LS8.ram_read(i))\n LS8.ram_write(0, 15)\n print('==============')\n print(LS8.ram_read(0))\n print('==============')","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":9914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166182739","text":"import json\nfrom datetime import datetime\n\nfrom aio_pika import connect, ExchangeType, IncomingMessage\n\nimport config\nfrom db_access.paths import route_and_path_points\nfrom models.vehicle import Vehicle\nfrom tracking.path_entry import PathEntry\nfrom tracking.vehicle_entry import VehicleEntry\n\n\nclass VehicleTracker:\n def __init__(self, loop, pool) -> None:\n loop.create_task(self.__listen(loop))\n self.vehicles = dict()\n self.paths = dict()\n self.pool = pool\n\n async def __listen(self, loop):\n connection = await connect(config.amqp_connection_string, loop=loop)\n channel = await connection.channel()\n await channel.set_qos(prefetch_count=1)\n exchange = await channel.declare_exchange(config.amqp_exchange, ExchangeType.TOPIC, durable=True)\n queue = await channel.declare_queue(exclusive=True)\n await queue.bind(exchange, routing_key=config.amqp_routing_key)\n await queue.consume(self.__process_message)\n\n def __process_message(self, message: IncomingMessage):\n with message.process():\n parsed = json.loads(message.body)\n self.__vehicle_update(parsed['transportId'], parsed['lat'], parsed['lon'], parsed['speed'], parsed['createdAt'], parsed['pathId'])\n\n def __vehicle_update(self, id: str, lat: float, lon: float, speed: float, timestamp: float, path_id: str) -> None:\n if not id in self.vehicles:\n if not path_id in self.paths:\n route, pts = route_and_path_points(path_id, self.pool)\n self.paths[path_id] = PathEntry(path_id, route, pts)\n self.vehicles[id] = VehicleEntry(id, path_id, self.paths[path_id].route, self.paths[path_id].path_points)\n 
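# forward every sample (not just the first for a vehicle) to its entry so speed and mileage stay current\n        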
self.vehicles[id].register_update(lat, lon, speed, timestamp)\n\n    def vehicle_animation(self, id: str, secs: float) -> Vehicle:\n        if id in self.vehicles:\n            v = self.vehicles[id]\n            return Vehicle(id, v.path_id, [Vehicle.Target(v.mileage + v.average_speed * secs, datetime.utcnow().timestamp())])\n\n    def vehicle_forecast(self, id: str):\n        pass\n\n    def stop_forecast(self, id: str):\n        pass\n","sub_path":"tracking/vehicle_tracker.py","file_name":"vehicle_tracker.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}{"seq_id":"652205841","text":"# coding: utf-8\n\n\"\"\"\nFile overview: fetch the audience-rating data for the first 5 minutes of a TV program.\nFetches the rating data for each demographic segment: setai, kozin, C, T, M1, M2, M3, F1, F2, F3.\n\"\"\"\n\nimport sys\nsys.path.append('../..') # needed so that dao can be imported\nfrom util import dao # fetches the data from big_query\nimport pandas as pd\nimport time\nfrom datetime import datetime\n\ndef get_after_5(df):\n    start_day = int(df[\"housou_day(s)\"]) # int\n    start_time = int(df[\"time(s)\"]) # int\n    end_time = int(df[\"time(e)\"]) # int\n    media_id = df[\"media_id\"]\n    start_time += 5 # look 5 minutes ahead; the lines below handle crossing into the next day\n    if start_time>2859:\n        start_time-=2400\n        start_day+=1\n    if start_time= imW-1 or y<=0 or y>= imH-1:\n        #raise Exception(\"This function cannot be used on pixels on the image's outer edge.\")\n        borderpixel = True\n    if not borderpixel:\n        for i in range(x-1,x+2):\n            for j in range(y-1,y+2):\n                if (i != x or j!=y) and im.getpixel((i,j))== color:\n                    ans = True \n    return ans\n\n\n##Color the border where the black junk ends\ndef colorBorder(color,im,white,black,red,putpixel):\n    [imW,imH] = im.size\n    for i in range(imW):\n        for j in range(imH):\n            if isNearColor(white,im,(i,j)) and isNearColor(black,im,(i,j)):\n                putpixel((i,j),red)\n    \ndef findNextOfColorsOLD(desColors,imA,imW,x,y):\n    for delta in range(1,imW-x):\n        curColor = imA[x+delta,y]\n#        if len(desColors)> 1: \n#            print(curColor)\n#            print(desColors)\n#            print(curColor in desColors)\n        if curColor in desColors: \n            ans=x+delta\n            return ans \n#        if delta == imW-1:\n#            raise Exception(\"Problem: MaxW needs to be set higher.\")\n    ans = imW-1\n    return ans\n\ndef findNextOfColors(desColors,imLw,ind):\n    (imL,imW) = imLw\n    x = ind%imW\n    for delta in range(1,imW-x):\n        curColor = imL[ind+delta]\n        if curColor in desColors: \n            return ind+delta\n    #print(\"end of row returned by findNextOfColors2\")\n    return ind+imW-1\ndef findPrevOfColors(desColors,imLw,ind):\n    (imL,imW) = imLw\n    x = ind%imW\n    for delta in range(1,x+1):\n        curColor = imL[ind-delta]\n        if curColor in desColors: \n            return ind-delta\n    #print(\"end of row returned by findNextOfColors2\")\n    return ind+imW-1\n\ndef findNextNotOfColors(undesColors,imLw,ind):\n    (imL,imW) = imLw\n    x = ind%imW\n    for delta in range(1,imW-x):\n        curColor = imL[ind+delta]\n        if curColor not in undesColors: \n            return ind+delta\n    print(\"end of row returned by findNextOfColors2\")\n    return ind+imW-1\n\ndef findPrevNotOfColors(undesColors,imLw,ind):\n    (imL,imW) = imLw\n    x = ind%imW\n    for delta in range(1,x+1):\n        curColor = imL[ind-delta]\n        if curColor not in undesColors: \n            return ind-delta\n    print(\"end of row returned by findNextOfColors2\")\n    return ind+imW-1\n\n    \n#http://stackoverflow.com/questions/11746766/flood-fill-algorithm-python \n#make faster by removing putpixel and repost\ndef floodFill(XY,newColor,im):\n    originalColor = im.getpixel(XY) #this color defines the region to fill\n    toFill = set()\n    toFill.add(XY)\n    while toFill:  # loop until the work set is empty (Python sets have no .empty() method)\n        (x,y) = toFill.pop()\n        curPixelColor = im.getpixel((x,y))\n        if 
not curPixelColor == originalColor:\n continue\n im.putpixel((x,y), newColor) #make me faster!\n \n #note:don't want diagonals (consider one pixel thick diagonal line separating two white spaces)\n toFill.add((x-1,y))\n toFill.add((x+1,y))\n toFill.add((x,y-1))\n toFill.add((x,y+1))\n #image.save(\"flood.png\")\n\n\n\ndef findPixelsOfColor(imL,color):\n colorPixels = set()\n for i in range(len(imL)):\n if imL[i] == color:\n colorPixels.add(i)\n return colorPixels\n\ndef countPixelsOfColors(imL,colors):\n count = [0]*len(colors)\n for i in range(len(imL)):\n if imL[i] in colors:\n for c in range(len(colors)):\n if imL[i] == colors[c]:\n count[c] = count[c]+1\n break\n return count\n \n \ndef intCentroid(flatCoords,W):\n Coords = [fCinv(fp,W) for fp in flatCoords]\n n = len(Coords)\n return (int(sum([c[0] for c in Coords])/n), int(sum([c[1] for c in Coords])/n))\n\ndef findCenterXY(imLw,color):\n (imL,imW) = imLw\n return intCentroid(findPixelsOfColor(imL,color),imW)\n\ndef drawTransverses(imLw,centerXY,end0,end1):\n return\n\n\ndef fillSetOfPtsWithColor(imL,setOfPts,color):\n tmp_imL = imL\n for pt in setOfPts:\n tmp_imL[pt] = color\n return tmp_imL\n \ndef findConnectedColorComponent(imLw,XY, safeColor):\n tmpColor = safeColor\n (imL,imW) = imLw\n imH = len(imL)//imW\n originalColor = imL[flatCoord(XY,imW)] #this color defines the region to fill\n toFill = set()\n toFill.add(XY)\n connComp = set()\n connComp.add(flatCoord(XY,imW))\n while toFill != set():\n (x,y) = toFill.pop()\n curPixelColor = imL[flatCoord((x,y),imW)]\n if curPixelColor != originalColor:\n continue\n imL[flatCoord((x,y),imW)] = tmpColor\n connComp.add(flatCoord((x,y),imW))\n \n #note:don't want diagonals (consider one pixel thick diagonal line separating two white spaces)\n if x>0: toFill.add((x-1,y))\n if x< imW-1: toFill.add((x+1,y))\n if y> 0: toFill.add((x,y-1))\n if y< imH-1: toFill.add((x,y+1))\n \n changeColor2Color(imLw,tmpColor,originalColor)\n return connComp\n\ndef fillConnectedColorComponent(imLw,XY, fillColor):\n (imL,imW) = imLw\n imH = len(imL)//imW\n originalColor = imL[flatCoord(XY,imW)] #this color defines the region to fill\n toFill = set()\n toFill.add(XY)\n connComp = set()\n connComp.add(flatCoord(XY,imW))\n while toFill != set():\n (x,y) = toFill.pop()\n curPixelColor = imL[flatCoord((x,y),imW)]\n if curPixelColor != originalColor:\n continue\n imL[flatCoord((x,y),imW)] = fillColor\n connComp.add(flatCoord((x,y),imW))\n \n #note:don't want diagonals (consider one pixel thick diagonal line separating two white spaces)\n if x>0: toFill.add((x-1,y))\n if x< imW-1: toFill.add((x+1,y))\n if y> 0: toFill.add((x,y-1))\n if y< imH-1: toFill.add((x,y+1))\n return connComp\n\ndef countNearOfColorOLD(imLw,pt,color):\n (imL,imW) = imLw\n imH = len(imL)//imW\n (x,y) = fCinv(pt,imW)\n borderpixel = False\n count = 0\n if x <= 0 or x >= imW-1 or y<=0 or y>= imH-1:\n #raise Exception(\"This function cannot be used pixels on the image's outer edge.\")\n borderpixel = True\n if not borderpixel:\n for i in range(x-1,x+2):\n for j in range(y-1,y+2):\n if (i != x or j!=y) and imL[flatCoord((i,j),imW)]== color:\n count = count+1\n return count\n\ndef countNearOfColor(imLw,pt,color):\n (imL,imW) = imLw\n imH = len(imL)//imW\n (xp,yp) = fCinv(pt,imW)\n count = 0\n for x in range(xp-1,xp+2):\n for y in range(yp-1,yp+2):\n if (x,y)!=(xp,yp) and 0<= x< imW and 0<= y center[0]:\n x0 = findPrevNotOfColors(featureColors,(Hline,imW),xp)\n x1 = findPrevOfColors(featureColors,(Hline,imW),x0)\n for x in range(x1+1,x0+1):\n y = 
lineX2Y((xp,yp),center, x)\n imL[flatCoord((x,y),imW)] = transverseColor\n else:\n Hline = []\n for y in range(imH):# MUST MAKE SURE lineX2Y values ARE BETWEEN 0 AND IMw/IMh\n x = lineY2X((xp,yp),center,y)\n if 0<=x center[1]:\n y0 = findPrevNotOfColors(featureColors,(Hline,imW),yp)\n y1 = findPrevOfColors(featureColors,(Hline,imW),y0)\n for y in range(y1+1,y0+1):\n y = lineY2X((xp,yp),center, y)\n imL[flatCoord((x,y),imW)] = transverseColor\n else:\n Vline = [imL[flatCoord((xp,y),imW)] for y in range(imH)]\n if yp< center[1]:\n y0 = findNextNotOfColors(featureColors,(Vline,imW),yp)\n y1 = findNextOfColors(featureColors,(Vline,imW),y0)\n for y in range(y0,y1):\n imL[flatCoord((xp,y),imW)] = transverseColor\n if yp> center[1]:\n y0 = findPrevNotOfColors(featureColors,(Vline,imW),yp)\n y1 = findPrevOfColors(featureColors,(Vline,imW),y0)\n for y in range(y1+1,y0+1):\n imL[flatCoord((xp,y),imW)] = transverseColor\n \n#NOTE this only works for lines of single pixel thickness... otherwise (locally) can't tell diff between corners and ends\ndef isSPEP(imLw,p,EPcolor, curveColor, safeColor):\n (imL,imW) = imLw\n if imL[p]== EPcolor: return True\n# if countNearOfColor(imLw,p,curveColor)> 2: \n# imL[p]=(0,0,0)\n# return False\n (xp,yp) = fCinv(p,imW)\n sqL = []\n for x in range(xp-1,xp+2):\n for y in range(yp-1,yp+2):\n if (x,y)!=(xp,yp):\n pColor = imL[flatCoord((x,y),imW)]\n if pColor== EPcolor: return True\n sqL.append(pColor)\n else: \n sqL.append(safeColor)\n if imLw == curveColor:\n fillConnectedColorComponent((sqL,3),fCinv(0,3),safeColor)\n else:\n p2fill = findNextOfColors([curveColor],(sqL,3),0)\n fillConnectedColorComponent((sqL,3),fCinv(p2fill,3),safeColor)\n if curveColor in sqL:\n return False\n else: return True\n \ndef convertRegion2binaryRectangle(region, imW): #takes in set of points (region), returns smallest possible rectangle (in list form) with 1 at those points and zero elsewhere\n col0 = min([p%imW for p in region])\n col1 = max([p%imW for p in region])\n row0 = min([(p - p%imW)//imW for p in region])\n row1 = max([(p - p%imW)//imW for p in region])\n bImWidth = 1+ col1-col0\n binaryRegionL = [0]*bImWidth*(1+row1-row0)\n for p in region:\n (xp,yp) = fCinv(p,imW)\n binaryRegionL[flatCoord((xp-col0,yp-row0),bImWidth)] = 1\n return (binaryRegionL, bImWidth)\n \ndef isPunctured(regionOfQuestion, imW): # returns True if regionOfQuestion has a nonzero Euler char (is punctured) - does this by converting to binary (1 in region, 0 else), finding a 0, filling it's conn. comp. with 1's and checking for remaining 0's\n binaryRegionLw = convertRegion2binaryRectangle(regionOfQuestion,imW)\n if 0 in fillConnectedColorComponent(binaryRegionLw,fCinv(binaryRegionLw[0].index(0),binaryRegionLw[1]), 1): return True\n else: return False\n \ndef angle(XY,origin):\n (x1,y1) = XY\n (x0,y0) = origin\n (X,Y) = (x1-x0,y1-y0)\n if not X==0:\n return atan(Y/X)\n elif Y> 0:\n return pi/2\n elif Y< 0:\n return -pi/2\n else: \n print(XY)\n raise NameError(\"this point should not be the origin\")\n \n \ndef angularWidth(region, W, origin): #sorts angles, then uses biggest difference between them (mod 2pi) to figure out the gap\n angles = sorted([angle(fCinv(p,W),origin) for p in region])\n angles.append(min(angles))\n return 2*pi - max([(angles[i+1] - angles[i])%(2*pi) for i in range(len(angles)-1)])\n \n \n #Note: the best way to find the average width finding a parallel isotopy between lower and upper curved and integrating against it. Second best way is to do this assuming relatively circular. 
Third way is this way.\n \n ","sub_path":"src/Main/fcns.py","file_name":"fcns.py","file_ext":"py","file_size_in_byte":14779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575208375","text":"annual_salary = float(input(\"Enter your annual salary : \"))\nportion_saved = float(\n input(\"What percentage will you save from your salary? : \"))\ntotal_cost = float(input(\"What is the price of the house? : \"))\n\n# first we calculate the amount of the down payment\nportion_down_payment = 0.25 * total_cost\n\n# initialize savings to zero\ncurrent_savings = 0\n\n# initialize monthly return on savings rate\nmonthly_rate = 0.04 / 12\n\n# initialize the total time we have spent saving. Zero so far\nsaving_time = 0\n\n# start looping until we make the portion for the down payment\nwhile current_savings < portion_down_payment:\n monthly_return = current_savings * monthly_rate\n monthly_savings = portion_saved * annual_salary / 12\n current_savings += monthly_return + monthly_savings\n saving_time += 1\n\nprint(\"-----------------------------------\")\nprint(\"Time spent saving =\", saving_time, \"months\")\nprint(\"That is\", int(saving_time/12), \"years, and\", saving_time % 12, \"months\")\n","sub_path":"problem-set-1/ps1a.py","file_name":"ps1a.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"417849067","text":"# Common constants and functions for reverting scripts.\nimport urllib2, getpass, base64, re\n\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n import xml.etree.cElementTree as etree\n except ImportError:\n import xml.etree.ElementTree as etree\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nAPI_ENDPOINT = 'https://api.openstreetmap.org'\n# API_ENDPOINT = 'http://master.apis.dev.openstreetmap.org'\n\n\n# Copied from http://stackoverflow.com/a/3884771/1297601\nclass MethodRequest(urllib2.Request):\n \"\"\"A subclass to override Request method and content type.\"\"\"\n GET = 'GET'\n POST = 'POST'\n PUT = 'PUT'\n DELETE = 'DELETE'\n\n def __init__(self, url, data=None, headers={},\n origin_req_host=None, unverifiable=False, method=None):\n headers['Content-Type'] = 'application/xml'\n urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)\n self.method = method\n\n def get_method(self):\n if self.method:\n return self.method\n return urllib2.Request.get_method(self)\n\n\ndef read_auth():\n \"\"\"Read login and password from keyboard, and prepare an basic auth header.\"\"\"\n ok = False\n while not ok:\n login = input('OSM Login: ')\n auth_header = 'Basic {0}'.format(base64.b64encode('{0}:{1}'.format(login, getpass.getpass('OSM Password: '))))\n try:\n request = urllib2.Request(API_ENDPOINT + '/api/0.6/user/details')\n request.add_header('Authorization', auth_header)\n result = urllib2.urlopen(request)\n ok = 'account_created' in result.read()\n except Exception as e:\n print(e)\n if not ok:\n print('You must have mistyped. 
Please try again.')\n return auth_header\n\n\ndef obj_to_dict(obj):\n \"\"\"Converts XML object to an easy to use dict.\"\"\"\n if obj is None:\n return None\n res = {}\n res['type'] = obj.tag\n res['id'] = obj.get('id')\n res['version'] = int(obj.get('version'))\n res['deleted'] = obj.get('visible') == 'false'\n if obj.tag == 'node' and 'lon' in obj.keys() and 'lat' in obj.keys():\n res['coords'] = (obj.get('lon'), obj.get('lat'))\n res['tags'] = {tag.get('k'): tag.get('v') for tag in obj.findall('tag')}\n if obj.tag == 'way':\n res['refs'] = [x.get('ref') for x in obj.findall('nd')]\n elif obj.tag == 'relation':\n res['refs'] = [(x.get('type'), x.get('ref'), x.get('role')) for x in obj.findall('member')]\n return res\n\n\ndef dict_to_obj(obj):\n \"\"\"Converts object dict back to an XML element.\"\"\"\n if obj is None:\n return None\n res = etree.Element(obj['type'], {'id': str(obj['id']), 'version': str(obj['version'])})\n res.set('visible', 'false' if obj['deleted'] else 'true')\n if 'coords' in obj:\n res.set('lon', obj['coords'][0])\n res.set('lat', obj['coords'][1])\n if 'tags' in obj:\n for k, v in obj['tags'].iteritems():\n res.append(etree.Element('tag', {'k': k, 'v': v}))\n if not obj['deleted']:\n if obj['type'] == 'way':\n for nd in obj['refs']:\n res.append(etree.Element('nd', {'ref': nd}))\n elif obj['type'] == 'relation':\n for member in obj['refs']:\n res.append(etree.Element('member', {'type': member[0], 'ref': member[1], 'role': member[2]}))\n return res\n\n\nclass HTTPError:\n def __init__(self, e):\n self.code = e.code\n self.message = e.read()\n\n\nclass RevertError:\n def __init__(self, msg):\n self.message = msg\n\n\ndef api_download(method, throw=None, sysexit_message=None):\n \"\"\"Downloads an XML response from the OSM API. 
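Status codes listed in throw are re-raised as the module's HTTPError wrapper; when sysexit_message is given, other failures are wrapped in RevertError. 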
Returns either an Element, or a tuple of (code, message).\"\"\"\n try:\n try:\n response = urllib2.urlopen('{0}/api/0.6/{1}'.format(API_ENDPOINT, method))\n return etree.parse(response).getroot()\n except urllib2.HTTPError as e:\n if throw is not None and e.code in throw:\n raise HTTPError(e)\n else:\n raise e\n except Exception as e:\n if sysexit_message is not None:\n raise RevertError(': '.join((sysexit_message, str(e))))\n raise e\n\n\ndef changes_to_osc(changes, changeset_id=None):\n # Set explicit actions for each changed object\n for c in changes:\n if 'version' not in c or c['version'] <= 0:\n c['action'] = 'create'\n elif 'deleted' in c and c['deleted']:\n c['action'] = 'delete'\n else:\n c['action'] = 'modify'\n\n # Sort changes, so created nodes are first, and deleted are last\n def change_as_key(ch):\n act = ['create', 'modify', 'delete'].index(ch['action'])\n typ = ['node', 'way', 'relation'].index(ch['type'])\n if act == 2:\n typ = 2 - typ\n return '{0}{1}{2}'.format(act, typ, ch['id'])\n\n changes.sort(key=change_as_key)\n\n osc = etree.Element('osmChange', {'version': '0.6'})\n for c in changes:\n act = etree.SubElement(osc, c['action'])\n el = dict_to_obj(c)\n if changeset_id:\n el.set('changeset', str(changeset_id))\n act.append(el)\n\n try:\n return etree.tostring(osc, pretty_print=True, encoding='utf-8', xml_declaration=True)\n except TypeError:\n # xml.etree.ElementTree does not support pretty printing\n return etree.tostring(osc, encoding='utf-8')\n\n\ndef changeset_xml(changeset_tags):\n create_xml = etree.Element('osm')\n ch = etree.SubElement(create_xml, 'changeset')\n for k, v in changeset_tags.iteritems():\n ch.append(etree.Element('tag', {'k': k, 'v': v.decode('utf-8')}))\n return etree.tostring(create_xml)\n\n\ndef upload_changes(changes, changeset_tags):\n \"\"\"Uploads a list of changes as tuples (action, obj_dict).\"\"\"\n if not changes:\n print('No changes to upload.')\n return False\n\n # Now we need the OSM credentials\n auth_header = read_auth()\n opener = urllib2.build_opener()\n opener.addheaders = [('Authorization', auth_header)]\n\n request = MethodRequest(API_ENDPOINT + '/api/0.6/changeset/create', changeset_xml(changeset_tags), method=MethodRequest.PUT)\n try:\n changeset_id = int(opener.open(request).read())\n print('Writing to changeset {0}'.format(changeset_id))\n except Exception as e:\n print('Failed to create changeset: {0}'.format(e))\n return False\n osc = changes_to_osc(changes, changeset_id)\n\n ok = True\n request = MethodRequest('{0}/api/0.6/changeset/{1}/upload'.format(API_ENDPOINT, changeset_id), osc, method=MethodRequest.POST)\n try:\n opener.open(request)\n except urllib2.HTTPError as e:\n message = e.read()\n print('Server rejected the changeset with code {0}: {1}'.format(e.code, message))\n if e.code == 412:\n # Find the culprit for a failed precondition\n m = re.search(r'Node (\\d+) is still used by (way|relation)s ([0-9,]+)', message)\n if m:\n # Find changeset for the first way or relation that started using that node\n pass\n else:\n m = re.search(r'(Way|The relation) (\\d+) is .+ relations? 
([0-9,]+)', message)\n if m:\n # Find changeset for the first relation that started using that way or relation\n pass\n else:\n m = re.search(r'Way (\\d+) requires .+ id in \\(([0-9,]+)\\)', message)\n if m:\n # Find changeset that deleted at least the first node in the list\n pass\n else:\n m = re.search(r'Relation with id (\\d+) .+ due to (\\w+) with id (\\d+)', message)\n if m:\n # Find changeset that added member to that relation\n pass\n except Exception as e:\n ok = False\n print('Failed to upload changeset contents: {0}'.format(e))\n # Not returning, since we need to close the changeset\n\n request = MethodRequest('{0}/api/0.6/changeset/{1}/close'.format(API_ENDPOINT, changeset_id), method=MethodRequest.PUT)\n try:\n opener.open(request)\n except Exception as e:\n print('Failed to close changeset (it will close automatically in an hour): {0}'.format(e))\n return ok\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"415522429","text":"# difference between the square of the sum of the first one hundred numbers and the sum of the first 100 square numbers\n\nsqsum=0\n\nsumn=0\n\nfor r in range(101):\n sumn+=r\n sqsum+=r**2\n\nprint((sumn**2)-sqsum)\n","sub_path":"Euler 6.py","file_name":"Euler 6.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"229812647","text":"class Dico:\n def __init__(self):\n self.keys = []\n self.values = []\n \n def get(self, key):\n for i in range(len(self.keys)):\n if(self.keys[i] == key):\n return self.values[i]\n return None\n\n def pop(self, key):\n ret = None\n for i in range(len(self.keys)):\n if(self.keys[i]== key):\n ret = self.values[i]\n del self.keys[i]\n del self.values[i]\n break\n return ret\n\n def update(self, key, value):\n for i in range(len(self.keys)):\n if(self.keys[i] == key):\n self.values[i] = value\n return\n self.keys.append(key)\n self.values.append(value)\n\ndc = Dico()\ndc.update(\"kiko\",75)\nprint(dc.get(\"kiko\"))","sub_path":"Dictionnaire/CorrDico.py","file_name":"CorrDico.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"44290095","text":"from scipy.sparse import lil_matrix, csr_matrix\nimport numpy as np\nfrom prodindex import prodindex\nfrom Thalf import Thalf #for calculating transmissibility\n\n# update the wells and append their J/Q contributions\ndef updatewells(reservoir,fluid,numerical,P,well,BC): \n #Setting up J matrix\n J = lil_matrix((numerical.N, numerical.N))\n well.block = np.zeros((len(well.x), 1),dtype='int64')\n well.Jvec = np.zeros((len(well.x), 1),dtype='float64') \n Q = lil_matrix((numerical.N, 1)) #for Neumann BC\n for k in range(0,len(well.x)):\n iblock = 0\n for i in range(0,numerical.Nx):\n if well.x[k][0]<(numerical.xc[i,0]+numerical.dx1[i,0]/2) and well.x[k][0]>=(numerical.xc[i,0]-numerical.dx1[i,0]/2) :\n iblock= i\n break\n jblock = 0\n for j in range(0,numerical.Ny):\n if well.y[k][0]<(numerical.yc[j,0]+numerical.dy1[j,0]/2) and well.y[k][0]>=(numerical.yc[j,0]-numerical.dy1[j,0]/2) :\n jblock= j\n break\n kblock = iblock + jblock * numerical.Nx\n well.block[k,0] = kblock\n well.Jvec[k,0] = prodindex(k,well,reservoir,fluid,numerical)\n \n if well.type[k][0] == 2: #for BHP [psi]\n J[kblock,kblock] = J[kblock,kblock] + well.Jvec[k,0]\n Q[kblock,0] = Q[kblock,0] + 
J[kblock,kblock]*well.constraint[k][0]\n elif well.type[k][0] == 1:#for rate [scf/day] \n Q[kblock,0] = Q[kblock,0] + well.constraint[k][0] \n J = J.tocsr()\n Q = Q.tocsr()\n return J, Q;","sub_path":"Class_problems/Problem_6_wells_complexproblem/updatewells.py","file_name":"updatewells.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"27327490","text":"import uuid\n\nfrom django.db import models\nfrom django.contrib.auth.models import (\n BaseUserManager, AbstractBaseUser\n)\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.utils.translation import gettext as _\nfrom tinymce.models import HTMLField\n\n\ndef uploaded_filename(instance, filename):\n \"\"\"\n Scramble / uglify the filename of the uploaded file, but keep the files extension (e.g., .jpg or .png)\n :param instance:\n :param filename:\n :return:\n \"\"\"\n extension = filename.split(\".\")[-1]\n return \"{}/{}.{}\".format(instance.pk, uuid.uuid4(), extension)\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, email, password=None):\n \"\"\"\n Creates and saves a User with the given email and password.\n \"\"\"\n if not email:\n raise ValueError(_('Users must have an email address'))\n\n user = self.model(\n email=self.normalize_email(email)\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password):\n \"\"\"\n Creates and saves a superuser with the given email and password.\n \"\"\"\n user = self.create_user(\n email,\n password=password\n )\n user.is_admin = True\n user.save(using=self._db)\n return user\n\n\nclass User(AbstractBaseUser):\n email = models.EmailField(\n verbose_name=_('Email'),\n max_length=255,\n unique=True,\n )\n name = models.CharField(max_length=1024, blank=True, default=\"\")\n is_active = models.BooleanField(default=True)\n is_admin = models.BooleanField(default=False)\n\n ADMINISTRATOR = \"Администратор\"\n MANAGER = \"Менеджер\"\n\n\n TYPE_CHOICES = (\n (ADMINISTRATOR, _('Администратор')),\n (MANAGER, _('Менеджер')),\n\n )\n\n type = models.CharField(choices=TYPE_CHOICES, default=MANAGER, max_length=100, db_index=True, verbose_name=_(\"Тип\"))\n\n objects = UserManager()\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n\n class Meta:\n verbose_name = _(\"Пользователь\")\n verbose_name_plural = _(\"Пользователи\")\n\n def __str__(self):\n return self.email\n\n def has_perm(self, perm, obj=None):\n return True\n\n def has_module_perms(self, app_label):\n return True\n\n @property\n def is_staff(self):\n return self.is_admin\n\n\nclass Field(models.Model):\n name = models.CharField(max_length=255, blank=False, null=False, unique=True, db_index=True, verbose_name=_('Название'))\n\n class Meta:\n verbose_name = _(\"Месторождение\")\n verbose_name_plural = _(\"Месторождения\")\n\n def __str__(self):\n return self.name\n\n\nclass Well(models.Model):\n name = models.CharField(max_length=255, blank=False, null=False, unique=True, db_index=True, verbose_name=_('Название'))\n field = models.ForeignKey(Field, blank=False, null=False, on_delete=models.CASCADE, related_name='fields')\n well_id = models.IntegerField(default=0, verbose_name=_('sdmo_id'))\n tbd_id = models.CharField(max_length=30, blank=True, null=True, verbose_name=_('tbd_id'))\n\n shortage_isu = models.FloatField(default=0, verbose_name=_('Недобор (ИСУ)'))\n shortage_prs = models.FloatField(default=0, verbose_name=_('Недобор (ПРС)'))\n shortage_wait = 
models.FloatField(default=0, verbose_name=_('Недобор (Ожид.тех)'))\n\n brigade_num = models.IntegerField(default=0, verbose_name=_('Номер бригады'))\n ts_num = models.CharField(max_length=20, blank=True, default=\"\", verbose_name=_('Номер ТС'))\n well_stop = models.FloatField(default=0, verbose_name=_('Остановы'))\n well_stop_prs = models.FloatField(default=0, verbose_name=_('Остановы (ПРС)'))\n rem_count = models.IntegerField(default=0, verbose_name=_('Кол-во ремонтов'))\n\n server1 = \"192.168.241.2\"\n server2 = \"192.168.243.2\"\n server3 = \"192.168.236.2\"\n server4 = \"192.168.128.2\" \n SERVERS = (\n (server1, _('192.168.241.2')),\n (server2, _('192.168.243.2')),\n (server3, _('192.168.236.2')),\n (server4, _('192.168.128.2')),\n )\n\n server = models.CharField(choices=SERVERS, default=server1, max_length=15, verbose_name=_(\"Сервер\"))\n\n has_isu = models.BooleanField(default=False, verbose_name=_(\"Оснащен ИСУ\"))\n\n SGN = \"ШГН\"\n EVN = \"ЭВН\" \n PRODUCTION_TYPES = (\n (SGN, _('ШГН')),\n (EVN, _('ЭВН')),\n )\n\n production_type = models.CharField(choices=PRODUCTION_TYPES, default=SGN, max_length=100, verbose_name=_(\"Технология добычи\"))\n\n class Meta:\n verbose_name = _(\"Скважина\")\n verbose_name_plural = _(\"Скважины\")\n\n def __str__(self):\n return self.name\n\n\nclass Imbalance(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='imb_wells')\n imbalance = models.FloatField(default=0, verbose_name=_('Неуравновешенность'))\n avg_1997 = models.FloatField(default=0, verbose_name=_('Заполнения насоса'))\n timestamp = models.DateTimeField(blank=True, null=True, verbose_name=_('Дата опроса'))\n\n class Meta:\n verbose_name = _(\"Неуравновешенность\")\n verbose_name_plural = _(\"Неуравновешенность\")\n\n\nclass ImbalanceHistory(models.Model):\n imb = models.ForeignKey(Imbalance, blank=False, null=False, on_delete=models.CASCADE, related_name='imb')\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='imb_wells_history')\n imbalance = models.FloatField(default=0, verbose_name=_('Неуравновешенность'))\n avg_1997 = models.FloatField(default=0, verbose_name=_('Заполнения насоса'))\n timestamp = models.DateTimeField(blank=True, null=True, verbose_name=_('Дата опроса'))\n\n class Meta:\n verbose_name = _(\"Неуравновешенность история\")\n verbose_name_plural = _(\"Неуравновешенность история\")\n\n\nclass ImbalanceHistoryAll(models.Model):\n count = models.IntegerField(default=0, verbose_name=_('Число скважен'))\n percent = models.FloatField(default=0, verbose_name=_('Процент от кольичесво скважен'))\n timestamp = models.DateTimeField(blank=True, null=True, verbose_name=_('Дата'))\n\n class Meta:\n verbose_name = _(\"Неуравновешенность история всех скважен дня\")\n verbose_name_plural = _(\"Неуравновешенность история всех скважен дней\")\n\n\nclass WellMatrix(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='wells')\n\n filling = models.FloatField(default=0, verbose_name=_('Заполнение насоса'))\n fluid_agzu = models.FloatField(default=0, verbose_name=_('Жидкость (АГЗУ)'))\n fluid_isu = models.FloatField(default=0, verbose_name=_('Жидкость (ИСУ)'))\n teh_rej_fluid = models.FloatField(default=0, verbose_name=_('Техрежим жидкости'))\n teh_rej_oil = models.FloatField(default=0, verbose_name=_('Техрежим нефти'))\n teh_rej_water = models.FloatField(default=0, verbose_name=_('Обводненность'))\n timestamp = 
models.DateField(blank=True, null=True, verbose_name=_('Дата'))\n\n class Meta:\n verbose_name = _(\"Матрица\")\n verbose_name_plural = _(\"Матрица\")\n\n\nclass WellEvents(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='event_wells')\n\n PRS = \"ПРС\"\n KRS = \"КРС\"\n TRS = \"ТРС\"\n GTM = \"ГТМ\"\n OTHER = \"Прочие простои\"\n\n EVENT_CHOICES = (\n (PRS, _('ПРС')),\n (KRS, _('КРС')),\n (TRS, _('ТРС')),\n (GTM, _('ГТМ')),\n (OTHER, _('Прочие простои')),\n )\n\n event_type = models.CharField(choices=EVENT_CHOICES, default=OTHER, max_length=20, verbose_name=_('Тип события'))\n event = models.CharField(max_length=200, verbose_name=_('Событие'))\n beg = models.DateTimeField(blank=False, verbose_name=_('Начало события'))\n end = models.DateTimeField(blank=True, null=True, verbose_name=_('Конец события'))\n\n class Meta:\n verbose_name = _(\"Журнал события\")\n verbose_name_plural = _(\"Журнал событий\")\n\n\nclass Depression(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='depression_wells')\n\n densityPL = models.FloatField(default=0, verbose_name=_('Давление (Пласт)'))\n densityZB = models.FloatField(default=0, verbose_name=_('Давление (Забой)'))\n densityDiff = models.FloatField(default=0, verbose_name=_('Разница'))\n fluid_av = models.FloatField(default=0, verbose_name=_('Жидкость (ср.)'))\n\n timestamp = models.DateField(blank=False, verbose_name=_('Дата'))\n\n class Meta:\n verbose_name = _(\"Подбор депрессии\")\n verbose_name_plural = _(\"Подбор депрессий\")\n\n\nclass FieldBalance(models.Model):\n field = models.ForeignKey(Field, blank=False, null=False, on_delete=models.CASCADE, related_name='bal_fields')\n transport_balance = models.FloatField(default=0, db_index=True, verbose_name=_('Автомобильные весы (жидкость)'))\n ansagan_balance = models.FloatField(default=0, db_index=True, verbose_name=_('Весы по Ансаган (жидкость)'))\n transport_brutto = models.FloatField(default=0, db_index=True, verbose_name=_('Автомобильные весы (брутто)'))\n ansagan_brutto = models.FloatField(default=0, db_index=True, verbose_name=_('Весы по Ансаган (брутто)'))\n transport_netto = models.FloatField(default=0, db_index=True, verbose_name=_('Автомобильные весы (нетто)'))\n ansagan_netto = models.FloatField(default=0, db_index=True, verbose_name=_('Весы по Ансаган (нетто)'))\n transport_density = models.FloatField(default=0, db_index=True, verbose_name=_('Автомобильные весы (плотность)'))\n ansagan_density = models.FloatField(default=0, db_index=True, verbose_name=_('Весы по Ансаган (плотность)'))\n\n agzu_fluid = models.FloatField(default=0, db_index=True, verbose_name=_('Замер жидкости по скважинам'))\n agzu_oil = models.FloatField(default=0, db_index=True, verbose_name=_('Замер нефти по скважинам'))\n teh_rej_fluid = models.FloatField(default=0, db_index=True, verbose_name=_('Замер по Тех. жидкости'))\n teh_rej_oil = models.FloatField(default=0, db_index=True, verbose_name=_('Замер по Тех. 
нефти'))\n\n timestamp = models.DateField(blank=False, verbose_name=_('Дата замера'))\n\n class Meta:\n verbose_name = _(\"Баланс по месторождению\")\n verbose_name_plural = _(\"Баланс по месторождениям\")\n\n\nclass TS(models.Model):\n gos_num = models.CharField(max_length=20, blank=False, null=False, verbose_name=_('Гос номер'))\n marka = models.CharField(max_length=50, verbose_name=_('Марка'))\n type = models.CharField(max_length=50, verbose_name=_('Тип'))\n total_days = models.IntegerField(default=30, verbose_name=_('Всего дней'))\n in_work = models.IntegerField(default=30, verbose_name=_('В работу'))\n in_rem = models.IntegerField(default=30, verbose_name=_('В ремонте'))\n day_off = models.IntegerField(default=30, verbose_name=_('Выходной'))\n month = models.IntegerField(default=30, verbose_name=_('Месяц'))\n year = models.IntegerField(default=2019, verbose_name=_('Год'))\n field = models.CharField(max_length=50, verbose_name=_('ПСП'))\n kip = models.FloatField(default=100, verbose_name=_('КИП'))\n ktg = models.FloatField(default=100, verbose_name=_('КТГ'))\n\n class Meta:\n verbose_name = _(\"Транспортное средство\")\n verbose_name_plural = _(\"Транспортные средства\")\n\n def __str__(self):\n return self.gos_num\n\n\nclass GSM(models.Model):\n gos_num = models.CharField(max_length=50, blank=False, null=False, verbose_name=_('Гос номер'))\n type = models.CharField(max_length=100, verbose_name=_('Тип'))\n year = models.IntegerField(default=2019, verbose_name=_('Год'))\n month = models.IntegerField(default=-1, verbose_name=_('Месяц'))\n field = models.CharField(max_length=30, verbose_name=_('ПСП'))\n gsm_type = models.CharField(max_length=50, verbose_name=_('Тип ГСМ'))\n sum = models.FloatField(default=0, verbose_name=_('Сумма во ВВ'))\n quantity = models.FloatField(default=0, verbose_name=_('Количество'))\n\n class Meta:\n verbose_name = _(\"ГСМ\")\n verbose_name_plural = _(\"ГСМ\")\n\n def __str__(self):\n return self.gos_num\n\n\nclass ProdProfile(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='prof_wells')\n well_pair = models.IntegerField(default=-1, verbose_name=_('Пара'))\n pre_fluid = models.FloatField(default=0, verbose_name=_('Жидкость (До)'))\n post_fluid = models.FloatField(default=0, verbose_name=_('Жидкость (После)'))\n pre_oil = models.FloatField(default=0, verbose_name=_('Нефть (До)'))\n post_oil = models.FloatField(default=0, verbose_name=_('Нефть (После)'))\n pre_obv = models.FloatField(default=0, verbose_name=_('Обводненность (До)'))\n post_obv = models.FloatField(default=0, verbose_name=_('Обводненность (После)'))\n effect = models.FloatField(default=0, verbose_name=_('Эффект (нефть)'))\n\n class Meta:\n verbose_name = _(\"Профиль добычи\")\n verbose_name_plural = _(\"Профиль добычи\")\n\n\nclass Dynamogram(models.Model):\n well = models.ForeignKey(Well, blank=False, null=False, on_delete=models.CASCADE, related_name='dyn_wells')\n x = ArrayField(models.FloatField(), blank=True)\n y = ArrayField(models.FloatField(), blank=True)\n timestamp = models.DateTimeField(blank=False, verbose_name=_('Время замера'))\n\n class Meta:\n verbose_name = _(\"Динамограмма скважины\")\n verbose_name_plural = _(\"Динамограммы скважин\")\n\n\nclass SumWellInField(models.Model):\n field = models.ForeignKey(Field, blank=False, null=False, on_delete=models.CASCADE, related_name='well_in_fields')\n\n filling = models.FloatField(default=0, verbose_name=_('Заполнение насоса'))\n fluid_agzu = models.FloatField(default=0, 
verbose_name=_('Жидкость (АГЗУ)'))\n fluid_isu = models.FloatField(default=0, verbose_name=_('Жидкость (ИСУ)'))\n\n teh_rej_fluid = models.FloatField(default=0, verbose_name=_('Техрежим жидкости'))\n teh_rej_oil = models.FloatField(default=0, verbose_name=_('Техрежим нефти'))\n teh_rej_water = models.FloatField(default=0, verbose_name=_('Обводненность'))\n\n timestamp = models.DateField(blank=True, null=True, verbose_name=_('Дата'))\n\n class Meta:\n verbose_name = _(\"Баланс месторождении (ИСУ)\")\n verbose_name_plural = _(\"Баланс месторождений (ИСУ)\")\n\n\nclass FieldMatrix(models.Model):\n field = models.ForeignKey(Field, blank=False, null=False, on_delete=models.CASCADE, related_name='matrix_fields')\n\n filling = models.FloatField(default=0, verbose_name=_('Заполнение насоса'))\n fluid_agzu = models.FloatField(default=0, verbose_name=_('Жидкость (АГЗУ)'))\n fluid_isu = models.FloatField(default=0, verbose_name=_('Жидкость (ИСУ)'))\n\n teh_rej_fluid = models.FloatField(default=0, verbose_name=_('Техрежим жидкости'))\n teh_rej_oil = models.FloatField(default=0, verbose_name=_('Техрежим нефти'))\n teh_rej_water = models.FloatField(default=0, verbose_name=_('Обводненность'))\n\n timestamp = models.DateField(blank=True, null=True, verbose_name=_('Дата'))\n\n class Meta:\n verbose_name = _(\"Баланс месторождении\")\n verbose_name_plural = _(\"Баланс месторождений\")\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296218040","text":"from .Fragments import Fragments\r\nfrom .Potential import *\r\nfrom .MBE_Potential import MBE_Potential\r\nfrom .Compute_Hessian import Hessian\r\nimport numpy as np\r\nimport sys, time\r\n\r\nclass Optimize:\r\n \"\"\"Simple gradient descent implementation which works by taking a potential_function\r\n and following the gradient until certain convergence criteria are met.\r\n \"\"\"\r\n def __init__(self, initial_geometry, \r\n potential_function, \r\n max_iterations=2000, \r\n max_force=10**-5, \r\n max_rms_force=10**-5, \r\n max_delta_energy=10**-9,\r\n step_size=0.5):\r\n self.initial_geometry = initial_geometry\r\n self.potential_function = potential_function\r\n\r\n self.max_iterations = max_iterations\r\n\r\n # convergence conditions\r\n self.max_force = max_force\r\n self.max_rms_force = max_rms_force\r\n self.max_delta_energy = max_delta_energy\r\n\r\n # convergence parameters\r\n self.delta_energy = 10.0\r\n self.current_max_force = 10.0\r\n self.current_rms_force = 10.0\r\n\r\n self.step_size = step_size\r\n\r\n def hybrid_method(self):\r\n geometry = self.gradient_descent(stop_early=True)\r\n self.newtons_method(starting_geometry=geometry)\r\n\r\n def newtons_method(self, starting_geometry=None):\r\n if starting_geometry is None:\r\n geometry = np.copy(self.initial_geometry.flatten())\r\n else:\r\n geometry = starting_geometry.flatten()\r\n\r\n step_size = 1.0\r\n\r\n hessian_calculator = Hessian(self.potential_function)\r\n old_energy, gradients = self.potential_function(np.reshape(geometry, self.initial_geometry.shape))\r\n for iteration in range(self.max_iterations):\r\n hessian = hessian_calculator.evaluate(geometry)\r\n inverted_hessian = np.linalg.inv(hessian)\r\n geometry -= step_size * np.dot(inverted_hessian, gradients.flatten() / 1.88973)\r\n if abs(self.delta_energy) > self.max_delta_energy or self.current_max_force > self.max_force or self.current_rms_force > self.max_rms_force:\r\n 
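# convergence criteria still unmet, so refresh the energy and gradients for the next Newton step\r\n 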
energy, gradients = self.potential_function(np.reshape(geometry, self.initial_geometry.shape))\r\n\r\n self.update_convergence_parameters(energy, old_energy, gradients)\r\n old_energy = energy\r\n print(f\"Iteration {iteration}: Energy: {energy*627.5:.6f}, ({self.delta_energy*627.5:.6f}); Max Force: {self.current_max_force:.6f}; RMS Force: {self.current_rms_force:.6f}\")\r\n else:\r\n print(\"Converged Geometry:\")\r\n geometry = np.reshape(geometry, self.initial_geometry.shape)\r\n print(geometry - np.mean(geometry, axis=0))\r\n return\r\n \r\n def gradient_descent(self, stop_early=False, stop_early_iteration=50):\r\n geometry = np.copy(self.initial_geometry)\r\n\r\n old_energy, f = self.potential_function(geometry)\r\n g = np.copy(f / 1.88973)\r\n h = np.copy(f / 1.88973)\r\n for iteration in range(self.max_iterations):\r\n old_geometry = np.copy(geometry)\r\n f_old = np.copy(f)\r\n f /= 1.88973\r\n geometry += self.step_size * f\r\n if abs(self.delta_energy) > self.max_delta_energy or self.current_max_force > self.max_force or self.current_rms_force > self.max_rms_force:\r\n energy, f_new = self.potential_function(geometry)\r\n if energy - old_energy > 0.0:\r\n geometry = old_geometry\r\n self.step_size *= 0.5\r\n f = f_old\r\n iteration -= 1\r\n continue\r\n\r\n f_new /= 1.88973\r\n gamma = np.dot(f_new.flatten(), f_new.flatten()) / np.dot(g.flatten(), g.flatten())\r\n g = f_new\r\n h = g + gamma * h\r\n f = h\r\n\r\n self.update_convergence_parameters(energy, old_energy, f)\r\n\r\n old_energy = energy\r\n self.step_size *= 1.1\r\n print(f\"Iteration {iteration}: Energy: {energy*627.5:.6f}, ({self.delta_energy*627.5:.6f}); Max Force: {self.current_max_force:.6f}; RMS Force: {self.current_rms_force:.6f}\")\r\n else:\r\n print(f\"Converged Geometry: Final Energy = {energy*627.5:.6f}\")\r\n return geometry\r\n \r\n if stop_early is True and iteration >= stop_early_iteration:\r\n print(f\"Stopped after {stop_early_iteration} iterations! 
Did not converge!\")\r\n return geometry\r\n\r\n print(f\"Failed to converge in {self.max_iterations} steps!\")\r\n \r\n def update_convergence_parameters(self, current_energy, old_energy, gradients):\r\n self.delta_energy = current_energy - old_energy\r\n self.current_max_force = np.amax(np.abs(gradients))\r\n self.current_rms_force = np.sqrt(np.mean(np.einsum('ij,ij->i', gradients, gradients)))\r\n\r\nif __name__ == '__main__':\r\n try:\r\n ifile = sys.argv[1]\r\n except:\r\n print(\"Didn't get an xyz file.\")\r\n sys.exit(1)\r\n \r\n fragments = Fragments(ifile)\r\n #ttm21f = TTM([\"ttm*\"], \"ttm\", \"ttm_from_f2py\", 21)\r\n mbpol = MBPol()\r\n mbe_ff = MBE_Potential(5, fragments, mbpol, return_extras=False)\r\n\r\n optimizer = Optimize(np.vstack(fragments.fragments), mbe_ff.evaluate_on_geometry)\r\n\r\n start = time.time()\r\n print(optimizer.gradient_descent())\r\n #optimizer.newtons_method()\r\n #optimizer.hybrid_method()\r\n print(time.time() - start)","sub_path":"py_MD/Gradient_Descent.py","file_name":"Gradient_Descent.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"453739316","text":"from typing import Tuple, List, Optional\n\nfrom torch.utils.data import DataLoader, RandomSampler, DistributedSampler\n\nfrom .datasets import _split_dataset, _DATASETS\n\n\ndef vision_loaders(name: str,\n batch_size: int,\n train_da: Optional[List] = None,\n test_da: Optional[List] = None,\n norm: Optional[List] = None,\n val_size: int = 0,\n download: bool = False,\n num_workers: int = -1,\n non_training_bs_factor=2,\n distributed: bool = False,\n drop_last: bool = False,\n pin_memory: bool = True,\n return_num_classes: bool = False,\n test_batch_size: Optional[int] = None\n ) -> Tuple:\n \"\"\" Get data loaders for registered vision datasets. homura expects datasets are in `~/.torch/data/DATASET_NAME`.\n Link path if necessary, e.g. `ln -s /original/path $HOME/.torch`. 
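When val_size is positive, the held-out validation split reuses the test-time transforms. 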
Datasets can be registered\n using `homura.vision.register_dataset`\n\n :param name: name of dataset.\n :param batch_size:\n :param train_da: custom train-time data augmentation\n :param test_da: custom test-time data augmentation\n :param norm: custom normalization after train_da/test_da\n :param val_size: If `val_size>0`, split train set\n :param download:\n :param num_workers:\n :param non_training_bs_factor:\n :param distributed:\n :param return_num_classes:\n :return: (train_loader, test_loader, [val_loader], [num_classes])\n \"\"\"\n\n if name not in _DATASETS.keys():\n raise RuntimeError(f'Unknown dataset name {name}.')\n dataset = _DATASETS[name]\n train_set, test_set = dataset.instantiate(train_da, test_da, norm, download)\n if test_batch_size is None:\n test_batch_size = non_training_bs_factor * batch_size\n if val_size > 0:\n train_set, val_set = _split_dataset(train_set, val_size)\n val_set.transform = test_set.transform\n\n samplers = [None, None, None]\n if distributed:\n import homura\n\n kwargs = dict(num_replicas=homura.get_world_size(), rank=homura.get_global_rank())\n samplers[0] = DistributedSampler(train_set, **kwargs)\n samplers[2] = DistributedSampler(test_set, **kwargs)\n else:\n samplers[0] = RandomSampler(train_set, True)\n\n shared_kwargs = dict(drop_last=drop_last, num_workers=num_workers, pin_memory=pin_memory,\n collate_fn=dataset.collate_fn)\n train_loader = DataLoader(train_set, batch_size, sampler=samplers[0], **shared_kwargs)\n test_loader = DataLoader(test_set, test_batch_size, sampler=samplers[2], **shared_kwargs)\n ret = [train_loader, test_loader]\n if val_size > 0:\n if distributed:\n samplers[1] = DistributedSampler(val_set, **kwargs) # sample the validation split, not the test set\n val_loader = DataLoader(val_set, test_batch_size, sampler=samplers[1], **shared_kwargs)\n ret.append(val_loader)\n\n if return_num_classes:\n ret.append(dataset.num_classes)\n\n return tuple(ret)\n","sub_path":"homura/vision/data/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202302506","text":"from collections import OrderedDict\nfrom itertools import chain\n\nfrom django.contrib.auth import authenticate, get_user_model, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User as DjangoUser\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom registration.backends.simple.views import RegistrationView\n\nfrom .models import User\nfrom instaclone.contrib.pictures.models import Picture\nfrom instaclone.contrib.likes.models import Like\n\n\n@login_required(login_url='/login/')\ndef news_feed(request):\n user = request.user\n news_feed = User.objects.get_news_feed_items_for_user(user=user)\n pictures = {}\n request_user = User.objects.get(user=request.user)\n for feed_user in news_feed: # renamed from 'user' to avoid shadowing the requesting user\n for item in feed_user.pictures.all():\n try:\n like = Like.objects.get(user=request_user, picture=item)\n liked = True\n except Like.DoesNotExist:\n liked = False\n pictures[item] = (feed_user, liked)\n qs = OrderedDict(sorted(pictures.items(), key=lambda instance: instance[0].time_created, reverse=True))\n context = {\n 'posts': qs,\n }\n return render(request, 'news_feed.html', context)\n\n\ndef user_profile(request, username):\n django_user = get_object_or_404(DjangoUser, username=username)\n user = get_object_or_404(User, user=django_user)\n request_user = User.objects.get(user=request.user)\n if request_user in 
user.followers.all():\n followed = True\n else:\n followed = False\n context = {\n 'user': user,\n 'followed': followed,\n }\n return render(request, 'user_profile.html', context)\n\n\ndef toggle_like(request):\n if request.method == 'POST':\n picture_id = request.POST['instance']\n user = User.objects.get(user=request.user)\n picture = Picture.objects.get(id=picture_id)\n like, created = Like.objects.get_or_create(picture=picture, user=user)\n if created:\n like.save()\n else:\n like.delete()\n response_data = {}\n return JsonResponse(response_data)\n\n\ndef toggle_follow(request):\n if request.method == 'POST':\n target_user = User.objects.get(user=DjangoUser.objects.get(id=request.POST['target_user']))\n user = User.objects.get(user=request.user)\n if user in target_user.followers.all():\n target_user.followers.remove(user)\n else:\n target_user.followers.add(user)\n target_user.save()\n response_data = {}\n return JsonResponse(response_data)\n\n\n@login_required\ndef logout_view(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\nclass RegistrationView():\n @staticmethod\n def get_success_view(): # redirects the user on submitting an account registration\n return \"/\"\n","sub_path":"instaclone/contrib/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"154206181","text":"__author__ = 'Pawel'\n\n#In this challenge, we are going to take a sentence and mangle it up by sorting the letters in each word.\n# So, for instance, if you take the word \"hello\" and sort the letters in it, you get \"ehllo\".\n# If you take the two words \"hello world\", and sort the letters in each word, you get \"ehllo dlorw\".\n\nsentenceSplit = input().split()\nsentenceJoin = ' '.join(sentenceSplit)\n\ndef converter(sen):\n '''\n :param sen: split string\n :return: sorted string\n '''\n word1 = []\n for x in range(len(sen)):\n word2 = []\n for y in range(len(sen[x])):\n if sen[x][y].isalpha():\n word2.append(sen[x][y].lower())\n word1.append(''.join(sorted(word2)))\n return ' '.join(word1)\n\ndef mangling(sen, con):\n '''\n :param sen: single line string\n :param con: split and sorted string\n :return: formatted string\n '''\n newSen = []\n correct = 0\n for x in range(len(sen)):\n if sen[x].isupper():\n newSen.append(con[x - correct].upper())\n elif sen[x].isalpha():\n newSen.append(con[x - correct])\n elif sen[x] == ' ':\n newSen.append(con[x - correct])\n else:\n newSen.append(sen[x])\n correct += 1\n return ''.join(newSen)\n\nprint(mangling(sentenceJoin, converter(sentenceSplit)))\n\n#Input 1 >>> Eey fo Entw, adn Eot fo Fgor, Loow fo Abt, adn Egnotu fo Dgo.\n#Input 2 >>> Adder's fkor, adn Bdilm-nors'w ginst, Adilrs'z egl, adn Ehlost'w ginw.\n#Input 3 >>> For a achmr fo eflopruw belortu, eikl a behh-llort bilo adn bbbelu.\n#\n#\n#def mangle(sentence):\n# uppercase = map(str.isupper, sentence)\n# mangled = filter(str.isalpha,\n# ''.join(map(''.join, map(sorted, sentence.lower().split())))\n# )\n# out = ''\n# for c in sentence:\n# caps = next(uppercase)\n# if c.isalpha():\n# c = next(mangled)\n# if caps: c = c.upper()\n# out += c\n# return out\n#\n#if __name__ == '__main__':\n# print(mangle(input()))\n","sub_path":"#220 [E] Mangling sentences.py","file_name":"#220 [E] Mangling sentences.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"522167244","text":"from superai.apis.meta_ai.project_ai import ProjectAiApiMixin\nfrom typing import Optional\n\nimport requests\n\nfrom superai.apis.auth import AuthApiMixin\nfrom superai.apis.data import DataApiMixin\nfrom superai.apis.ground_truth import GroundTruthApiMixin\nfrom superai.apis.project import ProjectApiMixin\nfrom superai.apis.jobs import JobsApiMixin\nfrom superai.apis.data_program import DataProgramApiMixin\nfrom superai.apis.meta_ai.model import ModelApiMixin\nfrom superai.config import settings\nfrom superai.exceptions import SuperAIAuthorizationError, SuperAIEntityDuplicatedError, SuperAIError\n\nBASE_URL = settings.get(\"base_url\")\n\n\nclass Client(\n JobsApiMixin,\n AuthApiMixin,\n GroundTruthApiMixin,\n DataApiMixin,\n DataProgramApiMixin,\n ProjectApiMixin,\n ProjectAiApiMixin,\n ModelApiMixin,\n):\n def __init__(self, api_key: str = None, auth_token: str = None, id_token: str = None, base_url: str = None):\n self.api_key = api_key\n self.auth_token = auth_token\n self.id_token = id_token\n if base_url is None:\n self.base_url = BASE_URL\n else:\n self.base_url = base_url\n\n def request(\n self,\n endpoint: str,\n method: str = \"GET\",\n query_params: dict = None,\n body_params: dict = None,\n required_api_key: bool = False,\n required_auth_token: bool = False,\n required_id_token: bool = False,\n ) -> Optional[dict]:\n headers = {}\n if required_api_key and self.api_key:\n headers[\"API-KEY\"] = self.api_key\n if required_auth_token and self.auth_token:\n headers[\"AUTH-TOKEN\"] = self.auth_token\n if required_id_token and self.id_token:\n headers[\"ID-TOKEN\"] = self.id_token\n\n resp = requests.request(\n method, f\"{self.base_url}/{endpoint}\", params=query_params, json=body_params, headers=headers\n )\n try:\n resp.raise_for_status()\n if resp.status_code == 204:\n return None\n else:\n return resp.json()\n except requests.exceptions.HTTPError as http_e:\n try:\n message = http_e.response.json()[\"message\"]\n except:\n message = http_e.response.text\n\n if http_e.response.status_code == 401:\n raise SuperAIAuthorizationError(\n message, http_e.response.status_code, endpoint=f\"{self.base_url}/{endpoint}\"\n )\n elif http_e.response.status_code == 409:\n raise SuperAIEntityDuplicatedError(\n message, http_e.response.status_code, base_url=self.base_url, endpoint=endpoint\n )\n raise SuperAIError(message, http_e.response.status_code)\n","sub_path":"superai/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460886960","text":"import pymongo\n\nfrom constants import constants\nfrom game_models.players.hitter import Hitter\nfrom game_models.players.pitcher import Pitcher\nfrom game_models.players.bullpen_pitcher import BullpenPitcher\n\n\n\"\"\"\nA Team contains all data about a specific team on a specific gameday\n\"\"\"\nclass Team:\n def __init__(self, team):\n db = pymongo.MongoClient()[constants.MONGO_DATABASE]\n bullpen_collection = db[constants.MONGO_TEAM_BULLPENS_COLLECTION]\n\n team_name = team[\"team_name\"]\n self.team_name = team_name\n self.not_qualified_collection = []\n self.qualified = True\n\n self.lineup = []\n for item in team[\"lineup\"]:\n hand = item[\"hand\"]\n position = item[\"pos\"]\n name = item[\"name\"]\n\n match = find_player_from_collection(name, team_name)\n\n if match is not None:\n hitter = Hitter(match[\"player_id\"], position, hand)\n self.lineup.append(hitter)\n else:\n print(\"No 
hitter data match found for: \" + name)\n self.not_qualified_collection.append(name + \" (\" + position + \")\")\n self.qualified = False\n\n self.starting_pitcher = None\n name = team[\"starting_pitcher\"][\"name\"]\n hand = team[\"starting_pitcher\"][\"hand\"]\n\n self.starting_pitcher_name = name\n self.starting_pitcher_hand = hand\n\n match = find_player_from_collection(name, team_name)\n\n if match is not None:\n self.starting_pitcher = Pitcher(match[\"player_id\"], hand)\n self.starting_pitcher.hand = hand\n if not self.starting_pitcher.qualified:\n print(name + \" is not qualified for this game (see pitcher stabilization constants)\")\n self.not_qualified_collection.append(name + \" (SP)\")\n self.qualified = False\n else:\n print(\"No pitcher data match found for: \" + name)\n self.not_qualified_collection.append(name + \" (SP)\")\n self.qualified = False\n\n if self.qualified:\n self.gb_fielding_pct = average_fielding_pct(self.lineup, self.starting_pitcher, [\"1B\",\"2B\",\"3B\",\"SS\"])\n self.ld_fielding_pct = average_fielding_pct(self.lineup, None, [\"1B\",\"2B\",\"3B\",\"SS\",\"LF\",\"CF\",\"RF\"])\n self.fb_fielding_pct = average_fielding_pct(self.lineup, None, [\"LF\",\"CF\",\"RF\"])\n self.iffb_fielding_pct = average_fielding_pct(self.lineup, self.starting_pitcher, [\"1B\",\"2B\",\"3B\",\"SS\",\"C\"])\n\n bullpen = bullpen_collection.find_one({\"name\": team_name})\n if bullpen:\n self.bullpen = BullpenPitcher(bullpen)\n\n\ndef average_fielding_pct(lineup, pitcher, positions):\n total = 0\n count = 0\n for hitter in lineup:\n if hitter.position in positions and hitter.fielding_pct is not None:\n total += hitter.fielding_pct\n count += 1\n\n if pitcher and pitcher.fielding_pct is not None:\n total += pitcher.fielding_pct\n count += 1\n\n if count > 0:\n return total / count\n else:\n return 1.0\n\n\ndef find_player_from_collection(name, team):\n db = pymongo.MongoClient()[constants.MONGO_DATABASE]\n players_collection = db[constants.MONGO_PLAYERS_COLLECTION]\n\n result = players_collection.find({\"name\": name})\n\n if result.count() == 1:\n return result[0]\n else:\n for i in range(0, result.count()):\n if str(constants.CURRENT_SEASON) in result[i].keys():\n player_team = result[i][str(constants.CURRENT_SEASON)][\"team\"]\n if player_team == team or player_team == constants.MULTIPLE_TEAM:\n return result[i]\n return None\n","sub_path":"game_models/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401100541","text":"\"\"\"\nThis script contains database handler\n\"\"\"\n\nfrom traceback import print_exc\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nfrom src.services.config.config_handler import ConfigHandler\n\n\nclass DbHandler:\n \"\"\"\n Class containing database handler object.\n \"\"\"\n\n def __init__(self):\n\n # Loading infra configuration file\n _config = ConfigHandler().infra_config\n\n conn_string = (\n f\"postgresql://{_config.db.user}:{_config.db.password}@\"\n f\"{_config.db.host}:{_config.db.port}/{_config.db.database}\"\n )\n self._engine = create_engine(conn_string)\n\n @property\n def connection(self):\n import psycopg2\n \"\"\"Returns a connection to the database\"\"\"\n _config = ConfigHandler().infra_config\n return psycopg2.connect(\n host=_config.db.host,\n port=_config.db.port,\n user=_config.db.user,\n password=_config.db.password,\n database=_config.db.database,\n )\n\n def execute(self, sql):\n 
\"\"\"Executes a SQL query\n\n :param sql: SQL query\n \"\"\"\n import psycopg2\n connection = self.connection\n try:\n with connection.cursor() as cur:\n cur.execute(sql)\n connection.commit()\n print(sql)\n except (Exception, psycopg2.DatabaseError) as error:\n print(f\"\\033[91mError: {error}\\033[0m\")\n print_exc()\n finally:\n connection.close() # the with-block already closed the cursor\n\n def write(self, df, table_name, *args, **kwargs):\n \"\"\"Writes a DataFrame to the db\n\n :param df: pandas DataFrame to write\n :param table_name: name of the table\n \"\"\"\n df.to_sql(name=table_name, con=self._engine, *args, **kwargs)\n\n def read(self, sql, *args, **kwargs):\n \"\"\"Reads a DataFrame from a SQL query, using pd.read_sql\n\n :param sql: query to execute to retrieve the DataFrame\n :returns: the SQL table as a pandas DataFrame\n \"\"\"\n return pd.read_sql(sql, self._engine, *args, **kwargs)\n","sub_path":"src/services/db/db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"631106942","text":"from datetime import datetime, time\nfrom flask import Flask, render_template\napp = Flask(__name__)\n\n# As of 09-12-2015, the hours for Pavilion XI (The Pav) are:\n# Monday - Thursday Friday Saturday Sunday\n# 10:30 am - 8:00 pm 10:30 am - 6:00 pm Closed Closed\n#\n# Per: http://www.virginia.edu/newcomb/building-hours/\n# There is currently no way to programmatically find these\n\nOPEN = time(10, 30)\nM_TH_CLOSE = time(20, 00)\nF_CLOSE = time(18, 00)\n\n\n@app.route('/')\ndef index():\n time_now = datetime.now().time()\n day = datetime.today().weekday()\n close = False\n if day in range(0, 4):\n close = M_TH_CLOSE\n if day == 4:\n close = F_CLOSE\n\n open = is_open(time_now, day)\n meal = is_meal(time_now, day)\n\n return render_template('index.html', open=open, meal=meal, close=close)\n\n\ndef is_open(time_now, day):\n\n hours = {\n 'open': OPEN,\n 'close': time(00, 00)\n }\n if day in range(0, 4): # Monday - Thursday (weekday() gives Mon=0)\n hours['close'] = M_TH_CLOSE\n\n if day == 4: # Friday hours\n hours['close'] = F_CLOSE\n\n ret = False\n if time_now >= hours['open'] and time_now <= hours['close']:\n ret = True\n\n return ret\n\n\ndef is_meal(time_now, day):\n hours = {\n 'open': time(16, 00),\n 'close': M_TH_CLOSE\n }\n meal = False\n weekday = day not in range(4, 7)\n if time_now >= hours['open'] and time_now <= hours['close'] and weekday:\n meal = True\n\n return meal\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493648341","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis program is for practicing string operators,\r\nusing the input() function and + .\r\n\"\"\"\r\n\r\na = 'Please input your name : '\r\nname = input(a)\r\nhel = \"Hi ! \"+ name*3 +\", welcome to learning Python.\"\r\nprint(hel)\r\na = \"How old are you ?\"\r\nage = input(a)\r\na = 'Are you male or female ? 
'\r\nsex = input(a)\r\nprint()\r\nprint('Here is your information : ')\r\nprint(name + ' is ' + age + ' years old.\\n' + 'You are ' + sex +'.\\n')\r\n","sub_path":"string_ operator_ practice.py","file_name":"string_ operator_ practice.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625516115","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 4 21:20:00 2021\n\n@author: sazamore\n\nA wraparound boundary, demonstrated with a ghost moving left\nto right against a black background. Ghost crosses a boundary\n10 times before stopping.\n\"\"\"\n\nimport turtle\n\n# ================ LIBRARY SETTINGS SETUP =========================\nturtle.colormode(255) # accept 0-255 RGB values\nturtle.tracer(0) # turn off turtle's animation\n\npanel = turtle.Screen()\nw = 700\nh = 500\npanel.setup(width=w, height=h)\npanel.bgcolor(\"black\")\n\n# ================ VARIABLE DEFINITION & SETUP =========================\nghost = turtle.Turtle(shape=\"circle\")\nsize = 4\nrunning = True # while loop conditional\nstep = 1 # increment of ball movement (controls speed of ghost)\ncount = 0 # edge crossing counter, to determine when to stop animating\ncrosses = 10 # number of edge crosses to stop after\n\n# import and set image as turtle shape (kept in its own variable so the Turtle object isn't overwritten)\nghost_image = \"ghost.gif\" # turtle library ONLY works with gifs!\npanel.addshape(ghost_image) # save the image to the panel so it knows what to draw\nghost.shape(ghost_image) # change the turtle shape to the saved image\n\nghost.up() # we're not drawing, anymore\n\n# ================ FUNCTION DEFINITION =========================\n\n\n# ================ ANIMATION LOOP =========================\nwhile running:\n ghost.forward(step) # move ghost\n xpos = ghost.xcor() # get x position\n \n if xpos >= w/2:\n # check if it crosses the RIGHT edge\n ghost.goto(-w/2,0) # move it to the left edge\n count += 1 # keep track of the crossing\n \n if count > crosses:\n # check if we've made the intended number of crosses\n running = False \n \n panel.update() # update the window with everything drawn in a single frame\n \n# CLEANUP\nturtle.done()\n\n\n\n","sub_path":"wrapAroundBoundary.py","file_name":"wrapAroundBoundary.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"622657566","text":"import unittest\n\n# O(N) time and O(1) space\n\n\n\n# def lengthOfLastWord(string):\n# list = string.split(' ')\n# return len(list[-1]) if list else 0\n\n# def length_of_last_word(string):\n# current_length, prev_length = 0, 0\n# for char in string:\n# if char == ' ':\n# if current_length != 0:\n# prev_length = current_length\n# current_length = 0\n# else:\n# current_length += 1\n \n# return current_length if current_length != 0 else prev_length\n\n# def length_of_last_word(string):\n# result = 0\n# i = len(string) - 1\n\n# while string[i] == ' ' and i >= 0:\n# i -= 1\n \n# if i == -1:\n# # Reached the beginning of the word, and did not find any words\n# return 0\n# # result = 1\n\n# while string[i] != ' ' and i >= 0:\n# result += 1\n# i -= 1\n \n# return result\n\n\ndef length_of_last_word(string):\n i = len(string) - 1\n end = -1\n\n while i >= 0:\n if string[i] == ' ' and end != -1:\n # already found a word, and encountered a space\n return end - i\n if string[i] != ' ' and end == -1:\n # found a letter for the first time\n end = i\n i -= 1\n \n return end + 1 if end != -1 else 0\n\nclass 
Test(unittest.TestCase):\n dataTrue = [('Hello World', 5), ('qwerte', 6), (' ', 0)]\n dataFalse = [('Hello World', 7), ('qwerte', 2), (' ', 3)]\n\n def test_length_of_last_word(self):\n # true check\n for test_data in self.dataTrue:\n actual = length_of_last_word(test_data[0])\n self.assertEqual(actual, test_data[1])\n \n # false check\n for test_data in self.dataFalse:\n actual = length_of_last_word(test_data[0])\n self.assertNotEqual(actual, test_data[1])\n\nif __name__ == '__main__':\n unittest.main()\n\ndef length_of_last_word(word):\n if not word:\n return None\n \n i = len(word) - 1\n end = -1\n\n while i >= 0:\n if word[i] == ' ' and end != -1:\n return end - i\n \n if word[i] != ' ' and end == -1:\n end = i\n \n i -= 1\n \n return end + 1 if end != -1 else 0","sub_path":"strings/lengthOfLastWord.py","file_name":"lengthOfLastWord.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266479140","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" Script to fetch the config of a host.\n:copyright: (c) 2019 by PolyLogyx.\n:license: MIT, see LICENSE for more details.\n\nUsage : python fetch_config.py --domain=127.0.0.1 --node_key=\n\"\"\"\n\nimport argparse\nimport json\n\nimport requests\n\n\ndef main(domain, port, node_key):\n url = \"https://\" + domain + \":\" + str(port) + \"/config\"\n headers={'Content-Type':'application/json'}\n response = requests.post(url,headers=headers, data=json.dumps({\"node_key\": node_key}), verify=False)\n print(json.dumps(response.json(), sort_keys=False, indent=4))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Config.')\n\n parser.add_argument('--domain',\n\n help='Domain/Ip of the server', required=True)\n parser.add_argument('--port',\n\n help='Port', required=False, default=9000)\n parser.add_argument('--node_key',\n\n help='Node Key', required=True)\n args = parser.parse_args()\n\n main(args.domain, args.port, args.node_key)\n","sub_path":"osquery_tls_scripts/fetch_config.py","file_name":"fetch_config.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476437120","text":"import random\r\nimport math\r\n\r\n\r\nclass Chance:\r\n def hit(self, y1, x1, y2, x2, spis, spec):\r\n bools_way = math.sqrt((abs(x1 - x2) ** 2) + (abs(y1 - y2) ** 2))\r\n math.floor(bools_way)\r\n bools_way = int(bools_way)\r\n defense = 0\r\n if y2 < y1 and (y2 + 1, x2) in spis:\r\n defense = 1\r\n elif x2 < x1 and (y2, x2 + 1) in spis:\r\n defense = 1\r\n elif x2 > x1 and (y2, x2 - 1) in spis:\r\n defense = 1\r\n elif y2 > y1 and (y2 - 1, x2) in spis:\r\n defense = 1\r\n else:\r\n defense = 0\r\n if spec == 1:\r\n chance_hit = 70 - (defense * 20) + (bools_way * 2) - random.randint(0, 10)\r\n else:\r\n chance_hit = 99 - (defense * 20) - (bools_way * 6) - random.randint(0, 10)\r\n return chance_hit\r\n","sub_path":"chance.py","file_name":"chance.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"512738725","text":"import numpy as np\nimport math\n\ndef coords_to_pixel(longitude, latitude, zoom=20):\n '''from longitude and latitude in degrees and zoom, returns equivalent in pixel coordinates'''\n x = (128/np.pi) * (2**zoom) * (np.radians(longitude) + np.pi)\n y = (128/np.pi) * (2**zoom) * (np.pi - np.log(np.tan(np.pi/4 + np.radians(latitude)/2)))\n 
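# note: y grows southward here (Web-Mercator pixel convention)\n 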
return x, y\n\ndef pixel_to_coords(x,y,zoom=20):\n '''from pixel coordinates, returns longitude and latitude in degrees'''\n longitude = x / ((128/np.pi) * (2**zoom)) - np.pi\n latitude = 2 * np.arctan(np.exp(np.pi - y / (128 / np.pi*2**zoom))) - np.pi / 2\n return (np.degrees(longitude), np.degrees(latitude))\n\ndef bounds_coords(longitude, latitude, zoom=20, width=256, heigh=256):\n '''from longitude and latitude in degrees and zoom, returns left-bottom and right-top corners in degrees'''\n x, y = coords_to_pixel(longitude, latitude, zoom)\n \n left = x - (width/2)\n right = x + (width/2)\n top = y - (heigh/2)\n bottom = y + (heigh/2)\n \n left_bottom_coords = pixel_to_coords(left, bottom, zoom)\n right_top_coords = pixel_to_coords(right, top, zoom)\n \n return [left_bottom_coords, right_top_coords]\n\ndef center_coords(longitude, latitude, zoom=20, width=256, heigh=256):\n '''from longitude and latitude in degrees and zoom, returns the center of the width x heigh tile in degrees'''\n left_bottom_coords, right_top_coords = bounds_coords(longitude, latitude, zoom, width, heigh)\n return [(left_bottom_coords[0] + right_top_coords[0]) / 2,\n (left_bottom_coords[1] + right_top_coords[1]) / 2]\n\ndef deg2num(lon_deg, lat_deg, zoom=20):\n '''from longitude and latitude in degrees and zoom, returns tile number'''\n # e.g. deg2num(0.0, 0.0, zoom=1) == [1, 1]\n lat_rad = math.radians(lat_deg)\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)\n return [xtile, ytile]\n\ndef num2deg(xtile, ytile, zoom=20):\n '''from tile number, returns its north-west corner coordinate. 
If input xtile+0.5 and ytile+0.5, returns its center'''\n n = 2.0 ** zoom\n lon_deg = xtile / n * 360.0 - 180.0\n lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))\n lat_deg = math.degrees(lat_rad)\n return [lon_deg, lat_deg]\n\n\ndef nTiles(left_top_coord, right_bottom_coord):\n '''from coordinates, retrieves number of tiles each coord to be introduced as tuple as follows: (longitude, latitude)'''\n c1_tile = deg2num(left_top_coord[0], left_top_coord[1], zoom=20)\n c2_tile = deg2num(right_bottom_coord[0], right_bottom_coord[1], zoom=20)\n return ((c1_tile[0]-c2_tile[0])*(c1_tile[1]-c2_tile[1]))","sub_path":"src/geofunctions.py","file_name":"geofunctions.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638378110","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# numpy is used for creating fake data\n#import numpy as np \nimport matplotlib as mpl \nimport matplotlib.pyplot as plt\n\n# agg backend is used to create plot as a .png file\n#mpl.use('agg')\n\nmpl.rc('text', usetex=True)\nmpl.rc('font', family='serif')\nmpl.rcParams['text.latex.unicode']=True\n\n\nworlds_file = ['ap', 'f', 'k', 'sq']\nworlds = ['Office', 'Industrial', 'Kitchen', 'Square']\n\nresults = []\n\ndata_to_plots = []\n\n# Read data\nfor w in worlds_file:\n filename = 'results' + str(w)\n data_to_plots.append([float(line.strip()) for line in open(filename, 'r')])\n\n# Create a figure instance\nfig = plt.figure(1, figsize=(9, 6))\n\n# Create an axes instance\nax = fig.add_subplot(111)\n#ax.set_yscale(\"log\", nonposy='clip')\n#ax.set_xticklabels(worlds[0])\nax.set_xticklabels(worlds)\nax.fontsize = 20\n\n# Create the boxplots\n\nbp = ax.boxplot(data_to_plots, sym='', whis=100.0, patch_artist=True)\n\nax.text(1,max(data_to_plots[0]) + 2, 'Max: ' + str(max(data_to_plots[0])) + ' ms', ha='center')#, fontsize=15)\nax.text(2,max(data_to_plots[1]) + 2, 'Max: ' + str(max(data_to_plots[1])) + ' ms', ha='center')#, fontsize=15)\nax.text(3,max(data_to_plots[2]) + 2, 'Max: ' + str(max(data_to_plots[2])) + ' ms', ha='center')#, fontsize=15)\nax.text(4,max(data_to_plots[3]) + 2, 'Max: ' + str(max(data_to_plots[3])) + ' ms', ha='center')#, fontsize=15)\n\nfor box in bp['boxes']:\n # change outline color\n# box.set(linewidth=2)\n # change fill color\n box.set(facecolor = '#FFFFFF')\n#\nfor whisker in bp['whiskers']:\n whisker.set(color='#000000')\n#\n#for cap in bp['caps']:\n# cap.set(color='#000000', linewidth=2)\n\nfor median in bp['medians']:\n median.set(color='#000000')\n#\n#for flier in bp['fliers']:\n# flier.set(marker='o', color='#FFFFFF', alpha=0.5)\n#\n\nh = plt.ylabel(r'Planning time ($\\frac{t}{ms}$)', fontsize=15)\nax.yaxis.labelpad = 15\n#plt.show()\n\n#fig.show()\n\n#while(True):\n# pass\n\n\n# Save the figure\nfig.savefig('fig1.png', bbox_inches='tight')\n","sub_path":"doc/documentation/data/SIPP_boxplot/times.py","file_name":"times.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159044673","text":"from tkinter import *\nfrom tkinter import ttk\nimport os\nfrom PIL import Image, ImageTk \nimport plotter\nimport winsound\n\nclass ViewCorpus(Frame)\t:\n\tfileList = []\n\ttemplateList=[]\n\tspeakerList=[]\n\tsession=\"\"\n\n\ttemplateVar=None\n\tspeakerVar=None\n\tsessionVar=None\n\tselected=None\n\n\ttemplateSelect=\"\"\n\tspeakerSelect=\"\"\n\n\tdef create_list(self, 
listofFile):\n\t\tself.update(self.templateVar.get(), self.speakerVar.get(), self.sessionVar.get())\n\t\tpath = os.path.join(\"Data\", \"Results\", self.templateSelect, self.speakerSelect, self.session)\n\t\tprint(path)\n\t\tfileList = os.listdir(path)\n\t\tlistofFile.delete(0, END)\n\t\tfor filename in fileList:\n\t\t\tlistofFile.insert(END, filename)\n\n\tdef update(self, templateSelect, speakerSelect, sessionSelect):\n\t\tself.templateSelect = templateSelect\n\t\tself.speakerSelect = speakerSelect\n\t\tself.session = sessionSelect\n\n\tdef __init__(self, parent, controller):\n\t\tttk.Frame.__init__(self, parent)\n\t\timagebuttonHome = ImageTk.PhotoImage(Image.open(\"home.png\"))\n\t\tbuttonHome = ttk.Button(self, style=\"parent.Option.TButton\", image=imagebuttonHome)\n\t\tbuttonHome.bind(\"<Button-1>\", lambda eff: controller.show_frame(\"HomePage\"))\n\t\tbuttonHome.image = imagebuttonHome \n\t\tbuttonHome.grid(row=0, columnspan=100, sticky=W)\n\n\t\tlabelTitle = ttk.Label(self, text=\"View Corpus\", style=\"parent.Important.TLabel\")\n\t\tlabelTitle.place(relx=.5, y=15, anchor=\"center\")\n\t\tself.templateVar = StringVar()\n\t\tself.speakerVar = StringVar()\n\t\tself.sessionVar = StringVar()\n\t\tself.templateVar.set(\"TemplateA\")\n\t\tself.speakerVar.set(\"U1\")\n\t\tself.sessionVar.set(\"1\")\n\t\t#make sure selectmode = multiple after show_plot supports multiple graphs at once\n\t\tlistofFile = Listbox(self, height = 10, selectmode=MULTIPLE)\n\t\tlistofFile.bind(\"<<ListboxSelect>>\", lambda eff: self.update(self.templateVar.get(), self.speakerVar.get(), self.sessionVar.get()))\n\t\tlistofFile.grid(row=3, column=0, columnspan = 2, rowspan=7)\n\n\t\tlabelTemplate = ttk.Label(self, text=\"Template\", style=\"parent.TLabel\")\n\t\tlabelTemplate.grid(row=1, column=0, columnspan=2)\n\n\t\tentryTemplate = Entry(self, textvariable = self.templateVar)\n\t\tentryTemplate.grid(row=2, column=0, columnspan = 2)\n\n\t\tlabelSpeaker = ttk.Label(self, text=\"Speaker\", style=\"parent.TLabel\")\n\t\tlabelSpeaker.grid(row=1, column=2, columnspan = 2)\n\n\t\tentrySpeaker = Entry(self, textvariable = self.speakerVar)\n\t\tentrySpeaker.grid(row=2, column=2, columnspan=2)\n\n\t\tlabel = ttk.Label(self, text=\"Session\", style=\"parent.TLabel\")\n\t\tlabel.grid(row=1, column=4, columnspan=2)\n\n\t\tentrySpeaker = Entry(self, textvariable = self.sessionVar)\n\t\tentrySpeaker.grid(row=2, column=4, columnspan=2)\n\n\t\tsubmitButtonImage = ImageTk.PhotoImage(Image.open(\"submit.png\"))\n\t\tsubmitButton = ttk.Button(self, image=submitButtonImage, command = lambda: self.create_list(listofFile))\n\t\tsubmitButton.image = submitButtonImage\n\t\tsubmitButton.grid(row=1, column= 6, rowspan = 2)\n\n\t\tplotWaveButton = ttk.Button(self, text=\"Wave\", style = \"parent.Small.TButton\", command = lambda: plotter.show_waveplot(self, self.templateSelect, self.speakerSelect, self.session, listofFile.curselection()[0]+1, 14, 0))\n\t\tplotWaveButton.grid(row=13, column= 0, sticky=W)\n\n\t\tplotSpecButton = ttk.Button(self, text=\"Spec\", style = \"parent.Small.TButton\", command = lambda: plotter.show_specplot(self, self.templateSelect, self.speakerSelect, self.session, listofFile.curselection()[0]+1, 14, 0))\n\t\tplotSpecButton.grid(row=13, column= 1, sticky=W)\n\n\t\t
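# curselection() returns 0-based listbox indices; the \"Mult\" buttons pass the whole selection tuple to the plotter\n\t\t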
mergeWave = ttk.Button(self, text=\"Mult Wave\", style = \"parent.Small.TButton\", command = lambda: plotter.multi_waveplots(self.templateSelect, self.speakerSelect, self.session, listofFile.curselection()))\n\t\tmergeWave.grid(row=13, column=2, sticky=W)\n\n\t\tmergeSpec = ttk.Button(self, text=\"Mult Spec\", style = \"parent.Small.TButton\", command = lambda: plotter.multi_specplots(self.templateSelect, self.speakerSelect, self.session, listofFile.curselection()))\n\t\tmergeSpec.grid(row=13, column=3, sticky=W)","sub_path":"vc.py","file_name":"vc.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"608383819","text":"from discord.ext import commands\nfrom asyncio import sleep\nimport os\nimport traceback\nimport discord\nimport re \n\nbot = commands.Bot(command_prefix='?')\ntoken = os.environ['DISCORD_BOT_TOKEN']\n\nif not discord.opus.is_loaded():\n discord.opus.load_opus(\"heroku-buildpack-libopus\")\n \n\n\n \n@bot.command()\nasync def 全部消えちゃえ(cmd):\n if cmd.author.guild_permissions.administrator:\n await cmd.channel.purge()\n await cmd.channel.send('テキストチャンネルを灰にしてしまいました')\n else:\n await cmd.channel.send('何様のつもり?')\n \n@bot.event \nasync def on_ready():\n CHANNEL_ID = 658980967876263936  # channel ids are ints\n channel = bot.get_channel(CHANNEL_ID)\n await channel.send('BOT起動')\n \nbot.run(token)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"422453610","text":"\"\"\"\r\n============================\r\nAuthor:virgir\r\nCreation_time:2019-12-04\r\n============================\r\n\"\"\"\r\n# coding=utf-8\r\nimport os\r\nfrom configparser import RawConfigParser\r\n\r\nimport yaml\r\n\r\nfrom script import PATH\r\n\r\n\r\nclass RW_yaml:\r\n def __init__(self, filename=None):\r\n if filename is None:\r\n self.file = os.path.join(PATH.yaml_path, 'test.yaml')\r\n else:\r\n self.file = os.path.join(PATH.yaml_path, filename)\r\n\r\n def read_yaml(self, k, v):\r\n with open(self.file, encoding='utf8') as f:\r\n data = yaml.full_load(f)\r\n return data[k][v]\r\n\r\n @staticmethod\r\n def write_yaml(file_name, data):\r\n with open(os.path.join(PATH.yaml_path, file_name), 'w', encoding='utf8')as f:\r\n yaml.dump(data, f, allow_unicode=True)\r\n\r\n\r\nYaml = RW_yaml()\r\n\r\n\r\nclass RW_conf:\r\n def __init__(self, file_name=None):\r\n if file_name is None:\r\n self.file = os.path.join(PATH.conf_path, 'test.conf')\r\n else:\r\n self.file = os.path.join(PATH.conf_path, file_name)\r\n self.conf = RawConfigParser()\r\n\r\n def read_conf(self, k, v):\r\n self.conf.read(self.file, encoding='utf8')\r\n data = self.conf[k][v]\r\n try:\r\n data = eval(data)\r\n except Exception:\r\n pass\r\n return data\r\n\r\n @staticmethod\r\n def write_conf(file_name, data):\r\n conf = RawConfigParser()\r\n with open(os.path.join(PATH.conf_path, file_name), 'w', encoding='utf8')as f:\r\n for i in data:\r\n conf[i] = data[i]\r\n conf.write(f)\r\n\r\n\r\nConf = RW_conf()\r\n\r\nif __name__ == '__main__':\r\n data = {\r\n \"mysql\": {\r\n 'port': 3306,\r\n 'host': 'api.lemonban.com',\r\n 'user': 'future',\r\n 'pwd': '123456',\r\n 'db': 'futureloan',\r\n },\r\n \"headers\": {\r\n 'header': {'X-Lemonban-Media-Type': 'lemonban.v2'},\r\n 'url': 'http://api.lemonban.com/futureloan'\r\n },\r\n 'xlsx': {\r\n 'clunm': 6,\r\n 'clunm1': 8\r\n }\r\n }\r\n Yaml.write_yaml('test.yaml', data)\r\n Conf.write_conf('test.conf', data)\r\n print(Conf.read_conf('mysql', 'user'))\r\n print(Yaml.read_yaml('headers', 
'header'))\r\n","sub_path":"JK1/script/conf_yaml.py","file_name":"conf_yaml.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"9794347","text":"import pygame\r\nimport random\r\nimport datetime\r\nimport csv\r\nfrom tkinter import *\r\nfrom snake import Snake\r\nfrom food import Food\r\nfrom higscore_window import HighscoreWindow\r\nfrom submit_highscore import SubmitHighscore\r\n\r\npygame.init()\r\npygame.display.set_caption(\"Snake\")\r\nFONT = pygame.font.SysFont(\"calibri\", 22)\r\nFONT_BOLD = pygame.font.SysFont(\"calibri\", 28, True)\r\nTITLE = pygame.image.load(\"graphics/title.png\")\r\nARROWS = pygame.image.load(\"graphics/arrows.png\")\r\nSPACEBAR = pygame.image.load(\"graphics/spacebar.png\")\r\nINFO = FONT_BOLD.render(\"GAME OVER\", 1, (220, 220, 220))\r\nINSTRUCTION = FONT.render(\r\n \"Click SPACE to play again or LCTRL to show highscore or ESC to quit\",\r\n 1,\r\n (220, 220, 220)\r\n )\r\n\r\n\r\nclass Game:\r\n\r\n def __init__(self):\r\n\r\n self.window = pygame.display.set_mode((1020, 480))\r\n self.snake = Snake()\r\n self.food = Food(self.snake.body)\r\n self.running = True\r\n self.pausing = True\r\n self.direction_x = 0\r\n self.direction_y = 0\r\n\r\n self.play()\r\n\r\n def draw(self):\r\n self.window.fill((0, 0, 0))\r\n pygame.draw.line(self.window, (220, 220, 220), (9, 0), (9, 480), 20)\r\n pygame.draw.line(self.window, (220, 220, 220), (9, 9), (720, 9), 20)\r\n pygame.draw.line(self.window, (220, 220, 220), (729, 0), (729, 480), 20)\r\n pygame.draw.line(self.window, (220, 220, 220), (9, 469), (720, 469), 20)\r\n # for i in range(23):\r\n # pygame.draw.line(self.window, (220, 220, 220), (20, 20 * i), (720, 20 * i), 1)\r\n # for j in range(35):\r\n # pygame.draw.line(self.window, (220, 220, 220), (20 + 20 * j, 20), (20 + 20 * j, 480), 1)\r\n self.window.blit(TITLE, ((880 - (TITLE.get_width() / 2)), 25))\r\n self.window.blit(FONT_BOLD.render(\"LENGTH:\", 1, (200, 200, 200)), (810, 200))\r\n self.window.blit(FONT_BOLD.render(f\"{self.snake.length}\", 1, (200, 200, 200)), (906, 200))\r\n self.window.blit(ARROWS, (770, 315))\r\n self.window.blit(FONT_BOLD.render(\"move\", 1, (200, 200, 200)), (920, 340))\r\n self.window.blit(SPACEBAR, (760, 410))\r\n self.window.blit(FONT_BOLD.render(\"pause\", 1, (200, 200, 200)), (920, 420))\r\n self.snake.draw(self.window)\r\n self.food.draw(self.window)\r\n pygame.display.update()\r\n\r\n def play(self):\r\n while self.running:\r\n pygame.time.Clock().tick(8)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n self.running = False\r\n elif event.key == pygame.K_SPACE:\r\n self.pause()\r\n elif event.key == pygame.K_UP:\r\n self.direction_x = 0\r\n self.direction_y = -20\r\n elif event.key == pygame.K_DOWN:\r\n self.direction_x = 0\r\n self.direction_y = 20\r\n elif event.key == pygame.K_LEFT:\r\n self.direction_x = -20\r\n self.direction_y = 0\r\n elif event.key == pygame.K_RIGHT:\r\n self.direction_x = 20\r\n self.direction_y = 0\r\n a = self.snake.move(self.direction_x, self.direction_y, self.food.position)\r\n if a == \"new food\":\r\n if self.snake.enough_space():\r\n self.food.create_new_food_piece(self.snake.body)\r\n else:\r\n self.game_over()\r\n if a == \"game over\":\r\n self.game_over()\r\n self.draw()\r\n\r\n def pause(self):\r\n self.draw()\r\n self.window.blit(FONT_BOLD.render(\"PAUSED\", 1, (220, 
220, 220)), (310, 225))\r\n pygame.display.update()\r\n while self.running:\r\n pygame.time.Clock().tick(8)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n self.play()\r\n\r\n def game_over(self):\r\n self.window.blit(INFO, (370 - INFO.get_width() / 2, 180))\r\n pygame.display.update()\r\n pygame.time.delay(200)\r\n submit = SubmitHighscore()\r\n submit.set_length(self.snake.length)\r\n submit.show()\r\n self.window.blit(INSTRUCTION, (370 - INSTRUCTION.get_width() / 2, 250))\r\n pygame.display.update()\r\n while self.running:\r\n pygame.time.Clock().tick(8)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n quit()\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n quit()\r\n elif event.key == pygame.K_LCTRL:\r\n leaderboard = HighscoreWindow()\r\n leaderboard.load_data()\r\n leaderboard.show()\r\n elif event.key == pygame.K_SPACE:\r\n self.__init__()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312386528","text":"import os\nimport json\nimport time\nimport requests\nimport urllib.parse\nimport pandas as pd\n\n\ndef get_bearer_token():\n return '<< SECRET KEY >>'\n\n\ndef create_url(p_next_token):\n tweet_fields = \"tweet.fields=created_at\"\n # Tweet fields are adjustable.\n # Options include:\n # attachments, author_id, context_annotations,\n # conversation_id, created_at, entities, geo, id,\n # in_reply_to_user_id, lang, non_public_metrics, organic_metrics,\n # possibly_sensitive, promoted_metrics, public_metrics, referenced_tweets,\n # source, text, and withheld\n \n query_params = f\"query={urllib.parse.quote('bookkeeping lang:en -is:retweet')}\"\n if len(p_next_token) == 0:\n next_param = \"\"\n else:\n next_param = f\"&next_token={p_next_token}\"\n return f\"https://api.twitter.com/2/tweets/search/recent?{query_params}&{tweet_fields}&max_results=100{next_param}\"\n\n\ndef create_headers(bearer_token):\n return {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n\n\ndef connect_to_endpoint(p_url, p_headers):\n response = requests.request(\"GET\", p_url, headers=p_headers)\n if response.status_code != 200:\n raise Exception(response.status_code, response.text)\n print(\"Error\")\n return response.json()\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n next_token = \"\"\n data_df = None\n file_name = \"../Bookkeeping_Tweets_2021-06-04.csv\"\n\n while True:\n url = create_url(next_token)\n headers = create_headers(get_bearer_token())\n json_response = connect_to_endpoint(url, headers)\n \n if data_df is None:\n data_df = pd.DataFrame(json_response[\"data\"])\n else:\n data_df = pd.concat([data_df, pd.DataFrame(json_response[\"data\"])])\n \n print(f\"Data retrieved: {len(data_df)}\")\n \n if (len(data_df) >= 1000):\n break\n elif (\"next_token\" in json_response[\"meta\"].keys()):\n next_token = json_response[\"meta\"][\"next_token\"]\n else:\n break\n \n print(f\"Finished! 
Total data retrieved: {len(data_df)}\")\n\n if (os.path.exists(f\"./{file_name}\")):\n previous_df = pd.read_csv(file_name, index_col=0, dtype={\"created_at\":object, \"id\":object, \"text\":object})\n new_df = pd.concat([previous_df, data_df]).reset_index()[[\"created_at\", \"id\", \"text\"]]\n else:\n new_df = data_df\n\n new_df.drop_duplicates(ignore_index=True, inplace=True)\n new_df.to_csv(file_name)\n\n print(f\"\\nFinal concat file: {len(new_df)}\")\n","sub_path":"TweetScrapping/bookkeeping_tweets.py","file_name":"bookkeeping_tweets.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"104248077","text":"\"\"\"\nDjango settings for armadillo_reuse project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'sfldrzlb)9iibc$-d6z%)cp4k)m!_0-%+5a%!6x@x370eid+3!'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'south',\n 'web_api'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'armadillo_reuse.urls'\n\nWSGI_APPLICATION = 'armadillo_reuse.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nALL_DATABASES = {\n 'stable': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'server_stable',\n 'USER': 'web_stable',\n 'PASSWORD': 'web_stable'\n },\n 'unstable': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'server_unstable',\n 'USER': 'web_unstable',\n 'PASSWORD': 'web_unstable'\n },\n 'local_test': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n },\n}\n\nDATABASES = {}\n\n# Define DATABASES dynamically\n\nimport socket\n\n# Force use of the development database for local versions\nif socket.gethostname() != \"armadillo\":\n DATABASES[\"default\"] = ALL_DATABASES[\"local_test\"]\nelse:\n from subprocess import check_output\n branches = check_output([\"git\",\"branch\"]).decode().strip().split(\"\\n\")\n branch = next(x for x in branches if x[0]==\"*\")[1:].strip()\n if branch == \"stable\":\n DATABASES[\"default\"] = ALL_DATABASES[\"stable\"]\n else:\n DATABASES[\"default\"] = ALL_DATABASES[\"unstable\"]\n \n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# 
https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n","sub_path":"armadillo_reuse/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"97590130","text":"# Program that implements the pseudocode below:\n# For i = 10, 20, 30,... ,250:\n# Walk forward i steps\n# Turn left 90 degrees\n\nimport turtle\ntheodoreRoosevelt = turtle.Turtle()\n\nfor i in range(10, 251, 10):\n theodoreRoosevelt.forward(i)\n theodoreRoosevelt.left(90)\n","sub_path":"blockSpiral.py","file_name":"blockSpiral.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"51693033","text":"import random\nimport time\n\ndef quickSelect(array, k):\n p = array[(len(array) // 2)]\n part1 = [i for i in array if i < p]\n part2 = [i for i in array if i > p]\n p1 = len(part1)\n r = len(array) - len(part1) - len(part2)\n if k >= p1 and k < p1 + r:\n return p\n elif p1 > k:\n return quickSelect(part1, k)\n else:\n return quickSelect(part2, k - p1 - r)\n\nsearch_time = []\n\nfor i in range(1000, 1000000, 10000):\n time_array = []\n test_array = [random.randint(1,500) for _ in range(i)]\n for k in range(1,10):\n start_time = time.time()\n res = quickSelect(test_array, 20)\n res_time = time.time() - start_time\n time_array.append(res_time)\n res_time = sum(time_array) / float(len(time_array)) \n search_time.append(res_time)\n\nprint(search_time)","sub_path":"hw2/hw2-3.py","file_name":"hw2-3.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"301877587","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 27 15:58:13 2013\n\nAgent skeleton using the Flask web services\n\n/comm is the entry point for receiving the agent's messages\n/Stop is the entry point that stops the agent\n\nIt has an AgentBehavior1 function that is launched as a concurrent thread\n\nAssumes the registry agent is on port 9000\n\n@author: javier\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nfrom multiprocessing import Process, Queue\nimport socket\n\nfrom rdflib import Namespace, Graph, logger, RDF, URIRef\nfrom flask import Flask, request\n\nfrom PlanificadorViajes.ecsdi_modules.ACLMessages import build_message, get_message_properties, get_agent_info, send_message, \\\n register_agent\nfrom PlanificadorViajes.AgentUtil.FlaskServer import shutdown_server\nfrom PlanificadorViajes.AgentUtil.Agent import Agent\nfrom PlanificadorViajes.ecsdi_modules.OntologyNamespaces import ACL\nfrom PlanificadorViajes.ecsdi_modules.OntologyNamespaces import Ontologia\n\n__author__ = 'Amazon V2'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--open', help=\"Whether the server is open to the outside or not\", action='store_true',\n default=False)\nparser.add_argument('--port', type=int, help=\"Agent communication port\")\nparser.add_argument('--dhost', default=socket.gethostname(), help=\"Directory agent host\")\nparser.add_argument('--dport', type=int, help=\"Directory agent communication port\")\nparser.add_argument('--host', default=socket.gethostname(), help=\"Agent host\")\n\n\nargs = parser.parse_args()\n\n# Configuration stuff\n\n
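# resolve the agent's listen address and the directory agent's address from the CLI arguments\n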
if args.port is None:\n port = 9050\nelse:\n port = args.port\n\nif args.open:\n hostname = '0.0.0.0'\nelse:\n hostname = args.host\n\nif args.dport is None:\n dport = 9000\nelse:\n dport = args.dport\n\nif args.dhost is None:\n dhostname = socket.gethostname()\nelse:\n dhostname = args.dhost\n\nagn = Namespace(\"http://www.agentes.org#\")\n\n# Message counter\nmessages_cnt = 0\n\n# Agent data\n\nAgenteDevoluciones = Agent('AgenteDevoluciones',\n agn.AgenteDevoluciones,\n 'http://%s:%d/comm' % (hostname, port),\n 'http://%s:%d/Stop' % (hostname, port))\n\n# Directory agent address\nDirectoryAgent = Agent('DirectoryAgent',\n agn.Directory,\n 'http://%s:%d/Register' % (dhostname, dport),\n 'http://%s:%d/Stop' % (dhostname, dport))\n\n# Global triplestore graph\ndsgraph = Graph()\n\ncola1 = Queue()\n\n# Flask stuff\napp = Flask(__name__, template_folder='../templates')\n\n\ndef get_count():\n global messages_cnt\n messages_cnt += 1\n return messages_cnt\n\ndef register():\n gr = register_agent(AgenteDevoluciones, DirectoryAgent, AgenteDevoluciones.uri, get_count())\n pass\n\n\n@app.route(\"/comm\")\ndef comunicacion():\n global dsgraph\n global messages_cnt\n gr = None\n logger.info('Info request received')\n\n # Extract the message we were sent\n mensaje = request.args['content']\n gm = Graph()\n gm.parse(data=mensaje)\n\n msgdic = get_message_properties(gm)\n\n # Message validation\n\n if msgdic is None:\n gr = build_message(Graph(), ACL['no_entendido'], sender=AgenteDevoluciones.uri, msgcnt=get_count())\n else:\n performative = msgdic['performative']\n\n if performative != ACL.request:\n gr = build_message(Graph(), ACL['no_entendido'], sender=AgenteDevoluciones.uri, msgcnt=get_count())\n\n else:\n\n content = msgdic['content']\n accion = gm.value(subject=content, predicate=RDF.type)\n # return request\n if accion == Ontologia.Peticion_retorno:\n logger.info(\"Return request received in AgenteDevoluciones\")\n\n for item in gm.subjects(RDF.type, ACL.FipaAclMessage):\n gm.remove((item, None, None))\n\n venta = []\n for item in gm.objects(subject=content, predicate=Ontologia.CompraRetornada):\n venta.append(item)\n\n payDelivery()\n\n gm.remove((content, None, None))\n gr = returnSell(gm, venta)\n\n\n else:\n gr = build_message(Graph(),\n ACL['not-understood'],\n sender=DirectoryAgent.uri,\n msgcnt=get_count())\n\n logger.info('Responding to the request')\n\n serialize = gr.serialize(format='xml')\n return serialize, 200\n\n\n@app.route(\"/Stop\")\ndef stop():\n \"\"\"\n Entrypoint that stops the agent\n\n :return:\n \"\"\"\n tidyup()\n shutdown_server()\n return \"Stopping server\"\n\n\ndef payDelivery():\n logger.info('Transfer accepted')\n pass\n\n\ndef returnSell(gm, sell):\n content = Ontologia['Recoger_devolucion_' + str(get_count())]\n\n gm.add((content, RDF.type, Ontologia.Recoger_devolucion))\n for item in sell:\n gm.add((content, Ontologia.compra_a_devolver, URIRef(item)))\n\n logistic = get_agent_info(agn.AgenteCentroLogistico, DirectoryAgent, AgenteDevoluciones, get_count())\n\n gr = send_message(\n build_message(gm, perf=ACL.request, sender=AgenteDevoluciones.uri, receiver=logistic.uri,\n msgcnt=get_count(),\n content=content), logistic.address)\n return gr\n\n\ndef tidyup():\n \"\"\"\n Actions to take before stopping the agent\n\n \"\"\"\n pass\n\n\ndef agentbehavior1():\n \"\"\"\n A behavior of the agent\n\n :return:\n \"\"\"\n gr = register()\n pass\n\n\nif __name__ == '__main__':\n # Start the behaviors\n ab1 = Process(target=agentbehavior1, args=())\n ab1.start()\n\n 
# Start the Flask server\n app.run(host=hostname, port=port)\n\n # Wait for the behaviors to finish\n ab1.join()\n print('The End')\n","sub_path":"PlanificadorViajes/Agentes/trash/AgenteDevoluciones.py","file_name":"AgenteDevoluciones.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"118256532","text":"#!/usr/bin/env python\n\nfrom os.path import join\n\ndef configuration(parent_package='',top_path=None):\n\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration(None, parent_package, top_path)\n config.set_options(ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('odespy')\n config.get_version(join('odespy', 'version.py'))\n\n return config\n\nif __name__ == '__main__':\n\n from numpy.distutils.core import setup\n #setup(**configuration(top_path='').todict())\n setup(\n name='odespy',\n url='...',\n download_url='...',\n license='GPL',\n author='Liwei Wang and Hans Petter Langtangen',\n author_email='hpl@simula.no',\n configuration=configuration)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"85054774","text":"\n'''\nCommand-line interface to HAVerifier\n'''\n\nimport logging\nimport os\nimport sys\n\nfrom pkg_resources import get_distribution\nfrom argparse import RawDescriptionHelpFormatter\nfrom oslo_config import cfg\n\nfrom haverifier.cmd.commands import task\n\nCONF = cfg.CONF\ncli_opts = [\n cfg.BoolOpt('debug',\n short='d',\n default=False,\n help='increase output verbosity to debug'),\n cfg.BoolOpt('verbose',\n short='v',\n default=False,\n help='increase output verbosity to info')\n]\nCONF.register_cli_opts(cli_opts)\n\nCONFIG_SEARCH_PATHS = [sys.prefix + \"/etc/haverifier\",\n \"~/.haverifier\",\n \"/etc/haverifier\"]\n\n\ndef find_config_files(path_list):\n for path in path_list:\n abspath = os.path.abspath(os.path.expanduser(path))\n confname = abspath + \"/haverifier.conf\"\n if os.path.isfile(confname):\n return [confname]\n\n return None\n\n\nclass HAVerifierCLI():\n '''Command-line interface to HAVerifier'''\n\n # Command categories\n categories = {\n 'task': task.TaskCommands,\n }\n\n def __init__(self):\n self._version = 'haverifier version %s ' % \\\n get_distribution('haverifier').version\n\n def _find_actions(self, subparsers, actions_module):\n '''find action methods'''\n # Find action methods inside actions_module and\n # add them to the command parser.\n # The 'actions_module' argument may be a class\n # or module. 
Action methods start with 'do_'\n for attr in (a for a in dir(actions_module) if a.startswith('do_')):\n command = attr[3:].replace('_', '-')\n callback = getattr(actions_module, attr)\n desc = callback.__doc__ or ''\n arguments = getattr(callback, 'arguments', [])\n subparser = subparsers.add_parser(\n command,\n description=desc\n )\n for (args, kwargs) in arguments:\n subparser.add_argument(*args, **kwargs)\n subparser.set_defaults(func=callback)\n\n def _add_command_parsers(self, categories, subparsers):\n '''add commands to command-line parser'''\n for category in categories:\n command_object = categories[category]()\n desc = command_object.__doc__ or ''\n subparser = subparsers.add_parser(\n category, description=desc,\n formatter_class=RawDescriptionHelpFormatter\n )\n subparser.set_defaults(command_object=command_object)\n cmd_subparsers = subparser.add_subparsers(title='subcommands')\n self._find_actions(cmd_subparsers, command_object)\n\n def main(self, argv):\n '''run the command line interface'''\n\n # register subcommands to parse additional command line arguments\n def parser(subparsers):\n self._add_command_parsers(HAVerifierCLI.categories, subparsers)\n\n category_opt = cfg.SubCommandOpt(\"category\",\n title=\"Command categories\",\n help=\"Available categories\",\n handler=parser)\n CONF.register_cli_opt(category_opt)\n\n # load CLI args and config files\n CONF(argv, project=\"haverifier\", version=self._version,\n default_config_files=find_config_files(CONFIG_SEARCH_PATHS))\n\n # handle global opts\n logger = logging.getLogger('haverifier')\n logger.setLevel(logging.WARNING)\n\n if CONF.verbose:\n logger.setLevel(logging.INFO)\n\n if CONF.debug:\n logger.setLevel(logging.DEBUG)\n\n # dispatch to category parser\n func = CONF.category.func\n func(CONF.category)\n","sub_path":"build/lib/haverifier/cmd/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"25818169","text":"# we start with a randomly chosen number\n# then the game asks us to type a number between 0 and 100\n# if the answer is lower than the random number\n# it must display \"the number is higher\"\n# if the answer is higher than the random number\n# it must display \"the number is lower\"\n# otherwise, you have won\n\n# here we import a library that contains\n# random functions\nimport random\n\n# initialise our variables nbAléatoire and trouver\n# nbAléatoire will be generated by the computer\n# trouver is a boolean initialised to False\nnbAléatoire = random.randint(0,100)\ntrouver = False\n\n# while trouver is False, we keep going\nwhile trouver == False:\n print(\"tape un nombre entre 0 et 100\")\n # convert the number typed on the keyboard to an int\n réponse = int( input() )\n\n # compare our answer with the random number\n if réponse < nbAléatoire:\n print (\"le nombre est plus grand\")\n elif réponse > nbAléatoire :\n print (\"le nombre est plus petit\")\n else :\n print (\"vous avez gagné !\")\n # set trouver to True to stop the loop\n trouver = True","sub_path":"00_algorithmique/01_python/exercices/jeux1.py","file_name":"jeux1.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"43064111","text":"# tests.test_classifier.test_threshold\n# Ensure that the discrimination threshold visualizations work.\n#\n# Author: Nathan 
Danielsen \n# Author: Benjamin Bengfort \n# Created: Wed April 26 20:17:29 2017 -0700\n#\n# Copyright (C) 2017 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: test_threshold.py [] nathan.danielsen@gmail.com $\n\n\"\"\"\nEnsure that the DiscriminationThreshold visualizations work.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport sys\nimport pytest\nimport yellowbrick as yb\nimport matplotlib.pyplot as plt\n\nfrom yellowbrick.classifier.threshold import *\nfrom yellowbrick.utils import is_probabilistic, is_classifier\n\nfrom unittest.mock import patch\nfrom tests.base import VisualTestCase\nfrom tests.dataset import DatasetMixin\nfrom numpy.testing.utils import assert_array_equal\n\nfrom sklearn.svm import LinearSVC, NuSVC\nfrom sklearn.datasets import make_classification\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import RadiusNeighborsClassifier\nfrom sklearn.naive_bayes import BernoulliNB, GaussianNB\nfrom sklearn.linear_model import Ridge, LogisticRegression\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\n\n##########################################################################\n## DiscriminationThreshold Test Cases\n##########################################################################\n\nclass TestDiscriminationThreshold(VisualTestCase, DatasetMixin):\n \"\"\"\n DiscriminationThreshold visualizer tests\n \"\"\"\n\n @pytest.mark.xfail(\n sys.platform == 'win32', reason=\"images not close on windows\"\n )\n def test_binary_discrimination_threshold(self):\n \"\"\"\n Correctly generates viz for binary classification with BernoulliNB\n \"\"\"\n X, y = make_classification(\n n_samples=400, n_features=20, n_informative=8, n_redundant=8,\n n_classes=2, n_clusters_per_class=4, random_state=854\n )\n\n _, ax = plt.subplots()\n\n model = BernoulliNB(3)\n visualizer = DiscriminationThreshold(model, ax=ax, random_state=23)\n\n visualizer.fit(X, y)\n visualizer.poof()\n\n self.assert_images_similar(visualizer)\n\n def test_multiclass_discrimination_threshold(self):\n \"\"\"\n Assert exception is raised in multiclass case.\n \"\"\"\n X, y = make_classification(\n n_samples=400, n_features=20, n_informative=8, n_redundant=8,\n n_classes=3, n_clusters_per_class=4, random_state=854\n )\n\n visualizer = DiscriminationThreshold(GaussianNB(), random_state=23)\n msg = \"multiclass format is not supported\"\n\n with pytest.raises(ValueError, match=msg):\n visualizer.fit(X, y)\n\n @pytest.mark.xfail(\n sys.platform == 'win32', reason=\"images not close on windows\"\n )\n @pytest.mark.skipif(pd is None, reason=\"test requires pandas\")\n def test_pandas_integration(self):\n \"\"\"\n Test with Pandas DataFrame and Series input\n \"\"\"\n _, ax = plt.subplots()\n\n # Load the occupancy dataset from fixtures\n data = self.load_data('occupancy')\n target = 'occupancy'\n features = [\n \"temperature\", \"relative_humidity\", \"light\", \"C02\", \"humidity\"\n ]\n\n # Create instances and target\n X = pd.DataFrame(data[features])\n y = pd.Series(data[target].astype(int))\n\n classes = ['unoccupied', 'occupied']\n\n # Create the visualizer\n viz = DiscriminationThreshold(\n LogisticRegression(), ax=ax, classes=classes, random_state=193\n )\n viz.fit(X, y)\n viz.poof()\n\n self.assert_images_similar(viz, tol=0.1)\n\n def 
test_quick_method(self):\n \"\"\"\n Test for thresholdviz quick method with random dataset\n \"\"\"\n\n X, y = make_classification(\n n_samples=400, n_features=20, n_informative=8, n_redundant=8,\n n_classes=2, n_clusters_per_class=4, random_state=2721\n )\n\n _, ax = plt.subplots()\n\n discrimination_threshold(BernoulliNB(3), X, y, ax=ax, random_state=5)\n self.assert_images_similar(ax=ax, tol=10)\n\n @patch.object(DiscriminationThreshold, 'draw', autospec=True)\n def test_fit(self, mock_draw):\n \"\"\"\n Test the fit method generates scores, calls draw, and returns self\n \"\"\"\n X, y = make_classification(\n n_samples=400, n_features=20, n_informative=8, n_redundant=8,\n n_classes=2, n_clusters_per_class=4, random_state=1221\n )\n\n visualizer = DiscriminationThreshold(BernoulliNB())\n assert not hasattr(visualizer, \"thresholds_\")\n assert not hasattr(visualizer, \"cv_scores_\")\n\n out = visualizer.fit(X, y)\n\n assert out is visualizer\n mock_draw.assert_called_once()\n assert hasattr(visualizer, \"thresholds_\")\n assert hasattr(visualizer, \"cv_scores_\")\n\n for metric in METRICS:\n assert metric in visualizer.cv_scores_\n assert \"{}_lower\".format(metric) in visualizer.cv_scores_\n assert \"{}_upper\".format(metric) in visualizer.cv_scores_\n\n @pytest.mark.xfail(\n sys.platform == 'win32', reason=\"images not close on windows\"\n )\n def test_binary_discrimination_threshold_alt_args(self):\n \"\"\"\n Correctly generates visualization with alternate arguments\n \"\"\"\n X, y = make_classification(\n n_samples=400, n_features=20, n_informative=10, n_redundant=3,\n n_classes=2, n_clusters_per_class=4, random_state=1231,\n flip_y=0.1, weights=[0.35, 0.65],\n )\n\n exclude = [\"queue_rate\", \"fscore\"]\n cv = StratifiedShuffleSplit(n_splits=1, test_size=0.2)\n visualizer = DiscriminationThreshold(\n NuSVC(), exclude=exclude, cv=cv, random_state=98239\n )\n\n visualizer.fit(X, y)\n visualizer.poof()\n\n for metric in exclude:\n assert metric not in visualizer.cv_scores_\n assert \"{}_lower\".format(metric) not in visualizer.cv_scores_\n assert \"{}_upper\".format(metric) not in visualizer.cv_scores_\n\n self.assert_images_similar(visualizer)\n\n def test_threshold_default_initialization(self):\n \"\"\"\n Test initialization default parameters\n \"\"\"\n model = BernoulliNB(3)\n viz = DiscriminationThreshold(model)\n assert viz.estimator is model\n assert viz.color is None\n assert viz.title is None\n assert viz.n_trials == 50\n assert viz.cv == 0.1\n assert_array_equal(viz.quantiles, np.array((0.1, 0.5, 0.9)))\n\n def test_requires_classifier(self):\n \"\"\"\n Assert requires a classifier\n \"\"\"\n message = \"requires a probabilistic binary classifier\"\n assert not is_classifier(Ridge)\n\n with pytest.raises(yb.exceptions.YellowbrickError, match=message):\n DiscriminationThreshold(Ridge())\n\n def test_requires_probabilistic_classifier(self):\n \"\"\"\n Assert requires probabilistic classifier\n \"\"\"\n message = \"requires a probabilistic binary classifier\"\n assert is_classifier(RadiusNeighborsClassifier)\n assert not is_probabilistic(RadiusNeighborsClassifier)\n\n with pytest.raises(yb.exceptions.YellowbrickError, match=message):\n DiscriminationThreshold(RadiusNeighborsClassifier())\n\n def test_accepts_predict_proba(self):\n \"\"\"\n Will accept classifiers with predict proba function\n \"\"\"\n model = RandomForestClassifier\n assert is_classifier(model)\n assert is_probabilistic(model)\n assert not hasattr(model, \"decision_function\")\n assert hasattr(model, 
\"predict_proba\")\n\n try:\n DiscriminationThreshold(model())\n except YellowbrickTypeError:\n pytest.fail(\"did not accept decision function model\")\n\n def test_accepts_decision_function(self):\n \"\"\"\n Will accept classifiers with decision function\n \"\"\"\n model = LinearSVC\n assert is_classifier(model)\n assert is_probabilistic(model)\n assert hasattr(model, \"decision_function\")\n assert not hasattr(model, \"predict_proba\")\n\n try:\n DiscriminationThreshold(model())\n except YellowbrickTypeError:\n pytest.fail(\"did not accept decision function model\")\n\n def test_bad_quantiles(self):\n \"\"\"\n Assert exception is raised when bad quantiles are passed in.\n \"\"\"\n msg = (\n \"quantiles must be a sequence of three \"\n \"monotonically increasing values less than 1\"\n )\n\n with pytest.raises(YellowbrickValueError, match=msg):\n DiscriminationThreshold(NuSVC(), quantiles=[0.25, 0.1, 0.75])\n\n def test_bad_cv(self):\n \"\"\"\n Assert an exception is raised when a bad cv value is passed in\n \"\"\"\n with pytest.raises(YellowbrickValueError, match=\"not a valid cv splitter\"):\n DiscriminationThreshold(NuSVC(), cv=\"foo\")\n\n def test_splitter_random_state(self):\n \"\"\"\n Test splitter random state is modified\n \"\"\"\n viz = DiscriminationThreshold(NuSVC(), random_state=None)\n assert viz._check_cv(None, random_state=None).random_state is None\n assert viz._check_cv(None, random_state=42).random_state == 42\n\n splits = StratifiedShuffleSplit(n_splits=1, random_state=None)\n assert viz._check_cv(splits, random_state=None).random_state is None\n assert viz._check_cv(splits, random_state=23).random_state == 23\n\n splits = StratifiedShuffleSplit(n_splits=1, random_state=181)\n assert viz._check_cv(splits, random_state=None).random_state == 181\n assert viz._check_cv(splits, random_state=72).random_state == 72\n\n def test_bad_exclude(self):\n \"\"\"\n Assert an exception is raised on bad exclude param\n \"\"\"\n with pytest.raises(YellowbrickValueError, match=\"not a valid metric\"):\n DiscriminationThreshold(NuSVC(), exclude=\"foo\")\n\n with pytest.raises(YellowbrickValueError, match=\"not a valid metric\"):\n DiscriminationThreshold(NuSVC(), exclude=[\"queue_rate\", \"foo\"])\n","sub_path":"tests/test_classifier/test_threshold.py","file_name":"test_threshold.py","file_ext":"py","file_size_in_byte":10492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401922673","text":"# ===========================================================================\n#\n# Copyright (c) 2013 Qualcomm Technologies Incorporated. 
\n# All Rights Reserved.\n# QUALCOMM Proprietary and Confidential.\n#\n# ===========================================================================\n\n\nfrom __future__ import print_function\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport os\nimport itertools\nfrom binascii import hexlify\nfrom locators.core_dump import locate as locate_core_dump\nfrom dwarf import decode_object, Structure\nfrom dwarf import Array as darray\nfrom hansei_utils import *\n\ndef dump(dump_path, memory, debug_info):\n address = locate_core_dump(memory, debug_info)\n dump_type = debug_info.variables['rpm_core_dump'].vartype\n rpm_core_dump = decode_object('rpm_core_dump', address, dump_type, memory, debug_info)\n\n #import pdb; pdb.set_trace()\n rpm = cast(rpm_core_dump.rpmserver_state, 'SystemData', memory, debug_info)\n\n #save_logger_level = update_logger(logging.DEBUG, logging)\n dump_mpm(dump_path, rpm, memory, debug_info)\n #update_logger(save_logger_level, logging)\n\ndef dump_mpm(dump_path, rpm, memory, debug_info):\n mpm_file_name = os.path.join(dump_path, 'mpm.txt')\n with open(mpm_file_name, 'w') as mpm_file:\n \n print(\"\\n ~~MPM Register Dump~~\", file=mpm_file)\n \n try:\n mpm_type = debug_info.variables['rpm_mpm_registers'].die\n mpm_address = debug_info.variables['rpm_mpm_registers'].address\n except:\n print(\"Failed to find mpm register dump variable\", file=mpm_file)\n return\n \n # This will break if mpm_register type ever changes...\n mpm_type2 = debug_info.types['uint32_t']\n mpm_array = darray('mpm_registers', mpm_address, mpm_type2, 50, memory, debug_info)\n \n '''\n mpm_reg_0 = decode_object('rpm_mpm_registers', mpm_address, None, memory, debug_info, die=mpm_type2)\n test = memory.read(mpm_address+(0*4), 4)\n test[3].encode('hex')+test[2].encode('hex')+test[1].encode('hex')+test[0].encode('hex')\n memory.read(mpm_address+(x*4), 4)[3].encode('hex')+memory.read(mpm_address+(x*4), 4)[2].encode('hex')+memory.read(mpm_address+(x*4), 4)[1].encode('hex')+memory.read(mpm_address+(x*4), 4)[0].encode('hex')\n mpm_regs = decode_object('rpm_mpm_registers', mpm_address, None, memory, debug_info, die=mpm_type)\n '''\n \n \n for reg_num in range(50):\n reg = mpm_array[reg_num]\n print(\"\\tREGISTER[%02.0u] = 0x%0.8x\\n\" % (reg_num, reg), file=mpm_file)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"rpm_proc/core/bsp/rpm/scripts/hansei/dumpers/mpm.py","file_name":"mpm.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"251793761","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime as zdatetime\nfrom email import utils\n\n\n\"\"\" Create + append a console-line message to 'logger.log' in the pwd. 
\"\"\"\n\nclass EzLog():\n\n def __init__(self, message = ''):\n \"\"\" Taking care of beez-nice \"\"\"\n self.LFORMAT = '%Y/%m/%d: %H.%M.%S (LOCAL)'\n self.UFORMAT = '%Y/%m/%d: %H.%M.%S [%z]'\n self._hack(message)\n\n def _hack(self, message):\n znow = zdatetime.now()\n znow = utils.localtime(znow)\n self.local_date = znow.strftime(format=self.LFORMAT)\n self.message = str(message)\n\n def __str__(self):\n return self.local_date + \"\\t\" + self.message + \"\\n\"\n\n def hack(self, message):\n \"\"\" Update the time, as well as the message \"\"\"\n self._hack(message)\n\n def is_null(self):\n return len(self.message) == 0\n\n @classmethod\n def Create(recipe):\n ''' Extract the log message from the command line, if present.'''\n import sys\n words = sys.argv[1:]\n message = ''\n for word in words:\n if len(message) != 0:\n message += ' '\n message += word\n return recipe(message)\n\n\nif __name__ == '__main__':\n entry = EzLog.Create()\n with open(\"./logger.log\", \"a\") as fp:\n if entry.is_null():\n entry.message = \"This is a test\"\n fp.write(str(entry))\n\n","sub_path":"ezlog.py","file_name":"ezlog.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"238587381","text":"#-*-coding:utf-8-*-\nimport os,sys\nos.getcwd()\nsys.path.append(os.getcwd())\nimport threading\nimport time\n\n\ndef a(file,times):\n for i in range(times):\n print('a %s---%s'%(file,time.ctime()))\n time.sleep(2)\n\ndef b(file ,times):\n for i in range(times):\n print('b %s---%s'%(file,time.ctime()))\n time.sleep(2)\n# create a thread list to hold the threads\nthreads=[]\n# create thread a1 and add it to the thread list\na1=threading.Thread(target=a,args=(u'大连必胜',2))\nthreads.append(a1)\n\na2=threading.Thread(target=b,args=(u'北京龟安',2))\nthreads.append(a2)\n\nif __name__ == '__main__':\n\n# start the threads\n for i in threads:\n i.start()\n# wait for the threads\n# without join() on each thread, the final print would run before the threads finish\n for i in threads:\n i.join()\n print ('threads end in %s'%time.ctime())\n\n\n\n","sub_path":"xiancheng/线程.py","file_name":"线程.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"594477389","text":"from tkinter import *\nimport time\nimport random\nprint(\"hello world\")\ni = 2\nwidth = 1920\nheight = 1000\nz = 100\n\ndef mapjs(value, min, max, newmin, newmax):\n return (value - min) / (max - min) * (newmax - newmin) + newmin\n\n\n\nclass Drop:\n def __init__(self, canvas):\n self.x = random.random() * width\n self.y = random.random() * height\n self.z = random.random() * z\n\n self.yspeed = mapjs(self.z, 0, z, 10, 30)\n self.width = mapjs(self.z, 0, z, 2, 10)\n self.height = mapjs(self.z, 0, z, 10, 50)\n \n self.drop = canvas.create_line(50, 25, 50, 50 + self.height, fill=\"purple\", width = self.width)\n \n def update(self, canvas):\n\n self.y += self.yspeed\n if self.y > height:\n self.y = -50\n canvas.coords(self.drop, (self.x, self.y, self.x, self.y + self.height))\n\ndef move():\n for elt in drops:\n elt.update(canvas)\n tk.after(10, move)\n\n\n\n\ntk = Tk()\ndecay = 5\ncanvas = Canvas( tk, width = width, height = height, background = \"white\")\ndrops = []\nfor i in range(600):\n drops.append(Drop(canvas))\ncanvas.pack()\nmove()\ntk.mainloop()\n","sub_path":"perso/messarround/purplerain.py","file_name":"purplerain.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"118600797","text":"from ophyd.utils import LimitError\nfrom 
ophyd import Signal\nimport bluesky.plan_stubs as bps\nimport bluesky.preprocessors as bpp\nimport numpy as np\n\n\n# TODO could also use check_value, but like the better error message here?\ndef _validate_motor_limits(motor, start, stop, k):\n # blow up on inverted values\n assert start < stop, (f'start ({start}) must be smaller than '\n f'stop ({stop}) for {k}')\n limits = motor.limits\n if any(not (limits[0] < v < limits[1]) for v in (start, stop)):\n raise LimitError(f\"your requested {k} values are out of limits for \"\n \"the motor \"\n f\"{limits[0]} < ({start}, {stop}) < {limits[1]}\")\n\n\ndef _get_v_with_dflt(sig, dflt):\n ret = yield from bps.read(sig)\n return (ret[sig.name]['value'] if ret is not None else dflt)\n\n\ndef xy_fly(scan_title, *, dwell_time,\n xstart, xstop, xstep_size,\n ystart, ystop, ystep_size=None,\n xspress3=None):\n \"\"\"Do a x-y fly scan.\n\n The x-motor is the 'fast' direction.\n\n Parameters\n ----------\n dwell_time : float\n Target time is s on each pixel\n\n xstart, xstop : float\n The start and stop values in the fast direction in mm\n\n xstep_size :\n xstep_size is step of x movement\n\n ystart, ystop : float\n The start and stop values in the slow direction in mm\n\n ystep_size :\n ystep_size use xstep_size if it isn't passed in\n\n scan_title : str\n Title of scan, required.\n \"\"\"\n xy_fly_stage = xy_stage\n _validate_motor_limits(xy_fly_stage.x, xstart, xstop, 'x')\n _validate_motor_limits(xy_fly_stage.y, ystart, ystop, 'y')\n ystep_size = ystep_size if ystep_size is not None else xstep_size\n assert dwell_time > 0, f'dwell_time ({dwell_time}) must be more than 0'\n assert xstep_size > 0, f'xstep_size ({xstep_size}) must be more than 0'\n assert ystep_size > 0, f'ystep_size ({ystep_size}) must be more than 0'\n ret = yield from bps.read(xy_fly_stage.x.mres) # (in mm)\n xmres = (ret[xy_fly_stage.x.mres.name]['value']\n if ret is not None else .0003125)\n\n ret = yield from bps.read(xy_fly_stage.y.mres) # (in mm)\n ymres = (ret[xy_fly_stage.y.mres.name]['value']\n if ret is not None else .0003125)\n\n prescale = int(np.floor((xstep_size / (5 * xmres))))\n a_xstep_size = prescale * (5 * xmres)\n\n a_ystep_size = int(np.floor((ystep_size / (ymres)))) * ymres\n\n num_xpixels = int(np.floor((xstop - xstart) / a_xstep_size))\n num_ypixels = int(np.floor((ystop - ystart) / a_ystep_size))\n\n flyspeed = a_xstep_size / dwell_time # this is in mm/s\n\n try:\n xy_fly_stage.x.velocity.check_value(flyspeed)\n except LimitError as e:\n raise LimitError(f'You requested a range of {xstop - xstart} with '\n f'{num_xpixels} pixels and a dwell time of '\n f'{dwell_time}. 
This requires a '\n f'motor velocity of {flyspeed} which '\n 'is out of range.') from e\n\n # set up delta-tau trigger to fast motor\n for v in ['p1600=0', 'p1607=1', 'p1600=1']:\n yield from bps.mv(dtt, v)\n yield from bps.sleep(0.1)\n\n # TODO make this a message?\n sclr.set_mode('flying')\n\n # poke the struck settings\n yield from bps.mv(sclr.mcas.prescale, prescale)\n yield from bps.mv(sclr.mcas.nuse, num_xpixels)\n\n if xspress3 is not None:\n yield from bps.mov(xspress3.external_trig, True)\n yield from bps.mov(xspress3.total_points, num_xpixels)\n yield from bps.mov(xspress3.hdf5.num_capture, num_xpixels)\n yield from bps.mov(xspress3.settings.num_images, num_xpixels)\n\n @bpp.reset_positions_decorator([xy_fly_stage.x, xy_fly_stage.y])\n @bpp.stage_decorator([sclr])\n @bpp.baseline_decorator([mono, xy_fly_stage])\n # TODO put in other meta data\n @bpp.run_decorator(md={'scan_title': scan_title})\n def fly_body():\n\n yield from bps.mv(xy_fly_stage.x, xstart,\n xy_fly_stage.y, ystart)\n\n @bpp.stage_decorator([x for x in [xspress3] if x is not None])\n def fly_row():\n # go to start of row\n yield from bps.mv(xy_fly_stage.x, xstart,\n xy_fly_stage.y, ystart + y*ystep_size)\n\n # set the fly speed\n yield from bps.mv(xy_fly_stage.x.velocity, flyspeed)\n\n yield from bps.trigger_and_read([xy_fly_stage],\n name='row_ends')\n\n for v in ['p1600=0', 'p1600=1']:\n yield from bps.mv(dtt, v)\n yield from bps.sleep(0.1)\n\n # arm the struck\n yield from bps.trigger(sclr, group=f'fly_row_{y}')\n # maybe start the xspress3\n if xspress3 is not None:\n yield from bps.trigger(xspress3, group=f'fly_row_{y}')\n yield from bps.sleep(0.1)\n # fly the motor\n yield from bps.abs_set(xy_fly_stage.x, xstop + a_xstep_size,\n group=f'fly_row_{y}')\n yield from bps.wait(group=f'fly_row_{y}')\n\n yield from bps.trigger_and_read([xy_fly_stage],\n name='row_ends')\n\n yield from bps.mv(xy_fly_stage.x.velocity, 5.0)\n yield from bps.sleep(.1)\n # read and save the struck\n yield from bps.create(name='primary')\n yield from bps.read(sclr)\n # and maybe the xspress3\n if xspress3 is not None:\n yield from bps.read(xspress3)\n yield from bps.save()\n\n for y in range(num_ypixels):\n if xspress3 is not None:\n yield from bps.mov(xspress3.fly_next, True)\n\n yield from fly_row()\n\n yield from fly_body()\n\n\nE_centers = Signal(value=[], name='E_centers', kind='normal')\nE_centers.tolerance = 1e-15\n\n\ndef E_fly(scan_title, *,\n start, stop,\n step_size,\n num_scans):\n _validate_motor_limits(mono.energy, start, stop, 'E')\n assert step_size > 0, f'step_size ({step_size}) must be more than 0'\n assert num_scans > 0, f'num_scans ({num_scans}) must be more than 0'\n\n e_back = yield from _get_v_with_dflt(mono.e_back, 1977.04)\n energy_cal = yield from _get_v_with_dflt(mono.cal, 0.40118)\n\n def _linear_to_energy(linear):\n linear = np.asarray(linear)\n return (e_back / np.sin(\n np.deg2rad(45)\n + 0.5*np.arctan((28.2474 - linear) / 35.02333)\n + np.deg2rad(energy_cal)/2\n )\n )\n\n def _energy_to_linear(energy):\n energy = np.asarray(energy)\n return (28.2474 + 35.02333 * np.tan(\n np.pi / 2\n - 2 * np.arcsin(e_back / energy)\n + np.deg2rad(energy_cal)\n )\n )\n\n # get limits in linear parameters\n l_start, l_stop = _energy_to_linear([start, stop])\n l_step_size = np.diff(_energy_to_linear([start, start + step_size]))\n\n # scale to match motor resolution\n lmres = yield from _get_v_with_dflt(mono.linear.mres, .0001666)\n\n prescale = int(np.floor((l_step_size / (5 * lmres))))\n a_l_step_size = prescale * (5 * lmres)\n\n num_pixels 
= int(np.floor((l_stop - l_start) / a_l_step_size))\n\n bin_edges = _linear_to_energy(\n l_start + a_l_step_size * np.arange(num_pixels + 1))\n bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2\n\n yield from bps.mv(E_centers, bin_centers)\n\n # The flyspeed is set by Paul by edict\n flyspeed = 0.1\n\n # set up delta-tau trigger to fast motor\n for v in ['p1600=0', 'p1607=4', 'p1600=1']:\n yield from bps.mv(dtt, v)\n yield from bps.sleep(0.1)\n\n # TODO make this a message?\n sclr.set_mode('flying')\n\n # poke the struck settings\n yield from bps.mv(sclr.mcas.prescale, prescale)\n yield from bps.mv(sclr.mcas.nuse, num_pixels)\n\n @bpp.reset_positions_decorator([mono.linear])\n @bpp.stage_decorator([sclr])\n @bpp.baseline_decorator([mono, xy_stage])\n # TODO put is other meta data\n @bpp.run_decorator(md={'scan_title': scan_title})\n def fly_body():\n yield from bps.trigger_and_read([E_centers], name='energy_bins')\n\n for y in range(num_scans):\n # go to start of row\n yield from bps.mv(mono.linear, l_start)\n\n # set the fly speed\n yield from bps.mv(mono.linear.velocity, flyspeed)\n\n yield from bps.trigger_and_read([mono],\n name='row_ends')\n\n for v in ['p1600=0', 'p1600=1']:\n yield from bps.mv(dtt, v)\n yield from bps.sleep(0.1)\n\n # arm the struck\n yield from bps.trigger(sclr, group=f'fly_energy_{y}')\n # fly the motor\n yield from bps.abs_set(mono.linear, l_stop + a_l_step_size,\n group=f'fly_energy_{y}')\n yield from bps.wait(group=f'fly_energy_{y}')\n\n yield from bps.trigger_and_read([mono],\n name='row_ends')\n\n yield from bps.mv(mono.linear.velocity, 0.5)\n # hard coded to let the sclr count its fingers and toes\n yield from bps.sleep(.1)\n # read and save the struck\n yield from bps.create(name='primary')\n yield from bps.read(sclr)\n yield from bps.save()\n\n yield from fly_body()\n","sub_path":"startup/30-fly_scans.py","file_name":"30-fly_scans.py","file_ext":"py","file_size_in_byte":9482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615529482","text":"import sys\nfrom path import Path\nif str(Path(__file__).parent.parent.abspath()) not in sys.path:\n sys.path.append(str(Path(__file__).parent.parent.abspath()))\nif str((Path(__file__).parent.parent.parent / 'simple').abspath()) not in sys.path:\n sys.path.append(str((Path(__file__).parent.parent.parent / 'simple').abspath()))\nimport pytest\nimport lux.game_map as gm\nimport lux.game_objects as go\nimport lux.constants as c\nimport lux.game as g\n\n\ndef _reset_state():\n gm.MAP_CACHE.clear()\n go.UNIT_CACHE.clear()\n c.LogicGlobals.reset()\n\n\n@pytest.fixture\ndef initialize_game(request):\n _reset_state()\n c.LogicGlobals.game_state = g.Game(0, f\"{request.param} {request.param}\")\n yield\n _reset_state()\n\n\n@pytest.fixture\ndef reset_agent_state():\n return _reset_state\n","sub_path":"kits/python/dev/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344598140","text":"from db import db\r\nfrom typing import List\r\nfrom datetime import datetime\r\n\r\n\r\nclass ActualIncomeByProjModel(db.Model):\r\n __tablename__ = \"vw_crm_line_actual_income\"\r\n\r\n keys_id = db.Column(db.String(200), primary_key=True)\r\n AP_JV = db.Column(db.String(2))\r\n PType = db.Column(db.String(3))\r\n ProjectType = db.Column(db.String(50))\r\n ProductID = db.Column(db.String(15))\r\n ProjectName = db.Column(db.String(255))\r\n 
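# NOTE: TransferDateApprove stores dates as plain 'YYYYMMDD' strings (see the\r\n    # hard-coded '20191206' in find_by_current below), so filters compare raw text.\r\n    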
TransferDateApprove = db.Column(db.String(200))\r\n TotalUnit = db.Column(db.Integer)\r\n NetPrice = db.Column(db.Float)\r\n FreeDownAmount = db.Column(db.Float)\r\n NetPriceExclFD = db.Column(db.Float)\r\n\r\n @classmethod\r\n def find_by_current(cls) -> List[\"ActualIncomeByProjModel\"]:\r\n return cls.query.filter_by(TransferDateApprove='20191206').all()\r\n\r\n @classmethod\r\n def find_by_date(cls, _date: str) -> List[\"ActualIncomeByProjModel\"]:\r\n return cls.query.filter_by(TransferDateApprove=_date).order_by(cls.PType.asc()).all()\r\n\r\n @classmethod\r\n def get_previousday(cls, _date: str) -> datetime:\r\n sql_statement = \"\"\"\r\n SELECT [dbo].[CRM_fn_GetDateAddPrevious](CAST(GETDATE() AS DATE), {})\r\n \"\"\".format(_date)\r\n return db.session.execute(sql_statement).fetchone()\r\n\r\n def save_to_db(self) -> None:\r\n db.session.add(self)\r\n db.session.commit()\r\n\r\n def delete_from_db(self) -> None:\r\n db.session.delete(self)\r\n db.session.commit()\r\n","sub_path":"models/vw_crm_line_actual_income.py","file_name":"vw_crm_line_actual_income.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459159623","text":"#!/usr/bin/env python\n#coding: utf-8\n\n#Text classification with an RNN\n#This text classification tutorial trains a [recurrent neural network](https://developers.google.com/machine-learning/glossary/#recurrent_neural_network) on the [IMDB large movie review dataset](http://ai.stanford.edu/~amaas/data/sentiment/) for sentiment analysis.\n\nfrom kubeflow import fairing\nfrom kubeflow.fairing import TrainJob\nimport importlib\nimport argparse\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport os\n\nUSING_KATIB = False\n\ndef data_loader(hyperparams, local_data_dir):\n dataset, info = tfds.load('imdb_reviews/subwords8k', \n data_dir=local_data_dir,\n with_info=True,\n as_supervised=True)\n train_dataset, test_dataset = dataset['train'], dataset['test']\n encoder = info.features['text'].encoder\n train_dataset = train_dataset.shuffle(hyperparams['BUFFER_SIZE'])\n train_dataset = train_dataset.padded_batch(hyperparams['BATCH_SIZE'], padded_shapes=None)\n test_dataset = test_dataset.padded_batch(hyperparams['BATCH_SIZE'], padded_shapes=None)\n return train_dataset, test_dataset, encoder\n\ndef define_model(encoder):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(encoder.vocab_size, 64),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(1)\n ])\n return model\n\nclass MovieReviewClassification(object):\n def __init__(self, learning_rate=1e-4, batch_size=64, epochs=2, local_data_dir='/app/tensorflow_datasets'):\n hyperparams = {'BUFFER_SIZE': 10000, 'BATCH_SIZE': batch_size}\n self.model_file = \"lstm_trained\"\n self.learning_rate = learning_rate\n self.epochs = epochs\n self.train_dataset, self.test_dataset, self.encoder = data_loader(hyperparams, local_data_dir)\n \n def train(self):\n model = define_model(self.encoder)\n model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(self.learning_rate),\n metrics=['accuracy'])\n history = model.fit(self.train_dataset, epochs=self.epochs,\n validation_data=self.test_dataset,\n validation_steps=30)\n model.save(self.model_file)\n test_loss, test_acc = model.evaluate(self.test_dataset)\n print('Test Loss: {}'.format(test_loss))\n print('Test Accuracy: 
{}'.format(test_acc))\n\nif __name__ == \"__main__\":\n    \n    if USING_KATIB:\n        parser = argparse.ArgumentParser(description=\"Using Katib for hyperparameter tuning\")\n        parser.add_argument(\"-lr\", \"--learning_rate\", default=\"1e-4\", help=\"Learning rate for the Keras optimizer\")\n        parser.add_argument(\"-bsz\", \"--batch_size\", default=\"64\", help=\"Batch size for each step of learning\")\n        parser.add_argument(\"-e\", \"--epochs\", default=\"2\", help=\"Number of epochs in each trial\")\n        args = parser.parse_args()\n        learning_rate = float(args.learning_rate)\n        batch_size = int(args.batch_size)\n        epochs = int(args.epochs)\n        model = MovieReviewClassification(learning_rate, batch_size, epochs, local_data_dir=\"~/tensorflow_datasets\")\n        model.train()\n    \n    else:\n        #using Fairing\n        GCP_PROJECT = fairing.cloud.gcp.guess_project_name()\n        DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)\n        BuildContext = None\n        FAIRING_BACKEND = 'KubeflowGKEBackend'\n        BackendClass = getattr(importlib.import_module('kubeflow.fairing.backends'), FAIRING_BACKEND)\n\n        data_files = ['tensorflow_datasets/downloads/ai.stanfor.edu_amaas_sentime_aclImdb_v1xA90oY07YfkP66HhdzDg046Ll8Bf3nAIlC6Rkj0WWP4.tar.gz', \n                      'tensorflow_datasets/downloads/ai.stanfor.edu_amaas_sentime_aclImdb_v1xA90oY07YfkP66HhdzDg046Ll8Bf3nAIlC6Rkj0WWP4.tar.gz.INFO',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/dataset_info.json',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/imdb_reviews-test.tfrecord-00000-of-00001',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/imdb_reviews-train.tfrecord-00000-of-00001',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/imdb_reviews-unsupervised.tfrecord-00000-of-00001',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/label.labels.txt',\n                      'tensorflow_datasets/imdb_reviews/subwords8k/1.0.0/text.text.subwords',\n                      'requirements.txt']\n        \n        train_job = TrainJob(MovieReviewClassification,\n                            input_files=data_files, \n                            docker_registry=DOCKER_REGISTRY, \n                            backend=BackendClass(build_context_source=BuildContext))\n        train_job.submit()
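\n\n# Hypothetical local smoke test (not part of the Katib/Fairing paths above;\n# assumes the IMDB subwords8k files are already cached locally):\n#\n#     model = MovieReviewClassification(epochs=1,\n#                                       local_data_dir='~/tensorflow_datasets')\n#     model.train()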
","sub_path":"text_classification_rnn.py","file_name":"text_classification_rnn.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"329177313","text":"from django.conf.urls.defaults import patterns, url, include\nfrom django.shortcuts import redirect\n\nfrom urlconf_decorator import decorate\n\nfrom amo.decorators import write\nfrom . import views\n\n# These will all start with /addon/<addon_id>/\ndetail_patterns = patterns('',\n    # Redirect to the edit page from the base.\n    url('^$', lambda r, addon_id: redirect('devhub.addons.edit', addon_id,\n                                           permanent=True)),\n    url('^edit$', views.edit, name='devhub.addons.edit'),\n    url('^ownership$', views.ownership, name='devhub.addons.owner'),\n    url('^payments$', views.payments, name='devhub.addons.payments'),\n    url('^payments/disable$', views.disable_payments,\n        name='devhub.addons.payments.disable'),\n    url('^profile$', views.profile, name='devhub.addons.profile'),\n    url('^edit_(?P<section>[^/]+)(?:/(?P<id>[^/]+))?$',\n        views.addons_section, name='devhub.addons.section'),\n\n    url('^versions/$', views.version_list, name='devhub.versions'),\n    url('^versions/(?P<version_id>\d+)$', views.version_edit,\n        name='devhub.versions.edit'),\n    url('^versions/(?P<version>[^/]+)$', views.version_bounce),\n)\n\nurlpatterns = decorate(write, patterns('',\n    url('^$', views.index, name='devhub.index'),\n\n    # URLs for a single add-on.\n    ('^addon/(?P<addon_id>\d+)/', include(detail_patterns)),\n    # Redirect people who have /addons/ instead of /addon/.\n    ('^addons/\d+/.*',\n     lambda r: redirect(r.path.replace('addons', 'addon', 1))),\n\n    # Add-on submission\n    url('^addon/submit$', views.submit_addon, name='devhub.submit_addon'),\n\n    # Redirect to /addons/ at the base.\n    url('^addon$', lambda r: redirect('devhub.addons', permanent=True)),\n    url('^addons$', views.dashboard, name='devhub.addons'),\n    url('^addons/activity$', views.activity,\n        name='devhub.addons.activity'),\n    url('^upload$', views.upload, name='devhub.upload'),\n    url('^upload/([^/]+)$', views.upload_detail,\n        name='devhub.upload_detail')))\n","sub_path":"apps/devhub/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"221414228","text":"# Node Class\nclass Node:\n\n    # function to initialise the node object\n    def __init__(self, data):\n        self.data = data  # Assign data\n        self.next = None  # Initialize next as Null\n\n\n# Linked list class contains a Node object\nclass LinkedList:\n\n    # Function to initialize HEAD\n    def __init__(self):\n        self.head = None\n\n    # This function prints contents of LinkedList\n    # Starting from HEAD\n    def printList(self):\n        present_ele = self.head\n        while(present_ele):\n            print(present_ele.data)\n            present_ele = present_ele.next\n\n    # Function to Insert a New Node at beginning\n    def push(self, new_data):\n        # Allocate the Node & Put in the data\n        new_node = Node(new_data)\n\n        # Make next of New Node as Head\n        new_node.next = self.head\n\n        # Move the Head to point to New Node\n        self.head = new_node\n\n    # Function to know the size of Linked List\n    def linkedListSize(self):\n        count = 0\n        current = self.head\n\n        while current is not None:\n            count += 1\n            current = current.next\n\n        return count\n\n\nif __name__ == \"__main__\":\n\n    # Start with the empty list\n    llist = LinkedList()\n\n    llist.push(1)\n    llist.push(13)\n    llist.push(4)\n    llist.push(22)\n    llist.push(10)\n    llist.push(5)\n\n    #llist.push(20)\n    #print(\"After adding value at HEAD\")\n    #llist.printList()\n\n    #llist.deleteNodeAt(5)\n    #print(\"After deletion at 5: \")\n    print(\"Nodes are : \")\n    llist.printList()\n\n    if llist:\n        print(llist)\n    else:\n        print(\"No\")\n\n    print(\"Size of linked list is: \", llist.linkedListSize())\n","sub_path":"Datastructures/linked_list/linked_list_size.py","file_name":"linked_list_size.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"262982561","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass Encoder(nn.Module):\r\n\r\n    def __init__(self, word_dim, embeddings=None, hidden_size=200, num_layers=3, batch_size=40):\r\n        super(Encoder, self).__init__()\r\n        self.hidden_size = hidden_size\r\n        self.num_layers = num_layers\r\n        self.batch_size = batch_size\r\n        # self.hidden = self.init_hidden()\r\n\r\n        if embeddings is None:\r\n            self.word_embedding = nn.Embedding(36906, word_dim)\r\n        else:\r\n            self.word_embedding = nn.Embedding.from_pretrained(embeddings, freeze=False)\r\n\r\n        self.lstm = nn.LSTM(word_dim, hidden_size, num_layers=num_layers, batch_first=True)\r\n        self.last = nn.Linear(hidden_size, 3)\r\n        self.softmax = nn.LogSoftmax(dim=1)\r\n\r\n    def forward(self, _input, h0, c0):\r\n        # input: concat(premise, hypothesis) (Bowman et al. 2016)\r\n        embedded = self.word_embedding(_input)\r\n        out1, (hn, cn) = self.lstm(embedded, (h0, c0))\r\n        out2 = self.last(cn)\r\n        return self.softmax(out2)\r\n\r\n    def init_hidden(self):\r\n        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_size),\r\n                torch.zeros(self.num_layers, self.batch_size, self.hidden_size))\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"513003979","text":"# -*- coding: utf-8 -*-\nimport os\nimport requests\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom Meizi2.settings import IMAGES_STORE\n\nclass Meizi2Pipeline(object):\n    def process_item(self, item, spider):\n        if 'images' in item:\n            print('item contains images')\n            if not os.path.exists(IMAGES_STORE):\n                os.makedirs(IMAGES_STORE)\n\n            for image in item['images']:\n                image_path = IMAGES_STORE+'/'+image[7:].replace('/','_')\n\n                response = requests.get(image)\n                if response.status_code == 200:\n                    with open(image_path,'wb') as f:\n                        f.write(response.content)\n\n                else:\n                    print('image download failed:', image)\n\n        return item\n
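\n# Untested sketch: the same download done with streaming, so large images are\n# not held fully in memory (requests' stream=True plus iter_content):\n#\n#     with requests.get(image, stream=True) as response:\n#         if response.status_code == 200:\n#             with open(image_path, 'wb') as f:\n#                 for chunk in response.iter_content(chunk_size=8192):\n#                     f.write(chunk)\n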
","sub_path":"爬虫/day10_0519/Meizi2/Meizi2/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"650491378","text":"import sys\ninput = sys.stdin.readline\n\ndef dfs(expression, depth, use_cnt):\n    global max_result, min_result\n    if depth == N:\n        tmp = eval(expression)\n        max_result = max(max_result, tmp)\n        min_result = min(min_result, tmp)\n        return\n    if use_cnt[0] < p_cnt:\n        use_cnt[0] += 1\n        dfs(expression + \"+\" + str(nums[depth]), depth + 1, use_cnt)\n        use_cnt[0] -= 1\n    if use_cnt[1] < mi_cnt:\n        use_cnt[1] += 1\n        dfs(expression + \"-\" + str(nums[depth]), depth + 1, use_cnt)\n        use_cnt[1] -= 1\n    \n    if use_cnt[2] < mu_cnt:\n        use_cnt[2] += 1\n        dfs(expression + \"*\" + str(nums[depth]), depth + 1, use_cnt)\n        use_cnt[2] -= 1\n    \n    if use_cnt[3] < d_cnt:\n        use_cnt[3] += 1\n        dfs(expression + \"//\" + str(nums[depth]), depth + 1, use_cnt)\n        use_cnt[3] -= 1\n    \n    \nN = int(input())\nnums = list(map(int, input().split()))\np_cnt, mi_cnt, mu_cnt, d_cnt = map(int, input().split())\nmax_result, min_result = -float('inf'), float('inf')\ndfs(str(nums[0]), 1, [0, 0, 0, 0])\nprint(max_result)\nprint(min_result)\n","sub_path":"15659연산자끼워넣기3.py","file_name":"15659연산자끼워넣기3.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"396516870","text":"\nimport argparse\nfrom depccg.tools.reader import read_trees_guess_extension\nfrom depccg.printer import print_\nfrom depccg.tokens import english_annotator\nfrom depccg.download import SEMANTIC_TEMPLATES\n\nLANG = 'en'\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('PATH',\n                        help='path to either of *.auto, *.xml, *.jigg.xml, *.ptb')\n
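    # (per its name, read_trees_guess_extension below picks the tree reader\n    # from the PATH file extension)\n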
    parser.add_argument('--annotator',\n                        default='spacy',\n                        choices=english_annotator.keys(),\n                        help='annotate POS, named entity, and lemmas using this library')\n    parser.add_argument('-f',\n                        '--format',\n                        default='xml',\n                        choices=['auto', 'xml', 'prolog', 'jigg_xml', 'jigg_xml_ccg2lambda', 'json'],\n                        help='output format')\n    parser.add_argument('--semantic-templates',\n                        help='semantic templates used in \"ccg2lambda\" format output')\n    args = parser.parse_args()\n\n    annotate_fun = english_annotator[args.annotator]\n    doc, trees = [], []\n    for _, tokens, tree in read_trees_guess_extension(args.PATH):\n        doc.append([token.word for token in tokens])\n        trees.append([(tree, 0)])\n    tagged_doc = annotate_fun(doc, tokenize=False)\n\n    semantic_templates = args.semantic_templates or SEMANTIC_TEMPLATES[LANG]\n    print_(trees,\n           tagged_doc,\n           format=args.format,\n           lang=LANG,\n           semantic_templates=semantic_templates)\n","sub_path":"depccg/tools/tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"225713320","text":"import skymapper as skm\nimport matplotlib.pyplot as plt\nimport inspect\n\nif __name__ == \"__main__\":\n\n    # cycle through all defined projections and show the full sky\n    # with default graticules\n    args = {\"ra_0\": 0}\n    conic_args = {\"ra_0\":0,\n                  \"dec_0\": -10,\n                  \"dec_1\": -40,\n                  \"dec_2\": 10\n                  }\n\n    for name, proj_cls in skm.projection_register.items():\n        proj = None\n        signature = inspect.signature(proj_cls.__init__)\n        try:\n            proj = proj_cls(**args)\n        except TypeError:\n            try:\n                proj = proj_cls(**conic_args)\n            except TypeError:\n                pass\n        \n        if proj is not None:\n            map = skm.Map(proj, interactive=False)\n            map.grid()\n            map.fig.suptitle(name)\n            map.show()\n","sub_path":"examples/all_projections.py","file_name":"all_projections.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"168321915","text":"#-*- coding:utf-8 -*-\n\nimport re\nimport datetime\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom lianjia_spider.items import LianjiaSpiderItem\n\nclass Spider(CrawlSpider):\n    name = 'LianjiaZufang'\n\n    allowed_domains = ['lianjia.com']\n    statr_urls = ['gz', 'bj', 'fs', 'xm']\n\n    # crawler rules\n    rules = (\n        Rule(LinkExtractor(allow='zufang/\\d+.html'), follow=True, callback='parse_item'),\n        Rule(LinkExtractor(allow='zufang/\\w+\\d+\\.html'), follow=True, callback='parse_item'),\n    )\n    def start_requests(self):\n        for dq in self.statr_urls:\n            yield Request('https://%s.lianjia.com/zufang/' % dq)\n            for i in range(2, 101):\n                yield Request('https://%s.lianjia.com/zufang/pg%d/' % (dq, i))\n\n    def parse_item(self, response):\n        sel = Selector(response)\n        item = LianjiaSpiderItem()\n\n        item['title'] = sel.xpath('//div[@class=\"title\"]/h1[@class=\"main\"]/text()').extract_first()\n        item['area'] = sel.xpath('//div[@class=\"zf-room\"]/p[1]/text()').extract_first()\n        item['price'] = sel.xpath('//div[@class=\"price \"]/span[@class=\"total\"]/text()').extract_first()\n        item['time'] = sel.xpath('//div[@class=\"zf-room\"]/p[@class=\"lf\"][5]/text()').extract_first()\n        item['city'] = sel.xpath('//div[@class=\"container\"]/div/a[2]/text()').extract_first()[:-2]\n        item['town'] = sel.xpath('//div[@class=\"container\"]/div/a[3]/text()').extract_first()[:-2]\n        # get the geographic position\n        position = re.search('resblockPosition:(.+),', response.body)\n        item['position'] = 
position.group()[:-1].split(':')[1][1:-1]\n item['url'] = response.url\n yield item\n","sub_path":"lianjia_spider/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645776396","text":"from json import loads\n\nfrom django.core.serializers import serialize\nfrom django.db.utils import IntegrityError\nfrom django.http import JsonResponse, HttpResponseBadRequest, Http404, HttpResponse\nfrom django.shortcuts import get_list_or_404, get_object_or_404\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\n\nfrom .models import Car\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass CarsView(View):\n http_method_names = ['get', 'post', 'patch', 'delete']\n\n def get(self, *args, **kwargs):\n if 'pk' in kwargs:\n serialized = serialize('json', get_list_or_404(Car, pk=kwargs['pk']))\n return JsonResponse(loads(serialized), safe=False)\n serialized = serialize('json', get_list_or_404(Car))\n return JsonResponse(loads(serialized), safe=False)\n\n def post(self, *args, **kwargs):\n data = self._parse_request_body(self.request.body)\n try:\n new_car = Car.objects.create(**data)\n except IntegrityError:\n return HttpResponseBadRequest('Missing fields')\n\n return JsonResponse({'pk': new_car.pk}, status=201)\n\n def patch(self, *args, **kwargs):\n data = self._parse_request_body(self.request.body)\n try:\n pk = kwargs['pk']\n except KeyError:\n return HttpResponseBadRequest('Missing primary key argument. (PATCH /cars/...)')\n\n try:\n car_queryset = Car.objects.filter(pk=pk)\n if len(car_queryset) == 0:\n raise Http404('Resource not found')\n car_queryset.update(**data)\n except IntegrityError:\n return HttpResponseBadRequest('Unique field already exists')\n\n return JsonResponse({'pk': car_queryset[0].pk})\n\n def delete(self, *args, **kwargs):\n try:\n pk = kwargs['pk']\n except KeyError:\n return HttpResponseBadRequest('Missing primary key argument. (DELETE /cars/...)')\n car = get_object_or_404(Car, pk=pk)\n car.delete()\n return HttpResponse(status=204)\n\n def _parse_request_body(self, body):\n if len(body) == 0:\n return loads(\"{}\")\n if isinstance(body, bytes):\n body = body.decode()\n loads_body = loads(body)\n return loads_body\n","sub_path":"cars/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482891133","text":"'''\nGiven a string s, find the longest palindromic substring in s. 
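The expand-around-center approach implemented below runs in O(n^2) time with O(1) extra space; Manacher's algorithm would bring this down to O(n). 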
You may assume that the \nmaximum length of s is 1000.\n\nExample 1:\nInput: \"babad\"\nOutput: \"bab\"\nNote: \"aba\" is also a valid answer.\n\nExample 2:\nInput: \"cbbd\"\nOutput: \"bb\"\n'''\n\ndef longest_palin_substring(s:str) -> str:\n    if s is None or len(s) == 0: return ''\n    start, end = 0, 0\n    \n    def expand(s, left, right):\n        while left >= 0 and right < len(s) and s[left] == s[right]:\n            left -= 1\n            right += 1\n        return right - left - 1\n\n    for i in range(len(s)):\n        len1 = expand(s, i, i)\n        len2 = expand(s, i, i+1)\n        length = max(len1, len2)\n\n        if length > end - start:\n            start = i - (length-1)//2\n            end = i + length//2\n\n    return s[start:end+1]\n\n\n","sub_path":"algorithms/strings/longest_palin_substring.py","file_name":"longest_palin_substring.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"451154775","text":"import random\r\nimport os\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim as optim\r\nimport torch.utils.data\r\n# from dataset.data_loader import GetLoader\r\nfrom torchvision import datasets\r\nfrom torchvision import transforms\r\nfrom model import CNNModel\r\nimport numpy as np\r\n# from test import test\r\nfrom S2U_test import test\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom tqdm import tqdm\r\nfrom skimage import io, transform\r\nimport cv2\r\nimport pandas as pd\r\n\r\n\r\nclass MNIST(Dataset):\r\n    def __init__(self, csv_file, root_dir, transform=None):\r\n        \"\"\" Initialize the MNIST dataset \"\"\"\r\n        self.root_dir = root_dir\r\n        self.landmarks_frame = pd.read_csv(csv_file)\r\n        self.transform = transform\r\n    \r\n    def __getitem__(self, index):\r\n        \"\"\" Get a sample from the dataset \"\"\"\r\n        img_name = os.path.join(self.root_dir,self.landmarks_frame.iloc[index,0])\r\n        image = io.imread(img_name)\r\n        # image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\r\n        label = self.landmarks_frame['label'][index]\r\n        label = torch.FloatTensor([label])\r\n\r\n        if self.transform:\r\n            image = self.transform(image)\r\n        return image, label\r\n\r\n    def __len__(self):\r\n        \"\"\" Total number of samples in the dataset \"\"\"\r\n        return len(self.landmarks_frame)\r\n\r\nclass USPS(Dataset):\r\n    def __init__(self, csv_file, root_dir, transform=None):\r\n        \"\"\" Initialize the USPS dataset \"\"\"\r\n        self.root_dir = root_dir\r\n        self.landmarks_frame = pd.read_csv(csv_file)\r\n        self.transform = transform\r\n    \r\n    def __getitem__(self, index):\r\n        \"\"\" Get a sample from the dataset \"\"\"\r\n        img_name = os.path.join(self.root_dir,self.landmarks_frame.iloc[index,0])\r\n        image = io.imread(img_name)\r\n        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\r\n        label = self.landmarks_frame['label'][index]\r\n        label = torch.FloatTensor([label])\r\n\r\n        if self.transform:\r\n            image = self.transform(image)\r\n        return image, label\r\n\r\n    def __len__(self):\r\n        \"\"\" Total number of samples in the dataset \"\"\"\r\n        return len(self.landmarks_frame)\r\n\r\n\r\nsource_dataset_name = 'SVHN'\r\ntarget_dataset_name = 'USPS'\r\n# source_image_root = os.path.join('..', 'dataset', source_dataset_name)\r\n# target_image_root = os.path.join('..', 'dataset', target_dataset_name)\r\n# model_root = os.path.join('..', 'models')\r\ncuda = True\r\ncudnn.benchmark = True\r\nlr = 1e-3\r\nbatch_size = 128\r\nimage_size = 28\r\nn_epoch = 50\r\n\r\nmanual_seed = random.randint(1, 10000)\r\nrandom.seed(manual_seed)\r\ntorch.manual_seed(manual_seed)\r\n\r\n# load data\r\n\r\n# img_transform_source = transforms.Compose([\r\n#     transforms.Resize(image_size),\r\n#     transforms.ToTensor(),\r\n#     transforms.Normalize(mean=(0.1307,), std=(0.3081,))\r\n# ])\r\n\r\n# 
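(the commented-out transforms below appear to be left over from an original\r\n# MNIST-to-MNIST-M version of this script; the active SVHN-to-USPS pipeline\r\n# normalizes all three channels with mean/std 0.5 instead)\r\n# 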
img_transform_target = transforms.Compose([\n# transforms.Resize(image_size),\n# transforms.ToTensor(),\n# transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n# ])\n\n# dataset_source = datasets.MNIST(\n# root='../dataset',\n# train=True,\n# transform=img_transform_source,\n# download=True\n# )\nsourceTrainDataset = MNIST(csv_file=\"../../hw3_data/digits/svhn/train.csv\", root_dir=\"../../hw3_data/digits/svhn/train\",transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\ndataloader_source = torch.utils.data.DataLoader(dataset=sourceTrainDataset, batch_size=batch_size, shuffle=True)\n\ntargetTrainDataset = USPS(csv_file=\"../../hw3_data/digits/usps/train.csv\", root_dir=\"../../hw3_data/digits/usps/train\",transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\ndataloader_target = torch.utils.data.DataLoader(dataset=targetTrainDataset, batch_size=batch_size, shuffle=True)\n# dataloader_source = torch.utils.data.DataLoader(\n# dataset=dataset_source,\n# batch_size=batch_size,\n# shuffle=True,\n# num_workers=8)\n\n# train_list = os.path.join(target_image_root, 'mnist_m_train_labels.txt')\n\n# dataset_target = GetLoader(\n# data_root=os.path.join(target_image_root, 'mnist_m_train'),\n# data_list=train_list,\n# transform=img_transform_target\n# )\n\n# dataloader_target = torch.utils.data.DataLoader(\n# dataset=dataset_target,\n# batch_size=batch_size,\n# shuffle=True,\n# num_workers=8)\n\n# load model\n\nmy_net = CNNModel()\n\n# setup optimizer\n\noptimizer = optim.Adam(my_net.parameters(), lr=lr)\n\nloss_class = torch.nn.NLLLoss()\nloss_domain = torch.nn.NLLLoss()\n\nif cuda:\n my_net = my_net.cuda()\n loss_class = loss_class.cuda()\n loss_domain = loss_domain.cuda()\n\nfor p in my_net.parameters():\n p.requires_grad = True\n\n# training\n\nfor epoch in range(n_epoch):\n\n len_dataloader = min(len(dataloader_source), len(dataloader_target))\n data_source_iter = iter(dataloader_source)\n data_target_iter = iter(dataloader_target)\n\n i = 0\n while i < len_dataloader:\n\n p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader\n alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1\n\n # training model using source data\n data_source = data_source_iter.next()\n s_img, s_label = data_source\n\n my_net.zero_grad()\n batch_size = len(s_label)\n\n input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)\n class_label = torch.LongTensor(batch_size)\n domain_label = torch.zeros(batch_size)\n domain_label = domain_label.long()\n\n if cuda:\n s_img = s_img.cuda()\n s_label = s_label.cuda()\n input_img = input_img.cuda()\n class_label = class_label.cuda()\n domain_label = domain_label.cuda()\n\n input_img.resize_as_(s_img).copy_(s_img)\n class_label.resize_as_(s_label.long()).copy_(s_label.long())\n\n class_output, domain_output = my_net(input_data=input_img, alpha=alpha)\n err_s_label = loss_class(class_output, class_label.squeeze())\n err_s_domain = loss_domain(domain_output, domain_label)\n\n # training model using target data\n data_target = data_target_iter.next()\n t_img, _ = data_target\n\n batch_size = len(t_img)\n\n input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)\n domain_label = torch.ones(batch_size)\n domain_label = domain_label.long()\n\n if cuda:\n t_img = t_img.cuda()\n input_img = input_img.cuda()\n domain_label = domain_label.cuda()\n\n input_img.resize_as_(t_img).copy_(t_img)\n\n _, domain_output = my_net(input_data=input_img, alpha=alpha)\n err_t_domain = loss_domain(domain_output, domain_label)\n err = err_t_domain + err_s_domain + err_s_label\n err.backward()\n optimizer.step()\n\n i += 1\n\n print ('epoch: %d, [iter: %d / all %d], err_s_label: %f, err_s_domain: %f, err_t_domain: %f' \\\n % (epoch, i, len_dataloader, err_s_label.cpu().data.numpy(),\n err_s_domain.cpu().data.numpy(), err_t_domain.cpu().data.numpy()))\n\n torch.save(my_net, './model/svhn_usps_model'+str(epoch)+'.pth')\n test(source_dataset_name, epoch)\n test(target_dataset_name, epoch)\n\nprint ('done')\n","sub_path":"hw3-Chee-An-Yu/DANN/train/svhn_to_usps.py","file_name":"svhn_to_usps.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"318408024","text":"from zvt.api.common import get_kdata_schema\nfrom zvt.api.computing import ma, macd\nfrom zvt.domain import SecurityType, TradingLevel\nfrom zvt.factors.factor import OneSchemaFactor, MustFactor, OneSchemaMustFactor\n\n\nclass TechnicalFactor(OneSchemaFactor):\n def __init__(self, security_type=SecurityType.stock, exchanges=['sh', 'sz'], codes=None, the_timestamp=None,\n start_timestamp=None, end_timestamp=None, keep_all_timestamp=False, fill_method='ffill', filters=None,\n provider='netease', level=TradingLevel.LEVEL_1DAY,\n indicators=['ma', 'macd'],\n indicators_param=[{'window': 5}, {'slow': 26, 'fast': 12, 'n': 9}]) -> None:\n \"\"\"\n this base class is for init the kdata,you could calculate technical factor from it\n\n :param security_type:\n :type security_type:\n :param exchanges:\n :type exchanges:\n :param codes:\n :type codes:\n :param the_timestamp:\n :type the_timestamp:\n :param start_timestamp:\n :type start_timestamp:\n :param end_timestamp:\n :type end_timestamp:\n :param keep_all_timestamp:\n :type keep_all_timestamp:\n :param fill_method:\n :type fill_method:\n :param filters:\n :type filters:\n :param provider:\n :type provider:\n :param level:\n :type level:\n :param indicators: the technical factors need to calculate\n :type indicators:\n\n \"\"\"\n self.indicators = indicators\n self.indicators_param = indicators_param\n self.data_schema = 
get_kdata_schema(security_type, level=level)\n\n super().__init__(security_type, exchanges, codes, the_timestamp, None, None, start_timestamp,\n end_timestamp, keep_all_timestamp, fill_method, None, filters, provider, level=level)\n\n def run(self):\n for idx, factor in enumerate(self.indicators):\n if factor == 'ma':\n window = self.indicators_param[idx].get('window')\n if self.security_type == SecurityType.stock:\n self.data_df['ma{}'.format(window)] = ma(self.data_df['qfq_close'], window=window)\n else:\n self.data_df['ma{}'.format(window)] = ma(self.data_df['close'], window=window)\n if factor == 'macd':\n slow = self.indicators_param[idx].get('slow')\n fast = self.indicators_param[idx].get('fast')\n n = self.indicators_param[idx].get('n')\n\n if self.security_type == SecurityType.stock:\n diff, dea, m = macd(self.data_df['qfq_close'], slow=slow, fast=fast, n=n)\n else:\n diff, dea, m = macd(self.data_df['close'], slow=slow, fast=fast, n=n)\n\n self.data_df['diff'] = diff\n self.data_df['dea'] = dea\n self.data_df['m'] = m\n\n\nclass CrossMaFactor(TechnicalFactor, MustFactor):\n def __init__(self, security_type=SecurityType.stock, exchanges=['sh', 'sz'], codes=None, the_timestamp=None,\n start_timestamp=None, end_timestamp=None, keep_all_timestamp=False, fill_method='ffill', filters=None,\n provider='netease', level=TradingLevel.LEVEL_1DAY, short_window=5, long_window=10) -> None:\n super().__init__(security_type, exchanges, codes, the_timestamp, start_timestamp, end_timestamp,\n keep_all_timestamp, fill_method, filters, provider, level, indicators=['ma', 'ma'],\n indicators_param=[{'window': short_window}, {'window': long_window}])\n\n self.short_window = short_window\n self.long_window = long_window\n\n def run(self):\n super().run()\n s = self.data_df['ma{}'.format(self.short_window)] > self.data_df['ma{}'.format(self.long_window)]\n self.df = s.to_frame(name='score')\n\n\nclass IndexFactor(OneSchemaMustFactor):\n def __init__(self, security_type=SecurityType.index, exchanges=['cn'], codes=None, the_timestamp=None,\n window=None, window_func='mean', start_timestamp=None, end_timestamp=None, keep_all_timestamp=False,\n fill_method='ffill', columns=[], filters=None, provider='sina', level=TradingLevel.LEVEL_1DAY,\n effective_number=10) -> None:\n super().__init__(security_type, exchanges, codes, the_timestamp, window, window_func, start_timestamp,\n end_timestamp, keep_all_timestamp, fill_method, columns, filters, provider, level,\n effective_number)\n\n def run(self):\n pass\n\n\nif __name__ == '__main__':\n factor = CrossMaFactor(codes=['000338'], start_timestamp='2019-01-01', end_timestamp='2019-05-29')\n factor.run()\n print(factor.get_df())\n","sub_path":"zvt/factors/technical_factor.py","file_name":"technical_factor.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567391311","text":"#!/usr/bin/env python3\n\n\"\"\"\nUsage: timecourse samples.csv ctab_dir gene1...gnene2...gene3 etc\n\ncreate a timecourse of a given transcript for females\n\n\"\"\"\n\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef timecourse( csv, gender, gene ):\n df = pd.read_csv( csv )\n soi = df.loc[:,\"sex\"] == gender \n frames = df.loc[soi,:]\n\n fpkms_avg = []\n \n for index, sample, sex, stage in frames.itertuples():\n filename = os.path.join( sys.argv[2], sample, \"t_data.ctab\")\n ctab_df = pd.read_table( filename, index_col=\"t_name\")\n roi = 
ctab_df.loc[:,\"gene_name\"] == gene\n fpkms= ctab_df.loc[roi,\"FPKM\"]\n fpkms_avg.append(np.mean(fpkms) )\n\n return fpkms_avg\n #instead of storing these fpkms like in the last script, we are redturning them into females_ or males_fpkms \nfor item in sys.argv[3:]: \n var = timecourse(sys.argv[1], \"female\", item)\n \n\n stages = [\"10\", \"11\", \"12\", \"13\", \"14A\", \"14B\", \"14C\", \"14D\"]\n\n\n fig, ax = plt.subplots()\n ax.plot(var, color= \"blue\", label= \"fpkms_avg\" )\n plt.suptitle= ( item + \"mean txpt fpkms\" )\n plt.tight_layout()\n ax.set_ylabel(\"FPKMs\")\n ax.set_xlabel(\"stage\")\n plt.tight_layout()\n box=ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height] )\n ax.legend(loc='center left', bbox_to_anchor=(1,0.5), frameon = False)\n #plt.xticks(stages, rotation= \"vertical\")\n fig.savefig(\"{0}.png\".format(item))\n plt.close( fig )\n\n\n\n#print( var ) \n\n#\n# females_fpkms = timecourse( sys.argv[2], \"female\")\n# males_fpkms = timecourse( sys.argv[2], \"male\")\n#\n# females_fpkmsREP =timecourse( sys.argv[3], \"female\")\n# males_fpkmsREP = timecourse( sys.argv[3], \"male\")\n##\n#\n# print(females_fpkms)\n#\n# print(\"THIS IS THE REP BELOW: \")\n# print(females_fpkmsREP)\n#\n# print(\" YO FEMALE FPKMS STOP HERE\")\n#\n# print(males_fpkms)\n#\n# print(\"THIS IS THE REP BELOW: \")\n# print(males_fpkmsREP)\n\n#\n\n# fig, ax = plt.subplots()\n# ax.plot( stages, males_fpkms, color= \"blue\", label= \"male\" )\n# ax.plot( stages, females_fpkms, color= \"red\", label= \"female\" )\n# ax.plot( stages, males_fpkmsREP, color= \"orange\", label= \"male Replicates\")\n# ax.plot( stages, females_fpkmsREP, color= \"green\", label= \"female Replicates\")\n# ax.set_title=( sys.argv[1] )\n# plt.tight_layout()\n# ax.set_ylabel(\"FPKMs\")\n# ax.set_xlabel(\"stage\")\n# plt.tight_layout()\n# box=ax.get_position()\n# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height] )\n# ax.legend(loc='center left', bbox_to_anchor=(1,0.5), frameon = False)\n# plt.xticks(stages, rotation= \"vertical\")\n# fig.savefig( \"timecourseMF.png\")\n# plt.close( fig )\n","sub_path":"day4_Homework/scriptforHWNum2.py","file_name":"scriptforHWNum2.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"32767438","text":"#!/usr/bin/env python\r\n#\r\n# Copyright 2010 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"A simple blog application written on Google App Engine. 
This renders both the\r\nMainPage as well as the Moderation page, which will show the classification\r\nresults.\r\n\"\"\"\r\n\r\n__author__ = 'Robert Kaplow'\r\n\r\nimport cgi\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp.util import run_wsgi_app\r\n\r\nimport google_prediction\r\n\r\n\r\nclass BlogComment(db.Model):\r\n \"\"\" Represents a comment from the application. This will get stored to the\r\n data store.\r\n \"\"\"\r\n content = db.TextProperty()\r\n tag = db.StringProperty(multiline=True)\r\n best_score = db.FloatProperty()\r\n date = db.DateTimeProperty(auto_now_add=True)\r\n author = db.UserProperty()\r\n\r\n\r\nclass MainPage(webapp.RequestHandler):\r\n \"\"\" Represents the main page of the blog. It displays the current comments as\r\n well as the input box for a new comment.\r\n \"\"\"\r\n\r\n def get(self):\r\n\r\n self.response.out.write(\"\"\"\r\n \r\n \"\"\")\r\n self.response.out.write(\"\"\"\r\n
\r\n \r\n

\r\n

This blog is a use case for the Google Prediction API. Imagine this\r\n is your favorite physics blog, which will track cutting edge physics\r\n research for a general audience. Unfortunately, you have a problem with\r\n spam comments every time you add a new post. Luckily, you've heard of the\r\n Google Prediction API and you try to see how well it can do at classifying\r\n spam comments with a minimum dataset.

\r\n
\"\"\")\r\n\r\n user = users.get_current_user()\r\n if not user:\r\n self.redirect(users.create_login_url(self.request.uri))\r\n comments = db.GqlQuery('SELECT * FROM BlogComment '\r\n 'ORDER BY date DESC LIMIT 50')\r\n for comment in comments:\r\n self.response.out.write('
')\r\n if comment.tag == 'spam':\r\n self.response.out.write('
')\r\n else:\r\n self.response.out.write('
')\r\n if comment.author:\r\n self.response.out.write('%s wrote:' % comment.author.nickname())\r\n else:\r\n self.response.out.write('An anonymous person wrote:')\r\n if comment.content:\r\n self.response.out.write('
%s
'%(comment.content))\r\n if comment.tag:\r\n self.response.out.write('Marked as %s' % comment.tag)\r\n if comment.best_score:\r\n self.response.out.write(', score is %f ' % comment.best_score)\r\n self.response.out.write('
')\r\n\r\n self.response.out.write(\"\"\"Post comment:\r\n
\r\n
\r\n
\r\n \r\n Actually Submit
\r\n
\r\n
\r\n \r\n \"\"\")\r\n\r\n\r\nclass Moderation(webapp.RequestHandler):\r\n \"\"\" Represents the moderation page for the blog. It will show the\r\n classification for the new comment as well as the scores.\r\n \"\"\"\r\n\r\n\r\n def post(self):\r\n # Get the authentication token.\r\n auth = getAuth()\r\n # Set your model here:\r\n model = 'metro/language_id.txt'\r\n # Get the post from the HTML form\r\n post = cgi.escape(self.request.get('content'))\r\n # Make the Google Prediction API call\r\n [prediction, scores] = google_prediction.Predict(auth, model, [post])\r\n\r\n real_submit = cgi.escape(self.request.get('real_submit'))\r\n self.response.out.write(\"\"\"\r\n \r\n \"\"\")\r\n\r\n self.response.out.write('You wrote:

')\r\n self.response.out.write(post)\r\n self.response.out.write('

')\r\n self.response.out.write('Your comment has been flagged as:
')\r\n    self.response.out.write(cgi.escape(prediction))\r\n    self.response.out.write('

Statistics:
')\r\n for key, value in scores.items():\r\n self.response.out.write('label is %s, score is %s
' %(key, value))\r\n self.response.out.write('
')\r\n\r\n if real_submit == 'true':\r\n # Store the blog comment\r\n comment = BlogComment()\r\n comment.content = post\r\n comment.tag = cgi.escape(prediction)\r\n comment.best_score = scores[prediction]\r\n\r\n # Add author if available\r\n if users.get_current_user():\r\n comment.author = users.get_current_user()\r\n comment.put() # Save the comment to our datastore\r\n self.response.out.write('

Thank you for posting!')\r\n self.response.out.write('')\r\n\r\n\r\n# You need to create a file auth-token which has the token returned from a\r\n# google_prediction.get_auth() call\r\ndef getAuth():\r\n auth_file = open('auth-token', 'r')\r\n auth = auth_file.read()\r\n auth_file.close()\r\n return auth.strip()\r\n\r\nclass AdmGetToken(webapp.RequestHandler):\r\n def get(self):\r\n user = self.request.get('user')\r\n userpass = self.request.get('pass') + '..'\r\n self.response.out.write (google_prediction.GetAuthentication(email,password))\r\n\r\nclass AdmTrain(webapp.RequestHandler):\r\n def get(self):\r\n datafile = 'metro/language_id.txt'\r\n self.response.out.write (google_prediction.Train(getAuth(),datafile))\r\n\r\nclass AdmTrainStatus(webapp.RequestHandler):\r\n def get(self):\r\n datafile = 'metro/language_id.txt'\r\n self.response.out.write (google_prediction.TrainStatus(getAuth(),datafile))\r\n\r\n\r\n\r\n\r\napplication = webapp.WSGIApplication([\r\n ('/', MainPage),\r\n ('/posted', Moderation),\r\n ('/adm/gettoken', AdmGetToken),\r\n ('/adm/train', AdmTrain),\r\n ('/adm/trainstatus', AdmTrainStatus) \r\n ],\r\n debug=True)\r\n\r\n\r\ndef main():\r\n run_wsgi_app(application)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"adm_old.py","file_name":"adm_old.py","file_ext":"py","file_size_in_byte":6774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354779459","text":"import nltk\r\n\r\n# 1\r\n# USING A TAGGER\r\n# A part-of-speech tagger, or POS-tagger,\r\n# processes a sequence of words, and attaches\r\n# a part of speech tag to each word.\r\n\r\ntext = nltk.word_tokenize(\"My name is Jay Gulraj\")\r\nnltk.pos_tag(text)\r\ntext = nltk.word_tokenize(\"And now for something completely different\")\r\nnltk.pos_tag(text)\r\nnltk.help.upenn_tagset('NNP')\r\nnltk.help.upenn_tagset('NN.*')\r\nnltk.corpus.brown.readme()\r\n\r\ntext = nltk.word_tokenize(\"They refuse to permit us to obtain the refuse permit\")\r\nnltk.pos_tag(text)\r\ntext = nltk.word_tokenize(\"I play with him so that we could watch that play\")\r\nnltk.pos_tag(text)\r\ntext = nltk.Text(word.lower() for word in nltk.corpus.brown.words())\r\ntext.similar(\"woman\")\r\ntext.similar(\"bought\")\r\ntext.similar(\"over\")\r\ntext.similar(\"the\")\r\n\r\n# TAGGED CORPORA\r\ntagged_token = nltk.tag.str2tuple(\"learn/NN\")\r\ntagged_token\r\ntoken_back = nltk.tag.tuple2str(tagged_token)\r\ntoken_back\r\ntagged_token[0]\r\ntagged_token[1]\r\n\r\n# TWO WAYS OF WRITING THE SAME THING\r\nsent = '''Hi I am\r\nJay gulraj'''\r\nsent = \"Hi I am\\nJay Gulraj\"\r\nsent = '''Hi I am\\nJay Gulraj'''\r\n# THIS IS DIFFERENT\r\nsent = \"Hi I am\" \\\r\n \"Jay Gulraj\"\r\n\r\nsent = '''\r\nThe/AT grand/JJ jury/NN commented/VBD on/IN a/AT number/NN of/IN\r\nother/AP topics/NNS ,/, AMONG/IN them/PPO the/AT Atlanta/NP and/CC\r\nFulton/NP-tl County/NN-tl purchasing/VBG departments/NNS which/WDT it/PPS\r\nsaid/VBD ``/`` ARE/BER well/QL operated/VBN and/CC follow/VB generally/RB\r\naccepted/VBN practices/NNS which/WDT inure/VB to/IN the/AT best/JJT\r\ninterest/NN of/IN both/ABX governments/NNS ''/'' ./.\r\n'''\r\n\r\n[nltk.tag.str2tuple(elem) for elem in sent.split()]\r\n\r\nnltk.corpus.indian.tagged_words()\r\nnltk.corpus.indian.tagged_sents()\r\n\r\n# Let's see which of these tags\r\n# are the most common in the news category\r\n# of the Brown corpus\r\n\r\ntext = nltk.corpus.brown.tagged_words(categories = \"news\",tagset = 'universal')\r\ntaglist = [tuple[1] for tuple in 
text]\r\nfd = nltk.FreqDist(taglist)\r\nfd.most_common()\r\nfd.plot(cumulative = True)\r\nnltk.app.concordance()\r\n\r\n# NOUNS\r\n\r\nword_tag_pairs = nltk.bigrams(text)\r\nword_tag_pairs = list(word_tag_pairs)\r\ntaglist = [pair[0][1] for pair in word_tag_pairs if pair[1][1]==\"NOUN\"]\r\nfd = nltk.FreqDist(taglist)\r\nfd.most_common()\r\n\r\n\r\n# VERBS\r\n\r\nwsj = nltk.corpus.treebank.tagged_words(tagset = \"universal\")\r\nfd = nltk.FreqDist(wsj)\r\nverblist =[pair[0][0] for pair in fd.most_common() if pair[0][1]==\"VERB\"]\r\nverblist[:10]\r\n\r\ncfd1 = nltk.ConditionalFreqDist(wsj)\r\ncfd1[\"yield\"].most_common()\r\ncfd1[\"play\"].most_common()\r\ncfd1[\"cut\"].most_common()\r\n\r\nwsj2 = nltk.corpus.treebank.tagged_words()\r\ncfd2 = nltk.ConditionalFreqDist((tag,word) for (word,tag) in wsj2)\r\ncfd2[\"NOUN\"].most_common(5)\r\ncfd2[\"VBN\"].most_common(5)\r\ncfdvbn = cfd2[\"VBN\"]\r\nlist(cfdvbn)\r\n\r\nwlist = [w for w in cfd1.conditions() if 'VBD' in cfd1[w] and 'VBN' in cfd1[w]]\r\nsorted(wlist)[:5]\r\n\r\nidx1 = wsj.index((\"kicked\",\"VBD\"))\r\nwsj[idx1-6:idx1+6]\r\n\r\nidx2 = wsj.index((\"kicked\",\"VBN\"))\r\nwsj[idx2-6:idx2+6]\r\n\r\n\"VBN\" in cfd2.conditions()\r\npastpartlist = list(cfd2[\"VBN\"])\r\n(\"Pierre\",\"NNP\") in wsj2\r\npairlist = [wsj2[i-1] for i in range(0,len(wsj2)) for word in pastpartlist if word in wsj2[i] ]\r\npairlist[:5]\r\nlen(pairlist)\r\nfd = nltk.FreqDist(pairlist)\r\nfd.plot(20)\r\n\r\n# UNSIMPLIFIED TAGS\r\n\r\n\r\n# Finally, let's look for words that are\r\n# highly ambiguous as to their part of speech tag.\r\n#\r\n# Understanding why such words are tagged as\r\n# they are in each context can help us\r\n# clarify the distinctions between the tags.\r\n\r\nbrown_news_tagged = nltk.corpus.brown.tagged_words(categories = \"news\", tagset = \"universal\")\r\ndata = nltk.ConditionalFreqDist((word.lower(),tag) for (word,tag) in brown_news_tagged)\r\n\r\nfor word in sorted(data.conditions()):\r\n if len(data[word]) > 3:\r\n tags = [tag for (tag, _) in data[word].most_common()]\r\n print(word, \" \".join(tags))\r\n\r\nnltk.app.concordance()\r\n\r\n\r\nfrom collections import defaultdict\r\nanagrams = defaultdict(list)\r\nwords = nltk.corpus.words.words(\"en\")\r\nfor word in words:\r\n key = \"\".join(sorted(word))\r\n anagrams[key].append(word)\r\n\r\nanagrams['aeilnrt']\r\n\r\n# Since accumulating words like this is\r\n# such a common task, NLTK provides a\r\n# more convenient way of creating a\r\n# defaultdict(list),\r\n# in the form of\r\n# nltk.Index()\r\n\r\nanagrams = nltk.Index((''.join(sorted(w)), w) for w in words)\r\nanagrams[\"aeilnrt\"]\r\n\r\n\r\npos = defaultdict(lambda: defaultdict(int))\r\nbrown_news_tagged = brown.tagged_words(categories = \"news\", tagset = \"universal\")\r\nfor ((w1,t1),(w2,t2)) in nltk.bigrams(brown_news_tagged):\r\n pos[(w2,t1)][t2] +=1\r\n\r\n# A POS tagger could use such information\r\n# to decide that the word right,\r\n# when preceded by a determiner,\r\n# should be tagged as ADJ.\r\npos[(\"right\",\"DET\")]\r\npos[(\"right\",\"DET\")][\"ADJ\"]\r\n\r\n\r\n# AUTOMATIC TAGGING\r\n\r\n# LOOKUP TAGGER\r\n\r\nfd = nltk.FreqDist(nltk.corpus.brown.words(categories = \"news\"))\r\ncfd = nltk.ConditionalFreqDist(nltk.corpus.brown.tagged_words(categories = \"news\"))\r\nmost_freq_words = fd.most_common(100)\r\nlikely_tags = dict((word,cfd[word].max()) for (word,_) in most_freq_words)\r\n\r\nbaseline_tagger = nltk.UnigramTagger(model = likely_tags)\r\n# The below line of code to find accuracy\r\n# could not run in 
front of me\r\nbaseline_tagger.evaluate(nltk.corpus.brown.tagged_sents())\r\n\r\n# CHECK MANY ENTRIES AS NONE ALSO\r\nsent = nltk.corpus.brown.sents(categories = \"news\")[5]\r\nbaseline_tagger.tag(sent)\r\nsent = nltk.corpus.brown.sents(categories = \"news\")[3]\r\nbaseline_tagger.tag(sent)\r\n\r\nbaseline_tagger = nltk.UnigramTagger(model=likely_tags,backoff=nltk.DefaultTagger('NN'))\r\n\r\n# Let us put all together\r\n# Let us create and evaluate LOOKUP TAGGERS having\r\n# a range of sizes\r\n\r\ndef performance(cfd,wordlist):\r\n lt = dict( (word,cfd[word].max()) for word in wordlist )\r\n baseline_tagger = nltk.UnigramTagger(model = lt, backoff = nltk.DefaultTagger(\"NN\") )\r\n return baseline_tagger.evaluate(nltk.corpus.brown.tagged_sents(categories = \"news\"))\r\n\r\ndef display():\r\n import pylab\r\n word_freqs = nltk.FreqDist(nltk.corpus.brown.words(categories = \"news\")).most_common()\r\n words_by_freq = [ w for (w,_) in word_freqs]\r\n cfd = nltk.ConditionalFreqDist(nltk.corpus.brown.tagged_words(categories = \"news\"))\r\n sizes = 2 ** pylab.arange(15)\r\n perfs = [performance(cfd,words_by_freq[:size]) for size in sizes]\r\n pylab.plot(sizes,perfs,'-bo')\r\n pylab.title(\"Lookup Tagger Performance with Varying Model Size\")\r\n pylab.xlabel(\"Model Size\")\r\n pylab.ylabel(\"Performance\")\r\n pylab.show()\r\n\r\ndisplay()\r\n\r\n\r\n# N-GRAM TAGGING\r\n\r\n# 5.1\r\n# UNIGRAM TAGGING\r\n\r\nfrom nltk.corpus import brown\r\nbrown_tagged_sents = brown.tagged_sents(categories='news')\r\nbrown_sents = brown.sents(categories='news')\r\nunigram_tagger = nltk.UnigramTagger(brown_tagged_sents)\r\nunigram_tagger.tag(brown_sents[2007])\r\nunigram_tagger.evaluate(brown_tagged_sents)\r\n\r\n# SEPERATING THE TRAINING AND TEST DATA\r\nsize = int(len(brown_tagged_sents)*0.9)\r\ntraindata = brown_tagged_sents[:size]\r\ntestdata= brown_tagged_sents[size:]\r\nunigram_tagger = nltk.UnigramTagger(traindata)\r\nunigram_tagger.evaluate(testdata)\r\n\r\n# GENERAL N-GRAM TAGGING\r\n\r\n# An N-GRAM TAGGER is a generalization of Unigram\r\n# Tagger\r\n\r\nbigram_tagger = nltk.BigramTagger(traindata)\r\nbigram_tagger.tag(brown_sents[2007])\r\nunseen_sent = brown_sents[4203]\r\nbigram_tagger.tag(unseen_sent)\r\n\r\n# Overall Accuracy (VERY LOW)\r\nbigram_tagger.evaluate(testdata)\r\n\r\n# As n gets larger, the specificity of the contexts\r\n# increases,\r\n# as does the chance that\r\n# the data we wish to tag contains contexts\r\n# that were not present in the training data.\r\n# This is known as the sparse data problem,\r\n# and is quite pervasive in NLP.\r\n\r\n# COMBINING TAGGERS\r\n\r\nt0 = nltk.DefaultTagger(\"NN\")\r\nt1 = nltk.UnigramTagger(traindata, backoff = t0)\r\nt2 = nltk.BigramTagger(traindata,backoff = t1)\r\nt2.evaluate(testdata) # 0.8449\r\n\r\nt3 = nltk.UnigramTagger(traindata,backoff = t2)\r\nt3.evaluate(testdata) # 0.8392\r\n\r\n\r\n\r\n\r\n# QUE-3\r\ntemp = \"They wind back the clock, while we chase after the wind\"\r\nind_words = nltk.word_tokenize(temp)\r\nnltk.pos_tag(ind_words)\r\nnltk.help.upenn_tagset('VBP') # VERB PRESENT TENSE\r\nnltk.help.upenn_tagset('DT') # DETERMINER\r\nnltk.help.upenn_tagset('PRP') # PRONOUN, PERSONAL\r\nnltk.help.upenn_tagset('NN') # NOUN, COMMON, SINGULAR OR MASS\r\n\r\n# QUE-5\r\ntemp = dict ({\"Jay\":\"Hello\",\"Sanjay\":\"Hi\"})\r\ntemp[\"Seema\"] # ERROR\r\ndel temp[\"Jay\"]\r\ntemp[\"Jay\"] # GOT DELETED\r\ntemp[\"Sanjay\"]\r\n\r\n# QUE-7\r\ntemp1 = dict({\"Jay\":\"Hello\", \"Sanjay\":\"Hi\"})\r\ntemp2 = dict({\"Seema\" : 
\"Namastey\",\"Ramesh\" : \"Satsriakal\"})\r\ntemp1[\"Ramesh\"]\r\ntemp1.update(temp2)\r\ntemp1[\"Ramesh\"] # TEMP1 updated with the contents of TEMP2\r\ntemp2[\"Jay\"]\r\n\r\n\r\n\r\n# QUE-10\r\nfrom nltk.corpus import brown\r\nbrown_tagged_sents = brown.tagged_sents()\r\n# BELOW, WE TRAINED A UNIGRAM TAGGER\r\nunigram_tagger = nltk.UnigramTagger(brown_tagged_sents)\r\nfrom nltk.corpus import treebank\r\ntreebank.fileids()\r\ntd_sents = treebank.tagged_sents()\r\nsents = treebank.sents()\r\nunigram_tagger.tag(sents[0])\r\ntd_sents[0]\r\nnltk.help.upenn_tagset('MD') # MODAL AUXILLIARY\r\nnltk.help.upenn_tagset('JJ') # Adjective Or Numeral, Ordinal\r\nnltk.help.upenn_tagset('NNS') # NOUN, COMMON, PLURAL\r\nnltk.help.upenn_tagset('NNP') # NOUN, PROPER, SINGULAR\r\n\r\n\r\n\r\n# QUE-11\r\nhelp(nltk.AffixTagger) # TWO PARAMETERS (AFFIX LENGTH, MIN STEM LENGTH)\r\n# A tagger that chooses a token's tag based on a leading or trailing\r\n# | substring of its word string. (It is important to note that these\r\n# | substrings are not necessarily \"true\" morphological affixes). In\r\n# | particular, a fixed-length substring of the word is looked up in a\r\n# | table, and the corresponding tag is returned. Affix taggers are\r\n# | typically constructed by training them on a tagged corpus.\r\n# |\r\n# | Construct a new affix tagger.\r\n# |\r\n# | :param affix_length: The length of the affixes that should be\r\n# | considered during training and tagging. Use negative\r\n# | numbers for suffixes.\r\n# | :param min_stem_length: Any words whose length is less than\r\n# | min_stem_length+abs(affix_length) will be assigned a\r\n# | tag of None by this tagger.\r\n\r\nfrom nltk.corpus import brown\r\nbrown_tagged_sents = brown.tagged_sents()\r\nbrown_sents = brown.sents()\r\n# SEPERATING THE TRAINING AND TEST DATA\r\nsize = int(len(brown_tagged_sents)*0.9)\r\ntraindata = brown_tagged_sents[:size]\r\ntestdata= brown_tagged_sents[size:]\r\n\r\n# FOR PREFIXES\r\nresults = list()\r\nfor i in range(1,5):\r\n for j in range(1,5):\r\n tagger = nltk.AffixTagger(traindata,affix_length = i,min_stem_length = j)\r\n results.append(\"Affix Length: \" + str(i) + \", Min Stem Length: \" + str(j) + \", Accuracy: \" + str(tagger.evaluate(testdata)) )\r\n\r\n# FOR SUFFIXES\r\nresults1 = list()\r\nfor i in range(-3,0):\r\n for j in range(1,4):\r\n tagger = nltk.AffixTagger(traindata,affix_length = i,min_stem_length = j)\r\n results1.append(\"Affix Length: \" + str(i) + \", Min Stem Length: \" + str(j) + \", Accuracy: \" + str(tagger.evaluate(testdata)) )\r\n\r\n\r\ntagger = nltk.AffixTagger(traindata,affix_length = 2,min_stem_length = 2)\r\ntagger.evaluate(testdata)\r\ntagger.tag(nltk.untag(testdata[0]))\r\ntagger.tag(\"I want to play lba\".split())\r\n\r\n\r\n# QUE-12\r\nfrom nltk.corpus import brown\r\nbrown_tagged_sents = brown.tagged_sents(tagset = \"universal\")\r\nbrown_sents = brown.sents()\r\n# SEPERATING THE TRAINING AND TEST DATA\r\nsize = int(len(brown_tagged_sents)*0.9)\r\ntraindata = brown_tagged_sents[:size]\r\ntestdata= brown_tagged_sents[size:]\r\nbigram_tagger = nltk.BigramTagger(traindata)\r\nbigram_tagger.tag(nltk.untag(traindata[4]))\r\ntraindata[4]\r\nbigram_tagger.evaluate(traindata)\r\nbigram_tagger.tag(nltk.untag(testdata[5]))\r\ntestdata[5]\r\nbigram_tagger.evaluate(testdata) # 0.47\r\n\r\n\r\n# QUE-14\r\nfrom nltk.corpus import brown\r\ntagged_words = brown.tagged_words()\r\ntaglist = [tag for (word,tag) in tagged_words]\r\nreqlist = sorted(set(taglist)) # 472 (It includes (, ) and many other 
symbols)\r\n\r\n\r\n# QUE-15\r\nnltk.help.upenn_tagset('NNS') # NOUN, COMMON, PLURAL\r\nnltk.help.upenn_tagset('NNP') # NOUN, PROPER, SINGULAR\r\nnltk.help.upenn_tagset('NN') # NOUN, COMMON, SINGULAR OR MASS\r\nfrom nltk.corpus import brown\r\nprint(nltk.corpus.brown.readme())\r\ntagged_sents = brown.tagged_sents()\r\n# CFD with 472 Conditions\r\ncfd = nltk.ConditionalFreqDist( [(tag,word) for sent in tagged_sents for (word,tag) in sent ] )\r\nfd1 = cfd[\"NN\"] # SINGULAR\r\nfd2 = cfd[\"NNS\"] # PLURAL\r\nfd1d = dict(fd1)\r\nfd2d = dict(fd2)\r\nwordlist1 = fd1.keys()\r\nwordlist2 = fd2.keys()\r\nwordlist2 = [word[:-1] for word in wordlist2]\r\nreqlist = list()\r\nfor word1 in wordlist2:\r\n for word2 in wordlist1:\r\n if word1==word2:\r\n if fd2[word1 + \"s\"] > fd1[word2]:\r\n reqlist.append(word1)\r\nlen(reqlist) # 791\r\nfd1[\"error\"] # 34\r\nfd2[\"errors\"] # 41\r\n# fd1d.keys()\r\n# fd2d.keys()\r\n#--------------------------\r\n","sub_path":"NLP-5.py","file_name":"NLP-5.py","file_ext":"py","file_size_in_byte":12838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334814234","text":"#!/usr/bin/env python3\n###\n# (C) Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n###\nimport sys\nif sys.version_info < (3, 2):\n raise Exception('Must use Python 3.2 or later')\n\nimport hpOneView as hpov\nfrom pprint import pprint\n\n\ndef acceptEULA(con):\n # See if we need to accept the EULA before we try to log in\n con.get_eula_status()\n try:\n if con.get_eula_status() is True:\n print('EULA display needed')\n con.set_eula('no')\n except Exception as e:\n print('EXCEPTION:')\n print(e)\n\n\ndef login(con, credential):\n # Login with givin credentials\n try:\n con.login(credential)\n except:\n print('Login failed')\n\n\ndef delprofiles(srv):\n srvrs = srv.get_servers()\n for server in srvrs:\n if server['powerState'] == 'On':\n print(('Powering Off Server: %s' % server['name']))\n ret = srv.set_server_powerstate(server, 'Off', force=True)\n pprint(ret)\n\n profiles = srv.get_server_profiles()\n for profile in profiles:\n print(('Removing Profile %s' % profile['name']))\n ret = srv.remove_server_profile(profile)\n pprint(ret)\n\n\ndef defprofile(srv, sts, net):\n # See if we need to turn any servers off\n connections = []\n servers = srv.get_servers()\n ser = None\n for server in servers:\n if server['state'] == 'NoProfileApplied':\n if server['powerState'] == 'On':\n srv.set_server_powerstate(server, 'Off', force=True)\n ser = server\n break\n if not ser:\n print('Error, no valid server found to install profile')\n return\n print('Creating profile for %s' % (ser['name']))\n spp = sts.get_spps()[0]\n enets = net.get_enet_networks()\n enet = None\n networks = ['VLAN-10-A', 'VLAN-10-B', 'VLAN-20-A', 'VLAN-20-B']\n lom = dict.fromkeys(networks)\n for network in enets:\n name = network['name']\n if network['name'] in networks:\n lom[network['name']] = network\n\n for name, enet in sorted(lom.items()):\n if enet is None:\n print('Error, can not find network: %s' % name)\n return\n connections.append(hpov.common.make_profile_connection_dict(enet, requestedMbps=1500))\n\n fcnets = net.get_fc_networks()\n for fcnet in fcnets:\n if fcnet['name'] == '3PAR SAN A':\n fcneta = fcnet\n if fcnet['name'] == '3PAR SAN B':\n fcnetb = fcnet\n\n connections.append(hpov.common.make_profile_connection_dict(fcneta,\n functionType='FibreChannel',\n boot=hpov.common.make_profile_connection_boot_dict(priority='Primary')))\n connections.append(hpov.common.make_profile_connection_dict(fcnetb,\n functionType='FibreChannel',\n boot=hpov.common.make_profile_connection_boot_dict(priority='Secondary')))\n profile = hpov.common.make_profile_dict('Profile-' + ser['serialNumber'],\n ser, connections=connections)\n profile = srv.create_server_profile(profile)\n pprint(profile)\n\n\ndef main():\n parser = argparse.ArgumentParser(add_help=True, description='Usage')\n parser.add_argument('-a', '--appliance', dest='host', required=True,\n help='HP OneView Appliance hostname or IP')\n parser.add_argument('-u', '--user', dest='user', required=False,\n default='Administrator', help='HP OneView Username')\n parser.add_argument('-p', '--pass', dest='passwd', required=False,\n help='HP OneView Password')\n parser.add_argument('-c', '--certificate', dest='cert', required=False,\n help='Trusted SSL Certificate Bundle in PEM '\n '(Base64 Encoded DER) Format')\n parser.add_argument('-r', '--proxy', dest='proxy', required=False,\n help='Proxy 
(host:port format)')\n    parser.add_argument('-d', dest='delete', required=False,\n                        action='store_true', help='Delete all server profiles and exit')\n\n    args = parser.parse_args()\n    credential = {'userName': args.user, 'password': args.passwd}\n\n    con = hpov.connection(args.host)\n    srv = hpov.servers(con)\n    net = hpov.networking(con)\n    sts = hpov.settings(con)\n\n    if args.proxy:\n        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])\n    if args.cert:\n        con.set_trusted_ssl_bundle(args.cert)\n\n    login(con, credential)\n    acceptEULA(con)\n    if args.delete:\n        delprofiles(srv)\n        sys.exit()\n\n    defprofile(srv, sts, net)\n\nif __name__ == '__main__':\n    import sys\n    import argparse\n    sys.exit(main())\n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n","sub_path":"examples/scripts/define-profile.py","file_name":"define-profile.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118983769","text":"#!/usr/bin/python3.6\n# Converts right ascension to degrees longitude. Also appends data from nbr_num and converts to number of stars per steradian on radec_decimals.txt\nimport math\n\n\n# Read data from nbr_num.txt\nwith open("nbr_num.txt", "r") as file:\n    nbr_num = file.readlines()\nfile.close()\n\n# Read data from radec_decimals.txt\nwith open("radec_decimals.txt", "r") as file:\n    coords = file.readlines()\nfile.close()\n\n# Write nbr_num data to radec_decimals.txt\nwith open("radec_decimals.txt", "w") as file: \n    # Append nbr_num data to text file\n    for i, line in enumerate(coords):\n        # Strip newlines off line and nbr_num\n        line = line.strip('\\n')\n        nbr_num[i] = nbr_num[i].strip('\\n')\n\n        # Convert right ascension to degrees longitude\n        setValues = line.split(" ")\n        deg_lon = float(setValues[0]) * 15\n\n        # Convert nbr_num (star density) to number of stars per steradian\n        # Add 5 and subtract 5 degrees to get fringe values\n        # math.sin() then calculate steradians\n        ra_upper = deg_lon + 5\n        ra_lower = deg_lon - 5\n        if ra_lower < 0: # adjust to zero if the lower ra value ends up less than zero\n            ra_lower = 0\n        \n        dec_upper = float(setValues[1]) + 5\n        dec_upper = math.radians(dec_upper) # convert to radians so that math.sin() can work properly\n        dec_lower = float(setValues[1]) - 5 \n        dec_lower = math.radians(dec_lower)\n\n        # Plug in mathematical formula to find the steradian for each centered ra and dec value\n        # Originally taking the integral of r^2*cos(y)dydx\n        # After integrating, you get [sin(y2) - sin(y1)]*[x2 - x1]\n        delta_sin_y = math.sin(dec_upper) - math.sin(dec_lower) # sin(y2) - sin(y1)\n        delta_x = ra_upper - ra_lower # x2 - x1\n        sterradian = delta_sin_y * delta_x \n\n        # Divide nbr_num by steradian to obtain proper units\n        starDensity = float(nbr_num[i]) / sterradian\n        nbr_num[i] = str(int(starDensity))\n\n        # Concatenate converted ra as well as the star density to string\n        str_lon = str(deg_lon) # convert deg_lon to string and store in a variable\n\n        len_str = len(str_lon)\n        if len_str < 8: # for formatting purposes, ensure "shorter" decimals are the same length as the longer ones\n            digits = 8 - len_str # find the number of zeros that need to be added after the decimal\n\n            # Add the zeros as determined by "digits"\n            for j in range(0, digits):\n                str_lon = str_lon + "0"\n\n        line = str_lon[0:8] + " " + setValues[1] + " " + nbr_num[i] + "\\n" \n\n        # Write newly concatenated string to coords and file\n        coords[i] = line\n        
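A minimal, self-contained version of the solid-angle arithmetic this record is driving at (the function name cell_solid_angle is illustrative, not from the script). Note that the textbook formula takes the RA span in radians, whereas the script above multiplies by a span left in degrees, so its per-steradian values carry an extra factor of 180/pi:

import math

def cell_solid_angle(ra_lo_deg, ra_hi_deg, dec_lo_deg, dec_hi_deg):
    # Solid angle of an RA/Dec cell: (sin(dec2) - sin(dec1)) * (ra2 - ra1),
    # with the RA difference expressed in radians.
    d_ra = math.radians(ra_hi_deg - ra_lo_deg)
    d_sin_dec = math.sin(math.radians(dec_hi_deg)) - math.sin(math.radians(dec_lo_deg))
    return d_sin_dec * d_ra

# Sanity check: the full sphere subtends 4*pi steradians.
assert abs(cell_solid_angle(0, 360, -90, 90) - 4 * math.pi) < 1e-9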
file.write(coords[i])\nfile.close()\n","sub_path":"stars/appendfile.py","file_name":"appendfile.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556505399","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/5/25 PM8:12\n# @Author : Qiming Zhang\n# @File : FindModeInBST\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution(object):\n    L = []\n    def findMode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        if not root: return []\n        self.traversal(root)\n        result, maxCount = [], 1\n        cnt = 1\n        if len(self.L) == 1: return [root.val]\n        for i in range(1, len(self.L)):\n            if self.L[i - 1] == self.L[i]:\n                cnt += 1\n            else:\n                cnt = 1  # reset the run length (was `cnt == 1`, a no-op comparison)\n            if cnt > maxCount:\n                maxCount = cnt\n        cnt = 1\n        for i in range(1, len(self.L) ):\n            if self.L[i - 1] == self.L[i]:\n                cnt += 1\n            else:\n                cnt = 1  # same fix as above\n            if cnt == maxCount:\n                result.append(self.L[i])\n        return result\n\n\n    def traversal(self, root):\n        if not root: return\n        self.traversal(root.left)\n        self.L.append(root.val)\n        self.traversal(root.right)\n","sub_path":"BinaryTree/FindModeInBST.py","file_name":"FindModeInBST.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117597777","text":"import unittest\n\nfrom ..problem import Problem\nfrom ..algorithm_gradient_descent import GradientDescent\nfrom ..results import Results\n\n\nclass QuadraticProblem(Problem):\n\n    def set(self):\n        self.name = "TestGradientOptimization"\n        self.parameters = [{'name': 'x_1', 'initial_value': 2.5, 'bounds': [0, 5]},\n                           {'name': 'x_2', 'initial_value': 1.5, 'bounds': [0, 3]}]\n        self.costs = [{'name': 'F_1', 'criteria': 'minimize'}]\n\n    def evaluate(self, individual):\n        x = individual.vector\n        return [x[0]**2 + x[1]**2]\n\n\nclass TestGradientDescent(unittest.TestCase):\n    """ Tests a simple one-objective optimization problem."""\n\n    def test_gradient_descent(self):\n        problem = QuadraticProblem()\n\n        algorithm = GradientDescent(problem)\n        algorithm.options['max_population_number'] = 100\n        algorithm.options['max_population_size'] = 10\n        algorithm.run()\n\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"artap/tests/test_problem_gradient_descent.py","file_name":"test_problem_gradient_descent.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"28027886","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom Tkinter import *\n# event handler definitions\ndef move():\n    \"move the ball one step\"\n    global x1, y1, dx, dy, flag, speed\n    setColor()\n    x1, y1 = x1 +dx, y1 + dy\n    if x1 > 360:\n        x1, dx, dy = 360, 0, 15\n    if y1 > 360:\n        y1, dx, dy = 360, -15, 0\n    if x1 < 10:\n        x1, dx, dy = 10, 0, -15\n    if y1 < 10:\n        y1, dx, dy = 10, 15, 0\n    can1.coords(oval1, x1, y1, x1+30, y1+30)\n    can1.itemconfig(oval1, fill = color)\n    \n    if flag > 0:\n        kesleltetes = 100 / speed \n        abl1.after(kesleltetes, move) # loop again after the given delay in ms\n\ndef stop_it():\n    \"stop the animation\"\n    global flag\n    flag = 0\n\ndef start_it():\n    \"start the animation\"\n    global flag\n    if flag == 0:\n        flag = 1\n        move()\n    \ndef setSpeed(n):\n    \"set the speed\"\n    global speed\n    speed = int(n) \n    \ndef setColor():\n    \"set the color\"\n    global color\n    c = listBox.curselection()\n    if c :\n        n = int(c[0])\n        color = colorvalues[n]\n    \n    \n    \n#========== Main program =============\n# the following variables are used as globals:\nx1, y1 = 10, 10 # starting coordinates\ndx, dy = 15, 0 \nflag =0 \nspeed = 5\ncolor = 'red'\n\n# create the main (\"master\") widget:\nabl1 = Tk()\nabl1.title(\"Animation exercise with Tkinter\")\n\n# create the \"slave\" widgets:\ncan1 = Canvas(abl1,bg='dark grey',height=400,width=400)\ncan1.pack(side=LEFT)\noval1 = can1.create_oval(x1, y1, x1+30, y1+30, width=2, fill=color)\nButton(abl1,text='Quit',command=abl1.quit).pack(side=BOTTOM)\nButton(abl1,text='Start',command=start_it).pack()\nButton(abl1,text='Stop',command=stop_it).pack()\nscale = Scale(abl1, length = 200, label = \"Speed :\", orient = HORIZONTAL, \n    from_ = 1, to = 10, command = setSpeed, showvalue = 0, tickinterval = 1)\nscale.set(5)\nscale.pack()\n\n\ncolors = [\"red\", \"blue\", \"green\", \"yellow\", \"purple\", \"grey\", \"brown\", \"black\", \n    \"white\", \"orange\", \"pink\", \"dark green\"]\ncolorvalues = [\"red\", \"blue\", \"green\", \"yellow\", \"purple\", \"grey\", \"brown\", \"black\", \n    \"white\", \"orange\", \"pink\", \"dark green\"]\n    \nlistFrame = Frame(abl1)\nlistFrame.pack(padx=5, pady=5)\n    \nlistBox = Listbox(listFrame, selectmode = SINGLE)\nfor c in colors :\n    listBox.insert(END, c)\nlistBox.pack(side = LEFT, fill=Y) \nscrollBar = Scrollbar(listFrame)\nscrollBar.pack(side = RIGHT, fill=Y)\nscrollBar.config(command=listBox.yview)\nlistBox.config(yscrollcommand=scrollBar.set)\n\n\n\n# start the event listener (main loop):\nabl1.mainloop()\n","sub_path":"Python/level1/ablak8_a.py","file_name":"ablak8_a.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15090748","text":"#! 
/usr/bin/env python\nimport rospy\nimport tf\nfrom umic_test.msg import quaternion\nfrom umic_test.msg import euler\n\ndef callback(msg):\n\t# print(msg)\n\tangles = euler()\n\tangles.roll,angles.pitch,angles.yaw= tf.transformations.euler_from_quaternion([msg.x,msg.y,msg.z,msg.w])\n\tprint(angles)\n\tpub2.publish(angles)\n\n\n\nrospy.init_node(\"quaternions\")\nquaternions = quaternion()\nquaternions.x = 5\nquaternions.y = 5\nquaternions.z = 2\nquaternions.w = 8\n\nrate = rospy.Rate(1)\npub1 = rospy.Publisher('topic1',quaternion,queue_size = 10)\npub2 = rospy.Publisher('topic2',euler,queue_size = 10)\nsub = rospy.Subscriber('/topic1', quaternion, callback) \nwhile not rospy.is_shutdown():\n\tprint(quaternions)\n\tpub1.publish(quaternions)\n\trate.sleep()","sub_path":"ROS/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"310971174","text":"from odoo import fields, models, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass PickingsMerge(models.TransientModel):\n _name = 'stock.picking.merge'\n\n # RETURNS SELECTED TICKETS IDS\n @api.model\n def _selected_pickings(self):\n return [('id', 'in', self._context.get(\"actives\"))]\n\n # RETURNS SELECTED TICKETS IDS\n @api.model\n def _get_pickings(self):\n pickings = self.env['stock.picking'].search(self._selected_pickings())\n return pickings.ids\n\n @api.model\n def _get_products(self):\n products = {}\n for pick in self.all_pickings:\n for line in pick.move_ids_without_package:\n if not self.merge_bonus:\n if line.product_id.name in products:\n products[line.product_id.name]['amount'] += line.product_uom_qty\n else:\n products[line.product_id.name] = {}\n products[line.product_id.name]['amount'] = line.product_uom_qty\n\n products[line.product_id.name]['uom'] = line.product_uom.name\n products[line.product_id.name]['count'] = line.product_id.packaging_qty\n\n else:\n if line.product_id.default_code in products:\n products[line.product_id.default_code]['amount'] += line.product_uom_qty\n else:\n products[line.product_id.default_code] = {}\n products[line.product_id.default_code]['amount'] = line.product_uom_qty\n products[line.product_id.default_code]['name'] = line.product_id.name\n if (('count' in (products[line.product_id.default_code])) and (products[line.product_id.default_code]['count'] and line.product_id.packaging_qty > \\\n products[line.product_id.default_code]['count'])) or not ('count' in products[line.product_id.default_code]):\n products[line.product_id.default_code]['count'] = line.product_id.packaging_qty\n products[line.product_id.default_code]['uom'] = line.product_uom.name\n if self.merge_bonus:\n products_merged = {}\n for product in products:\n product_name = products[product]['name']\n products_merged[product_name] = dict(products[product])\n products_merged[product_name].pop('name')\n products = products_merged\n\n for product in products:\n if products[product]['count'] == 0:\n raise ValidationError(\n _(\"Please modify field 'Count' in product '%s', as it shouldn't equal zero.\" % product))\n products[product]['count_printed'] = products[product]['amount'] / products[product]['count']\n return products\n\n @api.model\n def _get_driver(self):\n driver = False\n pickings = self.env['stock.picking'].search(self._selected_pickings())\n if pickings:\n driver = pickings[0].driver_name.id\n return driver\n\n all_pickings = fields.Many2many(comodel_name='stock.picking', default=_get_pickings)\n 
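Stripped to its core, the _get_products method above is a per-key quantity aggregation; a plain-dict sketch of that pattern (the line dicts below are illustrative stand-ins for stock.move records, not Odoo API objects):

from collections import defaultdict

def merge_lines(lines, key="name"):
    # Sum move quantities per product key, as the wizard does per picking.
    totals = defaultdict(float)
    for line in lines:
        totals[line[key]] += line["qty"]
    return dict(totals)

print(merge_lines([{"name": "A", "qty": 2}, {"name": "A", "qty": 3}, {"name": "B", "qty": 1}]))
# -> {'A': 5.0, 'B': 1.0}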
driver_id = fields.Many2one('res.partner', 'Driver', default=_get_driver)\n date = fields.Date(string=\"Date\", default=fields.Date.today())\n merge_bonus = fields.Boolean(string=\"Merge Bonus\", default=True)\n\n @api.multi\n def print_report(self):\n assert len(self) == 1\n x = self._get_products()\n return (\n self.env['ir.actions.report'].search(\n [('report_name', '=', \"bi_pickings_report_merged.merged_pickings_template\")], limit=1).\n report_action(self.env[\"stock.picking.merge\"].browse(self.ids))\n )\n","sub_path":"bi_pickings_report_merged/models/merged_pickings_report.py","file_name":"merged_pickings_report.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180567243","text":"\"\"\"Django URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom Django import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.homepage, name='homeview'),\n path('login/', views.loginauth, name='loginview'),\n path('signup/', views.signuppage, name='signupview'),\n path('services/', views.services, name='servicesview'),\n path('services-details/', views.resultdetails, name='detailsview'),\n path('services-files/', views.resultsfile, name='filesview')\n]\n\nif settings.DEBUG :\n urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","sub_path":"Django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"405686089","text":"'''\n1st version\n\nimport csv\nfrom datetime import datetime\n\n\ndef add_msg(name, msg):\n file_pointer = open(\"Chat.csv\", \"a\")\n writer = csv.writer(file_pointer)\n writer.writerow([name, msg, datetime.now()])\n file_pointer.close()\n\ndef display_msgs():\n file_pointer = open(\"Chat.csv\", \"r\")\n rows = list(csv.reader(file_pointer))\n length = len(rows)\n i = 0\n while i < length:\n name = rows[i][0]\n message = rows[i][1]\n date = rows[i][2][11:-7]\n output = name + \"[ \" + date + \"]\" + \": \" + message\n print(output)\n i += 1\n\n\n\nname = input(\"What is your name? 
\")\ndisplay_msgs()\n\n\nwhile True:\n msg1 = input(\"Please add your message: \")\n add_msg(name, msg1)\n print(\"\\n\\n\\n\\n\\n\")\n display_msgs()\n\n\n'''\n\nfrom tkinter import *\nimport csv\nimport time\n\n# tic = time.perf_counter()\n\n# toc = time.perf_counter\n\n\ndef send_save_msg():\n\n chat_name = name_entrybox.get()\n msg = message_entrybox.get()\n\n chat_body.insert(END, chat_name + \":\" + msg + \"\\n\")\n message_entrybox.delete(0, END)\n\n file = open(\"chat_main.csv\", \"a\")\n newrecord = chat_name + \":\" + msg + \"\\n\"\n file.write(newrecord)\n file.close()\n\n\ndef display_msg():\n clear()\n file = open(\"chat_main.csv\", \"r\")\n rows = list(csv.reader(file))\n last_10 = rows[-10:]\n chat_body.insert(END, *last_10)\n message_entrybox.focus()\n \n\ndef clear():\n chat_body.delete(0, END)\n message_entrybox.focus()\n\n\ndef update_messages():\n display_msg()\n window.after(1000, update_messages)\n\nwindow = Tk()\nwindow.title(\"Chat\")\nwindow.geometry(\"530x520\")\n\nlabel_name = Label(text = \"Enter your name\")\nlabel_name.place (x = 30, y = 30) \n\nname_entrybox = Entry(text = \"\")\nname_entrybox.place(x = 30, y = 50, width = 100, height = 25)\n\nlabel_msg = Label(text = \"Enter your message\")\nlabel_msg.place (x = 30, y = 290) \n\nchat_body = Listbox()\nchat_body.place(x = 30, y = 80, width = 300, height = 200)\n\nmessage_entrybox = Entry(text = \"\")\nmessage_entrybox.place(x =30, y = 320, width = 300, height = 60)\n\nsend_button = Button(text = \"Send\", command = send_save_msg)\nsend_button.place(x = 350, y = 320) \n\ndisplay_msgs_button = Button(text= \"Display all messages\", command = display_msg)\ndisplay_msgs_button.place(x = 350, y = 150) \n\nclear_button = Button(text = \"Clear the messages\", command = clear)\nclear_button.place(x = 350, y = 250)\n \n\n\nupdate_messages()\nwindow.mainloop()\n\n\n\n\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"537468208","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport tobii_research as tr\nimport numpy as np\n\n###################################################\n#################### Constants ####################\n###################################################\n# tracker sample frequency (Hz = sample/sec)\n_SAMPLE_FREQ = 90\n# Identifying fixations and saccades in eye-tracking protocols - Salvucci, Dario D. Goldberg, Joseph H.\n# Sen and Megaw used a threshold of 20 degrees/second (= 0,349066 rad).\nANG_VELOCITY_THRESHOLD = 0.349066\n# Minimum duration of a fixation before it is sent to the front-end (90 here is 1 second as per _SAMPLE_FREQ)\n# Make sure this number is an integer at all times! Fixations are detected by an equality check where the number of\n# samples is compared == INTEGER ! 
15 samples ~= .1666666 second\nFIXATION_TRIGGER = 1/6*_SAMPLE_FREQ\n\n###################################################\n##################### Globals #####################\n###################################################\n# previous sample (origin, u_target, s_target)\nprevious_sample = None\n# Number of samples in the current fixation\nfixation_duration = 0\n\n###################################################\n####################### IVT #######################\n###################################################\n\n\ndef gaze_data_callback(gaze_data):\n \"\"\"\n Process incoming gaze data sample.\n :param gaze_data: New data reading to be handled\n \"\"\"\n ivt(gaze_data)\n\n\ndef ivt(gaze_data):\n \"\"\"\n Determine fixation and actuate browser.\n :return:\n \"\"\"\n global previous_sample, fixation_duration, body_size, driver\n\n # parse new sample\n origin, u_target, s_target = parse_data(gaze_data)\n\n if not previous_sample:\n previous_sample = origin, u_target, s_target\n pass\n\n p_origin, p_u_target, p_s_target = previous_sample\n\n # update previous sample\n previous_sample = origin, u_target, s_target\n\n # Calculate velocity (angle/sec) between previous sample and current sample\n ray1 = np.subtract(origin, u_target)\n ray2 = np.subtract(p_origin, p_u_target)\n angle = angle_between(ray1, ray2)\n\n # Label the new sample as either part of the fixation or a saccade, in which case the current fixation ends.\n if angle*_SAMPLE_FREQ > ANG_VELOCITY_THRESHOLD:\n if fixation_duration > 0:\n # print(\"Saccade. Fixation of {} seconds at {} ended.\".format(fixation_duration/90, s_target))\n fixation_duration = 0\n else:\n fixation_duration += 1\n\n # Actuate if the current fixation exceeds the minimum trigger duration and lies in a contingent area\n if fixation_duration == FIXATION_TRIGGER:\n x, y = (body_size['width']*s_target[0], body_size['height']*s_target[1])\n if in_contingent_area(x, y):\n action_chains = ActionChains(driver)\n action_chains.move_by_offset(x, y).context_click().perform()\n action_chains.reset_actions()\n print(\"Clicked at {}{}\".format(x, y))\n\n\ndef parse_data(gaze_data):\n \"\"\"\n Parse raw gaze data into usable characteristics if all necessary data is valid.\n :param gaze_data: Gaze data to be parsed\n :return: origin, u_target, s_target || None in case of invalid input data\n \n \n \n \"\"\"\n if gaze_data[\"left_gaze_origin_validity\"] and gaze_data[\"right_gaze_origin_validity\"]:\n origin = average_vertex(\n gaze_data[\"left_gaze_origin_in_user_coordinate_system\"],\n gaze_data[\"right_gaze_origin_in_user_coordinate_system\"]\n )\n u_target = average_vertex(\n gaze_data[\"left_gaze_point_in_user_coordinate_system\"],\n gaze_data[\"right_gaze_point_in_user_coordinate_system\"],\n )\n # 2Dimensional inputs!\n s_target = average_vertex(\n gaze_data[\"left_gaze_point_on_display_area\"],\n gaze_data[\"right_gaze_point_on_display_area\"]\n )\n return origin, u_target, s_target\n return None\n\n\ndef average_vertex(v1, v2):\n \"\"\"\n Calculate the center vertex. Default case 3D, other dimensions supported (slower implementation).\n :param v1: a vertex\n :param v2: another vertex\n :return: average of v1 and v2\n \"\"\"\n if len(v1) != 3:\n return [sum(i) / len(i) for i in zip(*(v1, v2))]\n return ((v1[0]+v2[0])/2,\n (v1[1]+v2[1])/2,\n (v1[2]+v2[2])/2)\n\n\ndef unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. 
\"\"\"\n return vector / np.linalg.norm(vector)\n\n\ndef angle_between(v1, v2):\n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2' \"\"\"\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n\ndef in_contingent_area(x, y):\n global body_size, driver\n selection_x = body_size['width'] / 4\n search_x = body_size['width'] / 4 * 3\n selection_y = 70 + (body_size['height'] - 70) / 3\n center_y_up = body_size['height'] - 100\n search_y = 130\n recommend_y = 70\n\n # fixation in informationview\n if x > search_x:\n return False\n # fixation in selectionview\n if x < selection_x:\n # fixation in seedview\n if y > selection_y:\n return True\n # fixation outside seedview\n return False\n # fixation in centerview\n if y > recommend_y and y < center_y_up:\n try:\n # search view\n driver.find_element_by_tag_name('input')\n if y > search_y:\n return True\n except NoSuchElementException:\n # recommendationview\n return True\n\n\ndef start_tracker():\n tracker = tr.find_all_eyetrackers()[0]\n if tracker:\n print(\"Successfully connected to eyetracker '{tracker_name}'\".format(tracker_name=tracker.device_name))\n tracker.subscribe_to(tr.EYETRACKER_GAZE_DATA, gaze_data_callback, as_dictionary=True)\n input(\"PRESS ANY KEY TO EXIT SCRIPT.\")\n tracker.unsubscribe_from(tr.EYETRACKER_GAZE_DATA, gaze_data_callback)\n\n###################################################\n###################### Selen ######################\n###################################################\n\n\ndriver = webdriver.Firefox()\ndriver.fullscreen_window()\n\n# open the application\ndriver.get('http://localhost:3000/')\n\n# wait for logged application (after oauth redirection)\nwait = WebDriverWait(driver, 300)\nwait.until(EC.url_contains(\"?access_token=\"))\n\n# get body dimension for action offset\nbody = driver.find_element_by_tag_name('body')\nbody_size = body.size\n\nstart_tracker()\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162265200","text":"\"\"\"\nBMI Calculator\nCory Stephens\nNovember 11, 2014\n\n-Made changes later in the day. 
Changed variable names for more clarity.\n-Made the UI slightly nicer.\n\n\"\"\"\n\n\n\nfrom tkinter import *\n\nclass App(Tk):\n\tdef __init__(self):\n\t\tTk.__init__(self)\n\t\tself.headerFont = (\"Comic Sans\", \"16\", \"bold italic\")\n\t\t\n\t\tself.title(\"BMI Calculator\")\n\t\tself.setInfo()\n\n\tdef setInfo(self):\n\t\tLabel(self, text = \"BMI Calculator\",\n\t\t\t\tfont = self.headerFont).grid(columnspan = 6)\n \n\t\t#get user's height\n\t\tLabel(self, text = \"Height (ft)\").grid(row = 1, column = 0)\n\t\tself.txtHeightFt = Entry(self)\n\t\tself.txtHeightFt.grid(row = 1, column = 1)\n\t\t#using the insert function to default all fields to \"0\"\n\t\tself.txtHeightFt.insert(0, \"0\")\n\t\t\n\t\tLabel(self, text = \" Height (in)\").grid(row = 1, column = 3)\n\t\tself.txtHeightIn = Entry(self)\n\t\tself.txtHeightIn.grid(row = 1, column = 4)\n\t\tself.txtHeightIn.insert(0, \"0\")\n\n\t\t#get user's weight\n\t\tLabel(self, text = \"Weight (lbs)\").grid(row = 2, column = 0)\n\t\tself.txtWeight = Entry(self)\n\t\tself.txtWeight.grid(row = 2, column = 1)\n\t\tself.txtWeight.insert(0, \"0\")\n\n\t\t#label for BMI output and status\n\t\tLabel(self, text = \"Your BMI:\").grid(row = 5, column = 0)\n\t\tself.lblBMI = Label(self, bg = \"#fff\", anchor = \"w\", relief = \"groove\")\n\t\tself.lblBMI.grid(row = 5, column = 1, sticky = \"we\")\n\t\tLabel(self, text = \"You are:\").grid(row = 5, column = 3)\n\t\tself.lblBMIStatus = Label(self)\n\t\tself.lblBMIStatus.grid(row = 5, column = 4)\n\n\n\t\t#button to calculate info\n\t\tself.btnCalc = Button(self, text = \"Calculate BMI\")\n\t\tself.btnCalc.grid(row = 10, columnspan = 5)\n\t\tself.btnCalc[\"command\"] = self.calcBMI\n\n\t\t\t \n\tdef calcBMI(self):\n\t\t\"\"\"calculate the BMI of a person using the formula\"\"\"\n\n\t\t#calculate BMI\n\t\tfeet = int(self.txtHeightFt.get())\n\t\tinches = int(self.txtHeightIn.get())\n\t\ttotalHeight = (12 * feet) + inches\n\n\t\tweight = float(self.txtWeight.get())\n\n\t\t#BMI needs to be a float, int * float is float\n\t\tbmi = weight * 703 / (totalHeight * totalHeight)\n\n\t\tself.lblBMI[\"text\"] = \"%.2f\" % bmi\n\n\n\t\t#label for BMI status\n\n\t\tif bmi < 18.5:\n\t\t\tself.lblBMIStatus[\"text\"] = \"Underweight\"\n\t\telif bmi < 24.9:\n\t\t\tself.lblBMIStatus[\"text\"] = \"Normal\"\n\t\telif bmi < 29.9:\n\t\t\tself.lblBMIStatus[\"text\"] = \"Overweight\"\n\t\telse:\n\t\t\tself.lblBMIStatus[\"text\"] = \"Obese\"\n\n\ndef main():\n\ta = App()\n\ta.mainloop()\n\t\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"BMICalc.py","file_name":"BMICalc.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"185558719","text":"from collections.abc import MutableMapping\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable\n\nimport appdirs\nimport tomlkit\n\nfrom pdm.exceptions import NoConfigError\nfrom pdm.utils import get_pypi_source\n\n\nclass Config(MutableMapping):\n \"\"\"A dict-like object for configuration key and values\"\"\"\n\n HOME_CONFIG = Path(appdirs.user_config_dir(\"pdm\"))\n CONFIG_ITEMS = {\n \"cache_dir\": \"The root directory of cached files\",\n \"python.path\": \"The Python interpreter path\",\n \"python.use_pyenv\": \"Use the pyenv interpreter\",\n \"pypi.url\": \"The URL of PyPI mirror, defaults to https://pypi.org/simple\",\n \"pypi.verify_ssl\": \"Verify SSL certificate when query PyPI\",\n }\n DEFAULT_CONFIG = {\n \"cache_dir\": appdirs.user_cache_dir(\"pdm\"),\n 
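The dotted keys in DEFAULT_CONFIG mirror what load_config below produces when it flattens nested TOML tables; a standalone sketch of that flattening (the function name flatten is illustrative, not from the module):

def flatten(data, prefix=""):
    # Turn {"python": {"use_pyenv": True}} into {"python.use_pyenv": True}.
    result = {}
    for key, value in data.items():
        dotted = "%s.%s" % (prefix, key) if prefix else key
        if isinstance(value, dict):
            result.update(flatten(value, dotted))
        else:
            result[dotted] = value
    return result

print(flatten({"cache_dir": "/tmp/pdm", "python": {"use_pyenv": True}}))
# -> {'cache_dir': '/tmp/pdm', 'python.use_pyenv': True}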
\"python.use_pyenv\": True,\n }\n DEFAULT_CONFIG.update(get_pypi_source())\n\n def __init__(self, project_root: Path):\n self.project_root = project_root\n self._data = self.DEFAULT_CONFIG.copy()\n self._dirty = {}\n\n self._project_config_file = self.project_root / \".pdm.toml\"\n self._global_config_file = self.HOME_CONFIG / \"config.toml\"\n self._project_config = self.load_config(self._project_config_file)\n self._global_config = self.load_config(self._global_config_file)\n # First load user config, then project config\n for config in (self._global_config, self._project_config):\n self._data.update(dict(config))\n\n def load_config(self, file_path: Path) -> Dict[str, Any]:\n def get_item(sub_data):\n result = {}\n for k, v in sub_data.items():\n if getattr(v, \"items\", None) is not None:\n result.update(\n {f\"{k}.{sub_k}\": sub_v for sub_k, sub_v in get_item(v).items()}\n )\n else:\n result.update({k: v})\n return result\n\n if not file_path.is_file():\n return {}\n return get_item(dict(tomlkit.parse(file_path.read_text(\"utf-8\"))))\n\n def save_config(self, is_global: bool = False) -> None:\n data = self._global_config if is_global else self._project_config\n data.update(self._dirty)\n file_path = self._global_config_file if is_global else self._project_config_file\n file_path.parent.mkdir(exist_ok=True)\n toml_data = {}\n for key, value in data.items():\n *parts, last = key.split(\".\")\n temp = toml_data\n for part in parts:\n temp = temp.setdefault(part, {})\n temp[last] = value\n\n with file_path.open(\"w\", encoding=\"utf-8\") as fp:\n fp.write(tomlkit.dumps(toml_data))\n self._dirty.clear()\n\n def __getitem__(self, key: str) -> Any:\n try:\n return self._data[key]\n except KeyError:\n raise NoConfigError(key) from None\n\n def __setitem__(self, key: str, value: Any) -> None:\n if key not in self.CONFIG_ITEMS:\n raise NoConfigError(key)\n if isinstance(value, str):\n if value.lower() == \"false\":\n value = False\n elif value.lower() == \"true\":\n value = True\n self._dirty[key] = value\n self._data[key] = value\n\n def __len__(self) -> int:\n return len(self._data)\n\n def __iter__(self) -> Iterable[str]:\n return iter(self._data)\n\n def __delitem__(self, key) -> None:\n raise NotImplementedError\n","sub_path":"pdm/project/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308279760","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n#===============================================================================\n#\n# Filename: F5_Check_IP_Action_pool_member_v4_Check_active_member.py\n#\n# USAGE: F5_Check_IP_Action_pool_member_v4_Check_active_member.py\n#\n# DESCRIPTION: \n#\n# OPTIONS: ---\n# REQUIREMENTS: ---\n# BUGS: ---\n# NOTES: ---\n# AUTHOR: Oscarob Wu(oscarobwu@gmail.com), \n# ORGANIZATION: \n# VERSION: 1.0\n# Created Time: 2021-01-22 08:59:30\n# Last modified: 2021-01-22 08:59\n# REVISION: ---\n#===============================================================================\n__author__ = 'oscarwu'\n__version__ = '1.0'\n\n# Standard Library\nimport sys\nimport re\nimport logging\nimport getpass\nimport time\n# Local Application/Library Specific\nfrom f5.bigip import ManagementRoot\nfrom f5.utils.responses.handlers import Stats\nimport datetime\nimport sys, getopt\n\nif len(sys.argv) < 1:\n print( \"\\n\\n\\tUsage: %s host user node\" % sys.argv[0])\n sys.exit()\n\n# Get login password from CLI\nF5_host = input('F5_Host: ')\nf5user = 
input('Username: ')\nf5pw = getpass.getpass('Password: ')\n# Connect to BIG-IP\nmgmt = ManagementRoot(F5_host, f5user, f5pw)\n\n# Get list of pools and pool members\npools = mgmt.tm.ltm.pools.get_collection()\nFORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s'\nlogging.basicConfig(format=FORMAT, level='INFO')\nlogger = logging.getLogger('set_pool_members_state')\n# Node to search for\nnode = input('Node list, comma-separated, e.g. 10.99.0.11,10.99.0.12,10.99.0.13\\nIPaddress: ')\nmember_list = node.split(',')\n#action = sys.argv[1]\n#action = input('[enabled, disabled, forced_offline, checked] : ')\nprint(\"Choose the power on/off action:\\n\")\nprint(\"[1] enabled\\n\")\nprint(\"[2] disabled\\n\")\nprint(\"[3] forced_offline\\n\")\nprint(\"[4] checked\\n\")\nSele = input(\"Your choice (press enter to skip): \")\nif Sele == '1':\n    action = 'enabled' # enable\nelif Sele == '2':\n    action = 'disabled' # disable\nelif Sele == '3':\n    action = 'forced_offline' # force offline\nelif Sele == '4':\n    action = 'checked' # check only\nelse:\n    action = \"\" # default value is none.\n#\nfail = mgmt.tm.sys.failover.load()\nfailOverStat = fail.apiRawValues['apiAnonymous'].rstrip()\n#\nfields = failOverStat.strip().split()\naabbcc = fields[1]\nprint( aabbcc )\n#\npool_list = []\n#pool_list.append(nodeMember + \":\" + nodeMemberPort)\n#\nfor poolna in pools:\n    member_nodes = [member.fullPath.split(':')[0] for member in poolna.members_s.get_collection()]\n    member_nodes_name = [member.name for member in poolna.members_s.get_collection()]\n    #\n    member_address = [member.address for member in poolna.members_s.get_collection()]\n    #\n    member = poolna.members_s.get_collection()\n    for nod_ip in member:\n        ckip = nod_ip.address\n        for node_list in member_list:\n            if node_list in ckip:\n                print( \"\\t\"+poolna.name + \"\\t\" + nod_ip.name )\n                pooln = mgmt.tm.ltm.pools.pool.load(name=poolna.name, partition='Common')\n                pm1 = pooln.members_s.members.load(partition='Common', name=nod_ip.name)\n                if aabbcc in [\"active\"]:\n                    for member in [pm1]:\n                        #time.sleep(1)\n                        my_pool = mgmt.tm.ltm.pools.pool.load(partition='Common', name=poolna.name)\n                        pool_stats = Stats(my_pool.stats.load())\n                        #print(pool_stats.stat.status_availabilityState)\n                        currm = pool_stats.stat.availableMemberCnt['value']\n                        if currm <= 1:\n                            print(currm)\n                            continue\n                        else:\n                            if action == 'enabled':\n                                # enables member\n                                logger.info('enables member %s, previous state: %s' %\n                                    (member.name, member.state))\n                                member.state = 'user-up'\n                                member.session = 'user-enabled'\n                            elif action == 'disabled':\n                                # disables member\n                                logger.info('disables member %s, previous state: %s' %\n                                    (member.name, member.state))\n                                member.session = 'user-disabled'\n                            elif action == 'forced_offline':\n                                # forces member offline\n                                logger.info('forces member offline %s, previous state: %s' %\n                                    (member.name, member.state))\n                                member.state = 'user-down'\n                                member.session = 'user-disabled'\n                            elif action == 'checked':\n                                # check online member\n                                stt = member.session\n                                #logger.info('checked online member %s, previous state: %s' %\n                                #    (member.name, member.state))\n                                print('\\tchecked online member %s, previous state: %s' %\n                                    (member.name, member.state))\n                                if \"monitor-enabled\" in stt:\n                                    print('\\t')\n                                else:\n                                    logger.info('another batch looks abnormal, please check')\n                                    #print(False)\n\n                            if action:\n                                member.update()\n                                print('\\tchecked member %s, state after this run : %s' %(member.name, member.state))\n                                pool_list.append(poolna.name)\n                            else:\n                                logger.info('readonly mode, no changes applied')\n\n                            logger.info('%s: %s %s' % (member.name, member.session, member.state))\n                else:\n                    print(\"this will do nothing, please point the script at the active F5 IP\")\n                    exit()\n#\n\n\nunique = []\nfor name in pool_list: # 1st loop\n    if name not in unique: # 2nd loop\n        unique.append(name)\n\n#\nprint(\"\\n\")\nnow = datetime.datetime.now()\nfnames = \"power on/off\"\nfor x in unique:\n    my_pool = mgmt.tm.ltm.pools.pool.load(partition='Common', name=(x))\n    my_pool_mbrs = my_pool.members_s.get_collection()\n    Count = 0\n    print ( \"\\n\" )\n    print (\"\\033[0;37;44m\\tCurrent Run date and time : \\033[0m\")\n    print (now.strftime(\"\\033[0;37;45m\\t%Y-%m-%d %H:%M:%S\\t\\t\\033[0m\"))\n    for pool_mbr in my_pool_mbrs:\n        mbr_stats = Stats(pool_mbr.stats.load())\n        dic_test = mbr_stats.stat.nodeName\n        dic_test1 = dic_test['description']\n        dic_test2 = dic_test1.replace('/Common/', '')\n        dic_btest = mbr_stats.stat.status_availabilityState\n        dic_btest1 = dic_btest['description']\n        dic_ctest = mbr_stats.stat.serverside_curConns\n        dic_ctest1 = dic_ctest['value']\n        dic_dtest = mbr_stats.stat.status_enabledState\n        dic_dtest1 = dic_dtest['description']\n        Count = ((Count+1))\n        #print ( \"%s_%02d pool_member: [ %s ] host status : %s current connections : \\033[43m[ %s ]\\033[0m\" % (fnames, Count, dic_test2, dic_btest1, dic_ctest1) )\n        if (dic_btest1 == 'available' or dic_dtest1 != 'enabled' or dic_btest1 == 'offline'):\n            if (dic_dtest1 == 'enabled' and dic_btest1 == 'available'):\n                print(\"%s pool_member: [ %s ] host status : \\033[0;37;42m[ %s ]\\033[0m current connections : \\033[43m[ %s ]\\033[0m\" %(x, dic_test2, dic_btest1, dic_ctest1))\n            elif (dic_dtest1 != 'disabled' or dic_btest1 != 'offline'):\n                print(\"%s pool_member: [ %s ] host status : \\033[0;37;41m[ %s ]\\033[0m current connections : \\033[43m[ %s ]\\033[0m\" %(x, dic_test2, dic_dtest1, dic_ctest1))\n            elif (dic_btest1 == 'offline'):\n                print(\"%s pool_member: [ %s ] host status : \\033[0;37;41m[ %s ]\\033[0m current connections : \\033[43m[ %s ]\\033[0m\" %(x, dic_test2, dic_btest1, dic_ctest1))\n\n\n    print ( \"\\n\" )\n    # vim:set nu et ts=4 sw=4 cino=>4:\n\n\n","sub_path":"F5_Check_IP_Action_pool_member_v4_Check_active_member.py","file_name":"F5_Check_IP_Action_pool_member_v4_Check_active_member.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124173270","text":"from matplotlib import pyplot as plt\n\n\ndef show_labelmap(labels, width):\n    \"\"\"\n    Shows a map with labels\n\n    :param labels: The labels\n    :param width: The width of the map\n    :return:\n    \"\"\"\n\n    plt.close()\n    plt.axis([0, width-1, 0, width-1])\n\n    for idx, l in enumerate(labels):\n\n        plt.annotate(l, ((idx // width), (idx % width)), size=5)\n\n    plt.show()\n\n\ndef context_map(contexts, width, height):\n    \"\"\"\n    To be used with a recursive SOM.\n    In a recursive SOM, each context is a full copy of the map.\n    Hence, we need lots of subplots to show the effect of context.\n\n    :param contexts:\n    :param width:\n    :param height:\n    :return:\n    \"\"\"\n\n    plt.close()\n    for idx, map in enumerate(contexts):\n        f = plt.subplot(width, height, idx+1)\n        f.axis(\"off\")\n        plt.imshow(map.reshape(width, height))\n\n    plt.show()","sub_path":"somber/visualization/simple_viz.py","file_name":"simple_viz.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505755264","text":"###################################\n# Script to run validation and write\n# to HTML and PDF \n# ganesans - Salilab - UCSF\n# ganesans@salilab.org\n###################################\nimport sys\nsys.path.insert(0, \"../master/pyext/src/\")\nimport pytz\nimport jinja2\nimport pandas as 
pd\nimport sys,os,glob\nimport numpy as np\nfrom validation import excludedvolume,get_input_information\nfrom validation import molprobity\nfrom validation import get_plots,sas,sas_plots\nfrom validation import utility\nfrom validation.Report import WriteReport\nimport pdfkit\nimport datetime,time\nimport pickle\nfrom multiprocessing import Process, Queue, Pool, Manager\nfrom collections import Counter\nimport argparse\nimport json\n#from validation.WKhtmlToPdf import wkhtmltopdf\n#import utility\n\n####################################################################################################################\n# Add input arguments for supp table\n#####################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', type=str, default='No', help =\"Physical principles used in modeling yes/no?\")\nparser.add_argument('-f',default='PDBDEV_00000001.cif',help =\"Input mmcif file\")\nparser.add_argument('-ls', type=list, default=['No location specified'], help =\"add location of your scripts\")\nparser.add_argument('-ld', type=list, default=['No location specified'], help =\"add location of your analysis files\")\nparser.add_argument('-m', type=list, default=['Method details unspecified'], help =\"add information on your method\")\nparser.add_argument('-models', type=str, default='1', help =\"number of models in an ensemble, if you have multiple ensembles, add comma-separated string\")\nparser.add_argument('-c', type=str, default='Distance threshold-based clustering used if ensembles are deposited', help =\"The type of clustering algorithm used to analyze the ensemble\")\nparser.add_argument('-mp', type=str, default='10 Å (average RMSF of the solution ensemble with respect to the centroid structure)', help =\"add model precision. Model precision is defined as average RMSF of the solution ensemble with respect to the centroid structure\")\nparser.add_argument('-sv', type=list, default=['Information related to sampling validation has not been provided' ], help =\"add model precision. 
Model precision is defined as average RMSF of the solution ensemble with respect to the centroid structure\")\nparser.add_argument('-v1', type=list, default=['Fit of model to information used to compute it has not been determined' ], help =\"Add information on satisfaction of input data/restraints\")\nparser.add_argument('-v2', type=list, default=['Fit of model to information not used to compute it has not been determined' ], help =\"Add information on satisfaction of data not used for modeling\")\nparser.add_argument('-dv', type=list, default=['Quality of input data has not be assessed'] , help =\"Add information on quality of input data\")\nparser.add_argument('-res', type=list, default=['Rigid bodies: 1 residue per bead.','Flexible regions: N/A'], help =\"Add information on model quality (molprobity or excluded volume)\")\n\nargs = parser.parse_args()\nif args.p.upper() == 'YES':\n physics='Excluded volume and Sequence connectivity.'\nelse:\n physics='Information about physical principles was not provided'\n#############################################################################################################################\n# Input for Jinja\n####################################################################################\nconfig = pdfkit.configuration(wkhtmltopdf='/usr/local/include/wkhtmltox/')\noptions = {\n 'page-size': 'Letter',\n 'margin-top': '0.5in',\n 'margin-right': '0.5in',\n 'margin-bottom': '0.5in',\n 'margin-left': '0.5in',\n 'enable-javascript': None,\n 'javascript-delay':'50000',\n 'header-left':'[page] of [topage]',\n 'footer-center':'Full wwPDB IM Structure Validation Report',\n 'footer-line':'',\n 'header-line':'',\n 'footer-spacing':'5',\n 'header-spacing':'5'\n}\n\noptions_supp = {\n 'page-size': 'A4',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'enable-javascript': None,\n 'javascript-delay':'500',\n 'header-left':'[page] of [topage]',\n 'footer-center':'wwPDB IM Methods Table',\n 'footer-line':'',\n 'header-line':'',\n 'footer-spacing':'5',\n 'header-spacing':'5'\n}\n\n#sys.path.append('/home/ganesans/PDB-dev/master/pyext/src/table')\n#sys.path.append('/home/ganesans/PDB-dev/master/pyext/src/table/images')\nd=datetime.datetime.now();t=pytz.timezone(\"America/Los_Angeles\");d1=t.localize(d)\ntimestamp=d1.strftime(\"%B %d, %Y -- %I:%M %p\")\n\n# Create directory\ndirNames = ['Output','Output/images','Supplementary','static','templates','static/images']\nfor name in dirNames:\n try:\n os.mkdir(name)\n print(\"Directory \" , name , \" Created \") \n except FileExistsError:\n print(\"Directory \" , name , \" already exists\")\n\ntemplateLoader = jinja2.FileSystemLoader(searchpath=\"../templates/\")\ntemplateEnv = jinja2.Environment(loader=templateLoader)\n\ntemplate_pdf = \"template_pdf.html\"\ntemplate_file_supp= \"supplementary_template.html\"\n\ntemplate_flask= [\"main.html\",\n \"data_quality.html\",\n \"model_quality.html\",\n \"model_composition.html\",\n \"formodeling.html\",\n \"notformodeling.html\",\n \"uncertainty.html\"]\n\n\nTemplate_Dict={}\nTemplate_Dict['date']=timestamp\n\n\n#############################################################################################################################\n# Jinja scripts\n#############################################################################################################################\n\ndef write_html(Template_Dict, template_file,dirName):\n template = templateEnv.get_template(template_file)\n 
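The write_* helpers in this record all follow the same render-then-convert pipeline; a bare-bones version of it (the inline template, file names, and title value are illustrative, and wkhtmltopdf must be installed for pdfkit to work):

import jinja2
import pdfkit

env = jinja2.Environment(loader=jinja2.DictLoader({"report.html": "<h1>{{ title }}</h1>"}))
html = env.get_template("report.html").render({"title": "Validation report"})
with open("report.html", "w") as fh:
    fh.write(html)
pdfkit.from_file("report.html", "report.pdf")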
outputText=template.render(Template_Dict)\n template_file=template_file.split('/')[1]\n with open(os.path.join(os.path.join(dirName,template_file)),\"w\") as fh:\n fh.write(outputText)\n\ndef write_pdf(mmcif_file,Template_Dict, template_file,dirName,dirName_Output):\n template = templateEnv.get_template(template_file)\n outputText=template.render(Template_Dict)\n with open(os.path.join(os.path.join(dirName,utility.get_output_file_temp_html(mmcif_file))),\"w\") as fh:\n fh.write(outputText)\n pdfkit.from_file(os.path.join(os.path.join(dirName,utility.get_output_file_temp_html(mmcif_file))), \n os.path.join(os.path.join(dirName_Output,utility.get_output_file_pdf(mmcif_file))),\n options=options)\n #os.remove(os.path.join(os.path.join(dirName,utility.get_output_file_temp_html(mmcif_file))))\n\ndef write_supplementary_table(mmcif_file,Template_Dict,template_file,dirName,dirName_supp):\n template = templateEnv.get_template(template_file)\n #(str(root_path / 'templates')\n outputText=template.render(Template_Dict)\n with open(os.path.join(os.path.join(dirName,utility.get_supp_file_html(mmcif_file))),\"w\") as fh:\n fh.write(outputText)\n pdfkit.from_file(os.path.join(os.path.join(dirName,utility.get_supp_file_html(mmcif_file))), \n os.path.join(os.path.join(dirName_supp,utility.get_supp_file_pdf(mmcif_file))) ,\n options=options_supp)\n\ndef write_json(mmcif_file,Template_Dict):\n j=json.dumps([{'Category': k, 'Itemized_List': v} for k,v in Template_Dict.items()], indent=4)\n with open(os.path.join(os.path.join(dirName,utility.get_output_file_json(mmcif_file))),\"w\") as fh:\n fh.write(j)\n fh.close()\n\ndef convert_html_to_pdf(template_file,pdf_name,dirName,dirName_Output):\n pdfkit.from_file(os.path.join(os.path.join(dirName,template_file)), \n os.path.join(os.path.join(dirName_supp,pdf_name)) ,\n options=options_supp)\n\n\n############################################################################################################################\n# Run script\n#################################################\n\nif __name__ == \"__main__\":\n utility.clean_all()\n manager = Manager() # create only 1 mgr\n d = manager.dict() # create only 1 dict\n report=WriteReport(args.f)\n template_dict=report.run_entry_composition(Template_Dict)\n template_dict,clashscore,rama,sidechain,exv_data=report.run_model_quality(template_dict)\n template_dict,sas_data,sas_fit=report.run_sas_validation(template_dict)\n report.run_quality_glance(clashscore,rama,sidechain,exv_data,sas_data,sas_fit)\n write_pdf(args.f,template_dict,template_pdf,dirNames[0],dirNames[0])\n template_dict=report.run_supplementary_table(template_dict,\n location=args.ls,\n physics=physics,\n method_details=args.m,\n sampling_validation=args.sv,\n validation_input=args.v1,\n cross_validation=args.v2,\n Data_quality=args.dv,\n clustering=args.c,\n resolution=args.res)\n write_supplementary_table(args.f,template_dict,template_file_supp,dirNames[2],dirNames[2])\n #for i in template_flask:\n # write_html(template_dict,template_flask_main,dirNames[-2])\n utility.clean_all()\n\n\n\n","sub_path":"example_sas/Execute.py","file_name":"Execute.py","file_ext":"py","file_size_in_byte":9515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133004017","text":"import torch\nimport torch.nn as nn\n\nfrom utils.utils_tasnet import choose_layer_norm\nfrom models.dprnn import IntraChunkRNN as LocallyRecurrentBlock\n\nEPS=1e-12\n\nclass GALR(nn.Module):\n def __init__(self, num_features, hidden_channels, 
num_blocks=6, causal=False, norm=True, eps=EPS):\n        super().__init__()\n        \n        # Network configuration\n        net = []\n        \n        for _ in range(num_blocks):\n            net.append(GALRBlock(num_features, hidden_channels, causal=causal, norm=norm, eps=eps))\n        \n        self.net = nn.Sequential(*net)\n\n    def forward(self, input):\n        \"\"\"\n        Args:\n            input (batch_size, num_features, S, chunk_size)\n        Returns:\n            output (batch_size, num_features, S, chunk_size)\n        \"\"\"\n        output = self.net(input)\n\n        return output\n\nclass GALRBlock(nn.Module):\n    def __init__(self, num_features, hidden_channels, causal, norm=True, eps=EPS):\n        super().__init__()\n        \n        self.intra_chunk_block = LocallyRecurrentBlock(num_features, hidden_channels, norm=norm, eps=eps)\n        self.inter_chunk_block = GloballyAttentiveBlock(num_features, hidden_channels, causal=causal, norm=norm, eps=eps)\n        \n    def forward(self, input):\n        \"\"\"\n        Args:\n            input (batch_size, num_features, S, chunk_size)\n        Returns:\n            output (batch_size, num_features, S, chunk_size)\n        \"\"\"\n        x = self.intra_chunk_block(input)\n        output = self.inter_chunk_block(x)\n        \n        return output\n\nclass GloballyAttentiveBlock(nn.Module):\n    \"\"\"\n    TODO: similarity of `MultiheadAttentionBlock`?\n    \"\"\"\n    def __init__(self, num_features, hidden_channels, causal, num_heads=8, norm=True, eps=EPS):\n        super().__init__()\n\n        self.norm = norm\n\n        if self.norm: \n            self.norm1d = choose_layer_norm(num_features, causal=causal, eps=eps)\n\n        # num_features plays the role of the attention embedding dimension\n        # and must be divisible by num_heads\n        self.multihead_attn = nn.MultiheadAttention(num_features, num_heads)\n        \n    def forward(self, input):\n        \"\"\"\n        Args:\n            input (batch_size, num_features, S, chunk_size)\n        Returns:\n            output (batch_size, num_features, S, chunk_size)\n        \"\"\"\n        batch_size, num_features, S, chunk_size = input.size()\n\n        # attend across chunks: fold the chunk positions into the batch axis\n        x = input.permute(2, 0, 3, 1).contiguous() # (S, batch_size, chunk_size, num_features)\n        x = x.view(S, batch_size * chunk_size, num_features)\n\n        residual = x\n        x, _ = self.multihead_attn(x, x, x) # (S, batch_size * chunk_size, num_features)\n        x = x + residual\n        \n        if self.norm:\n            x = x.permute(1, 2, 0) # (batch_size * chunk_size, num_features, S)\n            x = self.norm1d(x)\n            x = x.permute(2, 0, 1).contiguous() # back to (S, batch_size * chunk_size, num_features)\n        \n        x = x.view(S, batch_size, chunk_size, num_features)\n        output = x.permute(1, 3, 0, 2).contiguous() # (batch_size, num_features, S, chunk_size)\n\n        return output\n\ndef _test_global_attentive_block():\n    pass\n\ndef _test_galr():\n    batch_size = 4\n    num_features, chunk_size, S = 64, 10, 4\n    hidden_channels = 32\n    num_blocks = 3\n    \n    input = torch.randint(0, 10, (batch_size, num_features, S, chunk_size), dtype=torch.float)\n\n    # Causal\n    causal = True\n    \n    model = GALR(num_features, hidden_channels, num_blocks=num_blocks, causal=causal)\n    print(model)\n    output = model(input)\n    print(input.size(), output.size())\n    \n    # Non causal\n    causal = False\n    \n    model = GALR(num_features, hidden_channels, num_blocks=num_blocks, causal=causal)\n    print(model)\n    output = model(input)\n    print(input.size(), output.size())\n\n\nif __name__ == '__main__':\n    _test_global_attentive_block()\n    print()\n\n    _test_galr()\n    print()\n","sub_path":"src/models/galr.py","file_name":"galr.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"297475223","text":"#!/usr/bin/python\n\nimport Adafruit_BMP.BMP085 as BMP085\nimport time\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nimport Image\nimport ImageFont\nimport ImageDraw\n\n\n\nsensor = BMP085.BMP085()\ndisplay = Adafruit_SSD1306.SSD1306_128_64(rst=24)\n\nfragmentX=90\nfragmentY=-1\ndCX=112\ndCY=27\npressureY=48\n\ndef showTemperature(value):\n    whole=int(value)\n    text=\"{0:2d}\".format(whole)\n    for font in fonts[::-1]:\n        (w,h)=draw.textsize(text,font=font)\n        
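The loop above scans a list of preloaded fonts from largest to smallest and keeps the first whose rendered width fits the target column. A standalone sketch of that pattern against the same (older) Pillow draw.textsize API (the font path, sizes, and 40-pixel budget are placeholders, not values from the record):

from PIL import Image, ImageDraw, ImageFont

def largest_fitting_font(draw, text, max_width, font_path="FreeSans.ttf", sizes=(24, 18, 14, 10)):
    # Try big sizes first; fall back to the smallest if nothing fits.
    font = None
    for size in sizes:
        font = ImageFont.truetype(font_path, size)
        w, h = draw.textsize(text, font=font)
        if w <= max_width:
            break
    return font

img = Image.new('1', (128, 64))  # same 128x64 canvas as the SSD1306 display above
draw = ImageDraw.Draw(img)
font = largest_fitting_font(draw, "23", max_width=40)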
if w x2 : numpy.transpose, numpy.swapaxes, T \nx3 = np.transpose(x1)\nx4 = np.swapaxes(x1, 0, 1)\nx5 = x1.T\n\ny = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20]) #(10, ) <-> (10,1)\nx_pred = np.array([[10, 1.3]]) #(1, 2)\n\n#2.모델링\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=2))\nmodel.add(Dense(2))\nmodel.add(Dense(3))\nmodel.add(Dense(4))\nmodel.add(Dense(5))\nmodel.add(Dense(4))\nmodel.add(Dense(3))\nmodel.add(Dense(2))\nmodel.add(Dense(1))\n\n#3. 컴파일, 훈련\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(x3, y, epochs=2222, batch_size=1)\n\n#4. 평가, 예측\n'''\nloss = model.evaluate(x3, y)\nresult = model.predict([[10, 1.3]])\nprint('loss : ', loss)\nprint('result : ', result)\n\nepochs : 2222\nloss : 0.0006747512961737812\nresult : [[19.988302]]\n'''\ny_predict = model.predict(x3)\n\nplt.scatter(x3[:,:1], y)\nplt.plot(x3, y_predict, color='red')\nplt.show()\n","sub_path":"keras01/keras03_mlp1.py","file_name":"keras03_mlp1.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615100909","text":"# -*- coding: utf-8 -*-\n\"\"\"\n flask_jwt\n ~~~~~~~~~\n\n Flask-JWT module\n\"\"\"\n\nfrom collections import OrderedDict\nfrom datetime import timedelta\nfrom functools import wraps\n\nfrom itsdangerous import (\n TimedJSONWebSignatureSerializer,\n SignatureExpired,\n BadSignature\n)\n\nfrom flask import current_app, request, jsonify, _request_ctx_stack\nfrom flask.views import MethodView\nfrom werkzeug.local import LocalProxy\n\n__version__ = '0.2.0'\n\ncurrent_user = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_user', None))\n\n_jwt = LocalProxy(lambda: current_app.extensions['jwt'])\n\n\ndef _get_serializer():\n expires_in = current_app.config['JWT_EXPIRATION_DELTA']\n if isinstance(expires_in, timedelta):\n expires_in = int(expires_in.total_seconds())\n expires_in_total = expires_in + current_app.config['JWT_LEEWAY']\n return TimedJSONWebSignatureSerializer(\n secret_key=current_app.config['JWT_SECRET_KEY'],\n expires_in=expires_in_total,\n algorithm_name=current_app.config['JWT_ALGORITHM']\n )\n\n\ndef _default_payload_handler(user):\n return {\n 'user_id': user.id,\n }\n\n\ndef _default_encode_handler(payload):\n \"\"\"Return the encoded payload.\"\"\"\n return _get_serializer().dumps(payload).decode('utf-8')\n\n\ndef _default_decode_handler(token):\n \"\"\"Return the decoded token.\"\"\"\n try:\n result = _get_serializer().loads(token)\n except SignatureExpired:\n if current_app.config['JWT_VERIFY_EXPIRATION']:\n raise\n return result\n\n\ndef _default_response_handler(payload):\n \"\"\"Return a Flask response, given an encoded payload.\"\"\"\n return jsonify({'token': payload})\n\n\ndef _default_payload_auth_handler(request):\n data = request.get_json(force=True)\n username = data.get('username', None)\n password = data.get('password', None)\n criterion = [username, password, len(data) == 2]\n\n if not all(criterion):\n raise JWTError('Bad Request', 'Missing required credentials', status_code=400)\n\n user = _jwt.authentication_callback(username=username, password=password)\n\n return user\n\nCONFIG_DEFAULTS = {\n 'JWT_DEFAULT_REALM': 'Login Required',\n 'JWT_AUTH_URL_RULE': '/auth',\n 'JWT_AUTH_ENDPOINT': 'jwt',\n 'JWT_ALGORITHM': 'HS256',\n 'JWT_VERIFY': True,\n 'JWT_VERIFY_EXPIRATION': True,\n 'JWT_LEEWAY': 0,\n 'JWT_EXPIRATION_DELTA': timedelta(seconds=300),\n 'JWT_AUTH_HEADER_PREFIX': 'JWT',\n}\n\n\ndef jwt_required(realm=None):\n \"\"\"View decorator that 
requires a valid JWT token to be present in the request\n\n :param realm: an optional realm\n \"\"\"\n def wrapper(fn):\n @wraps(fn)\n def decorator(*args, **kwargs):\n verify_jwt(realm)\n return fn(*args, **kwargs)\n return decorator\n return wrapper\n\n\nclass JWTError(Exception):\n def __init__(self, error, description, status_code=400, headers=None):\n self.error = error\n self.description = description\n self.status_code = status_code\n self.headers = headers\n\n\ndef verify_jwt(realm=None):\n \"\"\"Does the actual work of verifying the JWT data in the current request.\n This is done automatically for you by `jwt_required()` but you could call it manually.\n Doing so would be useful in the context of optional JWT access in your APIs.\n\n :param realm: an optional realm\n \"\"\"\n realm = realm or current_app.config['JWT_DEFAULT_REALM']\n auth = request.headers.get('Authorization', None)\n auth_header_prefix = current_app.config['JWT_AUTH_HEADER_PREFIX']\n\n if auth is None:\n raise JWTError('Authorization Required', 'Authorization header was missing', 401, {\n 'WWW-Authenticate': 'JWT realm=\"%s\"' % realm\n })\n\n parts = auth.split()\n\n if parts[0].lower() != auth_header_prefix.lower():\n raise JWTError('Invalid JWT header', 'Unsupported authorization type')\n elif len(parts) == 1:\n raise JWTError('Invalid JWT header', 'Token missing')\n elif len(parts) > 2:\n raise JWTError('Invalid JWT header', 'Token contains spaces')\n\n try:\n handler = _jwt.decode_callback\n payload = handler(parts[1])\n except SignatureExpired:\n raise JWTError('Expired JWT', 'Token is expired', 401, {\n \"WWW-Authenticate\": 'JWT realm=\"{0}\"'.format(realm)\n })\n except BadSignature:\n raise JWTError('Invalid JWT', 'Token is undecipherable')\n\n _request_ctx_stack.top.current_user = user = _jwt.user_callback(payload)\n\n if user is None:\n raise JWTError('Invalid JWT', 'User does not exist')\n\n\ndef generate_token(user):\n \"\"\"Generate a token for a user.\n \"\"\"\n payload = _jwt.payload_callback(user)\n token = _jwt.encode_callback(payload)\n return token\n\n\nclass JWTAuthView(MethodView):\n\n def post(self):\n\n user = _jwt.payload_authentication_callback(request)\n if user:\n token = generate_token(user)\n return _jwt.response_callback(token)\n else:\n raise JWTError('Bad Request', 'Invalid credentials')\n\n\nclass JWT(object):\n\n def __init__(self, app=None):\n if app is not None:\n self.app = app\n self.init_app(app)\n else:\n self.app = None\n\n # Set default handlers\n self.response_callback = _default_response_handler\n self.encode_callback = _default_encode_handler\n self.decode_callback = _default_decode_handler\n self.payload_callback = _default_payload_handler\n self.payload_authentication_callback = _default_payload_auth_handler\n\n def init_app(self, app):\n for k, v in CONFIG_DEFAULTS.items():\n app.config.setdefault(k, v)\n app.config.setdefault('JWT_SECRET_KEY', app.config['SECRET_KEY'])\n\n url_rule = app.config.get('JWT_AUTH_URL_RULE', None)\n endpoint = app.config.get('JWT_AUTH_ENDPOINT', None)\n\n if url_rule and endpoint:\n auth_view = JWTAuthView.as_view(endpoint)\n app.add_url_rule(url_rule, methods=['POST'], view_func=auth_view)\n\n app.errorhandler(JWTError)(self._on_jwt_error)\n\n if not hasattr(app, 'extensions'): # pragma: no cover\n app.extensions = {}\n app.extensions['jwt'] = self\n\n def _on_jwt_error(self, e):\n return getattr(self, 'error_callback', self._error_callback)(e)\n\n def _error_callback(self, e):\n return jsonify(OrderedDict([\n ('status_code', 
e.status_code),\n ('error', e.error),\n ('description', e.description),\n ])), e.status_code, e.headers\n\n def authentication_handler(self, callback):\n \"\"\"Specifies the authentication handler function. This function receives two\n positional arguments. The first being the username the second being the password.\n It should return an object representing the authenticated user. Example::\n\n @jwt.authentication_handler\n def authenticate(username, password):\n if username == 'joe' and password == 'pass':\n return User(id=1, username='joe')\n\n :param callback: the authentication handler function\n \"\"\"\n self.authentication_callback = callback\n return callback\n\n def payload_authentication_handler(self, callback):\n \"\"\"Specifies a custom authentication handler. This function receives the\n json payload as argument.\n It should return an object representing the authenticated user. Example::\n\n @jwt.payload_authentication_handler\n def authenticate(payload):\n if payload['user'] == 'joe' and payload['pass'] == 'pass':\n return User(id=1, username='joe')\n\n :param callback: the authentication handler function\n \"\"\"\n self.payload_authentication_callback = callback\n return callback\n\n def user_handler(self, callback):\n \"\"\"Specifies the user handler function. This function receives the token payload as\n its only positional argument. It should return an object representing the current\n user. Example::\n\n @jwt.user_handler\n def load_user(payload):\n if payload['user_id'] == 1:\n return User(id=1, username='joe')\n\n :param callback: the user handler function\n \"\"\"\n self.user_callback = callback\n return callback\n\n def error_handler(self, callback):\n \"\"\"Specifies the error handler function. This function receives a JWTError instance as\n its only positional argument. It can optionally return a response. Example::\n\n @jwt.error_handler\n def error_handler(e):\n return \"Something bad happened\", 400\n\n :param callback: the error handler function\n \"\"\"\n self.error_callback = callback\n return callback\n\n def response_handler(self, callback):\n \"\"\"Specifies the response handler function. This function receives a\n JWT-encoded payload and returns a Flask response.\n\n :param callable callback: the response handler function\n \"\"\"\n self.response_callback = callback\n return callback\n\n def encode_handler(self, callback):\n \"\"\"Specifies the encoding handler function. This function receives a\n payload and signs it.\n\n :param callable callback: the encoding handler function\n \"\"\"\n self.encode_callback = callback\n return callback\n\n def decode_handler(self, callback):\n \"\"\"Specifies the decoding handler function. This function receives a\n signed payload and decodes it.\n\n :param callable callback: the decoding handler function\n \"\"\"\n self.decode_callback = callback\n return callback\n\n def payload_handler(self, callback):\n \"\"\"Specifies the payload handler function. 
This function receives a\n user object and returns a dictionary payload.\n\n Example::\n\n @jwt.payload_handler\n def make_payload(user):\n return {\n 'user_id': user.id,\n 'exp': datetime.utcnow() + current_app.config['JWT_EXPIRATION_DELTA']\n }\n\n :param callable callback: the payload handler function\n \"\"\"\n self.payload_callback = callback\n return callback\n","sub_path":"flask_jwt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525018830","text":"import numpy as np\nimport cv2\n\nlastOrientation = \"BR\"\nsize = 200\n# make an array of the corner positions of a 200x200 square\nperspective = np.array([[0, 0], [size - 1, 0],\n [size - 1, size - 1], [0, size - 1]])\n\n\n# Compute homography using supplementary homography method provided by the Professor\n# this is a custom function used in place of cv2.findHomography\ndef get_homograph(points1, points2):\n A = []\n B = []\n for i in range(0, 4):\n x_w, y_w = points2[i][0], points2[i][1]\n x_c, y_c = points1[i][0], points1[i][1]\n A = [[x_w, y_w, 1, 0, 0, 0, -x_c * x_w, -x_c * y_w, -x_c], [0, 0, 0, x_w, y_w, 1, -y_c * x_w, -y_c * y_w, -y_c]]\n B.append(A)\n B = np.reshape(B, (8, 9))\n U, S, D = np.linalg.svd(B)\n fact = (1 / D[-1, -1])\n h = D[-1, :]\n L = fact * h\n hom = np.reshape(L, (3, 3))\n return hom\n\n\ndef warp(matrix, M):\n h, w, z = matrix.shape\n warp = {'x': [], 'y': [], 'xnew': [], 'ynew': [], 'colors': []}\n aux = np.full(matrix.shape, 0, dtype='uint8')\n for j in range(h):\n for i in range(w):\n warp['x'].append(i)\n warp['y'].append(j)\n xpri = (M[0, 0] * i + M[0, 1] * j + M[0, 2]) / (M[2, 0] * i + (M[2, 1] * j) + M[2, 2])\n ypri = (M[1, 0] * i + M[1, 1] * j + M[1, 2]) / (M[2, 0] * i + (M[2, 1] * j) + M[2, 2])\n warp['xnew'].append(xpri)\n warp['ynew'].append(ypri)\n color = matrix[j, i]\n warp['colors'].append(color)\n for i in range(len(warp['x'])):\n xpri = int(warp['xnew'][i])\n ypri = int(warp['ynew'][i])\n\n if (xpri > 0 and ypri > 0) and (xpri < w and ypri < h):\n aux[ypri, xpri, :] = warp['colors'][i]\n return aux\n\n\n\n#This class exists to handle all processing related to a specific video frame\nclass frame:\n number = 0\n\n def __init__(self):\n self.number = 0\n\n def get_isolated_channels(self):\n # b = self.image.copy()\n # # set green and red channels to 0\n # b[:, :, 1] = 0\n # b[:, :, 2] = 0\n #\n # g = self.image.copy()\n # # set blue and red channels to 0\n # g[:, :, 0] = 0\n # g[:, :, 2] = 0\n #\n # r = self.image.copy()\n # # set blue and green channels to 0\n # r[:, :, 0] = 0\n # r[:, :, 1] = 0\n # return r, g, b\n channels = cv2.split(self.image)\n return channels\n\n def increment_frame(self):\n \"\"\"Advances the frame number by one. Returns the updated value.\"\"\"\n self.number += 1\n return self.number\n\n def adjust_gamma(self, gamma=1.0):\n # build a lookup table mapping the pixel values [0, 255] to\n # their adjusted gamma values\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n #print(table)\n # apply gamma correction using the lookup table\n return cv2.LUT(self.image, table)\n\n def lowpass(self, bottom):\n \"\"\"Take a bottom number. Every pixel below bottom will be set to 0 and every pixel above\n will be set to 255. 
The image will be returned.\"\"\"\n # convert colour frames to grayscale before thresholding\n img = self.image\n if img.ndim == 3:\n print(\"Converting from color to grayscale first\")\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, out = cv2.threshold(img, bottom, 255, cv2.THRESH_BINARY)\n return out\n\n\n def resize(self, scale):\n \"\"\"Scales the image using open cv resize function based on the scale parameter.\n To scale down the scale parameter should be less than 1.\"\"\"\n self.image = cv2.resize(self.image, (0, 0), fx=scale, fy=scale)\n\n def smooth(self,kernalSize=5):\n \"\"\"Executes a GaussianBlur filter using the kernel size provided. It should be an odd number\"\"\"\n self.image = cv2.GaussianBlur(self.image,(kernalSize,kernalSize),0)","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352603540","text":"from memory_profiler import profile\nfrom queue import Queue\ndef read_Data(FilePath):\n with open(FilePath) as tf:\n FileText=tf.readlines()\n for i in range(len(FileText)):\n if FileText[i][-1]=='\\n':\n FileText[i]=FileText[i][:-2]\n return FileText\n\nclass node():\n def __init__(self,X,Y,Depth):\n self.x=X\n self.y=Y\n self.depth=Depth # node depth\n self.path=[(X,Y)] # path from the start node to the current node\n def get_address(self):\n return (self.x, self.y)\n def update_path(self,last):\n self.path=list(last.path)\n self.path.append(self.get_address())\n\n@profile(precision=10) \ndef BiBFS(graph, StartNode, EndNode):\n '''\n Bidirectional breadth-first search\n '''\n frontier=[]\n frontier.append(Queue())# forward search frontier queue\n frontier.append(Queue())# backward search frontier queue\n explored=[]\n explored.append([])# forward search explored list\n explored.append([])# backward search explored list\n lenX=len(graph)\n lenY=len(graph[0])\n frontier[0].put(StartNode)\n frontier[1].put(EndNode)\n Index=0\n NextIndex=(Index+1)%2\n while True:\n if frontier[Index].empty():# the current queue is empty: search failed\n #print('Fail!')\n return False\n else:\n CurrentNode=frontier[Index].get()\n # check whether the current node has already been visited by the other search\n for OtherNode in explored[NextIndex]:\n if CurrentNode.get_address()==OtherNode.get_address():\n # visited by the other search: success\n #print(\"Success! 
The length is \"+str(CurrentNode.depth+OtherNode.depth))\n #if Index==0:\n # print(CurrentNode.path)\n # print(OtherNode.path[::-1])\n #else:\n # print(OtherNode.path)\n # print(CurrentNode.path[::-1])\n return True\n # 判断当前节点是否已被当前搜索搜过\n flag=False\n for OtherNode in explored[Index]:\n if CurrentNode.get_address()==OtherNode.get_address():\n flag=True\n break\n if flag:# 已被搜过,跳过\n continue\n else: # 未被搜过,加入以搜索列表\n explored[Index].append(CurrentNode)\n # 将周围可到达的节点加入队列\n if CurrentNode.y>0 and graph[CurrentNode.x][CurrentNode.y-1]!='1':\n NextNode=node(CurrentNode.x,CurrentNode.y-1,CurrentNode.depth+1)\n NextNode.update_path(CurrentNode)\n frontier[Index].put(NextNode)\n if CurrentNode.x>0 and graph[CurrentNode.x-1][CurrentNode.y]!='1':\n NextNode=node(CurrentNode.x-1,CurrentNode.y,CurrentNode.depth+1)\n NextNode.update_path(CurrentNode)\n frontier[Index].put(NextNode)\n if CurrentNode.x> 1\n return a\n\n\ndef is_odd(a):\n \"\"\"\n 判断a 是否为奇数,是返回 True,否则False\n :param a:\n :return:\n \"\"\"\n assert isinstance(a, int), Exception(\"\")\n a = a & 1\n if a:\n return True\n else:\n return False\n\n\ndef is_Npower2(a):\n \"\"\"\n 判断a是否是2的n此方,n为整数\n :param a:\n :return:\n \"\"\"\n if not a & (a - 1):\n return True\n return False\n\n\nif __name__ == '__main__':\n print(is_odd(1))\n print(is_Npower2(6))\n print(mul2(-10))\n print(div2(3))\n","sub_path":"bit_ops/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"129257573","text":"class Solution(object):\n def rectangleArea(self, rectangles):\n \"\"\"\n :type rectangles: List[List[int]]\n :rtype: int\n \"\"\"\n xs = sorted(set([x for x1, y1, x2, y2 in rectangles for x in (x1, x2)]))\n x_i = {v: i for i, v in enumerate(xs)}\n count = [0] * len(xs)\n L = []\n for x1, y1, x2, y2 in rectangles:\n L.append((y1, x1, x2, 1))\n L.append((y2, x1, x2, -1))\n L.sort()\n pre_y = cur_x_sum = res = 0\n for y, x1, x2, exist in L:\n res += (y-pre_y) * cur_x_sum\n pre_y = y\n for i in range(x_i[x1], x_i[x2]):\n count[i] += exist\n cur_x_sum = sum(x2-x1 if c else 0 for x1, x2, c in zip(xs, xs[1:], count))\n return res%(10**9+7)\n \n","sub_path":"850_RectangleAreaII_H.py","file_name":"850_RectangleAreaII_H.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457234516","text":"#!/usr/bin/env python\nimport snowboydecoder\nimport sys\nimport signal\nimport rospy\nimport os\nfrom std_msgs.msg import String, Int32\n\ninterrupted = False\n\npub = rospy.Publisher('/voice_system/asr_topic', Int32, queue_size=10)\nrospy.init_node('sw')\n\ndef signal_handler(signal, frame):\n global interrupted\n interrupted = True\n\n\ndef interrupt_callback():\n global interrupted\n return interrupted\n\nmodel = \"/home/pi/ros_assistant/src/snowboy/src/qct.pmdl\"\n\n# capture SIGINT signal, e.g., Ctrl+C\nsignal.signal(signal.SIGINT, signal_handler)\n\ndetector = snowboydecoder.HotwordDetector(model, sensitivity=0.42)\nprint('Listening... 
Press Ctrl+C to exit')\n\n# main loop\nwhile True:\n aa=0\n aa=detector.start(detected_callback=snowboydecoder.play_audio_file,\n interrupt_check=interrupt_callback,\n sleep_time=0.03)\n if aa==1:\n pub.publish(aa)\n aa=0\n print(\"nihao\")\n\ndetector.terminate()\n","sub_path":"snowboy/src/sw.py","file_name":"sw.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16250156","text":"from src.WebsitesScrappers.AllegroScrapper import AllegroScrapper\nfrom src.WebsitesScrappers.OlxScrapper import OlxScrapper\nfrom src.UrlHandling.FileHandler import FileHandler\nfrom src.UrlHandling.MailHandler import MailHandler\nfrom src.Options.UserOptions import UserOptions\nimport time\n\nif __name__ == \"__main__\":\n\n def check_for_new_products():\n\n def get_products(p): return OlxScrapper.get_product_offers(p) + AllegroScrapper.get_product_offers(p)\n\n print('Looking for New Products...')\n all_products = [item for p in tuple(map(get_products, UserOptions().get_products_list())) for item in p]\n new_products = FileHandler.check_with_existing_products('Resources/aul.txt', all_products)\n print('Found Some New Products') if new_products else print('Nothing New Found')\n\n FileHandler.add_new_products('Resources/aul.txt', new_products)\n FileHandler.delete_old_products('Resources/aul.txt', 30)\n\n MailHandler.send_mail(new_products, UserOptions().get_user_mail())\n\n while True:\n check_for_new_products()\n print('Going To Sleep for an hour....\\n')\n time.sleep(60 * 60) # Do this shit every hour\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379664437","text":"from guid.utils import get_timestamp, til_next_millis\nfrom guid.exceptions import TimeChangedException\n\nimport time\n\nclass Guid(object):\n DATACENTER_ID_BITS = 5\n WORKER_ID_BITS = 5\n SEQUENCE_BITS = 12\n SEQUENCE_LIMIT = (1 << SEQUENCE_BITS) - 1\n TIMESTAMP_SHIFT = DATACENTER_ID_BITS + WORKER_ID_BITS + SEQUENCE_BITS\n WORKER_ID_SHIFT = SEQUENCE_BITS\n DATACENTER_ID_SHIFT = SEQUENCE_BITS + WORKER_ID_BITS\n \n SEQUENCE_MASK = (1 << SEQUENCE_BITS) - 1\n WORKER_ID_MASK = (1 << WORKER_ID_BITS) - 1\n DATACENTER_ID_MASK = (1 << DATACENTER_ID_BITS) - 1\n\n EPOCH = time.mktime((2018, 1, 1, 0, 0, 0, 0, 0, 0))\n\n def __init__(self, datacenter_id, worker_id, epoch = None):\n self.datacenter_id = datacenter_id & Guid.DATACENTER_ID_MASK\n self.worker_id = worker_id & Guid.WORKER_ID_MASK\n self.last_timestamp = -1\n self.sequence = 0\n self.epoch = epoch if epoch is not None else int(Guid.EPOCH * 1000)\n\n def next(self):\n timestamp = get_timestamp()\n print(timestamp)\n if (timestamp < self.last_timestamp):\n raise TimeChangedException(self.last_timestamp, timestamp)\n\n if (timestamp == self.last_timestamp):\n self.sequence = self.sequence + 1\n if self.sequence >= Guid.SEQUENCE_LIMIT:\n timestamp = til_next_millis(self.last_timestamp)\n\n if (timestamp > self.last_timestamp):\n self.sequence = 0\n\n self.last_timestamp = timestamp\n timestamp -= self.epoch\n \n guid = (timestamp << Guid.TIMESTAMP_SHIFT) |\\\n (self.datacenter_id << Guid.DATACENTER_ID_SHIFT) |\\\n (self.worker_id << Guid.WORKER_ID_SHIFT) |\\\n self.sequence\n\n return guid\n\n\nif __name__ == '__main__':\n g = Guid(0,0)\n print(g.next());\n 
print(g.next());\n","sub_path":"guid/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592694189","text":"# persistent solver\n# Suppose we want to know how the cost changes as we increase the number of warehouses built.\n# warehouse_location.py: Warehouse location determination problem.\n# https://pyomo.readthedocs.io/en/stable/advanced_topics/persistent_solvers.html?highlight=persistent%20solver\n\nfrom pyomo.environ import *\nfrom pyomo.bilevel import *\nfrom commons import *\n\nmodel = ConcreteModel(name=\"(WL)\")\nW = ['Harlingen', 'Memphis', 'Ashland']\nC = ['NYC', 'LA', 'Chicago', 'Houston']\nd = {('Harlingen', 'NYC'): 1956,\n\t('Harlingen', 'LA'): 1606,\n\t('Harlingen', 'Chicago'): 1410,\n\t('Harlingen', 'Houston'): 330,\n\t('Memphis', 'NYC'): 1096,\n\t('Memphis', 'LA'): 1792,\n\t('Memphis', 'Chicago'): 531,\n\t('Memphis', 'Houston'): 567,\n\t('Ashland', 'NYC'): 485,\n\t('Ashland', 'LA'): 2322,\n\t('Ashland', 'Chicago'): 324,\n\t('Ashland', 'Houston'): 1236}\n\n# a mutable Param so that model.P.value can be updated between solves below\nmodel.P = Param(initialize=2, mutable=True)\nmodel.x = Var(W, C, bounds=(0,1))\nmodel.y = Var(W, within=Binary)\n\ndef obj_rule(m):\n return sum(d[w,c]*m.x[w,c] for w in W for c in C)\nmodel.obj = Objective(rule=obj_rule)\n\ndef one_per_cust_rule(m, c):\n return sum(m.x[w,c] for w in W) == 1\nmodel.one_per_cust = Constraint(C, rule=one_per_cust_rule)\n\ndef warehouse_active_rule(m, w, c):\n return m.x[w,c] <= m.y[w]\nmodel.warehouse_active = Constraint(W, C, rule=warehouse_active_rule)\n\ndef num_warehouses_rule(m):\n return sum(m.y[w] for w in W) <= m.P\n\nmodel.num_warehouses = Constraint(rule=num_warehouses_rule)\nopt = SolverFactory('gurobi_persistent')\nopt.set_instance(model)\n# model.pprint()\n\nt0 = time.time()\nP_list = list(range(1, N_locations+1))\nobj_list = []\nfor p in P_list:\n opt.remove_constraint(model.num_warehouses)\n model.P.value = p\n opt.add_constraint(model.num_warehouses)\n res = opt.solve(load_solutions=False, save_results=False)\n obj_list.append(res.problem.upper_bound)\n\nt1 = time.time()\nprint(t1-t0)\nplt.plot(P_list, obj_list)\nplt.show()\n","sub_path":"persistent_example.py","file_name":"persistent_example.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183829867","text":"import discord\nfrom discord.ext import commands\nfrom datetime import datetime\nimport random\nimport threading\n\n#Counts current lines in a file.\ndef LineCount():\n file = open(\"DailyQuestions.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n file.close()\n print(line_count)\n\n\nclass DailyCMD(commands.Cog):\n def __init__(self,bot):\n self.bot = bot\n\n #Waits for either the approval or denial on a question suggestion\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n if payload.user_id != 777361919211732993:\n if payload.channel_id == 787803726168588318:\n if str(payload.emoji) == \"✅\":\n channel = self.bot.get_channel(payload.channel_id)\n msg = await channel.fetch_message(payload.message_id)\n embed = msg.embeds[0]\n contentval = embed.fields[2].value\n linec, question = contentval.split(\" | \")\n file = open(\"DailyQuestions.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n file.close()\n lc = line_count + 1\n\n embed = discord.Embed(title = \"Suggestion Approved\", description = \"<@\" + 
str(payload.user_id) + \"> has approved a suggestion! \", color = 0x31f505)\n embed.add_field(name = \"Question Approved\", value = str(question))\n await channel.send(embed = embed)\n\n f = open(\"DailyQuestions.txt\", \"a\")\n f.write(str(lc) + \" - \" + question + \"\\n\")\n f.close()\n reactions = ['✅', '❌']\n for emoji in reactions: \n await msg.clear_reaction(emoji)\n \n \n\n \n elif str(payload.emoji) == \"❌\":\n embed = msg.embeds[0]\n contentval = embed.fields[2].value\n channel = self.bot.get_channel(payload.channel_id)\n linec, question = contentval.split(\" | \")\n embed2 = discord.Embed(title = \"Suggestion Approved\", description = \"<@\" + str(payload.user_id) + \"> has approved a suggestion! \", color = 0xf50505)\n embed.add_field(name = \"Question Approved\", value = \"Question Approved: \" + str(question))\n await channel.send(embed = embed2)\n reactions = ['✅', '❌']\n for emoji in reactions: \n await msg.clear_reaction(emoji)\n else:\n return\n else:\n return\n else:\n return\n\n\n #Lists all current questions in the textfile. \n @commands.command()\n async def listq(self, ctx):\n with open('DailyQuestions.txt', 'r') as file:\n author = ctx.message.author\n msg = file.read(984).strip()\n while len(msg) > 0:\n em = discord.Embed(title = \"Current Recorded Questions\", description = \"Requested by: \" + author.mention, color = 0xb10d9f)\n em.add_field(name = \"Questions:\", value = msg)\n await ctx.send(embed = em)\n msg = file.read(1024).strip()\n\n #Sends a random question.\n @commands.command()\n async def dailyq(self, ctx):\n await ctx.channel.purge(limit = 1)\n author = ctx.message.author\n rew = open('DailyQuestionsC.txt', \"r\")\n copy = open(\"DailyQuestions.txt\", \"a\")\n for line in rew:\n if line != \"\\n\":\n copy.write(line)\n rew.close()\n copy.close()\n file = open(\"DailyQuestions.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n lc = line_count + 1\n file.close()\n A = random.randint(0 , int(lc))\n\n with open(\"DailyQuestions.txt\", \"r\") as myFile:\n for num, line in enumerate(myFile, 1):\n if num == A:\n Numberl, Q = line.split(\" - \")\n fullLine = line\n \n with open(\"DailyQuestions.txt\", \"r\") as f:\n lines = f.readlines()\n with open(\"DailyQuestions.txt\", \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != fullLine:\n f.write(line)\n \n\n Dailyq = discord.Embed(title = \"❓ QUESTION OF THE DAY ❓\", description = \"**\" + Q + \"**\", color = 0xb10d9f)\n Dailyq.set_footer(text=\"Got a question? Use the suggest command! \\n*Usage:* >suggestq (Your Question Here)\")\n await ctx.send(embed = Dailyq)\n\n \n \n \n #Add's a question to the database\n @commands.command()\n async def addq(self, ctx, * , reason):\n file = open(\"DailyQuestions.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n LC = line_count + 1\n file.close()\n file = open(\"DailyQuestions.txt\", \"a\")\n file.write(str(LC) + \" - \" + reason + \"\\n\")\n file.close()\n file = open(\"DailyQuestionsC.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n LC = line_count + 1\n file.close()\n file = open(\"DailyQuestionsC.txt\", \"a\")\n file.write(str(LC) + \" - \" + reason + \"\\n\")\n file.close()\n await ctx.send(\"Question added to the list! 
\\n**Added:** \" + reason + \"\\n**Line Number:** \" + str(LC))\n \n @addq.error\n async def addq_error(self,ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You didn't include a question!\")\n\n #Removes a question from the database. [BROKEN]\n @commands.command()\n async def removeq(self, ctx, *, reason):\n file = open(\"DailyQuestions.txt\", \"r\")\n for line in file:\n Num, Q = line.split(\" - \")\n if reason == Num:\n question = line\n file.close()\n with open(\"DailyQuestions.txt\", \"r\") as f:\n lines = f.readlines()\n with open(\"DailyQuestions.txt\", \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != question:\n f.write(line)\n\n file = open(\"DailyQuestionsC.txt\", \"r\")\n for line in file:\n Num, Q = line.split(\" - \")\n if reason == Num:\n question = line\n file.close()\n with open(\"DailyQuestionsC.txt\", \"r\") as f:\n lines = f.readlines()\n with open(\"DailyQuestionsC.txt\", \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != question:\n f.write(line)\n await ctx.send(\"**REMOVED:** \" + line)\n \n @removeq.error\n async def removeq_error(self,ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You didn't include a number! Please refer to `>listq` for a question number!\")\n\n #Forces a certain question. \n @commands.command()\n async def forceq(self, ctx, *, reason):\n await ctx.channel.purge(limit = 1)\n author = ctx.message.author\n file = open(\"DailyQuestions.txt\", \"r\")\n for line in file:\n Num, Q = line.split(\" - \")\n if reason == Num:\n question = line\n file.close()\n Dailyq = discord.Embed(title = \"❓ QUESTION OF THE DAY ❓\", description = \"**\" + Q + \"**\", color = 0xb10d9f)\n await ctx.send(embed = Dailyq)\n\n \n @forceq.error\n async def forceq_error(self,ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You didn't include a number! Please refer to `>listq` for a question number!\")\n \n #Suggests a question and sends it to the moderators. \n @commands.command()\n async def suggestq(self, ctx, *, question):\n author = ctx.message.author\n channel = ctx.message.channel\n guild = ctx.message.guild\n DMChannel = await ctx.author.create_dm()\n if channel.name == \"bot-spam\":\n print(channel)\n def check(m):\n return m.content is not None and m.channel == channel and m.author is not self.bot.user\n\n await channel.send(\"Are you sure you want to submit this question for approval? \\n**Warning:** You will be subjected to a warn/mute if your suggestion is deemed inappropriate! \\n*Please respond with either `YES` or `NO`*\")\n msg2 = await self.bot.wait_for('message', check=check)\n if \"YES\" in msg2.content:\n msga = await ctx.send(\"Standby, sending your suggestion. 
\")\n channels = await self.bot.fetch_channel(787803726168588318)\n embed = discord.Embed(title = \"Daily Question Suggestion\", description = str(author.name) + \" suggested a question in <#\" + str(channel.id) + \">\", color = 0xfcba03)\n embed.add_field(name = \"Suggestion:\", value = str(question))\n #QuestionSuggestQ.txt\n file = open(\"QuestionSuggestQ.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n file.close()\n lc = line_count + 1\n embed.add_field(name = \"Approving/Denial Command\", value = \"\\n✅ - Approve \\n❌ - Reject\")\n embed.add_field(name = \"Developer Payload\", value = str(lc) + \" | \" + str(question))\n timestamp = datetime.now()\n embed.set_footer(text=guild.name + \" | Date: \" + str(timestamp.strftime(r\"%x\")))\n msg = await channels.send(embed = embed)\n with open(\"QuestionSuggestQ.txt\", \"a\") as f:\n f.write(str(lc) + \" - \" + question + \"\\n\")\n reactions = ['✅', '❌']\n for emoji in reactions: \n await msg.add_reaction(emoji)\n await msga.edit(content = \"I have sent your question! \\nPlease wait for an admin to approve it. \")\n else:\n print(\"invalid\")\n else:\n await ctx.channel.purge(limit = 1)\n embed = discord.Embed(title = \"Woah Slow Down!\", description = \"This command is locked to <#588728994661138494>!\\nI also sent your command in your DM's so all you have to do is just copy it and send it in the right channel!\", color = 0xb10d9f)\n msg = await ctx.send(embed = embed, delete_after = 6)\n await DMChannel.send(\"Here is your command! \\nPlease send it in #bot-spam!\")\n await DMChannel.send(\">suggestq \" + str(question))\n \n\n \n\n\n\n#Inactive as @client.event is being used\n\n'''\n @commands.command()\n async def confirmq(self, ctx, number):\n file = open(\"QuestionSuggestQ.txt\", \"r\")\n for line in file:\n Num, Q = line.split(\" - \")\n if number == Num:\n question = line\n\n #Removing Question\n with open(\"QuestionSuggestQ.txt\", \"r\") as f:\n lines = f.readlines()\n\n with open(\"QuestionSuggestQ.txt\", \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != question:\n f.write(line)\n file.close()\n\n\n ---\n\nelse:\n file = open(\"DailyQuestions.txt\", \"r\")\n line_count = 0\n for line in file:\n if line != \"\\n\":\n line_count += 1\n lc = line_count + 1\n file.close()\n A = random.randint(0 , int(lc))\n\n with open(\"DailyQuestions.txt\", \"r\") as myFile:\n for num, line in enumerate(myFile, 1):\n if num == A:\n Numberl, Q = line.split(\" - \")\n fullLine = line\n \n with open(\"DailyQuestions.txt\", \"r\") as f:\n lines = f.readlines()\n with open(\"DailyQuestions.txt\", \"w\") as f:\n for line in lines:\n if line.strip(\"\\n\") != fullLine:\n f.write(line)\n \n\n Dailyq = discord.Embed(title = \"❓ QUESTION OF THE DAY ❓\", description = \"**\" + Q + \"**\", color = 0xb10d9f)\n Dailyq.set_footer(text=\"Got a question you want to add? Use the suggest command! 
\\nUsage: >suggestq (Your Question Here)\")\n await ctx.send(embed = Dailyq)\n'''\n\n\n\n\n\n\ndef setup(bot):\n bot.add_cog(DailyCMD(bot))\n","sub_path":"cogs/DailyQuestionCMD.py","file_name":"DailyQuestionCMD.py","file_ext":"py","file_size_in_byte":11187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281999586","text":"# python 3 on linux\n\nimport os\nimport subprocess\nimport time\n\nfrom class_KEGG import Kegg_info\nfrom nucleotide_genbank_file import Nucleotide_gb_info\nfrom protein_genbank_file import Protein_gb_info\n\n\n\ndef dw_files():\n \"\"\"\n heeft nodig de bash script \"downloadblast.sh\" en de sequenties\n \"bpapge_seq_a1\".\n deze worden naar de map bestanden verplaats. en hier worden ze\n uitgevoerd.\n downloadblast download de genoom van de Panthera tigris altaica en\n gebruikt dat als de data base om de bpapge_seq_a1 te blasten.\n en er komt de file output_blasten.txt als uitkomst uit. en de bestanden\n worden weer terug in de main map gezet.\n :return: Niks\n \"\"\"\n os.system('mv downloadblast.sh ./bestanden/downloadblast.sh')\n os.system('mv bpapge_seq_a1.txt ./bestanden/bpapge_seq_a1.txt')\n subprocess.run(['bash', 'downloadblast.sh'], cwd='bestanden')\n os.system('mv ./bestanden/downloadblast.sh downloadblast.sh')\n os.system('mv ./bestanden/bpapge_seq_a1.txt bpapge_seq_a1.txt')\n\ndef get_output_blasten():\n \"\"\"\n\n maakt de output van blasten open en pakt alle waardes die een e-value\n van 0.0 heeft. deze worden in een lisjt gezet en aan het einde wordt de\n de lijst uniek gemaakt met set().\n :return: een lijst met unieke gene codes van de output van blasten (list)\n \"\"\"\n lijst = []\n with open('./bestanden/output_blasten.txt') as file:\n data = file.readlines()\n for x in data:\n z = x.strip().split('\\t')\n if z[10] == '0.0':\n lijst += [z[1].split('|')[3]]\n return set(lijst)\n\ndef download_alles(genen_lijst):\n \"\"\"\n :param genen_lijst: een lijst met nucleotide ID's (list)\n\n Maakt lijsten aan waar alle data inkomt te staan, hierna wordt alle\n data verkregen door gebruik van de classes Nucleotide_gb_info,\n Protein_gb_info en Kegg_info. deze downloaden en zorgen dat alle data\n uit de files wordt gehaald en worden opgeslagen in de classes zelf.\n alle data is er uit te halen met 'get'fucnties. 
see the classes for\n more information.\n\n :return:\n gene_class_lijst\n protein_class_lijst\n kegg_class_lijst\n \"\"\"\n gene_class_lijst, protein_class_lijst, kegg_class_lijst = [], [], []\n protein_id_lijst, kegg_id_lijst = [], []\n for gene in genen_lijst:\n gene = Nucleotide_gb_info(str(gene))\n protein_id_lijst += [gene.get_protein_id()]\n gene_class_lijst += [gene]\n for protein_id in protein_id_lijst:\n data = Protein_gb_info(protein_id)\n kegg_id_lijst += data.get_ec_nummer()\n protein_class_lijst += [data]\n for kegg in set(kegg_id_lijst):\n kegg_class_lijst += [Kegg_info(str(kegg))]\n return gene_class_lijst, protein_class_lijst, kegg_class_lijst\n\n\n\n\n\n\n\n\n\n\n\ndef main():\n temp_naam = os.getcwd() + '/temp'\n print('''\nWelcome to the program for the assignment of Hogeschool Leiden, study programme Bioinformatics.\nThis program carries out the year 2 applied genomics project.\nWe hope you like it.\nKind regards,\nGroup A1 (Shirley, Lotta, Hanna, Loes and Nils)\\n\\n\\n\\n''')\n time.sleep(5)\n if 'blasten.sh' not in os.listdir() or 'bpapge_seq_a1.txt' not in os.listdir():\n print('''there is a problem with the files.\nblasten.sh or bpapge_seq_a1.txt is not in the folder''')\n quit()\n else:\n while True:\n keuze = input('''welcome, you can now run the program.\n1) run the program\n2) change options\n3) quit\nmake your choice:\\n''')\n if keuze == '1':\n os.system('mkdir ./bestanden')\n os.system('mkdir {}'.format(temp_naam))\n os.system('mkdir {}/nucleotide'.format(temp_naam))\n os.system('mkdir {}/protein'.format(temp_naam))\n os.system('mkdir {}/protein/kegg'.format(temp_naam))\n os.system('mkdir {}/pathways'.format(temp_naam))\n gene_lijst, protein_lijst, kegg_lijst = download_alles(\n get_output_blasten())\n print(len(gene_lijst),len(protein_lijst), len(kegg_lijst))\n for i in protein_lijst:\n print(i, i.get_sites())\n for x in kegg_lijst:\n print(x)\n pass\n elif keuze == '2':\n pass\n elif keuze == '3':\n exit()\n else:\n print('wrong input, please try again.')\n\nif __name__ == '__main__':\n\n main()\n\n","sub_path":"BpapgeA1.py","file_name":"BpapgeA1.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160190912","text":"import matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\ndef mandlebrot(x,y):\n for xval in range(x):\n for yval in range(y):\n # map the pixel to a point c in the complex plane\n c = complex(3.0*xval/x - 2.0, 3.0*yval/y - 1.5)\n val = 0\n iteration = 0\n # iterate z -> z*z + c until divergence or the iteration cap\n while abs(val) < 2 and iteration < 20:\n val = val * val + c\n iteration = iteration + 1\n # RGBA colour: the alpha channel encodes the escape time\n plt.scatter(xval, yval, color=(0, 0, 1, iteration/20))\n plt.show()\n\nmandlebrot(500,1000)\n","sub_path":"mandlebrot.py","file_name":"mandlebrot.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514939668","text":"def fun(a):\r\n b = a**2 + a - 5\r\n return b\r\nb = 25\r\na = 0\r\ntol = 1E-5\r\nwhile abs(a - b) > tol:\r\n temp = (a + b)/2\r\n if fun(temp) > 0:\r\n b = temp\r\n elif fun(temp) < 0:\r\n a = temp\r\n else:\r\n a = temp\r\n b = temp\r\nprint('answer is',(a+b)/2)","sub_path":"ST_python/bisection.py","file_name":"bisection.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117620606","text":"import logging\nimport os\nfrom logging import INFO as LOG_LEVEL_INFO\n\nLOG_LEVEL = int(os.environ.get(\"LOG_LEVEL\", LOG_LEVEL_INFO))\n\n\nclass 
AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\nlogging.basicConfig(\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=LOG_LEVEL,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\ndef pc_logging(level, *args):\n global logging\n if logging.getLogger().isEnabledFor(level):\n evaled_args = map(lambda a: a() if callable(a) else a, args)\n logging.log(level, *evaled_args)\n\n\nlogger = AttrDict()\nlogger.critical = lambda *args: pc_logging(logging.CRITICAL, *args)\nlogger.debug = lambda *args: pc_logging(logging.DEBUG, *args)\nlogger.error = lambda *args: pc_logging(logging.ERROR, *args)\nlogger.info = lambda *args: pc_logging(logging.INFO, *args)\nlogger.warning = lambda *args: pc_logging(logging.WARNING, *args)\n","sub_path":"utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609644740","text":"from django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.contrib import admin\nadmin.autodiscover()\nurlpatterns = [\n # Examples:\n url(r'^$', 'glass.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^servicios', 'glass.views.servicios', name='servicios'),\n url(r'^detalleservice/(\\d+)$', 'glass.views.detalleservice', name='detalleservice'),\n url(r'^contacto', 'glass.views.contacto', name='contacto'),\n url(r'^fotos', 'glass.views.fotos', name='fotos'),\n url(r'^quienes', 'glass.views.quienes', name='quienes'),\n url(r'^ubicacion', 'glass.views.ubicacion', name='ubicacion'),\n url(r'^login', 'glass.views.login', name='login'),\n url(r'^conf', 'glass.views.conf', name='conf'),\n url(r'^cfotos', 'glass.views.cfotos', name='cfotos'),\n url(r'^cservicios', 'glass.views.cservicios', name='cservicios'),\n url(r'^banner', 'glass.views.banner', name='banner'),\n url(r'^info', 'glass.views.info', name='info'),\n url(r'^gallery', 'glass.views.gallery', name='gallery'),\n url(r'^ccontacto', 'glass.views.ccontacto', name='ccontacto'),\n url(r'^slideupdate/(\\d+)$', 'glass.views.slideupdate', name='slideupdate'),\n url(r'^agregarinfoservi/(\\d+)$', 'glass.views.agregarinfoservi', name='agregarinfoservi'),\n url(r'^editarinfoservi/(\\d+)$', 'glass.views.editarinfoservi', name='editarinfoservi'),\n url(r'^eliminarslide/(\\d+)$', 'glass.views.eliminarslide', name='eliminarslide'),\n url(r'^eliminarfoto/(\\d+)$', 'glass.views.eliminarfoto', name='eliminarfoto'),\n url(r'^editarfoto/(\\d+)$', 'glass.views.editarfoto', name='editarfoto'),\n url(r'^eliminarinfo/(\\d+)$', 'glass.views.eliminarinfo', name='eliminarinfo'),\n url(r'^eliminarserivicio/(\\d+)$', 'glass.views.eliminarserivicio', name='eliminarserivicio'),\n url(r'^editarservicio/(\\d+)$', 'glass.views.editarservicio', name='editarservicio'),\n url(r'^eliminarbanner/(\\d+)$', 'glass.views.eliminarbanner', name='eliminarbanner'),\n url(r'^editarbanner/(\\d+)$', 'glass.views.editarbanner', name='editarbanner'),\n\n url(r'^media/(?P<path>.*)$','django.views.static.serve',{'document_root':settings.MEDIA_ROOT,}),\n]\n","sub_path":"autoglass/autoglass/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438374365","text":"\"\"\"\r\nAgglomerative clustering algorithm\r\n\"\"\"\r\n# Common scientific computing packages\r\n# import numpy as np\r\n# import 
pandas as pd\r\nimport matplotlib.pyplot as plt\r\n# scikit-learn's built-in datasets\r\nfrom sklearn.datasets import make_blobs\r\n# import the agglomerative clustering algorithm\r\nfrom sklearn.cluster import AgglomerativeClustering\r\n# import the PCA algorithm\r\n# from sklearn.decomposition import PCA\r\n# import the NMF algorithm\r\n# from sklearn.decomposition import NMF\r\n# import the SciPy scientific computing package\r\nfrom scipy.cluster.hierarchy import dendrogram, ward\r\n\r\n# instantiate the data\r\nX, y = make_blobs(random_state=1)\r\n\r\n# apply the algorithm\r\nagg = AgglomerativeClustering(n_clusters=3)\r\nassignment = agg.fit_predict(X)\r\n\r\n# visualize the cluster assignments\r\nplt.figure()\r\nplt.scatter(X[:, 0], X[:, 1], c=assignment)\r\nplt.xlabel(\"Feature 0\")\r\nplt.ylabel(\"Feature 1\")\r\n\r\n# hierarchical clustering and the dendrogram\r\nX0, y0 = make_blobs(random_state=0, n_samples=12)\r\n# apply ward clustering to the array X0; scipy's ward function returns an array\r\n# specifying the distances bridged when performing agglomerative clustering\r\nlinkage_array = ward(X0)\r\n# now plot the dendrogram for the linkage_array containing the distances between clusters\r\nplt.figure()\r\ndendrogram(linkage_array)\r\n# mark the places in the tree where it is cut into two or three clusters\r\nax = plt.gca()\r\nbounds = ax.get_xbound()\r\nax.plot(bounds, [7.25, 7.25], '--', c='k')\r\nax.plot(bounds, [4, 4], '--', c='k')\r\nax.text(bounds[1], 7.25, 'two clusters', va='center', fontdict={'size': 15})\r\nax.text(bounds[1], 4, 'three clusters', va='center', fontdict={'size': 15})\r\nplt.xlabel(\"sample index\")\r\nplt.ylabel(\"cluster distance\")\r\nplt.show()\r\n\"\"\"\r\n2019.7.22\r\nNotes on learning the agglomerative clustering algorithm:\r\n 1. Agglomerative clustering cannot predict on new data, i.e. there is no predict function, only fit, although fit_predict is available.\r\n 2. Like k-means, agglomerative clustering also requires the number of clusters to be specified, which we do not know in real situations.\r\n 3. Compared with k-means, agglomerative clustering shows the hierarchy and dendrogram better and is more intuitive overall, but k-means can make predictions for new data sets.\r\nNewly learned data visualization method:\r\n # mark the places in the tree where it is cut into two or three clusters\r\n ax = plt.gca()\r\n # get the bounds\r\n bounds = ax.get_xbound()\r\n # draw lines across the bounds\r\n ax.plot(bounds, [7.25, 7.25], '--', c='k')\r\n ax.plot(bounds, [4, 4], '--', c='k')\r\n # add text at fixed positions on the bounds\r\n ax.text(bounds[1], 7.25, 'two clusters', va='center', fontdict={'size': 15})\r\n ax.text(bounds[1], 4, 'three clusters', va='center', fontdict={'size': 15})\r\n\"\"\"","sub_path":"Lesson12.py","file_name":"Lesson12.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"333662886","text":"from __future__ import print_function\nimport appdaemon.plugins.hass.hassapi as hass\nimport time\nimport sys\nimport logging\n\nimport pychromecast\nimport pychromecast.controllers.dashcast as dashcast\n\n\nclass Dashcast(hass.Hass):\n def initialize(self):\n self.log(\"Init Dashcast\")\n\n self.listen_event(self.cast, \"DASHCAST\")\n\n def cast(self, event_name, data, *kwargs):\n receiver = data.get('receiver')\n url = data.get('url')\n\n self.log(\"Casting {} to {}\".format(url, receiver))\n\n if not receiver or not url:\n self.log('Both receiver and url must be specified')\n raise Exception\n\n cast = pychromecast.Chromecast(receiver)\n\n # casts = pychromecast.get_chromecasts()\n # if not casts:\n # self.log(\"No Devices Found\")\n # raise Exception\n #\n # cast_matches = list(filter(lambda x: x.device.friendly_name == receiver, casts))\n # self.log(cast_matches)\n #\n # if not cast_matches:\n # self.log('No cast device found with that name')\n # raise Exception\n #\n # cast = cast_matches[0]\n\n d = dashcast.DashCastController()\n cast.register_handler(d)\n\n print()\n print(cast.device)\n time.sleep(1)\n print()\n print(cast.status)\n print()\n print(cast.media_controller.status)\n print()\n\n if not cast.is_idle:\n print(\"Killing current running app\")\n cast.quit_app()\n time.sleep(5)\n\n time.sleep(1)\n\n # Test that the callback chain 
works. This should send a message to\n # load the first url, but immediately after send a message load the\n # second url.\n\n warning_message = 'If you see this on your TV then something is broken'\n\n d.load_url('https://home-assistant.io/? ' + warning_message,\n callback_function=lambda result:\n d.load_url(url))\n","sub_path":"appdaemon/apps/dashcast.py","file_name":"dashcast.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"451139411","text":"\n# coding: utf-8\n\n# In[19]:\n\n\nimport numpy as np\nimport random\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\nimport sys\n\n#initializing stemmer\ntokenizer = RegexpTokenizer(r'\\w+')\nen_stop = set(stopwords.words('english'))\np_stemmer = PorterStemmer()\n\n# function that takes an input file and performs stemming to generate the output file\ndef getStemmedDocument(doc):\n raw = doc.replace(\"<br /><br />
\", \" \")\n tokens = tokenizer.tokenize(raw)\n temp = []\n for i in range(len(tokens)-2):\n if tokens[i]=='not':\n tokens[i+1] = 'not_' + tokens[i+1]\n tokens[i+2] = 'not_' + tokens[i+2]\n for word in tokens:\n if any(x.isupper() for x in word):\n temp.append(word)\n for i in range(len(tokens)-1):\n temp.append(tokens[i]+\"_\"+tokens[i+1])\n for ele in temp:\n tokens.append(ele)\n raw = []\n for ele in tokens:\n raw.append(ele.lower())\n stopped_tokens = [token for token in raw if token not in en_stop]\n stemmed_tokens = [p_stemmer.stem(token) for token in stopped_tokens]\n documentWords = ' '.join(stemmed_tokens)\n return documentWords\n\ndef read_document(filename):\n\tfile = open(filename, 'r', errors='ignore')\n\trev = file.readlines()\n\treturn rev\n\n\n# In[20]:\n\n\ntrain_rev_un = read_document(\"imdb/imdb_train_text.txt\")\ntrain_labels_str = read_document(\"imdb/imdb_train_labels.txt\")\ntest_rev_un = read_document(\"imdb/imdb_test_text.txt\")\ntest_labels_str = read_document(\"imdb/imdb_test_labels.txt\")\n\n\n# In[21]:\n\n\ndef transform(matr, class_prob):\n\tglobal vocab_dict\n\tlgt = len(matr)\n\ti=0\n\twhile(i5):\n\t\t\t\tflag = 1\n\t\tif (flag==0):\n\t\t\tfor j in range(0, 8):\n\t\t\t\tmatr[i][j]=0\n\t\ti += 1\n\tmatr = matr + 1\n\tdenom = np.sum(matr, axis = 0)\n\tdenom = np.reshape(denom, [1,8])\n\tlog_den = np.log(denom)\n\tlog_prob = np.log(class_prob)\n\tlog_matr = np.log(matr)\n\tlog_matr = log_matr - log_den\n\treturn log_matr, log_prob\n\ndef naive_bayes(matr, x, y):\n\treturn matr[x][y]\n\ndef create_vocab(lst):\n global vocab\n lng = len(lst)\n temp = set()\n for i in range(0, lng):\n vocab.add(lst[i])\n\ndef rat_to_label(rating):\n\tif rating<5:\n\t\treturn rating - 1\n\telse:\n\t\treturn rating - 3\n\ndef label_to_rat(label):\n\tif label<4:\n\t\treturn label + 1\n\telse:\n\t\treturn label + 3\n\n\n# In[22]:\n\n\ntrain_labels = []\n\nfor i in range(len(train_labels_str)):\n\ttrain_labels.append(int(train_labels_str[i]))\n\nclass_prob = np.zeros([8])\n\nfor i in range(len(train_labels)):\n\tclass_prob[rat_to_label(train_labels[i])] += 1\n \ntest_labels = []\n\nfor i in range(len(test_labels_str)):\n\ttest_labels.append(int(test_labels_str[i]))\n\ntrain_rev = []\nfor sent in train_rev_un:\n train_rev.append(getStemmedDocument(sent))\n\ntest_rev = []\nfor sent in test_rev_un:\n test_rev.append(getStemmedDocument(sent))\n\n\n# In[23]:\n\n\ncleaned_rev = []\nvocab = set()\nprint(\"Cleaning data\")\nfor sent in train_rev: \n\tcleaned_rev.append(sent.split())\n\tcreate_vocab(sent.split())\n\nprint(\"Creating vocab\")\n\nvocab_dict = {}\nite = 0\nfor ele in vocab:\n\tvocab_dict[ele] = ite\n\tite = ite + 1\n\ncleaned_test_rev = []\nprint(\"Cleaning Test data\")\nfor sent in test_rev:\n\tcleaned_test_rev.append(sent.split())\n\n\n# In[24]:\n\n\nmax_val = -1\nmax_cls = 0\nfor i in range(len(class_prob)):\n\tif class_prob[i]>max_val:\n\t\tmax_val = class_prob[i]\n\t\tmax_cls = i\n\nclass_prob = class_prob/len(train_labels)\n\nprint(\"Creating Matrix\")\n\nnew_matr = np.zeros([ite,8])\n\nprint(np.shape(cleaned_rev))\n\nfor i in range(len(train_labels)):\n\tlabel = rat_to_label(train_labels[i])\n\trev = cleaned_rev[i]\n\tlngt = len(rev)\n\tfor j in range(lngt):\n\t\trow = vocab_dict.get(rev[j])\n\t\tnew_matr[row][label] += 1\n\nprint(\"Log Matrix creation\")\n\nlog_matrix , log_class = transform(new_matr, class_prob)\n\nite = 0\nfor ele in vocab_dict.keys():\n\tvocab_dict[ele] = ite\n\tite = ite + 1\n\n\n# In[25]:\n\n\ntotal_count = 0\ncorr_count_nb = 0\n\nconfusion = 
np.zeros([8, 8])\n\nfor j in range(len(cleaned_test_rev)):\n\tif j%5000==0:\n\t\tprint(j)\n\tsent = cleaned_test_rev[j]\n\tmax_class = 0\n\tmax_prob = 0.0\n\tfor i in range(0, 8):\n\t\tsum_prob = log_class[i]\n\t\tfor it in range(len(sent)):\n\t\t\tif sent[it] in vocab_dict:\n\t\t\t\tsum_prob = sum_prob + log_matrix[vocab_dict[sent[it]]][i]\n\t\tif i==0:\n\t\t\tmax_prob = sum_prob\n\t\t\tmax_class = i\n\t\tif sum_prob>max_prob:\n\t\t\tmax_prob = sum_prob\n\t\t\tmax_class = i\n\tif label_to_rat(max_class)==test_labels[j]:\n\t\tcorr_count_nb = corr_count_nb + 1\n\ttotal_count = total_count + 1\n\tconfusion[max_class][rat_to_label(test_labels[j])] += 1\n\nfor i in range(0, 8):\n\tfor j in range(0, 8):\n\t\tprint(int(confusion[i][j]), \"\\t\", end='')\n\tprint()\n\nprint(\"Accuracy NB :\", (corr_count_nb+0.0)/total_count)\n\n\n# In[26]:\n\n\nimport pickle\nwith open('log_matrix_2.pkl', 'wb') as f:\n pickle.dump(log_matrix, f)\nwith open('log_class_2.pkl', 'wb') as f:\n pickle.dump(log_class, f)\n\n\n# In[27]:\n\n\nwith open('vocab_dict_2.pkl', 'wb') as f:\n pickle.dump(vocab_dict, f)\n\n","sub_path":"training_code/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416854859","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nimport numpy as np\nimport os\nimport pandas as pd\nimport abc\nfrom scipy import stats\nfrom text.similarity.text_similarity import TextSemanticSimilarity\nfrom decimal import Decimal\n\nclass USE_Transformer_Similarity(TextSemanticSimilarity):\n\n module_path = ''\n sentences_1 = []\n sentences_2 = []\n annotated_score = []\n\n def read_dataset(self, fileNames, *args, **kwargs):\n file_type = os.path.splitext(fileNames)[-1][1:]\n if file_type == 'csv':\n print(\"reading input csv file \")\n df = pd.read_csv(fileNames)\n self.sentences_1 = list(df['sentence_A'])\n self.sentences_2 = list(df['sentence_B'])\n self.annotated_score = list(df['relatedness_score'])\n del df\n elif file_type == 'txt':\n print(\"reading input txt file\")\n df = pd.read_csv(fileNames,names=['sentence_A','sentence_B'])\n self.sentences_1 = list(df['sentence_A'])\n self.sentences_2 = list(df['sentence_B'])\n del df\n else:\n print(\"sorry, the DataSet format should be csv or txt\")\n exit(-1)\n\n def train(self, *args, **kwargs):\n pass\n def save_model(self, *args, **kwargs):\n pass\n def load_model(self,arg_path):\n print(\"loading model...\")\n self.module_path = arg_path\n def generate_embeddings(self, input_list, *args, **kwargs):\n pass\n\n def predict(self, data_X, data_Y, *args, **kwargs):\n tf.logging.set_verbosity(tf.logging.ERROR)\n # loading module\n encoder = hub.Module(self.module_path,trainable=True)\n # set tf.placeholder variables\n sentences1 = tf.placeholder(tf.string, shape=(None))\n sentences2 = tf.placeholder(tf.string, shape=(None))\n # generate embeddings and norm it\n sts_embed_1 = tf.nn.l2_normalize(encoder(sentences1), axis=1)\n sts_embed_2 = tf.nn.l2_normalize(encoder(sentences2), axis=1)\n # compute similarity of two embeddings\n cosine_similarities = tf.reduce_sum(tf.multiply(sts_embed_1, sts_embed_2), axis=1)\n clip_cosine_similarities = tf.clip_by_value(cosine_similarities, -1.0, 1.0)\n ag_sim_scores = 1.0 - tf.acos(clip_cosine_similarities)\n print(\"encoding sentences and calculating sim scores, may take a while... 
\")\n # run tf session and transfer data to tf.placeholder variables by feed_dict\n with tf.Session() as session:\n session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n sim_scores = session.run(cosine_similarities, feed_dict={sentences1: data_X, sentences2: data_Y})\n print(\"finished\")\n return sim_scores.tolist()\n\n def evaluate(self, actual_values, predicted_values, *args, **kwargs):\n pearson_correlation = stats.pearsonr(predicted_values, actual_values)\n r_score = Decimal(pearson_correlation[0]).quantize(Decimal('0.00'))\n print('Pearson correlation coefficient = {0}'.format(\n r_score))\n","sub_path":"text/similarity/USE_Transformer/src/USE_Transformer.py","file_name":"USE_Transformer.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355079717","text":"# Import tasks from json, csv, zip, txt and more\n\nimport os\nimport csv\nimport hashlib\nimport shutil\nimport zipfile\nimport rarfile\nimport logging\nimport tempfile\nimport pandas as pd\ntry:\n import ujson as json\nexcept:\n import json\n\nfrom os.path import join\nfrom urllib.request import urlopen\n\nfrom .exceptions import ValidationError\nfrom .misc import Settings\nfrom label_studio.utils.functions import HOSTNAME\n\n\nsettings = Settings\nlogger = logging.getLogger(__name__)\ncsv.field_size_limit(131072 * 10)\n\n\ndef tasks_from_file(filename, file, project):\n try:\n if filename.endswith('.csv'):\n tasks = pd.read_csv(file).fillna('').to_dict('records')\n tasks = [{'data': task} for task in tasks]\n elif filename.endswith('.tsv'):\n tasks = pd.read_csv(file, sep='\\t').fillna('').to_dict('records')\n tasks = [{'data': task} for task in tasks]\n elif filename.endswith('.txt'):\n lines = file.read().splitlines()\n tasks = [{'data': {settings.UPLOAD_DATA_UNDEFINED_NAME: line.decode('utf-8')}} for line in lines]\n elif filename.endswith('.json'):\n raw_data = file.read()\n # Python 3.5 compatibility fix https://docs.python.org/3/whatsnew/3.6.html#json\n try:\n tasks = json.loads(raw_data)\n except TypeError:\n tasks = json.loads(raw_data.decode('utf8'))\n else:\n # save file to disk\n data = file.read()\n upload_dir = os.path.join(project.name, 'upload')\n os.makedirs(upload_dir, exist_ok=True)\n filename = hashlib.md5(data).hexdigest() + '-' + filename\n path = os.path.join(upload_dir, filename)\n open(path, 'wb').write(data)\n # prepare task\n tasks = [{'data': {settings.UPLOAD_DATA_UNDEFINED_NAME: HOSTNAME + '/upload/' + filename}}]\n\n except Exception as exc:\n raise ValidationError('Failed to parse input file ' + filename + ': ' + str(exc))\n\n # null in file\n if tasks is None:\n raise ValidationError('null in ' + filename + ' is not allowed')\n\n # one task as dict\n elif isinstance(tasks, dict):\n tasks = [tasks]\n\n # list\n elif isinstance(tasks, list):\n return tasks\n\n # something strange\n else:\n raise ValidationError('Incorrect task type in ' + filename + ': \"' + str(str(tasks)[0:100]) + '\". 
'\n 'Only \"dict\" or \"list of dicts\" is allowed')\n\n return tasks\n\n\ndef create_and_release_temp_dir(func):\n def wrapper(*args, **kwargs):\n with tempfile.TemporaryDirectory(prefix='htx_') as temp_dir:\n return func(temp_dir=temp_dir, *args, **kwargs)\n return wrapper\n\n\ndef extract_archive(archive, filename, temp_dir):\n \"\"\" Extract all files from the archive and return the extracted file names\n\n :param archive: ZipFile or similar interface instance\n :param filename: zip filename\n :param temp_dir: temporary dir\n :return: extracted file names\n \"\"\"\n final_dir = join(temp_dir, filename)\n names = {join(final_dir, name): 'archive' for name in archive.namelist()}\n logger.info('ZIP archive {filename} found with {names} files inside, extracting to {final_dir}'\n .format(filename=filename, names=len(names), final_dir=final_dir))\n\n archive.extractall(final_dir)\n logger.info('ZIP archive {filename} extracted successfully'.format(filename=filename))\n return names\n\n\ndef check_max_task_number(tasks):\n # max tasks\n if len(tasks) > settings.TASKS_MAX_NUMBER:\n raise ValidationError('Maximum task number is {TASKS_MAX_NUMBER}, '\n 'current task number is {num_tasks}'\n .format(TASKS_MAX_NUMBER=settings.TASKS_MAX_NUMBER, num_tasks=len(tasks)))\n\n\ndef check_file_sizes_and_number(files):\n total = sum([file.size for _, file in files.items()])\n\n if total >= settings.TASKS_MAX_FILE_SIZE:\n raise ValidationError('Maximum total size of all files is {TASKS_MAX_FILE_SIZE} bytes, '\n 'current size is {total} bytes'\n .format(TASKS_MAX_FILE_SIZE=settings.TASKS_MAX_FILE_SIZE, total=total))\n\n\ndef aggregate_files(request_files, temp_dir):\n files = {}\n\n # extract all files from archives to temp dir\n for filename, file in request_files.items():\n\n # read urlopen till end and save this file\n if hasattr(file, 'urlopen') and (filename.endswith('.zip') or filename.endswith('.rar')):\n path = os.path.join(temp_dir, 'current_file')\n with open(path, 'wb') as current_file:\n shutil.copyfileobj(file, current_file)\n current_file.close()\n file = path # rewrite file as path\n\n # zip\n if filename.endswith('.zip'):\n with zipfile.ZipFile(file, 'r') as archive:\n names = extract_archive(archive, filename, temp_dir)\n files.update(names)\n\n # rar\n elif filename.endswith('.rar'):\n with rarfile.RarFile(file, 'r') as archive:\n names = extract_archive(archive, filename, temp_dir)\n files.update(names)\n\n # other\n else:\n files[filename] = file\n\n return files\n\n\ndef aggregate_tasks(files, project):\n tasks = []\n\n # scan all files\n for filename, file in files.items():\n # extracted file from archive\n if file == 'archive':\n with open(filename) as f:\n tasks += tasks_from_file(filename, f, project)\n # file from request\n else:\n tasks += tasks_from_file(filename, file, project)\n\n check_max_task_number(tasks)\n\n return tasks\n\n\n@create_and_release_temp_dir\ndef load_tasks(request, project, temp_dir):\n \"\"\" Load tasks from different types of request.data / request.files\n \"\"\"\n # take tasks from request FILES\n if len(request.FILES):\n # check_file_sizes_and_number(request.FILES)\n files = aggregate_files(request.FILES, temp_dir)\n tasks = aggregate_tasks(files, project)\n\n # take tasks from url address\n elif 'application/x-www-form-urlencoded' in request.content_type:\n try:\n url = request.data['url']\n with urlopen(url) as file:\n # check size\n meta = file.info()\n file.size = int(meta.get(\"Content-Length\"))\n file.urlopen = True\n request_files = {url: file}\n 
check_file_sizes_and_number(request_files)\n\n # start parsing\n files = aggregate_files(request_files, temp_dir)\n tasks = aggregate_tasks(files, project)\n\n except ValidationError as e:\n raise e\n except Exception as e:\n raise ValidationError(str(e))\n\n # take one task from request DATA\n elif 'application/json' in request.content_type and isinstance(request.data, dict):\n tasks = [request.data]\n\n # take many tasks from request DATA\n elif 'application/json' in request.content_type and isinstance(request.data, list):\n tasks = request.data\n\n # incorrect data source\n else:\n raise ValidationError('load_tasks: No data found in DATA or in FILES')\n\n # check is data root is list\n if not isinstance(tasks, list):\n raise ValidationError('load_tasks: Data root must be list')\n\n # empty tasks error\n if not tasks:\n raise ValidationError('load_tasks: No tasks added')\n\n check_max_task_number(tasks)\n return tasks\n","sub_path":"label_studio/utils/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67169970","text":"from django import template\nfrom powerschool_apps.health_care_plan.models import Allergy, AllergyMedication, Asthma, Diabetes\n\nregister = template.Library()\n\n\n@register.filter('allergens_choice_list')\ndef allergens_choice_list(allergy_inst):\n if allergy_inst.allergens != ['']:\n choice_label_list = []\n for allergen in allergy_inst.allergens:\n if allergen == 'INSECT_STINGS':\n choice_label = dict(Allergy.ALLERGENS_CHOICES)[allergen]\n choice_label_with_insect_stings = choice_label + ' (' + allergy_inst.insect_stings + ')'\n choice_label_list.append(choice_label_with_insect_stings)\n elif allergen == 'OTHER':\n choice_label = dict(Allergy.ALLERGENS_CHOICES)[allergen]\n choice_label_with_other = choice_label + ' (' + allergy_inst.other_allergy + ')'\n choice_label_list.append(choice_label_with_other)\n else:\n choice_label_list.append(dict(Allergy.ALLERGENS_CHOICES)[allergen])\n\n return ', '.join(choice_label_list)\n else:\n return ''\n\n\n@register.filter('medication_locations_list')\ndef medication_locations_list(medication):\n if medication.locations != ['']:\n locations_labels_list = []\n for location in medication.locations:\n if location == 'OTHER':\n choice_label = dict(AllergyMedication.LOCATIONS_CHOICES)[location]\n choice_label_with_other = choice_label + ' (' + medication.other_location + ')'\n locations_labels_list.append(choice_label_with_other)\n else:\n locations_labels_list.append(dict(AllergyMedication.LOCATIONS_CHOICES)[location])\n\n return ', '.join(locations_labels_list)\n else:\n return ''\n\n\n@register.filter('asthma_green_triggers_list')\ndef asthma_green_triggers_list(asthma_inst):\n if asthma_inst.triggers != ['']:\n triggers_labels_list = []\n for trigger in asthma_inst.triggers:\n if trigger == 'OTHER':\n choice_label = dict(Asthma.TRIGGERS_CHOICES)[trigger]\n choice_label_with_other = choice_label + ' (' + asthma_inst.green_other_trigger + ')'\n triggers_labels_list.append(choice_label_with_other)\n else:\n triggers_labels_list.append(dict(Asthma.TRIGGERS_CHOICES)[trigger])\n\n return ', '.join(triggers_labels_list)\n else:\n return ''\n\n\n@register.filter('green_quick_relief_list')\ndef green_quick_relief_list(asthma_inst):\n if asthma_inst.green_quick_relief != ['']:\n green_quick_relief_label_list = []\n for relief in asthma_inst.green_quick_relief:\n if relief == 'OTHER':\n choice_label = 
dict(Asthma.GREEN_QUICK_RELIEF_CHOICES)[relief]\n choice_label_with_other = choice_label + ' (' + asthma_inst.green_quick_relief_other + ')'\n green_quick_relief_label_list.append(choice_label_with_other)\n else:\n green_quick_relief_label_list.append(dict(Asthma.GREEN_QUICK_RELIEF_CHOICES)[relief])\n return ', '.join(green_quick_relief_label_list)\n else:\n return ''\n\n\n@register.filter('diabetes_type')\ndef diabetes_type(diabetes_inst):\n if diabetes_inst.diabetes_type:\n return dict(Diabetes.DIABETES_TYPE_CHOICES)[diabetes_inst.diabetes_type]\n else:\n return ''\n\n","sub_path":"powerschool_apps/health_care_plan/templatetags/choice_tags.py","file_name":"choice_tags.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"291324326","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom django.contrib.auth.views import LoginView\n\nfrom . import views\n\nurlpatterns = [\n path('', views.home),\n # url('search', views.search),\n url('login', views.login),\n url('perform-signin', views.perform_signin),\n url('failed-authentication', views.failed_authentication),\n url('create-account', views.create_account),\n url('enter-user', views.enter_user),\n url('logout', views.logout),\n url('search-similar', views.similar_search),\n url('search-custom', views.custom_search),\n url('results-similar', views.similar_results),\n url('results-custom', views.custom_results),\n url('provide-strain-feedback', views.provide_strain_feedback)\n]","sub_path":"MariWanna/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"637026663","text":"from rest_framework import serializers\nfrom ShadowAiRest.models import Order,Image,ProcessedImage\nfrom django.core.files.base import ContentFile\nimport uuid\nimport base64\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model=Order\n fields=('id', \n 'shadow',\n 'ext', \n 'sizeoption',\n 'req_height',\n 'req_width',\n 'responsed')\n\nclass ImageSerializer(serializers.ModelSerializer):\n class Meta:\n model=Image\n fields=('id',\n 'name',\n 'image', \n 'order')\n\n def create(self, data, orderid):\n '''\n data[0]:single base64 binary string\n data[1]:dataurl header(include original file format)\n '''\n image_name=str(uuid.uuid4())\n ext=''\n \n # str.find() returns -1 when absent (which is truthy), so compare explicitly\n if data[1].find('png') != -1:\n ext='.png'\n elif data[1].find('jp') != -1:\n ext='.jpg'\n else:\n ext='.bmp'\n\n result, created = Image.objects.get_or_create(\n name=image_name,\n image=ContentFile(base64.b64decode(data[0]),\n name=image_name+ext),\n order=orderid\n )\n return result\n\nclass ProcessedImageSerializer(serializers.ModelSerializer):\n class Meta:\n model=ProcessedImage\n fields=(\n 'id',\n 'name',\n 'imagepath',\n 'origin'\n )\n \n def create(self,filename,ext,origin_id):\n prefix='processed/'\n result, created= ProcessedImage.objects.get_or_create(\n name=filename,\n imagepath=prefix+filename,\n origin=origin_id\n )\n return result","sub_path":"ShadowAiDjango/ShadowAiRest/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"6614587","text":"import urllib\nimport oauth2 as oauth\n\nfrom twisted.internet.defer import inlineCallbacks, returnValue\nfrom twisted.internet import reactor\nfrom twisted.web import client\n
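# --- Aside: the input contract of ImageSerializer.create() above, shown on a
# hypothetical data URL. data[0] is the bare base64 payload, data[1] the
# data-URL header that carries the original format.
import base64

data_url = 'data:image/png;base64,aGVsbG8='   # made-up example
header, payload = data_url.split(',', 1)       # header: 'data:image/png;base64'
data = (payload, header)                       # the (base64, header) pair create() expects
raw_bytes = base64.b64decode(data[0])
assert data[1].find('png') != -1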
from twisted.web.http_headers import Headers\n\nimport simplegeo\nfrom simplegeo.twisted.util import StringProducer, receive_body\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nclass Client(simplegeo.Client):\n @inlineCallbacks\n def get_records(self, layer, ids):\n endpoint = self.endpoint('records', layer=layer, ids=','.join(ids))\n features = yield self._request(endpoint, \"GET\")\n returnValue(features.get('features') or [])\n \n @inlineCallbacks\n def _request(self, endpoint, method, data=None):\n body = None\n params = {}\n if method == \"GET\" and isinstance(data, dict):\n endpoint = endpoint + '?' + urllib.urlencode(data)\n else:\n if isinstance(data, dict):\n body = urllib.urlencode(data)\n else:\n body = data\n \n request = oauth.Request.from_consumer_and_token(self.consumer, \n http_method=method, http_url=endpoint, parameters=params)\n request.sign_request(self.signature, self.consumer, None)\n\n headers = request.to_header(self.realm)\n headers['User-Agent'] = 'SimpleGeo Twisted Client v%s' % simplegeo.API_VERSION\n headers = Headers(dict([(k, [v]) for k, v in headers.items()]))\n \n agent = client.Agent(reactor)\n \n response = yield agent.request(method, endpoint, headers, (body and StringProducer(body)))\n \n body = yield receive_body(response)\n \n if body: # Empty body is allowed.\n try:\n body = json.loads(body)\n except ValueError:\n # 'resp' was undefined and DecodeError unqualified; use the response object and the simplegeo namespace\n raise simplegeo.DecodeError(response, body)\n\n if str(response.code)[0] != '2':\n code = str(response.code)\n message = body\n if isinstance(body, dict):\n if 'code' in body:\n code = body['code']\n if 'message' in body:\n message = body['message']\n\n raise simplegeo.APIError(code, message, response.headers)\n\n returnValue(body)","sub_path":"simplegeo/twisted/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"490500288","text":"from MyCode.utils import DataReader\n\n__author__ = 'eweiwi'\n\nimport os\n\nif __name__ == '__main__':\n\texePath = '/home/eweiwi/download/dense_trajectory_release/release/DenseTrack'\n\tparams = ' -L 15'\n\tfeaturesPath = '/home/eweiwi/phd_work/datasets/kth_dense_trajectories/'\n\tx = DataReader('kth_meta_data.txt','kth_annotation.txt' , 'kth_annotation_text.txt' ,'/home/eweiwi/phd_work/datasets/kth_seperate/')\n\n\tif not os.path.exists(featuresPath):\n\t\tos.mkdir(featuresPath)\n\tx._make_metadata()\n\n\tx._make_annotation_from_matadata()\n\n\tstructured_data = x._read_into_structure_array()\n\tfor i in range (structured_data.shape[0]):\n\t\tvideoPath = structured_data[i]['path'][5:]\n\t\tcommand = '%s %s %s| gzip > %s.gz'%(exePath,videoPath,params,featuresPath+str(i))\n\t\tos.system(command)\n","sub_path":"MyCode/deprecated/denseTrajectories.py","file_name":"denseTrajectories.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"371013184","text":"def ESieve(a, b):\n#This function takes 2 integers in ascending order, and returns a list of all prime numbers between the 2 integers\n if a>=b:\n #Checks that the range is valid. 
(The first integer is less than the second.)\n return \"invalid range\"\n else:\n numbers = []\n #Creates a list of all numbers from which we'll sieve\n for x in range(2, b+1):\n numbers.append(x)\n currentPrime = 0\n #creates index of the current prime whose multiples we're trying to remove\n stillHaveMorePrimes = True\n #keep iterating until the current prime == b\n while stillHaveMorePrimes:\n checkingNumber = currentPrime + 1\n #creates index of the number we will check if multiple\n stillHaveMoreNumbers = True\n while stillHaveMoreNumbers:\n if checkingNumber >= len(numbers):\n #Then we have run out of numbers\n stillHaveMoreNumbers = False\n elif numbers[checkingNumber] % numbers[currentPrime] == 0:\n numbers.remove(numbers[checkingNumber])\n #check if the number \"checkingNumber\" is a multiple of our current prime\n #and remove if it is\n else:\n checkingNumber=checkingNumber+1\n #if it isn't, we move to the next number in \"numbers\"\n if currentPrime == len(numbers):\n stillHaveMorePrimes = False\n else:\n currentPrime = currentPrime + 1\n #the next number on the list will always be a prime because we have used previous\n #numbers to eliminate nonprimes\n #when the next number == the last number, we stop iterating\n fixedRange = False\n while fixedRange == False:\n if numbers[0] < a:\n numbers.remove(numbers[0])\n else:\n fixedRange = True\n #sets the lower range of the list\n #removes all prime numbers in \"numbers\" that are less than a\n return numbers\n #gives the list of primes or returns invalid range\n","sub_path":"Problem5/Sieve.py","file_name":"Sieve.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639708537","text":"import uuid\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom pip import req\n\n_here = os.path.dirname(__file__)\n_install_requirements = req.parse_requirements(\n 'requirements.txt', session=uuid.uuid1())\n\n\nsetup(\n name='jetway',\n version=open(os.path.join(_here, 'jetway', 'VERSION')).read().strip(),\n description=(\n 'Client library for the Jetway static site staging service.'\n ),\n url='https://github.com/grow/jetway-client',\n license='MIT',\n author='Grow SDK Authors',\n author_email='hello@grow.io',\n include_package_data=True,\n install_requires=[str(ir.req) for ir in _install_requirements],\n packages=find_packages(),\n keywords=[\n 'grow',\n 'cms',\n 'static site generator',\n 's3',\n 'google cloud storage',\n 'content management'\n ],\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ])\n","sub_path":"pypi_install_script/jetway-0.0.512.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"560126450","text":"#!/usr/bin/python\n\nimport os, socket\nimport q2, assemble\n\n\nHOST = '127.0.0.1'\nSERVER_PORT = 8000\nLOCAL_PORT = 1337\n\n\nASCII_MAX = 0x7f\n\n\ndef get_raw_shellcode():\n return q2.get_shellcode()\n\n\ndef get_shellcode():\n raw_shellcode = 
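# --- Aside: a compact boolean-array Sieve of Eratosthenes over the same
# [a, b] range, for comparison with the list-removal version above. This is
# an alternative sketch, not the implementation used by ESieve().
def esieve_fast(a, b):
    if a >= b:
        return "invalid range"
    is_prime = [True] * (b + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(b ** 0.5) + 1):
        if is_prime[p]:
            # cross off every multiple of p starting at p*p
            for multiple in range(p * p, b + 1, p):
                is_prime[multiple] = False
    return [n for n in range(max(a, 2), b + 1) if is_prime[n]]

print(esieve_fast(10, 30))  # [11, 13, 17, 19, 23, 29]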
get_raw_shellcode()\n # This function returns the machine code (bytes) of the shellcode.\n decoder = []\n encoded_shellcode = bytearray(raw_shellcode)\n\n # PUT in EAX the start of the shellcode\n decoder.extend(['PUSH ESP', 'POP EAX'])\n decoder += ['DEC EAX']*(4+len(raw_shellcode))\n # PUT 0x80 into BL\n decoder.extend(['PUSH 0x7f', 'POP EBX', 'INC EBX'])\n for idx, byte in enumerate(encoded_shellcode):\n if idx > 0 and idx % 128 == 0:\n # ADD 128 to EAX so the offset address will be 8bit\n decoder += ['INC EAX']*128\n if byte >= ord('\x80'):\n encoded_shellcode[idx] = byte ^ ord('\x80')\n decoder.append('XOR BYTE PTR [EAX+'+str(idx%128)+'], BL')\n encoded_shellcode = str(encoded_shellcode)\n\n assembly_decoder = assemble.assemble_data('\n'.join(decoder))\n return assembly_decoder + encoded_shellcode\n\n\ndef get_payload():\n '''This function returns the data to send over the socket to the server.\n \n This includes everything - the 4 bytes for size, the nop slide, the\n shellcode and the return address.'''\n\n shell_code = get_shellcode()\n # I use the command INC EAX as the nop slide, as it will get its real value at the start of the decoder anyway\n nop_slide = (1040-len(shell_code)) * '\x40'\n jump_address = int('0xbfffdd1c', 16)\n jump_address = q2.format_address(jump_address + len(nop_slide)//2)\n return q2.network_order_uint32(1044) + nop_slide + shell_code + jump_address\n\ndef main():\n payload = get_payload()\n conn = socket.socket()\n conn.connect((HOST, SERVER_PORT))\n try:\n conn.sendall(payload)\n finally:\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"4_RCE/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"195907183","text":"import socket\nimport string, os, random\n\n\nclass Library:\n\n # @staticmethod\n # def geo_latlng():\n # data = socket.gethostbyname(socket.getfqdn())\n # geo = geocoder.ip(data)\n # geo = geocoder.ip(\"me\")\n # return geo.latlng\n\n @staticmethod\n def user_type(u_type):\n return {0: \"Admin\", 1: \"Consumer\", 2: \"Retailer\", 3: \"Vendor\"}[u_type]\n\n @staticmethod\n def sub_key_gen():\n length = 11\n chars = string.ascii_letters + string.digits\n # seed() must be called, not assigned to, or the RNG is never reseeded\n random.seed(os.urandom(1024))\n return ''.join(random.choice(chars) for i in range(length))\n\n\n\n","sub_path":"src/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"618460515","text":"from django.conf.urls import include, url\nfrom . 
import views \n\nurlpatterns = [\n\turl(r'^martilleros/editar/(?P<pk>[0-9]+)/$', views.editar_martillero, name='editar_martillero'),\n\turl(r'^mandantes/editar/(?P<pk>[0-9]+)/$', views.editar_mandante, name='editar_mandante'),\n\turl(r'^martilleros/detalles/(?P<pk>[0-9]+)/$', views.detalles_martillero, name='detalles_martillero'),\n\turl(r'^mandantes/detalles/(?P<pk>[0-9]+)/$', views.detalles_mandante, name='detalles_mandante'),\n\turl(r'^martilleros/nuevo/$', views.nuevo_martillero, name='nuevo_martillero'),\n\turl(r'^mandantes/nuevo/$', views.nuevo_mandante, name='nuevo_mandante'),\n\turl(r'^martilleros/eliminar/(?P<pk>[0-9]+)/$', views.eliminar_martillero, name='eliminar_martillero'),\n\turl(r'^mandantes/eliminar/(?P<pk>[0-9]+)/$', views.eliminar_mandante, name='eliminar_mandante'),\t\t\n\turl(r'^martilleros/$', views.listado_martilleros, name='listado_martilleros'),\n\turl(r'^mandantes/$', views.listado_mandantes, name='listado_mandantes'),\n]","sub_path":"gestorSubastas/actores/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"364367449","text":"# =========================================================================\n# Andrew's Monotone Chain Convex Hull Algorithm (reduced complexity)\n# Link: http://www.algorithmist.com/index.php/Monotone_Chain_Convex_Hull\n\n# LeetCode 587\nclass Solution:\n def outerTrees(self, points):\n \"\"\"\n :type points: List[Point]\n :rtype: List[Point]\n \"\"\"\n N = len(points)\n if N <= 3: \n return points\n lb = min(points, key = lambda p: (p.y, p.x))\n ccw = lambda p1, p2, p3: (p2.x - p1.x) * (p3.y - p1.y) - (p2.y - p1.y) * (p3.x - p1.x)\n points.sort(key = lambda p: (p.x, p.y))\n lo = []\n for x in range(N):\n while len(lo) > 1 and ccw(lo[-2], lo[-1], points[x]) < 0:\n lo.pop()\n lo.append(points[x])\n up = []\n for x in range(N - 1, -1, -1):\n while len(up) > 1 and ccw(up[-2], up[-1], points[x]) < 0:\n up.pop()\n up.append(points[x])\n return lo[:-1] + up[:-1]\n","sub_path":"Convex Hull/Andrew's Monotone Chain (凸包).py","file_name":"Andrew's Monotone Chain (凸包).py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"229441466","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 12 22:18:47 2020\n\n@author: Mahmood Yousaf\n\"\"\"\ndef findWaitingTime(process, n, burst_time, waiting_time, arrival_time):\n serve_time = [0]*n\n serve_time[0]=0\n waiting_time[0]=0\n #loop to calculate each process waiting time\n for i in range(1, n):\n serve_time[i] = (serve_time[i - 1] + burst_time[i - 1])\n waiting_time[i] = serve_time[i] - arrival_time[i] \n #a negative waiting time means the process had not yet arrived, so it starts immediately\n if waiting_time[i] < 0:\n waiting_time[i] = 0\n#function to calculate turn around time for each process\ndef findTurnAroundTime(process, n, burst_time, waiting_time, turn_around_time):\n for i in range(n):\n turn_around_time[i] = burst_time[i] + waiting_time[i] \ndef find_avg_time(process, n, burst_time, arrival_time):\n #declaring arrays for calculating average time \n waiting_time = [0] * n \n turn_around_time = [0] * n \n #function to find waiting time for each process\n findWaitingTime(process, n, burst_time, waiting_time, arrival_time) \n #function to find turn around time for each process\n findTurnAroundTime(process, n, burst_time, waiting_time, turn_around_time) \n total_wait_tim = 0\n total_turn_arn_time = 0\n #calculating totals used for the average times
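# --- Aside: the sign convention behind ccw() in the Monotone Chain code
# above, checked on plain (x, y) tuples (the solution above uses Point
# objects with .x/.y attributes).
def ccw(p1, p2, p3):
    return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])

assert ccw((0, 0), (1, 0), (1, 1)) > 0   # left turn: keep the point
assert ccw((0, 0), (1, 0), (2, 0)) == 0  # collinear: kept, since only < 0 pops
assert ccw((0, 0), (1, 0), (1, -1)) < 0  # right turn: pop from the hull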
\n for i in range(n):\n total_wait_tim = total_wait_tim + waiting_time[i] \n total_turn_arn_time=total_turn_arn_time+turn_around_time[i]\n with open(\"results.txt\",\"a\") as file:\n file.write(f\"Average waiting time in FCFS for all processes is {total_wait_tim/n}\\n\")\n file.write(f\"Turn around time in FCFS for all processes is {total_turn_arn_time/n}\\n\")\n file.write(f\"Average response time in FCFS for all processes is {total_wait_tim/n}\\n\")\n print(f\"Average waiting time in FCFS for all processes is {total_wait_tim/n}\")\n print(f\"Turn around time in FCFS for all processes is {total_turn_arn_time/n}\")\n print(f\"Average response time in FCFS for all processes is {total_wait_tim/n}\")\n#Code implementation starts from here\nimport csv\nimport random\nprocess = [0 for i in range(0,10000)]\nburst_time = [0 for i in range(0,10000)]\narrival_time = [0 for i in range(0,10000)]\npriority = [0 for i in range(0,10000)]\nwith open(\"jobs_10000.csv\",\"r\") as file:\n i=0\n reader_handler = csv.reader(file)\n for row in reader_handler:\n process[i]=int(row[0])\n arrival_time[i]=int(row[1])\n burst_time[i]=int(row[2])\n priority[i]=int(row[3])\n i += 1\nsize=len(process) \nfind_avg_time(process, size, burst_time,arrival_time) ","sub_path":"Lab_08/Mahmood_Yousaf_Hamdan_Ali_Baloch/First_come_first_serve.py","file_name":"First_come_first_serve.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"609652561","text":"#!/bin/python\n\nimport os\n\n\n# Complete the cutTheSticks function below.\ndef cutTheSticks(arr):\n remaining = []\n while (len(arr)):\n remaining.append(len(arr))\n cut_size = min(arr)\n arr = [x - cut_size for x in arr if x != cut_size]\n return remaining\n\n\nif __name__ == '__main__':\n\n # list() is needed in Python 3: len()/min() cannot be taken of a bare map object\n arr = list(map(int, \"5 4 4 2 2 8\".split()))\n\n result = cutTheSticks(arr)\n\n print('\\n'.join(map(str, result)))\n","sub_path":"python/cut_stick.py","file_name":"cut_stick.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"648139591","text":"from flask import jsonify\n\ndef api_error(status, error_string) -> tuple:\n \"\"\"\n Formats an API error as it is expected by the rest of the API.\n :param status: the HTTP status, you want to respond with.\n :type status: integer\n :param error_string: a string of the form <field>.<error>.\n :type error_string: string\n :return: The JSON formatted error, and the HTTP code, to be used by Flask.\n :rtype: tuple\n \"\"\"\n field, error = error_string.split('.')\n err_json = jsonify({\n 'status': status,\n 'field': field,\n 'error': error\n })\n return err_json, status","sub_path":"rulesets/helpers/api_error.py","file_name":"api_error.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"433558288","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n/***************************************************************************\nName : DB Manager\nDescription : Database manager plugin for QuantumGIS\nDate : May 23, 2011\ncopyright : (C) 2011 by Giuseppe Sucameli\nemail : brush.tyler@gmail.com\n\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the 
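# --- Aside: the FCFS waiting-time recurrence above, restated in isolation.
# Hedged sketch; unlike the code above it also clamps the clock to the
# arrival time, so it stays correct when the CPU sits idle between jobs.
def fcfs_waiting(arrival, burst):
    wait, clock = [], 0
    for at, bt in zip(arrival, burst):
        wait.append(max(clock - at, 0))   # time spent queued before service
        clock = max(clock, at) + bt       # completion front of the schedule
    return wait

print(fcfs_waiting([0, 1, 2], [5, 3, 8]))  # [0, 4, 6]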
GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom ..plugin import DBPlugin, Database, Schema, Table, VectorTable, RasterTable, TableField, TableConstraint, TableIndex, TableTrigger, TableRule\ntry:\n\tfrom . import resources_rc\nexcept ImportError:\n\tpass\n\nfrom ..html_elems import HtmlParagraph, HtmlList, HtmlTable\n\n\ndef classFactory():\n\treturn PostGisDBPlugin\n\nclass PostGisDBPlugin(DBPlugin):\n\n\t@classmethod\n\tdef icon(self):\n\t\treturn QIcon(\":/db_manager/postgis/icon\")\n\n\t@classmethod\n\tdef typeName(self):\n\t\treturn 'postgis'\n\n\t@classmethod\n\tdef typeNameString(self):\n\t\treturn 'PostGIS'\n\n\t@classmethod\n\tdef providerName(self):\n\t\treturn 'postgres'\n\n\t@classmethod\n\tdef connectionSettingsKey(self):\n\t\treturn '/PostgreSQL/connections'\n\n\tdef databasesFactory(self, connection, uri):\n\t\treturn PGDatabase(connection, uri)\n\n\tdef connect(self, parent=None):\n\t\tconn_name = self.connectionName()\n\t\tsettings = QSettings()\n\t\tsettings.beginGroup( u\"/%s/%s\" % (self.connectionSettingsKey(), conn_name) )\n\n\t\tif not settings.contains( \"database\" ): # non-existent entry?\n\t\t\traise InvalidDataException( 'there is no defined database connection \"%s\".' % conn_name )\n\t\n\t\tget_value_str = lambda x: unicode(settings.value(x).toString())\n\t\thost, port, database, username, password = map(get_value_str, [\"host\", \"port\", \"database\", \"username\", \"password\"])\n\n\t\t# qgis1.5 use 'savePassword' instead of 'save' setting\n\t\tif not ( settings.value(\"save\").toBool() or settings.value(\"savePassword\").toBool() ):\n\t\t\t#dlg = qgis.gui.QgsCredentialDialog(parent)\n\t\t\t#(ok, username, password) = dlg.request(selected)\n\t\t\t(password, ok) = QInputDialog.getText(parent, \"Enter password\", 'Enter password for connection \"%s\":' % conn_name, QLineEdit.Password)\n\t\t\tif not ok: return False\n\n\t\tsettings.endGroup()\n\n\t\timport qgis.core\n\t\turi = qgis.core.QgsDataSourceURI()\n\t\turi.setConnection(host, port, database, username, password)\n\t\treturn DBPlugin.connect(self, uri)\n\n\nclass PGDatabase(Database):\n\tdef __init__(self, connection, uri):\n\t\tDatabase.__init__(self, connection, uri)\n\n\tdef connectorsFactory(self, uri):\n\t\tfrom .connector import PostGisDBConnector\n\t\treturn PostGisDBConnector(uri)\n\n\n\tdef dataTablesFactory(self, row, db, schema=None):\n\t\treturn PGTable(row, db, schema)\n\n\tdef vectorTablesFactory(self, row, db, schema=None):\n\t\treturn PGVectorTable(row, db, schema)\n\n\tdef rasterTablesFactory(self, row, db, schema=None):\n\t\treturn PGRasterTable(row, db, schema)\n\n\tdef schemasFactory(self, row, db):\n\t\treturn PGSchema(row, db)\n\n\n\tdef sqlDataModel(self, sql, parent):\n\t\tfrom .data_model import PGSqlModel\n\t\treturn PGSqlModel(self, sql, parent)\n\n\nclass PGSchema(Schema):\n\tdef __init__(self, row, db):\n\t\tSchema.__init__(self, db)\n\t\tself.oid, self.name, self.owner, self.perms = row\n\t\tself.tableCount = len(self.tables())\n\n\nclass PGTable(Table):\n\tdef __init__(self, row, db, schema=None):\n\t\tTable.__init__(self, db, schema)\n\t\tself.name, schema_name, self.isView, self.owner, self.estimatedRowCount, self.pages = row\n\t\tself.estimatedRowCount = int(self.estimatedRowCount)\n\n\tdef 
runVacuumAnalyze(self):\n\t\tself.database().connector.runVacuumAnalyze(self.name, self.schemaName())\n\n\n\tdef runAction(self, action):\n\t\taction = unicode(action)\n\n\t\tif action.startswith( \"table/\" ):\n\t\t\tif action == \"table/vacuum\":\n\t\t\t\tself.runVacuumAnalyze()\n\t\t\t\treturn True\n\n\t\telif action.startswith( \"rule/\" ):\n\t\t\tparts = action.split('/')\n\t\t\trule_name = parts[1]\n\t\t\trule_action = parts[2]\n\n\t\t\tmsg = u\"Do you want to %s rule %s?\" % (rule_action, rule_name)\n\t\t\tif QMessageBox.question(None, \"Table rule\", msg, QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:\n\t\t\t\treturn False\n\n\t\t\tif rule_action == \"delete\":\n\t\t\t\tself.database().connector.deleteTableRule(rule_name, self.name, self.schemaName())\n\t\t\t\tself._rules = None\t# refresh rules\n\t\t\t\treturn True\n\n\t\treturn Table.runAction(self, action)\n\n\tdef tableFieldsFactory(self, row, table):\n\t\treturn PGTableField(row, table)\n\n\tdef tableConstraintsFactory(self, row, table):\n\t\treturn PGTableConstraint(row, table)\n\n\tdef tableIndexesFactory(self, row, table):\n\t\treturn PGTableIndex(row, table)\n\n\tdef tableTriggersFactory(self, row, table):\n\t\treturn PGTableTrigger(row, table)\n\n\tdef tableRulesFactory(self, row, table):\n\t\treturn PGTableRule(row, table)\n\n\n\tdef info(self):\n\t\tfrom .info_model import PGTableInfo\n\t\treturn PGTableInfo(self)\n\n\tdef dataModel(self, parent):\n\t\tfrom .data_model import PGTableModel\n\t\treturn PGTableModel(self, parent)\n\n\nclass PGVectorTable(PGTable, VectorTable):\n\tdef __init__(self, row, db, schema=None):\n\t\tPGTable.__init__(self, row[:-4], db, schema)\n\t\tVectorTable.__init__(self, db, schema)\n\t\tself.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:]\n\n\tdef info(self):\n\t\tfrom .info_model import PGVectorTableInfo\n\t\treturn PGVectorTableInfo(self)\n\n\tdef getValidUniqueFields(self, onlyOne=False):\n\t\t\"\"\" list of fields valid to load the table as layer in qgis canvas \"\"\"\n\t\tret = Table.getUniqueFieldsForLayer(self)\n\t\t# add both serial and int4 fields with an unique index\n\t\tindexes = self.indexes()\n\t\tif indexes != None:\n\t\t\tfor idx in indexes:\n\t\t\t\tif idx.isUnique and len(idx.columns) == 1:\n\t\t\t\t\tfld = idx.fields()[0]\n\t\t\t\t\tif fld and fld not in ret and fld.type in [\"oid\", \"serial\", \"int4\"]:\n\t\t\t\t\t\tret.append( fld )\n\n\t\tif onlyOne:\n\t\t\treturn ret if len(ret) > 0 else None\n\t\treturn ret\n\nclass PGRasterTable(PGTable, RasterTable):\n\tdef __init__(self, row, db, schema=None):\n\t\tPGTable.__init__(self, row[:-6], db, schema)\n\t\tRasterTable.__init__(self, db, schema)\n\t\tself.geomColumn, self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:]\n\t\tself.geomType='RASTER'\n\n\tdef info(self):\n\t\tfrom .info_model import PGRasterTableInfo\n\t\treturn PGRasterTableInfo(self)\n\n\tdef getValidUniqueFields(self, onlyOne=False):\n\t\t\"\"\" list of fields valid to load the table as layer in qgis canvas \"\"\"\n\t\tret = Table.getUniqueFieldsForLayer(self)\n\t\t# add both serial and int4 fields with an unique index\n\t\tindexes = self.indexes()\n\t\tif indexes != None:\n\t\t\tfor idx in indexes:\n\t\t\t\tif idx.isUnique and len(idx.columns) == 1:\n\t\t\t\t\tfld = idx.fields()[0]\n\t\t\t\t\tif fld and fld not in ret and fld.type in [\"oid\", \"serial\", \"int4\"]:\n\t\t\t\t\t\tret.append( fld )\n\n\t\tif onlyOne:\n\t\t\treturn ret if len(ret) > 0 else None\n\t\treturn ret\n\t\n\tdef 
toMapLayer(self):\n\t\tfrom qgis.core import QgsRasterLayer \n\t\turi=self.uri()\n\t\tschema = \"schema=\"+self.schemaName() if self.schemaName() else ''\n\t\tpgrasterUri= ('PG: dbname=%s host=%s user=%s password=%s port=%s mode=2 %s table=%s') % (uri.database(), uri.host(), uri.username(), uri.password(), uri.port(),schema,self.name)\n\t\trasterLayer=QgsRasterLayer(pgrasterUri, self.name)\n\t\trasterLayer.setContrastEnhancementAlgorithm(\"StretchToMinimumMaximum\")\n\t\treturn rasterLayer\n\nclass PGTableField(TableField):\n\tdef __init__(self, row, table):\n\t\tTableField.__init__(self, table)\n\t\tself.num, self.name, self.dataType, self.charMaxLen, self.modifier, self.notNull, self.hasDefault, self.default = row\n\t\tself.primaryKey = False\n\n\t\t# find out whether fields are part of primary key\n\t\tfor con in self.table().constraints():\n\t\t\tif con.type == TableConstraint.TypePrimaryKey and self.num in con.columns:\n\t\t\t\tself.primaryKey = True\n\t\t\t\tbreak\n\n\nclass PGTableConstraint(TableConstraint):\n\tdef __init__(self, row, table):\n\t\tTableConstraint.__init__(self, table)\n\t\tself.name, constr_type, self.isDefferable, self.isDeffered, columns = row[:5]\n\t\tself.columns = map(int, columns.split(' '))\n\t\tself.type = TableConstraint.types[constr_type] # convert to enum\n\n\t\tif self.type == TableConstraint.TypeCheck:\n\t\t\tself.checkSource = row[5]\n\t\telif self.type == TableConstraint.TypeForeignKey:\n\t\t\tself.foreignTable = row[6]\n\t\t\tself.foreignOnUpdate = TableConstraint.onAction[row[7]]\n\t\t\tself.foreignOnDelete = TableConstraint.onAction[row[8]]\n\t\t\tself.foreignMatchType = TableConstraint.matchTypes[row[9]]\n\t\t\tself.foreignKeys = row[10]\n\n\nclass PGTableIndex(TableIndex):\n\tdef __init__(self, row, table):\n\t\tTableIndex.__init__(self, table)\n\t\tself.name, columns, self.isUnique = row\n\t\tself.columns = map(int, columns.split(' '))\n\n\nclass PGTableTrigger(TableTrigger):\n\tdef __init__(self, row, table):\n\t\tTableTrigger.__init__(self, table)\n\t\tself.name, self.function, self.type, self.enabled = row\n\nclass PGTableRule(TableRule):\n\tdef __init__(self, row, table):\n\t\tTableRule.__init__(self, table)\n\t\tself.name, self.definition = row\n\n\n","sub_path":"db_plugins/postgis/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":9377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651860516","text":"\"\"\"This module create tasks that can be fired of using Celery.\"\"\"\nfrom celery import shared_task\nfrom celery.utils.log import get_task_logger\nfrom integrations.mailchimp.utils.create_campaign import create_campaign\nfrom rating_process.models.rating_decision import RatingDecision\nfrom rating_process.models.press_release import PressRelease\n\nlogger = get_task_logger(__name__)\n\n\n@shared_task(autoretry_for=(Exception,),\n default_retry_delay=30,\n max_retries=10)\ndef run_create_campaign(rating_decision_pk=None):\n \"\"\"Function to set off a task to create a campaign.\"\"\"\n\n rating_decision_obj = RatingDecision.objects.get(pk=rating_decision_pk)\n press_release_obj = PressRelease.objects.get(\n rating_decision=rating_decision_obj)\n\n create_campaign(\n rating_job_id=rating_decision_pk,\n header=press_release_obj.header,\n body=press_release_obj.pre_amble,\n template_id=52657,\n issuer_type_id=rating_decision_obj.issuer.issuer_type.pk,\n )\n\n logger.info('MailChimp campaign 
created')\n","sub_path":"ncr_website/integrations/mailchimp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"423233281","text":"from django.shortcuts import render, redirect\n\n# import models (so we can query from database)\nfrom .models import Post\n\n# import forms (so we can display it in our template)\nfrom .forms import PostForm, UserForm\n\n# import extra functionality: (authentication/login/logout/messages)\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n# import mail dependencies\nfrom django.core.mail import EmailMessage, message\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\n\n\n# Create your views here.\ndef index(request):\n my_posts = [\n {\n 'author' : 'Koki Okano',\n 'title' : 'This is my first post',\n 'content' : 'content1',\n 'date_posted' : 'August 9, 2021'\n },\n {\n 'author' : 'Sung',\n 'title' : \"This is Sung's first post\",\n 'content' : 'content1',\n 'date_posted' : 'August 9, 2021'\n },\n ]\n # \n # \n context = {'posts': my_posts}\n return render(request, 'blog/index.html', context )\n\ndef aboutPage(request):\n return render(request, 'blog/about.html')\n\ndef posts(request):\n posts = Post.objects.all()\n context = {\n 'posts': posts,\n 'page_title': 'WELCOME TO MY BLOG'\n }\n return render(request, 'blog/posts.html', context)\n\n@login_required(login_url='blog-login')\ndef createPost(request):\n form = PostForm(request.POST or None)\n if form.is_valid():\n form.save()\n context = {\n 'form': form\n } \n return render(request, 'blog/createpost.html', context)\n\ndef registerPage(request):\n form = UserForm(request.POST or None)\n if form.is_valid():\n form.save()\n\n user = form.cleaned_data.get(\"username\")\n email = form.cleaned_data.get(\"email\")\n\n context = {'username': user}\n template = render_to_string('blog/emailtemplate.html', context)\n\n\n email_message = EmailMessage(\n 'Welcome to my Django blog',\n template,\n settings.EMAIL_HOST_USER,\n [email]\n )\n\n email_message.fail_silently = False\n email_message.send()\n\n messages.success(request, \"Account was created for \" + user)\n\n\n\n return redirect('blog-login')\n context = {\n 'form': form\n }\n return render(request, 'blog/register.html', context)\n\ndef loginPage(request):\n if request.method == \"POST\":\n username = request.POST.get(\"username\") # comes from the name attribute in the html tag\n password = request.POST.get(\"password1\")\n\n user = authenticate(request, username = username, password = password)\n if user is not None:\n login(request, user)\n print(f'{user} is logged in')\n return redirect('blog-index')\n messages.info(request, \"Incorrect username or password\")\n return render(request, 'blog/login.html')\n\n@login_required(login_url='blog-login')\ndef logoutUser(request):\n logout(request)\n return redirect('blog-login')\n\n\ndef individualPost(request, post_id):\n post = Post.objects.get(id=post_id)\n context = {'p': post}\n return render(request, 'blog/individualpost.html', context)\n\n\n@login_required(login_url='blog-login')\ndef updatePost(request, post_id):\n post = Post.objects.get(id=post_id)\n if post.author.id != request.user.id:\n messages.info(request, \"You cannot update another user's post\")\n return redirect('blog-posts')\n form = PostForm(request.POST, instance=post)\n if 
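# --- Aside: the authenticate-then-login handshake used by loginPage() above,
# reduced to its core. This is the standard Django auth API; the wrapper
# function and its names here are hypothetical.
from django.contrib.auth import authenticate, login

def sign_in(request, username, password):
    user = authenticate(request, username=username, password=password)
    if user is not None:      # None means the credentials did not match
        login(request, user)  # attaches the user to the session
        return True
    return False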
request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n form.save()\n messages.success(request, \"success\")\n return redirect('blog-individualpost', post_id=post_id)\n\n context = {'form': form} \n return render(request, 'blog/updatepost.html', context)\n\n\n@login_required(login_url='blog-login')\ndef deletePost(request, post_id):\n post = Post.objects.get(id=post_id)\n if post.author.id != request.user.id:\n message.info(request, \"You cannot delete another user's post\")\n return redirect('blog-posts')\n post.delete()\n messages.success(request, \"Post was deleted...\")\n return redirect('blog-posts')","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167614970","text":"from pymarc import MARCReader\nimport csv\nimport argparse\nimport re\nimport os\nfrom datetime import datetime\nimport pandas as pd\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--file')\nargs = parser.parse_args()\nif args.file:\n filename = args.file\nelse:\n filename = input('Enter filename (including \\'.mrc\\'): ')\n\n\nfileDir = os.path.dirname(__file__)\n\ndatetypes_dict = {}\nmarc_lang = {}\ncat_dict = {}\n\n\ndef createDict(csvname, column1, column2, dictname):\n with open(csvname) as codes:\n codes = csv.DictReader(codes)\n for row in codes:\n code = row[column1]\n name = row[column2]\n dictname[code] = name\n\n\n# Import type codes used in 006.\ncreateDict(os.path.join(fileDir, 'dictionaries/marc_datetypes.csv'), 'Type', 'Name', datetypes_dict)\n# Import language codes used in language.\ncreateDict(os.path.join(fileDir, 'dictionaries/marc_lang.csv'), 'Code', 'Name', marc_lang)\n# Import category codes used in 007.\ncreateDict(os.path.join(fileDir, 'dictionaries/marc_007categoryMaterial.csv'), 'Code', 'Name', cat_dict)\n\n\n# Creates k,v pair in dict where key = field_name, value = values of MARC tags in record.\ndef field_finder(field_name, tags):\n field_list = []\n field = record.get_fields(*tags)\n for my_field in field:\n my_field = my_field.format_field()\n field_list.append(my_field)\n if field_list:\n field_list = '|'.join(str(e) for e in field_list)\n mrc_fields[field_name] = field_list\n else:\n mrc_fields[field_name] = ''\n\n\n# Creates k,v pair in dict where key = field_name, value = values of specific subfield in MARC tag in record.\ndef subfield_finder(field_name, subfields, tags):\n field_list = []\n field = record.get_fields(*tags)\n for my_field in field:\n my_subfield = my_field.get_subfields(*subfields)\n for field in my_subfield:\n if field not in field_list:\n field_list.append(field)\n if field_list:\n field_list = '|'.join(str(e) for e in field_list)\n mrc_fields[field_name] = field_list\n else:\n mrc_fields[field_name] = ''\n\n\n# Converts code from MARC record into name from imported dictionaries.\ndef convert_to_name(keyname, dictname):\n v = mrc_fields.get(keyname)\n if '|' in v:\n v = v.split('|')\n for count, item in enumerate(v):\n for key, value in dictname.items():\n if item == key:\n v[count] = value\n mrc_fields[keyname] = '|'.join(v)\n else:\n for key, value in dictname.items():\n if v == key:\n mrc_fields[keyname] = value\n\n# Finds geographic subject headings from 600 fields.\ndef geo_finder(field_name, subfields, tags):\n field_list = []\n field = record.get_fields(*tags)\n for my_field in field:\n heading = []\n my_subfield = my_field.get_subfields(*subfields)\n for 
field in my_subfield:\n heading.append(field)\n heading = '--'.join(str(e) for e in heading)\n if heading not in field_list:\n field_list.append(heading)\n if field_list:\n field_list = '|'.join(str(e) for e in field_list)\n mrc_fields[field_name] = field_list\n else:\n mrc_fields[field_name] = ''\n\n\ndef makeBoundingBox():\n box = []\n coor_list = ['west', 'south', 'east', 'north']\n if mrc_fields.get('north'):\n for item in coor_list:\n direction = mrc_fields.get(item)\n if \"|\" in direction:\n direction = direction.split('|')\n direction = direction[0]\n else:\n direction = direction\n direction = direction.replace('+', '')\n box.append(direction)\n box = ', '.join(box)\n mrc_fields['bounding_box'] = box\n else:\n mrc_fields['bounding_box'] = ''\n for item in coor_list:\n del mrc_fields[item]\n\n\nall_fields = []\nrecord_count = 0\nwith open(filename, 'rb') as fh:\n marc_recs = MARCReader(fh, to_unicode=True)\n for record in marc_recs:\n mrc_fields = {}\n leader = record.leader\n # Finds fields/subfield values in record.\n field_finder('category', tags=['007'])\n field_finder('008', tags=['008'])\n subfield_finder('bib', subfields=['a'], tags=['910'])\n subfield_finder('oclc', subfields=['a'], tags=['035'])\n subfield_finder('links', subfields=['u'], tags=['856'])\n mrc_fields['title'] = record.title()\n subfield_finder('alt_title', subfields=['a', 'b'], tags=['246'])\n field_finder('authors', tags=['100', '110', '111', '130'])\n subfield_finder('statresp', subfields=['c'], tags=['245'])\n field_finder('contributors', tags=['700', '710', '711', '730'])\n subfield_finder('publisher', subfields=['b'], tags=['260', '264'])\n field_finder('marc_subjects', tags=['600', '610', '650', '651'])\n geo_finder('spatial_fast', tags=['650'], subfields=['z'])\n subfield_finder('spatial_lcnaf', tags=['651'], subfields=['a', 'z'])\n field_finder('description', tags=['500', '520'])\n subfield_finder('language', subfields=['a', 'b', 'c', 'd', 'f'], tags=['041'])\n subfield_finder('west', subfields=['d'], tags=['034'])\n subfield_finder('east', subfields=['e'], tags=['034'])\n subfield_finder('north', subfields=['f'], tags=['034'])\n subfield_finder('south', subfields=['g'], tags=['034'])\n subfield_finder('temporal', subfields=['x', 'y'], tags=['034'])\n subfield_finder('scale', subfields=['a'], tags=['255'])\n catValue = mrc_fields.get('category')\n if catValue:\n mrc_fields['category'] = catValue[0]\n convert_to_name('category', cat_dict)\n convert_to_name('language', marc_lang)\n\n # Edit & convert values in dictionary.\n for k, v in mrc_fields.items():\n # Find DtSt and Dates from field 008.\n if k == '008':\n if v:\n datetype = v[6]\n date1 = v[7:11].strip()\n date2 = v[11:15].strip()\n lang = v[35:38]\n else:\n datetype = ''\n date1 = ''\n date2 = ''\n lang = ''\n # Finds only oclc number, deleting prefixes.\n elif k == 'oclc' and v != '':\n oclc_list = []\n v = v.split('|')\n for item in v:\n item = str(item)\n oclc_num = re.search(r'([0-9]+)', item)\n if oclc_num:\n oclc_num = oclc_num.group(1)\n if oclc_num not in oclc_list:\n if oclc_num != mrc_fields['bib'][0]:\n oclc_list.append(oclc_num)\n v = '|'.join(str(e) for e in oclc_list)\n mrc_fields[k] = v\n\n del mrc_fields['008']\n mrc_fields['datetype'] = datetype\n convert_to_name('datetype', datetypes_dict)\n mrc_fields['date1'] = date1\n mrc_fields['date2'] = date2\n mrc_fields['lang'] = lang\n convert_to_name('lang', marc_lang)\n if mrc_fields.get('language') == '':\n mrc_fields['language'] = mrc_fields.get('lang')\n else:\n pass\n del 
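# --- Aside: the coordinate handling inside makeBoundingBox() above, shown on
# hypothetical decimal-degree values: take the first value of each
# pipe-delimited coordinate field, strip the '+' sign, join as W, S, E, N.
def bbox(fields):
    parts = []
    for key in ('west', 'south', 'east', 'north'):
        value = fields.get(key, '')
        if '|' in value:
            value = value.split('|')[0]   # keep only the first coordinate
        parts.append(value.replace('+', ''))
    return ', '.join(parts)

print(bbox({'west': '-096.0', 'south': '+038.0', 'east': '-094.5', 'north': '+040.0'}))
# -> '-096.0, 038.0, -094.5, 040.0'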
mrc_fields['lang']\n makeBoundingBox()\n\n # Adds dict created by this MARC record to all_fields list.\n all_fields.append(mrc_fields)\n record_count = record_count + 1\n print(record_count)\n\ndf = pd.DataFrame.from_dict(all_fields)\nprint(df.head(15))\ndt = datetime.now().strftime('%Y-%m-%d %H.%M.%S')\ndf.to_csv(path_or_buf='marcRecords_'+dt+'.csv', header='column_names', encoding='utf-8', sep=',', index=False)\n","sub_path":"extractMARCToGeoCSV.py","file_name":"extractMARCToGeoCSV.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"433017629","text":"\"\"\"\n Example taken from\n http://www.pythondiario.com/2018/08/ordenamiento-por-mezcla-merge-sort.html\n\"\"\"\n\n\n# merge_sort function\ndef merge_sort(lista):\n \"\"\"\n The first thing you see in the pseudocode is an if that\n checks the length of the list. If it is less than 2 (1 or 0 items), the list is returned.\n Why? It is already sorted.\n \"\"\"\n if len(lista) < 2:\n return lista\n # Otherwise, split it in two\n else:\n middle = len(lista) // 2\n right = merge_sort(lista[:middle])\n left = merge_sort(lista[middle:])\n return merge(right, left)\n\n# merge function\ndef merge(lista1, lista2):\n \"\"\"\n merge takes care of interleaving the elements of the two\n halves in sorted order.\n \"\"\"\n i, j = 0, 0 # index counters\n result = [] # result list\n\n # Interleave in sorted order\n while(i < len(lista1) and j < len(lista2)):\n if (lista1[i] < lista2[j]):\n result.append(lista1[i])\n i += 1\n else:\n result.append(lista2[j])\n j += 1\n # Append the leftover elements to the list\n result += lista1[i:]\n result += lista2[j:]\n\n # Return the result\n return result\n\n","sub_path":"txt por combinacion/modelado/combinacion.py","file_name":"combinacion.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"559317567","text":"'''\nCreated on 2017-2-10\n\n@author: Lxxx\n'''\n# _*_ coding: utf-8 _*_\nfrom os import listdir\nfrom os.path import isfile,join\nimport os.path\n\ndef list_all_files(file_path):\n return [f for f in listdir(file_path) if isfile(join(file_path,f))]\n\ndef list_all(file_path):\n return listdir(file_path)\n\ndef list_all_recruse(file_path,list):\n f=list_all(file_path)\n for file in f:\n if (isfile(join(file_path,file))==False):\n list_all_recruse(join(file_path,file),list)\n else:\n list.append(file)\n\nmusic=[]\nlist_all_recruse(\"D:\\\\music\\\\yueyu\\\\111\",music)\n\ngroupId=\"com.hrbb.npas\"\nartifactId=\"\"\nversion=\"\"\n\ndef generate_coordinate(file_name_list):\n for f in file_name_list:\n lastdot=f.rindex('.')\n filename=f[0:lastdot]\n artifactId=filename[0:filename.rindex('-')]\n version=filename[filename.rindex('-')+1:len(filename)]\n# print(artifactId)\n# print(version)\n print(\"mvn deploy:deploy-file -DgroupId=com.creditloan.zxcx -DartifactId=%s -Dversion=%s -Dpackaging=jar -Dfile=D:\\\\music\\\\yueyu\\\\111\\\\%s -Durl=http -DrepositoryId=3rdparty\" %(artifactId,version,f))\n print(\"<dependency>\")\n print(\"<groupId>com.creditloan.zxcx</groupId>\")\n print(\"<artifactId>%s</artifactId>\"%(artifactId))\n print(\"<version>%s</version>\"%(version))\n print(\"</dependency>\")\n\ngenerate_coordinate(music)\n\n\n\n\n\n\n\n\n\n","sub_path":"DoncatTest/src/LiaoXueFengTest/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"130125256","text":"#!/usr/bin/env python2\n#-*- coding: UTF-8 -*-
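# --- Aside: the jar-name parsing inside generate_coordinate() above, in
# isolation (hypothetical jar name): artifactId is everything before the
# last '-', version everything after it, with the extension stripped first.
def split_coordinate(jar_name):
    stem = jar_name[:jar_name.rindex('.')]
    dash = stem.rindex('-')
    return stem[:dash], stem[dash + 1:]

print(split_coordinate('commons-lang3-3.12.0.jar'))  # ('commons-lang3', '3.12.0')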
\n\n#\n# Copyright Jonathan Tremesaygues - 2014\n#\n# This software is governed by the CeCILL-B license under French law and\n# abiding by the rules of distribution of free software. You can use,\n# modify and/ or redistribute the software under the terms of the CeCILL-B\n# license as circulated by CEA, CNRS and INRIA at the following URL\n# \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL-B license and that you accept its terms.\n#\n\nimport ConfigParser\nfrom email.mime.text import MIMEText\nimport logging\nimport re\nimport smtplib\nimport subprocess\nimport sys\n\n\nclass Commit(object):\n def __init__(self, repository, revision):\n self._repository = repository\n self._revision = revision\n\n def load(self):\n \"\"\"\n Load commit information\n return boolean Success\n \"\"\"\n pipe = subprocess.Popen(['svn', 'log', '-v', '-r', str(self._revision), self._repository], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n commit_data, error = pipe.communicate()\n if len(commit_data) == 0:\n logging.getLogger().warning(error)\n return False\n\n # Cut message in lines\n commit_data = commit_data.splitlines()\n\n # Remove first and last line (svn decoration)\n commit_data = commit_data[1:-1]\n\n # Search useful informations\n line_of_informations = commit_data[0]\n informations = re.search(\"r\\d+ \\| (?P<author>\\w+) \\| (?P<date1>.+) \\((?P<date2>.+)\\) \\| (?P<message_lenght>\\d+) lines?\", line_of_informations)\n if informations is None:\n logging.getLogger().warning('Cannot get useful information')\n return False\n\n informations = informations.groupdict()\n self._author = informations['author']\n message_lenght = int(informations['message_lenght'])\n self._modified_files = commit_data[2:-(message_lenght + 1)]\n self._message = '\\n'.join(commit_data[-message_lenght:])\n self._date1 = informations['date1']\n self._date2 = informations['date2']\n\n self._branch = None\n if len(self._modified_files) > 0:\n modified_file = self._modified_files[0]\n informations = re.search('\\s+\\w+\\s+(?P<path>.*)', modified_file)\n if informations is not None:\n informations = informations.groupdict()\n path = informations['path']\n if path.startswith('/trunk/'):\n self._branch = 'trunk'\n elif path.startswith('/branches/'):\n self._branch = 'branches/' + path.split('/')[2]\n elif path.startswith('/tags/'):\n self._branch = 'tags/' + path.split('/')[2]\n\n return True\n\n def get_repository(self):\n \"\"\"\n @return str Repository URL\n \"\"\"\n return 
self._repository\n\n def get_revision(self):\n \"\"\"\n @return int Revision\n \"\"\"\n return self._revision\n\n def get_author(self):\n \"\"\"\n @return str Author\n \"\"\"\n return self._author\n\n def get_modified_files(self):\n \"\"\"\n @return list Modified files\n \"\"\"\n return self._modified_files\n\n def get_message(self):\n \"\"\"\n @return str Message\n \"\"\"\n return self._message\n\n def get_branch(self):\n \"\"\"\n @return str Branch\n \"\"\"\n return self._branch\n\n def get_date1(self):\n \"\"\"\n @return str Date\n \"\"\"\n return self._date1\n\n\ndef send_commit_by_mail(config, commit):\n logging.getLogger().info('sending mail...')\n\n sender = config.get('Mail', 'from')\n recipients = config.get('Mail', 'to')\n server = config.get('Mail', 'server')\n port = config.getint('Mail', 'port')\n use_ssl = config.getboolean('Mail', 'ssl')\n use_authentification = config.getboolean('Mail', 'use_authentification')\n login = config.get('Mail', 'login')\n password = config.get('Mail', 'password')\n\n message = MIMEText('Repository: %s\\nBranch: %s\\nRevision: %s\\nAuthor: %s\\nDate: %s\\n\\n%s\\n\\nModified files:\\n%s' % (\n config.get('Repository', 'url'),\n commit.get_branch(),\n commit.get_revision(),\n commit.get_author(),\n commit.get_date1(),\n commit.get_message(),\n '\\n'.join(commit.get_modified_files())\n ))\n\n message['Subject'] = '[SVN][%s] %s | %s | %s | %s' % (\n config.get('Repository', 'name'),\n commit.get_revision(),\n commit.get_author(),\n ' '.join(commit.get_date1().split()[0:2]),\n commit.get_branch(),\n )\n message['From'] = sender\n message['To'] = recipients\n\n if use_ssl:\n SMTP_class = smtplib.SMTP_SSL\n else:\n SMTP_class = smtplib.SMTP\n\n s = SMTP_class(server, port)\n if use_authentification:\n s.login(login, password)\n s.sendmail(sender, recipients.split(','), message.as_string())\n s.quit()\n\n\ndef usage():\n print(\"svn-mailer.py \")\n\n\ndef main(argc, argv):\n if argc != 3:\n usage()\n return False\n\n try:\n converted_revision = int(argv[1])\n if converted_revision < 0:\n raise ValueError\n except ValueError:\n print('\"%s\" is not an valid revision number' % argv[1])\n return False\n else:\n revision = converted_revision\n\n config_file_path = argv[2]\n config = ConfigParser.ConfigParser()\n result = config.read(config_file_path)\n if len(result) == 0:\n print('\"%s\" is not an valid configuration file' % argv[2])\n return False\n\n # Install logger handler\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')\n file_handler = logging.FileHandler(config.get('Logging', 'log_file'))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Get svn infos\n repository = config.get('Repository', 'url')\n commit = Commit(repository, revision)\n result = commit.load()\n if not result:\n logger.error('cannot load commit informations for revision %s in repository %s', revision, repository)\n return False\n\n send_commit_by_mail(config, commit)\n\n return True\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n","sub_path":"svn-mailer.py","file_name":"svn-mailer.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"303534033","text":"#!/usr/bin/env python\r\n# Author: wangguidong\r\n# Date: 2015-06-26\r\n\r\nimport sys\r\nimport socket \r\nimport time\r\nimport errno\r\n\r\nfrom libcom.packet_drv.Tester import *\r\nfrom 
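# --- Aside: the header-line regex from Commit.load() above, exercised on a
# hypothetical 'svn log -v' header line so the named groups are visible
# ('message_lenght' keeps the module's own spelling).
import re

line = 'r42 | alice | 2014-01-02 10:11:12 +0100 (Thu, 02 Jan 2014) | 2 lines'
m = re.search(r"r\d+ \| (?P<author>\w+) \| (?P<date1>.+) \((?P<date2>.+)\) \| (?P<message_lenght>\d+) lines?", line)
print(m.groupdict())
# {'author': 'alice', 'date1': '2014-01-02 10:11:12 +0100',
#  'date2': 'Thu, 02 Jan 2014', 'message_lenght': '2'}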
libcom.device_adapt.nt_adapt import *\r\n\r\n# @nt_name: like \"nt1\"\r\n# @nt_intf_index: like 0, 1, 2, ...\r\n# return: nt_intf_key, like \"intf0\"\r\ndef nt_get_intf_key(nt_name, nt_intf_index):\r\n nt_active_inft = list()\r\n nt_active_inft = NTAdapter.get_intf_key_list(nt_name)\r\n \r\n return nt_active_inft[nt_intf_index]\r\n\r\n# @nt_name: like \"nt1\"\r\n# @nt_intf_key: like \"intf0\"\r\n# return: nt_intf_name, like \"1/1/1\"\r\ndef nt_get_intf_name(nt_name, nt_intf_key):\r\n return NTAdapter.get_intf_name(nt_name, nt_intf_key)\r\n\r\n# @nt_name: like \"nt1\"\r\n# return: Tester\r\ndef nt_conn(nt_name):\r\n nt_ip = get_nt_ip(nt_name)\r\n nt_usrname = get_nt_usrname(nt_name)\r\n te = Tester()\r\n te.ConnectTester(nt_ip)\r\n te.TesterLogin(nt_usrname)\r\n \r\n return te\r\n\r\n# @te: Tester \r\n# @nt_name: like \"nt1\"\r\n# return: None\r\ndef nt_dis_conn(te, nt_name):\r\n usrname = get_nt_usrname(nt_name)\r\n te.TesterLogout(usrname)\r\n te.CloseConnect()\r\n \r\n# @te: Tester \r\n# @nt_name: like \"nt1\"\r\n# @nt_intf_key: like \"intf0\"\r\n# return: None\r\ndef nt_intf_config(te, nt_name, nt_intf_key):\r\n nt_intf_name = nt_get_intf_name(nt_name, nt_intf_key)\r\n te.TakeOwnership(nt_intf_name)\r\n te.ResetPort(nt_intf_name)\r\n medium = get_intf_medium(nt_name, nt_intf_key)\r\n te.interface_config(nt_intf_name, \"config\", phy_mode=medium)\r\n\r\n# @te: Tester \r\n# @nt_name: like \"nt1\"\r\n# @nt_intf_key: like \"intf0\"\r\n# @mode: refer to interface_config\r\n# return: None\r\ndef nt_intf_spec_config(te, nt_name, nt_intf_key, mode, interface_handle=None, autonegotiation=None, \\\r\n speed=None, duplex=None, enable_flow_control=None, op_mode=None, arp_send_enable=None, \\\r\n icmp_send_enable=None, src_mac_addr=None, intf_ip_addr=None, netmask=None, gateway=None, \\\r\n vlan=None, vlan_id=None, ipv6_intf_addr=None, ipv6_prefix_length=None, ipv6_gateway=None, \\\r\n na_send_enable=None, icmpv6_send_enable=None):\r\n nt_intf_name = nt_get_intf_name(nt_name, nt_intf_key)\r\n te.TakeOwnership(nt_intf_name)\r\n te.ResetPort(nt_intf_name)\r\n medium = get_intf_medium(nt_name, nt_intf_key)\r\n phy_mode = medium\r\n \r\n te.sequence += 1\r\n argument_dict = {\" -interface_handle \":interface_handle, \" -phy_mode \":phy_mode, \" -autonegotiation \":autonegotiation, \r\n \" -speed \":speed, \" -duplex \":duplex, \" -enable_flow_control \":enable_flow_control, \" -op_mode \":op_mode, \r\n \" -arp_send_enable \":arp_send_enable, \" -icmp_send_enable \":icmp_send_enable, \" -src_mac_addr \":src_mac_addr,\r\n \" -intf_ip_addr \":intf_ip_addr, \" -netmask \":netmask, \" -gateway \":gateway, \" -vlan \":vlan, \" -vlan_id \":vlan_id, \r\n \" -ipv6_intf_addr \":ipv6_intf_addr, \" -ipv6_prefix_length \":ipv6_prefix_length, \" -ipv6_gateway \":ipv6_gateway, \r\n \" -na_send_enable \":na_send_enable, \" -icmpv6_send_enable \":icmpv6_send_enable}\r\n \r\n cmd = str(te.sequence) + \":interface_config -port_handle \" + nt_intf_name + \" -mode \" + mode\r\n for key in argument_dict:\r\n if argument_dict[key] != None:\r\n cmd += key + str(argument_dict[key])\r\n cmd += \"\\r\\n\"\r\n\r\n data = {}\r\n data = te.__reply_from_nt(cmd)\r\n return data\r\n\r\n# @te: Tester \r\n# @nt_name: like \"nt1\"\r\n# @nt_send_key: like \"intf0\"\r\n# return: None\r\ndef nt_start_snd_pkt(te, nt_name, nt_send_key):\r\n nt_ip = get_nt_ip(nt_name)\r\n nt_usrname = get_nt_usrname(nt_name)\r\n nt_send_pt = nt_get_intf_name(nt_name, nt_send_key)\r\n te.ConnectTester(nt_ip)\r\n te.TesterLogin(nt_usrname)\r\n 
te.traffic_control(nt_send_pt, \"run\")\r\n te.CloseConnect()\r\n time.sleep(10)\r\n ","sub_path":"libcom/packet_drv/nt_snd.py","file_name":"nt_snd.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483664462","text":"from flask import make_response\nfrom riddle.views.student import student\nfrom riddle.views.helpers import *\nfrom riddle.models.Question import Question\nfrom riddle.models.Questionnaire import Questionnaire\nfrom riddle.models.Category import Category\nfrom riddle.models.Option import Option\nfrom riddle.models.Answer import Answer\nfrom riddle.models.Rating import Rating\nfrom riddle.models.Student import Student\nfrom riddle.models.StudentPresence import StudentPresence\nimport json\n\n@student.route('/student/status')\ndef status():\n student = get_student()\n if student:\n return student.to_json()\n else:\n return json.dumps(None)\n\n@student.route('/student/ping//')\ndef student_ping(qid):\n \"\"\"Updates user last ping timestamp and returns current question\"\"\"\n\n # Get models\n student = get_student()\n questionnaire = Questionnaire.select().where(Questionnaire.public_id == qid).get()\n\n # Update presence\n StudentPresence.update_latest(student, questionnaire)\n\n # Get current question\n question = questionnaire.presented_question()\n\n return json.dumps({'presented_question': question.id})\n\n@student.route('/student/login/', methods=['POST'])\ndef login():\n name = request.form.get('name')\n (student, created) = get_create_student(name)\n\n response = make_response(response_success())\n response.set_cookie('student_id', student.session_id, expires=datetime.datetime(2038, 1, 1))\n return response\n\n@student.route('/view//')\n@student_session\ndef show(qaire_id):\n qaires = Questionnaire.select().where(Questionnaire.public_id == qaire_id)\n ret = {}\n\n for qaire in qaires:\n category = Category.select().join(Questionnaire).where(Questionnaire.id == qaire.id)\n questions = Question.select().join(Questionnaire).where(Questionnaire.id == qaire.id)\n\n catname = ''\n\n for cat in category:\n catname = cat.name\n break\n\n ret = {'id': qaire.id, 'public_id': qaire.public_id, 'name': qaire.name, 'category': catname, 'questions' : [], 'category_name': category.get().name}\n\n for qion in questions:\n qtype = qtype2str(qion.typ)\n ret['questions'].append({'id': qion.id, 'type': qion.typ, 'description': qion.description})\n\n if qtype == 'single' or qtype == 'multi':\n ret['questions'][-1]['options'] = []\n options = Option.select().join(Question).where(Question.id == qion.id)\n\n for opt in options:\n ret['questions'][-1]['options'].append({'id': opt.id, 'text': opt.text})\n\n if not ret:\n ret = response_error('not_found', False)\n\n return json.dumps(ret)\n\n@student.route('/submit-answer/', methods=['POST'])\n@student_session\ndef submit_answer():\n student = get_current_student()\n question_id = request.form['question_id']\n\n qions = Question.select().where(Question.id == question_id)\n\n for qion in qions:\n qion_type = qtype2str(qion.typ)\n\n text_answer = request.form.get('text_answer')\n if text_answer and text_answer != '':\n Answer.create(text=text_answer, question=qion, student=student)\n\n option_ids = request.form.getlist('option_ids[]')\n if len(option_ids) > 0:\n if qion_type == 'single':\n option_ids = option_ids[:1]\n\n final_opts = []\n\n for oid in option_ids:\n opts = Option.select().where(Option.question == qion).where(Option.id == oid)\n for opt 
in opts:\n final_opts.append(opt)\n break\n else:\n return response_error('wrong_option')\n\n for opt in final_opts:\n Answer.create(option=opt, question=qion, student=student)\n\n return response_success()\n\n return response_error('question_not_found')\n\n@student.route('/submit-rating/', methods=['POST'])\n@student_session\ndef submit_rating():\n student = get_current_student()\n qaire_id = request.form['qaire_id']\n like = request.form['like']\n\n qaires = Questionnaire.select().where(Questionnaire.id == qaire_id)\n\n for qaire in qaires:\n ratings = Rating.select().where(Rating.student == student).where(Rating.questionnaire == qaire)\n for rating in ratings:\n return response_error('already_rated')\n\n if like == \"1\" or like == \"true\":\n like = True\n else:\n like = False\n\n Rating.create(like=like, student=student, questionnaire=qaire)\n\n return response_success()\n\n return response_error('questionnaire_not_found')\n\n\n","sub_path":"server/riddle/views/student/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327210838","text":"from django.conf.urls import url\nfrom . import views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'main'\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^register/$', views.RegisterFormView.as_view(), name='register'),\n url(r'^logout/$', auth_views.logout, name='logout'),\n url(r'^login/$', views.LoginFormView.as_view(), name='login'),\n url(r'^logout/$', views.LogoutView.as_view(), name='logout'),\n url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^new/$', views.Create_post.as_view(), name='create'),\n # Страница редактирования сигнатуры\n url(r'^update/(?P[0-9]+)/$', views.Update_post.as_view(), name='update'),\n # Страница удаления сигнатуры\n url(r'^delete/(?P[0-9]+)/$', views.Delete_post.as_view(), name='delete'),\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625107454","text":"import os\nfrom .base import * # noqa\n\nJWT_AUTH['JWT_EXPIRATION_DELTA'] = datetime.timedelta(hours=8)\n\nCORS_ORIGIN_ALLOW_ALL = True\nALLOWED_HOSTS = ['*']\nAWS_STORAGE_BUCKET_NAME = 'static-files-asdf324fqsadkn1109fsadfbvmb64adf4af4142cknkj'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'tot-db',\n 'USER': 'test',\n 'PASSWORD': 'test123',\n 'HOST': 'localhost',\n 'PORT': '5432'\n }\n}\n\nDEBUG = False\n\nOPBEAT = {\n 'ORGANIZATION_ID': 'fab3fd138af14d0e8f2b4ad7efcd22ce',\n 'APP_ID': '5ea801dc6e',\n 'SECRET_TOKEN': '6799043939c43b57c3721338afd5b808fb88ec0a',\n 'DEBUG': True,\n}\n\n","sub_path":"settings/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344413936","text":"'''\nCreated on Dec 10th 2016\n@Project Name:World bank data explorer\n@Author:Liwei Song, Zoe Ma, Yichao Chen\n'''\n\nfrom plot import *\nfrom class_function import *\nimport sys\nfrom plotly import *\nimport matplotlib.pyplot as plt\n\n'''\nThis module is plot_input function. 
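The riddle student views above repeat peewee's chainable query API; isolated, the select/join/where pattern they build on looks like this (model names from the code, the public_id value is a placeholder):

# One row by unique field; .get() raises DoesNotExist on a miss, which is
# why several views iterate over the result set and fall through to an
# error response instead.
questionnaire = (Questionnaire
                 .select()
                 .where(Questionnaire.public_id == 'abc123')
                 .get())

# Related rows through a join, as in show():
questions = (Question
             .select()
             .join(Questionnaire)
             .where(Questionnaire.id == questionnaire.id))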
It asks users to enter a plot type and return the plot.\n'''\n\ndef plot_input(year,feature,country):\n a=1\n size=len(feature)\n feature_plot_list=[]\n for item in range(size):\n feature_df= pd.read_csv('./data/alldata/'+feature[item]+'.csv').drop('Unnamed: 0',axis=1)\n #read in all the user-required features csv files and convert them to dataframe\n feature_df = feature_df.set_index(feature_df['Year']).drop(['Year'],axis=1)\n feature_plot_list.append(plot(feature_df,country,year))\n #use plot class for all the dataframes and combine all the plot objects into a list\n try:\n while(a==1):\n print('you could choose plot types below:')\n print('1,time_series_plot 2,barplot\\n3,scatter_plot 4,histogram\\n5,boxplot_year 6,boxplot_country\\n7,heatmap 8,choropleth\\n9,pie_plot\\nEnter Q to quit and Enter R to return to previous menu')\n plot_type = input('Please select a plot type')\n if plot_type.isdigit():\n option=int(plot_type)\n elif plot_type=='Q':\n sys.exit(1)\n elif plot_type=='R':\n a=0\n break\n else:\n raise InputError\n #choose different plots\n if option==1:\n for dplot in feature_plot_list:\n dplot.time_series_plot()\n if option==2:\n for dplot in feature_plot_list:\n dplot.bar_plot()\n if option==3:\n for dplot in feature_plot_list:\n dplot.scatter_plot()\n if option==4:\n for dplot in feature_plot_list:\n dplot.histogram()\n if option==5:\n for dplot in feature_plot_list:\n dplot.boxplot_year()\n if option==6:\n for dplot in feature_plot_list:\n dplot.boxplot_country()\n if option==7:\n for dplot in feature_plot_list:\n dplot.heatmap()\n if option==8:\n for dplot in feature_plot_list:\n dplot.choropleth()\n if option==9:\n for dplot in feature_plot_list:\n dplot.pie_chart()\n #for different option, the system return different type of plot\n except InputError:\n print('Invalid input option')\n except KeyboardInterrupt:\n sys.exit(1)\n except EOFError:\n sys.exit(1)\n return a\n","sub_path":"ls4408/plot_input.py","file_name":"plot_input.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378684501","text":"import random\nfrom typing import Any\nfrom bs4 import BeautifulSoup\nimport time\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport repository\nfrom classes import BrowserType\nfrom selenium import webdriver\nfrom db.tables import FineDetails\n\n\ndef parser(type: BrowserType, car_details_list):\n browser = get_browser(type)\n\n browser.delete_all_cookies()\n browser.get('https://xn--90adear.xn--p1ai/check/fines')\n\n is_first = True\n\n for car in car_details_list:\n\n if car.get_img_data() is not None:\n continue\n\n timeout_ex = False\n action_element('checker', browser, 60)\n\n if is_first is False:\n clear_text(browser.find_element_by_name('regnum'))\n clear_text(browser.find_element_by_name('regreg'))\n clear_text(browser.find_element_by_name('stsnum'))\n\n browser.find_element_by_name('regnum').send_keys(car.reg_num)\n browser.find_element_by_name('regreg').send_keys(car.reg_reg)\n browser.find_element_by_name('stsnum').send_keys(car.sts_num)\n\n checker = browser.find_element_by_class_name('checker')\n time.sleep(random.randint(1, 3))\n checker.click()\n\n start_time = time.time()\n while True:\n try:\n action_element(\"camera-img-btn\", browser, 
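plot_input above walks a nine-branch if chain to map the chosen number to a plot method; the same mapping written as a dispatch table, using the method names called in the original (a sketch, not a drop-in replacement for the menu loop):

PLOT_METHODS = {
    1: 'time_series_plot', 2: 'bar_plot',     3: 'scatter_plot',
    4: 'histogram',        5: 'boxplot_year', 6: 'boxplot_country',
    7: 'heatmap',          8: 'choropleth',   9: 'pie_chart',
}

def render_plots(plots, option):
    # Unknown option numbers surface as one explicit error instead of
    # silently matching none of the branches.
    try:
        method_name = PLOT_METHODS[option]
    except KeyError:
        raise ValueError('Invalid input option')
    for dplot in plots:
        getattr(dplot, method_name)()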
1)\n break\n except TimeoutException:\n try:\n action_element(\"close_modal_window\", browser, 1)\n browser.find_element_by_class_name('close_modal_window').click()\n continue\n except TimeoutException:\n if int(time.time() - start_time) >= 15:\n print('Сработал таймаут --- refresh')\n browser.refresh()\n timeout_ex = True\n car.decrement()\n break\n continue\n\n if car.get_count() == 0:\n print('Данные по реквезитам не были получены ')\n browser.quit()\n elif timeout_ex:\n car_details_list.append(car)\n continue\n\n car.set_img_data(\n parser_camera_img(browser.find_elements_by_class_name('camera-img-btn'), browser, car.get_id())\n )\n\n is_first = False\n browser.refresh()\n\n browser.quit()\n return car_details_list\n\n\ndef clear_text(element):\n element.clear()\n length = len(element.get_attribute('value'))\n element.send_keys(length * Keys.BACKSPACE)\n\n\ndef save_pic(browser, fine_id)->list:\n pics=list()\n for pic in BeautifulSoup(browser.page_source, 'html5lib').find_all('img', attrs={\"class\": \"cafap-photo-img\"}):\n str_pic = pic.__str__()\n base64_pic = str_pic[str_pic.find(',/') + 1:len(str_pic) - 3]\n pics.append(base64_pic)\n repository.save_img(fine_id=fine_id, img=base64_pic)\n\n return pics\n\n\ndef get_browser(type: BrowserType) -> Any:\n if BrowserType.SELENOID is type:\n capabilities = {\n \"browserName\": \"firefox\",\n \"version\": \"84\",\n \"platform\": \"LINUX\",\n \"selenoid:options\": {\n \"enableVNC\": True,\n \"enableVideo\": False\n }\n }\n return webdriver.Remote(\n command_executor='http://localhost:4444/wd/hub',\n desired_capabilities=capabilities\n )\n if BrowserType.FIREFOX is type:\n chromedriver = '/usr/local/Cellar/geckodriver'\n return webdriver.Firefox(chromedriver)\n\n\ndef action_element(value: str, browser: object, time_wait: int) -> Any:\n return WebDriverWait(browser, time_wait).until(\n EC.visibility_of_element_located((By.CLASS_NAME, value)))\n\n\ndef parser_camera_img(elements, browser, car_id):\n fines_details = save_fines(browser=browser, car_id=car_id)\n index = 0\n pics_list = list()\n for element in elements:\n try:\n element.click()\n action_element(\"cafap-photo-img\", browser, 15)\n pics_list.append(save_pic(browser, fine_id=fines_details[index]))\n index += 1\n except TimeoutException:\n print(\"Данные по элементу не были получены \")\n pics_list.append(list(\"Фотоматериалы не были получены\"))\n continue\n\n return pics_list\n\n\ndef save_fines(browser, car_id):\n fines_details = list()\n for element in browser.find_elements_by_class_name('finesItem'):\n fines_details.append(FineDetails(car_id=car_id, fine_description=element.text))\n repository.save_fines(fines_details)\n return fines_details\n\n\n\n\n\n","sub_path":"services/parser_executor.py","file_name":"parser_executor.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532754730","text":"#\n# Copyright (c) 2008-2015 Thierry Florac \n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
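The inner loop of parser() above combines a one-second explicit wait, dismissal of a blocking modal, and a fifteen-second overall budget; factored into a function that reuses the module's own action_element helper, the shape of it is:

import time
from selenium.common.exceptions import TimeoutException

def wait_for_camera_button(browser, budget_s=15):
    # Poll in short waits, closing the modal whenever it appears, until
    # the camera button is visible or the time budget is spent.
    deadline = time.time() + budget_s
    while time.time() < deadline:
        try:
            return action_element('camera-img-btn', browser, 1)
        except TimeoutException:
            try:
                action_element('close_modal_window', browser, 1)
                browser.find_element_by_class_name('close_modal_window').click()
            except TimeoutException:
                continue
    return None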
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n\n\"\"\"PyAMS_table.header module\n\nThis module defines columns headers.\n\"\"\"\n\nfrom urllib.parse import urlencode\n\nfrom zope.interface import implementer\n\nfrom pyams_table.interfaces import IColumnHeader\n\n\n__docformat__ = \"reStructuredText\"\n\nfrom pyams_table import _\n\n\n@implementer(IColumnHeader)\nclass ColumnHeader:\n \"\"\"ColumnHeader renderer provider\"\"\"\n\n _request_args = []\n\n def __init__(self, context, request, table, column):\n self.__parent__ = context\n self.context = context\n self.request = request\n self.table = table\n self.column = column\n\n def update(self):\n \"\"\"Override this method in subclasses if required\"\"\"\n\n def render(self):\n \"\"\"Override this method in subclasses\"\"\"\n return self.column.header\n\n def get_query_string_args(self):\n \"\"\"\n Collect additional terms from the request and include in sorting column\n headers\n\n Perhaps this should be in separate interface only for sorting headers?\n\n \"\"\"\n args = {}\n for key in self._request_args:\n value = self.request.params.get(key, None)\n if value:\n args.update({key: value})\n return args\n\n\nclass SortingColumnHeader(ColumnHeader):\n \"\"\"Sorting column header.\"\"\"\n\n def render(self):\n table = self.table\n prefix = table.prefix\n col_id = self.column.id\n\n # this may return a string 'id-name-idx' if coming from request,\n # otherwise in Table class it is intialised as a integer string\n current_sort_id = table.get_sort_on()\n try:\n current_sort_id = int(current_sort_id)\n except ValueError:\n current_sort_id = current_sort_id.rsplit(\"-\", 1)[-1]\n\n current_sort_order = table.get_sort_order()\n\n sort_id = col_id.rsplit(\"-\", 1)[-1]\n\n sort_order = table.sort_order\n if int(sort_id) == int(current_sort_id):\n # ordering the same column so we want to reverse the order\n if current_sort_order in table.reverse_sort_order_names:\n sort_order = \"ascending\"\n elif current_sort_order == \"ascending\":\n sort_order = table.reverse_sort_order_names[0]\n\n args = self.get_query_string_args()\n args.update(\n {\"%s-sort-on\" % prefix: col_id, \"%s-sort-order\" % prefix: sort_order}\n )\n query_string = \"?%s\" % (urlencode(sorted(args.items())))\n\n translate = self.request.localizer.translate\n return '%s' % (\n query_string, translate(_(\"Sort\")), translate(self.column.header))\n","sub_path":"src/pyams_table/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342588304","text":"import numpy as np\nimport pysam\nimport subprocess\nimport argparse\nimport os, pdb\n\nfrom collections import Counter\n\nMIN_MAP_QUAL = 10\nBEFORE_EXT = 0\n\n\ndef convert_rnaseq(bam_file, output_directory, bgzip_path, tabix_path):\n \"\"\"\n Given a path to an RNAseq bam file, convert to tabix format.\n :param bam_file: str Path to an RNAseq bam file\n :param bgzip_path: str Path to bgzip executable\n :param tabix_path: str Path to tabix executable\n \"\"\"\n count_file = os.path.basename(os.path.splitext(bam_file)[BEFORE_EXT])\n os.makedirs(os.path.join(output_directory, 'tabix'), exist_ok=True)\n tabix_output_path = os.path.join(output_directory, 
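For the sorting header above: render() url-encodes the prefixed sort parameters in sorted key order, so for a hypothetical prefix 'table' and column id 'table-title-1' the generated link target comes out as:

from urllib.parse import urlencode

args = {'table-sort-on': 'table-title-1', 'table-sort-order': 'ascending'}
print('?%s' % urlencode(sorted(args.items())))
# ?table-sort-on=table-title-1&table-sort-order=ascending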
'tabix', count_file)\n with pysam.AlignmentFile(bam_file, 'rb') as sam_handle, open(tabix_output_path, 'w') as count_handle:\n\n for cname, clen in zip(sam_handle.references, sam_handle.lengths):\n # initialize count array\n counts = Counter()\n for read in sam_handle.fetch(reference=cname):\n\n # skip read if unmapped or if mapping quality is low\n if read.is_unmapped or read.mapq < MIN_MAP_QUAL:\n continue\n\n site = read.pos + read.alen - 1 if read.is_reverse else read.pos\n counts[site] += 1\n\n # write counts to output file\n for i in sorted(counts.keys()):\n count_handle.write('\\t'.join([cname, str(i), str(i+1), str(counts[i])]) + '\\n')\n\n print('Completed {}'.format(cname))\n\n # Compress counts file\n subprocess.call([bgzip_path, '-f', tabix_output_path])\n\n # Index counts file\n subprocess.call([tabix_path, '-f', '-b', '2', '-e', '3', '-0', '{}.gz'.format(tabix_output_path)])\n\n print('Compressed file with RNA-seq counts is {}.gz'.format(tabix_output_path))\n\n return '{}.gz'.format(tabix_output_path)\n\n\ndef convert_riboseq(bam_file, output_directory, bgzip_path, tabix_path, read_lengths):\n \"\"\"\n Given a path to an Riboseq bam file, convert to tabix format.\n :param bam_file: str Path to an Riboseq bam file\n :param bgzip_path: str Path to bgzip executable\n :param tabix_path: str Path to tabix executable\n \"\"\"\n # file names and handles\n os.makedirs(os.path.join(output_directory, 'tabix'), exist_ok=True)\n count_file_path = os.path.join(output_directory, 'tabix',\n os.path.basename(os.path.splitext(bam_file)[BEFORE_EXT]) + '.{}.len{}.tbx')\n # rev_count_file = os.path.splitext(bam_file)[BEFORE_EXT] + '_rev.len{}.tbx'\n sam_handle = pysam.AlignmentFile(bam_file, 'rb')\n # fwd_handle = {r: open('{}.{}'.format(fwd_count_file, r), 'w') for r in read_lengths}\n fwd_handle = {r: open(count_file_path.format('fwd', r), 'w') for r in read_lengths}\n # rev_handle = {r: open('{}.{}'.format(rev_count_file, r), 'w') for r in read_lengths}\n rev_handle = {r: open(count_file_path.format('rev', r), 'w') for r in read_lengths}\n\n for cname, clen in zip(sam_handle.references, sam_handle.lengths):\n\n # initialize count arrays\n fwd_counts = {r: Counter() for r in read_lengths}\n rev_counts = {r: Counter() for r in read_lengths}\n\n for read in sam_handle.fetch(reference=cname):\n\n # skip reads not of the appropriate length, or if unmapped, or if mapping quality is low\n if read.rlen not in read_lengths or read.is_unmapped or read.mapq < MIN_MAP_QUAL:\n continue\n\n if read.is_reverse:\n asite = int(read.positions[-13])\n rev_counts[read.rlen][asite] += 1\n else:\n asite = int(read.positions[12])\n fwd_counts[read.rlen][asite] += 1\n\n # write counts to output files\n for r in read_lengths:\n for i in sorted(fwd_counts[r].keys()):\n fwd_handle[r].write('\\t'.join([cname, str(i), str(i+1), str(fwd_counts[r][i])]) + '\\n')\n\n for i in sorted(rev_counts[r].keys()):\n rev_handle[r].write('\\t'.join([cname, str(i), str(i+1), str(rev_counts[r][i])]) + '\\n')\n\n print('Completed {}'.format(cname))\n\n sam_handle.close()\n for r in read_lengths:\n fwd_handle[r].close()\n rev_handle[r].close()\n\n for r in read_lengths:\n\n # compress count file\n subprocess.call([bgzip_path, '-f', count_file_path.format('fwd', r)])\n subprocess.call([bgzip_path, '-f', count_file_path.format('rev', r)])\n\n subprocess.call([tabix_path, '-f', '-b', '2', '-e', '3', '-0', count_file_path.format('fwd', r) + '.gz'])\n subprocess.call([tabix_path, '-f', '-b', '2', '-e', '3', '-0', count_file_path.format('rev', 
r) + '.gz'])\n\n # generated_tabix.append(count_file_path.format('fwd', r) + '.gz')\n # generated_tabix.append(count_file_path.format('rev', r) + '.gz')\n\n print('Compressed file with ribosome footprint counts '\n 'on forward strand is {}.gz'.format(count_file_path.format('fwd', r)))\n print('Compressed file with ribosome footprint counts on '\n 'reverse strand is {}.gz'.format(count_file_path.format('rev', r)))\n\n return os.path.join(output_directory, 'tabix', os.path.splitext(os.path.basename(bam_file))[BEFORE_EXT])\n","sub_path":"ribohmm/contrib/bam_to_tbi.py","file_name":"bam_to_tbi.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519851078","text":"import pandas as pd\nimport torch\nfrom model import Model, DenseModel\nimport torch.nn as nn\nimport torch.optim as optim\nfrom datagenerator import DataGenerator\nfrom torch.utils.data import DataLoader\nfrom matplotlib import pyplot as plt \nimport time\nimport os\nimport numpy as np\nfrom game import *\nimport math\n\n# hyperparams\nbatch_size = 32\nnum_epochs = 10000\nlearning_rate = 1e-3\ncontinue_training_path = \"\"\nmodel_name = \"\"\neps = 1e-10\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nmodel = Model().to(device)\nbce = nn.BCELoss()\nmae = nn.L1Loss(reduction='mean')\ntrain = pd.read_csv(\"train.csv\")\ndatagen = DataGenerator(train.loc[train['delta'] == 1], aug=True)\ndataloader = DataLoader(datagen, batch_size=batch_size, shuffle=True, num_workers=4)\noptimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999))\ntest = pd.read_csv(\"test.csv\")\n\nvalids = []\nfor i in range(1, 6):\n valid_train = test.loc[test['delta'] == i].values\n valid_train = valid_train[:, 2:]\n valid_train = torch.tensor(valid_train, dtype=torch.float)\n valid_train = valid_train.view(valid_train.shape[0], 1, 25, 25)\n valids.append(valid_train[:100]) #####################################\n\ntimestamp = time.strftime(\"%y_%m_%d_%H_%M_%S\")\nif continue_training_path:\n # load a network for continued training \n print('continue training')\n model.load_state_dict(torch.load(f'{continue_training_path}/networks/{model_name}.pth', map_location=device))\n path_data = continue_training_path\nelif not os.path.exists('Training_' + timestamp):\n # generate folder structure \n path_data = 'Training_' + timestamp\n os.makedirs(path_data)\n os.makedirs(path_data + '/networks')\n os.makedirs(path_data + '/plots')\n\nloss_list = []\neval_loss_list = []\nbest_loss = 10000\n\nfor epoch in range(num_epochs):\n loss_batch = 0\n model.train()\n\n for train_batch in dataloader:\n train_batch = train_batch.view(train_batch.shape[0], 1, 25, 25).type(torch.float).to(device)\n model.zero_grad()\n labels = train_batch.clone()\n predicted_start = model(train_batch)\n output_batch = probabilistic_life_step(predicted_start)\n added_dead = predicted_start - output_batch\n added_dead = torch.clamp(added_dead, eps, 1-eps)\n loss = added_dead.mean()\n output_batch = torch.clamp(output_batch, eps, 1-eps)\n loss += bce(output_batch, labels)\n loss.backward()\n optimizer.step()\n loss_batch += loss.item() / len(dataloader)\n\n\n \n\n loss_list.append(loss_batch)\n\n # evaluation\n model.eval()\n eval_loss = 0\n with torch.no_grad():\n for i in range(1, 6):\n val_data = valids[i-1].to(device)\n val_labels = val_data.clone()\n\n for j in range(i):\n val_data = model(val_data)\n val_data = torch.where(val_data > 0.5, 
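Both converters above share one counting idiom: collapse each read to a single genomic site, tally with a Counter, and emit 0-based half-open rows for tabix. Stripped of pysam, with toy (pos, alen, is_reverse) tuples standing in for aligned reads:

from collections import Counter

def site_counts(reads):
    counts = Counter()
    for pos, alen, is_reverse in reads:
        # Reverse-strand reads are anchored at their last aligned base,
        # forward-strand reads at their first, as in convert_rnaseq above.
        site = pos + alen - 1 if is_reverse else pos
        counts[site] += 1
    return [(i, i + 1, counts[i]) for i in sorted(counts)]

print(site_counts([(10, 50, False), (30, 40, True), (10, 20, False)]))
# [(10, 11, 2), (69, 70, 1)]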
torch.ones_like(val_data), torch.zeros_like(val_data))\n\n for j in range(i):\n val_data = life_step(val_data)\n eval_loss += mae(val_data, val_labels).item() / 5 \n eval_loss_list.append(eval_loss)\n\n print(f'Epoch: {epoch}, loss: {loss_batch}, eval_loss: {eval_loss}, best_loss: {best_loss}')\n\n if eval_loss < best_loss:\n best_loss = eval_loss\n torch.save(model.state_dict(), path_data + f'/networks/model_{timestamp}_epoch_{epoch}.pth')\n\n if epoch % 50 == 0:\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.plot(loss_list)\n ax.plot(eval_loss_list)\n ax.legend(('train', 'eval'))\n ax.set_title('Training')\n ax.set_ylabel('Loss')\n ax.set_xlabel('Epoch')\n plt.savefig(path_data + f'/plots/plot_{timestamp}.png')","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"407552259","text":"from core.component import Component\n\n\nclass SpeedConverter(Component):\n \"\"\"\n Converts samples of rotations to speed in ms\n \"\"\"\n\n def __init__(self, input: str, output: str, length: int):\n \"\"\"\n :param input: topic to listen for rotations data\n :param output: topic to send speed data\n :param length: length of circle in meters\n \"\"\"\n\n super().__init__()\n\n self._rotations_topic = input\n self._speed_topic = output\n\n self._length = length\n self._rotations = 0\n self._timestamp = 0\n\n self._io.subscribe(self._rotations_topic, self._on_update)\n\n def _on_update(self, rotations: int, timestamp: int, **kwargs):\n \"\"\"\n :param rotations: number of rotations\n :param timestamp: time of last pass in microseconds\n \"\"\"\n\n if rotations == self._rotations:\n return\n\n # update speed\n if self._timestamp:\n speed_ms = self._convert(rotations - self._rotations, timestamp - self._timestamp)\n self._io.publish(self._speed_topic, speed=speed_ms)\n\n # record increased distance\n self._timestamp = timestamp\n self._rotations = rotations\n\n def _convert(self, rotations, elapsed_time_usec):\n elapsed_time_s = elapsed_time_usec / 1e6\n distance_m = rotations * self._length\n return distance_m / elapsed_time_s\n","sub_path":"converter/speed/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266638409","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/wordtex/wp_formatting.py\n# Compiled at: 2013-11-13 15:05:09\n\"\"\"\n\n##############\n## Changing font type to courier new (should work)\n# I just need to add the following around the block\n# I think the \"style = \"padd...\" could be replaced with the std indent\n# STILL NEED TO CHECK IN WP\n
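train.py imports life_step and probabilistic_life_step from game without showing them; a plausible sketch of the deterministic step for these 25x25 boards, assuming wrap-around edges (the usual convention for this reverse-Game-of-Life setup), is a single convolution:

import torch
import torch.nn.functional as F

def life_step(board):
    # board: (N, 1, H, W) tensor of 0/1 cell states.
    kernel = torch.ones(1, 1, 3, 3, dtype=board.dtype, device=board.device)
    kernel[0, 0, 1, 1] = 0                    # a cell is not its own neighbor
    padded = F.pad(board, (1, 1, 1, 1), mode='circular')
    neighbors = F.conv2d(padded, kernel)      # live-neighbor count per cell
    # Conway rules: born with exactly 3 neighbors, survive with 2 or 3.
    alive = (neighbors == 3) | ((board == 1) & (neighbors == 2))
    return alive.to(board.dtype)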

This is an example of the kind of output I want for my code\nNote that it is indented, of a different font, and has a different background color.\n\"\"\"\nrange = xrange\nimport pdb, re, copy, texlib\nfrom cloudtb import textools\nSECTION_NAME = ''\nSUBSECTION_NAME = ''\nSECTION_NUMBER = 0\nSUBSECTION_NUMBER = 0\nPARAGRAPH = ('

', '

')\n\ndef delete_self(texpart, *args, **kwargs):\n texpart.text_data = [\n '']\n\n\ndef section_num(texpart, *args, **kwargs):\n global SECTION_NUMBER\n SECTION_NUMBER += 1\n texpart.text_data.insert(0, SECTION_NAME + (' {0}: ').format(SECTION_NUMBER))\n texpart.text_data = texlib.reform_text(texpart.text_data, no_indicators=True)\n\n\ndef subsection_num(texpart, *args, **kwargs):\n global SUBSECTION_NUMBER\n SUBSECTION_NUMBER += 1\n texpart.text_data.insert(0, SUBSECTION_NAME + (' {0}.{1}: ').format(SECTION_NUMBER, SUBSECTION_NUMBER))\n texpart.text_data = texlib.reform_text(texpart.text_data, no_indicators=True)\n\n\ndef href_call(texpart, *args, **kwargs):\n hlink = re.match('\\\\\\\\href\\\\{(.*?)}', texpart.start_txt).group(1)\n html_start, html_end = texpart.add_outside\n html_start = html_start.format(hlink)\n texpart.add_outside = (html_start, html_end)\n\n\nclass list_call(object):\n\n def __init__(self, enumerate=False):\n self.enumerate = enumerate\n self.count = 1\n\n def __call__(self, texpart, *args, **kwargs):\n \"\"\"Have to do a call here only because the \"item\"s may or may not have\n an end point (particularily the last one). Have to create a start and stop \n so it can be handled by process_inout\"\"\"\n use_dict = {'itemize': begin_dict['itemize'], 'enumerate': begin_dict['enumerate']}\n texpart.no_update_text = False\n texpart.update_text(use_dict)\n regexp = re.compile('\\\\\\\\item ([\\\\w\\\\W]*?)(?=(\\\\\\\\item|$))')\n researched = []\n for n in texpart.text_data:\n if type(n) in (str, unicode):\n researched.extend(textools.re_search(regexp, n))\n else:\n researched.append(n)\n\n new_body = []\n for text in researched:\n if type(text) in (str, unicode, texlib.TexPart):\n new_body.append(text)\n else:\n self.count += 1\n assert '\\\\end{itemize}' not in text.group(1)\n new_body.append('\\\\startitem ' + text.group(1) + '\\\\enditem ')\n\n texpart.text_data = texlib.reform_text(new_body, no_indicators=True)\n line_items = [\n [\n 'item',\n tp(add_outside=('
  • ', '
  • '), no_outer_pgraphs=True)]]\n use_dict = build_dict('list_call', line_items, '\\\\\\\\start{0} ', None, '\\\\\\\\end{0}')\n texpart.update_text(use_dict=use_dict)\n texpart.update_text()\n return\n\n\nclass tabularnewline_call(object):\n \"\"\"Class which accepts default row settings\"\"\"\n\n def __init__(self, textpart_list):\n self.textpart_list = textpart_list\n\n def __call__(self, texpart, *args, **kwargs):\n body, = texpart.text_data\n columns = re.split(' [&] ', body)\n col_st, col_end = ('\\\\tabcolstart ', ' \\\\tabcolend\\n')\n columns = [ col_st + n + col_end for n in columns ]\n Tparts = []\n for i, tpart in enumerate(self.textpart_list):\n tpart.update_match_re((['\\\\\\\\tabcolstart '], [],\n [\n ' \\\\\\\\tabcolend\\\\n']))\n Tparts.extend(texlib.get_text_data([columns[i]], tpart))\n\n texpart.no_update_text = False\n texpart.text_data = Tparts\n texpart.update_text()\n\n\ndef _tabular_get_column_list(start_txt):\n get_columns_raw = '\\\\\\\\begin{tabular\\\\*?}{(.*)}'\n get_split_columns = '\\\\|'\n get_column_info = '>{\\\\\\\\(.*?)}[pm]{([0-9.]*)(\\\\\\\\?[\\\\w]*)}'\n raw_cols = re.match(get_columns_raw, start_txt).group(1)\n split_cols = re.split(get_split_columns, raw_cols)\n default_align = 'raggedright'\n default_width = (1, 'DEFAULT')\n align_data, width_data = [], []\n for col in split_cols:\n if col == '':\n continue\n if col == 'c':\n align_data.append(default_align)\n width_data.append(default_width)\n else:\n cgroup = re.match(get_column_info, col).group\n align_data.append(cgroup(1))\n width_data.append((float(cgroup(2)), cgroup(3)))\n\n align_dict = {'raggedright': 'left', 'centering': 'center'}\n for i, value in enumerate(align_data):\n align_data[i] = align_dict[value]\n\n perc_width_format = 'style=\"width: {0}%; border: 1px solid #cccccc; padding:0px 5px;\"'\n tot_width = sum([ n[0] for n in width_data ])\n last_type = -1\n for i, value in enumerate(width_data):\n amount, ctype = value\n assert last_type != -1 and ctype == last_type\n amount = int(amount * 100.0 / tot_width)\n width_data[i] = perc_width_format.format(amount)\n\n td_format = ''\n textpart_list = []\n for i, align in enumerate(align_data):\n Tpart = texlib.TexPart(add_outside=(\n td_format.format(col_align=align, row_align='top', width=width_data[i]),\n ''), no_outer_pgraphs=True)\n Tpart.label = 'tabular_column_custom dict:tabular_call'\n textpart_list.append(Tpart)\n\n return textpart_list\n\n\ndef tabular_call(texpart, *args, **kwargs):\n \"\"\"Handles formating tables.\"\"\"\n textpart_list = _tabular_get_column_list(texpart.start_txt)\n body, = texpart.text_data\n body = re.sub('\\\\\\\\hline ?\\\\n?', '', body)\n tab_st, tab_end = ('\\\\tabrowstart ', ' \\\\tabrowend\\n')\n split = body.split('\\\\tabularnewline\\n')\n assert split[(-1)] == '' or split[(-1)].find('\\\\hline ') == 0\n del split[-1]\n new_body = [ tab_st + n + tab_end for n in split ]\n texpart.text_data = [('').join(new_body)]\n tpart = texlib.TexPart(add_outside=('', ''), call_first=tabularnewline_call(textpart_list), no_update_text=True, no_outer_pgraphs=True)\n tpart.update_match_re((['\\\\\\\\tabrowstart '], [], [' \\\\\\\\tabrowend\\\\n']))\n tpart.label = 'tabrow function: ' + 'tabular_call'\n use_dict = {'tabularnewline': tpart}\n texpart.no_update_text = False\n texpart.update_text(use_dict=use_dict)\n\n\ntp = texlib.TexPart\n\ndef build_dict(name, patterns, inside_template=None, start_template=None, end_template=None, custom=None):\n mydict = {}\n i = 0\n for p, texpart in patterns:\n if custom != 
None:\n i += 1\n inside, start, end = p\n p = ('i {0}:{1}:{2}').format(i, inside, start, end)\n else:\n if inside_template == None:\n inside = []\n elif type(inside_template) in (str, unicode):\n inside = [\n inside_template.format(p)]\n else:\n inside = inside\n if start_template == None:\n start = []\n else:\n start = [\n start_template.format(p)]\n if end_template == None:\n end = []\n else:\n end = [\n end_template.format(p)]\n new_tp = copy.copy(texpart)\n new_tp.update_match_re((inside, start, end))\n new_tp.label = p + ' dict:' + name\n mydict[p] = new_tp\n\n return mydict\n\n\nbegin_objects = [\n [\n 'document', tp()],\n [\n 'tabular\\\\*?',\n tp(call_first=tabular_call, add_outside=('', '
    '), no_outer_pgraphs=True, no_update_text=True)],\n [\n 'lstlisting',\n tp(add_outside=('
      ', '
    '), no_update_text=True, no_std_format=True, no_outer_pgraphs=True)],\n [\n 'itemize',\n tp(add_outside=('
      ', '
    '), no_outer_pgraphs=True, no_update_text=True, call_first=list_call())],\n [\n 'enumerate',\n tp(add_outside=('
      ', '
    '), no_outer_pgraphs=True, no_update_text=True, call_first=list_call(enumerate=True))],\n [\n 'equation', tp(add_outside=('', ''))]]\nbegin_dict = build_dict('begin', begin_objects, '\\\\\\\\begin\\\\{{{0}}}(\\\\{{.*}})? *?', None, '\\\\\\\\end\\\\{{{0}}} *?')\nif_objects = [\n [\n 'blog', tp(no_outer_pgraphs=True)],\n [\n 'tex',\n tp(call_first=delete_self, no_outer_pgraphs=True)],\n [\n 'false',\n tp(call_first=delete_self, no_outer_pgraphs=True)]]\nif_dict = build_dict('if', if_objects, '\\\\\\\\if{0} ', '\\\\\\\\if.*? ', '\\\\\\\\fi ')\ntxt_attributes = [\n [\n 'textbf',\n tp(add_outside=('', ''), no_outer_pgraphs=True)],\n [\n 'textit',\n tp(add_outside=('', ''), no_outer_pgraphs=True)],\n [\n 'uline',\n tp(add_outside=(''), no_outer_pgraphs=True)],\n [\n 'section',\n tp(add_outside=('

    ', '

    '), call_first=[\n section_num])],\n [\n 'section\\\\*', tp(add_outside=('

    ', '

    '))],\n [\n 'subsection',\n tp(add_outside=('

    ', '

    '), call_first=[\n subsection_num])],\n [\n 'subsection*', tp(add_outside=('

    ', '

    '))]]\ntxt_attr_dict = build_dict('txt_attr', txt_attributes, '\\\\\\\\{0}\\\\{{', '\\\\{{', '\\\\}}')\nother_attributes = [\n [\n 'href',\n tp(add_outside=(' ', ''), no_outer_pgraphs=True, call_first=href_call)]]\nother_attr_dict = build_dict('other_attr', other_attributes, '\\\\\\\\{0}\\\\{{.*?}}\\\\{{', None, '\\\\}}')\ncustom_items = []\ncustom_dict = build_dict('custom', custom_items)\nfrom cloudtb.extra import richtext\nfinal_subs = [\n [\n '\\\\#', '#'],\n [\n '\\\\$', '$'],\n [\n '\\\\%', '%'],\n [\n '{*}', '* '],\n [\n '{[}', '['],\n [\n '{]}', ']'],\n [\n '\\\\{', '{'],\n [\n '\\\\}', '}'],\n [\n '<', '<'],\n [\n '>', '>'],\n [\n '\\\\&', '&'],\n [\n '\"', '"'],\n [\n '\\\\textbackslash{}', '\\\\'],\n [\n '\\\\textasciitilde{}', '~'],\n [\n '\\\\textasciicircum{}', '^'],\n [\n '\\\\textendash{}', '-']]\nfinal_subs = [ (textools.convert_to_regexp(n[0], compile=True), n[1]) for n in final_subs\n ]\n\ndef concatenate_dicts(*dicts):\n out = {}\n for d in dicts:\n out.update(d)\n\n return out\n\n\nevery_dict_formatting = concatenate_dicts(begin_dict, if_dict, txt_attr_dict, other_attr_dict, custom_dict)\nif __name__ == '__main__':\n import wordtex\n from cloudtb import dbe\n wordtex.main()","sub_path":"pycfiles/wordtex-0.2.21.linux-x86_64.tar/wp_formatting.py","file_name":"wp_formatting.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481016115","text":"from django.core.exceptions import NON_FIELD_ERRORS\nfrom django.forms.utils import ErrorList\nfrom django.shortcuts import render\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import CreateUserForm,CreateAddressForm,CreateOrtForm\n\n\n# Create your views here.\ndef index(request):\n return render(request, \"homepage.html\")\n\n\ndef register(request):\n global user,address,ort\n try:\n if request.method == 'POST':\n user = CreateUserForm(request.POST)\n address = CreateAddressForm(request.POST)\n ort = CreateOrtForm(request.POST)\n if user.is_valid() and address.is_valid() and ort.is_valid():\n ort.save()\n address.save()\n user.save()\n user = CreateUserForm()\n address = CreateAddressForm()\n ort = CreateOrtForm()\n context = {'user': user,'address':address,'ort':ort}\n return render(request, 'registration/register.html', context)\n except Exception:\n return render(request, 'homepage.html')\n\n","sub_path":"cronjob/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343586169","text":"import math\r\nfrom copy import deepcopy\r\nfrom functools import lru_cache\r\n\r\nimport numpy as np\r\n\r\n\r\nclass HexBoard:\r\n \"\"\"This class holds all the data for a single board state\"\"\"\r\n\r\n BLUE = 1\r\n RED = 2\r\n EMPTY = 3\r\n PLAYER_ID_TO_NAME = { 1: 'blue', 2: 'red', 3: 'empty' }\r\n POSSIBLE_NEIGHBORS = ((-1, 0), (1, 0), (-1, 1), (1, -1), (0, 1), (0, -1))\r\n\r\n def __init__(self, board_size, source_coords=None, target_coords=None, moves_made=None, overwrite = False):\r\n \"\"\"Creates a new empty board with the provided size\"\"\"\r\n self.board = {}\r\n self.size = board_size\r\n self.useful_to_check_win = False\r\n self.moves_made = 0 if moves_made is None else moves_made\r\n\r\n self.target_coords = HexBoard.get_target_coordinates(board_size) if target_coords is None else target_coords\r\n self.source_coords = 
HexBoard.get_source_coordinates(board_size) if source_coords is None else source_coords\r\n \r\n self.board = {k:v for k, v in HexBoard.get_empty_board(board_size).items()} if not overwrite else None\r\n \r\n def as_np(self):\r\n arr = []\r\n for x in range(self.size):\r\n arr.append([])\r\n for y in range(self.size):\r\n arr[x].append(self.board[(x, y)])\r\n \r\n return np.array(arr).astype(np.float64)\r\n \r\n def from_np(self, board_np, size, moves_made):\r\n board = {}\r\n for x in range(len(board_np)):\r\n for y in range(len(board_np[x])):\r\n board[(x, y)] = board_np[x][y]\r\n \r\n hexboard = HexBoard(size)\r\n hexboard.board = board\r\n hexboard.moves_made = moves_made\r\n\r\n return hexboard\r\n \r\n def is_empty(self, coordinates):\r\n \"\"\"Returns if the board is empty at the provided coordinate\"\"\"\r\n return self.board[coordinates] == HexBoard.EMPTY\r\n\r\n def is_color(self, coordinates, color):\r\n \"\"\"Returns if the board is a certain color at the provided coordinate\"\"\"\r\n return self.board[coordinates] == color\r\n\r\n def get_mirrored_board(self):\r\n board = self.switch_colors()\r\n new_board_np = np.fliplr(np.rot90(board.as_np(), axes=(1, 0)))\r\n return self.from_np(new_board_np, board.size, board.moves_made)\r\n\r\n def get_unmirrored_board(self):\r\n board = self.switch_colors()\r\n new_board_np = np.rot90(np.fliplr(board.as_np(), axes=(0, 1)))\r\n return self.from_np(new_board_np, board.size, board.moves_made)\r\n\r\n def switch_colors(self):\r\n new_board = self.copy()\r\n for x in range(self.size):\r\n for y in range(self.size):\r\n if new_board.board[(x, y)] != HexBoard.EMPTY:\r\n new_board.board[(x, y)] = self.get_opposite_color(new_board.board[(x, y)])\r\n\r\n return new_board\r\n\r\n def get_color(self, coordinates):\r\n \"\"\"Returns the color at the provided board coordinate\"\"\"\r\n return self.board[coordinates]\r\n\r\n def place(self, coordinates, color):\r\n \"\"\"Places the provided color at the provided coord\"\"\"\r\n if self.board[coordinates] == HexBoard.EMPTY:\r\n self.moves_made += 1\r\n self.board[coordinates] = color\r\n\r\n def make_move(self, coordinates, color):\r\n \"\"\"Should return the new board without modifying the existing board\"\"\"\r\n new_board = self.copy()\r\n new_board.place(coordinates, color)\r\n return new_board\r\n \r\n def copy(self):\r\n \"\"\"Returns an exact deep copy of itself\"\"\"\r\n new_board = HexBoard(self.size, target_coords=self.target_coords, source_coords=self.source_coords, overwrite = True)\r\n new_board.moves_made = self.moves_made\r\n new_board.board = {k:v for k,v in self.board.items()}\r\n return new_board\r\n\r\n @classmethod\r\n @lru_cache(maxsize=512) # caching this to create lower lookup times, technically can't have more than board.size ** 2 options\r\n def get_neighbors(cls, coordinates, size):\r\n \"\"\"Returns a list with the coordinates of every possible/valid neighbor.\"\"\"\r\n (cx, cy) = coordinates\r\n neighbors = [(x + cx, y + cy) for x, y in HexBoard.POSSIBLE_NEIGHBORS if HexBoard.in_bounds(cx + x, cy + y, size)]\r\n return neighbors\r\n\r\n def traverse(self, color, move, visited):\r\n \"\"\"\r\n Returns true if we can reach the end of the board using this move.\r\n Returns false if the next move is already visited or the wrong color.\r\n If we did not reach the end we recursively check each of the neighbors,\r\n and if we reach the otherside that way we return true as well.\r\n \"\"\"\r\n # if we have reached the border (target) for a specific color\r\n if 
HexBoard.is_at_border(color, move, self.size):\r\n return True\r\n\r\n visited[move] = True\r\n for n in HexBoard.get_neighbors(move, self.size):\r\n if self.board[n] != color or n in visited: continue\r\n if self.traverse(color, n, visited):\r\n return True\r\n\t\t\r\n return False\r\n\r\n def get_winner(self):\r\n \"\"\"Check if the game has ended, and returns the winner. If None is returned the game is ongoing\"\"\"\r\n if self.moves_made < self.size: return None\r\n elif self.check_win(HexBoard.RED): return HexBoard.RED\r\n elif self.check_win(HexBoard.BLUE): return HexBoard.BLUE\r\n elif self.check_draw(): return HexBoard.EMPTY\r\n return None\r\n\r\n def check_win(self, color):\r\n \"\"\"Check if we have made a snake from the source side to the opposing side for the provided color\"\"\"\r\n if not self.useful_to_check_win:\r\n for move in self.target_coords[color]:\r\n if self.board[move] == color:\r\n self.useful_to_check_win = True\r\n break\r\n if not self.useful_to_check_win: return False\r\n\r\n for move in self.source_coords[color]:\r\n if self.board[move] != color: continue\r\n if self.traverse(color, move, {}):\r\n return True \r\n return False\r\n\r\n def check_draw(self):\r\n \"\"\"Checks if we have any empty hexes left on the board\"\"\"\r\n return self.moves_made >= self.size ** 2\r\n\r\n def get_possible_moves(self):\r\n \"\"\"Compiles a list of all empty hexes in the current hexboard\"\"\"\r\n if self.get_winner() is not None: return []\r\n return [coord for coord, color in self.board.items() if color == HexBoard.EMPTY]\r\n\r\n def print(self):\r\n \"\"\"Outputs the board pieces to the console\"\"\"\r\n print(\" \", end=\"\")\r\n\r\n for y in range(self.size):\r\n print(chr(y + ord('a')), \"\", end=\"\")\r\n print(\"\")\r\n print(\" -----------------------\")\r\n\r\n for y in range(self.size):\r\n print(y, \"|\", end=\"\")\r\n\t\t\t\r\n for z in range(y):\r\n print(\" \", end=\"\")\r\n\r\n for x in range(self.size):\r\n piece = self.board[x, y]\r\n if piece == HexBoard.BLUE:\r\n print(\"\\u001b[36m\\u25CF\\u001b[0m \", end=\"\")\r\n elif piece == HexBoard.RED:\r\n print(\"\\u001b[31m\\u25CF\\u001b[0m \", end=\"\")\r\n else:\r\n if x == self.size:\r\n print(\"-\", end=\"\")\r\n else:\r\n print(\"- \", end=\"\")\r\n print(\"|\")\r\n\r\n print(\" -----------------------\")\r\n\r\n def __str__(self):\r\n \"\"\"Outputs the board pieces to the console\"\"\"\r\n output = \"[\"\r\n\r\n for y in range(self.size):\r\n output += \"\"\r\n\t\t\t\r\n for x in range(self.size):\r\n piece = self.board[x, y]\r\n if piece == HexBoard.BLUE:\r\n output += \"b\"\r\n elif piece == HexBoard.RED:\r\n output += \"r\"\r\n else:\r\n output += \".\"\r\n \r\n if y is not self.size - 1: output += \" \"\r\n\r\n output += \"]\"\r\n return output\r\n\r\n def hash_code(self, color=3):\r\n \"\"\"Generates a hash code that mirrors the current board state as seen by the provided player\"\"\"\r\n multiplier = 10\r\n code = color\r\n for _, value in self.board.items():\r\n code += value * multiplier\r\n multiplier *= 10\r\n return code\r\n \r\n @classmethod\r\n @lru_cache(maxsize=16)\r\n def get_reward(cls, color, winner):\r\n \"\"\"Returns the reward for the specified color, -1 if it loses, 1 if it wins, 0 on a draw\"\"\"\r\n if color == winner:\r\n return 1\r\n elif winner == HexBoard.EMPTY:\r\n return 0\r\n else:\r\n return -1\r\n\r\n @classmethod\r\n @lru_cache(maxsize=512)\r\n def is_at_border(cls, color, move, size):\r\n \"\"\"Checks if the provided coordinate is at the border for the provided 
color\"\"\" \r\n return (color == HexBoard.BLUE and move[0] == size-1) or (color == HexBoard.RED and move[1] == size-1)\r\n\r\n @classmethod \r\n @lru_cache(maxsize=32)\r\n def get_target_coordinates_for_color(cls, color, size):\r\n \"\"\"Returns the coordinates of the right border (for blue) or the left border (for red)\"\"\"\r\n if color == HexBoard.BLUE:\r\n return [(size - 1, i) for i in range(size)]\r\n else:\r\n return [(i, size - 1) for i in range(size)] \r\n\r\n @classmethod \r\n @lru_cache(maxsize=32)\r\n def get_target_coordinates(cls, size):\r\n \"\"\"Returns the target coordinates for both of the colors\"\"\"\r\n return {HexBoard.BLUE: HexBoard.get_target_coordinates_for_color(HexBoard.BLUE, size), HexBoard.RED: HexBoard.get_target_coordinates_for_color(HexBoard.RED, size)}\r\n\r\n @classmethod \r\n @lru_cache(maxsize=32)\r\n def get_source_coordinates(cls, size):\r\n \"\"\"Returns the source coordinates for both of the colors\"\"\"\r\n return {HexBoard.BLUE: HexBoard.get_source_coordinates_for_color(HexBoard.BLUE, size), HexBoard.RED: HexBoard.get_source_coordinates_for_color(HexBoard.RED, size)}\r\n\r\n @classmethod\r\n @lru_cache(maxsize=32)\r\n def get_source_coordinates_for_color(cls, color, size):\r\n \"\"\"Returns the coordinates of the left border (for blue) or the top border (for red)\"\"\"\r\n if color == HexBoard.BLUE:\r\n return [(0, i) for i in range(size)]\r\n else:\r\n return [(i, 0) for i in range(size)]\r\n\r\n @classmethod\r\n def from_hash_code(cls, hash_code):\r\n \"\"\"Rebuilds a board based on the hash code. This basically makes the hash code more of a serialization\"\"\"\r\n pos = str(hash_code)[:-1][::-1]\r\n board_size = int(math.sqrt(len(pos)))\r\n board = cls(board_size)\r\n\r\n i = 0\r\n for x in range(board_size):\r\n for y in range(board_size):\r\n board.place((x, y), int(pos[i]))\r\n i += 1\r\n \r\n return board\r\n\r\n @classmethod\r\n @lru_cache(maxsize=2)\r\n def get_opposite_color(cls, color):\r\n \"\"\"Returns the opposite color of the provided color. 
Returns BLUE if the color is not recognized\"\"\"\r\n return HexBoard.RED if color == HexBoard.BLUE else HexBoard.BLUE\r\n \r\n def get_move_between_boards(self, other_board):\r\n \"\"\"Tries to find the move that is made between the two provided boards.\"\"\"\r\n if self.size is not other_board.size:\r\n print('Trying to get the move between two boards of different sizes.')\r\n return (None, None)\r\n\r\n for x in range(self.size):\r\n for y in range(self.size):\r\n if self.board[x, y] != other_board.board[x, y]:\r\n return (x, y)\r\n \r\n return (None, None)\r\n\r\n @classmethod\r\n @lru_cache(maxsize=512)\r\n def in_bounds(cls, numx, numy, size):\r\n \"\"\"Returns if a number is still within the required constraints for the board size\"\"\"\r\n return numx >= 0 and numx < size and numy >= 0 and numy < size\r\n\r\n @classmethod\r\n @lru_cache(maxsize=128)\r\n def get_empty_board(cls, size):\r\n \"\"\"Returns an empty board that we can use to create new HexBoard instances\"\"\"\r\n board = {}\r\n for x in range(size):\r\n for y in range(size):\r\n board[x, y] = HexBoard.EMPTY\r\n return board\r\n","sub_path":"a4/util/hexboard.py","file_name":"hexboard.py","file_ext":"py","file_size_in_byte":12287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469483737","text":"#!/usr/bin/env python3\n\n# HackerRank https://www.hackerrank.com/challenges/python-tuples/problem\n\n# Sample Input 0\n#\n# 2\n# 1 2\n# Sample Output 0\n#\n# 3713081631934410656\n\n\nif __name__ == '__main__':\n\n n = int(input())\n integer_list = map(int, input().split())\n\n converted_to_list = list(integer_list)\n\n for each_value in converted_to_list:\n print(\"Value: \" + str(each_value))\n\n if type(converted_to_list) is list:\n print(\"Is list!!!!\")\n\n tuple_of_list = (*converted_to_list,)\n\n print(hash(tuple_of_list))\n\n","sub_path":"PythonPractice/scripts/hacker_rank/hash_of_tuple.py","file_name":"hash_of_tuple.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248853459","text":"#!/usr/bin/env python3\n\nimport sys\nimport subprocess\n\n\nprint(\"starting julia...\")\njulia_process = subprocess.Popen(\n \"julia --project=.. 
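A short usage sketch of the HexBoard class above: blue owns the left-to-right connection (source hexes at x == 0, target hexes at x == size - 1), so filling one horizontal row wins:

board = HexBoard(3)
for x in range(3):
    board.place((x, 0), HexBoard.BLUE)

print(board.check_win(HexBoard.BLUE))        # True: (0,0)-(1,0)-(2,0) spans the board
print(board.get_winner() == HexBoard.BLUE)   # True as well, since 3 moves >= board size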
-e 'include(\\\"../PowerModelsBackend.jl\\\"); interactive_mode()'\",\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n)\n\n\nwhile True:\n sys.stdout.flush()\n julia_process.stdout.flush()\n output = julia_process.stdout.readline()\n if output.startswith(\"waiting for input line...\") or julia_process.poll() is not None:\n break\n if output:\n print(output.strip())\n\nprint(\"julia process ready for input\")\n\n\n\n\ndef run_julia_backend_command(julia_process, command):\n if julia_process.poll() is not None:\n print(\"process has terminated skipping julia command: \", command)\n\n print(\"sending command to julia: {}\".format(command))\n sys.stdout.flush()\n sys.stderr.flush()\n\n julia_process.stdin.write(command)\n julia_process.stdin.write(\"\\n\")\n julia_process.stdin.flush()\n\n results = []\n while True:\n sys.stdout.flush()\n output = julia_process.stdout.readline()\n if output.startswith(\"waiting for input line...\") or julia_process.poll() is not None:\n break\n else:\n results.append(output.strip())\n\n print(\"julia command complete\")\n\n if len(results) != 2:\n print(\"\\033[91mbad backend output, incorrect number of lines, {}\\033[0m\".format(len(results)))\n return []\n\n if results[1] != \"complete\":\n print(\"\\033[91mbad backend output, status value {}, see process stderr for details\\033[0m\".format(results[1]))\n return []\n\n return results[0]\n\n\nresult = run_julia_backend_command(julia_process, \"load_grid, data/case5.m\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"data_summary\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"run_ac_pf\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"data_summary\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"run_dc_pf\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"data_summary\"); print(\"result: {}\".format(result))\nresult = run_julia_backend_command(julia_process, \"shutdown\"); print(\"result: {}\".format(result))\n\n\nprint(\"julia stderr:\")\nsys.stdout.flush()\nsys.stderr.flush()\noutputs = julia_process.stderr.readlines()\nfor output in outputs:\n sys.stderr.write(\"\\033[91m julia:\\033[0m {}\".format(output))\n","sub_path":"test/test_pmbe.py","file_name":"test_pmbe.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"413829503","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 1 09:42:40 2019\r\n\r\n@author: emilh\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import colors\r\nimport itertools\r\n\r\nfrom PIL import Image\r\nimport glob\r\nfrom sklearn.cluster import KMeans\r\n \r\n\r\nimage_list_0 = []\r\nimage_list_1 = []\r\n\r\nsummary_list_0 = []\r\nsummary_list_1 = []\r\n\r\n\r\nrailwidth = 50#64\r\nconvfilter = np.ones((railwidth))\r\n\r\n#Program a sleeper cutter.\r\ndef ImageSegmentation(im,railwidth):\r\n \r\n #Finding the stones versus rails based on low and high difference in \r\n #column pixels values\r\n SumDifferenceCol = np.sum(im[1::1,:][0:-1,:]-im[2::1,:],axis=0)\r\n \r\n \r\n padding = np.ones(( np.int( railwidth/2)))*10e12\r\n padded_vector = []\r\n padded_vector.append(padding[:].tolist()) \r\n 
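run_julia_backend_command above implements a sentinel-based line protocol: write one command line, then collect stdout lines until the backend prints its ready prompt again. The round trip itself, reduced to a helper (the child process is assumed to be opened in text mode with pipes, exactly as in the script):

def roundtrip(proc, command, sentinel="waiting for input line..."):
    # One request/response cycle against a line-oriented child process.
    proc.stdin.write(command + "\n")
    proc.stdin.flush()
    lines = []
    while True:
        line = proc.stdout.readline()
        if line.startswith(sentinel) or proc.poll() is not None:
            return lines
        lines.append(line.strip())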
padded_vector.append(SumDifferenceCol[:].tolist()) \r\n padded_vector.append(padding[:-1].tolist()) \r\n padded_vector = list(itertools.chain(*padded_vector))\r\n padded_vector = np.asarray(padded_vector)\r\n \r\n #Finding the best fit, the rails has a lower column standard deviation \r\n #as compared to the stones \r\n #make a convolution with the rail width\r\n convfilter = np.ones((railwidth))\r\n \r\n #Finding the lowest value for the two rails based on a convolution\r\n convResult = np.convolve( padded_vector , convfilter[:], 'valid')\r\n #convResult.shape\r\n \r\n np.argpartition(convResult,6)\r\n \r\n sortedlist = np.argsort(convResult)[:(railwidth*2)]\r\n \r\n # Initializing KMeans\r\n kmeans = KMeans(n_clusters=2)\r\n # Fitting with inputs\r\n kmeans = kmeans.fit(sortedlist.reshape(-1, 1))\r\n # Predicting the clusters\r\n labels = kmeans.predict(sortedlist.reshape(-1, 1))\r\n # Getting the cluster centers\r\n C = kmeans.cluster_centers_\r\n \r\n rail1start=np.int(C[0]-railwidth/2)\r\n rail1end = np.int(C[0]+railwidth/2)\r\n\r\n rail2start=np.int(C[1]-railwidth/2)\r\n rail2end = np.int(C[1]+railwidth/2)\r\n \r\n #imgCut1=Image.fromarray(im[:,rail1start:rail1end])\r\n imgCut1=im[:,rail1start:rail1end]\r\n imgCut2=im[:,rail2start:rail2end]\r\n \r\n return imgCut1, imgCut2\r\n\r\n#Program a sleeper cutter.\r\n \r\n#lack normalization \r\ndef CustomFilteringmethod(imgCut1):\r\n \r\n col_mean1 = np.mean(imgCut1,axis=0)\r\n col_sd1 = np.std(imgCut1,axis=0)\r\n \r\n y = np.zeros(( imgCut1.shape[0] , imgCut1.shape[1] ))\r\n y_fault = np.abs(np.abs(imgCut1-col_mean1)-col_sd1)\r\n larger_than= y_fault > np.zeros(( imgCut1.shape[0] , imgCut1.shape[1] ))\r\n y[larger_than] = y_fault[larger_than]\r\n y = np.asarray(y, dtype=np.uint8) #y.astype(uint8)#(int)# dtype=uint8\r\n return y\r\n\r\n\r\ndef RailResults(yL,yR):\r\n max_values = ( np.max(yL), np.max(yR))\r\n std_values = ( np.std(yL), np.std(yR))\r\n \r\n #use the maximum and the std for the same rail\r\n indice_std = max_values.index(max(max_values)) \r\n std_value = std_values[indice_std] \r\n max_value=max( max_values)\r\n return max_value, std_value\r\n\r\n\r\ncountplot=0\r\nfor filename in glob.glob('H:/BaneDk/WorkFiles/WorkFiles/Images/Examplesof0/*.jpg'): #assuming gif\r\n im = cv2.imread(filename,0)\r\n image_list_0.append(im)\r\n \r\n # Segmentation\r\n imgCutL, imgCutR = ImageSegmentation(im,railwidth)\r\n\r\n\r\n \r\n #plot rail left and rail right \r\n if False:\r\n #Rail 1\r\n type(imgCut1)\r\n imgCut1.shape\r\n imgCut1\r\n #y\r\n imgCut1Plot=Image.fromarray(imgCut1)\r\n type(imgCut1Plot)\r\n imgCut1Plot\r\n \r\n #rail 2\r\n type(imgCut2)\r\n imgCut2.shape\r\n imgCut2\r\n #y\r\n imgCut2Plot=Image.fromarray(imgCut2)\r\n type(imgCut2Plot)\r\n imgCut2Plot\r\n \r\n \r\n # normalize image before filtering method ?\r\n if False:\r\n blurL = cv2.blur(imgCutL,(5,5))\r\n blurR = cv2.blur(imgCutR,(5,5))\r\n \r\n \r\n kernel = np.ones((5,5),np.uint8)\r\n \r\n tophatL = cv2.morphologyEx(imgCutL, cv2.MORPH_TOPHAT, kernel)\r\n tophatR = cv2.morphologyEx(imgCutR, cv2.MORPH_TOPHAT, kernel)\r\n \r\n tophatRPlot=Image.fromarray(255-tophatR)\r\n type(tophatRPlot)\r\n tophatRPlot\r\n \r\n blackhatL = cv2.morphologyEx(imgCutL, cv2.MORPH_BLACKHAT, kernel)\r\n blackhatR = cv2.morphologyEx(imgCutR, cv2.MORPH_BLACKHAT, kernel)\r\n \r\n blackhatRPlot=Image.fromarray(255-blackhatR)\r\n type(blackhatRPlot)\r\n blackhatRPlot\r\n \r\n imgCutLsobelx = cv2.Sobel(imgCutL,cv2.CV_64F,1,0,ksize=9)\r\n imgCutLsobely = cv2.Sobel(imgCutL,cv2.CV_64F,0,1,ksize=9)\r\n 
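ImageSegmentation above locates the rails by convolving the per-column difference signal with a flat window and keeping the smallest responses (rails are the smoothest columns; padding with huge values keeps windows from hanging off the image edges). The mechanism, reduced to one dimension:

import numpy as np

def lowest_windows(signal, width, k):
    # Sum of each length-`width` window; the flattest runs score lowest.
    conv = np.convolve(signal, np.ones(width), 'valid')
    return np.argsort(conv)[:k]   # start indices of the k smoothest windows

sig = np.array([9, 9, 1, 1, 1, 9, 9, 2, 2, 2, 9], dtype=float)
print(lowest_windows(sig, 3, 2))  # [2 7], the two rail-like flat runs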
\r\n cv2.imshow('sobel',imgCutLsobely)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows() \r\n \r\n imgCutLsobelxRPlot=Image.fromarray(imgCutLsobelx)\r\n imgCutLsobelxRPlot\r\n \r\n imgplot = plt.imshow(imgCutLsobelx)\r\n \r\n\r\n \r\n #plt.hist(imgCutLsobelx.ravel(),256,[0,256]); plt.show()\r\n \r\n #cv2.imshow('img',255-blackhatR)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows() \r\n \r\n # Filtering method \r\n yL = CustomFilteringmethod(imgCutL)\r\n yR = CustomFilteringmethod(imgCutR)\r\n \r\n \r\n plt.subplot(1, 2, 1)#+countplot)\r\n plt.imshow(yL,cmap='Greys')\r\n plt.title('left rail, group 0')\r\n plt.subplot(1, 2, 2)#+countplot)\r\n plt.title('right rail, group 0')\r\n plt.imshow(yR, cmap='Greys')\r\n \r\n plt.show()\r\n \r\n countplot+=1\r\n #get the results from the correct rail\r\n max_value, std_value = RailResults(yL,yR)\r\n \r\n #append results\r\n summary_list_0.append([max_value, std_value])\r\n \r\n #plot code\r\n if False:\r\n yLPlot=Image.fromarray(255-yL)\r\n yLPlot\r\n \r\n yRPlot=Image.fromarray(255-yR)\r\n yRPlot\r\n \r\ncountplot=0 \r\nfor filename in glob.glob('H:/BaneDk/WorkFiles/WorkFiles/Images/Examplesof1/*.jpg'): #assuming gif\r\n im = cv2.imread(filename,0)\r\n #im=Image.open(filename)\r\n image_list_1.append(im)\r\n \r\n # Segmentation\r\n imgCutL, imgCutR = ImageSegmentation(im,railwidth)\r\n \r\n # Filtering method \r\n yL = CustomFilteringmethod(imgCutL)\r\n yR = CustomFilteringmethod(imgCutR)\r\n \r\n plt.subplot(1, 2, 1)#+countplot)\r\n plt.imshow(yL,cmap='Greys')\r\n plt.title('left rail, group 1')\r\n plt.subplot(1, 2, 2)#+countplot)\r\n plt.title('right rail, group 1')\r\n plt.imshow(yR, cmap='Greys')\r\n \r\n plt.show()\r\n \r\n countplot+=1\r\n \r\n #get the results from the correct rail\r\n max_value, std_value = RailResults(yL,yR)\r\n \r\n #append results\r\n summary_list_1.append([max_value, std_value])\r\n\r\n\r\n\r\n#Show results \r\nsummary_0 = np.asarray(summary_list_0) \r\nsummary_1 = np.asarray(summary_list_1) \r\n\r\nmax_0 = max(summary_0[:,0]) \r\nmax_1 = max(summary_1[:,0]) \r\n\r\n\r\nmean_0 = np.mean(summary_0[:,0]) \r\nmean_1 = np.mean(summary_1[:,0]) \r\n\r\nstd_0 = np.std(summary_0[:,0]) \r\nstd_1 = np.std(summary_1[:,0]) \r\n\r\nprint(\"max 0:\", max_0, \", max 1:\", max_1, \", mean 0:\", mean_0, \", mean 1:\", mean_1, \", std 0:\", std_0, \", std 1:\", std_1)\r\n \r\n#Normalization group 0\r\nsummary_0[:,0] = (summary_0[:,0]-max_0)/(max_1-max_0)\r\n\r\n\r\n#Normalization group 1\r\nsummary_1[:,0] = (summary_1[:,0]-max_0)/(max_1-max_0) \r\n\r\n \r\n\r\n\r\n #img = cv2.imread('H:/BaneDk/WorkFiles/WorkFiles/Images/Examplesof0/Pic2c0136bf52ce4e1eae2ea343a32fc4fb.jpg',0)\r\n #plt.imshow(yLPlot) \r\n #cv2.imshow(\"left rail\",yL) \r\n #yRPlot=Image.fromarray(255-yR)\r\n #yRPlot\r\n #cv2.waitKey(0)\r\n #cv2.destroyWindow('i') \r\n #imgCut2Plot\r\n #y = np.max( np.abs(np.abs(imgCut1-col_mean1)-col_sd1), np.zeros(( imgCut1.shape[0] , imgCut1.shape[1] )) )\r\n #y.shape\r\n #imgCut1-col_mean1 \r\n# imgCut2=im[:,rail2start:rail2end]\r\n# type(imgCut2)\r\n# imgCut2.shape\r\n# imgCut2=Image.fromarray(imgCut2) \r\n \r\n\r\n \r\n #type(imgCut2)\r\n\r\n #col_mean2 = np.mean(imgCut2,axis=0)\r\n #imgCut2-col_mean2 \r\n #imgCut2=Image.fromarray(imgCut2)\r\n #imgCut2\r\n \r\n #im=Image.open(filename)\r\n \r\n #np.int(C[1])\r\n\r\n \r\n #firstrail = sortedlist[np.argsort(sortedlist)][0:railwidth]\r\n \r\n #get the meadian\r\n \r\n #secondrail = sortedlist[np.argsort(sortedlist)][(railwidth+1):(railwidth*2)]\r\n \r\n #get the median\r\n \r\n 
#np.where(np.argmin(convResult)) \r\n #padded_vector = , SumDifferenceCol[:].flatten()[:], padding[:].flatten()[:]] )\r\n #np.concatenate(padding[:].flatten(), SumDifferenceCol[:].flatten(), padding[:].flatten())\r\n #if im.shape[0] % 2 == 1: \r\n #SumDifferenceCol = np.sum(im[1::2,:]-im[2::2,:],axis=0) \r\n #find the width of the rail in pixels\r\n #SumDifferenceCol.shape\r\n #np.Inf((railwidth))\r\n #left = np.zeros((1,20))\r\n #right = left = np.zeros((1,20))\r\n #SumDifferenceCol[1::railwidth]\r\n #im=Image.open(filename)\r\n #image_list_0.append(im)\r\n #img=Image.open(filename)\r\n #imgCut=Image.fromarray(im[:,405:470])\r\n #imgCut\r\n #cv2.imshow('image',im)\r\n #plt.show(im[1::2,:].shape)\r\n# im.shape\r\n# \r\n# im[1::2,0].shape\r\n# im[2::2,0].shape\r\n \r\n \r\n \r\n\r\n# cv2.imshow('frame', im)\r\n# plt.plot(SumDifference)\r\n# plt.ylabel('some numbers')\r\n# plt.show()\r\n# \r\n# \r\n# plt.plot(im)\r\n# plt.ylabel('some numbers')\r\n# plt.show()\r\n #im[1::2,:][1:-1,:].shape\r\n #im[1::2,:]-im[2::2,:].shape\r\n \r\n #im[,]-\r\n \r\n#cv2.imshow('image',im)\r\n#cv2.waitKey(0)\r\n#cv2.destroyAllWindows() \r\n \r\n \r\n \r\n#w=600\r\n#h=1300\r\n#fig=plt.figure()\r\n#for i in range(1,20):\r\n# img = image_list_1[i]#np.random.randint(10, size=(h,w))\r\n# fig.add_subplot(i,2,1)\r\n# plt.imshow(img)\r\n#plt.show()\r\n#plt.show( image_list_1[0])\r\n\r\n#image_list_0[0].shape\r\n\r\n#col_mean = np.mean(image_list_0[0],axis=0)\r\n#col_mean.shape\r\n \r\n # if False:\r\n# SumDifferenceCol = np.sum(im[1::1,:][0:-1,:]-im[2::1,:],axis=0)\r\n# \r\n# \r\n# padding = np.ones(( np.int( railwidth/2)))*10e12\r\n# padded_vector = []\r\n# padded_vector.append(padding[:].tolist()) \r\n# padded_vector.append(SumDifferenceCol[:].tolist()) \r\n# padded_vector.append(padding[:-1].tolist()) \r\n# padded_vector = list(itertools.chain(*padded_vector))\r\n# padded_vector = np.asarray(padded_vector)\r\n# \r\n# convResult = np.convolve( padded_vector , convfilter[:], 'valid')\r\n# \r\n# convResult.shape\r\n# \r\n# np.argpartition(convResult,6)\r\n# \r\n# sortedlist = np.argsort(convResult)[:(railwidth*2)]\r\n# \r\n# # Initializing KMeans\r\n# kmeans = KMeans(n_clusters=2)\r\n# # Fitting with inputs\r\n# kmeans = kmeans.fit(sortedlist.reshape(-1, 1))\r\n# # Predicting the clusters\r\n# labels = kmeans.predict(sortedlist.reshape(-1, 1))\r\n# # Getting the cluster centers\r\n# C = kmeans.cluster_centers_\r\n# \r\n# rail1start=np.int(C[0]-railwidth/2)\r\n# rail1end = np.int(C[0]+railwidth/2)\r\n# \r\n# rail2start=np.int(C[1]-railwidth/2)\r\n# rail2end = np.int(C[1]+railwidth/2)\r\n# \r\n# #imgCut1=Image.fromarray(im[:,rail1start:rail1end])\r\n# imgCut1=im[:,rail1start:rail1end]\r\n# type(imgCut1)\r\n# imgCut1.shape \r\n\r\n# col_mean1 = np.mean(imgCut1,axis=0)\r\n# col_sd1 = np.std(imgCut1,axis=0)\r\n# \r\n# #type(imgCut1)\r\n# \r\n# imgCut1.shape\r\n# col_mean1.shape\r\n# #(np.abs(np.abs(imgCut1-col_mean1)-col_sd1)).shape\r\n# y = np.zeros(( imgCut1.shape[0] , imgCut1.shape[1] ))\r\n# y_fault = np.abs(np.abs(imgCut1-col_mean1)-col_sd1)\r\n# larger_than= y_fault > np.zeros(( imgCut1.shape[0] , imgCut1.shape[1] ))\r\n# y[larger_than] = y_fault[larger_than]\r\n# y = y.astype(int)\r\n \r\n # max_values = ( np.max(yL), np.max(yR))\r\n# std_values = ( np.std(yL), np.std(yR))\r\n# \r\n# #use the maximum and the std for the same rail\r\n# indice_std = values.index(max(values)) \r\n# std_value = std_values[indice_std] \r\n# max_value=max( 
max_values)","sub_path":"tensorflow_yolov3/SegmentationAndCustomMethod.py","file_name":"SegmentationAndCustomMethod.py","file_ext":"py","file_size_in_byte":12094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296710888","text":"#!/usr/bin/python\n# -*- coding: UTF8 -*-\n\"\"\"\nAigor 0.1 Bot conversacional en español con traducción en vivo\nusando el traductor online de Google, pyaiml y Festival.\nCopyright (C) 2011 hashashin mail: gentoo dot power at gmail dot com \nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see http://www.gnu.org/licenses.\n\"\"\"\n\nimport aiml\nimport os.path\nfrom festival import say\nfrom xgoogle.translate import Translator\n\n\n#preparamos para usar el traductor luego\ntranslate = Translator().translate\n#creamos el \"cerebro\" del bot cargando los aiml de alice o el cerebro existente\nk = aiml.Kernel()\n# si existe el fichero lo cargamos directamente\nif os.path.isfile(\"cerebro.brn\"):\n k.bootstrap(brainFile = \"cerebro.brn\")\n#si no existe lo creamos con std-startup.xml\nelse: \n k.bootstrap(learnFiles = \"std-startup.xml\", commands = \"load aiml b\")\n#si modificamos los aiml habrá que borrar cerebro.brn para recrearlo\n k.saveBrain(\"cerebro.brn\")\n#cargamos el cerebro\n k.loadBrain(\"cerebro.brn\")\n\n#Configuración del bot\nk.setBotPredicate(\"name\",\"Aigor\")\nk.setBotPredicate(\"gender\",\"male\")\nk.setBotPredicate(\"master\",\"hashashin\")\nk.setBotPredicate(\"birthday\",\"2011\")\nk.setBotPredicate(\"birthplace\",\"Barcelona\")\nk.setBotPredicate(\"boyfriend\",\"you\")\nk.setBotPredicate(\"favoritebook\",\"Don't Read Me\")\nk.setBotPredicate(\"favoritecolor\",\"transparent\")\nk.setBotPredicate(\"favoriteband\",\"rubber\")\nk.setBotPredicate(\"favoritefood\",\"patterns\")\nk.setBotPredicate(\"favoritesong\",\"your voice\")\nk.setBotPredicate(\"favoritemovie\",\"your life story\")\nk.setBotPredicate(\"forfun\",\"talk to you\")\nk.setBotPredicate(\"friends\",\"you\")\nk.setBotPredicate(\"girlfriend\",\"you\")\nk.setBotPredicate(\"kindmusic\",\"all\")\nk.setBotPredicate(\"location\",\"here\")\nk.setBotPredicate(\"looklike\",\"you\")\nk.setBotPredicate(\"question\",\"What?\")\nk.setBotPredicate(\"sign\",\"none\")\nk.setBotPredicate(\"talkabout\",\"anything\")\nk.setBotPredicate(\"wear\",\"nothing\")\nk.setBotPredicate(\"website\",\"http://serveramd.dyndns.org\")\nk.setBotPredicate(\"email\",\"hash@serveramd.dyndns.org\")\nk.setBotPredicate(\"language\",\"any\")\nk.setBotPredicate(\"msagent\",\"no\")\n\n#saludo inicial y muestra la entrada de órdenes\nprint(\"Escribe quit o salir para cerrar el programa\\n\")\nprint(\"¿Soy Aigor que puedo hacer por tí?\")\nsay(\"soy Aigor que puedo hacer por t'i?\")\n#bucle principal\n#como Festival no entiende utf-8 he usado latin1\nwhile True:\n user_input = raw_input(\"Pregunta > \")\n if user_input == \"quit\" or user_input == \"salir\":\n break\n else:\n#Traduce la entrada a ingles para que la entienda el bot\n trans = 
translate(user_input, lang_to=\"en\").encode('latin1')\n#look up the answer \n        answer = k.respond(trans)\n#Translate the answer into Spanish (or any other language supported by Google) \n        traans = translate(answer, lang_to=\"es\").encode('latin1')\n#Print/say the answer \n        print(traans.decode('latin1'))\n        say(traans)\n","sub_path":"aigor.py","file_name":"aigor.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"390402558","text":"import cv2 as cv\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n# plot the histogram of the image\r\ndef plot_demo(image):\r\n    plt.hist(image.ravel(), 256, [0, 256])  # image.ravel() flattens the image; 256 is the number of bins, [0, 256] the value range\r\n    plt.show(\"1\")\r\n\r\n\r\ndef hist_image(image):\r\n    color = (\"blue\", \"green\", \"red\")\r\n    for i, color in enumerate(color):\r\n        hist = cv.calcHist([image], [i], None, [256], [0, 256])\r\n        plt.plot(hist, color=color)\r\n        plt.xlim([0, 256])\r\n    plt.show(\"2\")\r\n\r\n\r\nsrc = cv.imread(\"C://bg.jpg\")\r\ncv.namedWindow(\"original\", cv.WINDOW_NORMAL)\r\ncv.imshow(\"original\", src)\r\nplot_demo(src)\r\nhist_image(src)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()\r\n","sub_path":"OpenCV学习/9.1.py","file_name":"9.1.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"322669523","text":"from ClassLibrary.BaseClass.Object import *\nfrom ClassLibrary.ShopClass.SettleInCompany import SettleInCompany\nfrom ClassLibrary.ShopClass.SettleInUser import SettleInUser\n\n\nclass SettleInApplication(Object):\n    def __init__(self):\n        super(SettleInApplication, self).__init__()\n        self.className = self.__class__.__name__\n\n    def get_attribute_infoCompany(self):\n        if self.instance and self.instance.get(attribute_infoCompany):\n            settleIn = SettleInCompany()\n            settleIn.get_Object(self.instance.get(attribute_infoCompany).id)\n            return settleIn.output_SettleIn()\n\n    def get_attribute_user(self):\n        if self.instance and self.instance.get(attribute_user):\n            return self.instance.get(attribute_user).id\n\n    def get_attribute_user_id(self):\n        if self.instance and self.instance.get(attribute_user):\n            return self.instance.get(attribute_user).id\n\n    def get_attribute_infoPersonal(self):\n        if self.instance and self.instance.get(attribute_infoPersonal):\n            settleIn = SettleInUser()\n            settleIn.get_Object(self.instance.get(attribute_infoPersonal).id)\n            return settleIn.output_SettleIn()\n\n    def get_user(self):\n        if self.instance:\n            return self.instance.get(attribute_user)\n\n    def get_infoPersonal(self):\n        if self.instance:\n            return self.instance.get(attribute_infoPersonal)\n\n    def get_infoCompany(self):\n        if self.instance:\n            return self.instance.get(attribute_infoCompany)\n\n    def get_attribute_type(self):\n        if self.instance:\n            return self.instance.get(attribute_type)\n\n    def set_attribute_type(self, value):\n        if self.instance:\n            self.instance.set(attribute_type, value)\n            self.save_instance()\n            return True\n        return None\n\n    def set_attribute_user(self, value):\n        if self.instance:\n            self.instance.set(attribute_user, value)\n            self.save_instance()\n            return True\n        return None\n\n    def set_attribute_InfoPersonal(self, value):\n        if self.instance:\n            self.instance.set(attribute_infoPersonal, value)\n            self.save_instance()\n            return True\n        return None\n\n    def set_attribute_InfoCompany(self, value):\n        if self.instance:\n            self.instance.set(attribute_infoCompany, value)\n            self.save_instance()\n            return True\n        return None\n\n    def 
output_SettleInApplication(self):\n if self.instance:\n data = {\n attribute_objectId: self.get_attribute_objectId(),\n attribute_state: self.get_attribute_state(),\n attribute_infoPersonal: self.get_attribute_infoPersonal(),\n attribute_infoCompany: self.get_attribute_infoCompany(),\n attribute_type: self.get_attribute_type(),\n }\n return data\n\n def find_User(self, user):\n result = Base.queryInstanceAttribute(self.__class__.__name__, attribute_user, user)\n if result:\n return True\n return None\n\n def get_SettleInApplication_All(self, state, type1=None, page=1):\n\n print(state)\n if state is not None and page:\n if 0 == int(state):\n query = Base.queryInstanceAttribute(self.__class__.__name__, attribute_state, int(state), page)\n else:\n query = Base.queryInstanceAttribute1_Attribute2(self.__class__.__name__, attribute_type, int(type1), attribute_state, int(state), page)\n if query:\n settle = []\n for foo in query:\n settleApplication = SettleInApplication()\n settleApplication.set_instance(foo)\n settle.append(settleApplication.output_SettleInApplication())\n return settle\n return []\n\n def count_SettleInApplication_All(self, state, type1):\n if state is not None:\n if 0 == int(state):\n count = Base.queryInstanceAttributeCount(self.__class__.__name__, attribute_state, int(state))\n else:\n count = Base.queryInstanceAttribute1_Attribute2_Count(self.__class__.__name__, attribute_type, int(type1), attribute_state, int(state))\n return count\n self.print_msg__error( 'parameter is null' )\n return 0\n","sub_path":"ClassLibrary/ShopClass/SettleInApplication.py","file_name":"SettleInApplication.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205292712","text":"import cv2\nimport time\nimport argparse\nimport os\nimport torch\n\nimport posenet\nimport json\n\nimport math\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--scale_factor', type=float, default=1.0)\nparser.add_argument('--notxt', action='store_true')\nparser.add_argument('--video_dir', type=str, default='./video_in')\nparser.add_argument('--image_dir', type=str, default='./test_in')\nparser.add_argument('--output_dir', type=str, default='./test_out')\nargs = parser.parse_args()\n\ndef distance(x1, y1, x2, y2):\n result = math.sqrt( math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2))\n return result\n\ndef dotproduct(x1, y1, x2, y2):\n result = x1*x2 + y1*y2\n return result\n\ndef crossproduct(ax, ay, bx, by, cx, cy):\n a = ax*by+bx*cy+cx*ay\n b = ay*bx+by*cx+cy*ax\n return a-b\n\ndef arccos(n):\n result = math.acos(n)\n return result\n\ndef arcsin(n):\n result = math.asin(n)\n return result\n\ndef absolutevalue(x1, y1, x2, y2):\n v1 = math.sqrt(math.pow(x1, 2) + math.pow(y1, 2))\n v2 = math.sqrt(math.pow(x2, 2) + math.pow(y2, 2))\n result = v1*v2\n return result\n\ndef xy_to_feature_1(leftshoulder, rightshoulder, lefthip, righthip):\n \n d_shoulder = distance(leftshoulder[0], leftshoulder[1], rightshoulder[0], rightshoulder[1])\n d_hip = distance(lefthip[0], lefthip[1], righthip[0], righthip[1])\n\n return d_shoulder/d_hip\n\ndef xy_to_feature_2(leftshoulder, rightshoulder, leftelbow, rightelbow) :\n Al = (leftelbow[0]-leftshoulder[0], leftelbow[1]-leftshoulder[1])\n Ar = (rightelbow[0]-rightshoulder[0], rightelbow[1]-rightshoulder[1])\n\n return ((2*math.pi)-arccos(dotproduct(Al[0], Al[1], Ar[0], Ar[1])/absolutevalue(Al[0], Al[1], Ar[0], 
Ar[1])))/(2*math.pi)\n\ndef xy_to_feature_3(lefthip, righthip, leftknee, rightknee) :\n \n Ll = (leftknee[0]-lefthip[0], leftknee[1]-lefthip[1])\n Lr = (rightknee[0]-righthip[0], rightknee[1]-righthip[1])\n \n return (arccos(dotproduct(Ll[0], Ll[1], Lr[0], Lr[1])/absolutevalue(Ll[0], Ll[1], Lr[0], Lr[1])))/(math.pi)\n\ndef xy_to_feature_4(lefthip, righthip, leftshoulder, rightshoulder) :\n Pcenterhip = ((lefthip[0]+righthip[0])/2, (lefthip[1]+righthip[1])/2)\n neck = ((leftshoulder[0]+rightshoulder[0])/2, (leftshoulder[1]+rightshoulder[1])/2)\n h = (neck[0]-Pcenterhip[0], neck[1]-Pcenterhip[1])\n x = (1,0)\n return (arccos(dotproduct(h[0], h[1], x[0], x[1])/absolutevalue(h[0], h[1], x[0], x[1])))/(math.pi)\n\ndef xy_to_feature_5(leftshoulder, rightshoulder, leftelbow, rightelbow, leftwrist, rightwrist) :\n Al = (leftshoulder[0]-leftelbow[0], leftshoulder[1]-leftelbow[1])\n Ar = (rightshoulder[0]-rightelbow[0], rightshoulder[1]-rightelbow[1])\n Wl = (leftwrist[0]-leftelbow[0], leftwrist[1]-leftelbow[1])\n Wr = (rightwrist[0]-rightelbow[0], rightwrist[1]-rightelbow[1])\n leftelbowangle = (arcsin(crossproduct(leftelbow[0],leftelbow[1],leftshoulder[0],leftshoulder[1],leftwrist[0],leftwrist[1])/absolutevalue(Al[0], Al[1], Wl[0], Wl[1])))/(math.pi)\n rightelbowangle = (arcsin(crossproduct(rightelbow[0],rightelbow[1],rightshoulder[0],rightshoulder[1],rightwrist[0],rightwrist[1])/absolutevalue(Ar[0], Ar[1], Wr[0], Wr[1])))/(math.pi)\n\n return [leftelbowangle, rightelbowangle]\n\ndef xy_to_feature_6(lefthip, righthip, leftknee, rightknee, leftankle, rightankle):\n\n Ll = (lefthip[0]-leftknee[0], lefthip[1]-leftknee[1])\n Lr = (righthip[0]-rightknee[0], righthip[1]-rightknee[1])\n\n Cl = (leftankle[0]-leftknee[0], leftankle[1]-leftknee[1])\n Cr = (rightankle[0]-rightknee[0], rightankle[1]-rightknee[1])\n\n leftkneeangle = (arcsin(crossproduct(leftknee[0],leftknee[1],lefthip[0],lefthip[1],leftankle[0],leftankle[1])/absolutevalue(Ll[0], Ll[1], Cl[0], Cl[1])))/(math.pi)\n rightkneeangle = (arcsin(crossproduct(rightknee[0],rightknee[1],righthip[0],righthip[1],rightankle[0],rightankle[1])/absolutevalue(Lr[0], Lr[1], Cr[0], Cr[1])))/(math.pi)\n\n return [leftkneeangle, rightkneeangle]\n\ndef video2frame(invideofilename, save_path):\n vidcap = cv2.VideoCapture(invideofilename)\n count = 0\n while True:\n success,image = vidcap.read()\n if not success:\n break\n print ('Read a new frame: ', success)\n fname = \"{}.jpg\".format(\"{0:05d}\".format(count))\n cv2.imwrite(save_path + '/' + fname, image) # save frame as JPEG file\n count += 1\n print(\"{} images are extracted in {}.\". 
format(count, save_path))\n\ndef main():\n    model = posenet.load_model(args.model)\n    model = model.cuda()\n    output_stride = model.output_stride\n\n    if args.image_dir:\n        if not os.path.exists(args.image_dir):\n            os.makedirs(args.image_dir)\n\n    video2frame(args.video_dir,args.image_dir)\n\n    if args.output_dir:\n        if not os.path.exists(args.output_dir):\n            os.makedirs(args.output_dir)\n\n    filenames = [f.path for f in os.scandir(args.image_dir) if f.is_file() and f.path.endswith(('.png', '.jpg'))]\n\n    start = time.time()\n    for i,f in enumerate(filenames):\n\n        input_image, draw_image, output_scale = posenet.read_imgfile(\n            f, scale_factor=args.scale_factor, output_stride=output_stride)\n\n        with torch.no_grad():\n            input_image = torch.Tensor(input_image).cuda()\n\n            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)\n\n            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(\n                heatmaps_result.squeeze(0),\n                offsets_result.squeeze(0),\n                displacement_fwd_result.squeeze(0),\n                displacement_bwd_result.squeeze(0),\n                output_stride=output_stride,\n                max_pose_detections=10,\n                min_pose_score=0.25)\n\n            keypoint_coords *= output_scale\n\n        if args.output_dir:\n            draw_image = posenet.draw_skel_and_kp(draw_image, pose_scores, keypoint_scores, keypoint_coords,min_pose_score=0.25, min_part_score=0.25)\n            cv2.imwrite(os.path.join(args.output_dir, os.path.relpath(f, args.image_dir)), draw_image)\n\n        if not args.notxt:\n            print()\n            print(\"Results for image: %s\" % f)\n\n            max_score = 0\n            max_index = 0\n            ignore = 0\n\n            for pi in range(len(pose_scores)):\n\n                # keep track of the highest-scoring pose\n                if pose_scores[pi] > max_score:\n                    max_score = pose_scores[pi]\n                    max_index = pi\n\n                if pose_scores[pi] == 0.:\n                    ignore = 1\n                    break\n\n                print('Pose #%d, score = %f' % (pi, pose_scores[pi]))\n\n                for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):\n                    print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))\n            \n            if pose_scores[max_index] != 0. 
:\n tmp_data = dict()\n out_data = dict(image_name=[f[10:-4]])\n\n for ki, (s, c) in enumerate(zip(keypoint_scores[max_index, :], keypoint_coords[max_index, :, :])):\n tmp_data[posenet.PART_NAMES[ki]] = c.tolist()\n\n out_data['feature_1'] = xy_to_feature_1(tmp_data['leftShoulder'], tmp_data['rightShoulder'], tmp_data['leftHip'], tmp_data['rightHip'])\n out_data['feature_2'] = xy_to_feature_2(tmp_data['leftShoulder'], tmp_data['rightShoulder'], tmp_data['leftElbow'], tmp_data['rightElbow'])\n out_data['feature_3'] = xy_to_feature_3(tmp_data['leftHip'], tmp_data['rightHip'], tmp_data['leftKnee'], tmp_data['rightKnee'])\n out_data['feature_4'] = xy_to_feature_4(tmp_data['leftHip'], tmp_data['rightHip'], tmp_data['leftShoulder'], tmp_data['rightShoulder'])\n out_data['feature_5'] = xy_to_feature_5(tmp_data['leftShoulder'], tmp_data['rightShoulder'], tmp_data['leftElbow'], tmp_data['rightElbow'], tmp_data['leftWrist'], tmp_data['rightWrist'])\n out_data['feature_6'] = xy_to_feature_6(tmp_data['leftHip'], tmp_data['rightHip'], tmp_data['leftKnee'], tmp_data['rightKnee'], tmp_data['leftAnkle'], tmp_data['rightAnkle'])\n \n out_data['total_feature'] = list()\n out_data['total_feature'].extend([out_data['feature_1']])\n out_data['total_feature'].extend([out_data['feature_2']])\n out_data['total_feature'].extend([out_data['feature_3']])\n out_data['total_feature'].extend([out_data['feature_4']])\n out_data['total_feature'].extend([out_data['feature_5'][0]])\n out_data['total_feature'].extend([out_data['feature_5'][1]])\n out_data['total_feature'].extend([out_data['feature_6'][0]])\n out_data['total_feature'].extend([out_data['feature_6'][1]])\n\n out_data['skeleton_vector'] = tmp_data\n\n with open(os.path.join(args.output_dir,f[10:-4]+\".json\"),\"w\") as json_file :\n json.dump(out_data, json_file, indent=\"\\t\")\n\n print('Average FPS:', len(filenames) / (time.time() - start))\n\n\nif __name__ == \"__main__\":\n main()\n\n\nPART_NAMES = [\n \"nose\", \"leftEye\", \"rightEye\", \"leftEar\", \"rightEar\", \"leftShoulder\",\n \"rightShoulder\", \"leftElbow\", \"rightElbow\", \"leftWrist\", \"rightWrist\",\n \"leftHip\", \"rightHip\", \"leftKnee\", \"rightKnee\", \"leftAnkle\", \"rightAnkle\"\n]\n","sub_path":"feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37805279","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 28 15:55:01 2020\n\n@author: M. Leenstra\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport numpy as np\n#from models.CD_siamese_net import Identity\ntorch.manual_seed(0)\n\n__all__ = ['siamese_net']\n\n \nclass SiameseNet(nn.Module):\n def __init__(self, branches, joint, classifier, patch_size_lin):\n super(SiameseNet, self).__init__()\n \n self.branches = branches \n self.joint = joint\n \n self.avgpool = nn.AdaptiveAvgPool2d((patch_size_lin, patch_size_lin))\n self.classifier = classifier\n\n\n def forward(self, data, n_branches, extract_features=None, \n conv_classifier=False, use_softmax=False, **kwargs):\n \"\"\"\n forward pass through network\n\n Parameters\n ----------\n data : torch array\n DESCRIPTION.\n n_branches : int\n number of branches in the network (e.g. siamese has 2 branches)\n extract_features : list, optional\n list with the number of the layer in the branch to extract features from. \n The default is None (i.e. 
for training phase)\n\n        Returns\n        -------\n        x : torch array\n            output of the network for each image. Number of channels is number of classes\n            used to train the network\n        features : torch array\n            specified featuremaps from the branch, upsampled to original patchsize\n            used for feature extraction\n        \"\"\"\n        res = list()\n        for j in range(n_branches): # Siamese/triplet nets; sharing weights\n            x = data[j]\n            \n            # if in feature extracting phase, extract hypercolumn for specified features\n            if isinstance(extract_features,list):\n                activations = dict()\n                names = list()\n                for i, l in enumerate(self.branches):\n                    names.append('x'+str(i))\n                    if i == 0:\n                        activations[names[i]] = l(x)\n                        if activations[names[i]].shape[2:] != data[j].shape[2:]:\n                            activations[names[i]] = nn.functional.interpolate(\n                                activations[names[i]], size=data[j].shape[2:], \n                                mode='bilinear', align_corners=True)\n                    else:\n                        activations[names[i]] = l(activations[names[i-1]])\n                        if activations[names[i]].shape[2:] != data[j].shape[2:]:\n                            activations[names[i]] = nn.functional.interpolate(\n                                activations[names[i]], size=data[j].shape[2:], \n                                mode='bilinear', align_corners=True)\n                \n                # return a list of features\n                #features = [x]\n                features=list()\n                features.extend([activations[names[i]] for i in extract_features])\n                \n                return features\n            \n            # if in training or validation phase forward images through branch \n            else:\n                res.append(self.branches(x))\n        \n        # absolute difference of the branch outputs (concatenated for triplet nets)\n        x = torch.abs(res[1] - res[0])\n        if n_branches == 3:\n            x = torch.cat((x, torch.abs(res[2] - res[1])), 1)\n        \n        # joint layers\n        x = self.joint(x)\n        if extract_features == 'joint': \n            return x\n        x = nn.functional.adaptive_avg_pool2d(x, (data[0].shape[2], data[0].shape[3]))\n        if not conv_classifier:\n            x = torch.flatten(x, 1)\n            x = self.classifier(x)\n        else:\n            x = self.classifier(x)\n            if use_softmax: # is True during inference\n                x = nn.functional.softmax(x, dim=1)\n            else:\n                x = nn.functional.log_softmax(x, dim=1)\n\n        return x\n    \n    \ndef make_layers(cfg, n_channels, batch_norm=False, first_77=False):\n    layers = []\n    in_channels = int(n_channels) \n    if first_77:\n        v = int(cfg[0])\n        conv2d = nn.Conv2d(in_channels, v, kernel_size=7, padding=3)\n        if batch_norm:\n            layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n        else:\n            layers += [conv2d, nn.ReLU(inplace=True)]\n        in_channels = v \n    # iterate over layers and add to sequential\n    for v in cfg:\n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        elif v == 'CU':\n            layers += [nn.ConvTranspose2d(in_channels, v, kernel_size=2, stride=2)]\n        elif v == 'BU':\n            layers += [nn.Upsample(scale_factor=8, mode='bilinear')] # TODO: scale factor hard-coded \n        else:\n            v = int(v)\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n            else:\n                layers += [conv2d, nn.ReLU(inplace=True)]\n            in_channels = v\n    return nn.Sequential(*layers)\n\ndef make_conv_classifier(cfg, in_channels, batch_norm=False):\n    layers = []\n    in_channels = int(in_channels) \n    # iterate over layers and add to sequential\n    for v in cfg[:-1]: \n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        elif v == 'CU':\n            layers += [nn.ConvTranspose2d(in_channels, v, kernel_size=2, stride=2)]\n        elif v == 'BU':\n            layers += [nn.Upsample(scale_factor=8, mode='bilinear')] # TODO: scale factor hard-coded \n        else:\n            v = int(v)\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), 
nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n \n # last layer no ReLU, 1x1 kernel\n v = int(cfg[-1])\n conv2d = nn.Conv2d(in_channels, v, kernel_size=1, padding=0)\n layers += [conv2d] \n return nn.Sequential(*layers)\n\ndef make_classifier(cfg, n_channels):\n layers = []\n in_channels = int(n_channels) \n # iterate over layers and add to sequential\n for v in cfg[:-1]:\n if v == 'D':\n layers += [nn.Dropout()]\n else:\n v = int(v)\n linear = nn.Linear(in_channels, v)\n layers += [linear, nn.ReLU(inplace=True)]\n in_channels = v\n \n # last layer no ReLU\n v = int(cfg[-1])\n linear = nn.Linear(in_channels, v)\n layers += [linear] \n return nn.Sequential(*layers)\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n \n def forward(self, x):\n return x\n\n\ndef siamese_net(cfg, n_channels=13,n_classes=2, patch_size=96, batch_norm=True, n_branches=2):\n \"\"\"\n Create network\n\n Parameters\n ----------\n cfg : dictionary\n dictionairy specifying architecture of branches and top of network. \n example: {'branch': np.array([64, 'M', 128, 'M']), 'top': np.array([256])}\n integers for number of filters in conv-layers, 'M' for maxpool. \n ReLU is added after each conv-layer, batch-norm optional.\n n_channels : int, optional\n number of input channels. The default is 13.\n n_classes : int, optional\n number of output classes. The default is 2.\n patch_size : int, optional\n input size of patch (squared). The default is 96.\n batch_norm : boolean, optional\n whether to use batch norm are not. The default is False.\n n_branches : int, optional\n number of branches to use in network. The default is 2.\n\n Returns\n -------\n net : nn.Module\n deep learning network.\n\n \"\"\"\n # determine number of max-pool layers\n if cfg['branch'] is not None:\n n_mpool = np.sum(cfg['branch'] == 'M') + np.sum(cfg['top'] == 'M') \n else:\n n_mpool = 0\n \n # determine input sizes for input classification layers\n patch_size_lin = patch_size\n if cfg['top'] is not None:\n if len(cfg['top'][cfg['top'] == 'BU'] or cfg['top'][cfg['top'] == 'CU']) == 0:\n for i in range(n_mpool):\n patch_size_lin = int(patch_size_lin/2) \n else:\n for i in range(n_mpool):\n patch_size_lin = int(patch_size_lin/2) \n \n # create layers\n if cfg['branch'] is not None:\n branches = make_layers(cfg['branch'],n_channels,batch_norm=batch_norm,first_77=False)\n else:\n branches = Identity()\n if cfg['top'] is not None: \n joint = make_layers(cfg['top'],\n int(cfg['branch'][cfg['branch'] != 'M'][-1])*(n_branches-1),\n batch_norm=batch_norm)\n else:\n # does nothing because next layer is the same\n #joint = nn.AdaptiveAvgPool2d((patch_size, patch_size)) \n joint = Identity()\n \n if cfg['classifier'][0] != 'C':\n if cfg['top'] is not None:\n n_channels_lin = int(cfg['top'][cfg['top'] != 'CU'][cfg['top'] != 'BU'][-1])\n else:\n n_channels_lin = int(cfg['branch'][cfg['branch'] != 'M'][-1])*n_branches\n n_channels_classifier = n_channels_lin * patch_size_lin * patch_size_lin \n classifier = make_classifier(cfg['classifier'], n_channels_classifier)\n else:\n if cfg['top'] is not None:\n in_channels = cfg['top'][-1]\n elif cfg['branch'] is not None:\n in_channels = int(cfg['branch'][cfg['branch'] != 'M'][-1])\n else:\n in_channels = n_channels\n classifier = make_conv_classifier(cfg['classifier'][1:],in_channels,batch_norm=batch_norm)\n \n # create network\n net = SiameseNet(branches, joint, classifier, patch_size_lin) \n \n print(net)\n return net\n 
","sub_path":"models/siamese_net.py","file_name":"siamese_net.py","file_ext":"py","file_size_in_byte":9953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594787419","text":"def extractDarkFish(item):\n\t\"\"\"\n\t# DarkFish Translations\n\n\t\"\"\"\n\tchp, vol, frag = extractChapterVolFragment(item['title'])\n\tif not (chp or vol) or 'preview' in item['title'].lower():\n\t\treturn None\n\tif 'She Professed Herself The Pupil Of The Wise Man'.lower() in item['title'].lower() or 'She Professed Herself The Pupil Of The Wise Man'.lower() in [tmp.lower() for\n\t tmp in item['tags']]:\n\t\treturn buildReleaseMessageWithType(item, 'Kenja no Deshi wo Nanoru Kenja', vol, chp, frag=frag)\n\treturn False\n","sub_path":"WebMirror/management/rss_parser_funcs/feed_parse_extractDarkFish.py","file_name":"feed_parse_extractDarkFish.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"94438552","text":"import re\nimport pandas as pd\n\n\n# Read through page_links_en.ttl file that contains all internal links. Then puts URI and internal link into individual strings.\n\n \nfp = open('page_links_en.ttl', 'r')\nprint(\"fp = open('page_links_en.ttl', 'r') done\")\n\nline = fp.readline()\nprint(\"line = fp.readline() done\")\ncnt = 0\nprint(\"cnt = 0 done\")\n\n#url = []\nlinks = []\n\nwhile cnt<1000008:\n if cnt<8:\n line = fp.readline()\n print(\"cnt 8\")\n cnt+=1\n else:\n links.append(line)\n #print(\"links.append(line) done\")\n line = fp.readline()\n #print(\"line = fp.readline() done\")\n cnt += 1\n \nprint(\"while done\")\nlinks_df = pd.DataFrame(links, columns=['internal_links'])\nprint(\"dataframe done\")\n\nprint(links_df)\n\nurl = []\n\ni =0\n\n\n#Split and edit each line to get URI and internal links individually as strings.\n\n \nwhile i\", \"\", item[0])\n url.append(item[0])\n print(item[0])\n #item[2] = str(item[2])\n item[2] = re.sub(\"<\", \"\", item[2])\n item[2] = re.sub(\">\", \"\", item[2])\n links_df['internal_links'][i] = item[2]\n print(item[2])\n print(item)\n i+=1\n\n\nlinks_df['url'] = url\nprint(links_df) \n\nlinks_df.to_csv(\"data_links.csv\", index= False)\nprint(\"to_csv done\")\n","sub_path":"bert_entity/preprocessing/get_links_data_from_ttl_links_file_dummy.py","file_name":"get_links_data_from_ttl_links_file_dummy.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"191216487","text":"class Solution(object):\n def combinationSum4(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n def findComb(nums, target):\n if target < 0:\n return 0\n if dp[target] != -1:\n return dp[target]\n count = 0\n for i in range(0, len(nums)):\n count += findComb(nums, target - nums[i])\n dp[target] = count\n return count\n dp = [-1] * (target + 1)\n dp[0] = 1\n return findComb(nums, target)\n\n def combinationSum4A(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums = sorted(nums)\n comb = [1] + [0] * target\n for t in range(target + 1):\n for n in nums:\n if t >= n:\n comb[t] += comb[t - n]\n else:\n break\n\n return comb[target]\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.combinationSum4([4, 2, 1], 32))\n 
pass\n","sub_path":"combinationSumIV.py","file_name":"combinationSumIV.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"314188000","text":"import enum\nimport sys\nimport logging\nimport re\nimport random\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\nimport scw.wbn_global\nimport scw.model\n\n\nclass BreathingDialog(QtWidgets.QDialog):\n # noinspection PyArgumentList,PyUnresolvedReferences\n def __init__(self,\n i_ib_text: str=\"Breathing in I know I am breathing in\",\n i_ob_text: str=\"Breathing out I know I am breathing out\"):\n super().__init__()\n\n # self.showFullScreen()\n self.showMaximized()\n\n vbox_l2 = QtWidgets.QVBoxLayout()\n self.setLayout(vbox_l2)\n\n self.breathing_graphicsview = QtWidgets.QGraphicsView() # QGraphicsScene\n vbox_l2.addWidget(self.breathing_graphicsview)\n self.breathing_gs = QtWidgets.QGraphicsScene()\n self.breathing_graphicsview.setScene(self.breathing_gs)\n # print(self.breathing_graphicsview.sceneRect().width())\n self.breathing_graphicsview.setSceneRect(QtCore.QRectF(0,0,2500,2500))\n self.breathing_graphicsview.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.breathing_graphicsview.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n\n self.breathing_graphicsview.setBackgroundBrush(\n QtGui.QBrush(QtGui.QColor(217, 238, 252))\n )\n\n # self.addBreathingCloud()\n\n # Animation\n self._ib_qtimeline = QtCore.QTimeLine(duration=10000)\n self._ib_qtimeline.setFrameRange(1, 1000)\n self._ib_qtimeline.setCurveShape(QtCore.QTimeLine.LinearCurve)\n self._ib_qtimeline.frameChanged.connect(\n self.frame_change_breathing_in\n )\n self._ob_qtimeline = QtCore.QTimeLine(duration=20000)\n self._ob_qtimeline.setFrameRange(1, 2000)\n self._ob_qtimeline.setCurveShape(QtCore.QTimeLine.LinearCurve)\n self._ob_qtimeline.frameChanged.connect(\n self.frame_change_breathing_out\n )\n\n screen_qrect = QtWidgets.QApplication.desktop().screenGeometry()\n xpos_ft = 0.0\n ypos_ft = screen_qrect.height() / 2\n t_drawrectf = QtCore.QRectF(xpos_ft, ypos_ft, screen_qrect.width(), screen_qrect.height())\n\n # Custom dynamic breathing graphic\n self.cloud_gi = BreathingGraphicsObject(t_drawrectf)\n self.breathing_gs.addItem(self.cloud_gi)\n # self._custom_gi.update_pos_and_origin_point(self._view_width_int, self._view_height_int)\n self.cloud_gi.enter_signal.connect(self._ib_start)\n self.cloud_gi.leave_signal.connect(self._ob_start)\n\n self.ib_text_qgi = QtWidgets.QGraphicsTextItem()\n self.ib_text_qgi.setAcceptHoverEvents(False) # -so that the underlying item will not be disturbed\n self.ib_text_qgi.setHtml(i_ib_text)\n new_qfont = self.ib_text_qgi.font()\n new_qfont.setPointSize(32)\n self.ib_text_qgi.setFont(new_qfont)\n self.breathing_gs.addItem(self.ib_text_qgi)\n self.ib_text_qgi.setTextWidth(900)\n # TODO: The text width is -1 here even though we are show()-ing the dialog first\n print(\"self.ib_text_qgi.textWidth() = \" + str(self.ib_text_qgi.textWidth()))\n self.ib_text_qgi.setPos(\n (screen_qrect.width() - self.ib_text_qgi.textWidth()) / 2,\n ypos_ft * 1.5\n )\n # self.ib_text_qgi.textWidth()\n # self.text_qgi.update_pos_and_origin_point(VIEW_WIDTH_INT, VIEW_HEIGHT_INT)\n #self.text_qgi.setDefaultTextColor(QtGui.QColor(mc.mc_global.MC_DARKER_GREEN_COLOR_STR))\n\n self.ob_text_qgi = QtWidgets.QGraphicsTextItem()\n self.ob_text_qgi.setAcceptHoverEvents(False) # -so that the underlying item will not be disturbed\n 
self.ob_text_qgi.setHtml(i_ob_text)\n new_qfont = self.ib_text_qgi.font()\n new_qfont.setPointSize(32)\n self.ob_text_qgi.setFont(new_qfont)\n self.breathing_gs.addItem(self.ob_text_qgi)\n self.ob_text_qgi.setTextWidth(900)\n self.ob_text_qgi.setPos(\n (screen_qrect.width() - self.ob_text_qgi.textWidth()) / 2,\n ypos_ft * 0.5\n )\n\n self.show()\n\n\n def frame_change_breathing_in(self, i_frame_nr_int):\n transform = QtGui.QTransform()\n dy_ft = - i_frame_nr_int * 0.3\n transform.translate(0, dy_ft)\n # transform.rotate(4.0)\n # self.cloud_gi.setScale(1 + 0.001 * i_frame_nr_int)\n self.cloud_gi.setTransform(transform)\n\n transform = QtGui.QTransform()\n transform.translate(0, dy_ft / 2)\n\n self.ib_text_qgi.setTransform(transform)\n\n def frame_change_breathing_out(self, i_frame_nr_int):\n transform = QtGui.QTransform()\n dy_ft = self.ib_end_qtransform.dy() + i_frame_nr_int * 0.15\n transform.translate(0, dy_ft)\n # transform.rotate(4.0)\n # self.cloud_gi.setScale(1 + 0.001 * i_frame_nr_int)\n self.cloud_gi.setTransform(transform)\n # self.cloud_gi.setScale(self._peak_scale_ft - 0.0005 * i_frame_nr_int)\n\n transform = QtGui.QTransform()\n transform.translate(0, dy_ft / 2)\n\n self.ob_text_qgi.setTransform(transform)\n\n def _ib_start(self):\n self._ob_qtimeline.stop()\n self._ib_qtimeline.start()\n\n def _ob_start(self):\n self.ib_end_qtransform = self.cloud_gi.transform()\n self._ib_qtimeline.stop()\n self._ob_qtimeline.start()\n\n def addBreathingCloud(self):\n # Rectangle\n screen_qrect = QtWidgets.QApplication.desktop().screenGeometry()\n xpos_ft = 0.0\n ypos_ft = screen_qrect.height() / 2\n t_drawrect = QtCore.QRectF(xpos_ft, ypos_ft, screen_qrect.width(), screen_qrect.height())\n\n \"\"\"\n # Gradient\n y_gradient = t_drawrect.y() - GRADIENT_IN_FT\n t_start_qpointf = QtCore.QPointF(t_drawrect.x(), y_gradient)\n t_stop_qpointf = t_drawrect.bottomLeft() # QtCore.QPointF(0.0, 50.0)\n t_linear_gradient = QtGui.QLinearGradient(t_start_qpointf, t_stop_qpointf)\n t_linear_gradient.setColorAt(0.0, QtGui.QColor(219, 255, 128))\n t_linear_gradient.setColorAt(1.0, QtGui.QColor(183, 255, 0))\n \"\"\"\n\n \"\"\"\n # Adding rectangle with gradient\n t_graphics_rect_item = self.breathing_graphicsscene.addRect(\n t_drawrect,\n pen=QtGui.QPen(QtCore.Qt.NoPen),\n brush=QtGui.QBrush(QtGui.QColor(252, 252, 256))\n )\n \"\"\"\n\n t_graphics_rect_item = self.breathing_gs.addRect(\n t_drawrect,\n QtGui.QPen(QtCore.Qt.NoPen),\n QtGui.QBrush(QtCore.Qt.white)\n )\n\n\nclass TextGraphicsItem(QtWidgets.QGraphicsTextItem):\n def __init__(self):\n super().__init__()\n\n def update_pos_and_origin_point(self, i_view_width: int, i_view_height: int):\n t_pointf = QtCore.QPointF(\n i_view_width / 2 - self.boundingRect().width() / 2,\n i_view_height / 2 - self.boundingRect().height() / 2\n )\n self.setPos(t_pointf)\n\n self.setTransformOriginPoint(self.boundingRect().center())\n\n\nclass BreathingGraphicsObject(QtWidgets.QGraphicsObject):\n enter_signal = QtCore.pyqtSignal()\n leave_signal = QtCore.pyqtSignal()\n\n def __init__(self, i_drawrect: QtCore.QRectF):\n super().__init__()\n\n self.rectf = QtCore.QRectF(i_drawrect)\n self.setAcceptHoverEvents(True)\n\n # Overridden\n def paint(self, i_qpainter, i_qstyleoptiongraphicsitem, widget=None):\n t_brush = QtGui.QBrush(QtGui.QColor(245, 240, 250))\n i_qpainter.fillRect(self.rectf, t_brush)\n\n # Overridden\n def boundingRect(self):\n return self.rectf\n\n # Overridden\n def hoverEnterEvent(self, i_qgraphicsscenehoverevent):\n self.enter_signal.emit()\n\n # 
Overridden\n def hoverLeaveEvent(self, i_qgraphicsscenehoverevent):\n # Please note that this function is entered in case the user hovers over something on top of this graphics item\n logging.debug(\"hoverLeaveEvent\")\n self.leave_signal.emit()\n\n def update_pos_and_origin_point(self, i_view_width: int, i_view_height: int):\n t_pointf = QtCore.QPointF(\n i_view_width / 2 - self.boundingRect().width() / 2,\n i_view_height / 2 - self.boundingRect().height() / 2\n )\n self.setPos(t_pointf)\n\n self.setTransformOriginPoint(self.boundingRect().center())\n\n\nif __name__ == \"__main__\":\n scw_qapplication = QtWidgets.QApplication(sys.argv)\n bw = BreathingDialog()\n bw.show()\n sys.exit(scw_qapplication.exec_())\n","sub_path":"scw/gui/breathing.py","file_name":"breathing.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426541416","text":"import cv2\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom sklearn.externals import joblib\nimport face_recognition\nimport time\nfrom datetime import datetime, timedelta\nimport json\nfrom dateutil.parser import *\n\n# importing scikit-learn svm\nsvc = joblib.load('models/svm_model.pkl')\nencoder = joblib.load('models/name_encoder.pkl')\n\n# Setting up open CV video object\nvideo = cv2.VideoCapture('videos/test8.mp4')\nfps = video.get(cv2.CAP_PROP_FPS)\n# the size of the frame will be downscaled to a fourth of its size\nw = int(video.get(cv2.CAP_PROP_FRAME_WIDTH) / 4)\nh = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT) / 4)\n# video encoding\nvideo_encoding = cv2.VideoWriter_fourcc(*'DIVX')\n# object to write processed frames to video file\nvideo_out = cv2.VideoWriter('video_output/output8.mp4', video_encoding, 15.0, (int(w), int(h)), isColor=True)\n# total number of frames in video\ntotal_frames = video.get(7)\n# will sample the video at half the current frame rate\nframe_intervals = np.arange(1, total_frames, 4)\nprint('new frame rate', fps / 4)\n\nnp.warnings.filterwarnings('ignore')\n# saving execution times\nface_detection_times = []\nprocessing_times = []\nall_check_in_times = []\nall_recognized_faces = []\n\ncheck_in_details = {}\n\nstart1 = time.time()\n\n# sequentially selecting every n frames from video\nfor frame_no in frame_intervals:\n # reading frame #frame_no\n video.set(1, frame_no)\n status, frame = video.read()\n # downscaling frame\n frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)\n # obtaining bounding box of faces in the frame\n start_det = timer()\n faces_detected = face_recognition.faces_in_frame(frame)\n end_det = timer()\n face_detection_times.append(end_det-start_det)\n # minimum width of the faces we want to recognize/classify. 
eg: only classify close-by faces\n face_width = w/5\n frame_out = frame\n # time persons are seen/classified\n times_seen = []\n identities = []\n # if the frame contains faces classify the faces in the frame\n if np.array(faces_detected).size > 0:\n frame_out, face_descriptors, identities, times_seen = face_recognition.classify_face_video(frame, faces_detected,\n face_width, svc,\n encoder)\n for name, last_seen in zip(identities, times_seen):\n if name in check_in_details:\n if datetime.now() - check_in_details[name][-1] > timedelta(seconds=2):\n check_in_details[name].append(last_seen)\n else:\n check_in_details[name] = []\n check_in_details[name].append(last_seen)\n # writing frame\n video_out.write(frame_out)\n\nend1 = time.time()\n\nprint('total time taken ', end1 - start1)\nprint('face detection time', np.mean(np.array(face_detection_times)))\n# destroying running processes\nvideo.release()\nvideo_out.release()\ncv2.destroyAllWindows()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"105905986","text":"from django.conf.urls import patterns, url, include\nfrom kii.base_models import views\nfrom . import models, forms\nfrom .views import OwnerModelList, StatusModelList\n\ntitlemodel_patterns = patterns('',\n url(r'^(?P\\d+)/$', \n views.Detail.as_view(model=models.TitleModel), \n name='detail'),\n url(r'$', \n views.List.as_view(model=models.TitleModel), \n name='list'),\n)\ntitlemodel2_patterns = patterns('',\n url(r'^(?P\\d+)/$', \n views.Detail.as_view(model=models.TitleModel2), \n name='detail'),\n url(r'^create$', \n views.Create.as_view(model=models.TitleModel2), \n name='create'),\n url(r'^$', \n views.List.as_view(model=models.TitleModel2), \n name='list'),\n url(r'^(?P\\d+)/delete$', \n views.Delete.as_view(model=models.TitleModel2), \n name='delete'),\n)\n\nownermodel_patterns = patterns('',\n url(r'^create$', \n views.OwnerMixinCreate.as_view(\n form_class=forms.OwnerModelForm, \n template_name=\"base_models/modelform.html\",\n fields=['useless_field']), \n name='create'),\n url(r'^$', \n OwnerModelList.as_view(), \n name='list'),\n url(r'^(?P\\d+)/update$', \n views.OwnerMixinUpdate.as_view(form_class=forms.OwnerModelForm, \n template_name=\"base_models/modelform.html\",\n fields=['useless_field']), \n name='update'),\n url(r'^(?P\\d+)/delete$', \n views.OwnerMixinDelete.as_view(model=models.OwnerModel), \n name='delete'),\n\n)\n\nstatusmodel_patterns = patterns('',\n url(r'$', \n StatusModelList.as_view(), \n name='list'),\n)\n\n\nurlpatterns = patterns('',\n url(\n r'^statusmodel/', \n include(\n statusmodel_patterns, \n namespace='statusmodel', \n app_name='statusmodel')\n ),\n url(\n r'^titlemodel/', \n include(\n titlemodel_patterns, \n namespace='titlemodel', \n app_name='titlemodel')\n ),\n url(\n r'^titlemodel2/', \n include(\n titlemodel2_patterns, \n namespace='titlemodel2', \n app_name='titlemodel2')\n ),\n url(\n r'^ownermodel/', \n include(\n ownermodel_patterns, \n namespace='ownermodel', \n app_name='ownermodel')\n ),\n\n)","sub_path":"kii/tests/test_base_models/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"29313905","text":"from configuration.configuration import Gameconfig\nfrom gameitem.board import Location\nfrom simplelog.wlogging import create_logger\n\nlogger = 
create_logger(__name__,\"wump_player.log\")\n\n# this dictionary is a helper to easily know in which\n# direction we are pointing and the step increment\n# for movements\nmovement = {\n 'north' : {\n 'left': 'west',\n 'right': 'east',\n 'step': Location(-1,0)\n },\n 'east' : {\n 'left': 'north',\n 'right': 'south',\n 'step': Location(0,1)\n },\n 'south' : {\n 'left': 'east',\n 'right': 'west',\n 'step': Location(1,0)\n },\n 'west' : {\n 'left': 'south',\n 'right': 'north',\n 'step': Location(0,-1)\n }\n}\n\n\nclass Player:\n def __init__(self, config):\n if not config:\n config = Gameconfig()\n\n self.direction = 'north'\n self.arrows = config.arrows_number\n self.position = config.start_location\n self.got_gold = False\n self.dead = False\n\n def turn(self, rotation):\n logger.debug(f\"turning {rotation}. Arrows: {self.arrows}\")\n self.direction = movement[self.direction].get(rotation)\n\n","sub_path":"gameitem/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265108228","text":"import base64\n\nfrom .exception import ApiException, ApiRequestError, ApiRequestErrorDetails\n\n__author__ = 'mstipanov'\n\nimport http.client\nimport urllib\nfrom urllib.parse import urlparse\nimport json\n\nclass HttpClient:\n\n def deserialize(self, s, cls):\n vals = json.JSONDecoder().decode(s)\n return self.deserialize_map(vals, cls)\n\n def deserialize_map(self, vals, cls):\n obj = cls()\n\n if not hasattr(vals, 'items'):\n return vals\n\n for key, val in vals.items():\n t = type(getattr(obj, key, None))\n value = self.deserialize_map(val, t)\n setattr(obj, key, value)\n\n return obj\n\n def serialize(self, bodyObject):\n return bodyObject.to_JSON()\n\n def getValue(self, httpMethod, configuration, methodPath, pathParams, context, bodyObject, valueType):\n if (pathParams):\n for key, value in pathParams.iteritems():\n methodPath = methodPath.replace(\"{\" + key + \"}\", str(value))\n\n url = configuration.base_url + methodPath\n username = configuration.username\n password = configuration.password\n api_key = configuration.api_key\n token = configuration.token\n\n if context:\n if isinstance(context, dict):\n params = urllib.parse.urlencode(context)\n else:\n params = urllib.parse.urlencode(context.to_dict())\n url = url + (\"?%s\" % params)\n\n u = urlparse(url)\n if (u.scheme == \"https\"):\n connection = http.client.HTTPSConnection(u.netloc)\n else:\n connection = http.client.HTTPConnection(u.netloc)\n\n headers = {}\n if username:\n auth = base64.encodestring('%s:%s' % (username, password)).replace('\\n', '')\n headers[\"Authorization\"] = \"Basic %s\" % auth\n\n if api_key:\n headers[\"Authorization\"] = \"App %s\" % api_key\n\n if token:\n headers[\"Authorization\"] = \"IBSSO %s\" % token\n\n body_content = None\n if bodyObject:\n body_content = self.serialize(bodyObject)\n headers[\"Accept\"] = \"application/json\"\n\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"User-Agent\"] = \"Python-Client-Library\"\n\n\n connection.request(httpMethod, url, body_content, headers)\n response = connection.getresponse()\n\n statusCode = response.status\n response_content = response.read()\n if (statusCode < 200 or statusCode >= 300):\n contentType = response.getheader(\"Content-Type\")\n if contentType and contentType.startswith(\"application/json\"):\n raise self.deserialize(response_content, ApiException)\n\n raise ApiException(ApiRequestError(None, 
ApiRequestErrorDetails(response.reason + \" - \" + response_content)))\n\n contentType = response.getheader(\"Content-Type\")\n if contentType and contentType.startswith(\"application/json\") and not 'basestring' == valueType:\n return valueType.from_JSON(response_content.decode())\n\n return response_content\n","sub_path":"infobip/util/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"623564186","text":"# -*- coding: utf-8 -*-\n\"\"\"Pype module.\"\"\"\nimport os\nimport platform\nimport functools\nimport logging\n\nfrom .settings import get_project_settings\nfrom .lib import (\n Anatomy,\n filter_pyblish_plugins,\n change_timer_to_current_context\n)\n\npyblish = avalon = _original_discover = None\n\nlog = logging.getLogger(__name__)\n\n\nPACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))\nPLUGINS_DIR = os.path.join(PACKAGE_DIR, \"plugins\")\n\n# Global plugin paths\nPUBLISH_PATH = os.path.join(PLUGINS_DIR, \"publish\")\nLOAD_PATH = os.path.join(PLUGINS_DIR, \"load\")\n\n\ndef import_wrapper(func):\n \"\"\"Wrap module imports to specific functions.\"\"\"\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n global pyblish\n global avalon\n global _original_discover\n if pyblish is None:\n from pyblish import api as pyblish\n from avalon import api as avalon\n\n # we are monkey patching `avalon.api.discover()` to allow us to\n # load plugin presets on plugins being discovered by avalon.\n # Little bit of hacking, but it allows us to add out own features\n # without need to modify upstream code.\n\n _original_discover = avalon.discover\n\n return func(*args, **kwargs)\n\n return decorated\n\n\n@import_wrapper\ndef patched_discover(superclass):\n \"\"\"Patch `avalon.api.discover()`.\n\n Monkey patched version of :func:`avalon.api.discover()`. It allows\n us to load presets on plugins being discovered.\n \"\"\"\n # run original discover and get plugins\n plugins = _original_discover(superclass)\n\n # determine host application to use for finding presets\n if avalon.registered_host() is None:\n return plugins\n host = avalon.registered_host().__name__.split(\".\")[-1]\n\n # map plugin superclass to preset json. 
Currently supported are load and\n    # create (avalon.api.Loader and avalon.api.Creator)\n    plugin_type = \"undefined\"\n    if superclass.__name__.split(\".\")[-1] == \"Loader\":\n        plugin_type = \"load\"\n    elif superclass.__name__.split(\".\")[-1] == \"Creator\":\n        plugin_type = \"create\"\n\n    print(\">>> Finding presets for {}:{} ...\".format(host, plugin_type))\n    try:\n        settings = (\n            get_project_settings(os.environ['AVALON_PROJECT'])\n            [host][plugin_type]\n        )\n    except KeyError:\n        print(\"*** no presets found.\")\n    else:\n        for plugin in plugins:\n            if plugin.__name__ in settings:\n                print(\">>> We have preset for {}\".format(plugin.__name__))\n                for option, value in settings[plugin.__name__].items():\n                    if option == \"enabled\" and value is False:\n                        setattr(plugin, \"active\", False)\n                        print(\"  - is disabled by preset\")\n                    else:\n                        setattr(plugin, option, value)\n                        print(\"  - setting `{}`: `{}`\".format(option, value))\n    return plugins\n\n\n@import_wrapper\ndef install():\n    \"\"\"Install Pype to Avalon.\"\"\"\n    log.info(\"Registering global plug-ins..\")\n    pyblish.register_plugin_path(PUBLISH_PATH)\n    pyblish.register_discovery_filter(filter_pyblish_plugins)\n    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)\n\n    project_name = os.environ.get(\"AVALON_PROJECT\")\n\n    # Register studio specific plugins\n    if project_name:\n        anatomy = Anatomy(project_name)\n        anatomy.set_root_environments()\n        avalon.register_root(anatomy.roots)\n\n        project_settings = get_project_settings(project_name)\n        platform_name = platform.system().lower()\n        project_plugins = (\n            project_settings\n            .get(\"global\", {})\n            .get(\"project_plugins\", {})\n            .get(platform_name)\n        ) or []\n        for path in project_plugins:\n            if not path or not os.path.exists(path):\n                continue\n\n            pyblish.register_plugin_path(path)\n            avalon.register_plugin_path(avalon.Loader, path)\n            avalon.register_plugin_path(avalon.Creator, path)\n            avalon.register_plugin_path(avalon.InventoryAction, path)\n\n    # apply monkey patched discover to original one\n    log.info(\"Patching discovery\")\n    avalon.discover = patched_discover\n\n    avalon.on(\"taskChanged\", _on_task_change)\n\n\ndef _on_task_change(*args):\n    change_timer_to_current_context()\n\n\n@import_wrapper\ndef uninstall():\n    \"\"\"Uninstall Pype from Avalon.\"\"\"\n    log.info(\"Deregistering global plug-ins..\")\n    pyblish.deregister_plugin_path(PUBLISH_PATH)\n    pyblish.deregister_discovery_filter(filter_pyblish_plugins)\n    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)\n    log.info(\"Global plug-ins unregistered\")\n\n    # restore original discover\n    avalon.discover = _original_discover\n","sub_path":"openpype/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"597534468","text":"# Autokey Cipher\n\nwhile True:\n\n    try:\n        #input\n        key = input().lower()\n        plaintext = input().lower()\n\n        #new key\n        key = (key+plaintext)[:len(plaintext)]\n\n        #letter to number\n        key_int = []\n        for character in key:\n            number = ord(character) - 97\n            key_int.append(number)\n        \n        plaintext_int = []\n        for character in plaintext:\n            number = ord(character) - 97\n            plaintext_int.append(number)\n\n        ciphertext_int = []\n        count = 0\n        for num in plaintext_int:\n            ciphertext_int.append((num+key_int[count]) % 26)\n            count+=1\n        \n        #number to letter\n        ciphertext = \"\"\n        for number in ciphertext_int:\n            character = chr(number+97)\n            ciphertext+=(character)\n\n        #output\n        print(ciphertext)\n\n\n    except:\n        
break\n","sub_path":"Classical Encryption Techniques/Autokey_Cipher.py","file_name":"Autokey_Cipher.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"194995073","text":"from functions.functions import *\nfrom functions.mujoco_functions import *\nfrom lamcts import MCTS\n\nDims = [1,2,3] + list(range(5,41,5))\n\nfor i in Dims:\n\n f = Levy(dims = i)\n\n\n agent = MCTS(\n lb = f.lb, # the lower bound of each problem dimensions\n ub = f.ub, # the upper bound of each problem dimensions\n dims = f.dims, # the problem dimensions\n ninits = f.ninits, # the number of random samples used in initializations \n func = f, # function object to be optimized\n Cp = f.Cp, # Cp for MCTS\n leaf_size = f.leaf_size, # tree leaf size\n kernel_type = f.kernel_type, #SVM configruation\n gamma_type = f.gamma_type #SVM configruation\n )\n\n agent.search(iterations = 500)","sub_path":"python/LA-MCTS_replaceSVM/levy.py","file_name":"levy.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"608804050","text":"print(\"enter a integer:\")\r\nN = int(input())\r\nstring = str(bin(N))\r\n# print(string)\r\nbinary='{:b}'.format(N)\r\nprint(binary)\r\n# print(binary[0])\r\nlen = len(binary)\r\n# print(len)\r\nZ = 0\r\nF = 0\r\nfor i in binary:\r\n if i == \"0\":\r\n Z += 1\r\n else:\r\n if Z > F :\r\n F = Z\r\n Z = 0\r\n else:\r\n Z = 0\r\nif Z > F :\r\n F = Z\r\n\r\nprint(F)\r\n","sub_path":"count_0_binary.py","file_name":"count_0_binary.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325082412","text":"#p_is_guidedfilter\nimport numpy\n\ndef guided_filter(imgar, p, r=40, eps=1e-3):\n \n H, W, C = imgar.shape \n S = __boxfilter__(numpy.ones((H, W)), r)\n \n mean_i = numpy.zeros((C, H, W))\n \n for c in range(0, C):\n mean_i[c] = __boxfilter__(imgar[:,:, c], r)/S\n \n mean_p = __boxfilter__(p, r)/S\n \n mean_ip = numpy.zeros((C, H, W))\n for c in range(0, C):\n mean_ip[c] = __boxfilter__(imgar[:,:,c]*p, r)/S\n \n cov_ip = numpy.zeros((C, H, W))\n for c in range(0, C):\n cov_ip[c] = mean_ip[c] - mean_i[c]*mean_p\n \n var_i = numpy.zeros((C, C, H, W))\n \n var_i[0, 0] = __boxfilter__(imgar[:,:,0]*imgar[:,:,0], r)/S - mean_i[0]*mean_i[0]\n\n var_i[0, 1] = __boxfilter__(imgar[:,:,0]*imgar[:,:,1], r)/S - mean_i[0]*mean_i[1]\n \n var_i[0, 2] = __boxfilter__(imgar[:,:,0]*imgar[:,:,2], r)/S - mean_i[0]*mean_i[2]\n \n var_i[1, 1] = __boxfilter__(imgar[:,:,1]*imgar[:,:,1], r)/S - mean_i[1]*mean_i[1]\n \n var_i[1, 2] = __boxfilter__(imgar[:,:,1]*imgar[:,:,2], r)/S - mean_i[1]*mean_i[2]\n \n var_i[2, 2] = __boxfilter__(imgar[:,:,2]*imgar[:,:,2], r)/S - mean_i[2]*mean_i[2]\n \n a=numpy.zeros((H,W,C))\n \n for i in range(0, H):\n for j in range(0, W):\n sigma = numpy.array([ [var_i[0, 0, i, j], var_i[0, 1, i, j], var_i[0, 2, i, j]], \n [var_i[0, 1, i, j], var_i[1, 1, i, j], var_i[1, 2, i, j]],\n [var_i[0, 2, i, j], var_i[1, 2, i, j], var_i[2, 2, i, j]]])\n \n \n cov_ip_ij = numpy.array([ cov_ip[0, i, j], cov_ip[1, i, j], cov_ip[2, i, j]]) \n \n a[i, j] = numpy.dot(cov_ip_ij, numpy.linalg.inv(sigma + eps*numpy.identity(3))) \n \n b = mean_p - a[:,:,0]*mean_i[0,:,:] - a[:,:,1]*mean_i[1,:,:] - a[:,:,2]*mean_i[2,:,:] \n \n \n pp = ( __boxfilter__(a[:,:,0], r)*imgar[:,:,0]\n +__boxfilter__(a[:,:,1], r)*imgar[:,:,1]\n +__boxfilter__(a[:,:,2], 
r)*imgar[:,:,2]\n +__boxfilter__(b, r) )/S\n \n return pp\n\n\ndef __boxfilter__(m, r):\n \n H, W = m.shape\n mp = numpy.zeros(m.shape) \n \n \n ysum = numpy.cumsum(m, axis=0) \n mp[0:r+1, : ] = ysum[r:(2*r)+1, : ]\n \n mp[r+1:H-r, : ] = ysum[(2*r)+1: , : ] - ysum[ :H-(2*r)-1, : ]\n mp[(-r): , : ] = numpy.tile(ysum[-1, : ], (r, 1)) - ysum[H-(2*r)-1:H-r-1, : ]\n\n \n xsum = numpy.cumsum(mp, axis=1)\n \n mp[ : , 0:r+1] = xsum[ : , r:(2*r)+1]\n \n mp[ : , r+1:W-r] = xsum[ : , (2*r)+1: ] - xsum[ : , :W-(2*r)-1]\n mp[ : , -r: ] = numpy.tile(xsum[ : , -1][:, None], (1, r)) - xsum[ : , W-(2*r)-1:W-r-1]\n\n return mp","sub_path":"source/RefineG.py","file_name":"RefineG.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"256425777","text":"\"\"\"\r\nRun on the command prompt\r\n\r\n python xml2csv.py financialprogress\r\n python xml2csv.py physicalprogress\r\n\r\nThis will create an xml-financialprogress.csv and xml-physicalprogress.csv\r\nthat is used by summarise.py\r\n\"\"\"\r\nimport urllib2\r\nfrom lxml import etree\r\nfrom os.path import exists\r\nimport pandas as pd\r\nimport sys\r\nimport datetime\r\n\r\ndate = str(datetime.date.today())\r\nresponse = urllib2.urlopen('http://tsc.gov.in/tsc/NDSAP/StatewiseDistrictwise'+sys.argv[1]+'.xml')\r\n\r\ntree = etree.parse(response)\r\nlength = int(tree.xpath('count(row)'))\r\n\r\ncols = [element.tag for element in tree.find('/row')]\r\n\r\ndata_list = []\r\nfor i in range(length):\r\n row = []\r\n for col in cols:\r\n col_data = tree.xpath('row/'+col)\r\n row.append(col_data[i].text)\r\n data_list.append(row)\r\n\r\narghyam = pd.DataFrame(data_list, columns=cols)\r\narghyam.insert(0, 'Date', date)\r\nfilename = 'xml-' + sys.argv[1] + '.csv'\r\nif exists(filename):\r\n with open(filename, 'a') as out:\r\n arghyam.to_csv(out, index=False, header=False)\r\nelse :\r\n with open(filename, 'w') as out:\r\n arghyam.to_csv(out, index=False)\r\n","sub_path":"tsc.gov.in/xml2csv.py","file_name":"xml2csv.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"417855706","text":"def merge_files(lang1, lang2, lang3):\n '''\n Takes three language names as strings\n Outputs trn files for merging the first two, and uses the third file for the tst and dev data\n '''\n\n #open .dev files for language 3 and read into list\n d = open('task0-data/DEVELOPMENT-LANGUAGES/germanic/' + lang3 + \".dev\")\n dev = [line for line in d]\n d.close()\n \n #create output .dev file, named lang1_lang2_lang3.dev, and write contents of list in\n output_dev = open('task0-data/DEVELOPMENT-LANGUAGES/custom/' + lang1 + \"_\" + lang2 + \"_\" + lang3 + \".dev\", \"w\")\n for ln in dev:\n output_dev.write(ln)\n output_dev.close()\n \n #read .trn files for langs 1 and 2 into lists and write contents into new .trn file\n tr1 = open('task0-data/DEVELOPMENT-LANGUAGES/germanic/' + lang1 + \".trn\")\n tr2 = open('task0-data/DEVELOPMENT-LANGUAGES/germanic/' + lang2 + \".trn\")\n trn1 = [line for line in tr1]\n trn2 = [line for line in tr2]\n tr1.close()\n tr2.close()\n \n output_trn = open('task0-data/DEVELOPMENT-LANGUAGES/custom/' + lang1 + \"_\" + lang2 + \"_\" + lang3 + \".trn\", \"w\")\n for ln in trn1:\n output_trn.write(ln)\n for ln in trn2:\n output_trn.write(ln)\n output_trn.close()\n \n #open .tst file for language 3 and copy it into a new file, renaming it to match .dev and .trn files\n ts = 
open('task0-data/GOLD-TEST/' + lang3 + \".tst\")\n    tst = [line for line in ts]\n    output_tst = open('task0-data/DEVELOPMENT-LANGUAGES/custom/' + lang1 + \"_\" + lang2 + \"_\" + lang3 + \".tst\", \"w\")\n    for ln in tst:\n        output_tst.write(ln)\n    output_tst.close()\n    \n    #return output_dev, output_trn, output_tst\n\n#train on English and German, test on Middle Low German\nmerge_files(\"eng\", \"deu\", \"gml\")\n#train on German and Icelandic, test on Norwegian Nynorsk\nmerge_files(\"deu\", \"isl\", \"nno\")\n","sub_path":"morphology_training_gml-nno/neural-transducer_modifications/full_data_augmentation/merge_langs.py","file_name":"merge_langs.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190257174","text":"BRANCO = 0\nCINZA = 1\nPRETO = 2\nNULO = -1\n\ndef DFS_VISIT(u,c,d,t,a,tempo):\n    c[u] = CINZA\n    tempo+=1\n    d[u] = tempo\n\n    for v in l[u]: # iterate over each element of the list l[u]\n        if(c[v]==BRANCO):\n            a[v] = u\n            v,c,d,t,a,tempo=DFS_VISIT(v,c,d,t,a,tempo)\n\n    c[u] = PRETO\n    tempo+=1\n    t[u] = tempo\n    return [u,c,d,t,a,tempo]\n\ndef DFS():\n    c,d,t,a = [],[],[],[]\n    for u in range(0,N+1):\n        c.append(BRANCO) \n        d.append(NULO) \n        t.append(NULO) \n        a.append(NULO)\n\n    tempo=0\n\n    for u in range(1,N+1):\n        if(c[u] == BRANCO):\n            u,c,d,t,a,tempo=DFS_VISIT(u,c,d,t,a,tempo)\n\n    return [c,d,t,a,tempo]\n\n\ndef imprimir(lista):\n    for i in range(1,N+1):\n        if(i==N):\n            print(lista[i])\n            break\n        print(str(lista[i])+\", \", end=\" \")\n\ndef inicializar(N):\n    l=[]\n    for i in range(N+1):\n        l.append([])\n    return l\n\n#main\nN=4\nl=inicializar(N)\n\n# l[u].append(v) makes v adjacent to u\nl[1].append(2)\nl[2].append(3)\nl[3].append(3)\nl[3].append(1)\nl[4].append(2)\n\nc,d,t,a,tempo=DFS()\nimprimir(c)\nimprimir(d)\nimprimir(t)\nimprimir(a)
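\n\n# Expected output of the four imprimir calls for this sample graph (vertices 1..4),\n# obtained by tracing the DFS: all vertices end up PRETO (2); discovery times d are\n# 1, 2, 3, 7; finish times t are 6, 5, 4, 8; parents a are -1, 1, 2, -1.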
","sub_path":"grafos/DFS-listas.py","file_name":"DFS-listas.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"588090892","text":"from bee_toolkits.reconstruction.name_replace import NameReplace\nfrom multicp import multicp_logger\nfrom multicp.query.Query import Query\nfrom multicp.util.Basic import explain\nfrom multicp.util.Constant import DEFAULT_CLASS\n\n\nclass Model(metaclass=NameReplace):\n    con = \"db.obj.pid\"\n\n    allow_class = [DEFAULT_CLASS]\n    _cache = {}\n\n    def __init__(self, pid, use_class= DEFAULT_CLASS, **params):\n        self.id = pid\n        self.data = {}\n        if pid is None:\n            raise Exception(\"pid is None\")\n        self.get_data(use_class, **params)\n\n    def __getitem__(self, item):\n        return self.data[item]\n\n    def get_data(self, use_class= DEFAULT_CLASS, **params):\n        q = self.get_query(use_class)\n        _, _, pid_field = self.get_property()\n        data = q.get({pid_field:self.id,\"$first\":True}, **params)\n        self.data = data\n        if not data:\n            raise Exception(\"empty data! con={} pid={}\".format(self.con, self.id))\n\n\n    @classmethod\n    def get(cls, limit, use_class=DEFAULT_CLASS, **params):\n        q = cls.get_query(use_class)\n        data = q.get(limit, **params)\n        return data\n\n    @classmethod\n    def page_get(cls, limit, use_class=DEFAULT_CLASS, yield_page=False, **params):\n        if yield_page:\n            def inner():\n                now_page = 0\n                total_page = 1\n                q = cls.get_query(use_class)\n                while now_page < total_page:\n                    now_page += 1\n                    limit.update({\n                        \"$start_page\":now_page\n                    })\n                    data, total_page, total_count = q.page_get(limit, **params)\n                    yield data, total_page, total_count, now_page\n            return inner()\n        else:\n            q = cls.get_query(use_class)\n            return q.page_get(limit, **params)\n\n    @classmethod\n    def check_repeat(cls, limit, old_id=None, **params):\n        \"\"\"\n        Check whether a duplicate exists for some field value.\n        :param limit:\n        :param old_id:\n        :return: False if there is no duplicate, otherwise the id of the duplicate\n        \"\"\"\n        if \"$first\" in limit:\n            del limit[\"$first\"]\n        data = cls.get(limit, **params)\n        isset = False\n        isset_id = None\n        if data:\n            for a_data in data:\n                if old_id and str(old_id) == str(a_data[cls.get_property()[2]]):\n                    continue\n                isset = True\n                isset_id = a_data[cls.get_property()[2]]\n                break\n\n        if isset:\n            return isset_id\n        else:\n            return False\n\n    @classmethod\n    def add(cls, data, use_class=DEFAULT_CLASS, **params):\n        q = cls.get_query(use_class)\n        result = q.add(data, **params)\n        return result\n\n    @classmethod\n    def add_by_id(cls, data, use_class=DEFAULT_CLASS, **params):\n        q = cls.get_query(use_class)\n        result = q.add_by_id(data, **params)\n        return result\n\n\n    def update(self, data, limit = None, use_class = DEFAULT_CLASS, **params):\n        \"\"\"\n        instance update method\n        :param data:\n        :param use_class:\n        :param params:\n        :return:\n        \"\"\"\n        _, _, pid_field = self.get_property()\n        if not limit:\n            limit = {}\n        limit [pid_field] = self.id\n        result = self.update_more(data, limit,use_class, **params)\n        return result\n    @classmethod\n    def update_more(cls, data, limit, use_class=DEFAULT_CLASS, **params):\n        \"\"\"\n        class update method\n        :param data:\n        :param limit:\n        :param use_class:\n        :param params:\n        :return:\n        \"\"\"\n        if not limit :\n            multicp_logger.warning(\"your limit is empty in update!\")\n        q = cls.get_query(use_class)\n        result = q.update(limit, data, **params)\n        return result\n\n    @classmethod\n    def delete(cls, limit, use_class = DEFAULT_CLASS, **params):\n        q = cls.get_query(use_class)\n        result = q.delete(limit, **params)\n        return result\n    @classmethod\n    def get_query(cls, allow_class = DEFAULT_CLASS)->Query:\n        q = Query(cls.con, allow_class)\n        return q\n\n    @classmethod\n    def get_property(cls):\n        return explain(cls.con)\n\n\n\n\n\n\n\n\n","sub_path":"multicp/query/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355706644","text":"# Copyright (C) 2020-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nfrom openvino.tools.pot.benchmark.benchmark import benchmark_embedded, set_benchmark_config\nfrom openvino.tools.pot.utils.logger import get_logger, init_logger\nfrom .utils.path import TEST_ROOT\n\n\nlogger = get_logger(__name__)\nREFERENCE_MODELS_PATH = TEST_ROOT/'../thirdparty/open_model_zoo/tools/accuracy_checker/data/test_models/SampLeNet.xml'\n\ndef test_benchmark(model=None, cfg=None):\n    init_logger(level='INFO')\n    if cfg:\n        set_benchmark_config(cfg)\n    if model:\n        benchmark_embedded(model=model)\n        return\n\n    path_to_model_file = str(REFERENCE_MODELS_PATH)\n    logger.info('Benchmark test with 
{}'.format(path_to_model_file))\n\n cfg = {'nireq': 0}\n set_benchmark_config(cfg)\n benchmark_embedded(model=None, mf=path_to_model_file, duration_seconds=1)\n\n cfg = {'nireq': 0, 'benchmark_app_dir':\"\"}\n set_benchmark_config(cfg)\n benchmark_embedded(model=None, mf=path_to_model_file, duration_seconds=1)\n\n cfg = {'nireq': 0, 'benchmark_app_dir':\"wrong_benchmark_dir\"}\n set_benchmark_config(cfg)\n benchmark_embedded(model=None, mf=path_to_model_file, duration_seconds=1)\n","sub_path":"tools/pot/tests/test_benchmark.py","file_name":"test_benchmark.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498253688","text":"\"\"\"\nCAIPEEX data processing for 2011 data: .csv --> .npy, stripping unnecessary \\\nmetadata from .csv files. \n\nInput location: /data/rev11caipeex/ames\nOutput location: /data/revrev11caipeex/npy_raw\nOutput format: .npy file containing one dictionary formatted as: \\\n {\"date\": ['YYYY', 'MM', 'DD'], \\\n \"var_names\": ['', ...], \\\n \"data\": }\n\"\"\"\nimport csv\nimport numpy as np\n\nfrom rev11caipeex import BASE_DIR, DATA_DIR, FIG_DIR\n\ninput_data_dir = DATA_DIR + 'csv/'\noutput_data_dir = DATA_DIR + 'npy_raw/'\n\ndef main():\n \"\"\"\n extract flight date, variable names, and data from csv files to numpy \\\n files. also convert error codes to np.nan.\n \"\"\"\n\n #get names of data files with no issues (see notes)\n with open('good_csv_filenames.txt','r') as readFile:\n good_csv_filenames = [line.strip() for line in readFile.readlines()]\n readFile.close()\n\n #create .npy file for each .ames file in good_ames_filenames\n for filename in good_csv_filenames:\n basename = filename[0:-4] + '.npy' #replace '.csv' with '.npy'\n if '20111027' in filename or 'RF40' in filename:\n flight_date = '20111027' #yyyymmdd\n if '20111024' in filename or 'RF36' in filename:\n flight_date = '20111024' #yyyymmdd\n data_arr = []\n if 'ALL' in filename:\n with open(input_data_dir+filename, 'r') as readFile:\n csvreader = csv.reader(readFile, delimiter=',')\n for row in csvreader:\n data_arr.append(row)\n data_arr = np.array(data_arr)\n var_names = data_arr[0, 0:9]\n data = np.array(data_arr[1:, :], dtype=float)\n elif 'FSSP' in filename:\n with open(input_data_dir+filename, 'r') as readFile:\n csvreader = csv.reader(readFile, delimiter=',')\n for row in csvreader:\n data_arr.append(row)\n #idk why this is required but it is\n data_arr[2] = data_arr[2][:-1]\n data_arr = np.array(data_arr[2:])\n var_names = data_arr[0, 0:32]\n data = np.array(data_arr[1:, :], dtype=float)\n elif 'CIP' in filename:\n with open(input_data_dir+filename, 'r') as readFile:\n csvreader = csv.reader(readFile, delimiter=',')\n for row in csvreader:\n data_arr.append(row)\n #idk why this is required but it is\n data_arr[2] = data_arr[2][:-1]\n #print(data_arr)\n data_arr = np.array(data_arr[2:])\n var_names = data_arr[0, 0:37]\n data = np.array(data_arr[1:, :], dtype=float)\n elif 'DMA' in filename:\n with open(input_data_dir+filename, 'r') as readFile:\n lines = readFile.readlines()\n data_arr = np.array([np.concatenate(( \\\n np.array([convert_time_str_to_float(line.split()[0])]), \\\n np.array(line.split()[1:]).astype(np.float))) for line \\\n in lines[1:] if line.split() != []])\n #idk why this is required but it is\n #data_arr[2] = data_arr[2][:-1]\n #print(data_arr)\n #data_arr = np.array(data_arr[2:])\n var_names = data_arr[0]\n data = np.array(data_arr[1:, :])\n elif 'PCASP' in filename:\n with 
open(input_data_dir+filename, 'r') as readFile:\n csvreader = csv.reader(readFile, delimiter=',')\n for row in csvreader:\n data_arr.append(row)\n if flight_date == '20111024':\n data_arr = np.array(data_arr[31:])\n var_names = data_arr[0][0:2]\n data = np.array(data_arr[1:, :], dtype=float)\n if flight_date == '20111027':\n data_arr = np.array(data_arr[20:])\n var_names = data_arr[0][0:2]\n data = np.array(data_arr[1:, :], dtype=float)\n\n #save all fields in .npy format\n data_dict = {\"date\":flight_date, \"var_names\":var_names, \"data\":data}\n np.save(output_data_dir+basename, data_dict)\n\ndef convert_time_str_to_float(time_str):\n\n hh = time_str[0:2]\n mm = time_str[3:5]\n ss = time_str[6:8]\n \n time_float = 3600.*float(hh) + 60*float(mm) + float(ss)\n\n return time_float\n\n#run main() if user enters 'python [module path].py' from command line\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/rev11caipeex/rev11caipeex_data_cleanup.py","file_name":"rev11caipeex_data_cleanup.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"500739287","text":"import re\nimport json\n\n# Counts the number of tweets per topic\ndef topic_count(fname):\n\twith open(fname) as f:\n\t\ttweets = f.read()\n\n\tcount = 0\n\n\tprint(\"\\n########Turkish Tweet Count##########\\n\")\n\n\tpoliticsRegex = re.compile(r'\\\"topic\\\":\\s?\\\"Politics\\\"')\n\tmatch = politicsRegex.findall(tweets)\n\tcount += len(match)\n\tprint(\"topic - Politics: \",len(match))\n\n\tpoliticsRegex = re.compile(r'\\\"topic\\\":\\s?\\\"Tech\\\"')\n\tmatch = politicsRegex.findall(tweets)\n\tcount += len(match)\n\tprint(\"topic - Tech: \",len(match))\n\n\tpoliticsRegex = re.compile(r'\\\"topic\\\":\\s?\\\"World News\\\"')\n\tmatch = politicsRegex.findall(tweets)\n\tcount += len(match)\n\tprint(\"topic - World News: \",len(match))\n\n\tpoliticsRegex = re.compile(r'\\\"topic\\\":\\s?\\\"T.V. Series\\\"')\n\tmatch = politicsRegex.findall(tweets)\n\tcount += len(match)\n\tprint(\"topic - T.V. 
Series: \",len(match))\n\n\tpoliticsRegex = re.compile(r'\\\"topic\\\":\\s?\\\"Sports\\\"')\n\tmatch = politicsRegex.findall(tweets)\n\tcount += len(match)\n\tprint(\"topic - Sports: \",len(match))\n\t\n\tprint(\"\\n\\nTotal Tweets: \",count)\n\t\n\n# filters the tweets \ndef tweets(fname):\n\n\t# Correcting dates in all tweets\n\n\twith open(fname) as f:\n\t\ttweets = f.read()\n# \"tweet_date\": \"2016-09-11T02:00:00Z\"\n\tdatePattern = re.compile(r'(\\\"tweet_date\\\":\\s?\\\")(\\d{4}\\-\\d{2}\\-\\d{2}T\\d{2}\\:\\d{2}\\:\\d{2})\\\"')\n\n\ttext1 = datePattern.sub(r'\\1\\2Z\"',tweets)\n\n\n\t# Correcting coordinates\n\n\t# \"tweet_loc\": []\n\tlocPatter = re.compile(r'(\\\"tweet_loc\\\":\\s?)(\\[\\])')\n\ttext2 = locPatter.sub(r'\\1null',text1)\n\n\t# \"tweet_loc\": [151.19981289, -33.87429942]\n\t# \"tweet_loc\": \"[37.090240, -95.712891]\"\n\n\tlocPatter2 = re.compile(r'(\\\"tweet_loc\\\":\\s?)(\\[)(\\-?\\d+\\.\\d+)(\\,\\s?)(\\-?\\d+\\.\\d+)(\\])')\n\ttext = locPatter2.sub(r'\\1\"\\5\\4\\3\"',text2)\n\t# match = locPatter2.findall(text2)\n\t# print(match)\n\n\t# writing the changes to file\n\tf = open(fname,'w')\n\tf.write(text)\n\tf.close()\t\n\n\tprint('Successful....')\n\n# topic_count(\"index2_turkish.jsonl\")\ntweets(\"index2_turkish.jsonl\")\n","sub_path":"Turkish/tweet_oper.py","file_name":"tweet_oper.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123091595","text":"import random\n\nclass Portfolio(): # self = portfolio\n def __init__(self):\n self.cash = 0\n self.stock = {}\n self.fund = {}\n self.transactions = []\n\n def __str__(self):\n stock_list = [ str(k) + \" \" + str(v) for (k, v) in self.stock.items()]\n sl = '\\n'.join(stock_list)\n fund_list = [str(k)+ \" \" + str(v) for (k, v) in self.fund.items()]\n fl = '\\n'.join(fund_list)\n return f\"Cash: \\n{self.cash} \\nStock: \\n{sl} \\nFund: \\n{fl}\"\n\n def addCash(self, amount):\n self.cash += amount\n self.transactions.append(f\"Added {amount} dollars\")\n\n\n def buyStock(self, shares, name):\n if self.cash >= name.stock_price*shares:\n self.cash -= name.stock_price*shares\n self.transactions.append(f\"Bought {shares} shares of {name.stock_symbol}\")\n if name.stock_symbol in self.stock.keys():\n self.stock[name.stock_symbol] += shares\n else:\n self.stock[name.stock_symbol] = shares\n else:\n return \"Can not buy stocks: insufficient cash\"\n\n\n def buyMutualFund(self, shares, name):\n if self.cash >= 1*shares:\n self.cash -= 1*shares\n self.transactions.append(f\"Bought {shares} shares of {name.fund_symbol}\")\n if name.fund_symbol in self.fund.keys():\n self.fund[name.fund_symbol] += shares # symbol of the fund that is bought\n else:\n self.fund[name.fund_symbol] = shares\n else:\n return \"Can not buy funds: insufficient cash\"\n\n\n def withdrawCash(self, amount):\n if self.cash < amount:\n return \"Can not withdraw : insufficient cash\"\n else:\n self.cash -= amount\n self.transactions.append(f\"Withdrew {amount} dollars\")\n\n\n def sellMutualFund(self, symbol, shares):\n if symbol in self.fund.keys():\n if self.fund[symbol] >= shares:\n self.fund[symbol] -= shares\n self.cash += random.uniform(0.9, 1.2)*1*shares\n self.transactions.append(f\"Sold {shares} shares of {symbol}\")\n else:\n return f\"Can not sell funds: insufficient shares of {symbol}\"\n else:\n return f\"{symbol} does not exist\"\n\n def sellStock(self, name, shares):\n if name.stock_symbol in self.stock.keys():\n if 
self.stock[name.stock_symbol] >= shares:\n self.stock[name.stock_symbol] -= shares\n self.cash += random.uniform(0.5, 1.5)*name.stock_price*shares\n self.transactions.append(f\"Sold {shares} shares of {name.stock_symbol}\")\n else:\n return f\"Can not sell stocks: insufficient shares of {name.stock_symbol}\"\n else:\n return f\"{name.stock_symbol} does not exist\"\n\n def history(self):\n for transaction in self.transactions:\n print(transaction)\n\nclass Stock():\n\n def __init__(self, price, symbol):\n self.stock_price = price\n self.stock_symbol = symbol\n\nclass MutualFund():\n\n def __init__(self, symbol):\n self.fund_symbol = symbol\n\n\n\n\nportfolio = Portfolio()\nportfolio.addCash(300.50)\ns = Stock(20, \"HFH\")\nportfolio.buyStock(5, s)\nmf1 = MutualFund(\"BRT\")\nmf2 = MutualFund(\"GHT\")\nportfolio.buyMutualFund(10.3, mf1)\nportfolio.buyMutualFund(2, mf2)\nprint(portfolio)\n\nportfolio.sellMutualFund(\"BRT\", 1)\nportfolio.sellStock(s, 1)\nportfolio.withdrawCash(50)\nportfolio.history()\n","sub_path":"homeworks/hw1/hw1_yeon.py","file_name":"hw1_yeon.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165213494","text":"from flask import Flask, request\nimport psutil\nimport json\nimport os\nfrom subprocess import Popen\n\nACTIONS = {\n \"shutdown\": ['shutdown','-s','-f','-t','0'],\n \"hibernate\": ['shutdown','-h'],\n \"logoff\": ['logoff'],\n \"reboot\": ['shutdown','-r','-f','-t','0']\n}\n\ndef fetchSystemDetails():\n systemDetails = []\n\n _battery = {}\n _battery[\"tab\"] = \"Battery\"\n battery = psutil.sensors_battery()\n _battery[\"battery%\"] = round(battery.percent, 2)\n _battery[\"chargerPlugged\"] = (False, True)[battery.power_plugged]\n _battery[\"dischargeTimeMins\"] = round(\n battery.secsleft/60, 2) if not _battery[\"chargerPlugged\"] else -0\n systemDetails.append(_battery)\n\n _ram = {}\n _ram[\"tab\"] = \"RAM\"\n _ram[\"used%\"] = round(psutil.virtual_memory().percent, 2)\n # _ram[\"free%\"] = round(100 - psutil.virtual_memory().percent,2)\n # _ram[\"total\"] = f\"{round(psutil.virtual_memory().total/1024**3,2) } GB\"\n # _ram[\"used\"] = f\"{round(psutil.virtual_memory().used/1024**3,2) } GB\"\n _ram[\"free\"] = f\"{round(psutil.virtual_memory().free/1024**3,2) } GB\"\n systemDetails.append(_ram)\n\n _cpu = {}\n _cpu[\"tab\"] = \"CPU\"\n _cpu[\"cpuCores\"] = psutil.cpu_count(logical=False)\n _cpu[\"cpuLogicalCores\"] = psutil.cpu_count()\n _cpu[\"cpuUsageTotal%\"] = round(sum(psutil.cpu_percent(1, True))/8, 2)\n _cpu[\"cpuUsage%\"] = psutil.cpu_percent(1, True)\n # cpusUsage as text\n # _cpu[\"cpuUsage\"] = \", \".join(f\"{cu}%\"for cu in psutil.cpu_percent(1, True)).strip()\n\n systemDetails.append(_cpu)\n\n _diskUsage = []\n for disk in psutil.disk_partitions():\n if os.name == 'nt':\n if 'cdrom' in disk.opts or disk.fstype == '':\n continue\n diskDetails = psutil.disk_usage(disk.mountpoint)\n _diskUsage.append({\n \"tab\": f\"Disk {disk.mountpoint}\",\n # \"total\": f\"{round(diskDetails.total / (1024.0 ** 3),2)} GB\",\n # \"used\": f\"{round(diskDetails.used / (1024.0 ** 3),2)} GB\",\n \"used%\": round(diskDetails.percent, 2),\n \"free\": f\"{round(diskDetails.free / (1024.0 ** 3),2)} GB\",\n # \"free%\": round(100-diskDetails.percent,2)\n })\n systemDetails += _diskUsage\n\n # with open(\"stats.json\",'w') as f:\n # systemDetails = json.dump(systemDetails,f,indent=\"\\t\")\n\n return systemDetails\n\n\ndef mapActionToCommand(action):\n return 
ACTIONS.get(action)\n    # return \"echo hello\"\n\napp = Flask(__name__)\napp.config['JSON_SORT_KEYS'] = False\n\ndef execCmd(endpoint):\n    Popen(mapActionToCommand(endpoint.lower()))\n    return f'''{endpoint.upper()} : Command executed successfully'''\n@app.route('/')\ndef index():\n    systemDetails = fetchSystemDetails()\n    details = json.dumps(systemDetails, indent=\" \")\n    return f'''\n    SysMon\n    <a href=\"/Hibernate\">Hibernate</a>|\n    <a href=\"/Logout\">Logout</a>|\n    <a href=\"/Shutdown\">Shutdown</a>|\n    <a href=\"/Reboot\">Reboot</a>\n    <pre>\n    {details}\n    </pre>
    \n '''\n\n@app.route('/Shutdown')\ndef Shutdown():\n return execCmd(request.endpoint)\n\n@app.route('/Hibernate')\ndef Hibernate():\n return execCmd(request.endpoint)\n\n@app.route('/Logout')\ndef Logout():\n return execCmd(request.endpoint)\n\n@app.route('/Reboot')\ndef Reboot():\n return execCmd(request.endpoint)\n\napp.run(host=\"0.0.0.0\", port=80, debug=True, threaded=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"347090205","text":"\nfrom LThread import LThread\nfrom threading import Thread,Condition,Lock\n\nclass ThreadPool:\n def __init__(self,maxnum=50,minnum=10,piecenum=5,heavyLoad=0.6):\n self.__mtx=Lock()\n self.__cond=Condition()\n self.__tidCache=0\n self.__busyNum=0\n self.__freeNum=0\n self.__busyList=[]\n self.__freeList=[]\n self.maxNum=maxnum # 50\n self.availMinNum=minnum # 10\n self.pieceNum=piecenum # 5\n self.heavyLoad=heavyLoad # 0.6\n # create init threads\n for i in range(minnum):\n self.__tidCache=(self.__tidCache+1)%1000000\n thr=LThread(self,self.__tidCache)\n self.__freeList.append(thr)\n self.__freeNum=minnum\n def __spwanThread(self,force=False):\n if self.__freeNum+self.__busyNum*1.0/self.maxNum>self.heavyLoad and not force:\n return 0\n self.__cond.acquire()\n while self.__freeNum+self.__busyNum>=self.maxNum:\n self.__cond.wait()\n self.__cond.release()\n rest=self.maxNum-self.__busyNum-self.__freeNum\n num=self.pieceNum if rest>=self.pieceNum else rest\n self.__mtx.acquire()\n for i in range(num):\n self.__tidCache=(self.__tidCache+1)%1000000\n thr=Thread(self,self.__tidCache)\n self.__freeList.append(thr)\n self.__freeNum+=num\n self.__mtx.release()\n def __collectThread(self):\n if self.__freeNum*1.0/self.maxNum>self.heavyLoad:\n num=self.pieceNum if self.__freeNum>=self.pieceNum else 0\n self.__mtx.acquire()\n for i in range(num):\n thr=self.__freeList.pop()\n thr.stop()\n self.__freeNum-=num\n self.__mtx.release()\n def _moveToFreeList(self,thr):\n self.__mtx.acquire()\n self.__freeList.append(thr)\n self.__freeNum+=1\n self.__busyList.remove(thr)\n self.__busyNum-=1\n self.__mtx.release()\n # notify __spwanThread\n self.__cond.acquire()\n self.__cond.notify()\n self.__cond.release()\n self.__collectThread()\n def _moveToBusyList(self,thr):\n self.__mtx.acquire()\n self.__busyList.append(thr)\n self.__busyNum+=1\n self.__freeList.remove(thr)\n self.__freeNum-=1\n self.__mtx.release()\n def run(self,job):\n if self.__freeNum==0:\n self.__spwanThread(True,False)\n thr=self.__freeList[0]\n thr.setJob(job)\n thr.start()\n def terminateAll(self):\n self.__mtx.acquire()\n for i in range(self.__freeNum):\n self.__freeList.pop().stop()\n for i in range(self.__busyNum):\n self.__busyList.pop().stop()\n self.__freeNum=0\n self.__busyNum=0\n self.__mtx.release()\n def getFreeNum(self):\n return self.__freeNum\n def getBusyNum(self):\n return self.__busyNum\n","sub_path":"ServerPython/Lunaxy/lib/ThreadPool/py/ThreadPool.py","file_name":"ThreadPool.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"169943488","text":"import smtplib\nimport string\nimport random\nimport re\n\nfrom flask import Flask, request\nfrom pymemcache.client.hash import Client\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef page_1():\n \"\"\"For GET request, display the page. 
For POST requests, send the email\"\"\"\n if request.method == \"POST\":\n if \"email\" not in request.form:\n return \"No email submitted\"\n elif not re.match(r\"[^@]+@[^@]+\\.[^@]+\", request.form[\"email\"]):\n return \"Invalid Email\"\n else:\n send_email(request.form[\"email\"])\n return \"Email Sent\"\n return \"\"\"
    <form method=\"POST\">\n    <input type=\"text\" name=\"email\"/>\n    <input type=\"submit\"/>\n    </form>
    \n \"\"\"\n\n@app.route(\"/page_2\")\ndef page_2():\n \"\"\"Returns a 401 status code if the key is no longer valid\"\"\"\n memcached = Client((\"127.0.0.1\", 11211))\n if memcached.get(request.args.get(\"key\")):\n return \"Success! Welcome to the second page!\"\n else:\n return \"Not Authorized\", 401\n\ndef send_email(to_address):\n \"\"\"Sends an email and sets a memcached key\"\"\"\n from_address = \"hw.neuroscouting@gmail.com\"\n password = \"ThrowAwayAccount123\"\n key = \"\".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(32))\n message = \"From: {}\\r\\nTo: {}\\r\\nSubject: Neuroscouting Example\\r\\n\\r\\n{}page_2?key={}\".format(\n from_address,\n to_address,\n request.url_root,\n key,\n )\n server = smtplib.SMTP(\"smtp.gmail.com:587\")\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(from_address ,password)\n server.sendmail(from_address, to_address, message)\n server.quit()\n memcached = Client((\"127.0.0.1\", 11211))\n memcached.set(key, to_address, 86400)\n return\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"small_server.py","file_name":"small_server.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330718192","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 8 20:55:34 2021\n\n@author: Asus\n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import FastICA\nfrom scipy.stats import kurtosis\n\n\nclass KURTOSIS():\n def __init__(self, th = 0.1):\n self.th = th\n return \n \n def set_params(self, th):\n self.th = th\n \n return\n\n def fit(self, X,y):\n \n N, C, T = X.shape\n \n X = np.transpose(X,(1,0,2)).reshape(C,-1) #Matriz con dimensiones C x NT\n ica = FastICA(n_components=C)\n S = ica.fit_transform(X.T) # Reconstruct signals\n \n self.A = ica.mixing_ # Get estimated mixing matrix #Se debe guardar \n self.W = np.linalg.inv(self.A) #Se debe guardar \n \n S = S.reshape(N, C, T)\n \n kur = kurtosis(S, axis = 2).reshape(-1)\n \n \n self.U_k = np.mean(kur) #Se debe guardar \n self.STD_k = np.std(kur)#Se debe guardar \n \n \n def transform(self, X): \n #S = self.W @ X\n S = np.array([self.W@Xn for Xn in X])\n N, D, T = S.shape\n \n kur = kurtosis(S, axis = 2) \n k_norm = (kur - self.U_k) / self.STD_k\n \n S_ = [] \n A_ = []\n Xrec = []\n \n for trial in range(N): \n ind = np.where(k_norm[trial] < self.th)[0]\n\n \n S_.append(S[trial,ind])\n A_.append(self.A[:,ind])\n\n\n Xrec.append(np.dot(A_[trial], S_[trial]))\n \n return np.array(Xrec)\n \n \n def fit_transform(self, X,y):\n self.fit(X,y)\n return self.transform(X)","sub_path":"cb_pipeline/kurtosis.py","file_name":"kurtosis.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406490360","text":"import pytest\n\nfrom sample import create_app\nfrom utils.config import Environment\n\n\n@pytest.fixture(scope='session')\ndef app():\n app = create_app(Environment.TESTING)\n from sample.store import db\n with app.app_context():\n # By using a yield statement instead of return, all the code after the yield statement serves\n # as the teardown code:\n # https://docs.pytest.org/en/latest/fixture.html#fixture-finalization-executing-teardown-code\n db.create_all()\n yield app\n db.drop_all()\n\n\n@pytest.fixture(scope='session')\ndef db(app):\n \"\"\"Session-wide test database.\"\"\"\n from sample.store import db\n db.init_app(app)\n db.create_all()\n yield db\n 
db.drop_all()\n\n\n@pytest.fixture(scope='function')\ndef session(db):\n    \"\"\"Creates a new database session for a test.\"\"\"\n    connection = db.engine.connect()\n    transaction = connection.begin()\n\n    options = dict(bind=connection, binds={})\n    session = db.create_scoped_session(options=options)\n\n    db.session = session\n    yield session\n    transaction.rollback()\n    connection.close()\n    session.remove()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330718192","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr  8 20:55:34 2021\n\n@author: Asus\n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import FastICA\nfrom scipy.stats import kurtosis\n\n\nclass KURTOSIS():\n    def __init__(self, th = 0.1):\n        self.th = th\n        return \n    \n    def set_params(self, th):\n        self.th = th\n        \n        return\n\n    def fit(self, X,y):\n        \n        N, C, T = X.shape\n        \n        X = np.transpose(X,(1,0,2)).reshape(C,-1) # matrix with dimensions C x NT\n        ica = FastICA(n_components=C)\n        S = ica.fit_transform(X.T) # Reconstruct signals\n        \n        self.A = ica.mixing_ # Get estimated mixing matrix (must be stored for transform)\n        self.W = np.linalg.inv(self.A) # must be stored for transform\n        \n        S = S.reshape(N, C, T)\n        \n        kur = kurtosis(S, axis = 2).reshape(-1)\n        \n        \n        self.U_k = np.mean(kur) # must be stored for transform\n        self.STD_k = np.std(kur) # must be stored for transform\n        \n        \n    def transform(self, X): \n        #S = self.W @ X\n        S = np.array([self.W@Xn for Xn in X])\n        N, D, T = S.shape\n        \n        kur = kurtosis(S, axis = 2) \n        k_norm = (kur - self.U_k) / self.STD_k\n        \n        S_ = [] \n        A_ = []\n        Xrec = []\n        \n        for trial in range(N): \n            ind = np.where(k_norm[trial] < self.th)[0]\n\n            \n            S_.append(S[trial,ind])\n            A_.append(self.A[:,ind])\n\n\n            Xrec.append(np.dot(A_[trial], S_[trial]))\n        \n        return np.array(Xrec)\n    \n    \n    def fit_transform(self, X,y):\n        self.fit(X,y)\n        return self.transform(X)
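\n\n# A minimal usage sketch (added; assumes X holds signal trials shaped (N, C, T)):\n#   kf = KURTOSIS(th=0.1)\n#   X_train_clean = kf.fit_transform(X_train, y_train)  # fit ICA, drop high-kurtosis sources\n#   X_test_clean = kf.transform(X_test)                 # reuses the stored mixing/unmixing matrices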
","sub_path":"cb_pipeline/kurtosis.py","file_name":"kurtosis.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"384302938","text":"import json\nimport re\n\nfrom django.db.models import Sum\nfrom django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .coin_payment import CryptoPayments\n\nfrom .models import *\nfrom transaction.models import *\n\n\ndef dashboardView(request):\n    user = request.user.data_user\n\n    context = {\n        'balance': user.balance,\n        'invest': user.invest_set.get(is_active=True).nominal if user.invest_set.filter(is_active=True).exists() else 0,\n        'total_bonus':user.total_bonus,\n        'capping': user.invest_set.get(is_active=True).capping if user.invest_set.filter(is_active=True).exists() else 0,\n    }\n    return render(request,'dashboard/index.html',context)\n\ndef profileView(request):\n\n    return render(request, 'dashboard/profile.html')\n\ndef generateJaringan(user,path):\n    dt = {}\n    # each node is built as a plain label here (the node markup did not survive extraction)\n    node0 = f'{user}'\n    node1 = user.get_children().get(position='1') if user.get_children().filter(\n        position='1').exists() else \"Register Here\"\n    node2 = user.get_children().get(position='0') if user.get_children().filter(\n        position='0').exists() else \"Register Here\"\n    wrapnode1 = f'{node1}' if node1 != 'Register Here' else ''\n    wrapnode2 = f'{node2}' if node2 != 'Register Here' else ''\n    node3 = ''\n    node4 = ''\n    node5 = ''\n    node6 = ''\n\n    if node1 == \"Register Here\":\n        wrapnode1 = f'Register Here'\n\n    if node2 == \"Register Here\":\n        wrapnode2 = f'Register Here'\n\n    if type(node1) != str and node1.get_children().filter(position='1').exists():\n        node3 = node1.get_children().get(position='1')\n        node3 = f'{node3}'\n    elif type(node1) == str:\n        node3 = f'Empty'\n    else:\n        node3 = f'Register Here'\n\n    if type(node1) != str and node1.get_children().filter(position='0').exists():\n        node4 = node1.get_children().get(position='0')\n        node4 = f'{node4}'\n    elif type(node1) == str:\n        node4 = f'Empty'\n    else:\n        node4 = f'Register Here'\n\n    if type(node2) != str and node2.get_children().filter(position='1').exists():\n        node5 = node2.get_children().get(position='1')\n        node5 = f'{node5}'\n    elif type(node2) == str:\n        node5 = f'Empty'\n    else:\n        node5 = f'Register Here'\n\n    if type(node2) != str and node2.get_children().filter(position='0').exists():\n        node6 = node2.get_children().get(position='0')\n        node6 = f'{node6}'\n    elif type(node2) == str:\n        node6 = f'Empty'\n    else:\n        node6 = f'Register Here'\n\n    if node1 == \"Register Here\":\n        wrapnode1 = f'Register Here'\n        node3 = f'Empty'\n        node4 = f'Empty'\n    if node2 == \"Register Here\":\n        wrapnode2 = f'Register Here'\n        node5 = f'Empty'\n        node6 = f'Empty'\n\n\n    dt['node0'] = node0\n    dt['node1'] = wrapnode1\n    dt['node2'] = wrapnode2\n    dt['node3'] = node3\n    dt['node4'] = node4\n    dt['node5'] = node5\n    dt['node6'] = node6\n    return dt\n\ndef treeView(request,user_id):\n    user = Data_User.objects.get(id=user_id)\n    path = request.build_absolute_uri(f\"/dashboard/tree/\")\n    tree = generateJaringan(user,path)\n\n    context = {\n        'tree':tree,\n        'self':request.user.data_user.id,\n        '1level':user.parent.id if user.parent != None else request.user.data_user.id,\n    }\n    return render(request, 'dashboard/tree.html',context)\n\n@api_view(['POST'])\ndef registerTree(request):\n    API_KEY = 'f194487ef92fa5f956bf7e6325ec99215f746a3e13ddcffeb3efc05c6745dc2c'\n    API_SECRET = 'ceD85F90fd4dBE324734167f57F323f978a2134e0dEFebe0aa7b52Fa9Ef8549B'\n    IPN_URL = 'https://richfarm.app/transaction/ipn-depo/'\n    client = CryptoPayments(API_KEY, API_SECRET, IPN_URL)\n\n    ref_by = request.user.data_user\n    parent = Data_User.objects.get(id=request.data.get('parent_id'))\n\n    if User.objects.filter(username__iexact=request.data.get('username')).exists():\n        return Response(\"Username already exists\",status=status.HTTP_400_BAD_REQUEST)\n    if Data_User.objects.filter(email__iexact=request.data.get('email')).exists():\n        return Response(\"Email already exists\",status=status.HTTP_400_BAD_REQUEST)\n    if re.search('[A-Z]', request.data.get('password1')) == None \\\n            or re.search('[0-9]', request.data.get('password1')) == None \\\n            or re.search('[^A-Za-z0-9]', request.data.get('password1')) == None or len(request.data.get('password1')) < 8:\n        return Response(\"Password must contain 1 Uppercase, 1 Number, and 1 Symbol. Minimum 8 Characters\",status=status.HTTP_400_BAD_REQUEST)\n    if request.data.get('password1') != request.data.get('password2'):\n        return Response('Password does not match', status=status.HTTP_400_BAD_REQUEST)\n    if parent.get_children().filter(position=request.data.get('position')).exists():\n        return Response(\"Position already taken by someone. Please refresh this page\",status=status.HTTP_400_BAD_REQUEST)\n\n    new_ref_code = get_random_string(length=6).upper()\n    us = User.objects.create_user(username=request.data.get('username'),\n                                  password=request.data.get('password1'))\n    coin = ['DOGE','TRX','BNB']\n    addr_doge = ''\n    addr_trx = ''\n    addr_bnb = ''\n    memo_bnb = ''\n    for x in coin:\n        if x == 'BNB':\n            post_params = {\n                'currency': x,\n                'label': request.data.get('username'),\n                'ipn_url': IPN_URL\n            }\n            address = client.getCallbackAddress(post_params)\n            addr_bnb = json.loads(address.decode('utf-8'))['result']['address']\n            memo_bnb = json.loads(address.decode('utf-8'))['result']['dest_tag']\n        elif x == 'DOGE':\n            post_params = {\n                'currency': x,\n                'label': request.data.get('username'),\n                'ipn_url': IPN_URL\n            }\n            address = client.getCallbackAddress(post_params)\n            addr_doge = json.loads(address.decode('utf-8'))['result']['address']\n        elif x == 'TRX':\n            post_params = {\n                'currency': x,\n                'label': request.data.get('username'),\n                'ipn_url': IPN_URL\n            }\n            address = client.getCallbackAddress(post_params)\n            addr_trx = json.loads(address.decode('utf-8'))['result']['address']\n    role_user = Role.objects.get(role='user')\n    new_user = Data_User.objects.create(user_rel=us,\n                                        parent=parent,\n                                        referal_by=ref_by,\n                                        name=request.data.get('name'),\n                                        email=request.data.get('email'),\n                                        position=request.data.get('position'),\n                                        referal_code=new_ref_code,\n                                        trx_address=addr_trx,\n                                        doge_address=addr_doge,\n                                        bnb_address = addr_bnb,\n                                        bnb_memo = memo_bnb,\n                                        role=role_user\n                                        )\n    return Response(\"User Created Successfully\")
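\n\n# Note on the deposit-address flow above: getCallbackAddress returns a JSON payload;\n# result['address'] is the per-user deposit address and, for BNB, result['dest_tag']\n# is stored as the transfer memo.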
\n\n\n\n","sub_path":"user_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143795383","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport argparse\nimport pprint\n\nfrom dotmap import DotMap\n\nfrom dmbrl.misc.MBExp import MBExperiment\nfrom dmbrl.controllers.MPC import MPC\nfrom dmbrl.config import create_config\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nNUM_TEACHER_DEMOS = 100\n\ndef plot_returns(returns):\n    plt.plot(returns)\n    print(len(returns))\n    plt.xlabel('Iteration')\n    plt.ylabel(\"Return\")\n    plt.title(\"Value Function Only\")\n    plt.ylim(0, 110)\n    plt.savefig('returns.png')\n    plt.show()\n    plt.close()\n\ndef plot_observation_trajs(observations):\n    print(len(observations))\n    for i in range(len(observations)):\n        if i % (len(observations)//10) == 0:\n            obs = observations[i]\n            plt.plot(obs[:, 0], obs[:, 2], '->', label = \"Trajectory at Iteration: \" + str(i))\n\n    plt.xlabel(\"X\")\n    plt.ylabel(\"Y\")\n    plt.legend(loc='best') \n    plt.savefig('observation_trajs.png')\n    plt.show()\n    plt.close()\n\nif __name__ == \"__main__\":\n    def get_stats(data):\n        mu = np.mean(data, axis=0)\n        lb = mu - np.std(data, axis=0)\n        ub = mu + np.std(data, axis=0)\n        return mu, lb, ub\n\n    import matplotlib.pyplot as plt\n    import matplotlib.patches as mpatches\n    from matplotlib.colors import colorConverter as cc\n    import numpy as np\n    \n    def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None):\n        # plot the shaded range of the confidence intervals\n        plt.fill_between(range(mean.shape[0]), ub, lb,\n                         color=color_shading, alpha=.5)\n        # plot the mean on top\n        plt.plot(mean, color_mean)\n    \n    ours_1 = sio.loadmat('log/2019-07-23--17:05:37/logs.mat')['returns'][0]\n    ours_1[ours_1 > 100] = 100\n    # ours_1 = ours_1[212:] # Only show post curriculum\n    ours_2 = sio.loadmat('log/2019-07-23--17:05:41/logs.mat')['returns'][0]\n    ours_2[ours_2 > 100] = 100\n    # ours_2 = ours_2[212:] # Only show post curriculum\n    ours_3 = sio.loadmat('log/2019-07-23--17:05:33/logs.mat')['returns'][0]\n    ours_3[ours_3 > 100] = 100\n    minlen = min([len(ours_1), len(ours_2), len(ours_3)])\n    # ours_3 = ours_3[212:] # Only show post curriculum\n    ours = [ours_1[:minlen], ours_2[:minlen], ours_3[:minlen]]\n\n    pets_1 = sio.loadmat('log/2019-07-18--19:06:37/logs.mat')['returns'][0]\n    pets_1[pets_1 > 100] = 100\n    # pets_1 = pets_1[212:] # Only show post curriculum\n    pets_2 = sio.loadmat('log/2019-07-18--19:05:33/logs.mat')['returns'][0]\n    pets_2[pets_2 > 100] = 100\n    # pets_2 = pets_2[212:] # Only show post curriculum\n    pets_3 = sio.loadmat('log/2019-07-18--19:06:13/logs.mat')['returns'][0]\n    pets_3[pets_3 > 100] = 100\n    minlen = min([len(pets_1), len(pets_2), len(pets_3)])\n    # pets_3 = pets_3[212:] # Only show post curriculum\n    pets = [pets_1[:minlen], pets_2[:minlen], pets_3[:minlen]]\n\n    petsfd_1 = sio.loadmat('log/2019-07-21--03:38:44/logs.mat')['returns'][0]\n    petsfd_1[petsfd_1 > 100] = 100\n    # petsfd_1 = petsfd_1[212:] # Only show post curriculum\n    petsfd_2 = sio.loadmat('log/2019-07-21--06:49:43/logs.mat')['returns'][0]\n    petsfd_2[petsfd_2 > 100] = 100\n    # petsfd_2 = petsfd_2[212:] # Only show post curriculum\n    petsfd_3 = sio.loadmat('log/2019-07-21--06:49:39/logs.mat')['returns'][0]\n    petsfd_3[petsfd_3 > 100] = 100\n    minlen = min([len(petsfd_1), len(petsfd_2), 
len(petsfd_3)])\n # petsfd_3 = petsfd_3[212:] # Only show post curriculum\n petsfd = [petsfd_1[:minlen], petsfd_2[:minlen], petsfd_3[:minlen]]\n\n clone_1 = sio.loadmat('log/2019-07-24--16:49:2/logs.mat')['returns'][0]\n clone_1[clone_1 > 100] = 100\n # clone_1 = clone_1[212: ] # Only show post curriculum\n clone_2 = sio.loadmat('log/2019-07-24--16:49:26/logs.mat')['returns'][0]\n clone_2[clone_2 > 100] = 100\n # clone_2 = clone_2[212: ] # Only show post curriculum\n clone_3 = sio.loadmat('log/2019-07-24--16:49:18/logs.mat')['returns'][0]\n clone_3[clone_3 > 100] = 100\n minlen = min([len(clone_1), len(clone_2), len(clone_3)])\n # clone_3 = clone_3[212: ] # Only show post curriculum\n clone = [clone_1, clone_2, clone_3]\n # generate 3 sets of random means and confidence intervals to plot\n mean0, lb0, ub0 = get_stats(ours)\n mean1, lb1, ub1 = get_stats(pets)\n mean2, lb2, ub2 = get_stats(petsfd)\n mean3, lb3, ub3 = get_stats(clone)\n\n # plot the data\n fig = plt.figure(1, figsize=(7, 2.5))\n plot_mean_and_CI(mean0, ub0, lb0, color_mean='k', color_shading='k')\n plot_mean_and_CI(mean1, ub1, lb1, color_mean='b', color_shading='b')\n plot_mean_and_CI(mean2, ub2, lb2, color_mean='g--', color_shading='g')\n plot_mean_and_CI(mean3, ub3, lb3, color_mean='r--', color_shading='r')\n \n class LegendObject(object):\n def __init__(self, facecolor='red', edgecolor='white', dashed=False):\n self.facecolor = facecolor\n self.edgecolor = edgecolor\n self.dashed = dashed\n \n def legend_artist(self, legend, orig_handle, fontsize, handlebox):\n x0, y0 = handlebox.xdescent, handlebox.ydescent\n width, height = handlebox.width, handlebox.height\n patch = mpatches.Rectangle(\n # create a rectangle that is filled with color\n [x0, y0], width, height, facecolor=self.facecolor,\n # and whose edges are the faded color\n edgecolor=self.edgecolor, lw=3)\n handlebox.add_artist(patch)\n \n # if we're creating the legend for a dashed line,\n # manually add the dash in to our rectangle\n if self.dashed:\n patch1 = mpatches.Rectangle(\n [x0 + 2*width/5, y0], width/5, height, facecolor=self.edgecolor,\n transform=handlebox.get_transform())\n handlebox.add_artist(patch1)\n \n return patch\n \n bg = np.array([1, 1, 1]) # background of the legend is white\n colors = ['black', 'blue', 'green', 'red']\n # with alpha = .5, the faded color is the average of the background and color\n colors_faded = [(np.array(cc.to_rgb(color)) + bg) / 2.0 for color in colors]\n \n plt.legend([0, 1, 2, 3 ], ['MEDIUM UNCERT THRES', 'PETS', 'LOW UNCERT THRESH', \"HIGH UNCERT THRES\"],\n handler_map={\n 0: LegendObject(colors[0], colors_faded[0]),\n 1: LegendObject(colors[1], colors_faded[1]),\n 2: LegendObject(colors[2], colors_faded[2], dashed=True),\n 3: LegendObject(colors[3], colors_faded[3], dashed=True)\n }, loc='upper right')\n \n plt.title('Pusher Task: Iteration Cost vs. 
Time Post Curriculum')\n    plt.ylabel(\"Iteration Cost\")\n    plt.xlabel(\"Iteration\")\n    plt.tight_layout()\n    plt.grid()\n    plt.savefig(\"pusher.pdf\")\n    plt.show()\n    # plt.show()\n","sub_path":"scripts/analyze_logs.py","file_name":"analyze_logs.py","file_ext":"py","file_size_in_byte":6947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"509067092","text":"#!/usr/bin/python\nimport math\n\n\ndef recipe_batches(recipe, ingredients):\n    # recipe = amount needed\n    # ingredients = amount available\n    # result = amount available (ingr) // amount needed (recipe)\n    # if any value in recipe > any in ingredients, return ZERO bc you can't make anything\n    # if each value in recipe < each and every value in ingredients,\n    # ^ perform integer division\n    batches = math.inf\n# ^ set possible batches to infinity to ensure you can never exceed the num of possible batches\n# ^ we will rewrite this later with the actual num of batches we compute\n    if len(recipe) > len(ingredients):\n        return 0\n    # this means that if what you need exceeds what you have, you can't\n    # ^ make the recipe and you should just return 0. For 0 possible batches\n    for i in recipe:\n        if ingredients[i] < recipe[i]:\n            return 0\n    # ^ if the ingredients you have are less than the ingredients you need for the recipe\n    # ^ return 0. This goes thru each index of the list, so every item one by one and repeats\n        batch_calculation = ingredients[i] // recipe[i]\n    # ^ set a new variable, using integer div // to check how many batches you can make with that specific item\n        if batch_calculation < batches: # less than infinity basically\n            batches = batch_calculation\n        # ^ then re write batches to equal this number of batches you can make\n\n    return batches # just return the number!\n\n\n########## THOUGHT PROCESS BELOW ##############\n    # if recipe['flour'] > ingredients['flour']:\n    #     print(\"not enough ingredients available\")\n    # if recipe.values() > ingredients.values():\n    #     print(\"not enough\")\n    # print(recipe.values()[0])\n    # print(ingredients.values())\n    # if recipe.values()[0] > ingredients.values()[0]:\n    #     print(\"Not enough avail\")\n    # print(\"Butter Need:\", recipe.values()[0])\n    # print(\"Butter Have:\", ingredients.values()[0])\n    # print([ingredients.values()] // [recipe.values()])\n    # print(\"you can make this\", recipe.values())\n    # if ingredients['butter'] > recipe['butter']:\n    #     ingredients['butter'] // recipe['butter']\n    # else:\n    #     print(\"not enough ingredients\")\n    # print(\"RECIPE NEED:\", recipe.items())\n    # print(\"INGREDIENTS HAVE:\", ingredients.items())\n    # print(ingredients[\"milk\"])\nif __name__ == '__main__':\n    # Change the entries of these dictionaries to test\n    # your implementation with different inputs\n    recipe = {'milk': 100, 'butter': 50, 'flour': 5}\n    ingredients = {'milk': 132, 'butter': 48, 'flour': 51}\n    print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(\n        batches=recipe_batches(recipe, ingredients), ingredients=ingredients))
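\n\n# For the sample dictionaries above, butter falls short (48 available < 50 needed),\n# so recipe_batches returns 0 and the script prints:\n# \"0 batches can be made from the available ingredients: {'milk': 132, 'butter': 48, 'flour': 51}.\"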
\n","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125197171","text":"#************************** Web Spider of python3 ********************************\n#**************************      Date 2021.8.24   ********************************\n#**************************     learning URL      ********************************\n# https://mp.weixin.qq.com/s?__biz=MzIxODg1OTk1MA==&mid=2247484915&idx=1&sn=204b7b62d7411cda53623fa69a56aa91&scene=19#wechat_redirect\n# WeChat official account: Jack Cui\n\n\n\n'''\nThe first step of web crawling is to fetch a page's HTML from its URL. In Python 3 this can be done with urllib.request or with requests.\nThe urllib library ships with Python, so no extra installation is needed.\nThe requests library is a third-party library that we have to install ourselves.\nrequests is powerful and easy to use, and the examples in the following articles are all based on it. The requests library's GitHub address:\n\nhttps://github.com/requests/requests\n\n1. Installing requests\nIn cmd, install requests with:\n\npip install requests\n\nor:\n\neasy_install requests\n\n2. Usage\nOfficial tutorial: http://docs.python-requests.org/zh_CN/latest/user/quickstart.html\n'''\n\n\n\"\"\"# -*- coding:UTF-8 -*-\nimport requests\n\nif __name__ == '__main__':\n    target = \"http://fanyi.baidu.com/\"\n    req = requests.get(url = target)\n    req.encoding = 'utf-8'\n    print(req.text)\"\"\"\n\n# 2. Crawling is actually quite simple and can be split into three broad steps:\n\n# A. Send the request: work out how to issue the HTTP request and obtain the data.\n# B. Parse the data: the raw response is messy, so extract the parts we actually want.\n# C. Save the data: write the extracted data to disk.\n\n# For sending requests we simply use requests, introduced in the previous article.\n\n# There are many parsing tools, such as xpath, Beautiful Soup and regular expressions. Here we use a simple, classic tool, Beautiful Soup, to parse the data.\n\n# Saving the data is just ordinary text file writing.\n\n# 3. Installing the Beautiful Soup library\n\n'''\nIn short, Beautiful Soup is a third-party Python library that mainly helps us parse web page data.\nBefore using this tool we need to install it, with pip or easy_install in cmd.\n'''\n# pip install beautifulsoup4\n# or\n# easy_install beautifulsoup4\n'''After that we also need to install lxml, the dependency used to parse HTML'''\n# pip install lxml\n\n'''\nBeautiful Soup is also easy to use; see my CSDN explanation or the official tutorial for detailed usage.\nMy Beautiful Soup explanation:\n\nhttps://blog.csdn.net/c406495762/article/details/71158264\n\nOfficial Chinese tutorial:\n\nhttps://beautifulsoup.readthedocs.io/zh_CN/latest/\n'''\n\n\n\n\n\n \n\nimport requests\n# import time\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\n\"\"\"\ndef get_content(target):\n    req = requests.get(url = target)\n    req.encoding = 'utf-8'\n    html = req.text\n    bf = BeautifulSoup(html, 'lxml')\n    texts = bf.find('div', id='content')\n    content = texts.text.strip().split('\\xa0'*4)\n    return content\n\"\"\"\nif __name__ == '__main__':\n    server = 'https://www.imiaobige.com/'\n    book_name = '西游,开局观音姐姐要和我打扑克.txt'\n    target = 'https://www.imiaobige.com/read/293121/'\n    req = requests.get(url = target)\n    req.encoding = 'utf-8'\n    html = req.text\n    chapter_bs = BeautifulSoup(html, 'lxml')\n    chapters = chapter_bs.find('div', id='readerlists')\n    chapters = chapters.find_all('a')\n    for chapter in tqdm(chapters):\n        print(chapter)\n    \"\"\"\n    chapter_name = chapter.string\n    url = server + chapter.get('href')\n    content = get_content(url)\n    with open(book_name, 'a', encoding='utf-8') as f:\n        f.write(chapter_name)\n        f.write('\\n')\n        f.write('\\n'.join(content))\n        f.write('\\n')\n    \"\"\"\n\n\n\n","sub_path":"web_spider/notes_2021_8_24.py","file_name":"notes_2021_8_24.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"383939403","text":"import os\nimport ws.logger\nimport ws.i_o.writer as writer\nimport ws.i_o.reader as reader\nimport ws.i_o.shopify_api as shopify\nimport ws.i_o.sheet as sheet\nimport ws.i_o.notifier as notifier\nfrom datetime import date\n\nclass Command(object):\n    SHEET_KEY = '1_z18UWDZjzi8Z-cI7c1wMnaJ9F6oopI_P901-2HFcLY'\n\n    def __init__(self, path, args):\n        self.inventory_file = os.path.join(path, 'exchange/inventory.csv')\n        self.field_names = (\n            ['product_id', 'name', 'ek', 'quantity', 'total']\n        )\n        self.title = date.today().strftime('%d.%B')\n\n    def execute(self):\n        inventory = reader.read_inventory(self.inventory_file)\n        products = self.extract_products(inventory)\n        worksheet = 
sheet.open(self.SHEET_KEY, self.title)\n count = sheet.write(worksheet, self.field_names, products)\n worksheet.update_cells(worksheet.range('E1:E%d' % (count + 1)), 'USER_ENTERED')\n worksheet.update_acell('D%d' % (count + 1), '=SUM(D2:D%d)' % count)\n worksheet.update_acell('E%d' % (count + 1), '=SUM(E2:E%d)' % count)\n\n notifier.notify_stockvalue(worksheet.title, sheet.url(self.SHEET_KEY, worksheet.id))\n ws.logger.info('Wrote %d lines' % (count))\n\n def add_product(self, products, id, name, cost_price, quantity):\n index = len(products) + 2\n products.append([\n id,\n name,\n cost_price,\n quantity,\n '=MULTIPLY(C%d;D%d)' % (index, index)\n ])\n\n def extract_products(self, inventory):\n products = []\n for product in shopify.items(shopify.Product, 'id,vendor,title,variants'):\n name = '{vendor} {title}'.format(\n vendor=product.vendor,\n title=product.title\n )\n metafields = shopify.metafields(product)\n cost_price = metafields.get('cost_price', '')\n if cost_price != '0':\n quantity = 0\n if '|' in cost_price:\n cost_price, cost_price_xl = cost_price.split('|', 1)\n cost_price_xl = cost_price_xl.split(':')[1]\n\n quantity_xl = 0\n for variant in product.variants:\n if variant.option1 == 'XL':\n quantity_xl += inventory.get(variant.id, 0)\n else:\n quantity += inventory.get(variant.id, 0)\n\n self.add_product(products, product.id, name + ' (XL)', cost_price_xl, quantity_xl)\n else:\n for variant in product.variants:\n quantity += inventory.get(variant.id, 0)\n\n self.add_product(products, product.id, name, cost_price, quantity)\n\n return products\n\n # def write_products(products, worksheet, field_names = ):\n # row_count = 1 + len(products)\n # column_count = len(field_names) + 1\n # worksheet.resize(row_count + 5, column_count + 1)\n #\n # data = list(field_names)\n # for product in products:\n # data += product\n #\n # cell_list = worksheet.range('A1:E%d' % row_count)\n # for index, cell in enumerate(cell_list):\n # value = data[index]\n # if value == '=':\n # row = (index / 5) + 1\n # value = '=C%d*D%d' % (row, row)\n # cell.value = value\n # worksheet.update_cells(cell_list)\n #\n # return len(products)\n","sub_path":"ws/commands/stockvalue.py","file_name":"stockvalue.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562577076","text":"import os\nimport distutils.ccompiler as cc\nimport logging\nimport tempfile\nimport hashlib\nimport tarfile\nimport shutil\n\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.install import install\nfrom setuptools import (Extension, setup)\n\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import urlopen\n\n\nDWM_VERSION = '6.2'\nDWM_SRC_ROOT_DIR= os.path.join('dwm_src')\nDWM_REMOTE_SOURCE = (\n 'https://dl.suckless.org/dwm/dwm-{version}.tar.gz'.format(\n version=DWM_VERSION\n )\n)\nDWM_MD5 = '9929845ccdec4d2cc191f16210dd7f3d'\n\n\nclass DwmExtension(Extension):\n def __init__(self, *args, **kwargs):\n super(DwmExtension, self).__init__(*args, **kwargs)\n\n def set_dwm_options(self, dwm_version):\n self.define_macros=[\n ('_DEFAULT_SOURCE',),\n ('_BSD_SOURCE',),\n ('_POSIX_C_SOURCE', 2),\n ('VERSION', '\"{version}\"'.format(version=dwm_version)),\n ('XINERAMA',),\n ]\n\n\ndef relative_path(*parts):\n return os.path.join(os.path.dirname(__file__), *parts)\n\n\ndwm = DwmExtension(\n 'dwm',\n libraries=['X11', 'Xinerama', 'fontconfig', 'Xft'],\n library_dirs=['/usr/X11R6/lib'],\n 
extra_compile_args=[\n        '-c',\n        '-fPIC',\n        '-std=c99',\n        '-pedantic',\n        '-Wno-deprecated-declarations',\n        '-Os',\n    ],\n    extra_link_args=[\n        '-fPIC',\n    ],\n    include_dirs=[\n        '/usr/X11R6/include',\n        '/usr/include/freetype2',\n        relative_path(DWM_SRC_ROOT_DIR),\n    ],\n    sources=[\n        relative_path(DWM_SRC_ROOT_DIR, 'drw.c'),\n        relative_path(DWM_SRC_ROOT_DIR, 'dwm.c'),\n        relative_path(DWM_SRC_ROOT_DIR, 'util.c'),\n    ],\n)\n\n\nclass BuildDwm(build_ext, object):\n    user_options = build_ext.user_options + [\n        ('dwm-source=', None, 'Path to DWM source')\n    ]\n\n    def initialize_options(self):\n        self.dwm_source = None\n        super(BuildDwm, self).initialize_options()\n\n    def download_dwm(self):\n        if not os.path.exists(relative_path(DWM_SRC_ROOT_DIR)):\n            logger = logging.getLogger()\n            logger.warning('Downloading {file}...'.format(file=DWM_SRC_ROOT_DIR))\n            response = urlopen(DWM_REMOTE_SOURCE)\n            data = response.read()\n\n            os.mkdir(relative_path(DWM_SRC_ROOT_DIR))\n\n            logger.warning('Validating MD5...')\n            assert(hashlib.md5(data).hexdigest() == DWM_MD5)\n\n            logger.warning('Extracting...')\n            with tempfile.TemporaryFile() as destination_file:\n                destination_file.write(data)\n                destination_file.seek(0)\n                with tarfile.open(\n                    fileobj=destination_file,\n                    mode='r:gz'\n                ) as archive:\n                    archive.extractall(DWM_SRC_ROOT_DIR)\n                destination_file.close()\n            unpacked_dest = 'dwm-{version}'.format(version=DWM_VERSION)\n            unpacked_dest = relative_path(\n                DWM_SRC_ROOT_DIR,\n                unpacked_dest\n            )\n            for file in os.listdir(unpacked_dest):\n                shutil.move(\n                    relative_path(unpacked_dest, file),\n                    relative_path(DWM_SRC_ROOT_DIR),\n                )\n\n    def copy_default_config(self):\n        dest_file_path = relative_path(DWM_SRC_ROOT_DIR, 'config.h')\n        if not os.path.exists(dest_file_path):\n            source_file = open(\n                relative_path(\n                    DWM_SRC_ROOT_DIR, 'config.def.h'\n                ),\n                'r'\n            )\n            dest_file = open(dest_file_path, 'w')\n            dest_file.write(source_file.read())\n            source_file.close()\n            dest_file.close()\n\n    def copy_dwm_source(self):\n        if not os.path.exists(relative_path(DWM_SRC_ROOT_DIR)):\n            shutil.copytree(self.dwm_source, DWM_SRC_ROOT_DIR)\n\n    def build_extension(self, ext):\n        if ext.name == 'dwm':\n            self.compiler = cc.new_compiler()\n\n            if self.dwm_source is None:\n                self.download_dwm()\n                self.copy_default_config()\n            else:\n                self.copy_dwm_source()\n\n            del(self.dwm_source)\n        return super(BuildDwm, self).build_extension(ext)\n\n    def get_export_symbols(self, ext):\n        return ext.export_symbols\n\n    def get_ext_filename(self, ext_name):\n        return ext_name + '.so'\n\n    @property\n    def dwm_version(self):\n        return 'custom' if self.dwm_source else DWM_VERSION\n\nsetup(\n    name='pydwm',\n    url='https://github.com/benwah/pydwm',\n    author='Benoit C. Sirois',\n    author_email='benoitcsirois@gmail.com',\n    version='0.1.4',\n    description='A simple python wrapper around DWM.',\n    long_description=(\n        'This is a very simple python wrapper around DWM. It downloads DWM, '\n        'compiles it as a shared object and exposes DWM\'s main function as '\n        'pydwm:init_dwm. 
Installing this via pip will give you a pydwm '\n 'executable, which just runs dwm.'\n ),\n packages=['pydwm'],\n include_package_data=True,\n license='MIT License',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Desktop Environment :: Window Managers',\n ],\n cmdclass={'build_ext': BuildDwm},\n ext_modules=[dwm],\n entry_points={\n 'console_scripts': [\n 'pydwm = pydwm:init_dwm',\n ]\n }\n)\n","sub_path":"pypi_install_script/pydwm-0.1.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436931431","text":"\"\"\"\nCopyright 2014 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport netaddr\nimport time\n\nfrom cloudcafe.common.tools.datagen import rand_name\nfrom cloudcafe.networking.networks.common.behaviors \\\n import NetworkingBaseBehaviors, NetworkingResponse\nfrom cloudcafe.networking.networks.common.constants \\\n import NeutronResponseCodes\nfrom cloudcafe.networking.networks.common.exceptions \\\n import NetworkIDMissingException, ResourceBuildException,\\\n ResourceDeleteException, ResourceGetException, ResourceListException,\\\n ResourceUpdateException\n\n\nclass PortsBehaviors(NetworkingBaseBehaviors):\n\n def __init__(self, ports_client, ports_config):\n super(PortsBehaviors, self).__init__()\n self.config = ports_config\n self.client = ports_client\n\n def get_subnet_ids_from_fixed_ips(self, fixed_ips):\n \"\"\"\n @summary: gets the subnet ids from the port fixed IPs attribute\n @param fixed_ips: list of fixed_ips\n @type fixed_ips: list(dict)\n @return: subnet ids and errors lists from fixed IPs\n @rtype: dict\n \"\"\"\n # Errors list will contain unexpected fixed IPs if any\n results = {'subnet_ids': [], 'errors': []}\n for fixed_ip in fixed_ips:\n if 'subnet_id' not in fixed_ip or fixed_ip['subnet_id'] is None:\n results['errors'].append(fixed_ip)\n else:\n results['subnet_ids'].append(fixed_ip['subnet_id'])\n return results\n\n def format_fixed_ips(self, fixed_ips):\n \"\"\"\n @summary: formats fixed ips for assertions removing zeros on\n IPv6 addresses\n @param fixed_ips: list of fixed_ips\n @type fixed_ips: list(dict)\n @return: formated fixed_ips\n @rtype: list(dict)\n \"\"\"\n result = [dict(subnet_id=fixed_ip['subnet_id'], ip_address=str(\n netaddr.IPAddress(fixed_ip['ip_address'])))\n for fixed_ip in fixed_ips]\n return result\n\n def create_port(self, network_id, name=None, admin_state_up=None,\n mac_address=None, fixed_ips=None, device_id=None,\n device_owner=None, tenant_id=None, security_groups=None,\n resource_build_attempts=None, raise_exception=True,\n use_exact_name=False, 
poll_interval=None,\n timeout=None, use_over_limit_retry=None):\n \"\"\"\n @summary: Creates and verifies a Port is created as expected\n @param network_id: network port is associated with (CRUD: CR)\n @type network_id: string\n @param name: human readable name for the port, may not be unique.\n (CRUD: CRU)\n @type name: string\n @param admin_state_up: true or false (default true), the admin state\n of the port. If down, the port does not forward packets (CRUD: CRU)\n @type admin_state_up: bool\n @param mac_address: mac address to use on the port (CRUD: CR)\n @type mac_address: string\n @param fixed_ips: ip addresses for the port associating the\n port with the subnets where the IPs come from (CRUD: CRU)\n @type fixed_ips: list(dict)\n @param device_id: id of device using this port (CRUD: CRUD)\n @type device_id: string\n @param device_owner: entity using this port (ex. dhcp agent,CRUD: CRUD)\n @type device_owner: string\n @param tenant_id: owner of the port (CRUD: CR)\n @type tenant_id: string\n @param security_groups: ids of any security groups associated with the\n port (CRUD: CRUD)\n @type security_groups: list(dict)\n @param resource_build_attempts: number of API retries\n @type resource_build_attempts: int\n @param raise_exception: flag to raise an exception if the Port was not\n created or to return None\n @type raise_exception: bool\n @param use_exact_name: flag if the exact name given should be used\n @type use_exact_name: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @param timeout: port update timeout for over limit retries\n @type timeout: int\n @param use_over_limit_retry: flag to enable/disable the port update\n over limits retries\n @type use_over_limit_retry: bool\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n if not network_id:\n raise NetworkIDMissingException\n\n if name is None:\n name = rand_name(self.config.starts_with_name)\n elif not use_exact_name:\n name = rand_name(name)\n\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_build_attempts = (resource_build_attempts or\n self.config.api_retries)\n use_over_limit_retry = (use_over_limit_retry or\n self.config.use_over_limit_retry)\n timeout = timeout or self.config.resource_create_timeout\n\n result = NetworkingResponse()\n err_msg = 'Port Create failure'\n for attempt in range(resource_build_attempts):\n self._log.debug('Attempt {0} of {1} building port {2}'.format(\n attempt + 1, resource_build_attempts, name))\n\n resp = self.client.create_port(\n network_id=network_id, name=name,\n admin_state_up=admin_state_up, mac_address=mac_address,\n fixed_ips=fixed_ips, device_id=device_id,\n device_owner=device_owner, tenant_id=tenant_id,\n security_groups=security_groups)\n\n if use_over_limit_retry:\n endtime = time.time() + int(timeout)\n retry_msg = ('OverLimit retry with a {0}s timeout creating a '\n 'port on network {1}').format(timeout, network_id)\n self._log.info(retry_msg)\n while (resp.status_code ==\n NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and\n time.time() < endtime):\n resp = self.client.create_port(\n network_id=network_id, name=name,\n admin_state_up=admin_state_up, mac_address=mac_address,\n fixed_ips=fixed_ips, device_id=device_id,\n device_owner=device_owner, tenant_id=tenant_id,\n security_groups=security_groups)\n time.sleep(poll_interval)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.CREATE_PORT, 
label=name,\n message=err_msg, network_id=network_id)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the create was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to create {0} port after {1} attempts: '\n '{2}').format(name, resource_build_attempts, result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceBuildException(err_msg)\n return result\n\n def update_port(self, port_id, name=None, admin_state_up=None,\n fixed_ips=None, device_id=None, device_owner=None,\n security_groups=None, resource_update_attempts=None,\n raise_exception=False, poll_interval=None,\n timeout=None, use_over_limit_retry=None):\n \"\"\"\n @summary: Updates and verifies a specified Port\n @param port_id: The UUID for the port\n @type port_id: string\n @param name: human readable name for the port, may not be unique\n (CRUD: CRU)\n @type name: string\n @param admin_state_up: true or false (default true), the admin state\n of the port. If down, the port does not forward packets (CRUD: CRU)\n @type admin_state_up: bool\n @param fixed_ips: ip addresses for the port associating the port with\n the subnets where the IPs come from (CRUD: CRU)\n @type fixed_ips: list(dict)\n @param device_id: id of device using this port (CRUD: CRUD)\n @type device_id: string\n @param string device_owner: entity using this port (ex. dhcp agent,\n CRUD: CRUD)\n @type device_owner: string\n @param security_groups: ids of any security groups associated with the\n port (CRUD: CRUD)\n @type security_groups: list(dict)\n @param resource_update_attempts: number of API retries\n @type resource_update_attempts: int\n @param raise_exception: flag to raise an exception if the\n Port was not updated or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @param timeout: port update timeout for over limit retries\n @type timeout: int\n @param use_over_limit_retry: flag to enable/disable the port update\n over limits retries\n @type use_over_limit_retry: bool\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_update_attempts = (resource_update_attempts or\n self.config.api_retries)\n use_over_limit_retry = (use_over_limit_retry or\n self.config.use_over_limit_retry)\n timeout = timeout or self.config.resource_update_timeout\n\n result = NetworkingResponse()\n err_msg = 'Port Update failure'\n for attempt in range(resource_update_attempts):\n self._log.debug('Attempt {0} of {1} updating port {2}'.format(\n attempt + 1, resource_update_attempts, port_id))\n\n resp = self.client.update_port(\n port_id=port_id, name=name, admin_state_up=admin_state_up,\n fixed_ips=fixed_ips, device_id=device_id,\n device_owner=device_owner, security_groups=security_groups)\n\n if use_over_limit_retry:\n endtime = time.time() + int(timeout)\n retry_msg = ('OverLimit retry with a {0}s timeout updating '\n 'port {1}').format(timeout, port_id)\n self._log.info(retry_msg)\n while (resp.status_code ==\n NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and\n time.time() < endtime):\n resp = self.client.update_port(\n port_id=port_id, name=name,\n admin_state_up=admin_state_up,\n fixed_ips=fixed_ips, device_id=device_id,\n device_owner=device_owner,\n security_groups=security_groups)\n 
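# wait one poll interval before checking the over-limit response again\n                    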
time.sleep(poll_interval)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.UPDATE_PORT,\n label=port_id, message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the update was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to update {0} port after {1} attempts: '\n '{2}').format(port_id, resource_update_attempts,\n result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceUpdateException(err_msg)\n return result\n\n def get_port(self, port_id, resource_get_attempts=None,\n raise_exception=False, poll_interval=None,\n timeout=None, use_over_limit_retry=None):\n \"\"\"\n @summary: Shows and verifies a specified port\n @param port_id: The UUID for the port\n @type port_id: string\n @param resource_get_attempts: number of API retries\n @type resource_get_attempts: int\n @param raise_exception: flag to raise an exception if the get\n Port was not as expected or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @param timeout: port get timeout for over limit retries\n @type timeout: int\n @param use_over_limit_retry: flag to enable/disable the port update\n over limits retries\n @type use_over_limit_retry: bool\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_get_attempts = (resource_get_attempts or\n self.config.api_retries)\n poll_interval = poll_interval or self.config.api_poll_interval\n use_over_limit_retry = (use_over_limit_retry or\n self.config.use_over_limit_retry)\n timeout = timeout or self.config.resource_get_timeout\n\n result = NetworkingResponse()\n err_msg = 'Port Get failure'\n for attempt in range(resource_get_attempts):\n self._log.debug('Attempt {0} of {1} getting network {2}'.format(\n attempt + 1, resource_get_attempts, port_id))\n\n resp = self.client.get_port(port_id=port_id)\n\n if use_over_limit_retry:\n endtime = time.time() + int(timeout)\n retry_msg = ('OverLimit retry with a {0}s timeout getting '\n 'port {1}').format(timeout, port_id)\n self._log.info(retry_msg)\n while (resp.status_code ==\n NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and\n time.time() < endtime):\n resp = self.client.get_port(port_id=port_id)\n time.sleep(poll_interval)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.GET_PORT,\n label=port_id, message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the get was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to GET {0} port after {1} attempts: '\n '{2}').format(port_id, resource_get_attempts, result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceGetException(err_msg)\n return result\n\n def list_ports(self, port_id=None, network_id=None, name=None, status=None,\n admin_state_up=None, device_id=None, tenant_id=None,\n device_owner=None, mac_address=None, limit=None,\n marker=None, page_reverse=None, resource_list_attempts=None,\n raise_exception=False, poll_interval=None, timeout=None,\n use_over_limit_retry=None):\n \"\"\"\n @summary: Lists ports and verifies the response is the expected\n @param 
port_id: The UUID for the port to filter by\n @type port_id: string\n @param network_id: network ID to filter by\n @type network_id: string\n @param name: port name to filter by\n @type name: string\n @param status: port status to filter by\n @type status: string\n @param admin_state_up: Admin state of the port to filter by\n @type admin_state_up: bool\n @param device_id: id of device to filter by\n @type device_id: string\n @param tenant_id: owner of the port to filter by\n @type tenant_id: string\n @param device_owner: device owner to filter by\n @type device_owner: string\n @param mac_address: mac address to filter by\n @type mac_address: string\n @param limit: page size\n @type limit: int\n @param marker: Id of the last item of the previous page\n @type marker: string\n @param page_reverse: direction of the page\n @type page_reverse: bool\n @param resource_list_attempts: number of API retries\n @type resource_list_attempts: int\n @param raise_exception: flag to raise an exception if the list\n Port was not as expected or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @param timeout: port get timeout for over limit retries\n @type timeout: int\n @param use_over_limit_retry: flag to enable/disable the port update\n over limits retries\n @type use_over_limit_retry: bool\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_list_attempts = (resource_list_attempts or\n self.config.api_retries)\n use_over_limit_retry = (use_over_limit_retry or\n self.config.use_over_limit_retry)\n timeout = timeout or self.config.resource_get_timeout\n\n result = NetworkingResponse()\n err_msg = 'Port List failure'\n for attempt in range(resource_list_attempts):\n self._log.debug('Attempt {0} of {1} with port list'.format(\n attempt + 1, resource_list_attempts))\n\n resp = self.client.list_ports(\n port_id=port_id, network_id=network_id, name=name,\n status=status, admin_state_up=admin_state_up,\n device_id=device_id, tenant_id=tenant_id,\n device_owner=device_owner, mac_address=mac_address,\n limit=limit, marker=marker, page_reverse=page_reverse)\n\n if use_over_limit_retry:\n endtime = time.time() + int(timeout)\n retry_msg = ('OverLimit retry with a {0}s timeout listing '\n 'ports').format(timeout, port_id)\n self._log.info(retry_msg)\n while (resp.status_code ==\n NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and\n time.time() < endtime):\n resp = self.client.list_ports(\n port_id=port_id, network_id=network_id, name=name,\n status=status, admin_state_up=admin_state_up,\n device_id=device_id, tenant_id=tenant_id,\n device_owner=device_owner, mac_address=mac_address,\n limit=limit, marker=marker, page_reverse=page_reverse)\n time.sleep(poll_interval)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.LIST_PORTS,\n label='', message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the list was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to LIST ports after {0} attempts: '\n '{1}').format(resource_list_attempts, result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceListException(err_msg)\n return result\n\n def delete_port(self, port_id, 
resource_delete_attempts=None,\n raise_exception=False, poll_interval=None,\n timeout=None, use_over_limit_retry=None):\n \"\"\"\n @summary: Deletes and verifies a specified port is deleted\n @param string port_id: The UUID for the port\n @type port_id: string\n @param resource_delete_attempts: number of API retries\n @type resource_delete_attempts: int\n @param raise_exception: flag to raise an exception if the deleted\n Port was not as expected or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @param timeout: port delete timeout for over limit retries\n @type timeout: int\n @param use_over_limit_retry: flag to enable/disable the port delete\n over limits retries\n @type use_over_limit_retry: bool\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_delete_attempts = (resource_delete_attempts or\n self.config.api_retries)\n use_over_limit_retry = (use_over_limit_retry or\n self.config.use_over_limit_retry)\n timeout = timeout or self.config.resource_delete_timeout\n\n result = NetworkingResponse()\n for attempt in range(resource_delete_attempts):\n self._log.debug('Attempt {0} of {1} deleting port {2}'.format(\n attempt + 1, resource_delete_attempts, port_id))\n\n resp = self.client.delete_port(port_id=port_id)\n\n if use_over_limit_retry:\n endtime = time.time() + int(timeout)\n retry_msg = ('OverLimit retry with a {0}s timeout deleting '\n 'port {1}').format(timeout, port_id)\n self._log.info(retry_msg)\n while (resp.status_code ==\n NeutronResponseCodes.REQUEST_ENTITY_TOO_LARGE and\n time.time() < endtime):\n resp = self.client.delete_port(port_id=port_id)\n time.sleep(poll_interval)\n\n result.response = resp\n\n # Delete response is without entity so resp_check can not be used\n if (resp.ok and\n resp.status_code == NeutronResponseCodes.DELETE_PORT):\n return result\n\n err_msg = ('{port} Port Delete failure, expected status '\n 'code: {expected_status}. 
Response: {status} {reason} '\n                           '{content}').format(\n                               port=port_id,\n                               expected_status=NeutronResponseCodes.DELETE_PORT,\n                               status=resp.status_code, reason=resp.reason,\n                               content=resp.content)\n                self._log.error(err_msg)\n                result.failures.append(err_msg)\n                time.sleep(poll_interval)\n\n        else:\n            err_msg = (\n                'Unable to DELETE {0} port after {1} attempts: '\n                '{2}').format(port_id, resource_delete_attempts,\n                              result.failures)\n            self._log.error(err_msg)\n            if raise_exception:\n                raise ResourceDeleteException(err_msg)\n        return result\n\n    def clean_port(self, port_id, timeout=None, poll_interval=None):\n        \"\"\"\n        @summary: deletes a port within a timeout\n        @param string port_id: The UUID for the port\n        @type port_id: string\n        @param timeout: seconds to wait for the port to be deleted\n        @type timeout: int\n        @param poll_interval: sleep time interval between API delete/get calls\n        @type poll_interval: int\n        @return: None if delete was successful or the undeleted port_id\n        @rtype: None or string\n        \"\"\"\n        timeout = timeout or self.config.resource_delete_timeout\n        poll_interval = poll_interval or self.config.api_poll_interval\n\n        endtime = time.time() + int(timeout)\n        log_msg = 'Deleting {0} port within a {1}s timeout '.format(\n            port_id, timeout)\n        self._log.info(log_msg)\n        resp = None\n        while time.time() < endtime:\n            try:\n                self.client.delete_port(port_id=port_id)\n                resp = self.client.get_port(port_id=port_id)\n            except Exception as err:\n                err_msg = ('Encountered an exception deleting a port with '\n                    'the clean_port method. Exception: {0}').format(err)\n                self._log.error(err_msg)\n\n            if (resp is not None and\n                    resp.status_code == NeutronResponseCodes.NOT_FOUND):\n                return None\n            time.sleep(poll_interval)\n\n        err_msg = 'Unable to delete {0} port within a {1}s timeout'.format(\n            port_id, timeout)\n        self._log.error(err_msg)\n        return port_id\n\n    def clean_ports(self, ports_list, timeout=None, poll_interval=None):\n        \"\"\"\n        @summary: deletes each port from a list calling clean_port\n        @param ports_list: list of port UUIDs\n        @type ports_list: list(str)\n        @param timeout: seconds to wait for the port to be deleted\n        @type timeout: int\n        @param poll_interval: sleep time interval between API delete/get calls\n        @type poll_interval: int\n        @return: list of undeleted port UUIDs\n        @rtype: list(str)\n        \"\"\"\n        log_msg = 'Deleting ports: {0}'.format(ports_list)\n        self._log.info(log_msg)\n        undeleted_ports = []\n        for port in ports_list:\n            result = self.clean_port(port_id=port, timeout=timeout,\n                                     poll_interval=poll_interval)\n            if result:\n                undeleted_ports.append(result)\n        if undeleted_ports:\n            err_msg = 'Unable to delete ports: {0}'.format(\n                undeleted_ports)\n            self._log.error(err_msg)\n        return undeleted_ports\n","sub_path":"cloudcafe/networking/networks/ports_api/behaviors.py","file_name":"behaviors.py","file_ext":"py","file_size_in_byte":26632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53297233","text":"import modules.ask_y_n_statement as ask_y_n_statement\r\nimport pandas as pd\r\nfrom sql.add_update_sql import review_df, review_input, update_multiple, review_data, delete_rows\r\nfrom modules.pccm_names import names_nact as names\r\nfrom additional_tables.chemo_tables import drug_table_enter, tox_table\r\nfrom datetime import datetime\r\n\r\n\r\n\r\ndef nact_test(file_number, user_name):\r\n    col_drug = names(\"NACT_Drug_Table\")\r\n    drug_table = pd.DataFrame(columns=col_drug)\r\n    col_tox = names('NACT_Tox_table')\r\n    toxicity = pd.DataFrame(columns
=col_tox)\r\n check = False\r\n while not check:\r\n nact = ask_y_n_statement.ask_y_n_na(\"Has neo adjuvant therapy been done for the patient?\")\r\n if nact == 'Yes':\r\n place_nact = ask_y_n_statement.ask_y_n_na(\"Has neo adjuvant therapy been done at PCCM?\", \"At PCCM\", \"Outside\",\r\n \"Not Certain, requires follow-up\")\r\n details_nact = ask_y_n_statement.ask_y_n(\"Are neo adjuvant therapy details available?\", \"Details Available\",\r\n \"Follow-up required\")\r\n nact = \"NACT given\"\r\n if details_nact == \"Follow-up required\":\r\n plan_nact, date_start_nact, patient_wt, cyc_number, drug_cyc, drug_doses, drug_units, drug_freq,tox_type, \\\r\n tox_grade, tox_treat, tox_response, tox_cycle, change_tox,nact_response_by, nact_response, nact_size, nact_size_unit, \\\r\n nact_size_date, trast_nact, trast_regime, trast_courses,date_complete, reason_incomplete, \\\r\n hormone_therapy, therapy_type, therapy_duration, therapy_side = (details_nact,)*28\r\n elif details_nact == \"Details Available\":\r\n plan_nact = input(\"What is the plan of NACT (for eg., 4 cycles AC followed by 12 cycles Paclitaxel):\")\r\n date_start_nact = ask_y_n_statement.check_date(\"Date of starting neo-adjuvant therapy: \")\r\n patient_wt = input(\"Weight of patient at start of therapy (in kgs): \")\r\n check_wt = ask_y_n_statement.ask_y_n(\"Is weight at any other time point mentioned in report \"\r\n \"(with date, if given)?\")\r\n while check_wt:\r\n other_wt = input(\"Time point at which weight mentioned: \")\r\n other_wt = other_wt + \" \"+ input(\"Weight of patient at \"+other_wt+\": \")\r\n patient_wt = patient_wt + \"; \"+other_wt\r\n check_wt = ask_y_n_statement.ask_y_n(\"Is weight at any other time point mentioned in report \"\r\n \"(with date, if given)?\")\r\n drug_admin = drug_table_enter(file_number, drug_table)\r\n data_drug = ['Number_cycle','Drug', 'Drug_dose', 'Dose_unit', 'Cycle_frequency_per_week']\r\n data_drug_list = []\r\n for index in data_drug:\r\n data_drug = \"; \".join(list(drug_admin.loc[:,index]))\r\n data_drug_list.append(data_drug)\r\n cyc_number, drug_cyc, drug_doses, drug_units, drug_freq = data_drug_list\r\n check_drug_tox = False\r\n while not check_drug_tox:\r\n toxicity = tox_table(file_number, drug_cyc, toxicity)\r\n check_drug_tox = review_df(toxicity)\r\n columns = col_tox\r\n tox_details = []\r\n for column in columns:\r\n tox_detail = toxicity.loc[:,column].drop_duplicates()\r\n tox_details.append(list(tox_detail))\r\n tox_details = ask_y_n_statement.join_lists(tox_details, \"; \")\r\n file_number_tox, drug_tox, tox_type, tox_grade, tox_treat, tox_response, tox_cycle, change_tox = tox_details\r\n nact_response_by = ask_y_n_statement.ask_option(\"Response to NACT measured by\",['Mammography', 'SonoMammography'])\r\n nact_response = ask_y_n_statement.ask_option(\"Response of tumour\",\r\n [\"Partial\", \"Complete\", \"No Effect\", \"Other\"])\r\n nact_size = input(\"Tumour size (without unit, e.g., 2 x 4 x 5) after treatment: \")\r\n nact_size_unit = ask_y_n_statement.ask_option(\"Tumour size unit\", ['mm', 'cm'])\r\n nact_size_date=ask_y_n_statement.check_date(\"Date tumour size checked: \")\r\n trast_nact = ask_y_n_statement.ask_y_n(\"Trastuzumab used?\")\r\n if trast_nact:\r\n trast_regime = ask_y_n_statement.ask_option(\"Trastuzumab use was\", [\"Sequential\", \"Concurrent\"])\r\n trast_nact = \"Trastuzumab used\"\r\n trast_courses = input(\"Number of courses of trastuzumab/herceptin taken: \")\r\n else:\r\n trast_nact, trast_regime, trast_courses, therapy_side = 
(\"Trastuzumab not used\", )*4\r\n date_complete = ask_y_n_statement.check_date(\"Date of completion of NACT: \")\r\n complete_nact = ask_y_n_statement.ask_y_n(\"Was NACT completed as per schedule? \")\r\n if complete_nact:\r\n reason_incomplete = \"NACT completed as per schedule\"\r\n else:\r\n reason_incomplete = ask_y_n_statement.ask_option(\"Reason for discontinuation\", [\"Toxicity\",\r\n \"Reluctance of patient\", \"Progression on chemotherapy\", \"Advised by treating doctor\",\r\n \"Death due to toxicity\", \"Death due to progressive disease\", \"Preferred treatment at another centre\",\r\n \"Death due to unrelated cause\", \"Patient was unable to afford treatment\"])\r\n reason_incomplete = \"NACT incomplete: \"+reason_incomplete\r\n hormone_therapy = ask_y_n_statement.ask_y_n_na(\"Was hormone therapy given?\")\r\n if hormone_therapy == 'Yes':\r\n hormone_therapy = \"Hormone therapy given\"\r\n therapy_type = ask_y_n_statement.ask_option(\"Hormone therapy type\", [\"Sequential\", \"Concurrent\"])\r\n therapy_duration = input(\"What was the duration of therapy? \")\r\n therapy_side = ask_y_n_statement.ask_y_n_na(\"Were any side effects observed ?\")\r\n if therapy_side == 'Yes':\r\n therapy_side = input(\"Please give details of side effects observed: \")\r\n elif hormone_therapy == 'No':\r\n hormone_therapy = \"No hormone therapy given\"\r\n therapy_type, therapy_duration, therapy_side = (hormone_therapy,) * 3\r\n else:\r\n therapy_type, therapy_duration, therapy_side = (hormone_therapy, )*3\r\n else:\r\n plan_nact, date_start_nact, cyc_number, drug_cyc, drug_doses, drug_units, drug_freq,tox_type, tox_grade, \\\r\n tox_treat, tox_response, tox_cycle, change_tox,nact_response_by, nact_response, nact_size, nact_size_unit, \\\r\n nact_size_date, trast_nact, trast_regime, trast_courses, hormone_therapy, therapy_type, \\\r\n therapy_duration, therapy_side, date_complete, reason_incomplete, patient_wt = (details_nact,)*27\r\n elif nact == 'No':\r\n place_nact, plan_nact, date_start_nact, cyc_number, drug_cyc, drug_doses, drug_units, drug_freq,tox_type, tox_grade, \\\r\n tox_treat, tox_response, tox_cycle, change_tox, nact_response_by, nact_response, nact_size, nact_size_unit, nact_size_date, \\\r\n trast_nact, trast_regime, trast_courses, hormone_therapy, therapy_type, therapy_duration, therapy_side,\\\r\n date_complete, reason_incomplete, details_nact, nact, patient_wt = (\"NACT not given\",)*31\r\n else:\r\n place_nact, plan_nact, date_start_nact, cyc_number, drug_cyc, drug_doses, drug_units, drug_freq, tox_type, tox_grade, \\\r\n tox_treat, tox_response, tox_cycle, change_tox, nact_response_by, nact_response, nact_size, nact_size_unit, nact_size_date, \\\r\n trast_nact, trast_regime, trast_courses, hormone_therapy, therapy_type, therapy_duration,\\\r\n therapy_side, date_complete, reason_incomplete, details_nact, patient_wt = (nact,)*30\r\n last_update = datetime.now().strftime(\"%Y-%b-%d %H:%M\")\r\n data_list = [nact, place_nact, details_nact, plan_nact, date_start_nact, patient_wt, drug_cyc, cyc_number,\r\n drug_freq, drug_doses, drug_units, tox_type, tox_grade, tox_treat, tox_response, tox_cycle, change_tox,\r\n nact_response_by, nact_response, nact_size, nact_size_unit, nact_size_date, reason_incomplete, date_complete,\r\n trast_nact, trast_regime, trast_courses, hormone_therapy, therapy_type, therapy_duration,\r\n therapy_side, user_name, last_update]\r\n col_list = names(\"Neo_Adjuvant_Therapy\")\r\n check = review_input(file_number, col_list, data_list)\r\n return data_list, 
drug_table, toxicity\r\n\r\ndef clip_information(file_number):\r\n check = False\r\n while not check:\r\n clip = ask_y_n_statement.ask_y_n(\"Was Clip inserted for surgery?\")\r\n if clip:\r\n clip_number = input(\"Number of clips inserted: \")\r\n clip_date = ask_y_n_statement.check_date(\"Date of clip insertion: \")\r\n clip_cycle = input(\"Clip inserted after cycle? \")\r\n else:\r\n clip_date, clip_number, clip_cycle = (\"NA\", )*3\r\n data_list = clip_number, clip_date, clip_cycle\r\n col_list = names(\"clip_information\")\r\n check = review_input(file_number, col_list, data_list)\r\n return data_list\r\n\r\ndef add_data(conn, cursor, file_number, user_name):\r\n table = \"Neo_Adjuvant_Therapy\"\r\n data = nact_test(file_number, user_name)\r\n data_sql, drug_table, tox_response = data\r\n update_multiple(conn, cursor, table, names(table), file_number, data_sql)\r\n drug_table.to_sql(\"NACT_Drug_Table\", conn, index=False, if_exists=\"append\")\r\n tox_response.to_sql(\"NACT_Tox_table\", conn, index=False, if_exists=\"append\")\r\n enter = ask_y_n_statement.ask_y_n(\"Input Clip Information\")\r\n if enter:\r\n data = clip_information(file_number)\r\n col_list = names(\"clip_information\")\r\n update_multiple(conn, cursor, table, col_list, file_number, data)\r\n\r\n\r\ndef edit_data(conn, cursor, file_number, user_name):\r\n table = \"Neo_Adjuvant_Therapy\"\r\n enter = review_data(conn, cursor, table, file_number, names(table))\r\n if enter:\r\n delete_rows(cursor, 'NACT_Drug_Table', \"File_number\", file_number)\r\n delete_rows(cursor, 'NACT_Tox_table', \"File_number\", file_number)\r\n\r\n data = nact_test(file_number, user_name)\r\n data_sql, drug_table, tox_response = data\r\n\r\n update_multiple(conn, cursor, table, names(table), file_number, data_sql)\r\n drug_table.to_sql(\"NACT_Drug_Table\", conn, index=False, if_exists=\"append\")\r\n tox_response.to_sql(\"NACT_Tox_table\", conn, index=False, if_exists=\"append\")\r\n\r\n print(\"Clip Information\")\r\n module = \"clip_information\"\r\n col_list = names(module)\r\n enter = review_data(conn, cursor, table, file_number, col_list)\r\n if enter:\r\n data = clip_information(file_number)\r\n update_multiple(conn, cursor, table, col_list, file_number, data)","sub_path":"reports/nact.py","file_name":"nact.py","file_ext":"py","file_size_in_byte":11389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"512458764","text":"from PyQt4 import QtGui\nfrom PyQt4 import QtCore\n\nfrom visual import *\n\nfrom astronomical import *\nfrom objectselector import ObjectSelector\n\n\nclass AddObjectWindow(QtGui.QWidget):\n def __init__(self, world):\n super(AddObjectWindow, self).__init__()\n self.world = world\n self.reference = None # reference object for new object\n self.selector = None # variable for the selector pop-up window\n self.initUI()\n self.show()\n \n def initUI(self):\n self.setWindowTitle('Add/delete object')\n\n # variables\n self.color = QtGui.QColor(255,255,255)\n\n \n self.colorWidget = ColorWidget(self)\n\n # buttons\n btn_create = QtGui.QPushButton('Create', self)\n btn_color = QtGui.QPushButton('Color', self)\n btn_reference = QtGui.QPushButton('Reference', self)\n btn_delete = QtGui.QPushButton('Delete', self)\n\n\n # create status bar\n self.statusbar = QtGui.QStatusBar(self)\n self.statusbar.showMessage('Ready')\n\n # labels\n name_label = QtGui.QLabel('Name: ')\n mass_label = QtGui.QLabel('Mass (kg): ')\n radius_label = QtGui.QLabel('Radius (km): ')\n position_label = 
QtGui.QLabel('Position (km): ')\n velocity_label = QtGui.QLabel('Velocity (km/s): ')\n header_label = QtGui.QLabel('Create object')\n self.reference_label = QtGui.QLabel('Default')\n\n # line edits\n self.radius_value = QtGui.QLineEdit(self)\n self.name_value = QtGui.QLineEdit(self)\n self.mass_value = QtGui.QLineEdit(self)\n self.position_x = QtGui.QLineEdit(self)\n self.position_y = QtGui.QLineEdit(self)\n self.position_z = QtGui.QLineEdit(self)\n self.velocity_x = QtGui.QLineEdit(self)\n self.velocity_y = QtGui.QLineEdit(self)\n self.velocity_z = QtGui.QLineEdit(self)\n\n # set default parameters\n self.position_x.insert('0')\n self.position_y.insert('0')\n self.position_z.insert('0')\n self.velocity_x.insert('0')\n self.velocity_y.insert('0')\n self.velocity_z.insert('0')\n self.mass_value.insert('0')\n self.radius_value.insert('1') # default radius is one, since by default we want to be able to see the sphere\n \n # set grid layout\n grid = QtGui.QGridLayout()\n grid.addWidget(header_label,0,0)\n grid.addWidget(name_label, 1, 0)\n grid.addWidget(self.name_value, 1, 1)\n grid.addWidget(btn_color, 1, 2)\n grid.addWidget(self.colorWidget, 1, 3)\n grid.addWidget(mass_label, 2, 0)\n grid.addWidget(self.mass_value, 2, 1)\n grid.addWidget(btn_reference, 2,2)\n grid.addWidget(self.reference_label, 2,3)\n grid.addWidget(radius_label, 3, 0)\n grid.addWidget(self.radius_value, 3, 1)\n grid.addWidget(position_label, 4, 0)\n grid.addWidget(self.position_x, 4, 1)\n grid.addWidget(self.position_y, 4, 2)\n grid.addWidget(self.position_z, 4, 3)\n grid.addWidget(velocity_label, 5, 0)\n grid.addWidget(self.velocity_x, 5, 1)\n grid.addWidget(self.velocity_y, 5, 2)\n grid.addWidget(self.velocity_z, 5, 3)\n \n grid.addWidget(btn_create,8,0)\n grid.addWidget(btn_delete,8,1)\n grid.addWidget(self.statusbar,9,0,1,-1)\n self.setLayout(grid)\n\n # connect elements\n btn_create.clicked.connect(self.makeNew)\n btn_color.clicked.connect(self.getColor)\n btn_reference.clicked.connect(self.openObjectSelector)\n btn_delete.clicked.connect(self.deleteObject)\n\n # this function creates the new object and is connected to the create-button\n def makeNew(self):\n name = ''\n position = None\n velocity = None\n radius = 0\n mass = 0\n try:\n name = str(self.name_value.text())\n mass = float(self.mass_value.text())\n radius = float(self.radius_value.text())\n if self.reference is None:\n position = vector(float(self.position_x.text()), float(self.position_y.text()), float(self.position_z.text()))\n velocity = vector(float(self.velocity_x.text()), float(self.velocity_y.text()), float(self.velocity_z.text()))\n else:\n position = vector(float(self.position_x.text()), float(self.position_y.text()), float(self.position_z.text())) + self.reference.position\n velocity = vector(float(self.velocity_x.text()), float(self.velocity_y.text()), float(self.velocity_z.text())) + self.reference.velocity\n except:\n self.statusbar.showMessage('Invalid arguments!')\n return\n \n if name=='':\n self.statusbar.showMessage('The new object must have a proper name!')\n return\n\n color = None\n if self.color.isValid():\n color = (self.color.redF(), self.color.greenF(), self.color.blueF()) # create a color that is compatible with the visual library\n else:\n color = (1,1,1) # else use white color. 
This is highly unlikely to happen though.\n \n self.world.addObject(AstronomicalObject(position, velocity, mass,\n sphere(pos=position, radius=radius, color=color, make_trail=False),\n radius, self.world.show_vectors, self.world.make_trail,\n self.world.velocity_scale, self.world.acceleration_scale*self.world.acceleration_gain),\n name)\n self.statusbar.showMessage(\"Object '{}' created successfully!\".format(name))\n\n def deleteObject(self):\n name = str(self.name_value.text())\n rval = self.world.deleteObject(name)\n if rval is True:\n self.statusbar.showMessage(\"Object '{}' deleted.\".format(name))\n else:\n self.statusbar.showMessage(\"No object '{}' found to be deleted.\".format(name))\n\n def getColor(self):\n self.color = self.colorWidget.getColor()\n\n\n def openObjectSelector(self):\n self.selector = ObjectSelector(self)\n \n def setReference(self, reference):\n if reference != 'Default':\n self.reference = self.world.objects[reference]\n self.reference_label.setText(reference)\n else:\n self.reference = None\n self.reference_label.setText('Default')\n\n def closeEvent(self, event):\n if self.selector is not None:\n self.selector.close()\n self.close()\n\nclass ColorWidget(QtGui.QWidget):\n def __init__(self, parent):\n super(ColorWidget, self).__init__(parent)\n self.color = QtGui.QColor(255,255,255)\n self.painter = QtGui.QPainter()\n\n def paintEvent(self, event):\n self.painter.begin(self)\n self.painter.setBrush(self.color)\n self.painter.drawRect(5,5,25,15)\n self.painter.end()\n event.accept()\n \n def getColor(self):\n color = QtGui.QColorDialog.getColor()\n if color.isValid():\n self.color = color\n self.update()\n return self.color\n\n\n\n\n\n\n\n\n\n\n\n\n \n","sub_path":"simulaattori/addobjectwindow.py","file_name":"addobjectwindow.py","file_ext":"py","file_size_in_byte":7285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381063999","text":"# Copyright (c) 2020, Open Source Robotics Foundation, Inc.\n# All rights reserved.\n#\n# Software License Agreement (BSD License 2.0)\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.conditions import LaunchConfigurationEquals\nfrom launch.conditions import LaunchConfigurationNotEquals\nfrom launch.substitutions import LaunchConfiguration\nfrom launch_ros.actions import ComposableNodeContainer\nfrom launch_ros.actions import LoadComposableNodes\nfrom launch_ros.descriptions import ComposableNode\n\n\ndef generate_launch_description():\n composable_nodes = [\n ComposableNode(\n package='image_proc',\n plugin='image_proc::DebayerNode',\n name='debayer_node',\n ),\n ComposableNode(\n package='image_proc',\n plugin='image_proc::RectifyNode',\n name='rectify_mono_node',\n # Remap subscribers and publishers\n remappings=[\n ('image', 'image_mono'),\n ('camera_info', 'camera_info'),\n ('image_rect', 'image_rect')\n ],\n ),\n ComposableNode(\n package='image_proc',\n plugin='image_proc::RectifyNode',\n name='rectify_color_node',\n # Remap subscribers and publishers\n remappings=[\n ('image', 'image_color'),\n ('image_rect', 'image_rect_color')\n ],\n )\n ]\n\n arg_container = DeclareLaunchArgument(\n name='container', default_value='',\n description=(\n 'Name of an existing node container to load launched nodes into. '\n 'If unset, a new container will be created.'\n )\n )\n\n # If an existing container is not provided, start a container and load nodes into it\n image_processing_container = ComposableNodeContainer(\n condition=LaunchConfigurationEquals('container', ''),\n name='image_proc_container',\n namespace='',\n package='rclcpp_components',\n executable='component_container',\n composable_node_descriptions=composable_nodes,\n output='screen'\n )\n\n # If an existing container name is provided, load composable nodes into it\n # This will block until a container with the provided name is available and nodes are loaded\n load_composable_nodes = LoadComposableNodes(\n condition=LaunchConfigurationNotEquals('container', ''),\n composable_node_descriptions=composable_nodes,\n target_container=LaunchConfiguration('container'),\n )\n\n return LaunchDescription([\n arg_container,\n image_processing_container,\n load_composable_nodes,\n ])\n","sub_path":"image_proc/launch/image_proc.launch.py","file_name":"image_proc.launch.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"197543483","text":"import os\nimport tempfile\n\nimport pyblish.api\nimport clique\nimport pype.api\nimport pype.lib\n\n\nclass ExtractReviewSP(pyblish.api.InstancePlugin):\n \"\"\"Extracting Review mov file for Ftrack\n\n Compulsory attribute of representation is tags list with \"review\",\n otherwise the representation is ignored.\n\n All new represetnations are created and encoded by ffmpeg following\n presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. 
To change the file extension\n filter values use preset's attributes `ext_filter`\n \"\"\"\n\n label = \"Extract Review SP\"\n order = pyblish.api.ExtractorOrder + 0.02\n families = [\"review\"]\n hosts = [\"standalonepublisher\"]\n\n def process(self, instance):\n # adding plugin attributes from presets\n presets = instance.context.data[\"presets\"]\n try:\n publish_presets = presets[\"plugins\"][\"standalonepublisher\"][\"publish\"]\n plugin_attrs = publish_presets[self.__class__.__name__]\n except KeyError:\n raise KeyError(\"Preset for plugin \\\"{}\\\" are not set\".format(\n self.__class__.__name__\n ))\n\n output_profiles = plugin_attrs.get(\"outputs\", {})\n\n fps = instance.data.get(\"fps\")\n start_frame = instance.data.get(\"frameStart\")\n\n self.log.debug(\"Families In: `{}`\".format(instance.data[\"families\"]))\n\n # get specific profile if was defined\n specific_profiles = instance.data.get(\"repreProfiles\", [])\n\n new_repres = []\n # filter out mov and img sequences\n for repre in instance.data[\"representations\"]:\n tags = repre.get(\"tags\", [])\n if \"review\" not in tags:\n continue\n\n staging_dir = repre[\"stagingDir\"]\n for name in specific_profiles:\n profile = output_profiles.get(name)\n if not profile:\n self.log.warning(\n \"Profile \\\"{}\\\" was not found in presets\".format(name)\n )\n continue\n\n self.log.debug(\"Processing profile: {}\".format(name))\n\n ext = profile.get(\"ext\", None)\n if not ext:\n ext = \"mov\"\n self.log.debug((\n \"`ext` attribute not in output profile \\\"{}\\\".\"\n \" Setting to default ext: `mov`\"\n ).format(name))\n\n if isinstance(repre[\"files\"], list):\n collections, remainder = clique.assemble(repre[\"files\"])\n\n full_input_path = os.path.join(\n staging_dir,\n collections[0].format(\"{head}{padding}{tail}\")\n )\n filename = collections[0].format('{head}')\n if filename.endswith(\".\"):\n filename = filename[:-1]\n else:\n full_input_path = os.path.join(staging_dir, repre[\"files\"])\n filename = repre[\"files\"].split(\".\")[0]\n\n # prepare output file\n repr_file = filename + \"_{0}.{1}\".format(name, ext)\n out_stagigng_dir = tempfile.mkdtemp(prefix=\"extract_review_\")\n full_output_path = os.path.join(out_stagigng_dir, repr_file)\n\n self.log.info(\"input {}\".format(full_input_path))\n self.log.info(\"output {}\".format(full_output_path))\n\n repre_new = repre.copy()\n\n new_tags = [x for x in tags if x != \"delete\"]\n p_tags = profile.get(\"tags\", [])\n self.log.info(\"p_tags: `{}`\".format(p_tags))\n\n for _tag in p_tags:\n if _tag not in new_tags:\n new_tags.append(_tag)\n\n self.log.info(\"new_tags: `{}`\".format(new_tags))\n\n input_args = []\n\n # overrides output file\n input_args.append(\"-y\")\n\n # preset's input data\n input_args.extend(profile.get(\"input\", []))\n\n # necessary input data\n # adds start arg only if image sequence\n if isinstance(repre[\"files\"], list):\n input_args.extend([\n \"-start_number {}\".format(start_frame),\n \"-framerate {}\".format(fps)\n ])\n\n input_args.append(\"-i {}\".format(full_input_path))\n\n output_args = []\n # preset's output data\n output_args.extend(profile.get(\"output\", []))\n\n if isinstance(repre[\"files\"], list):\n # set length of video by len of inserted files\n video_len = len(repre[\"files\"])\n else:\n video_len = repre[\"frameEnd\"] - repre[\"frameStart\"] + 1\n output_args.append(\n \"-frames {}\".format(video_len)\n )\n\n # letter_box\n lb_string = (\n \"-filter:v \"\n \"drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,\"\n 
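# the first drawbox fills the top band; the next one fills the bottom band of the letterbox\n                    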
\"drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:\"\n \"round((ih-(iw*(1/{0})))/2):t=fill:c=black\"\n )\n letter_box = profile.get(\"letter_box\", None)\n if letter_box:\n output_args.append(lb_string.format(letter_box))\n\n # output filename\n output_args.append(full_output_path)\n\n ffmpeg_path = pype.lib.get_ffmpeg_tool_path(\"ffmpeg\")\n mov_args = [\n ffmpeg_path,\n \" \".join(input_args),\n \" \".join(output_args)\n ]\n subprcs_cmd = \" \".join(mov_args)\n\n # run subprocess\n self.log.debug(\"Executing: {}\".format(subprcs_cmd))\n output = pype.api.subprocess(subprcs_cmd)\n self.log.debug(\"Output: {}\".format(output))\n\n # create representation data\n repre_new.update({\n \"name\": name,\n \"ext\": ext,\n \"files\": repr_file,\n \"stagingDir\": out_stagigng_dir,\n \"tags\": new_tags,\n \"outputName\": name,\n \"frameStartFtrack\": 1,\n \"frameEndFtrack\": video_len\n })\n # cleanup thumbnail from new repre\n if repre_new.get(\"thumbnail\"):\n repre_new.pop(\"thumbnail\")\n if \"thumbnail\" in repre_new[\"tags\"]:\n repre_new[\"tags\"].remove(\"thumbnail\")\n\n # adding representation\n self.log.debug(\"Adding: {}\".format(repre_new))\n # cleanup repre from preview\n if \"preview\" in repre:\n repre.pop(\"preview\")\n if \"preview\" in repre[\"tags\"]:\n repre[\"tags\"].remove(\"preview\")\n new_repres.append(repre_new)\n\n for repre in instance.data[\"representations\"]:\n if \"delete\" in repre.get(\"tags\", []):\n instance.data[\"representations\"].remove(repre)\n\n for repre in new_repres:\n self.log.debug(\"Adding repre: \\\"{}\\\"\".format(\n repre\n ))\n instance.data[\"representations\"].append(repre)\n","sub_path":"pype/plugins/standalonepublisher/publish/extract_review.py","file_name":"extract_review.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553360897","text":"import numpy as np\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.grid_search import GridSearchCV\r\nfrom sklearn.cross_validation import train_test_split\r\ntry:\r\n import cPickle as pickle\r\nexcept ImportError:\r\n import pickle\r\n\r\n\r\n# 载入所有特征,每个特征是一个dict\r\nfeatures_name = [\\\r\n 'face_color_hist','face_color_rect','face_gray_texture','face_power','face_lbp',\\\r\n 'tongue_color_hist','tongue_color_rect','tongue_gray_texture','tongue_lbp','tongue_power',\\\r\n 'face_block_color_hist','face_block_color_rect','face_block_gray_texture','face_block_lbp','face_block_power']\r\nfeatures_set = {}\r\npath = r'B:\\DeskTop\\SRP中医体质辨识\\体质辨识数据备份\\origin'\r\nfor feature_name in features_name:\r\n f = open(path+'\\\\'+feature_name,'rb')\r\n data = pickle.load(f)\r\n features_set[feature_name]=data\r\n# 载入所有患者ID 和 标签\r\nf = open(path + r\"\\labels\",'rb')\r\ndata = pickle.load(f)\r\n# 改变label表示方式\r\npatient_list=[]\r\nfor d in data:\r\n p = []\r\n p.append(d[0])\r\n p.append(d[1])\r\n patient_list.append(p)\r\npatient_list = np.array(patient_list)\r\nnp.random.shuffle(patient_list)\r\n# 获得特征子集和label子集\r\nfeatures = []\r\nlabels = []\r\nfor patient in patient_list:\r\n ID = patient[0]\r\n feature = []\r\n flag = True\r\n for i in features_name:\r\n if ID in features_set[i].keys():\r\n new_feature = features_set[i][ID]\r\n if (type(new_feature) != list):\r\n new_feature = new_feature.tolist()\r\n feature += new_feature\r\n else:\r\n flag = False\r\n break\r\n if flag:\r\n labels.append(patient[1])\r\n features.append(feature)\r\nfeatures = np.array(features)\r\nlabels = 
np.array(labels,dtype=np.int8)\r\nprint(len(features))\r\n# Normalize the features\r\nfeatures = (features - np.mean(features)) / (np.std(features, axis=0) + 1)\r\n# Split the dataset; evaluate over several cross-validation runs\r\ntrain_features,test_features,train_labels,test_labels = train_test_split(features,labels)\r\nmeasures = np.zeros(1)\r\n# Set the classifier parameters\r\n# cw = {}\r\n# max_labels_num = np.max(np.bincount(train_labels))\r\n# for type in range(1, 10):\r\n#     cw[type] = max_labels_num / np.bincount(train_labels)[type]\r\n# grid_search\r\nturned_parameter = {'alpha': 10.0 ** -np.arange(1, 7),'solver': ['lbfgs', 'sgd', 'adam'],\\\r\n                    'hidden_layer_sizes':[(100),(100,100),(100,100,100),(100,100,100,100)]}\r\nfrom sklearn.neural_network import MLPClassifier\r\nclf = GridSearchCV(MLPClassifier(),turned_parameter,cv=5)\r\nclf.fit(train_features,train_labels)\r\nprint(clf.best_params_,clf.best_score_)\r\n# Validate the grid-searched classifier\r\npredict_label = clf.predict(test_features)\r\nprint(np.bincount(predict_label))\r\n\r\n# for i in [1000]:\r\n#     svm = SVC(class_weight=\"balanced\",C=i)\r\n#     svm.fit(train_features,train_labels)\r\n#     predict_label = svm.predict(test_features)\r\n#     # accuracy on this test set\r\n#     acc = np.sum(test_labels == predict_label) / len(predict_label)\r\n#     print(i,acc)\r\n#     print(np.bincount(predict_label))\r\n#     print(np.bincount(test_labels))\r\n","sub_path":"grid/single_label_svm_grid.py","file_name":"single_label_svm_grid.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4997631","text":"import turtle\nimport random\nimport time\nscreen=turtle.Screen()\ntrtl=turtle.Turtle()\nscreen.setup(420,320)\n#screen.bgpic('bg.gif')\ntrtl.shape('turtle')\ntrtl.color('darkgoldenrod','black')\ns=10\ntrtl.penup()\ntrtl.setpos(30,30)\nfor i in range(28):\n    s=s+2\n    trtl.stamp()\n    trtl.forward(s)\n    trtl.right(25)\n    time.sleep(0.25) # pause a quarter of a second between stamps","sub_path":"stampexp.py","file_name":"stampexp.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148754876","text":"from tensorflow import keras\nfrom tensorflow.python.keras.constraints import max_norm\nimport tensorflow as tf\nimport numpy as np\n\noptimizer_map = {\n    'sgd': tf.train.GradientDescentOptimizer,\n    'adam': tf.train.AdamOptimizer,\n}\n\nactivation_fn_map = {\n    'tanh': tf.nn.tanh,\n    'relu': tf.nn.relu,\n    'sigmoid': tf.nn.sigmoid,\n    'softsign': tf.nn.softsign,\n    'softplus': tf.nn.softplus,\n    'leaky_relu': tf.nn.leaky_relu,\n    'hard_sigmoid': 'hard_sigmoid',\n    'selu': tf.nn.selu,\n    'relu6': tf.nn.relu6,\n    'elu': tf.nn.elu,\n    'linear': 'linear'\n}\n\n\ndef load(architecture, activation_fn, optimizer, learning_rate, input_size, output_size, max_norm_weights=False,\n         initial_bias=0.0):\n    input_layer = keras.layers.Input((input_size,))\n    clayer = input_layer\n    for n in architecture:\n        clayer = keras.layers.Dense(n,\n                                    activation=activation_fn_map[activation_fn],\n                                    kernel_initializer=keras.initializers.TruncatedNormal(mean=0.0,\n                                                                                          stddev=1 / np.sqrt(float(n)),\n                                                                                          seed=None),\n                                    bias_initializer=keras.initializers.Constant(value=float(initial_bias)),\n                                    kernel_constraint=(max_norm(max_value=float(max_norm_weights))\n                                                       if max_norm_weights else None)\n                                    )(clayer)\n    output_layer = keras.layers.Dense(output_size, activation='softmax')(clayer)\n\n    model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n    optimizer = optimizer_map[optimizer](learning_rate=learning_rate)\n\n    
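# categorical_crossentropy expects one-hot targets, matching the softmax output layer above\n    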
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n return model\n","sub_path":"deep_bottleneck/models/feedforward.py","file_name":"feedforward.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"155591166","text":"# Design steps\n# Main loop\n# Article Chain\n# Limit rate of requests\n# Print output\n\n# Request code\n# Make requests\n# Find links\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib\nimport time\n\narticle_chain = [\"https://en.wikipedia.org/wiki/Dickie_Jeeps\"]\ntarget_url = \"https://en.wikipedia.org/wiki/Greek_language\"\ndef continue_crawl(search_history, target_url, max_step=25):\n if search_history[-1] == target_url:\n print(\"Found the target article\")\n return False\n elif len(search_history) > max_step:\n print(\"The search has exceeded more than 25 urls; aborting...\")\n return False\n elif search_history[-1] in search_history[-1] in search_history[:-1]: \n print(\"Stuck in loop, already encountered the same url; aborting...\")\n return False\n else:\n return True\n\ndef find_first_link(url):\n # download html of last article in article_chain\n response = requests.get(url)\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n content_div = soup.find(id='mw-content-text').find(class_=\"mw-parser-output\")\n first_relative_link = None\n for element in content_div.find_all(\"p\", recursive=False):\n if element.find(\"a\", recursive=False):\n first_relative_link = element.find(\"a\", recursive=False).get('href')\n break\n if not first_relative_link:\n return\n\n domain = url.split(\"//\")[0] + \"//\" + url.split(\"//\")[-1].split(\"/\")[0]\n first_link = domain + str(first_relative_link)\n return first_link\n\n\nwhile continue_crawl(article_chain, target_url):\n \t# find the first link in that html\n fst_url = find_first_link(article_chain[-1])\n # add the first link to article_chain\n article_chain.append(fst_url)\n print(fst_url)\n # delay for about two seconds\n time.sleep(2)\n","sub_path":"website_crawler/WikipediaCrawler.py","file_name":"WikipediaCrawler.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"278442632","text":"import os\nimport csv\nfrom progress.bar import Bar as ProgressBar\n\n\ndef dicts_to_csv(file_name, l):\n fields = set().union(*(d.keys() for d in l))\n with open(file_name, 'w') as f:\n writer = csv.DictWriter(f, fieldnames=fields)\n writer.writeheader()\n msg = 'Saving %s' % os.path.basename(file_name)\n progress_bar = ProgressBar(msg, max=len(l))\n for el in l:\n row = {}\n for field in fields:\n try:\n row[field] = el.get(field, None)\n except KeyError as ex:\n raise ex\n if row.keys():\n writer.writerow(row)\n progress_bar.next()\n f.close()\n print()\n return f\n","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496033910","text":"\n# coding: utf-8\n\n\n# In[3]:\n\n\nfrom sandbox.rf import *\nfrom utils.dataset import StationInvariantKddDataset\nimport pandas as pd\nimport datetime\nimport sys\n\n\n# In[28]:\n\n\ndef getDataFrame(city, date, random_state=0, use_pred=True):\n dataset = torch.load(f'data/dataset_{city}.pt')\n print(len(dataset))\n dataset.T = 7\n stations = dataset.stations\n\n x = np.vstack(\n [np.concatenate(\n 
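# --- Editor's note: illustrative sketch, not part of the dataset records ---
# In the WikipediaCrawler.py record above, the loop guard
#     search_history[-1] in search_history[-1] in search_history[:-1]
# is a chained comparison: (s[-1] in s[-1]) and (s[-1] in s[:-1]). For strings
# the first clause is always True (every string contains itself), so the code
# works by accident; the direct membership test states the intent:
def stuck_in_loop(search_history: list) -> bool:
    """True when the newest URL was already visited earlier in the chain."""
    return search_history[-1] in search_history[:-1]

assert stuck_in_loop(["a", "b", "a"]) is True
assert stuck_in_loop(["a", "b", "c"]) is False
# ---------------------------------------------------------------------------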
[dataset.get_data(idx, date).aq.flatten(),\n np.mean(dataset.get_data(idx, date).meo_pred, axis=(2, 3)).flatten()]\n )]\n for idx in range(len(stations)))\n print(x.shape)\n x = np.nan_to_num(x)\n\n model = getRandomForestModel(date, city, random_state, use_pred)\n y_hat = model.predict(x)\n y_hat = y_hat.reshape(y_hat.shape[0], 24 * 2, -1)\n y_hat = y_hat * dataset.aq_std + dataset.aq_mean\n\n numStations = len(stations)\n numFeatures = 3 if city == 'bj' else 2\n metrics = ['PM2.5', 'PM10']\n if city == 'bj':\n metrics.append('O3')\n\n y_hat = y_hat.reshape((numStations * 48, numFeatures))\n y_hat[y_hat < 0] = 0\n\n df = pd.DataFrame(y_hat, columns=metrics)\n df.insert(0, 'test_id', [station + '#' + str(i)\n for station in stations for i in range(48)])\n\n return df\n\n\n# In[31]:\n\n\ndate_to_forc = pd.Timestamp(\n datetime.datetime.utcnow().date()) + pd.Timedelta(1, unit='d')\nm_d_str = date_to_forc.strftime('%m%d')\n\n\n# In[34]:\n\n\ndef genCSV(random_state=0, date=pd.Timestamp(datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:00:00\"))):\n bj_df = getDataFrame('bj', date, random_state)\n ld_df = getDataFrame('ld', date, random_state)\n\n df = pd.concat([bj_df, ld_df])\n submission = df.reindex(columns=['test_id', 'PM2.5', 'PM10', 'O3'])\n submission = submission.reset_index(drop=True)\n\n filename = 'results/submit_' + date.strftime('%m%d%H%M') + '_' + str(random_state) + '.csv'\n submission.to_csv(filename, index=None)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n genCSV(int(sys.argv[1]))\n else:\n genCSV(0)\n","sub_path":"sandbox/genCSV.py","file_name":"genCSV.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455125396","text":"#Code:\n\nscore=[]\nfor i in range(int(input())):\n a = sum(list(map(int , input().split())))\n score.append(a)\n\nthomas= score[0]\nscore.sort(reverse=True)\nrank= score.index(thomas)\nprint(rank+1)\n","sub_path":"1017A - The Rank.py","file_name":"1017A - The Rank.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71762852","text":"def solve(cad1, cad2):\n cad1 = set(cad1)\n cad2 = list(cad2)\n while len(cad1) > 0:\n if cad1.pop() in cad2:\n return 'YES'\n return 'NO'\n\nif __name__ == '__main__':\n # cant = int(input())\n # for i in range(cant):\n # print(solve(input(), input()))\n assert 'YES' == solve('hello','world'), '1 oops'\n assert 'NO' == solve('hi', 'world'), '2 oops'\n","sub_path":"HackerRank/Algorithms/Strings/twoStrings/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281351069","text":"from flask import Flask\n\n'''\nCreate an app by initializing components.\n'''\n\n\ndef _initialize_blueprints(application):\n #  Register Flask blueprints\n\n from app.views.books import books\n application.register_blueprint(books, url_prefix='/core/api/v1')\n\n from app.views.libraries import libraries\n application.register_blueprint(libraries, url_prefix='/core/api/v1')\n\n from app.views.users import users\n application.register_blueprint(users, url_prefix='/core/api/v1')\n\n from app.views.misc import misc\n application.register_blueprint(misc, url_prefix='/core/api/v1')\n\n\ndef _initialize_database(application):\n # Set up the database\n\n from app.models.database import db\n\n 
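# --- Editor's note: illustrative sketch, not part of the dataset records ---
# solve() in the HackerRank "Two Strings" record above pops characters from a
# set and scans a list. Set intersection expresses the same question -- do the
# two strings share any character? -- directly:
def solve_fast(cad1: str, cad2: str) -> str:
    return "YES" if set(cad1) & set(cad2) else "NO"

assert solve_fast("hello", "world") == "YES"
assert solve_fast("hi", "world") == "NO"
# ---------------------------------------------------------------------------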
application.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///../../data/alexandria.db\"\n application.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n application.config[\"SQLALCHEMY_ECHO\"] = True # Debug purpose\n\n db.init_app(application)\n\n\ndef _initialize_application(application):\n application.config[\"JSON_SORT_KEYS\"] = False # Disable jsonify ordering\n\n\ndef _initialize_errorhandlers(application):\n # Initialize error handlers\n\n from app.errors import errors\n application.register_blueprint(errors)\n\n\ndef create_app():\n application = Flask(__name__)\n\n # application.config['JSON_AS_ASCII'] = False\n\n _initialize_blueprints(application)\n\n _initialize_database(application)\n\n _initialize_errorhandlers(application)\n\n _initialize_application(application)\n\n # Do it!\n return application\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583495519","text":"import argparse\nimport csv\nimport json\nimport os\n\nfrom loguru import logger\nfrom tqdm import tqdm\n\n\ndef read_sessions_from_training_file(training_file: str, K: int = None):\n user_sessions = []\n current_session_id = None\n current_session = []\n prods = set()\n urls = set()\n with open(training_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for idx, row in enumerate(reader):\n\n # print(row['product_action'], row)\n # if idx > 20:\n # break\n\n # if a max number of items is specified, just return at the K with what you have\n if K and idx >= K:\n break\n # just append \"detail\" events in the order we see them\n # row will contain: session_id_hash, product_action, product_sku_hash\n _session_id_hash = row[\"session_id_hash\"]\n # when a new session begins, store the old one and start again\n if (\n current_session_id\n and current_session\n and _session_id_hash != current_session_id\n and len(current_session) > 1\n ):\n user_sessions.append(current_session)\n # reset session\n current_session = []\n # check for the right type and append\n if row[\"event_type\"] in [\"detail\", \"add\", \"purchase\"]:\n prods.add(row[\"product_sku_hash\"])\n current_session.append(row[\"product_sku_hash\"])\n\n elif row[\"product_sku_hash\"] == \"\":\n\n current_session.append(row[\"hashed_url\"])\n urls.add(row[\"hashed_url\"])\n\n # update the current session id\n current_session_id = _session_id_hash\n\n # print how many sessions we have...\n print(\"# total sessions: {}\".format(len(user_sessions)))\n # print first one to check\n print(\"First session is: {}\".format(user_sessions[0]))\n # assert user_sessions[0][0] == 'd5157f8bc52965390fa21ad5842a8502bc3eb8b0930f3f8eafbc503f4012f69c'\n # assert user_sessions[0][-1] == '63b567f4cef976d1411aecc4240984e46ebe8e08e327f2be786beb7ee83216d0'\n\n return user_sessions, prods, urls\n\n\ndef get_statistic(sessions, skus, urls, max_shift=10, max_k=40):\n\n predictor = dict()\n\n last_url = None\n shift = 1\n\n for session in sessions:\n\n for el in session:\n # print(el in skus, last_url, )\n if el in skus and last_url is not None and shift < max_shift:\n if last_url in predictor.keys():\n\n if el in predictor[last_url].keys():\n predictor[last_url][el] += 1 / shift\n else:\n predictor[last_url][el] = 1 / shift\n else:\n predictor[last_url] = dict()\n predictor[last_url][el] = 1 / shift\n\n shift += 1\n\n elif el in urls:\n last_url = el\n shift = 1\n\n final_predictor = {}\n\n for el in predictor.keys():\n 
final_predictor[el] = [\n x[0] for x in sorted(predictor[el].items(), key=lambda x: x[1], reverse=True)\n ][:max_k]\n\n logger.info(f\"num of keys in predictor is {len(final_predictor)}\")\n return final_predictor\n\n\ndef get_sessions_from_test(PATH):\n\n with open(PATH) as json_file:\n # read the test cases from the provided file\n test_queries = json.load(json_file)\n # loop over the records and predict the next event\n\n # all_skus = list(model.index_to_key)\n test_s = []\n\n skus = set()\n for idx, t in tqdm(enumerate(test_queries)):\n\n items = [\n x[\"product_sku_hash\"] if x[\"product_sku_hash\"] is not None else x[\"hashed_url\"]\n for x in t[\"query\"]\n ]\n if len(items) >= 2:\n test_s.append(items)\n\n for x in t[\"query\"]:\n if x[\"product_sku_hash\"] != \"\" and x[\"product_sku_hash\"] is not None:\n skus.add(x[\"product_sku_hash\"])\n\n return test_s, skus\n\n\ndef get_all_sessions_and_skus(train_session_file, test_file1, test_file2, top_k_sessions):\n sessions = []\n skus = set()\n\n new_sessions, new_skus = get_sessions_from_test(test_file1)\n\n sessions.extend(new_sessions)\n for sku in new_skus:\n skus.add(sku)\n logger.info(f\"num of sessions after the first test file is {len(sessions)}\")\n logger.info(f\"num of skus after the first test file is {len(skus)}\")\n\n if test_file2 is not None:\n new_sessions, new_skus = get_sessions_from_test(test_file2)\n\n sessions.extend(new_sessions)\n for sku in new_skus:\n skus.add(sku)\n logger.info(f\"num of sessions after the second test file is {len(sessions)}\")\n logger.info(f\"num of skus after the first test file is {len(skus)}\")\n else:\n logger.info(\"the second test file is not provided\")\n\n new_sesisons, new_skus, urls = read_sessions_from_training_file(\n train_session_file, K=top_k_sessions\n )\n\n sessions.extend(new_sesisons)\n for sku in new_skus:\n skus.add(sku)\n\n return sessions, skus, urls\n\n\ndef main(args):\n\n sessions, skus, urls = get_all_sessions_and_skus(\n train_session_file=args.train_session_file,\n test_file1=args.test_file1,\n test_file2=args.test_file2,\n top_k_sessions=args.top_k_sessions,\n )\n\n logger.info(f\"num of sessions after the second test file is {len(sessions)}\")\n logger.info(f\"num of skus after the first test file is {len(skus)}\")\n logger.info(f\"num of urls after the second test file is {len(urls)}\")\n\n final_predictor = get_statistic(sessions=sessions, skus=skus, urls=urls)\n\n # dump to file\n with open(args.predictor_path, \"w\") as outfile:\n json.dump(final_predictor, outfile, indent=2)\n\n\ndef createParser():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--train_session_file\", type=str, default=\"../../sigir_data/train/browsing_train.csv\",\n )\n parser.add_argument(\"--test_file1\", type=str, default=\"../../sigir_data/rec_test_phase_1.json\")\n parser.add_argument(\n \"--test_file2\", type=str, default=\"../../sigir_data/local_test_phase_2.json\"\n )\n parser.add_argument(\"--predictor_path\", type=str, default=\"search_predictor.json\")\n parser.add_argument(\"--top_k_sessions\", type=int)\n\n return parser\n\n\nif __name__ == \"__main__\":\n\n parser = createParser()\n args, _ = parser.parse_known_args()\n\n if args.top_k_sessions == -1:\n args.top_k_sessions = None\n main(args)\n","sub_path":"submission/create_url_sku_dict.py","file_name":"create_url_sku_dict.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343876793","text":"# -*- coding: 
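# --- Editor's note: illustrative sketch, not part of the dataset records ---
# create_url_sku_dict.py above ranks each URL's SKU scores and keeps the top
# max_k. The same step in isolation -- turn a {item: score} dict into a list
# ordered by descending score (all names here are illustrative):
def top_k(scores: dict, k: int) -> list:
    ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
    return [item for item, _ in ranked][:k]

assert top_k({"a": 1.0, "b": 3.0, "c": 2.0}, 2) == ["b", "c"]
# ---------------------------------------------------------------------------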
utf-8 -*-\n\"\"\"\nCreated on Tue Nov 12 19:30:05 2019\n\n@author: thulfiqar\n\"\"\"\n\nimport numpy as np\nimport cv2\n\n\nimg = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)\n\ncv2.imshow('original image',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncv2.imwrite(\"gray-scale image.png\", img)\n\n# https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#bitwise-not\nimgN = cv2.bitwise_not(img)\ncv2.imshow('negative image',imgN)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncv2.imwrite(\"negative image.png\", imgN)\n\n# https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/\ndef adjust_gamma(image, gamma=1.0):\n\t# build a lookup table mapping the pixel values [0, 255] to\n\t# their adjusted gamma values\n\tinvGamma = 1.0 / gamma\n\ttable = np.array([((i / 255.0) ** invGamma) * 255\n\t\tfor i in np.arange(0, 256)]).astype(\"uint8\")\n \n\t# apply gamma correction using the lookup table\n\treturn cv2.LUT(image, table)\n\ngamma = 1.5\nadjusted = adjust_gamma(img, gamma)\n\ncv2.imshow('negative image',adjusted)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\ncv2.imwrite(\"gamma adjusted image.png\", adjusted)\n\n\n\n\n","sub_path":"exp 3/gray-level transformation.py","file_name":"gray-level transformation.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"425152231","text":"from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QVBoxLayout\n\nfrom browser.models.bars import action_bar\n\n\nclass TabContent(QWidget):\n\n def __init__(self, browser, web_page, browser_tabs):\n super().__init__()\n\n self.layout = QVBoxLayout(self)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.web_page = web_page\n\n bar = action_bar.ActionBar(browser, web_page)\n bar.setMaximumHeight(50)\n\n self.layout.addWidget(bar)\n self.layout.addWidget(browser)\n self.tabs = browser_tabs\n\n web_page.titleChanged.connect(self._update_title)\n web_page.iconChanged.connect(self._update_icon)\n\n def _update_title(self):\n self.tabs.setTabText(self.tabs.currentIndex(), self.web_page.title())\n\n def _update_icon(self):\n self.tabs.setTabIcon(self.tabs.currentIndex(), self.web_page.icon())\n","sub_path":"browser/models/tabs/tab_content.py","file_name":"tab_content.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"509080600","text":"#!/usr/bin/env python\n\n\"\"\"\n@package ion.agents.platform.test.helper\n@file ion/agents/platform/test/helper.py\n@author Carlos Rueda\n@brief Definitions and functionality to facilitate common validations in tests.\n The expected structures and error responses are currently based on the\n CI-OMS interface. This scheme may of course need to be adjusted according\n to needed refactorings when CG or other platform networks are incorporated.\n\"\"\"\n\n__author__ = 'Carlos Rueda'\n__license__ = 'Apache 2.0'\n\n\nfrom ion.agents.platform.rsn.simulator.logger import Logger\nlog = Logger.get_logger()\n\nfrom ion.agents.platform.responses import NormalResponse, InvalidResponse\n\n\nclass HelperTestMixin:\n \"\"\"\n A mixin to facilitate common validations in tests.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Sets some various IDs from network.yml, which is used by the OMS\n simulator, and ad hoc values for testing.\n\n The PLAT_NETWORK environment variable can be used to faciliate\n testing against a smaller newtork. 
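# --- Editor's note: illustrative sketch, not part of the dataset records ---
# The gray-level transformation record above applies gamma correction via a
# cv2.LUT lookup table. The table itself is pure arithmetic -- intensity v in
# [0, 255] maps to 255 * (v / 255) ** (1 / gamma) -- shown here NumPy-only:
import numpy as np

def gamma_table(gamma: float) -> np.ndarray:
    inv = 1.0 / gamma
    return np.array([((i / 255.0) ** inv) * 255 for i in range(256)],
                    dtype=np.uint8)

if __name__ == "__main__":
    t = gamma_table(1.5)
    print(t[0], t[128], t[255])  # 0, a brightened midtone (~161), 255
# ---------------------------------------------------------------------------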
Possibe values include:\n PLAT_NETWORK=small - small network but with children\n PLAT_NETWORK=single - network with a single platform (no children)\n \"\"\"\n cls.PLATFORM_ID = 'Node1A'\n cls.SUBPLATFORM_IDS = ['MJ01A', 'Node1B']\n cls.ATTR_NAMES = ['input_voltage', 'Node1A_attr_2']\n cls.WRITABLE_ATTR_NAMES = ['Node1A_attr_2']\n cls.VALID_ATTR_VALUE = \"7\" # within the range\n cls.INVALID_ATTR_VALUE = \"9876\" # out of range\n\n cls.PORT_ID = 'Node1A_port_1'\n cls.INSTRUMENT_ID = 'Node1A_port_1_instrument_1'\n cls.INSTRUMENT_ATTR_NAME = 'maxCurrentDraw'\n cls.VALID_INSTRUMENT_ATTR_VALUE = 12345\n\n cls.INSTRUMENT_ATTRIBUTES_AND_VALUES = {\n 'maxCurrentDraw' : 12345,\n 'initCurrent' : 23456,\n 'dataThroughput' : 34567,\n 'instrumentType' : \"FOO_INSTRUMENT_TYPE\"\n }\n\n # PLAT_NETWORK: This env variable helps use a smaller network locally.\n import os\n plat_network_size = os.getenv('PLAT_NETWORK', None)\n if \"small\" == plat_network_size:\n #\n # small network but with children.\n #\n cls.PLATFORM_ID = 'Node1D'\n print(\"PLAT_NETWORK=small -> using base platform: %r\" % cls.PLATFORM_ID)\n cls.SUBPLATFORM_IDS = ['MJ01C']\n cls.ATTR_NAMES = ['input_voltage', 'Input Bus Current']\n cls.WRITABLE_ATTR_NAMES = ['Input Bus Current']\n\n cls.PORT_ID = 'Node1D_port_1'\n cls.INSTRUMENT_ID = 'Node1D_port_1_instrument_1'\n elif \"single\" == plat_network_size:\n #\n # network with just a single platform (no children).\n #\n cls.PLATFORM_ID = 'LJ01D'\n print(\"PLAT_NETWORK=single -> using base platform: %r\" % cls.PLATFORM_ID)\n cls.SUBPLATFORM_IDS = []\n cls.ATTR_NAMES = ['input_voltage', 'Input Bus Current']\n cls.WRITABLE_ATTR_NAMES = ['Input Bus Current']\n\n cls.PORT_ID = 'LJ01D_port_1'\n cls.INSTRUMENT_ID = 'LJ01D_port_1_instrument_1'\n else:\n print(\"PLAT_NETWORK undefined -> using base platform: %r\" % cls.PLATFORM_ID)\n\n def _verify_valid_platform_id(self, platform_id, dic):\n \"\"\"\n verifies the platform_id is the only entry in the dict with a\n valid value. Returns dic[platform_id].\n \"\"\"\n self.assertTrue(platform_id in dic)\n self.assertEquals(1, len(dic))\n val = dic[platform_id]\n self.assertNotEquals(InvalidResponse.PLATFORM_ID, val)\n return val\n\n def _verify_invalid_platform_id(self, platform_id, dic):\n \"\"\"\n verifies the platform_id is the only entry in the dict with a\n value equal to InvalidResponse.PLATFORM_ID.\n \"\"\"\n self.assertTrue(platform_id in dic)\n self.assertEquals(1, len(dic))\n val = dic[platform_id]\n self.assertEquals(InvalidResponse.PLATFORM_ID, val)\n\n def _verify_valid_attribute_id(self, attr_id, dic):\n \"\"\"\n verifies the attr_id is an entry in the dict with a\n valid value. 
Returns dic[attr_id].\n \"\"\"\n self.assertTrue(attr_id in dic, \"%s in %s\" %(attr_id, dic))\n val = dic[attr_id]\n self.assertIsInstance(val, (tuple, list))\n self.assertNotEquals(InvalidResponse.ATTRIBUTE_NAME, val)\n return val\n\n def _verify_invalid_attribute_id(self, attr_id, dic):\n \"\"\"\n verifies the attr_id is an entry in the dict with a\n value equal to InvalidResponse.ATTRIBUTE_NAME\n \"\"\"\n self.assertTrue(attr_id in dic)\n val = dic[attr_id]\n self.assertEquals(InvalidResponse.ATTRIBUTE_NAME, val,\n \"attr_id=%r, val=%r\" % (attr_id, val))\n\n def _verify_attribute_value_out_of_range(self, attr_id, dic):\n \"\"\"\n verifies the attr_id is an entry in the dict with a\n value equal to InvalidResponse.ATTRIBUTE_VALUE_OUT_OF_RANGE\n \"\"\"\n self.assertTrue(attr_id in dic)\n val = dic[attr_id]\n self.assertEquals(InvalidResponse.ATTRIBUTE_VALUE_OUT_OF_RANGE, val,\n \"attr_id=%r, val=%r\" % (attr_id, val))\n\n def _verify_not_writable_attribute_id(self, attr_id, dic):\n \"\"\"\n verifies the attr_id is an entry in the dict with a\n value equal to InvalidResponse.ATTRIBUTE_NOT_WRITABLE.\n \"\"\"\n self.assertTrue(attr_id in dic)\n val = dic[attr_id]\n self.assertEquals(InvalidResponse.ATTRIBUTE_NOT_WRITABLE, val,\n \"attr_id=%r, val=%r\" % (attr_id, val))\n\n def _verify_valid_port_id(self, port_id, dic):\n \"\"\"\n verifies the port_id is an entry in the dict with a\n valid value. Returns dic[port_id].\n \"\"\"\n self.assertTrue(port_id in dic)\n val = dic[port_id]\n self.assertNotEquals(InvalidResponse.PORT_ID, val)\n return val\n\n def _verify_invalid_port_id(self, port_id, dic):\n \"\"\"\n verifies the port_id is an entry in the dict with a\n value equal to InvalidResponse.PORT_ID.\n \"\"\"\n self.assertTrue(port_id in dic)\n val = dic[port_id]\n self.assertEquals(InvalidResponse.PORT_ID, val)\n\n def _verify_valid_instrument_id(self, instrument_id, dic):\n \"\"\"\n verifies the instrument_id is an entry in the dict with a valid value,\n either a dict or InvalidResponse.INSTRUMENT_ALREADY_CONNECTED.\n Returns dic[instrument_id].\n \"\"\"\n self.assertTrue(instrument_id in dic)\n val = dic[instrument_id]\n self.assertTrue(\n isinstance(val, dict) or\n val == InvalidResponse.INSTRUMENT_ALREADY_CONNECTED,\n \"%r: val should be a dict but is: %s\" % (\n instrument_id, str(val)))\n return val\n\n def _verify_invalid_instrument_id(self, instrument_id, dic):\n \"\"\"\n verifies the instrument_id is an entry in the dict with a\n value that is not a dict.\n \"\"\"\n self.assertTrue(instrument_id in dic)\n val = dic[instrument_id]\n self.assertFalse(isinstance(val, dict))\n\n def _verify_instrument_disconnected(self, instrument_id, result):\n \"\"\"\n verifies the result is equal to NormalResponse.INSTRUMENT_DISCONNECTED.\n \"\"\"\n expected = NormalResponse.INSTRUMENT_DISCONNECTED\n self.assertEquals(expected, result, \"instrument_id=%r: expecting %r but \"\n \"got result=%r\" % (instrument_id, expected, result))\n","sub_path":"ion/agents/platform/test/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"81014185","text":"\n\nfrom xai.brain.wordbase.nouns._curator import _CURATOR\n\n#calss header\nclass _CURATORS(_CURATOR, ):\n\tdef __init__(self,): \n\t\t_CURATOR.__init__(self)\n\t\tself.name = \"CURATORS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"curator\"\n\t\tself.jsondata = 
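# --- Editor's note: illustrative sketch, not part of the dataset records ---
# HelperTestMixin above shrinks its fixture when the PLAT_NETWORK environment
# variable is "small" or "single". That selection logic in isolation, with a
# safe default (the function name is illustrative):
import os

def network_profile() -> str:
    value = os.getenv("PLAT_NETWORK", "")
    return value if value in ("small", "single") else "full"

if __name__ == "__main__":
    print(network_profile())  # "full" unless PLAT_NETWORK says otherwise
# ---------------------------------------------------------------------------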
{}\n","sub_path":"xai/brain/wordbase/nouns/_curators.py","file_name":"_curators.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218327286","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2020 Jarosław Stańczyk \nSource code presented in the lectures \"Python programming language\"\n\n15/basic.py\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QToolTip, QPushButton, QMessageBox\nfrom PyQt5.QtGui import QFont\n\n\nclass MainWindow(QWidget):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(QWidget, self).__init__(*args, **kwargs)\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\tQToolTip.setFont(QFont('SansSerif', 10))\n\t\tself.setToolTip('This is window of our app.')\n\n\t\tbtn = QPushButton(\"Click me if you dare.\", self)\n\t\tbtn.setToolTip('This is a button for our gooey.')\n\t\tbtn.resize(btn.sizeHint())\n\t\tbtn.move(50, 50)\n\n\t\tqbtn = QPushButton(\"or just quit\", self)\n\t\tqbtn.clicked.connect(self.close)\n\t\tqbtn.resize(qbtn.sizeHint())\n\t\tqbtn.move(100, 100)\n\n\t\tself.setGeometry(300, 300, 250, 150)\n\t\tself.setWindowTitle(\"My Awesome App\")\n\t\tself.show()\n\n\tdef closeEvent(self, event):\n\t\treply = QMessageBox.question(self, \"Message\", \"Are you sure?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\t\tif reply == QMessageBox.Yes:\n\t\t\tevent.accept()\n\t\telse:\n\t\t\tevent.ignore()\n\n\nif __name__ == \"__main__\":\n\tapp = QApplication(sys.argv)\n\twindow = MainWindow()\n\tapp.exec_()\n\n# eof.\n","sub_path":"15/03.basic.py","file_name":"03.basic.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"372362651","text":"import numpy as np \r\nimport cv2 \r\n \r\ndef face_detect(img): \r\n face_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\")\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n faces = face_cascade.detectMultiScale(gray,1.1,5,cv2.CASCADE_SCALE_IMAGE,(50,50),(100,100)) \r\n if(len(faces) > 0):\r\n print(\"CATCH!\")\r\n else :\r\n print(\"NOT YET!\")\r\n for faceRect in faces: \r\n x,y,w,h = faceRect\r\n color=[255,0,0]\r\n img[y:y+5,x:x+w]=color\r\n img[y+h:y+h+5,x:x+w]=color\r\n img[y:y+h,x:x+5]=color\r\n img[y:y+h,x+w:x+w+5]=color\r\n cv2.rectangle(img.astype(np.int32),(x,y),(x+w,y+h),(255,0,0),2,8,0) \r\n \r\n roi_gray = gray[y:y+h,x:x+w] \r\n roi_color = img[y:y+h,x:x+w] \r\n return img\r\n \r\n \r\n# img = cv2.imread(\"12644633_574341129379918_6543906287908249817_n.jpg\") \r\n# img, roi_gray, roi_color = face_detect(img)\r\n# print(roi_gray.shape)\r\n# cv2.imshow(\"img\",img) \r\n# cv2.waitKey (0) \r\n# cv2.destroyAllWindows() ","sub_path":"Face_Detection.py","file_name":"Face_Detection.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493534562","text":"## Rotate List\n\n# Example 1:\n# Input: head = [1,2,3,4,5], k = 2\n# Output: [4,5,1,2,3]\n\n# Example 2:\n# Input: head = [0,1,2], k = 4\n# Output: [2,0,1]\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n \n if head is None or head.next is None:\n return head\n \n N = 0\n ptr = head\n prev = None\n while ptr:\n N+=1\n prev = ptr\n 
ptr = ptr.next\n last_ptr = prev\n \n k = k%N\n # print(N, k, N-k)\n if k==0:\n return head\n \n idx=0\n ptr = head\n while idx cool\")\r\n\r\ndef sendmessage(request):\r\n if request.method == \"POST\":\r\n return HttpResponse(request)\r\n\r\n\r\ndef contact(request):\r\n\r\n # form = ContactForm()\r\n # return render(request,\r\n # \"index.html\",\r\n # {'form' : form } )\r\n # return HttpResponse(\"Hello,post action\")\r\n if request.method == \"POST\":\r\n form = ContactForm(request.POST)\r\n # check whether it's valid:\r\n data = request.POST.copy()\r\n name = data.get('name')\r\n age =data.get('age')\r\n text =data.get('text')\r\n nlp = spacy.load(\"en_core_web_sm\")\r\n doc = nlp(text)\r\n x='
<ul>&nbsp; &nbsp; &nbsp;'\r\n        txt=[]\r\n        pos=[]\r\n        dep=[]\r\n        for token in doc:\r\n            txt.append(token.text)\r\n            pos.append(token.pos_)\r\n            dep.append(token.dep_)\r\n        #\r\n        # x += '
<li>'+( token.text+'=>'+token.pos_+'=>'+ token.dep_+\r\n        #               '
</li>')\r\n        # x+='</ul>
    '\r\n\r\n return render(request,\r\n \"index2.html\",\r\n {'txt' : txt,\r\n 'pos':pos,\r\n 'dep':dep,\r\n 'name':name,\r\n 'age':age })\r\n\r\n else:\r\n return HttpResponse('not')\r\n\r\n @register.filter\r\n def index(List, i):\r\n return List[int(i)]\r\n\r\n @register.filter\r\n def entry_num_array(List):\r\n return range(len(List))\r\n \"\"\"\"\r\n {% for counter1 in txt|entry_num_array %}\r\n \r\n {{ txt|index:counter1 }}\r\n {{ pos|index:counter1 }}\r\n {{ dep|index:counter1 }}\r\n \r\n\r\n {% endfor %}\r\n \"\"\"\r\n\r\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"187179092","text":"import cv2\nimport math\nfrom sensors import Camera, GPS\nfrom typing import List, Tuple\nfrom devices import RaspberryPi\n\nfrom .coordinate import Coordinate\nfrom .entity import Entity\nfrom .line import Line\n\n\nclass Vigilate:\n def __init__(self, pi: RaspberryPi = None, entrance_line: int = 800, exit_line: int = 1200, preview: bool = True):\n self.entrance = Coordinate(0, entrance_line)\n self.exit = Coordinate(0, exit_line)\n self.line_thickness = 2 # If object is within n pixels of line, increase counter\n\n # Sensors\n # self.camera = Camera(0) # Webcam\n #self.camera = Camera('media/vertical.mp4')\n self.camera = Camera('media/complex.mp4')\n\n # Devices\n self.pi: RaspberryPi = pi\n\n # Settings\n self.width = 1920\n self.height = 1080\n self.camera.set(3, self.width)\n self.camera.set(4, self.height)\n self.reference_frame = None\n self.entities: List[Entity] = []\n self.entity_max_radius: int = 100\n self.preview: bool = preview\n self.lines: List[Line] = []\n\n self.entrance_counter = 0\n self.exit_counter = 0\n self.min_contour_area = 5000\n self.max_contour_area = 200000\n self.binarization_threshold = 70\n\n # Lines\n #self.add_line(Coordinate(1000, 1000), Coordinate(self.width, 1000), (0, 255, 255))\n\n @staticmethod\n def distance(a: Coordinate, b: Coordinate) -> float:\n return math.sqrt(math.pow(a.x - b.x, 2) + math.pow(a.y - b.y, 2))\n\n def add_line(self, start: Coordinate, end: Coordinate, color):\n self.lines.append(Line(start, end, color))\n\n def active_entities(self):\n for entity in self.entities:\n if entity.active:\n yield entity\n\n def at_entrance(self, entity):\n return abs(entity.position().y - self.entrance.y) <= self.line_thickness\n\n def at_exit(self, entity):\n return abs(entity.position().y - self.exit.y) <= self.line_thickness\n\n @staticmethod\n def passed_line(line: Coordinate, entity: Entity) -> bool:\n has_before: bool = False\n has_after: bool = False\n\n for position in entity.positions():\n if position.y < line.y:\n has_after = True\n if position.y > line.y:\n has_before = True\n\n if has_before and has_after:\n return True\n\n return False\n\n @staticmethod\n def gray_scale(frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (21, 21), 0)\n return frame\n\n def new_or_nearest(self, position: Coordinate) -> Entity:\n nearest: Tuple[Entity, float] = (None, self.entity_max_radius + 1)\n\n # Find closest entity (if any)\n for entity in self.active_entities():\n distance = self.distance(position, entity.position())\n if distance <= self.entity_max_radius and distance < nearest[1]:\n nearest = (entity, distance)\n\n if nearest[0] is None:\n entity = Entity(position)\n self.entities.append(entity)\n return entity\n else:\n # Existing entity found. 
Update position\n nearest[0].update_position(position)\n return nearest[0]\n\n @staticmethod\n def filter_mask(fg_mask):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n\n # Fill any small holes\n closing = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)\n # Remove noise\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)\n\n # Dilate to merge adjacent blobs\n dilation = cv2.dilate(opening, kernel, iterations=2)\n\n return dilation\n\n def transmit_data(self):\n data: str = f'{self.exit_counter},{self.pi.gps.location()}'\n self.pi.lorawan.transmit(bytes(data))\n\n def start(self):\n # Skip first frames to let camera calibrate itself\n #for i in range(20):\n # self.camera.read()\n\n # Subtractor\n bg_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)\n # for i in range(300):\n # _, image = self.camera.read()\n # bg_subtractor.apply(image, None, 0.01)\n\n # Get frames from camera stream\n for i, frame in enumerate(self.camera.stream()):\n\n # Transmit data over LoRaWAN every 900th frame\n if i % 900 == 0 and self.pi is not None:\n self.transmit_data()\n\n # Gray scale\n # frame_gray = bg_subtractor.apply(frame, None, 0.01)\n\n # Set reference frame if none\n # if self.reference_frame is None:\n # self.reference_frame = frame_gray\n # continue\n\n # Subtract reference frame from image\n # frame_delta = cv2.absdiff(self.reference_frame, frame_gray)\n # _, frame_threshold = cv2.threshold(frame_delta, self.binarization_threshold, 255, cv2.THRESH_BINARY)\n\n # Dilate image\n # frame_threshold = cv2.dilate(frame_threshold, None, iterations=2)\n\n fg_mask = bg_subtractor.apply(frame, None, 0.01)\n fg_mask = Vigilate.filter_mask(fg_mask)\n\n # Find contours (objects)\n contours, _ = cv2.findContours(fg_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Display foreground masking frame\n #frame = fg_mask\n\n # Plot entrance and exit lines\n # cv2.line(frame, (0, self.entrance.y), (self.width, self.entrance.y), (255, 0, 0), self.line_thickness)\n cv2.line(frame, (0, self.exit.y), (self.width, self.exit.y), (255, 0, 255), self.line_thickness)\n\n # Check contours\n for contour in contours:\n # Rectangle information\n x, y, width, height = cv2.boundingRect(contour)\n\n # Ignore contours which are either too small or too large.\n if cv2.contourArea(contour) < self.min_contour_area or cv2.contourArea(contour) > self.max_contour_area:\n continue\n\n # Find object centroid\n centroid = Coordinate(int((x + x + width) / 2), int((y + y + height) / 2))\n\n # Create or find existing entity\n entity: Entity = self.new_or_nearest(centroid)\n\n # Draw rectangle\n cv2.rectangle(frame, (x, y), (x + width, y + width), entity.color, 2)\n\n if Vigilate.passed_line(self.exit, entity):\n entity.active = False\n self.exit_counter += 1\n continue\n\n # Draw history line\n previous_position: Coordinate = None\n for position in entity.positions():\n # First position\n if previous_position is None:\n previous_position = position\n continue\n\n cv2.line(frame, (previous_position.x, previous_position.y), (position.x, position.y), entity.color,\n 1)\n cv2.circle(frame, (position.x, position.y), 5, entity.color, 2)\n previous_position = position\n\n # Write stats on screen\n # cv2.putText(frame, f'Entrances: {self.entrance_counter}', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n # (250, 0, 1), 2)\n cv2.putText(frame, f'Counter: {self.exit_counter}', (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5,\n color=(255, 0, 0), thickness=2)\n\n # Display frame preview\n if self.preview:\n 
cv2.imshow('Monitor', frame)\n cv2.waitKey(1)\n\n # Cleanup\n cv2.destroyAllWindows()\n","sub_path":"vigilate/vigilate.py","file_name":"vigilate.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290540598","text":"'''Script to do a typical refinement programmatically\n'''\n\nimport os,sys\nsys.path.insert(0,'C:\\GSASii2021\\GSASII')\nimport GSASIIscriptable as G2sc\nworkdir = \"C:/Users/Conrad Gillard/Documents/Programming/PythonGSASII/WorkFol\"\ndatadir = \"C:/Users/Conrad Gillard/Documents/Programming/PythonGSASII\"\n\n# Load project with background already specified\ngpx = G2sc.G2Project('EBTL31_Fresh.gpx')\n# Immediately save project so that starting file is not modified by autosave on refinement\ngpx.save('EBTL31_InitialSave.gpx')\n\n# Assign histogram to a variable, for future use in this program\nhist1 = gpx.histogram(0)\n\n# Import phases and link them to the histogram\nphase0 = gpx.add_phase(os.path.join(datadir,\"Bi_EntryWithCollCode64703.cif\"),\n phasename=\"Bi\",\n histograms=[hist1])\n\nphase1 = gpx.add_phase(os.path.join(datadir,\"BiLi_EntryWithCollCode58796.cif\"),\n phasename=\"LiBi\",\n histograms=[hist1])\n\nphase2 = gpx.add_phase(os.path.join(datadir,\"Li2Te_EntryWithCollCode60434.cif\"),\n phasename=\"Li2Te\",\n histograms=[hist1])\n\n# Increase number of cycles to improve convergence\ngpx.data['Controls']['data']['max cyc'] = 8\n\n# Do Initial Refinement (keep background refining off)\ngpx.save('EBTL31_Programmatic_PFs.gpx')\nrefdict0 = {}\ngpx.do_refinements([refdict0])\n\n# Refine Phase fractions\nphase0.setPhaseEntryValue(['Histograms', 'PWDR EBTL31.fxye Bank 1', 'Scale'], [1.0, True])\nphase1.setPhaseEntryValue(['Histograms', 'PWDR EBTL31.fxye Bank 1', 'Scale'], [1.0, True])\nphase2.setPhaseEntryValue(['Histograms', 'PWDR EBTL31.fxye Bank 1', 'Scale'], [1.0, True])\ngpx.do_refinements([refdict0])\n\n# Refine Lattice Parameters\ngpx.save('EBTL_Programmatic_PFs_LPs.gpx')\nrefdict1 = {\"set\": {\"Cell\": True}} # set the cell flag (for all phases)\ngpx.set_refinement(refdict1)\ngpx.do_refinements([{}])\n\n# Refine Zero Offset\ngpx.save('EBTL_Programmatic_PFs_LPs_Zero.gpx')\ngpx.do_refinements([{\"set\": {'Instrument Parameters': ['Zero']}}])\n\n# Constrain Sum of Phase Fractions to Equal 1.0\ngpx.add_EqnConstr(1.0,('0:0:Scale', '1:0:Scale', '2:0:Scale'))\ngpx.do_refinements([refdict0])\n\n# Refine X instrument parameter\ngpx.save('EBTL_Programmatic_PFs_LPs_Zero_X.gpx')\ngpx.do_refinements([{\"set\": {'Instrument Parameters': ['X']}}])\n","sub_path":"ProgrammaticRefinementV3.py","file_name":"ProgrammaticRefinementV3.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293332903","text":"class AudioReader(object):\n '''\n Class that reads Wav format files\n Input:\n scp_path (str): a different scp file address\n sample_rate (int, optional): sample rate (default: 8000)\n chunk_size (int, optional): split audio size (default: 32000(4 s))\n least_size (int, optional): Minimum split size (default: 16000(2 s))\n Output:\n split audio (list)\n '''\n def __init__(self, scp_path, sample_rate=8000, chunk_size=32000, least_size=16000):\n super(AudioReader, self).__init__()\n self.sample_rate = sample_rate\n self.index_dict = handle_scp(scp_path)\n self.keys = list(self.index_dict.keys())\n self.audio = []\n self.chunk_size = chunk_size\n self.least_size = least_size\n self.split()\n 
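# --- Editor's note: illustrative sketch, not part of the dataset records ---
# Vigilate.passed_line above declares that an entity crossed a horizontal
# line once its position history contains points on both sides of the line's
# y coordinate. The predicate reduced to its essence:
def crossed_horizontal_line(line_y: int, ys: list) -> bool:
    return any(y < line_y for y in ys) and any(y > line_y for y in ys)

assert crossed_horizontal_line(100, [90, 95, 105]) is True
assert crossed_horizontal_line(100, [90, 95, 99]) is False
# ---------------------------------------------------------------------------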
def split(self):\n '''\n split audio with chunk_size and least_size\n '''\n print(len(self.keys))\n i=0\n for key in self.keys:\n i+=1\n print(i)\n utt = read_wav(self.index_dict[key])\n if utt.shape[0] < self.least_size:\n continue\n if utt.shape[0] > self.least_size and utt.shape[0] < self.chunk_size:\n gap = self.chunk_size-utt.shape[0]\n self.audio.append(F.pad(utt, (0, gap), mode='constant'))\n if utt.shape[0] >= self.chunk_size:\n start = 0\n while True:\n if start + self.chunk_size > utt.shape[0]:\n break\n self.audio.append(utt[start:start+self.chunk_size])\n start += self.least_size","sub_path":"dataloader/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498089711","text":"# -*-coding:utf-8-*-\nimport base64\nimport jieba\nimport jieba.analyse\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nimport nltk\nfrom textrank4zh import TextRank4Keyword\n\ndef isNone(para):\n if para is None or para == 'None':\n para = ''\n return para\n\ndef updateChoices(para, form, type):\n if para is None or para == 'None':\n para = ''\n elif para != '' and type == 'language':\n new_choices = [(para, para)]\n for i in form.language.choices:\n if i != (para, para):\n new_choices = new_choices + [i]\n form.language.choices = new_choices\n elif para != '' and type == 'style':\n new_choices = [(para, para)]\n for i in form.style.choices:\n if i != (para, para):\n new_choices = new_choices + [i]\n form.style.choices = new_choices\n elif para != '' and type == 'scene':\n new_choices = [(para, para)]\n for i in form.scene.choices:\n if i != (para, para):\n new_choices = new_choices + [i]\n form.scene.choices = new_choices\n elif para != '' and type == 'emotion':\n new_choices = [(para, para)]\n for i in form.emotion.choices:\n if i != (para, para):\n new_choices = new_choices + [i]\n form.emotion.choices = new_choices\n elif para != '' and type == 'theme':\n new_choices = [(para, para)]\n for i in form.theme.choices:\n if i != (para, para):\n new_choices = new_choices + [i]\n form.theme.choices = new_choices\n return para\n\ndef initNumber(number):\n if number is None:\n number = 15\n return number\n\ndef wordcloudImage(playlists_column, name):\n img_file = './app/static/img/_' + name + '_wordcloud.png'\n csv_file = './app/static/files/_' + name + '_wordcloud.csv'\n df = pd.DataFrame(data=list(playlists_column))\n df.to_csv(csv_file,index = False, header = False )\n text = open(csv_file,encoding='utf8').read()\n jieba.analyse.set_stop_words('./app/static/files/_stopwords.txt')\n # cut_text = \" \".join(jieba.cut(text))\n # 基于 TF-IDF 算法的关键词抽取\n cut_text = \" \".join(jieba.analyse.extract_tags(text,100))\n wordcloud = WordCloud(\n font_path=\"./app/static/fonts/Deng.ttf\",\n background_color=\"white\", width=1000, height=860,\n margin=2).generate(cut_text)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n wordcloud.to_file(img_file)\n with open(img_file, 'rb') as img_f:\n img_stream = img_f.read()\n img_stream = base64.b64encode(img_stream)\n return img_stream.decode()\n\ndef countPlaylists(playlists):\n cats = {}\n tags = {}\n for playlist in playlists:\n cat = playlist.playlist_cat\n if len(cat) == 0:\n cat = '其他'\n if cat in cats.keys():\n cats[cat] += 1\n else:\n cats[cat] = 1\n\n tag_split = playlist.playlist_tag.split(',')\n for tag in tag_split:\n if tag in tags.keys():\n tags[tag] += 1\n else:\n tags[tag] =1\n\n cat_sorted = 
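# --- Editor's note: illustrative sketch, not part of the dataset records ---
# AudioReader.split above zero-pads utterances shorter than chunk_size and
# slides a chunk_size window forward by least_size over longer ones. The same
# policy over a plain list (small sizes keep the assertions legible; note the
# original's strict ">" silently drops utterances of exactly least_size,
# while this sketch pads them):
def split_chunks(samples, chunk_size=8, least_size=4):
    n = len(samples)
    if n < least_size:
        return []                                  # too short: dropped
    if n < chunk_size:
        return [samples + [0] * (chunk_size - n)]  # padded to one chunk
    chunks, start = [], 0
    while start + chunk_size <= n:                 # sliding window
        chunks.append(samples[start:start + chunk_size])
        start += least_size
    return chunks

assert split_chunks(list(range(12))) == [list(range(8)), list(range(4, 12))]
assert split_chunks([1, 2, 3, 4, 5]) == [[1, 2, 3, 4, 5, 0, 0, 0]]
# ---------------------------------------------------------------------------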
sorted(cats.items(),key=lambda item:item[1],reverse = True)\n tags_sorted = sorted(tags.items(), key=lambda item: item[1], reverse=True)\n return cat_sorted,tags_sorted\n\ndef lyricTokenize(lyric):\n lyric = lyric.split('\\n')\n return lyric\n\ndef recommendPlaylist(musics, original_playlist_id):\n recommend_list = []\n for music in musics:\n playlist_ids = music.playlist_id.split(',')\n for playlist_id in playlist_ids:\n if playlist_id not in recommend_list and playlist_id != original_playlist_id:\n recommend_list.append(playlist_id)\n return recommend_list\n\ndef findKeywords(column,name):\n csv_file = './app/static/files/_' + name + '_analysis.csv'\n df = pd.DataFrame(data=list(column))\n df.to_csv(csv_file,index = False, header = False )\n text = open(csv_file,encoding='utf8').read()\n jieba.analyse.set_stop_words('./app/static/files/_stopwords.txt')\n cut_text = \" \".join(jieba.analyse.extract_tags(text,100))\n print(cut_text)\n tr4w = TextRank4Keyword()\n tr4w.analyze(text=cut_text, lower=True, window=3)\n keywords = [w['word'] for w in tr4w.get_keywords(num=10, word_min_len=2)]\n return keywords\n","sub_path":"Music website/app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"87834811","text":"\"\"\"\ncatan.py\n\nThis file mostly just holds the main function, which enters the game loop. This\nis the file that you will run when starting the game.\n\"\"\"\n\nfrom setup import *\nfrom board import *\nfrom developmentCardActions import *\nfrom buildFunctions import *\nfrom gameFunctions import *\nfrom tradeFunctions import *\nfrom player import Player\nfrom logger import *\n\nclass Catan():\n def __init__(self):\n pass\n\n def printHelp(self):\n '''\n Outputs a list of commands that a user can call during their turn.\n '''\n print(\"\\t-t is for trading, either with a player or with the bank.\")\n print(\"\\t-b is for building.\")\n print(\"\\t-d is for using a development card.\")\n print(\"\\t-e is for ending your turn.\")\n\n def tallyUsedDevCards(self,player):\n '''\n Gets the player's data of the development cards they used, and how many\n points they got from victory point development cards. Function then\n sees if the player had the largest army and/or longest road. \n Finally returns a string with all the collected data.\n '''\n #tallies the total number of develpoment cards a player used\n totalDict = {\n \"Knight\": 0,\n \"Year of Plenty\": 0,\n \"Monopoly\": 0,\n \"Victory Point\": 0,\n \"Road Building\": 0\n }\n finalString = \"\"\n tmp = []\n for card in player.usedDevCards:\n totalDict[card] +=1\n\n totalDict[\"Victory Point\"] += player.devCardDict[\"Victory Point\"]\n\n for amount in totalDict:\n tmp.append(totalDict[amount])\n\n knights = str(tmp[0])+ \" Knights, \"\n monopoly = str(tmp[1])+ \" Year of Plenty, \"\n yearOfPlenty = str(tmp[2])+ \" Monopoly, \"\n roads = str(tmp[4])+ \" Road Building. 
\"\n\n victoryPoints = \"They also had \" + str(tmp[3])+ \" points from Victory Point cards.\"\n if player.largestArmy == False:\n largestArmy = \"They DIDN'T HAVE the largest army.\"\n else:\n largestArmy = \"They HAVE the largest army.\"\n\n if player.longestRoad == False:\n longestRoad = \"They DIDN'T HAVE the longest road.\"\n else:\n longestRoad = \"The HAVE the longest road.\"\n\n\n\n finalString = \"They used \" + knights + monopoly + yearOfPlenty + roads + victoryPoints + largestArmy + longestRoad\n\n return finalString\n\n def rankPlayers(self, playerList):\n '''\n Ranks the players in order based on the amount of victory points they ended the game with\n '''\n winList = []\n for p in playerList:\n if p.longestRoad == True:\n p.points +=2\n if p.largestArmy == True:\n p.points +=2\n totalDevCards = tallyUsedDevCards(p)\n p.points += p.devCardDict[\"Victory Point\"]\n winList.append([p.points, p.name, totalDevCards])\n winList.sort()\n winList.reverse()\n winner = winList[0][1]\n return winList,winner\n\n def printVictory(winList,amount):\n \n '''\n Prints a visual representation of what happened in the game \n (for development purposes and visual feedback in the game)\n '''\n \n if amount == 2:\n print(\"\\t PLAYER \" + winList[0][1] + \" WINS WITH \" + str(winList[0][0])+ \" POINTS. \" + winList[0][2])\n print(\"\\t PLAYER \" + winList[1][1] + \" TOOK LAST WITH \" + str(winList[1][0])+ \" POINTS. \" + winList[1][2])\n if amount == 3:\n print(\"\\t PLAYER \" + winList[0][1] + \" WINS WITH \" + str(winList[0][0])+ \" POINTS. \"+ winList[0][2])\n print(\"\\t PLAYER \" + winList[1][1] + \" TOOK SECOND WITH \" + str(winList[1][0])+ \" POINTS. \"+ winList[1][2])\n print(\"\\t PLAYER \" + winList[2][1] + \" TOOK LAST WITH \" + str(winList[2][0])+ \" POINTS. \"+ winList[2][2])\n if amount == 4:\n print(\"\\t PLAYER \" + winList[0][1] + \" WINS WITH \" + str(winList[0][0])+ \" POINTS. \"+ winList[0][2])\n print(\"\\t PLAYER \" + winList[1][1] + \" TOOK SECOND WITH \" + str(winList[1][0])+ \" POINTS. \"+ winList[1][2])\n print(\"\\t PLAYER \" + winList[2][1] + \" TOOK THIRD WITH \" + str(winList[2][0])+ \" POINTS. \"+ winList[2][2])\n print(\"\\t PLAYER \" + winList[3][1] + \" TOOK LAST WITH \" + str(winList[3][0])+ \" POINTS. \"+ winList[3][2])\n\nif __name__ == \"__main__\":\n playerList,printBool = initializePlayers()\n devCardDeck = initializeDevCards()\n board = createBoard(printBool)\n\n # Setup Phase\n placeFirstSettlements(board, playerList)\n #As of 5/1/2019 we have to exit early in order to avoid conflicts because our intelligent system has some flaws that will break the game. 
\n for player in playerList:\n if player.name in board.robots:\n print()\n board.printBoard(board.print_bool)\n print(\"All robots have made their implemented moves.\")\n exit()\n\n # Game Phase\n currentPlayerIndex = 0\n playing = True\n while(playing):\n currentPlayer = playerList[currentPlayerIndex]\n board.printBoard(board.print_bool)\n\n # Roll the dice and resolve consequences of the dice roll\n roll = diceRoll()\n if board.print_bool:\n print()\n print(\"A \" + str(roll) + \" was rolled.\")\n if (roll == 7):\n # Player moves robber\n moveRobber(board, currentPlayer, playerList)\n\n for player in playerList:\n if player.numResources() > 7:\n halveHand(player, player.numResources(),board)\n else:\n handOutResources(board, playerList, roll)\n\n # Begin the action phase for the current player\n if currentPlayer.name in board.humans:\n if board.print_bool:\n print(\"Player \" + currentPlayer.name + \":\")\n elif currentPlayer.name in board.rando:\n if board.print_bool:\n print(\"Rando \" + currentPlayer.name + \":\")\n else:\n if board.print_bool:\n print(\"Robot \" + currentPlayer.name + \":\")\n currentPlayer.printHand(board.print_bool)\n\n # Keep track of what development cards the player obtains in their turn. They can't immediately use them.\n obtainedDevCards = {\n \"Knight\": 0,\n \"Year of Plenty\": 0,\n \"Monopoly\": 0,\n \"Road Building\": 0,\n \"Victory Point\": 0\n }\n # Allow commands\n notDone = True\n usedDevCard = False\n while(notDone):\n if board.print_bool:\n print()\n print(\"What would you like to do? Type a command, or -h for a list of commands.\")\n if currentPlayer.name in board.robots: #Robot\n command = currentPlayer.botStartTurn()\n currentPlayer.lastcommand = command\n currentPlayer.move = command\n if board.print_bool:\n print(\"Robot(\"+currentPlayer.name+\") does \"+command)\n elif currentPlayer.name in board.rando: #Rando\n command = currentPlayer.botStartTurn()\n currentPlayer.lastcommand = command\n currentPlayer.move = command\n if board.print_bool:\n print(\"Bot(\"+currentPlayer.name+\") does \"+command)\n else:\n command = input()\n\n if (command == \"-h\"):\n printHelp()\n elif (command == \"-t\"):\n if board.print_bool:\n print(\"\\tWho would you like to trade with? Enter the player's name or type \\\"bank\\\" if you would like to trade with the bank.\")\n if currentPlayer.name in board.rando or currentPlayer.name in board.robots:\n trader = \"Bank\"\n else:\n trader = input(\"\\t\")\n trader = trader.capitalize()\n\n if (trader == currentPlayer.name):\n print(\"\\tYou can't trade with yourself.\")\n elif (trader == \"Bank\"):\n # Trade with the bank\n bankTrade(board, currentPlayer,board.print_bool)\n elif (getPlayerFromName(playerList, trader) != None):\n # Trade with another player\n playerTrade(currentPlayer, getPlayerFromName(playerList, trader))\n else:\n if board.print_bool:\n print(\"\\tInvalid command.\")\n elif (command == \"-b\"):\n if board.print_bool:\n print(\"\\tWhat would you like to build? 
Type -c for a city, -s for a settlement, -r for a road, or -d for a development card.\")\n if currentPlayer.name in board.robots:\n toBuild = currentPlayer.botCommand(currentPlayer.lastcommand)\n currentPlayer.lastcommand = ''\n currentPlayer.move = toBuild\n if board.print_bool:\n print(\"Rando(\"+currentPlayer.name+\") does \"+ toBuild)\n elif currentPlayer.name in board.rando:\n toBuild = currentPlayer.botCommand(currentPlayer.lastcommand)\n currentPlayer.lastcommand = ''\n currentPlayer.move = toBuild\n if board.print_bool:\n print(\"Bot(\"+currentPlayer.name+\") does \"+ toBuild)\n else:\n toBuild = input(\"\\t\")\n\n if (toBuild == \"-c\"):\n if currentPlayer.cities < 1:\n if board.print_bool:\n print(\"no more cities\")\n else:\n buildCity(board, currentPlayer)\n\n elif (toBuild == \"-s\"):\n if currentPlayer.settlements < 1:\n if board.print_bool:\n print(\"no more settlements\")\n else:\n buildSettlement(board, currentPlayer)\n elif (toBuild == \"-r\"):\n if currentPlayer.roads < 1:\n if board.print_bool:\n print(\"No more roads\")\n else:\n buildRoad(board, currentPlayer, playerList)\n elif (toBuild == \"-d\"):\n result = buildDevCard(currentPlayer, devCardDeck,board.print_bool)\n if (result != None):\n obtainedDevCards[result] += 1\n else:\n if board.print_bool:\n print(\"\\tInvalid command.\")\n elif (command == '-d'):\n if (usedDevCard):\n if board.print_bool:\n print(\"\\tYou may only use 1 development card per turn.\")\n else:\n usedDevCard = True\n if board.print_bool:\n print(\"\\tWhich development card would you like to use? Type -k to use a knight, -y to use Year of Plenty, -m to use monopoly, or -r to use road building.\")\n if currentPlayer.name in board.robots:\n toUse = currentPlayer.botCommand(currentPlayer.lastcommand)\n currentPlayer.lastcommand = toUse\n currentPlayer.move = toUse\n if board.print_bool:\n print(\"Robot(\"+currentPlayer.name+\") does \"+toUse)\n elif currentPlayer.name in board.rando:\n toUse = currentPlayer.botCommand(currentPlayer.lastcommand)\n currentPlayer.lastcommand = toUse\n currentPlayer.move = toUse\n if board.print_bool:\n print(\"Bot(\"+currentPlayer.name+\") does \"+toUse)\n else:\n toUse = input(\"\\t\")\n if (toUse == \"-k\"):\n # Ensures they have a knight, and that they didn't just get it this turn.\n if (currentPlayer.devCardDict[\"Knight\"] - obtainedDevCards[\"Knight\"] - 1 >= 0):\n useKnight(board, currentPlayer, playerList)\n currentPlayer.usedDevCards.append(\"Knight\")\n else:\n if board.print_bool:\n print(\"\\tYou can't use a knight.\")\n elif (toUse == \"-y\"):\n if (currentPlayer.devCardDict[\"Year of Plenty\"] - obtainedDevCards[\"Year of Plenty\"] - 1 >= 0):\n yearOfPlenty(currentPlayer,printBool)\n currentPlayer.usedDevCards.append(\"Year of Plenty\")\n else:\n if board.print_bool:\n print(\"You can't use year of plenty.\")\n elif (toUse == \"-m\"):\n if (currentPlayer.devCardDict[\"Monopoly\"] - obtainedDevCards[\"Monopoly\"] - 1 >= 0):\n monopoly(playerList, currentPlayer,printBool)\n currentPlayer.usedDevCards.append(\"Monopoly\")\n else:\n if board.print_bool:\n print(\"You can't use monopoly.\")\n elif (toUse == \"-r\"):\n if (currentPlayer.devCardDict[\"Road Building\"] - obtainedDevCards[\"Road Building\"] - 1 >= 0):\n roadBuilding(board, currentPlayer, playerList)\n currentPlayer.usedDevCards.append(\"Road Building\")\n else:\n if board.print_bool:\n print(\"You can't use road building.\")\n else:\n if board.print_bool:\n print(\"\\tInvalid command.\")\n usedDevCard = False\n elif (command == \"-e\"):\n 
usedDevCard = False\n obtainedDevCards[\"Knight\"] = 0\n obtainedDevCards[\"Year of Plenty\"] = 0\n obtainedDevCards[\"Monopoly\"] = 0\n obtainedDevCards[\"Road Building\"] = 0\n obtainedDevCards[\"Victory Point\"] = 0\n notDone = False\n else:\n if board.print_bool:\n print(\"Invalid command.\")\n\n if playerList[currentPlayerIndex].victorious():\n playing = False\n # Switch the current player\n if (currentPlayerIndex != len(playerList) - 1):\n currentPlayerIndex += 1\n else:\n currentPlayerIndex = 0\n\n #Displays the win\n winList,winner = rankPlayers(playerList)\n printVictory(winList,len(playerList))\n getWinnerData(winner)\n","sub_path":"ImprovedCatan/improvedCatan.py","file_name":"improvedCatan.py","file_ext":"py","file_size_in_byte":15000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"365218599","text":"class ArvoreBinaria:\n\n def __init__(self):\n self.root = None\n\n # Shows the tree at the console\n def show_tree(self, node, depth=0):\n if node is not None:\n print(depth * 8 * \" \", end=\"\")\n print(\"||Node key: \" + str(node.key) + \"||depth: \" + str(depth) + \"||\")\n depth = depth + 1\n if node.r is not None:\n self.show_tree(node.r, depth)\n else:\n print(depth * 8 * \" \", end=\"\")\n print(\"|| NULL NODE ||\")\n if node.l is not None:\n self.show_tree(node.l, depth=depth)\n else:\n print(depth * 8 * \" \", end=\"\")\n print(\"|| NULL NODE ||\")\n\n # First call for an recursive function\n # Deals with the first insertion and reduces the call of the recursive one\n def insert(self, node_key):\n node = self.root\n last = None\n while node is not None:\n last = node\n if node.key > node_key:\n node = node.l\n else:\n node = node.r\n if last is not None:\n if last.key > node_key:\n last.l = self.Node(node_key)\n else:\n last.r = self.Node(node_key)\n else:\n self.root = self.Node(node_key)\n\n # REMOVAL METHODS\n # Determines which node should replace any node at the tree\n def substitute(self, node):\n if node is None or node.l is None:\n return node\n return self.substitute(node.l)\n\n # Removes one specific node from the data structure tree\n def delete(self, key):\n self.root = self.recursive_delete(self.root, key)\n\n # Returns new tree root after deletion (or just plain remove it)\n def recursive_delete(self, node, key):\n # Means it has not been found\n if node is None:\n return None\n # Searching after the node\n elif node.key > key:\n node.l = self.recursive_delete(node.l, key)\n # Searching after the node\n elif node.key < key:\n node.r = self.recursive_delete(node.r, key)\n # Proper deletion since it has been found\n else:\n if node.l is None:\n temp = node.r\n node = None\n # Returns the deleted node\n return temp\n elif node.r is None:\n temp = node.l\n node = None\n # Returns the deleted node\n return temp\n\n temp = self.substitute(node.r)\n node.key = temp.key\n node.r = self.recursive_delete(node.r, temp.key)\n\n if node is None:\n return None\n\n return node\n\n # Made to make the first recursive call, starting it's query from the root\n def query(self, key):\n return self.recursive_query(self.root, key)\n\n # Recursive function that actually makes the query\n def recursive_query(self, node, key):\n # Means it has not been found\n if node is None:\n return None\n # Searching after the node\n elif node.key > key:\n return self.recursive_query(node.l, key)\n # Searching after the node\n elif node.key < key:\n return self.recursive_query(node.r, key)\n else:\n return node\n\n class Node:\n\n def 
__init__(self, key):\n self.key = key\n self.l = None\n self.r = None\n","sub_path":"ArvoreBinaria.py","file_name":"ArvoreBinaria.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142429927","text":"from unittest.mock import patch\nfrom django.test import TestCase, override_settings, RequestFactory\nfrom django.shortcuts import render\nfrom django.forms.renderers import Jinja2, DjangoTemplates\nfrom django.conf import settings\n\nfrom .util import TestEachExample\n\n\n@override_settings(\n TEMPLATES=[settings.DJANGO_TEMPLATE_BACKEND,\n settings.JINJA2_TEMPLATE_BACKEND]\n)\nclass TemplateEngineParityTests(TestCase, metaclass=TestEachExample):\n '''\n This renders each example using its Django and Jinja2 template\n and ensures that both produce the same HTML.\n '''\n\n def make_renderer(self, engine_name, form_renderer):\n def force_render_using_engine(req, template_name, ctx):\n ctx['form'].renderer = form_renderer\n return render(req, template_name, ctx, using=engine_name)\n return force_render_using_engine\n\n def render_example(self, request, example, renderer):\n with patch.object(example.module, 'render', renderer):\n content = example.render(request)\n return '\\n'.join([\n line for line in content.split('\\n')\n if 'csrfmiddlewaretoken' not in line\n ])\n\n def test(self, example):\n self.maxDiff = 5000\n factory = RequestFactory()\n django = self.render_example(\n factory.get('/'),\n example,\n self.make_renderer('django', DjangoTemplates())\n )\n\n jinja2 = self.render_example(\n factory.get('/'),\n example,\n self.make_renderer('jinja2', Jinja2())\n )\n\n self.assertHTMLEqual(\n django,\n jinja2,\n 'renders of \"{}\" example must match'.format(example.basename)\n )\n","sub_path":"example/app/tests/test_template_engine_parity.py","file_name":"test_template_engine_parity.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"630770910","text":"import os\nimport argparse\nimport time\nimport sys\nsys.path.append('../')\n\nimport numpy as np\nfrom scipy.stats import norm, invwishart, multivariate_normal\n\nfrom utils.feature_util import radial_distance, dynamic_feature_vector\n\nimport pdb\n\n\ndef main(args):\n ptl = args.path + 'positions/' + args.subdir + '/'\n\n # Load ion positions\n anion_positions = np.load(ptl + 'anion_positions.npy')\n cation_positions = np.load(ptl + 'cation_positions.npy')\n anion_velocities = np.load(ptl + 'anion_velocities.npy')\n cation_velocities = np.load(ptl + 'cation_velocities.npy')\n\n \"\"\"\n sigma = 2.5A / 0.97\n cutoff is 1.6*sigma\n residence time of dynamics ion pairs determined by classifying ions as neighbours\n if they fall within 2.5*sigma of each other.\n residence time of static ion pairs classified as ions within 1.6*sigma of each other.\n \"\"\"\n\n sigma = 1\n r_c_static = 1.6 * sigma\n r_c_dynamic = 2.5 * sigma\n box_length = 50000.0 ** (1 / 3) * sigma\n\n assert anion_positions.shape == cation_positions.shape\n (n_snapshots, n_anions, _) = anion_positions.shape\n n_cations = cation_positions.shape[1]\n\n # Identify which ions are classified as being in a static ion pair at each snapshot\n static_pairs_dict = {}\n dynamic_pairs_dict = {}\n x_an_dict = {}\n x_cat_dict = {}\n\n for snapshot_id in range(args.n_snapshots):\n\n t0 = time.time()\n # Select ion positions at a given snapshot\n anion_pos = anion_positions[snapshot_id, :, :]\n 
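# ... together with the matching cation positions and both velocity slices for this snapshot; the velocities feed dynamic_feature_vector below.\n 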
cation_pos = cation_positions[snapshot_id, :, :]\n anion_vel = anion_velocities[snapshot_id, :, :]\n cation_vel = cation_velocities[snapshot_id, :, :]\n\n static_pairs = []\n dynamic_pairs = []\n x_cat = []\n x_an = []\n\n for anion_id in range(n_anions):\n for cation_id in range(n_cations):\n r = radial_distance(anion_pos[anion_id, 0], anion_pos[anion_id, 1], anion_pos[anion_id, 2],\n cation_pos[cation_id, 0], cation_pos[cation_id, 1], cation_pos[cation_id, 2],\n box_length)\n if r <= r_c_dynamic:\n dynamic_pairs.append((anion_id, cation_id))\n if r <= r_c_static:\n static_pairs.append((anion_id, cation_id))\n\n # Compute the feature vectors for the two ions in the ion pair i.e. [g_like, g_unlike]\n x_cat.append(dynamic_feature_vector(cation_pos, anion_pos, cation_vel, anion_vel, cation_id, box_length))\n x_an.append(dynamic_feature_vector(anion_pos, cation_pos, anion_vel, cation_vel, anion_id, box_length))\n static_pairs = np.vstack(static_pairs)\n dynamic_pairs = np.vstack(dynamic_pairs)\n x_cat = np.vstack(x_cat)\n x_an = np.vstack(x_an)\n static_pairs_dict[snapshot_id] = np.copy(static_pairs)\n dynamic_pairs_dict[snapshot_id] = np.copy(dynamic_pairs)\n x_an_dict[snapshot_id] = np.copy(x_an)\n x_cat_dict[snapshot_id] = np.copy(x_cat)\n\n if snapshot_id % args.print_freq == 0:\n print('Snapshot {}\\t Time: {:.2f}'.format(snapshot_id, (time.time() - t0)))\n\n length_dict = {}\n\n # Calculate the number of snapshots for which an ion pair remains paired\n for idx0 in range(args.n_snapshots - args.cutoff):\n pairs0 = static_pairs_dict[idx0]\n length = np.zeros(pairs0.shape[0])\n for i in range(pairs0.shape[0]):\n for idx in range(idx0 + 1, idx0 + args.cutoff):\n match = False\n for j in range(dynamic_pairs_dict[idx].shape[0]):\n if np.array_equal(pairs0[i], dynamic_pairs_dict[idx][j]):\n match = True\n if match:\n length[i] += 1\n else:\n break\n\n length_dict[idx0] = length\n\n # Create numpy matrix\n X = []\n y = []\n for i in range(args.n_snapshots - args.cutoff):\n X.append(np.concatenate((x_an_dict[i], x_cat_dict[i]), axis=1))\n y.append(length_dict[i])\n\n # Save as .npy files\n X = np.vstack(X)\n y = np.hstack(y)\n\n if not os.path.exists(args.pts):\n os.makedirs(args.pts)\n\n np.save(args.pts + 'X_{}_new.npy'.format(args.subdir), X)\n np.save(args.pts + 'y_{}_new.npy'.format(args.subdir), y)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', type=str, default='../data/md-trajectories/',\n help='Path to directory containing data.')\n parser.add_argument('--pts', type=str, default='../data/processed/',\n help='Path to directory containing data.')\n parser.add_argument('--subdir', type=str, default='6',\n help='Sub directory of interest.')\n parser.add_argument('--print_freq', type=int, default=25,\n help='Print every N snapshots.')\n parser.add_argument('--n_snapshots', type=int, default=500,\n help='Number of snapshots.')\n parser.add_argument('--cutoff', type=int, default=150,\n help='Expected maximum number of snapshots over which ions are paired.')\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"preprocessing/featurise_dynamic.py","file_name":"featurise_dynamic.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"197692852","text":"from flask import Flask, request, jsonify, make_response,session\r\nfrom flask import jsonify\r\nimport pandas as pd\r\nfrom pandas import ExcelWriter\r\nfrom pandas import ExcelFile\r\nfrom 
flask import request\r\nfrom flask_pymongo import PyMongo\r\nfrom flask_cors import CORS\r\nfrom flask_mongoengine import MongoEngine, Document\r\nimport jwt,time,sys,os,pymongo\r\n#from datetime import timedelta\r\nfrom datetime import datetime\r\nimport printjson,isodate\r\nfrom bson import ObjectId\r\nimport dateutil.parser as parser\r\nfrom bson.json_util import dumps\r\nimport psycopg2,logging,xlwt\r\nimport numpy as np\r\nimport pymysql\r\nfrom pymongo import MongoClient\r\nclient = MongoClient('localhost:27017')\r\n# client.admin.authenticate('satya_mongo', 'satya_mongo')\r\n# client.testdb.add_user('newTestUser', 'Test123', True)\r\n\r\napp = Flask(__name__)\r\napp.config['MONGO_DBNAME'] = 'restdb'\r\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/satya_mongo'\r\n\r\nCORS(app)\r\nmongo = PyMongo(app)\r\n#192.168.43.114\r\n#10.80.16.19\r\nCORS(app, origins=\"http://10.80.16.19:5005\",\r\n allow_headers=[\"Content-Type\", \"Authorization\", \"Access-Control-Allow-Credentials\"], supports_credentials=True)\r\ndb = MongoEngine(app)\r\n\r\n\r\n@app.route('/login', methods=['POST'])\r\ndef login():\r\n data = request.get_json()\r\n authusername = data['username']\r\n authpassword = data['password']\r\n\r\n if data is None:\r\n return make_response('status : FAILURE\\nmessage : Enter Username and Password', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login required!\"'})\r\n\r\n db = mongo.db\r\n\r\n\r\n try:\r\n db.logout()\r\n db.authenticate(authusername, authpassword, source=('satya_mongo'))\r\n message = 'Login Successful'\r\n\r\n success = True\r\n # print('message::\\t', message)\r\n\r\n token = jwt.encode({'exp': datetime.utcnow()}, 'SECRET_KEY')\r\n\r\n\r\n except Exception as e:\r\n\r\n # exc_type, exc_obj, exc_tb = sys.exc_info()\r\n # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\r\n # print(exc_type, fname, exc_tb.tb_lineno)\r\n # print(type(e)) # the exception instance\r\n # print(e.args) # arguments stored in .args\r\n # print(e)\r\n\r\n # finally:\r\n # message = \"new user creation\"\r\n #print(message,authusername,authpassword)\r\n connection = pymysql.connect(host='192.168.100.16',\r\n user='ggnetuser',\r\n password='intra@gg1',\r\n db='gg_intranet',\r\n charset='utf8mb4',\r\n cursorclass=pymysql.cursors.DictCursor)\r\n cursor = connection.cursor()\r\n cursor.execute(\"select WORK_EMAIL from GG_EMPLOYEE\")\r\n data1 = cursor.fetchall() # returns a list\r\n\r\n\r\n # connection.close()\r\n\r\n\r\n for i in range(len(data1)):\r\n\r\n # if authusername not in db.command('usersInfo'):\r\n # print('userinformation::\\n', db.command('userInfo'))\r\n #print(data1[i])\r\n\r\n if authusername == data1[i]['WORK_EMAIL']:\r\n print(\"loop entered\")\r\n #db.add_user(authusername, authpassword)\r\n #db.addUser({authusername, authpassword})\r\n db.command(\"createUser\", authusername, pwd = authpassword, roles=[\"read\"])\r\n print('user created')\r\n db.authenticate(authusername, authpassword, source=('satya_mongo'))\r\n success = True\r\n message = 'Login Successful'\r\n token = jwt.encode({'exp': datetime.utcnow()}, 'SECRET_KEY')\r\n return jsonify({'success': success, 'token': str(token), 'message': message})\r\n # return 'Login Sucessful'\r\n else:\r\n success = False\r\n message = 'Invalid Credentials'\r\n #print(message)\r\n token=None\r\n exc_type, exc_obj, exc_tb = sys.exc_info()\r\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\r\n print(exc_type, fname, exc_tb.tb_lineno)\r\n\r\n\r\n return jsonify({'success':success,'token' : 
str(token),'message':message})\r\n\r\n \r\n\r\n\r\n\r\n@app.route('/insert',methods=['POST'])\r\ndef insert_key():\r\n update_list = request.get_json(silent=True)\r\n\r\n if update_list is not None:\r\n s_mongo = mongo.db.satya_mongo\r\n #l_mongo = mongo.db.login_mongo\r\n try:\r\n for i in update_list:\r\n dct = {}\r\n\r\n for val in i:\r\n if val != '_id':\r\n print('key::\\t',val)\r\n print('value::\\t',i[val])\r\n dct[val] = i[val]\r\n #DB.insert({\"Date\":datetime.now()})\r\n s_mongo.insert(dct)\r\n print('data inserted Successfully')\r\n rslt = {'message': 'Fields Updated Sucessfully', 'success': 1}\r\n\r\n\r\n def edit_something_only_once():\r\n app.logger.setLevel(logging.INFO)\r\n app.logger.info(\"Initialized Flask logger handler\")\r\n\r\n except Exception as e:\r\n exc_type, exc_obj, exc_tb = sys.exc_info()\r\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\r\n print(exc_type, fname, exc_tb.tb_lineno)\r\n print(type(e)) # the exception instance\r\n print(e.args) # arguments stored in .args\r\n print(e)\r\n rslt = {'message': 'Unsucessful', 'success': 0}\r\n else:\r\n rslt = {'message': 'Fields are empty', 'success': 0}\r\n\r\n return jsonify({'result': rslt})\r\n\r\n\r\n\r\n@app.route('/get_all', methods=['GET'])\r\ndef get_all():\r\n s_mongo = mongo.db.satya_mongo\r\n\r\n #latest 5 records based on userid #need to be written\r\n output = []\r\n for post in s_mongo.find():\r\n #print(post)\r\n for val in post:\r\n if '_id' in val:\r\n post[val] = str(post[val])\r\n # output.append(post)\r\n output.append(post)\r\n return jsonify({'Data':output,'Message':'Success'})\r\n\r\n@app.route('/get/<_id>',methods=['GET'])\r\ndef get_key(_id):\r\n s_mongo = mongo.db.satya_mongo\r\n\r\n star_id = s_mongo.find_one({'_id': ObjectId(_id)})\r\n\r\n get_value = \" retrieved successfully\"\r\n return jsonify({'values':str(star_id),'result':get_value})\r\n\r\n\r\n@app.route('/search/',methods=['GET'])\r\n\r\ndef search(date):\r\n s_mongo = mongo.db.satya_mongo\r\n output = []\r\n for post in s_mongo.find({'Date': date}):\r\n # print(post)\r\n for val in post:\r\n #if 'Date' in val:\r\n post[val]=str(post[val])\r\n # output.append(post)\r\n output.append(post)\r\n return jsonify({'Data': output, 'Message': 'Success'})\r\n\r\n@app.route('/update',methods=['POST'])\r\ndef update():\r\n\r\n s_mongo = mongo.db.satya_mongo\r\n\r\n data = request.get_json()\r\n #print(data[\"Data\"][0]['_id'])\r\n '''m = len(data[\"Data\"])\r\n #print(m)\r\n if data['Data'][0]['_id']:\r\n\r\n for i in range(0,m):\r\n #print(\"entered\")\r\n\r\n update_record = s_mongo.update_one({\"_id\": ObjectId((data[\"Data\"][i]['_id']))},\r\n {'$set': dict(Date=data[\"Data\"][i]['Date'], day=data[\"Data\"][i]['day'],\r\n month=data[\"Data\"][i]['month'],\r\n year=data[\"Data\"][i]['year'],\r\n Vertical=data[\"Data\"][i]['Vertical'],\r\n PlantName=data[\"Data\"][i]['PlantName'],\r\n Day_Gen=data[\"Data\"][i]['Day_Gen'],\r\n Capacity=data[\"Data\"][i]['Capacity'],\r\n Avgload=data[\"Data\"][i]['Avgload'],\r\n Peak_Load=data[\"Data\"][i]['Peak_Load'],\r\n Day_PLF=data[\"Data\"][i]['Day_PLF'],\r\n MTD_Gen=data[\"Data\"][i]['MTD_Gen'],\r\n MTD_PLF=data[\"Data\"][i]['MTD_PLF'],\r\n FY_Gen=data[\"Data\"][i]['FY_Gen'],\r\n FY_PLF=data[\"Data\"][i]['FY_PLF'],\r\n Day_Gen_Loss=data[\"Data\"][i]['Day_Gen_Loss'])})\r\n print(update_record)'''\r\n\r\n def recurse_keys(data):\r\n for key in data.keys():\r\n if isinstance(data[key], dict):\r\n recurse_keys(data[key])\r\n else:\r\n if key != '_id':\r\n data[key] = 'NA'\r\n\r\n # 
update your document, save behaves as upsert with '_id' supplied.\r\n db.collection.save(data)\r\n\r\n return \"record updated successfully\"\r\n\r\n@app.route('/delete/<_id>',methods=['DELETE'])\r\ndef delete_key(_id):\r\n s_mongo = mongo.db.satya_mongo\r\n\r\n star_id = s_mongo.delete_one({'_id': ObjectId(_id)})\r\n delete_value = \"successfully deleted\"\r\n return jsonify({'result':delete_value})\r\n\r\n@app.route('/data/',methods=['GET'])\r\ndef data(date):\r\n s_mongo = mongo.db.satya_mongo\r\n capoutput = []\r\n genoutput = []\r\n Plant = []\r\n Vertical = []\r\n Peak_Load = []\r\n\r\n # \"user\": \"solar3\",\r\n main_df = pd.DataFrame(columns=['capoutput', 'genoutput', 'Plant','Vertical','Peak_Load'])\r\n\r\n for post in s_mongo.find({'Date': date}):\r\n\r\n for val in post:\r\n\r\n if 'Capacity' and 'Day_Gen' and 'PlantName' in val:\r\n\r\n #post[val] = str(post[val])\r\n\r\n post['Capacity'] = int(str(post['Capacity']))\r\n\r\n post['Day_Gen'] = int(str(post['Day_Gen']))\r\n\r\n post['PlantName'] = str(post['PlantName'])\r\n\r\n post['Vertical'] = str(post['user'])\r\n\r\n post['Peak_Load'] = int(str(post['Peak_Load']))\r\n\r\n\r\n capoutput.append(post['Capacity'])\r\n genoutput.append(post['Day_Gen'])\r\n Plant.append(post['PlantName'])\r\n Vertical.append(post['Vertical'])\r\n Peak_Load.append(post['Peak_Load'])\r\n\r\n\r\n\r\n\r\n\r\n main_df['capoutput'] = capoutput\r\n main_df['genoutput'] = genoutput\r\n main_df['Plant'] = Plant\r\n main_df['Vertical'] = Vertical\r\n main_df['Peak_Load'] = Peak_Load\r\n\r\n for indx, rows in main_df.iterrows():\r\n if 'solar' in rows['Vertical']:\r\n main_df.loc[indx, 'Vertical'] = 'solar'\r\n if 'wind' in rows['Vertical']:\r\n main_df.loc[indx, 'Vertical'] = 'wind'\r\n if 'hydro' in rows['Vertical']:\r\n main_df.loc[indx, 'Vertical'] = 'hydro'\r\n if 'bio' in rows['Vertical']:\r\n main_df.loc[indx, 'Vertical'] = 'bio'\r\n\r\n main_df_group = main_df.groupby('Vertical')\r\n\r\n # for grpname, grp in main_df_group:\r\n # Wind_capoutput_total = sum(grp[grp['Vertical'].str.startswith('wind')]['capoutput'])\r\n # for indx, rows in grp:\r\n #\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # Wind_capoutput_total = sum(main_df[main_df['Vertical'].str.startswith('wind')]['capoutput'])\r\n # Solar_capoutput_total = sum(main_df[main_df['Vertical'].str.startswith('solar')]['capoutput'])\r\n # Hydro_capoutput_total = sum(main_df[main_df['Vertical'].str.startswith('hydro')]['capoutput'])\r\n # bio_capoutput_total = sum(main_df[main_df['Vertical'].str.startswith('bio')]['capoutput'])\r\n #\r\n # Wind_genoutput_total = sum(main_df[main_df['Vertical'].str.startswith('wind')]['genoutput'])\r\n # Solar_genoutput_total = sum(main_df[main_df['Vertical'].str.startswith('solar')]['genoutput'])\r\n # Hydro_genoutput_total = sum(main_df[main_df['Vertical'].str.startswith('hydro')]['genoutput'])\r\n # bio_genoutput_total = sum(main_df[main_df['Vertical'].str.startswith('bio')]['genoutput'])\r\n #\r\n # Wind_Peak_Load_total = sum(main_df[main_df['Vertical'].str.startswith('wind')]['Peak_Load'])\r\n # Solar_Peak_Load_total = sum(main_df[main_df['Vertical'].str.startswith('solar')]['Peak_Load'])\r\n # Hydro_Peak_Load_total = sum(main_df[main_df['Vertical'].str.startswith('hydro')]['Peak_Load'])\r\n # bio_Peak_Load_total = sum(main_df[main_df['Vertical'].str.startswith('bio')]['Peak_Load'])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n print('Main_DataFrame::\\n',main_df)\r\n main_df.to_excel('main_dataframe.xlsx')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n total1 = sum(capoutput)\r\n total2 = 
sum(genoutput)\r\n\r\n #print(Plant)\r\n #list = capoutput,genoutput,Plant\r\n #print(list)\r\n '''writer = ExcelWriter(r'D:\\test.xlsx',engine='openpyxl')\r\n wb = writer.book\r\n df = pd.DataFrame({'PlantName':Plant,'Capacity':capoutput,'Day_Gen':genoutput})\r\n\r\n #print('Dataframe::\\n',df)\r\n\r\n #pd.to_frame(name1='PlantName',name2='Capacity',name3='Day_Gen').to_excel(writer, sheet_name='Sheet1', index=True)\r\n #for i in range(0,len(df)):\r\n df.to_excel(writer, sheet_name='Sheet1',index=True)\r\n wb.save(r'D:\\test.xlsx')'''\r\n\r\n '''out = open(r'D:/test.xls', 'w')\r\n for row in list:\r\n for column in row:\r\n out.write('%s\\n' % column)\r\n out.write('\\n')\r\n out.close()'''\r\n\r\n '''for val in post:\r\n # if 'Date' in val:\r\n post[val] = str(post[val])\r\n # output.append(post)\r\n output.append(post)\r\n \"\"\"for document in cursor:\r\n print('searching for document keys')\r\n print(document.keys())\r\n op=[]\r\n\r\n for i in range(0,len(document.keys())):\r\n print(i)\r\n\r\n s_mongo.find({'Capacity':{'$in':['Capacity']}})\r\n\r\n #capadd = sum(document.keys())\r\n #genadd = sum(document.keys())'''\r\n return jsonify({'CapTotal':total1,'Capacity':capoutput,\r\n 'Gentotal':total2,'Day_Gen':genoutput,\r\n 'PlantName':Plant,\r\n 'message':'Success'})\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, host='10.80.16.19', port=5005)","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":14281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164022448","text":"# -*- coding: UTF-8 -*-\n# **********************************************************************************#\n# File: Batch tools\n# **********************************************************************************#\nfrom utils.error import Errors\nfrom . enums import SchemaType\nfrom . collection import MongodbCollections\nfrom .. 
data.mongodb_base import BatchTool\n\n\nclass MongodbBatchTools(object):\n \"\"\"\n Mongodb Batch tools\n \"\"\"\n portfolio = BatchTool(MongodbCollections.portfolio, buffer_size=2000)\n order = BatchTool(MongodbCollections.order, buffer_size=2000)\n position = BatchTool(MongodbCollections.position, buffer_size=2000)\n trade = BatchTool(MongodbCollections.trade, buffer_size=2000)\n\n\ndef switch_batch_tool(schema_type, database='mongodb'):\n \"\"\"\n Switch batch tool\n Args:\n schema_type(string): schema type\n database(string): database name\n\n Returns:\n collection(obj): collection\n \"\"\"\n if database == 'mongodb':\n if schema_type == SchemaType.portfolio:\n batch_tool = MongodbBatchTools.portfolio\n elif schema_type == SchemaType.order:\n batch_tool = MongodbBatchTools.order\n elif schema_type == SchemaType.position:\n batch_tool = MongodbBatchTools.position\n elif schema_type == SchemaType.trade:\n batch_tool = MongodbBatchTools.trade\n else:\n raise Errors.INVALID_SCHEMA_TYPE\n else:\n raise Errors.INVALID_DATABASE\n return batch_tool\n\n\n__all__ = [\n 'MongodbBatchTools',\n 'switch_batch_tool'\n]\n","sub_path":"lib/core/batch_tool.py","file_name":"batch_tool.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118961628","text":"import cherrypy\nimport json\n\nclass Ex1Site(object):\n\texposed=True\n\n\tdef GET(self,*uri,**params):\n\t\tif len(params)!=3:\n\t\t\traise cherrypy.HTTPError(400,\"The uri is not satisfied [/value/originalUnit/targetUnit]\")\n\n\t\t\n\t\toriginalUnit=params[\"originalUnit\"]\n\t\ttargetUnit=params[\"targetUnit\"]\n\n\t\ttry:\n\t\t\tvalue=float(params[\"value\"])\n\t\t\terrorConversion=0\n\t\texcept (ValueError, TypeError):\n\t\t\terrorConversion=1\n\n\t\tif errorConversion==1:\n\t\t\traise cherrypy.HTTPError(400,\"The temperature must be an integer or a float\")\n\n\t\tresult=convertValue(originalUnit,targetUnit,value)\n\n\n\t\td={}\n\n\t\td[\"Original value\"]=value\n\t\td[\"Original unit\"]=originalUnit\n\t\td[\"Converted value\"]=round(result,2)\n\t\td[\"Converted unit\"]=targetUnit\n\n\t\treturn(json.dumps(d,indent=4))\n\n\n\ndef convertValue(originalUnit,targetUnit,value):\n\tif originalUnit=='K':\n\t\tif targetUnit=='C':\n\t\t\tresult=value-273.15\n\t\telif targetUnit=='F':\n\t\t\tresult=(value-273.15)*9/5+32\n\t\telse:\n\t\t\traise cherrypy.HTTPError(404,\"Target unit not found [C-K-F]\")\n\telif originalUnit=='C':\n\t\tif targetUnit=='K':\n\t\t\tresult=value+273.15\n\t\telif targetUnit=='F':\n\t\t\tresult=(value*9/5)+32\n\t\telse:\n\t\t\traise cherrypy.HTTPError(404,\"Target unit not found [C-K-F]\")\n\telif originalUnit=='F':\n\t\tif targetUnit=='K':\n\t\t\tresult=(value-32)*5/9+273.15\n\t\telif targetUnit=='C':\n\t\t\tresult=(value-32)*5/9\n\t\telse:\n\t\t\traise cherrypy.HTTPError(404,\"Target unit not found [C-K-F]\")\n\telse:\n\t\traise cherrypy.HTTPError(404,\"Original unit not found [C-K-F]\")\n\n\treturn result\n\n\nif __name__==\"__main__\":\n\tconf = {\n\t'/':{\n\t\t'request.dispatch':cherrypy.dispatch.MethodDispatcher(),\n\t\t'tools.sessions.on':True\n\t\t}\n\t}\n\tcherrypy.tree.mount(Ex1Site(),'/converter',conf)\n\n\tcherrypy.engine.start()\n\tcherrypy.engine.block()","sub_path":"Lab_SW/Lab_SW_1/Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"571267708","text":"from django.shortcuts import get_object_or_404, 
render\r\nfrom django.http import HttpResponse,HttpResponseRedirect\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.template import loader\r\n# Create your views here.\r\nfrom .models import Question\r\n\r\ndef index(request):\r\n latest_question_list = Question.objects.all().order_by('-pub_date')[:5]\r\n template = loader.get_template('polls/index.html')\r\n context = {\r\n 'latest_question_list': latest_question_list\r\n }\r\n return HttpResponse(template.render(context,request))\r\n\r\ndef detail(request, question_id):\r\n question = get_object_or_404(Question, pk=question_id)\r\n return render(request, 'polls/detail.html', {'question': question})\r\n\r\ndef results(request,question_id):\r\n question = get_object_or_404(Question,pk=question_id)\r\n return render(request,'polls/results.html',{'question':question})\r\n\r\ndef vote(request,question_id):\r\n question = get_object_or_404(Question,pk = question_id)\r\n try:\r\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\r\n except:\r\n return render(request,'polls/detail.html',{'question':question,'error_message':\"Please select a choice\"})\r\n else:\r\n selected_choice.votes += 1\r\n selected_choice.save()\r\n\r\n return HttpResponseRedirect(reverse('polls:results',args=(question.id,)))\r\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123695378","text":"#!/usr/bin/python\n# This script was written to calculate the beta diversity and plot the boxplot\n# usage: python Data_Analys.py matrix_table\n\nfrom scipy.stats import mannwhitneyu\nfrom math import log\nfrom math import sqrt\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom skbio.diversity import beta_diversity\nfrom skbio.diversity import beta\nfrom skbio.stats.distance import mantel\nfrom skbio.stats.ordination import pcoa\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport scipy.spatial.distance as distance\nimport seaborn as sns \n\n\n\nargvs \t\t= sys.argv\npath \t\t= argvs[1]\ntitle \t\t= str(argvs[1].split(\".csv\")[0])\nouttitle \t= title+'.beta_diversity_bray_curtis.csv'\nouttitle2 \t= title+'.alpha_diversity_unweight.csv'\nwriter \t\t= pd.ExcelWriter(title+\".beta_diver.xlsx\", engine='xlsxwriter')\ndf_table1 \t= pd.read_table(path,index_col=0)\noutfig \t\t= title+'.beta_diversity_bray_curtisbox.eps'\n\n# drop unnecessary features\ntry:\n\tdf_table = df_table1.drop(['mean','std'],axis=1,inplace=False)\nexcept:\n\tdf_table = df_table1\n\ndf_table_Tpose\t= df_table.transpose()\nlist_sample\t\t= df_table_Tpose.index.values\nlist_otu \t\t= df_table.index.values\nsample_matrix \t= df_table_Tpose.as_matrix()\n\n# dataframe partition (depends on group)\nnormal \t\t=(df_table[df_table.columns[df_table.columns.to_series().str.contains('Healthy')]])\nsurgery \t=(df_table[df_table.columns[df_table.columns.to_series().str.contains('Gastrectomy')]])\n\n#bray curtis pairwise distance matrix (scikit-bio)\npw=beta_diversity('braycurtis', sample_matrix, ids=list_sample, validate=True, pairwise_func=None) #return class\n\n#convert matrix into square dataframe\ndf=pd.DataFrame(data=pw[0:,0:],index=list_sample,columns=list_sample)\n\n#remove duplicate values\ndf_values = list(distance.squareform(df))\n\n#make the group matrix\ngdf_values = list()\nfor i in range(len(list_sample)):\n\tfor j in range(i, len(list_sample)):\n\t\tg1 = list_sample[i].split('.')[1]\n\t\tif g1 == 
'Healthy':\n\t\t\tg1= 'Healthy'\n\t\telse:\n\t\t\tg1= 'Gastrectomy'\n\t\tg2 = list_sample[j].split('.')[1]\n\t\tif g2 == 'Healthy':\n\t\t\tg2= 'Healthy'\n\t\telse:\n\t\t\tg2= 'Gastrectomy'\n\t\tg_g = '-'.join(sorted([g1, g2]))\n\n\t\tgdf_values.append(g_g)\n\n#boxplot\nplt.figure()\nfig,ax \t\t\t\t= plt.subplots()\npal \t\t\t\t=['#0072BC','#F0E442','#F7941D']\nsns.set(style='ticks',font_scale=0.6)\n\ndf_for_box1 \t\t= pd.DataFrame([df_values, gdf_values], index=['Bray curtis distance', 'group']).T\n#df_for_box1=df_for_box.fillna(0)\n#df_for_box = df_for_box1[(df_for_box1.T != 0).any()]\ndf_for_box \t\t\t= df_for_box1.dropna(axis=0, how='any')\n\ndf_for_box.to_csv('boxplot_distance.tsv',header=True, index=True, sep='\\t')\nsns.factorplot(y=\"Bray curtis distance\", x=\"group\", data=df_for_box, kind=\"box\", palette=pal, linewidth=1.00, width=0.5, legend=False, showfliers=True)\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.savefig(outfig, bbox_inches='tight',format='eps', dpi=1000)\ndf.to_excel(writer,'Bray_curtis')\ndf.to_csv(outtitle,header=True, index=True, sep='\\t')\n\n#statistic analysis\ndef stat_diff(dataframe,var1_group,var2_group):\n\tvar1 = (dataframe.loc[dataframe['group'] == var1_group, 'Bray curtis distance']).tolist()\n\tvar2 = (dataframe.loc[dataframe['group'] == var2_group, 'Bray curtis distance']).tolist()\n\thyp_test=mannwhitneyu(np.array(var1),np.array(var2),alternative='two-sided')\n\tp_value = hyp_test[1]\n\tmean_var1=np.array(var1).mean()\n\tstd_var1=np.array(var1).std()\n\tmean_var2=np.array(var2).mean()\n\tstd_var2=np.array(var2).std()\n\tvar1=np.array(var1).var()\n\tvar2=np.array(var2).var()\n\tvalue = list()\n\tvalue.extend([p_value,mean_var1,std_var1,mean_var2,std_var2])\n\treturn value\n\nHH_GH \t=stat_diff(df_for_box,\"Healthy-Healthy\", \"Gastrectomy-Healthy\")\nGH_GG \t=list(stat_diff(df_for_box,\"Gastrectomy-Healthy\", \"Gastrectomy-Gastrectomy\"))\nHH_GG \t=list(stat_diff(df_for_box,\"Healthy-Healthy\", \"Gastrectomy-Gastrectomy\"))\n\ndf_stats \t= pd.DataFrame([HH_GH,GH_GG,HH_GG], columns=['p_value','mean_var1','std_var1','mean_var2','std_var2'], index=['HH_GH','GH_GG','HH_GG'])\ndf_stats.to_csv('Stats_bray_curtis.tsv',header=True, index=True, sep='\\t')","sub_path":"Data_Analysis/BetaDiver_boxplot.py","file_name":"BetaDiver_boxplot.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"591924935","text":"\"\"\"\n988. Smallest String Starting From Leaf (Medium)\n\nGiven the root of a binary tree, each node has a value from 0 to 25 representing the letters 'a' to 'z': a value of 0 represents 'a', a value of 1 represents 'b', and so on.\n\nFind the lexicographically smallest string that starts at a leaf of this tree and ends at the root.\n\n(As a reminder, any shorter prefix of a string is lexicographically smaller: for example, \"ab\" is lexicographically smaller than \"aba\". 
A leaf of a node is a node that has no children.)\n\n \n\nExample 1:\n\n\n\nInput: [0,1,2,3,4,3,4]\nOutput: \"dba\"\nExample 2:\n\n\n\nInput: [25,1,3,1,3,0,2]\nOutput: \"adz\"\nExample 3:\n\n\n\nInput: [2,2,1,null,1,0,null,0]\nOutput: \"abc\"\n \n\nNote:\n\nThe number of nodes in the given tree will be between 1 and 8500.\nEach node in the tree will have a value between 0 and 25.\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def smallestFromLeaf(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: str\n \"\"\"\n self.result = None\n self.dfs(root, [])\n result = [chr(item + ord('a')) for item in self.result]\n return \"\".join(result)\n \n def dfs(self, node, prefix):\n if node is None:\n return\n prefix += [node.val]\n # print(node.val, prefix)\n if not node.left and not node.right:\n res = tuple(prefix[::-1])\n if self.result is None or res < self.result:\n self.result = tuple(res)\n if node.left:\n self.dfs(node.left, prefix)\n if node.right:\n self.dfs(node.right, prefix)\n prefix.pop()\n","sub_path":"python/leetcode/tree/988_smallest_str.py","file_name":"988_smallest_str.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646332759","text":"#######\n#\n# classify_images.py\n#\n# This is a test driver for running our species classifiers and detectors.\n# The script classifies one or more hard-coded image files.\n#\n# Because the inference code has not been assembled into a formal package yet,\n# you should define api_root to point to the base of our repo. This\n# will be added to your Python path later in the script.\n#\n# This script has two non-code dependencies:\n#\n# * a classification model file (and, optionally, a detection model model)\n# * a taxonomy file, so the scientific names used in the training data can\n# be mapped to common names.\n#\n# Note to self... this code is compatible with pytorch 1.2, so when running on a \n# CUDA 10.0 Linux VM:\n#\n# conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch\n#\n####### \n\n\n#%% Constants and imports\n\nimport sys\nimport os\nimport pandas as pd\nimport glob\n\n# Species classification API imports deferred until later, since we have to do a little\n# path management. This also implicitly defers PyTorch imports.\n\n# Directory to which you sync'd the repo. 
Probably the same\n# directory this file lives in, but for portability, this file is set up to only\n# take dependencies on the repo according to this constant.\napi_root = r'/home/coyote/git/speciesclassification'\nsubdirs_to_import = ['DetectionClassificationAPI','FasterRCNNDetection','PyTorchClassification'] \n\n# Path to taxa.csv, for latin --> common mapping\n#\n# Set to None to disable latin --> common mapping\ntaxonomy_path = r'/data/species_classification/taxa.19.08.28.0536.csv' # None\n\njob_name = ''\nimages_to_classify_base = None\n\n# images_to_classify can be:\n#\n# an array of filenames\n#\n# a single string; if it's a string, it's assumed to point to a .csv file, in \n# which each row is [filename,description]\n#\n# a directory, which is recursively enumerated\nif False:\n images_to_classify = [\n '/data/species_classification/coyote.jpg',\n '/data/species_classification/meerkat.jpg',\n '/data/species_classification/elephant.jpg'\n ]\n\n# Pick images from a .csv file\nif False:\n images_to_classify = '/data/species_classification/animal_list.2018.10.23.12.58.16.csv'\n images_to_classify_base = '/data/species_classification/sample_animals'\n \n# Pick images from a folder\nif False:\n images_to_classify = '/data/species_classification/elephants_and_hippos'\n \n# Pick images from a folder\nif True:\n images_to_classify = '/data/species_classification/images/sample_images.2019.12.28'\n job_name = 'sample_images.2019.12.28'\n \n# Classification results will be written here\nclassification_output_file = None\n\nmodel_base = '/data/species_classification/models'\n\n# 2019 fixed model\n# classification_model_path = os.path.join(model_base,'iNat_all_extended/demosite-model-ensemble-resnext-inceptionV4-560-83.1/iNat_all_extended_ensemble_resnext_inceptionV4_560_83.1_model.2019.12.00.pytorch')\n\n# 2019 broken model\n# classification_model_path = os.path.join(model_base,'iNat_all_extended_buggy/demosite-model-ensemble-resnext-inceptionV4-560-81.0/iNat_all_extended_ensemble_resnext_inceptionV4_560_81.9_model.2019.10.00.pytorch')\n\n# 2018 model \nclassification_model_path = os.path.join(model_base,'iNat_original/inc4-incres2-560-78.5/inc4-incres2-560-78.5.model_deploy.pth.tar')\n\nassert(os.path.isfile(classification_model_path))\n\noutput_base = '/data/species_classification/output'\nmodel_name = os.path.basename(classification_model_path)\nclassification_output_file = os.path.join(output_base,'classifications_{}_{}.csv'.format(job_name,model_name))\n \n# Detection (i.e., bounding box generation) is optional; set to None \n# to disable detection\ndetection_model_path = None\n\n# This must be True if detection is enabled. Classification can be run\n# on the CPU or GPU.\nuse_gpu = True\n\n# List of image sizes to use, one per model in the ensemble. Images will be resized \n# and reshaped to square images prior to classification. \n#\n# We typically specify [560,560] if we're loading our Inception/InceptionResnet \n# ensemble. 
For ResNext, we typically specify [448].\n#\nimage_sizes = [560, 560]\n# image_sizes = [448]\n\nmak_k_to_print = 3\ndebug_max_images = -1\n\n\n#%% Path setup to import the classification code\n\nif (not api_root.lower() in map(str.lower,sys.path)):\n \n print(\"Adding {} to the python path\".format(api_root))\n sys.path.insert(0,api_root)\n\nfor s in subdirs_to_import:\n if (not s.lower() in map(str.lower,sys.path)):\n import_path = os.path.join(api_root,s)\n print(\"Adding {} to the python path\".format(import_path))\n sys.path.insert(0,import_path) \n\n\n#%% Import classification modules\n\nimport api as speciesapi\n\n\n#%% Build Latin --> common mapping\n\nlatin_to_common = {}\n\nif taxonomy_path != None:\n \n print(\"Reading taxonomy file\")\n \n # Read taxonomy file; takes ~1 minute\n df = pd.read_csv(taxonomy_path)\n df = df.fillna('')\n \n # Columns are:\n #\n # taxonID,scientificName,parentNameUsageID,taxonRank,vernacularName,wikipedia_url\n \n # Create dictionary by ID\n \n nRows = df.shape[0]\n \n for index, row in df.iterrows():\n \n latin_name = row['scientificName']\n latin_name = latin_name.strip()\n if len(latin_name)==0:\n print(\"Warning: invalid scientific name at {}\".format(index))\n latin_name = 'unknown'\n common_name = row['vernacularName']\n common_name = common_name.strip()\n latin_name = latin_name.lower()\n common_name = common_name.lower()\n latin_to_common[latin_name] = common_name\n \n print(\"Finished reading taxonomy file\")\n\n\n#%% Latin-->common lookup\n\ndef do_latin_to_common(latin_name):\n\n if len(latin_to_common) == 0:\n return latin_name\n \n latin_name = latin_name.lower()\n if not latin_name in latin_to_common:\n print(\"Warning: latin name {} not in lookup table\".format(latin_name))\n common_name = latin_name\n else:\n common_name = latin_to_common[latin_name]\n common_name = common_name.strip()\n \n if (len(common_name) == 0):\n print(\"Warning: empty result for latin name {}\".format(latin_name))\n common_name = latin_name\n\n return common_name\n\n\n#%% Create the model(s)\n\nassert os.path.isfile(classification_model_path)\nif detection_model_path != None:\n assert os.path.isfile(detection_model_path)\n\nprint(\"Loading model\")\nmodel = speciesapi.DetectionClassificationAPI(classification_model_path, \n detection_model_path, image_sizes, use_gpu)\nprint(\"Finished loading model\")\n\n\n#%% Prepare the list of images and query names\n\nqueries = None\n\nif isinstance(images_to_classify,str) and os.path.isdir(images_to_classify):\n \n images = glob.glob(os.path.join(images_to_classify,'**/*.*'), recursive=True)\n images = [fn for fn in images if os.path.isfile(fn)]\n queries = [os.path.basename(os.path.dirname(fn)) for fn in images]\n print('Loaded a folder of {} images'.format(len(images))) \n \nelif isinstance(images_to_classify,str) and os.path.isfile(images_to_classify):\n \n print(\"Reading image list file\")\n df_images = pd.read_csv(images_to_classify,header=None)\n df_images.columns = ['filename','query_string']\n nImages = len(images) \n print(\"Read {} image names\".format(len(images)))\n images = list(df_images.filename)\n queries = list(df_images.query_string)\n assert(len(queries) == len(images))\n \nelse:\n \n assert isinstance(images_to_classify,list)\n images = images_to_classify\n queries = None\n print('Processing list of {} images'.format(len(images)))\n \n\n#%% Classify images\n\nnErrors = 0\nnImagesClassified = 0\nnImages = len(images)\n\nif classification_output_file is not None:\n f = 
open(classification_output_file,'w+')\n\n# i_fn = 1; fn = images[i_fn] \nfor i_fn,fn in enumerate(images):\n \n print(\"Processing image {} of {}\".format(i_fn,nImages))\n fn = fn.replace('\\\\','/')\n query = ''\n if queries is not None:\n query = queries[i_fn]\n \n if images_to_classify_base is not None and len(images_to_classify_base) > 0:\n fn = os.path.join(images_to_classify_base,fn)\n\n # with torch.no_grad():\n # print('Classifying image {}'.format(fn))\n # def predict_image(self, image_path, topK=1, multiCrop=False, predict_mode=PredictMode.classifyUsingDetect):\n try:\n prediction = model.predict_image(fn, topK=min(5,mak_k_to_print), multiCrop=False, \n predict_mode=speciesapi.PredictMode.classifyOnly)\n nImagesClassified = nImagesClassified + 1\n \n except Exception as e:\n print(\"Error classifying image {} ({}): {}\".format(i_fn,fn,str(e)))\n nErrors = nErrors + 1\n continue\n\n # i_prediction = 0\n for i_prediction in range(0, min(len(prediction.species),mak_k_to_print)):\n latin_name = prediction.species[i_prediction]\n likelihood = prediction.species_scores[i_prediction]\n likelihood = '{0:0.3f}'.format(likelihood)\n common_name = do_latin_to_common(latin_name)\n s = '\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"'.format(\n i_fn,fn,query,i_prediction,latin_name,common_name,likelihood)\n if classification_output_file is not None:\n f.write(s + '\\n')\n print(s)\n \n if debug_max_images > 0 and i_fn >= debug_max_images:\n break\n\n# ...for each image\n \nif classification_output_file is not None:\n f.close()\n \nprint(\"Finished classifying {} of {} images ({} errors)\".format(nImagesClassified,nImages,nErrors))\n","sub_path":"classify_images.py","file_name":"classify_images.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"112980267","text":"from rest_framework import serializers\nfrom rest_framework.permissions import (\n AllowAny, IsAdminUser, IsAuthenticated\n)\n\nfrom .models import Profile\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n adress = serializers.CharField(required=False, allow_null=True, max_length=100)\n city = serializers.CharField(required=False, allow_null=True, max_length=100)\n email = serializers.EmailField(required=False)\n personal_number = serializers.IntegerField(required=False, allow_null=True, min_value=0)\n username = serializers.CharField(source='user.username')\n phone_number = serializers.CharField(required=False, allow_null=True, max_length=25)\n postal_code = serializers.IntegerField(required=False, allow_null=True, min_value=0)\n\n image = serializers.SerializerMethodField()\n #contacts = ProfileManyToManyListRelation(many=True, read_only=True)\n class Meta:\n model = Profile\n fields = (\n 'id',\n 'first_name',\n 'last_name',\n 'email',\n 'phone_number',\n 'adress',\n 'city',\n 'postal_code',\n 'personal_number',\n 'username',\n 'image'\n )\n read_only_fields = ('username', )\n\n def get_image(self, obj):\n if obj.image:\n return obj.image\n\n return ''\n","sub_path":"WeBooking/apps/profiles/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"176004597","text":"\nimport io,os\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nfrom PIL import Image,ImageEnhance,ImageChops\nimport cv2\nimport random\nimport skimage.io\nimport skimage.exposure\nimport aug_cfg\n# root_path is the image root directory; img_name is the image file name\n\ndef move(img_name,off): # translation; off is the list of shift offsets (in pixels)\n img = Image.open(img_name)\n for i in off:\n region = img.crop((i, 0, img.size[0], img.size[1]))\n offset = region.convert('RGB')\n save_name='{}_off_l{}.jpg'.format(os.path.splitext(os.path.basename(img_name))[0],i)\n offset.save(os.path.join(aug_cfg.out_path, save_name))\n for i in off:\n region = img.crop((0, 0, img.size[0]-i, img.size[1]))\n offset = region.convert('RGB')\n save_name='{}_off_r{}.jpg'.format(os.path.splitext(os.path.basename(img_name))[0],i)\n offset.save(os.path.join(aug_cfg.out_path, save_name))\n for i in off:\n region = img.crop((0, i, img.size[0], img.size[1]))\n offset = region.convert('RGB')\n save_name='{}_off_u{}.jpg'.format(os.path.splitext(os.path.basename(img_name))[0],i)\n offset.save(os.path.join(aug_cfg.out_path, save_name))\n for i in off:\n region = img.crop((0, 0, img.size[0], img.size[1]-i))\n offset = region.convert('RGB')\n save_name='{}_off_d{}.jpg'.format(os.path.splitext(os.path.basename(img_name))[0],i)\n offset.save(os.path.join(aug_cfg.out_path, save_name))\n\n\ndef flip(root_path,img_name): # flip the image horizontally\n img = Image.open(os.path.join(root_path, img_name))\n filp_img = img.transpose(Image.FLIP_LEFT_RIGHT)\n # filp_img.save(os.path.join(root_path,img_name.split('.')[0] + '_flip.jpg'))\n return filp_img\n\ndef aj_contrast(root_path,img_name): # adjust contrast, two methods: gamma / log\n image = skimage.io.imread(os.path.join(root_path, img_name))\n gam= skimage.exposure.adjust_gamma(image, 0.5)\n # skimage.io.imsave(os.path.join(root_path,img_name.split('.')[0] + '_gam.jpg'),gam)\n log= skimage.exposure.adjust_log(image)\n # skimage.io.imsave(os.path.join(root_path,img_name.split('.')[0] + '_log.jpg'),log)\n return gam,log\ndef rotation(root_path, img_name):\n img = Image.open(os.path.join(root_path, img_name))\n rotation_img = img.rotate(90) # rotation angle\n # rotation_img.save(os.path.join(root_path,img_name.split('.')[0] + '_rotation.jpg'))\n return rotation_img\n\ndef randomGaussian(root_path, img_name, mean=0, sigma=25): # Gaussian noise; mean is the offset, sigma the standard deviation\n image = Image.open(os.path.join(root_path, img_name))\n im = np.array(image)\n # r channel\n r = im[:,:,0].flatten()\n\n # g channel\n g = im[:,:,1].flatten()\n\n # b channel\n b = im[:,:,2].flatten()\n\n # compute the new pixel values\n for i in range(im.shape[0]*im.shape[1]):\n\n pr = int(r[i]) + random.gauss(mean,sigma)\n\n pg = int(g[i]) + random.gauss(mean,sigma)\n\n pb = int(b[i]) + random.gauss(mean,sigma)\n\n if(pr < 0):\n pr = 0\n if(pr > 255):\n pr = 255\n if(pg < 0):\n pg = 0\n if(pg > 255):\n pg = 255\n if(pb < 0):\n pb = 0\n if(pb > 255):\n pb = 255\n r[i] = pr\n g[i] = pg\n b[i] = pb\n im[:,:,0] = r.reshape([im.shape[0],im.shape[1]])\n\n im[:,:,1] = g.reshape([im.shape[0],im.shape[1]])\n\n im[:,:,2] = b.reshape([im.shape[0],im.shape[1]])\n gaussian_image = Image.fromarray(np.uint8(im))\n return gaussian_image\ndef randomColor(img_name,num): # random color jitter\n \"\"\"\n Apply color jitter to an image and save num jittered copies\n :param img_name: path of the input image\n :param num: number of jittered copies to generate\n :return: None (jittered images are saved to aug_cfg.out_path)\n \"\"\"\n for i in range(num):\n image = Image.open( img_name)\n random_factor = np.random.randint(0, 31) / 10. # random factor\n color_image = ImageEnhance.Color(image).enhance(random_factor) # adjust image saturation\n random_factor = np.random.randint(10, 21) / 10. # random factor\n brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor) # adjust image brightness\n random_factor = np.random.randint(10, 21) / 10. # random factor\n contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor) # adjust image contrast\n random_factor = np.random.randint(0, 31) / 10. # random factor\n res_image=ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # adjust image sharpness\n res_image = res_image.convert('RGB')\n save_name = '{}_ran_{}.jpg'.format(os.path.splitext(os.path.basename(img_name))[0], i)\n res_image.save(os.path.join(aug_cfg.out_path, save_name))","sub_path":"data_augmentation/aug_tool.py","file_name":"aug_tool.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"637831049","text":"'''\n101. Symmetric Tree\n\nGiven a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).\n\nFor example, this binary tree [1,2,2,3,4,4,3] is symmetric:\n\n 1\n / \\\n 2 2\n / \\ / \\\n3 4 4 3\nBut the following [1,2,2,null,3,null,3] is not:\n 1\n / \\\n 2 2\n \\ \\\n 3 3\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isSymmetric(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n# Best solution:\n# if not root:\n# return True\n# else:\n# return self.recIsSym(root.left, root.right)\n \n# def recIsSym(self, left, right):\n# if not left or not right:\n# if left == right:\n# return True\n# else:\n# return False\n# if left.val == right.val:\n# return self.recIsSym(left.left, right.right) and self.recIsSym(left.right, right.left)\n# else:\n# return False\n \n \n if root == None:\n return True\n if (not root.left) and (not root.right):\n return True\n elif (not (root.left and root.right)) or root.left.val != root.right.val:\n return False\n else:\n o, i = TreeNode(root.left.val), TreeNode(root.right.val)\n o.left, o.right = root.left.left, root.right.right\n i.left, i.right = root.right.left, root.left.right\n return self.isSymmetric(o) and self.isSymmetric(i)\n \n \n","sub_path":"SymmetricTree.py","file_name":"SymmetricTree.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"290007390","text":"from Tests.models import Student, Question, StudentAndQuestion\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n@receiver(post_save, sender=Question)\ndef add_question_to_students(sender, instance=None, created=None, *args, **kwargs):\n question = instance\n if created:\n StudentAndQuestion.objects.bulk_create([\n StudentAndQuestion(question=question, student_id=student_id)\n for student_id in Student.objects.values_list(\"id\", flat=True)\n ])\n\n\n@receiver(post_save, sender=Student)\ndef add_student_to_questions(sender, instance=None, created=None, *args, **kwargs):\n student = instance\n if created:\n StudentAndQuestion.objects.bulk_create([\n StudentAndQuestion(question=question, student=student)\n for question in Question.objects.all()\n ])\n\n","sub_path":"Tests/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"42576680","text":"
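# NOTE: fragment of an Ansible-style network-device STP module: it parses self.stp_cfg / self.interface_stp_cfg into the cur_cfg and existing dicts, and assumes 're' is imported and 'self.module' is set up elsewhere in the file.\n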
self.existing['stp_mode'] = 'stp'\n elif ('stp mode rstp' in self.stp_cfg):\n self.cur_cfg['stp_mode'] = 'rstp'\n self.existing['stp_mode'] = 'rstp'\n else:\n self.cur_cfg['stp_mode'] = 'mstp'\n self.existing['stp_mode'] = 'mstp'\n if self.stp_enable:\n if ('stp disable' in self.stp_cfg):\n self.cur_cfg['stp_enable'] = 'disable'\n self.existing['stp_enable'] = 'disable'\n else:\n self.cur_cfg['stp_enable'] = 'enable'\n self.existing['stp_enable'] = 'enable'\n if self.stp_converge:\n if ('stp converge fast' in self.stp_cfg):\n self.cur_cfg['stp_converge'] = 'fast'\n self.existing['stp_converge'] = 'fast'\n else:\n self.cur_cfg['stp_converge'] = 'normal'\n self.existing['stp_converge'] = 'normal'\n if self.edged_port:\n if (self.interface == 'all'):\n if ('stp edged-port default' in self.stp_cfg):\n self.cur_cfg['edged_port'] = 'enable'\n self.existing['edged_port'] = 'enable'\n else:\n self.cur_cfg['edged_port'] = 'disable'\n self.existing['edged_port'] = 'disable'\n elif ('stp edged-port enable' in self.interface_stp_cfg):\n self.cur_cfg['edged_port'] = 'enable'\n self.existing['edged_port'] = 'enable'\n else:\n self.cur_cfg['edged_port'] = 'disable'\n self.existing['edged_port'] = 'disable'\n if self.bpdu_filter:\n if (self.interface == 'all'):\n if ('stp bpdu-filter default' in self.stp_cfg):\n self.cur_cfg['bpdu_filter'] = 'enable'\n self.existing['bpdu_filter'] = 'enable'\n else:\n self.cur_cfg['bpdu_filter'] = 'disable'\n self.existing['bpdu_filter'] = 'disable'\n elif ('stp bpdu-filter enable' in self.interface_stp_cfg):\n self.cur_cfg['bpdu_filter'] = 'enable'\n self.existing['bpdu_filter'] = 'enable'\n else:\n self.cur_cfg['bpdu_filter'] = 'disable'\n self.existing['bpdu_filter'] = 'disable'\n if self.bpdu_protection:\n if ('stp bpdu-protection' in self.stp_cfg):\n self.cur_cfg['bpdu_protection'] = 'enable'\n self.existing['bpdu_protection'] = 'enable'\n else:\n self.cur_cfg['bpdu_protection'] = 'disable'\n self.existing['bpdu_protection'] = 'disable'\n if self.tc_protection:\n pre_cfg = self.stp_cfg.split('\\n')\n if ('stp tc-protection' in pre_cfg):\n self.cur_cfg['tc_protection'] = 'enable'\n self.existing['tc_protection'] = 'enable'\n else:\n self.cur_cfg['tc_protection'] = 'disable'\n self.existing['tc_protection'] = 'disable'\n if self.tc_protection_interval:\n if ('stp tc-protection interval' in self.stp_cfg):\n tmp_value = re.findall('stp tc-protection interval (.*)', self.stp_cfg)\n if (not tmp_value):\n self.module.fail_json(msg='Error: Can not find tc-protection interval on the device.')\n self.cur_cfg['tc_protection_interval'] = tmp_value[0]\n self.existing['tc_protection_interval'] = tmp_value[0]\n else:\n self.cur_cfg['tc_protection_interval'] = 'null'\n self.existing['tc_protection_interval'] = 'null'\n if self.tc_protection_threshold:\n if ('stp tc-protection threshold' in self.stp_cfg):\n tmp_value = re.findall('stp tc-protection threshold (.*)', self.stp_cfg)\n if (not tmp_value):\n self.module.fail_json(msg='Error: Can not find tc-protection threshold on the device.')\n self.cur_cfg['tc_protection_threshold'] = tmp_value[0]\n self.existing['tc_protection_threshold'] = tmp_value[0]\n else:\n self.cur_cfg['tc_protection_threshold'] = '1'\n self.existing['tc_protection_threshold'] = '1'\n if self.cost:\n tmp_value = re.findall('stp instance (.*) cost (.*)', self.interface_stp_cfg)\n if (not tmp_value):\n self.cur_cfg['cost'] = 'null'\n self.existing['cost'] = 'null'\n else:\n self.cur_cfg['cost'] = tmp_value[0][1]\n self.existing['cost'] = tmp_value[0][1]\n if 
(self.root_protection or self.loop_protection):\n            if ('stp root-protection' in self.interface_stp_cfg):\n                self.cur_cfg['root_protection'] = 'enable'\n                self.existing['root_protection'] = 'enable'\n            else:\n                self.cur_cfg['root_protection'] = 'disable'\n                self.existing['root_protection'] = 'disable'\n            if ('stp loop-protection' in self.interface_stp_cfg):\n                self.cur_cfg['loop_protection'] = 'enable'\n                self.existing['loop_protection'] = 'enable'\n            else:\n                self.cur_cfg['loop_protection'] = 'disable'\n                self.existing['loop_protection'] = 'disable'","sub_path":"Data Set/bug-fixing-5/c3d1f9b08549f12d3a0db52e2d9eb8a2f3f6c39d--fix.py","file_name":"c3d1f9b08549f12d3a0db52e2d9eb8a2f3f6c39d--fix.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461892287","text":""}
+{"seq_id":"461623025","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login as auth_login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import Group, User\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import ContactForm, ReviewForm, CheckOutForm, CourseView\nfrom .login_form import LoginForm, UserLoginForm\nfrom .models import Course, CourseReview, Course_Category, CourseInstructor\n\n# Create your views here.\n\n\ndef payment_successful(request):\n    return render(request, \"learning/payment_succesful.html\")\n\n\ndef tintuc(request):\n    return render(request, \"learning/tintuc.html\")\n\n\ndef test(request):\n    return render(request, \"learning/test.html\")\n\n\ndef UserRegister(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            group = Group.objects.get(name='NormalCustomer')\n            user.groups.add(group)\n            messages.success(request, 'Account created successfully')\n            return redirect('/accounts/login')\n    else:\n        form = UserCreationForm()\n    return render(request, 'registration/register.html', {'form': form})\n\n\ndef InstructorLoginView(request):\n    error = ''\n    form = UserLoginForm(request.POST)\n    if request.method == 'POST':\n\n        if form.is_valid():\n            username = request.POST['username']\n            password = request.POST['password']\n            user = authenticate(username=username, password=password)\n            if user:\n                if user.is_active:\n                    auth_login(request, user)\n                    return HttpResponseRedirect(\"/contact\",\n                                                settings.LOGIN_REDIRECT_URL)\n            else:\n                error = 'Wrong username or password!'\n    form = UserLoginForm()\n    return render(request, \"learning/instructor_login.html\", {\"form\" : form, \"error\" : error})\n\n\n'''\ndef flight(request, flight_id):\n    try:\n        flight = Course_Category.objects.get(pk=flight_id)\n    except Course_Category.DoesNotExist:\n        raise Http404(\"Flight does not exist.\")\n    context = {\n        \"flight\": flight,\n        \"passengers\": flight.passengers.all(),\n\n    }\n    return render(request, \"/learning\", context)\n'''\n'''\n\n'''\n\n\ndef khoahoc(request):\n    course = Course.objects.all()\n    return render(request, \"learning/khoahoc.html\", {\"course\": course})\n\n\ndef index(request):\n    context = {\n        \"courses\": Course.objects.all(),\n    }\n    return render(request, \"learning/index.html\", context)\n\n\ndef course_overview(request, course_id):\n    comment = CourseReview.objects.filter(course_id=course_id).order_by('-date_created')\n    form = ReviewForm(request.POST)\n    if form.is_valid():\n        form.save()\n        messages.success(request, 'Sent successfully !!!')\n\n    try:\n        course = Course.objects.get(pk=course_id)\n    except Course.DoesNotExist:\n        raise Http404(\"Course does not exist.\")\n    context = {\n        \"course\": course,\n        \"form\": form,\n        \"comment\": comment,\n\n    }\n    return render(request, \"learning/course_overview.html\", context)\n\n\ndef checkout(request, course_id):\n    form = CheckOutForm(request.POST)\n    if form.is_valid():\n        form.save()\n        messages.success(request, 'Invoice saved successfully !!!')\n        return redirect('/payment_succesful')\n    try:\n        course = Course.objects.get(pk=course_id)\n    except Course.DoesNotExist:\n        raise Http404(\"Course does not exist.\")\n    context = {\n        \"course\": course,\n        \"form\" : form\n    }\n    return render(request, \"learning/checkout.html\", context)\n\n\ndef ContactView(request):\n    form = ContactForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        messages.success(request, 'Sent successfully !!!')\n    else:\n        error = 'The information entered is invalid!'\n        form = ContactForm()\n    context = {'form': form}\n\n    return render(request, 'learning/contact.html', context)\n\n\ndef nganh_hoc(request, course_cate_id):\n    courses = Course.objects.filter(course_cate_id=course_cate_id)\n    temp = Course_Category.objects.get(course_cate_id = course_cate_id)\n    return render(request, \"learning/nganh_hoc.html\", {\"courses\": courses, \"nganhhoc\" : temp})\n\ndef instructor(request):\n    ins = CourseInstructor.objects.all()\n    context = {\n        \"ins\" : ins\n    }\n    return render(request, \"learning/instructor.html\", context)\n\n\ndef add_new_course(request):\n    message = ''\n    form = CourseView(request.POST)\n    if form.is_valid():\n        form.save()\n        message = 'Course saved successfully !!!'\n        return redirect('/ins_index')\n    course = Course.objects.all()\n    context = {\n        \"course\": course,\n        \"form\" : form,\n        \"message\" :message\n    }\n    return render(request, \"learning/ins_add_course.html\", context)\n\n\ndef ins_index(request):\n\n    course = Course.objects.all()\n    #usr = User.objects.filter(username=username).filter(groups__name='Instructor')\n    #if usr.exists():\n        #mess = True\n    #else:\n        #mess = False\n    context = {\n        \"course\" : course\n    }\n    return render(request, \"learning/instructor_index.html\", context)\n\n\ndef delete_view(request, course_id):\n    context = {}\n    # fetch the object related to passed id\n    obj = get_object_or_404(Course, course_id=course_id)\n    if request.method == \"POST\":\n        obj.delete()\n        return HttpResponseRedirect(\"/ins_index\")\n\n    return render(request, \"learning/delete_view.html\", context)\n\n\ndef update_view(request, course_id):\n\n    context = {}\n\n    # fetch the object related to passed id\n    obj = get_object_or_404(Course, course_id = course_id)\n\n    # pass the object as instance in form\n    form = CourseView(request.POST or None, instance=obj)\n\n    if form.is_valid():\n        form.save()\n        return HttpResponseRedirect(\"/ins_index\")\n\n    context[\"form\"] = form\n\n    return render(request, \"learning/update_view.html\", context)\n\ndef InstructorRegister(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            group = Group.objects.get(name='Instructor')\n            user.groups.add(group)\n            messages.success(request, 'Instructor account created successfully')\n            return redirect('/accounts/login')\n    else:\n        form = UserCreationForm()\n    return render(request, 'learning/instructor_register.html', {'form': form})\n\n","sub_path":"webside/learning/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"48960531","text":"#\r\n# [812] Rotate String\r\n#\r\n# https://leetcode.com/problems/rotate-string/description/\r\n#\r\n# algorithms\r\n# Easy (49.15%)\r\n# Total Accepted: 22.5K\r\n# Total Submissions: 45.9K\r\n# Testcase Example: '\"abcde\"\\n\"cdeab\"'\r\n#\r\n# We are given two strings, A and B.\r\n#\r\n# A shift on A consists of taking string A and moving the leftmost character to\r\n# the rightmost position. For example, if A = 'abcde', then it will be 'bcdea'\r\n# after one shift on A. Return True if and only if A can become B after some\r\n# number of shifts on A.\r\n#\r\n#\r\n# Example 1:\r\n# Input: A = 'abcde', B = 'cdeab'\r\n# Output: true\r\n#\r\n# Example 2:\r\n# Input: A = 'abcde', B = 'abced'\r\n# Output: false\r\n#\r\n#\r\n# Note:\r\n#\r\n#\r\n# A and B will have length at most 100.\r\n#\r\n#\r\n#\r\nclass Solution:\r\n    def rotateString(self, A, B):\r\n        \"\"\"\r\n        :type A: str\r\n        :type B: str\r\n        :rtype: bool\r\n        \"\"\"\r\n        if not A:\r\n            return not B\r\n        if not B:\r\n            return not A\r\n        if len(A) != len(B):\r\n            return False\r\n        for shift in range(0, len(A)):\r\n            # use slice to shift\r\n            newA = A[shift:] + A[0:shift]\r\n            if newA == B:\r\n                return True\r\n        return False\r\n\r\n\r\ndef main():\r\n    print(Solution().rotateString('abcde', B='cdeab'))\r\n    print(Solution().rotateString(A='abcde', B='abced'))\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"Easy/796.rotate-string.python3.py","file_name":"796.rotate-string.python3.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"364914972","text":"import os\n\ndef find_files(suffix, path):\n    \"\"\"\n    Find all files beneath path with file name suffix.\n\n    Note that a path may contain further subdirectories\n    and those subdirectories may also contain further subdirectories.\n\n    There is no limit to how deep the subdirectories can be.\n\n    Args:\n      suffix(str): suffix of the file name to be found\n      path(str): path of the file system\n\n    Returns:\n       a list of paths\n    \"\"\"\n\n    list_of_paths = []\n    try:\n        path_objects = os.listdir(path)\n    except:\n        print(\"The path you have selected is not valid / there is no file in your folder\")\n        return None\n    for object in path_objects:\n        if object.endswith(suffix) and os.path.isfile(os.path.join(path, object)):\n            list_of_paths.append(os.path.join(path, object))\n        else:\n            try:\n                if len(find_files(suffix,os.path.join(path, object))) > 0:\n                    list_of_paths += find_files(suffix,os.path.join(path, object))\n            except:\n                continue\n\n    return list_of_paths\n","sub_path":"Python/1. Data structures/2.find_file_suffixes.py","file_name":"2.find_file_suffixes.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"465327871","text":"import socket\nimport os\nfrom threading import Thread\n\ntcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntcp_sock.bind( ('0.0.0.0', 6667) )\ntcp_sock.listen(100)\n\ndef handle_thread(conn) :\n    while True:\n        try :\n            perintah = conn.recv(100)\n            nama = conn.recv(100)\n\n            perintah = perintah.decode('ascii')\n            nama = nama.decode('ascii')\n            if perintah==\"new\":\n                file = open(nama,\"w\")\n                file.close()\n                kirim=\"OK File \"+nama+\" was created successfully\\n\"\n                conn.send(kirim.encode('ascii'))\n                print(\"=>File \"+nama+\" was created successfully\\n\")\n            elif perintah==\"del\" :\n                os.remove(nama)\n                kirim=\"OK File \"+nama+\" was deleted successfully\\n\"\n                conn.send(kirim.encode('ascii'))\n                print(\"=>File \"+nama+\" was deleted successfully\\n\")\n            elif perintah==\"read\":\n                f = open(nama,\"r\")\n                kirim=f.read()\n                f.close()\n                conn.send(kirim.encode('ascii'))\n            elif perintah==\"exit\":\n                conn.close()\n                break\n            else :\n                print(\"\\n\")\n        except(socket.error) :\n            conn.close()\n            print(\"Disconnected\")\n            break\n\nwhile True :\n    conn, client_address = tcp_sock.accept()\n    t=Thread(target=handle_thread, args=(conn,))\n    t.start()\n","sub_path":"TCP(Thread, Select, Single)/File Management(With Thread)/Immanuel Sibarani_155150201111323/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"531080897","text":"\"\"\"\nv1 ip_tools resource\n\"\"\"\n\nimport socket\nimport struct\nfrom zunzuncito import tools\n\n\nclass APIResource(object):\n\n    @tools.allow_methods('get')\n    def dispatch(self, request, response):\n        request.log.debug(tools.log_json({\n            'API': request.version,\n            'Method': request.method,\n            'URI': request.URI,\n            'vroot': request.vroot\n        }, True))\n\n        data = {}\n        try:\n            my_ip = True if request.path[0] == 'ip' else False\n        except Exception:\n            my_ip = False\n\n        if my_ip:\n            ip = request.environ.get('REMOTE_ADDR', 0)\n            data['ip'] = ip\n            data['inet_aton'] = struct.unpack(\"!I\", socket.inet_aton(ip))[0]\n        else:\n            data['API'] = request.version\n            data['ip'] = request.environ.get('REMOTE_ADDR', 0)\n            data['URI'] = request.URI\n            data['method'] = request.method\n            data['city'] = request.environ.get('HTTP_X_APPENGINE_CITY', 0)\n            data['latlong'] = request.environ.get(\n                'HTTP_X_APPENGINE_CITYLATLONG', 0)\n            data['country'] = request.environ.get(\n                'HTTP_X_APPENGINE_COUNTRY', 0)\n\n        return tools.log_json(data, 4)\n","sub_path":"my_api/default/v1/zun_ip_tools/zun_ip_tools.py","file_name":"zun_ip_tools.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"509836783","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/rbtools/utils/repository.py\n# Compiled at: 2020-04-14 20:27:46\nfrom __future__ import unicode_literals\n\ndef get_repository_id(repository_info, api_root, repository_name=None):\n    \"\"\"Get the repository ID from the server.\n\n    This will compare the paths returned by the SCM client\n    with those on the server, and return the id of the first\n    match.\n    \"\"\"\n    detected_paths = repository_info.path\n    if not isinstance(detected_paths, list):\n        detected_paths = [\n         detected_paths]\n    repositories = api_root.get_repositories(only_fields=b'id,name,mirror_path,path', only_links=b'')\n    for repo in repositories.all_items:\n        if repo.name == repository_name or repo.path in detected_paths or getattr(repo, b'mirror_path', None) in detected_paths:\n            return repo.id\n\n    return","sub_path":"pycfiles/RBTools-1.0.3-py2.7/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"594124131","text":"'''\r\nA set of common units\r\n@author: Kosh\r\n'''\r\nfrom streampy.units.base.pooled import Pool, Worker as Base\r\nfrom time import sleep\r\nfrom copy import deepcopy\r\n \r\nclass Worker(Base):\r\n    '''\r\n    Sort the data by key so that reduce works better\r\n    '''\r\n\r\n    def run(self):\r\n        '''\r\n        Collect data until the buffer is full or a timeout expires,\r\n        then sort it and pass it on.\r\n        The trick of handing over half of the buffer gives the\r\n        remaining latecomers a chance to fill it with a small\r\n        value instead of scrambling everything.\r\n        '''\r\n        \r\n        dataBuffer = []\r\n        \r\n        while True:\r\n            inKey = 'emit'\r\n            outKey = 'emit'\r\n            \r\n            timeout = self.config.get('timeout', 1.0)\r\n            size = self.config.get('size', 1000)\r\n            \r\n            \r\n            try:\r\n                while len(dataBuffer) < size :\r\n                    dataBuffer.append(self.ins[inKey].get(block=True, \r\n                                                          timeout=timeout))\r\n#                    print('dataBuffer', len(dataBuffer))\r\n            except:\r\n#                print('timeout')\r\n                pass\r\n            \r\n#            print(dataBuffer)\r\n#            # sort and cut off half\r\n\r\n            packages = sorted(dataBuffer, key=lambda item:item['meta']['key'])\r\n            \r\n            del dataBuffer\r\n            dataBuffer = []\r\n#            print(packages)\r\n#            sleep(10)\r\n\r\n            for package in packages:\r\n                if outKey in self.outs:\r\n                    for queue in self.outs[outKey]:\r\n                        queue.put(package)\r\n            \r\n            del packages\r\n    ","sub_path":"streampy/units/mapReduce/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"63498078","text":"import json\n\nimport requests\nfrom flakon import SwaggerBlueprint\nfrom flask import render_template, request\nfrom flask_login import current_user, logout_user, login_required, login_user\n\nfrom APIGateway.classes.User import User\nfrom APIGateway.forms import UserForm, LoginForm\nfrom APIGateway.urls import *\n\nauthapi = SwaggerBlueprint('gateway', '__name__', swagger_spec=os.path.join(YML_PATH, 'auth-api.yaml'))\n\n\n# Renders the Home page (index.html).\n# It renders different data, depending on whether a user is logged in or not.\n@authapi.operation('home')\ndef _home():\n    # Stories is an empty list, so it can be iterated in the HTML\n    stories = []\n\n    # If there's a logged-in user, we get their stories\n    if current_user is not None and hasattr(current_user, 'id'):\n        s = requests.get(STORY_URL + '/stories/users/{}'.format(current_user.id))\n\n        if check_service_up(s):\n            if s.status_code < 300:\n                stories = s.json()\n\n    return render_template(\"index.html\", stories=stories, home_url=GATEWAY_URL)\n\n\n# Renders the Register page (create_user.html)\n@authapi.operation('getRegisterPage')\ndef _get_reg():\n    form = UserForm()\n    return render_template(\"create_user.html\", form=form, home_url=GATEWAY_URL)\n\n\n# The operation to register a new user into the service.\n@authapi.operation('register')\ndef _register():\n    form = UserForm()\n\n    # Simple checks on the input data\n    if form.validate_on_submit():\n        data = ({\"firstname\": form.data['firstname'],\n                \"lastname\": form.data['lastname'],\n                \"password\": form.data['password'],\n                \"email\": form.data['email'],\n                \"dateofbirth\": str(form.data['dateofbirth'])})\n        try:\n            x = requests.post(USER_URL + '/users/create', data=json.dumps(data))\n        except requests.exceptions.ConnectionError:\n            return service_not_up()\n\n        if check_service_up(x):\n            body = x.json()\n\n            # If everything's fine, redirect to a list of all the registered users\n            if x.status_code < 300:\n                return redirect(url_for(\"users._get_all_users\"))\n            # Else we flash the message the microservice returned\n            else:\n                flash(body['description'], 'error')\n\n    # If we get here, the form wasn't valid so just update the page\n    return render_template(\"create_user.html\", form=form, home_url=GATEWAY_URL)\n\n\n# Renders the Login page (login.html)\n@authapi.operation('getLoginPage')\ndef _get_log():\n    form = LoginForm()\n    return render_template('login.html', form=form, home_url=GATEWAY_URL)\n\n\n# The operation to log an already registered user into the service.\n@authapi.operation('login')\ndef _login():\n    form = request.form\n\n    # Get input data, then send it to the Users microservice\n    data = ({\"email\": form['email'],\n             \"password\": form['password']})\n    try:\n        x = requests.post(USER_URL + '/users/login', data=json.dumps(data))\n    except requests.exceptions.ConnectionError:\n        return service_not_up()\n\n    if check_service_up(x):\n        body = x.json()\n\n        # If the email and password were correct\n        if x.status_code < 300:\n\n            # flask_login requires an instance of a class User, then redirect to the Home\n            user = User(body['id'], body['firstname'], body['lastname'], body['email'])\n            login_user(user)\n            return redirect(url_for('gateway._home'))\n\n        # Else flash the returned error and refresh the login page to retry the login\n        else:\n            flash(body['description'], 'error')\n            return redirect(url_for('gateway._get_log'))\n    else:\n        return redirect(url_for('gateway._get_log'))\n\n\n# The operation to log a logged-in user out of the service\n@authapi.operation('logout')\n@login_required\ndef _logout():\n    logout_user()\n    return redirect(url_for(\"gateway._home\"))\n\n\n@authapi.operation('getSearchPage')\ndef _get_search():\n    return render_template('search.html', home_url=GATEWAY_URL)\n\n\n@authapi.operation('search')\ndef _search():\n    form = request.form\n    query = form['query']\n    users_data = []\n    stories_data = []\n\n    try:\n        # Search in users\n        users_req = requests.get(USER_URL + '/search?query=' + query)\n        if users_req.status_code != 204 and users_req.status_code < 300:\n            users_data = users_req.json()\n        # Search in stories\n        stories_req = requests.get(STORY_URL + '/search?query=' + query)\n        if stories_req.status_code != 204 and stories_req.status_code < 300:\n            stories_data = stories_req.json()\n    except requests.exceptions.ConnectionError:\n        return service_not_up()\n\n    ok_response = [200, 204]\n\n    # Check if both are successful\n    if users_req.status_code == 204 and stories_req.status_code == 204:\n        flash(\"No match for the searched string\", 'error')\n        context_vars = {\"list_of_users\": users_data, \"list_of_stories\": stories_data,\n                        \"home_url\": GATEWAY_URL}\n        return render_template(\"search.html\", **context_vars)\n    elif users_req.status_code not in ok_response or stories_req.status_code not in ok_response:\n        flash(stories_data['description'], 'error')\n        return redirect(url_for('gateway._search'), 304)\n    else:\n        context_vars = {\"list_of_users\": users_data, \"list_of_stories\": 
stories_data,\n \"home_url\": GATEWAY_URL}\n print(users_data, stories_data)\n return render_template(\"search.html\", **context_vars)\n","sub_path":"Homework3/dwr-api-gateway/APIGateway/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576187585","text":"from django.contrib import admin\nfrom mptt.admin import DraggableMPTTAdmin\nfrom . models import (\n #AbstractListing,\n #\n Demand,\n Powertrain,\n Site,\n #\n Plant,\n Scenario,\n #\n Clutch,\n Engine,\n Pump,\n #\n Pipe,\n Segment,\n Feeder,\n #\n Borehole,\n #\n Donor,\n #\n Funding,\n\n\n)\n\nfrom pprint import pprint\n\n\ndef data_required(modeladmin, request, queryset):\n for obj in queryset:\n obj.data_required()\ndata_required.short_description = '@precondition decorator: data required are present'\n\n\nclass AbstractListingAdmin(admin.ModelAdmin):\n list_display = [\n 'name',\n 'description',\n ]\n\n\nclass AbstractAlternativeAdmin(DraggableMPTTAdmin):\n list_display = [\n 'tree_actions',\n 'indented_title',\n 'alternative_genesis'\n ]\n list_display_links = [\n 'indented_title',\n ]\n list_filter = [\n 'alternative_genesis'\n ]\n\n\nclass ClutchAdmin(AbstractListingAdmin):\n list_display = AbstractListingAdmin.list_display + [\n 'diameter',\n ]\n list_editable = [\n 'diameter',\n ]\n\n\nclass EngineAdmin(AbstractListingAdmin):\n list_display = AbstractListingAdmin.list_display + [\n 'standard_clutch',\n 'standard_driving_pulley_diameter',\n ]\n\n def standard_driving_pulley_diameter(self, obj):\n if obj.standard_clutch:\n return obj.standard_clutch.diameter\n standard_driving_pulley_diameter.short_description = 'Driven pulley diam'\n\n\nclass DemandAdmin(AbstractAlternativeAdmin):\n list_display = AbstractAlternativeAdmin.list_display + [\n 'inherited_outcome',\n 'design_years',\n 'growth_rate',\n 'resident_population',\n ]\n\n\nclass PowertrainAdmin(AbstractAlternativeAdmin):\n list_display = AbstractAlternativeAdmin.list_display + [\n 'engine',\n 'pump',\n 'clutch_pulley_size',\n 'driven_pulley_size',\n ]\n\n\nclass DonorAdmin(AbstractListingAdmin):\n pass\n\n\nclass FundingInline(admin.TabularInline):\n model = Funding\n\n\nclass SegmentInline(admin.TabularInline):\n model = Segment\n\n\nclass FeederAdmin(admin.ModelAdmin):\n inlines = [SegmentInline,]\n list_display = [\n 'plant',\n '__str__',\n 'upload_land_survey',\n ]\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Makes Plant field read only after saving the object for the first time.\n \"\"\"\n if obj: # editing an existing object\n return self.readonly_fields + ('plant',)\n else:\n return self.readonly_fields\n\n\nclass PlantAdmin(AbstractListingAdmin):\n list_display = AbstractListingAdmin.list_display + [\n #'',\n ]\n inlines = [\n FundingInline,\n ]\n\n\nclass ScenarioAdmin(AbstractListingAdmin):\n list_display = AbstractListingAdmin.list_display + [\n 'plant',\n 'demand',\n 'borehole',\n 'powertrain',\n ]\n actions = [\n data_required,\n ]\n list_editable = [\n 'demand',\n 'powertrain',\n ]\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Makes Plant field read only after saving the object for the first time.\n \"\"\"\n if obj: # editing an existing object\n return self.readonly_fields + ('plant',)\n else:\n return self.readonly_fields\n\n\nclass SiteAdmin(admin.ModelAdmin):\n list_display = [\n 'elevation',\n 'is_humid',\n ]\n\n\nclass BoreholeAdmin(AbstractListingAdmin, SiteAdmin):\n list_display = 
AbstractListingAdmin.list_display + \\\n        SiteAdmin.list_display + [\n            'swl',\n        ]\n\n\n\nclass ProvaAdmin(admin.ModelAdmin):\n    pass\n\n\nadmin.site.register(Demand, DemandAdmin)\nadmin.site.register(Powertrain, PowertrainAdmin)\nadmin.site.register(Site, SiteAdmin)\n\nadmin.site.register(Clutch, ClutchAdmin)\nadmin.site.register(Engine, EngineAdmin)\nadmin.site.register(Pump)\n\nadmin.site.register(Pipe)\nadmin.site.register(Segment)\nadmin.site.register(Feeder, FeederAdmin)\n\nadmin.site.register(Borehole, BoreholeAdmin)\n\nadmin.site.register(Scenario, ScenarioAdmin)\nadmin.site.register(Plant, PlantAdmin)\n\nadmin.site.register(Donor, DonorAdmin)\n#admin.site.register(Funding, FundingAdmin)\n\n\n","sub_path":"albero/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"336281567","text":"from tkinter import *\nfrom PIL import Image, ImageTk\nfrom tkinter import ttk, messagebox\nimport sqlite3\n\n\nclass CategoryClass:\n    def __init__(self, root):\n        self.root = root\n        self.root.geometry('1065x500+200+110')\n        self.root.title('Inventory Management System')\n        self.root.config(bg='white')\n        self.root.focus_force()\n        root.resizable(False, False)\n        # variable\n        self.var_catid=StringVar()\n        self.var_name=StringVar()\n        # title\n        title = Label(self.root, text='Manage Product Category', font=('times of roman', 25, 'bold',), bg='#F7DC6F',\n                      bd=3, relief=RIDGE,\n                      fg='black')\n        title.pack(side=TOP, fill=X, padx=15, pady=10)\n        lbl_name = Label(self.root, text='Enter Category Name', font=('times of roman', 20, 'bold',), bg='white',\n                         fg='black')\n        lbl_name.place(x=40, y=70)\n\n        en_name = Entry(self.root, textvariable=self.var_name, font=('times of roman', 15, 'bold',), bg='lightyellow',\n                        fg='black')\n        en_name.place(x=340, y=70,width=310)\n        button_add = Button(self.root, text='Add', cursor='hand2', command=self.add,font=('gouldy old style', 15),\n                            bg='#F0B27A')\n        button_add.place(x=360, y=120, width=120, height=28)\n        button_delete = Button(self.root, text='Delete', cursor='hand2',command=self.delete, font=('gouldy old style', 15),\n                               bg='#F0B27c')\n        button_delete.place(x=510, y=120, width=120, height=28)\n\n        # Tree view for category details\n\n        cat_frame = Frame(self.root, bd=2, relief=RIDGE)\n        cat_frame.place(x=685,y=70,relwidth=0.35, height=100)\n\n        scrolly = Scrollbar(cat_frame, orient=VERTICAL)\n        scrollx = Scrollbar(cat_frame, orient=HORIZONTAL)\n        self.cat_table = ttk.Treeview(cat_frame, columns=(\n            'cid', 'name'),yscrollcommand=scrolly.set, xscrollcommand=scrollx.set)\n        scrollx.pack(side=BOTTOM, fill=X)\n        scrolly.pack(side=RIGHT, fill=Y)\n        scrollx.config(command=self.cat_table.xview)\n        scrolly.config(command=self.cat_table.yview)\n\n        self.cat_table.heading('cid', text='Category ID')\n        self.cat_table.heading('name', text='Name')\n\n\n        self.cat_table['show'] = 'headings'\n\n        self.cat_table.column('cid', width=90)\n        self.cat_table.column('name',width=90)\n\n        self.cat_table.pack(fill=BOTH, expand=1)\n        self.cat_table.bind(\"<ButtonRelease-1>\", self.getdata)\n        self.show()\n        # images\n        self.icon1 = Image.open('images/cat.jpg')\n        self.icon1 = self.icon1.resize((500, 280), Image.ANTIALIAS)\n        self.icon1 = ImageTk.PhotoImage(self.icon1)\n        self.lab_icon1=Label(self.root,image=self.icon1,bd=2,relief=RAISED)\n        self.lab_icon1.place(x=20,y=190)\n\n        self.icon2 = Image.open('images/category.jpg')\n        self.icon2 = self.icon2.resize((500, 280), Image.ANTIALIAS)\n        self.icon2 = ImageTk.PhotoImage(self.icon2)\n        self.lab_icon2 = Label(self.root, image=self.icon2, bd=2, relief=RAISED)\n        self.lab_icon2.place(x=540, y=190)\n\n    # database functions\n    def add(self):\n        con = sqlite3.connect(database=r'ims.db')\n        cur = con.cursor()\n        try:\n            if self.var_name.get() == '':\n                messagebox.showerror('Error', \"Category Name can't be blank\", parent=self.root)\n            else:\n                cur.execute(\"Select * from category where name=?\",(self.var_name.get(),))\n                row = cur.fetchone()\n                if row != None:\n                    messagebox.showerror(\"Error\", 'Category already exists', parent=self.root)\n                else:\n                    cur.execute(\"Insert into category( name) values(?)\",(self.var_name.get(),))\n                    con.commit()\n                    messagebox.showinfo(\"Success\",'Category added successfully', parent=self.root)\n                    self.show()\n        except Exception as ex:\n            messagebox.showerror('Error', f\"Error due to:{str(ex)}\")\n\n    def delete(self):\n        con = sqlite3.connect(database=r'ims.db')\n        cur = con.cursor()\n        try:\n            if self.var_catid.get() == '':\n                messagebox.showerror('Error', \"Category Name can't be blank\", parent=self.root)\n            else:\n                cur.execute(\"Select * from category where name=?\", (self.var_catid.get(),))\n                row = cur.fetchone()\n                if row == None:\n                    messagebox.showerror(\"Error\", 'Name does not exist', parent=self.root)\n                else:\n                    op = messagebox.askyesno('Confirm', 'Do you want to delete?', parent=self.root)\n                    if op == True:\n                        cur.execute(\"delete from category where name=?\",(self.var_catid.get(),))\n                        con.commit()\n                        messagebox.showinfo('Delete', 'Deleted successfully', parent=self.root)\n                        self.show()\n        except Exception as ex:\n            messagebox.showerror('Error', f\"Error due to:{str(ex)}\")\n\n    def show(self):\n        con = sqlite3.connect(database=r'ims.db')\n        cur = con.cursor()\n        try:\n            cur.execute('Select * from category')\n            rows = cur.fetchall()\n            self.cat_table.delete(*self.cat_table.get_children())\n            for row in rows:\n                self.cat_table.insert(\"\", END, values=row)\n        except Exception as ex:\n            messagebox.showerror('Error', f\"Error due to:{str(ex)}\")\n\n    def getdata(self, ev):\n        f = self.cat_table.focus()\n        content = (self.cat_table.item(f))\n        row = content['values']\n        self.var_catid.set(row[1])\n        self.var_name.set(row[1])\n\nif __name__ == '__main__':\n    root = Tk()\n    obj = CategoryClass(root)\n    root.mainloop()\n","sub_path":"category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"220819415","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May  1 16:15:21 2017\n\n@author: clairelasserre\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport xlrd\nfrom matplotlib import style\nstyle.use(\"ggplot\")\nfrom sklearn.cluster import KMeans\nfrom time import time\nimport numpy as np\n\nfrom sklearn import metrics\nfrom sklearn.datasets import load_digits\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\nimport pandas as pd\n\npull=[]\nwb1 = xlrd.open_workbook('Ex16sat.xltx')\nsh1 = wb1.sheet_by_index(0)\ndatanames=[]\ndataminutes= []\nstationnames=[]\n\n\n\nfor b in range (4,sh1.ncols-3):\n    datanames.append(sh1.cell(2,b).value)\n\ns0=120\nfor i in range (96):\n    if (s0==1440):\n        s0=0\n    dataminutes.append(s0)\n    s0=s0+15\n\n\n\nfor a in range(4,sh1.nrows-1):\n    stationnames.append(sh1.cell(a,1).value)\n    # I want row 0 of pull to be data0\nfor b in range(4,sh1.ncols-3) :\n    ligne=[]\n    for a in range(4,sh1.nrows-1):\n        ligne.append(sh1.cell(a,b).value)\n    pull.append(ligne)\n\n    # here pull is a table of rows: row 0 = entries for all stations for data 0,\n    # so there are as many rows as time slots and as many columns as stations\ndata = pull\n\ndef bench_k_means(estimator, data):\n    t0 = time()\n    estimator.fit(data)\n    print('time taken for clustering: ')\n    print(time() - t0)\n    print(\"the labels are \", estimator.labels_)\n\n\n\ndef GetCentroid(estimator):\n    return estimator.cluster_centers_\n\ndef GetCentroidAverageCoor(estimator):\n    CentroidAverage=[]\n    centroids = GetCentroid(estimator)\n    nstations = len(centroids[0])\n    for i in range (len(centroids)):\n        m=0\n        for j in range (nstations):\n            m = m+centroids[i,j]\n        CentroidAverage.append(m/(nstations))\n    return CentroidAverage\n\n\n# Plot the Models Classifications\n# Store the inputs as a Pandas Dataframe and set the column names\ndef DensityLongonUnderground(nclusters,init):\n    X= dataminutes\n    Y=[]\n\n    if (init==1):\n        estimator = KMeans(init='k-means++', n_clusters=nclusters, n_init=10)\n        bench_k_means(estimator, data=data)\n    else:\n        estimator = KMeans(init='random', n_clusters=nclusters, n_init=10)\n        bench_k_means(estimator, data=data)\n    labels = estimator.labels_\n    centroids = GetCentroidAverageCoor(estimator)\n    for i in range(len(labels)):\n        Y.append(centroids[labels[i]])\n    n=len(X)\n    Xprime = X[:n-8]\n    Xprime = X [n-8:n]+Xprime\n    Yprime = Y[:n-8]\n    Yprime = Y [n-8:n]+Yprime\n\n\n    plt.plot(Xprime,Yprime)\n    #plt.axis([0, 750, 0, 300])\n    plt.xlabel('time in minutes after midnight')\n    plt.ylabel('associated centroid density')\n    plt.show()\n\n\n","sub_path":"densitytube.py","file_name":"densitytube.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"500938469","text":"import pygame, sys, random\r\nx= 0\r\ny = 0\r\nhd = 1\r\nvd = 1\r\nv = 10\r\none,two,three = 60,60,80\r\npygame.init()\r\nscreen = pygame.display.set_mode((700,700))\r\nclock = pygame.time.Clock()\r\nimg = pygame.image.load('imj.png')\r\n\r\nwhile 1:\r\n    for i in pygame.event.get():\r\n        if i.type == pygame.QUIT:\r\n            pygame.quit()\r\n            sys.exit()\r\n    screen.fill((one, two, three))\r\n    screen.blit(img,(x,y))\r\n    key = pygame.key.get_pressed()\r\n    if key[pygame.K_LEFT] or key[ord('a')]:\r\n        x += -v\r\n    if key[pygame.K_RIGHT] or key[ord('d')]:\r\n        x += v\r\n    if key[pygame.K_UP] or key[ord('w')]:\r\n        y += -v\r\n    if key[pygame.K_DOWN] or key[ord('s')]:\r\n        y += v\r\n    if key[pygame.K_LSHIFT]:\r\n        one,two,three = random.randint(0,100),random.randint(0,100),random.randint(0,100)\r\n    if key[pygame.K_LCTRL]:\r\n        pygame.quit()\r\n        sys.exit()\r\n    else:\r\n        v = 10\r\n\r\n\r\n    pygame.display.update()\r\n    clock.tick(60)","sub_path":"pyyg/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643790335","text":"import time\nimport sys\nimport traceback\nfrom absl import app, logging\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom flask import Flask, request, Response, jsonify, send_from_directory, abort\nfrom flask_cors import CORS\nimport os\nfrom eval_one_i3d import runx\nfrom w2s import init_and_load_model, translate_sentence\nimport gpt_2_simple as gpt2\n\n\n# configure GPU memory growth\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nif len(physical_devices) > 0:\n    tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\n# load in weights and 
classes\nsess = gpt2.start_tf_sess()\ngpt2.load_gpt2(sess, run_name='play')\n\n# Initialize Flask application\napp = Flask(__name__)\n\nCORS(app)\n\n@app.route('/gen_text', methods=['POST','OPTIONS'])\ndef generate_story():\n global sess\n # input text is request.form['input']\n\n try:\n tf.reset_default_graph()\n sess.close()\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name=request.form['genre'])\n print(\"GENRE\")\n print(request.form['genre'])\n generated_text = gpt2.generate(sess,\n run_name=request.form['genre'],\n length=200,\n temperature=0.8,\n prefix=str(request.form['input']),\n nsamples=1,\n batch_size=1,\n return_as_list=True\n )[0]\n return Response(response=generated_text, status=200)\n\n except:\n traceback.print_exc(file=sys.stdout)\n print('aborting gen text')\n abort(404)\n\n@app.route('/w2s', methods=['POST','OPTIONS'])\ndef generate_sentence():\n print('in w2s')\n # input text is request.form['input']\n model_path = './cc_model_5p.pt'\n model = init_and_load_model(model_path)\n sentence, logits = translate_sentence(model, request.form['input'])\n print('sentence: ', sentence)\n sentence = sentence.capitalize()\n sentence = sentence[:-2] + sentence[-1]\n\n try:\n return Response(response=sentence, status=200)\n except:\n print('aborting w2s text')\n abort(404)\n\n@app.route('/vid2text', methods=['POST','OPTIONS'])\ndef video_to_text():\n print('in video to text')\n print('**************************************************')\n print(\"Here is the request: \", request)\n print('**************************************************')\n print(\"Here is request.files:\", request.files)\n # input video is request.files['video']\n video = request.files['video']\n video_name = video.filename\n\n for f in os.listdir(os.path.join(os.getcwd(), 'eval_vids')):\n os.remove(os.path.join(os.getcwd(), 'eval_vids', f))\n video.save(os.path.join(os.getcwd(), 'eval_vids', video_name))\n\n\n mode = 'rgb'\n num_classes = 1042 #look at preprocess ms-asl class list\n save_model = './checkpoints/' #doesn't matter\n\n root = 'eval_vids' #where data is\n weights = 'weights/nslt_1042_007480_0.516498.pt' #where weights are\n\n pred = '0'\n spot = str(os.path.join(os.getcwd(), 'eval_vids', video_name))\n print(spot)\n pred = runx(spot, mode='rgb', weights=weights)\n # pred = runx(mode=mode, root=root, save_model=save_model, train_split=train_split, weights=weights, num_classes = num_classes)\n print ('prediction received in app.py: ', pred)\n print(type(pred))\n try:\n return Response(response= pred, status=200)\n\n except:\n print('aborting vid2text')\n abort(404)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host = '0.0.0.0', port=5000)","sub_path":"Flask_React/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396968935","text":"#!/usr/bin/env python\nimport sys\nimport argparse\nimport shelve\nfrom bibmanager import ADS_AUTHORIZATION_KEY\nfrom bibmanager import ads, ADS_RESULT_CACHE, PROJECT_PATH\nfrom bibmanager import bibliography\n\ndef _print_search_results(ads_results):\n \"\"\" \n Format publications, for printing to console stdout\n\n :type ads_results: [bibliography.Publication]\n :rtype: str\n \"\"\"\n\n result_str = ''\n\n author_str_size = 10\n title_str_size = 40\n\n for result in ads_results:\n author = result.authors[0].surname\n if len(author) > author_str_size:\n author = author[0:author_str_size-4] + ' ...'\n if len(author) <= 
author_str_size:\n author = author + ' ' * (author_str_size-len(author))\n \n title = result.title\n if len(title) > title_str_size:\n title = title[0:title_str_size-4] + ' ...'\n if len(title) <= title_str_size:\n title = title + ' ' * (title_str_size-len(title))\n\n result_str = result_str + '\\n' + author + title\n\n return result_str\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n prog=\"bibsearch\", \n description='Search for publications and cache search results ')\n parser.add_argument(\n 'search_query', type=str, nargs=1,\n help='Search query')\n\n if ADS_AUTHORIZATION_KEY is None:\n sys.stdout.write(\"\\nUpdate the configuration file with your ADS authorization key.\")\n exit(0)\n\n args = parser.parse_args()\n search_query = args.search_query\n ads_publications = ads.ads_get_publication(search_query)\n\n bibliography_collection = bibliography.Bibliography(\n publications=ads_publications\n )\n\n sys.stdout.write(_print_search_results(ads_publications))\n\n with shelve.open(PROJECT_PATH + '/' + ADS_RESULT_CACHE) as d:\n d['bibliography'] = bibliography_collection\n","sub_path":"bibsearch.py","file_name":"bibsearch.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"360740957","text":"import h5py as h5\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import os\n#import math as math\n#import time\nfrom scipy import integrate\n#from scipy import interpolate \n#import random as random\nimport sys\n#import pandas as pd\n#import mpi4py.MPI \nfrom argparse import ArgumentParser\nfrom multiprocessing import Pool\n#from multiprocess import Pool\n\n###---------------------------------------------------------------------------------------------------------------------------\n# Constants\n###--------------------------------------------------------------------------------------------------------------------------\n\n#G = 4.301e-9 \t# In Mpc\nG = 4.301e-6\t\t# In Kpc\nG_cgs = 6.674e-8\n\nM_solar_2_g = 1.99e33\n#Mpc_2_cm = 3.086e24\t\t# Mpc to cm\nMpc_2_cm = 3.086e21\t\t# Kpc to cm\t\nk_boltzmann = 1.3807e-16\n\n#cm_2_mpc = 3.24e-25\t\t# cm to Mpc\ncm_2_mpc = 3.24e-22\t\t# cm to Kpc\ng_2_M_solar = 0.5e-33\n\nh \t\t\t = 0.6751\n\ndt = np.dtype(int)\ndf = np.dtype(float)\n\n\n#*********************************************************************************************************************************\n\nclass Unbuffered(object):\n def __init__(self, stream):\n self.stream = stream\n def write(self, data):\n self.stream.write(data)\n self.stream.flush()\n def writelines(self, datas):\n self.stream.writelines(datas)\n self.stream.flush()\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\nsys.stdout = Unbuffered(sys.stdout)\n\n\n\nparser = ArgumentParser()\nparser.add_argument('-n',\n\t\t\t\t\taction='store',dest='num',type=int,default=None,\n\t\t\t\t\thelp='File that has to be written')\n\nparser.add_argument('-pw',\n\t\t\t\t\taction='store',dest='path_write',type=str,default=None,\n\t\t\t\t\thelp='destination folder')\n\n\nparser.add_argument('-pr',\n\t\t\t\t\taction='store',dest='path_read',type=str,default=None,\n\t\t\t\t\thelp='reading folder')\n\nparser.add_argument('-name',\n\t\t\t\t\taction='store',dest='name_hdf5',type=str,default=None,\n\t\t\t\t\thelp='file name for the h5')\nargs = parser.parse_args()\n\n\n\n\ndef movingaverage(interval, window_size):\n\n\twindow = np.ones(int(window_size))/float(window_size)\n\treturn np.convolve(interval, 
window, 'same')\n\ndef split(container, count):\n \"\"\"\n Simple function splitting a container into equal length chunks.\n Order is not preserved but this is potentially an advantage depending on\n the use case.\n \"\"\"\n return [container[_i::count] for _i in range(count)]\n\n\n#************************************************************************************************************************************\ndef flux_danail(M_cold_gas,M_cold_gas_mol, distance , z):\n\n\tfHI\t\t= 1.4204\t\n\tmass \t= (M_cold_gas - M_cold_gas_mol)/h*0.74 \n\tdis_cor\t= distance/h\n\tLuminosity_HI = 6.27e-9*mass\n\tflux \t=\tLuminosity_HI/1.04e-3/(dis_cor)**2/(1+z)/fHI\n\n\treturn flux\n\n\n\ndef flux_catinella(M_cold_gas,M_cold_gas_mol, distance , z):\n\n\tfHI\t\t= 1.4204\t\n\tmass \t= (M_cold_gas - M_cold_gas_mol)/h*0.74 \n\tdis_cor\t= distance/h\n\t\n\tflux \t= mass*(1+z)/dis_cor**2/2.356*10**(-5)\n\n\n\treturn flux\n\n\n\n\n#******************************************************************************************************************************************\n\n\ndef empty_dataset(file_name):\n\t\n\t\n\tvel_cal\t\t\t=\tfile_name.create_group(\"Velocity_Calculated\")\n\tvel_cal.create_dataset(\"V_halo\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"V_disk\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"V_bulge\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"V_HI\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"V_max_circ\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"W_peak\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"W_20\", (loop_length,), dtype = 'float32')\n\tvel_cal.create_dataset(\"W_50\",(loop_length,), dtype = 'float32')\n\n\tfile_name.create_dataset(\"Theta\", data = theta)\n\n\tflux_gal\t\t= \tfile_name.create_group(\"flux\")\n\tflux_gal.create_dataset(\"flux\", data = flux_lines)\n\t\n\tem_line = file_name.create_group(\"Emission_Line\")\n\tem_line.create_dataset(\"s_peak\", (loop_length,), dtype='float32')\n\tem_line.create_dataset(\"s_50\",(loop_length,), dtype='float32')\n\tem_line.create_dataset(\"s_20\",(loop_length,),dtype='float32')\n\tem_line.create_dataset(\"v_x\",(loop_length,596), dtype = 'float32')\n\tem_line.create_dataset(\"s_normalized\",(loop_length,596), dtype = 'float32')\n\n\tvel_pro\t\t\t=\tfile_name.create_group(\"Velocity_Profile\")\n\tvel_pro.create_dataset(\"r_x\",(loop_length,250), dtype = 'float32')\n\tvel_pro.create_dataset(\"V_max_circ_profile\",(loop_length,250), dtype = 'float32')\n\n\n\ndef other_props_assigning(file_name):\n\n\n\tcentral = file_name.create_dataset('is_central', data = gal_type)\n\n\tG_Radius\t=\tfile_name.create_group('Radius_file')\n\n\tG_Radius.create_dataset('R_vir', data = R_S_halo)\n\tG_Radius.create_dataset('R_disk_star', data = rstar_disk)\n\tG_Radius.create_dataset('R_bulge_star', data = rstar_bulge)\n\tG_Radius.create_dataset('R_disk_gas', data = rgas_disk)\n\tG_Radius.create_dataset('R_bulge_gas', data = rgas_bulge)\n\t\n\tG_Radius.create_dataset('R_disk_star_apparent', data = rstar_disk_arcsec)\n\tG_Radius.create_dataset('R_bulge_star_apparent', data = rstar_bulge_arcsec)\n\tG_Radius.create_dataset('R_disk_gas_apparent', data = rgas_disk_arcsec)\n\tG_Radius.create_dataset('R_bulge_gas_apparent', data = rgas_bulge_arcsec)\n\n\t#*************************************************************************************************\n\n\n\tG_Velocity = 
file_name.create_group('Velocity_File')\n\n\tG_Velocity.create_dataset('Virial_velocity_host', data = vvir_hosthalo)\n\tG_Velocity.create_dataset('Virial_velocity_sub', data = vvir_subhalo)\n\t#G_Velocity.create_dataset('Max_Circular_Velocity', data = vmax_halo)\n\n\t#**************************************************************************************************\n\n\n\n\t#**************************************************************************************************\n\n\tG_Mass\t\t=\tfile_name.create_group('Mass')\n\n\tG_Mass.create_dataset('M_gas_disk', data = mgas_disk)\n\tG_Mass.create_dataset('M_gas_bulge', data = mgas_bulge)\n\tG_Mass.create_dataset('M_Host_Halo', data = mvir_hosthalo)\n\tG_Mass.create_dataset('M_Sub_Halo', data = mvir_subhalo)\n\tG_Mass.create_dataset('M_stars_bulge', data = mstars_bulge)\n\tG_Mass.create_dataset('M_stars_disk', data = mstars_disk)\n\tG_Mass.create_dataset('M_stars_tot', data = mstars)\n\tG_Mass.create_dataset('M_atom_disk', data = matom_disk)\n\tG_Mass.create_dataset('M_atom_bulge', data = matom_bulge)\n\tG_Mass.create_dataset('M_mol_disk', data = mmol_disk)\n\tG_Mass.create_dataset('M_mol_bulge', data = mmol_bulge)\n\t#*****************************************************************************************************\n\n\tG_cals\t\t= file_name.create_group('All_else')\n\n\tG_cals.create_dataset('snapshot', data = snapshot)\n\tG_cals.create_dataset('subvolume', data = subsnapshot)\n\tG_cals.create_dataset('id_galaxy_lightcone', data = id_galaxy_dan)\n\t#G_cals.create_dataset('id_galaxy_shark', data = id_galaxy_sam)\n\tG_cals.create_dataset('id_halo_shark', data = id_halo_sam)\n\n\tG_cals.create_dataset('C_halo', data = c_halo)\n\tG_cals.create_dataset('C_disk', data = c_disk)\n\tG_cals.create_dataset('C_bulge', data = c_bulge)\n\tG_cals.create_dataset('C_HI', data = c_HI)\n\tG_cals.create_dataset('Distance', data = distance)\n\tG_cals.create_dataset('z_cos', data = zcos)\n\tG_cals.create_dataset('z_obs', data = zobs)\n\tG_cals.create_dataset('dec', data = dec)\n\tG_cals.create_dataset('ra', data = ra)\n\tG_cals.create_dataset('sfr_disk', data=sfr_disk)\n\tG_cals.create_dataset('sfr_burst', data = sfr_burst)\n\tG_cals.create_dataset('s_hi', data = s_hi)\n\n\n\ndef line_emission(R_halo,R_disk,R_HI,R_bulge,M_cold_gas,M_stars_disk,M_halo,M_disk,M_bulge,c_halo,c_disk,c_bulge,c_HI, sini): # K is the index of galaxy, sini - sine of angle of inclination [theta dataset]\n\t\n\t## adresses \n\t# if M_halo > 10**10:\n\t# \tv_g = 10\n\t# else:\t\n\t# \tv_g = 20\n\n\tv_g = 10\n\n\t## Making the Radius Channel - -------------------------------------------------------------------------\n\n\t\n\txmax = 1\n\tdx = 0.004\n\tnx = int(xmax/dx)\n\tx = np.zeros(shape = nx)\n\n\n\tfor i in range(1,nx + 1):\n\t\tx[i - 1] = (i - 0.5) * dx\n\n\n\tr_x = (R_halo/1.67)*x\n\t\n\tR = R_halo/1.67\n\n\tif R_HI == 0:\n\t\tR_HI = 3*R_disk/1.67\t\n\n\t\tc_HI = R_halo/R_HI\n\n\n\n\n\t#if R_disk == 0:\n\t#\treturn None, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\n\t#if R_halo == 0:\n\t#\treturn None, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\n\n\n\n\t## Making Surface Density Profile-------------------------------------------------------------------\n\n\tGas_galaxy = np.zeros(shape = nx)\n\tP_star_galaxy = np.zeros(shape = nx)\n\tP_gas_galaxy = np.zeros(shape = nx)\n\tv_star_galaxy = np.zeros(shape=nx)\n\thstar_galaxy = R_HI/7.3*Mpc_2_cm\n\n\tM_cold_gas_cgs = 
M_cold_gas*M_solar_2_g\n\tM_stars_disk_cgs = M_stars_disk*M_solar_2_g\n\tR_HI_cgs = R_HI*Mpc_2_cm\n\n\t#M_stars_bulge_cgs = M_stars_bulge*M_solar_2_g\n\t#R_bulge_cgs = R_bulge*Mpc_2_cm\n\t\n\tr_x_cgs = r_x*Mpc_2_cm\n\n\n\n\tA_cgs = -r_x_cgs/R_HI_cgs\n\n\tfor i in range(0,nx):\n\n\t\tGas_galaxy[i] = M_cold_gas_cgs/(2*np.pi*(R_HI_cgs)**2)*np.exp(A_cgs[i])\n\n\t\t\n\t\tP_star_galaxy[i] = ((M_stars_disk_cgs)/(2*np.pi*(R_HI_cgs)**2)*np.exp(A_cgs[i]))\n\n\t\tP_gas_galaxy[i] = (M_cold_gas_cgs/(2*np.pi*(R_HI_cgs)**2))*np.exp(A_cgs[i])\n\n\t\tv_star_galaxy[i] = np.sqrt(np.pi*G_cgs*hstar_galaxy*P_star_galaxy[i])\n\n\n\tP_ext_galaxy = np.pi/2*G_cgs*Gas_galaxy\n\tP_ext_galaxy = P_ext_galaxy*(P_gas_galaxy + (v_g*1e5/v_star_galaxy)*P_star_galaxy)\n\tP_ext_galaxy = P_ext_galaxy/k_boltzmann\n\n\tP_ext = max(P_ext_galaxy)\n\t\n\t#P_0 = 3.7*10e4\n\tP_0 = 34673\n\n\t#R_c_galaxy = (P_ext_galaxy/P_0)**0.8\n\n\tR_c_galaxy = (P_ext_galaxy/P_0)**0.92\n\n\n\tf_H1_galaxy = 1/(1 + R_c_galaxy)\n\tf_H2_galaxy = R_c_galaxy/(1 + R_c_galaxy)\n\n\n\n\tsurface_H1_galaxy = f_H1_galaxy*Gas_galaxy\n\tH1 = np.trapz(r_x_cgs*surface_H1_galaxy, dx=dx*Mpc_2_cm)\n\tsurface_H1_galaxy = surface_H1_galaxy/H1/cm_2_mpc**2\n\n\tsurface_H2_galaxy = f_H2_galaxy*Gas_galaxy\n\tH2 = np.trapz(r_x_cgs*surface_H2_galaxy,dx = dx*Mpc_2_cm) \n\tsurface_H2_galaxy = surface_H2_galaxy/H2/cm_2_mpc**2\n\n\n\n\n\n\t#---------------------------------------------------------------------------------------------------------\n\n\t## Making Velocity Profile------------------------------------------------------------------------\n\n\n\tA = r_x/R\n\n\tV_bulge_sqr = np.zeros(shape = nx)\n\tnumerator_b = np.zeros(shape = nx)\n\tdenominator_b = np.zeros(shape = nx)\n\n\tV_halo_sqr = np.zeros(shape = nx)\n\tnumerator_h = np.zeros(shape = nx)\n\n\n\tV_disk_sqr = np.zeros(shape= nx)\n\tnumerator_d = np.zeros(shape = nx)\n\tdenominator_d = np.zeros(shape = nx)\n\n\tV_HI_sqr = np.zeros(shape= nx)\n\tnumerator_g = np.zeros(shape = nx)\n\tdenominator_g = np.zeros(shape = nx)\n\n\n\t\n\tfor i in range(0,nx):\n\n\t\tnumerator_b[i] = ((c_bulge*A[i])**2)*(c_bulge)\n\n\t\tdenominator_b[i] = (1 + (c_bulge*A[i])**2)**1.5\n\t\tV_bulge_sqr[i] = (((G*M_bulge)/R)*(numerator_b[i]/denominator_b[i]))\n\n\n\n\t\n\n\t\n\n\t\tdenominator_h = np.log(1 + c_halo) - ((c_halo/(1 + c_halo)))\n\t\tnumerator_h[i] = np.log(1 + c_halo*A[i]) - ((c_halo*A[i])/(1 + c_halo*A[i]))\n\t\tV_halo_sqr[i] = (((G*M_halo)/R)*(numerator_h[i]/(A[i]*denominator_h)))\n\n\t\t\n\n\n\n\t\n\t\tnumerator_d[i] = c_disk + 4.8*c_disk*(np.exp((-0.35*c_disk*A[i]) - (3.5/(c_disk*A[i]))))\n\t\tdenominator_d[i] = (c_disk*A[i]) + (c_disk*A[i])**(-2) + 2*((c_disk*A[i]))**(-0.5)\n\t\tV_disk_sqr[i] = (((G*M_disk)/R)*(numerator_d[i]/denominator_d[i]))\n\n\n\n\t\n\t\tnumerator_g[i] = c_HI + 4.8*c_HI*(np.exp((-0.35*c_HI*A[i]) - (3.5/(c_HI*A[i]))))\n\t\tdenominator_g[i] = (c_HI*A[i]) + (c_HI*A[i])**(-2) + 2*((c_HI*A[i]))**(-0.5)\n\t\tV_HI_sqr[i] = (((G*M_cold_gas*0.73)/R)*(numerator_g[i]/denominator_g[i]))\n\n\n\n\n\t\n\n\tV_circ = np.sqrt(V_disk_sqr + V_halo_sqr + V_bulge_sqr + V_HI_sqr)\n\n\t\n\n\tmax_Vcirc = max(V_circ)\n\tmax_Vdisk = max(np.sqrt(V_disk_sqr))\n\tmax_Vhalo = max(np.sqrt(V_halo_sqr))\n\tmax_Vbulge = max(np.sqrt(V_bulge_sqr))\n\tmax_VHI\t\t= max(np.sqrt(V_HI_sqr))\n\n\t \n\t\n\t\n\t\n\t#if max(r_x) < R_bulge and R_bulge > R_disk:\n\t#\tmax_Vcirc = max(V_circ)\n\t#elif max(r_x) < R_bulge:\n\t#\tmax_Vcirc = max(V_circ[r_x > R_bulge/2])\n\t#else:\n\t#\tmax_Vcirc = max(V_circ[r_x > R_bulge])\t\n\n\n\tif 
np.isnan(max_Vcirc):\n\t\treturn None, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\n\t\t\n\t#if max(V_circ) > 5000:\n\n\t#\treturn None, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\t\t#return None, None, None, None\n\n\t#-------------------------------------------------------------------------------------------------------------\n\n\n\n\t################################################################################################################3\n\n\t# STARTING THE CONVULATION #################\n\n\t# NOte - Smoothing factor for Random orientation is 20\n\t#\t\t Smoothing factor for Edge on is 50\n\n\n\n\n\n\t## Making Velocity Channel----------------------------------------------------------------------------------\n\n\tvmax = (max_Vcirc + v_g*2)*1.2\n\tnv = 300\n\tdv = vmax/nv\n\tv_x = np.zeros(shape = nv)\n\n\tfor i in range(1,nv):\n\t\tv_x[i - 1] = (i - 0.5)*dv \n\n\t#------------------------------------------------------------------------------------------------------------\n\n\t## Making the smoothing filter-------------------------------------------------------------------------------\n\n\tdisp = v_g/dv\n\n\n\n\tnfilter = int(disp)\n\n\tfilter_x = np.array([n for n in range(-nfilter,nfilter) ])\n\n\tfilter_final = np.zeros(shape = len(filter_x))\n\n\tfor j in range(0,len(filter_x)):\n\t\tfor i in filter_x:\n\n\t\t\tfilter_final[j] = np.exp(-(i**2)/disp**2/2)\n\n\tfilter_final = filter_final/sum(filter_final)\n\n\t#----------------------------------------------------------------------------------------------------------\n\n\t## Calculating the flux using the surface density calculated earlier\n\n\ts = np.zeros(shape = nv)\n\n\tfor j in range(1,nx):\n\n\t\ty = v_x/(V_circ[j-1]*sini)\n\n\t\tdy = dv/(V_circ[j-1]*sini)\n\n\t\tf = r_x[j-1]*surface_H1_galaxy[j-1]/np.pi/dv\n\n\t\tfor i in range(1,nv):\n\t\t\tym = y[i - 1] - dy/2\n\t\t\typ = y[i - 1] + dy/2\n\n\t\t\tif yp > 1:\n\t\t\t\typ = 1\n\n\t\t\tif ym > 1:\n\t\t\t\tcontinue\n\n\t\t\ts[i - 1] = s[i - 1] + f*(np.arcsin(yp) - np.arcsin(ym)) \n\n\t#s = s/sum(dv*s)\n\t#print(np.trapz(s, dx = dv))\n\n\t#-------------------------------------------------------------------------------------------------------------\n\n\t# Smooth lines by velocity dispersion\n\n\ts_2 = np.zeros(shape = nv)\n\n\tfor i in range(1,nv):\n\n\t\tfor j in range(0, len(filter_x)):\n\n\t\t\tyo = i+filter_x[j]\n\n\t\t\tif (yo < 1):\n\n\t\t\t\tyo = 1 - yo\n\n\t\t\tif (yo <= nv):\n\n\t\t\t\ts_2[i -1] = s_2[i - 1] + s[yo - 1]*filter_final[j]\n\n\n\ts_2 = s_2/sum(dv*s_2)/2\t\t\t\n\n\n\n\n\tv_x_n = -v_x\n\n\ttrial = movingaverage(s_2,50)\n\t\n\tfinal_v_x = np.hstack((v_x_n[0:nv-2], v_x[0:nv-2]))\n\n\tfinal_s = np.hstack((s_2[0:50], trial[50:nv-2]))\n\tfinal_s = np.hstack((final_s,s_2[0:50]))\n\tfinal_s = np.hstack((final_s, trial[50:nv-2]))\n\n\n\txs,ys = zip(*sorted(zip(final_v_x, final_s)))\n\n\n\n\t#------------------------------------------------------------------------------------------------------------\n\n\t# Calculating the W50 and W20 and \n\n\t#--------------------------------------------------------------------------------------------------------------\n\n\ts_peak = max(trial)\n\n\ts_central = trial[0]\n\n\tfor i in range(1,nv-1,1):\n\n\t\tif trial[i-1] == s_peak :\n\t\t\tWpeak = v_x[i-1]*2\n\t#\telse:\n\t#\t\tWpeak = 1 \n\n \n\t#\ts_high = i\n\n\tstarget_50 = 0.5*s_peak\n\n\tfor i in range(1, nv-1, 1):\n\n\t\tif trial[i - 1] > starget_50:\n\t\t\t#f = np.abs((trial[i-1] - 
starget_50))/np.abs(trial[i-1] - trial[i])\n\t\t\t#W50 = np.abs((1 - f)*v_x[i-1]+f*v_x[i])*2\n\t\t\tW50 = 2*v_x[i]\n\t#\telse:\n\t#\t\tW50 = 1\n\t\n\t#\ts_50 = i\n\n\n\n\tstarget_20 = 0.2*s_peak\n\n\tfor i in range(1, nv-1, 1):\n\n\t\tif trial[i-1] > starget_20:\n\t\t\t#f = np.abs((trial[i-1] - starget_20))/np.abs(trial[i-1] - trial[i])\n\t\t\t#W20 = np.abs((1 - f)*v_x[i-1]+f*v_x[i])*2\n\t\t\tW20 = 2*v_x[i]\n\t#\telse:\n\t#\t\tW20 = 1\n\t\n\t#\ts_20 = i\n\n\t\n\t\n\t\n\n\t\t\n\n\t#return xs, ys, max_Vhalo, max_Vdisk, max_Vcirc, max_Vbulge, max_VHI,Wpeak , W50, W20, s_peak, starget_50, starget_20, V_circ, r_x\n\t#-------------------------------------------------------------------------------------------------------------\n\ttry:\n\t\treturn xs, ys, max_Vhalo, max_Vdisk, max_Vcirc, max_Vbulge, max_VHI,Wpeak , W50, W20, s_peak, starget_50, starget_20, V_circ, r_x\n\texcept (UnboundLocalError):\n\t \treturn None, None, None, None, None, None, None, None, None, None, None, None, None, None, None\n\n\n#********************************************************************************************************\n\n\n#path_write \t\t\t= \t'/group/pawsey0119/gchauhan/Stingray_Output/medi-SURFS/SHArk-Lagos18-final/'\n#path_write \t\t\t\t= \t'/home/garima/Desktop/'\n\n\npath_write = args.path_write\npath_read = args.path_read\nname_hdf5 = args.name_hdf5\n\nnum = args.num\nfiles = [\"WALLABY_1\",\"WALLABY_2\",\"WALLABY_3\",\"WALLABY_4\",\"WALLABY_5\",\"WALLABY_6\",\"WALLABY_7\",\"WALLABY_8\",\"WALLABY_9\"]\n\n##------------------------------------------------------------------------------------\nfor k in [num]:\n\twith np.errstate(divide = 'ignore', invalid = 'ignore'):\n\n\n\t\tf = h5.File(path_read + files[k] + \"/mocksky.hdf5\",'r')\n\n\t\t#f = h5.File(\"/home/garima/Desktop/alfalfa_SH.hdf5\",'r')\n\n\t\tzcos \t\t\t\t= np.array(f['galaxies/zcos'])#, dtype = df)\n\t\tzobs \t\t\t\t= np.array(f['galaxies/zobs'])#, dtype = df)\n\n\t\tsnapshot \t\t\t= np.array(f['galaxies/snapshot'])#, dtype = dt)\n\t\tsubsnapshot\t\t\t= np.array(f['galaxies/subvolume'])#, dtype = dt)\n\t\tid_galaxy_dan \t\t= np.array(f['galaxies/id_galaxy_sam'])#, dtype = dt)\n\t\tid_halo_dan \t\t= np.array(f['galaxies/id_halo_sam'])#, dtype = dt)\n\t\tid_halo_sam \t\t= np.array(f['galaxies/id_halo_sam'])#, dtype = dt)\n\t\tdec \t\t\t\t= np.array(f['galaxies/dec'])#, dtype = df )\n\t\tra \t\t\t\t\t= np.array(f['galaxies/ra'])#, dtype = df )\n\t\tinclination\t\t\t= np.array(f['galaxies/inclination'])#, dtype = dt )\n\t\tmvir_hosthalo\t\t= np.array(f['galaxies/mvir_hosthalo'])#, dtype = dt)\n\t\tmvir_subhalo\t\t= np.array(f['galaxies/mvir_subhalo'])#, dtype = dt)\n\t\trgas_disk\t\t\t= np.array(f['galaxies/rgas_disk_intrinsic'])*10**3#, dtype = df)\n\t\trgas_bulge\t\t\t= np.array(f['galaxies/rgas_bulge_intrinsic'])*10**3#,dtype = df)\n\t\trstar_bulge \t\t= np.array(f['galaxies/rstar_bulge_intrinsic'])*10**3#, dtype = df)\n\t\trstar_disk \t\t\t= np.array(f['galaxies/rstar_disk_intrinsic'])*10**3#, dtype = df)\n\t\tgal_type\t \t\t= np.array(f['galaxies/type'])#, dtype = dt)\n\t\tzcos \t\t\t\t= np.array(f['galaxies/zcos'])#, dtype = df)\n\t\tzobs \t\t\t\t= np.array(f['galaxies/zobs'])#, dtype = df)\n\t\tdistance \t\t\t= np.array(f['galaxies/dc'])#, dtype = df)\n\t\tsfr_disk \t= np.array(f['galaxies/sfr_disk'])#, dtype = df)\n\t\tsfr_burst = np.array(f['galaxies/sfr_burst'])#, dtype = df)\n\t\tc_halo\t \t\t\t= np.array(f['galaxies/cnfw_subhalo'])#, dtype = df)\n\t\tmatom_bulge \t\t= np.array(f['galaxies/matom_bulge'])#, dtype = dt)\n\t\tmatom_disk 
\t\t\t= np.array(f['galaxies/matom_disk'])#, dtype = dt)\n\t\tmgas_disk \t\t\t= np.array(f['galaxies/mgas_disk'])#, dtype = dt)\n\t\tmgas_bulge \t\t\t= np.array(f['galaxies/mgas_bulge'])#, dtype = dt)\n\n\t\tmstars_bulge \t\t= np.array(f['galaxies/mstars_bulge'])#, dtype = dt)\n\t\tmstars_disk \t\t= np.array(f['galaxies/mstars_disk'])#, dtype = dt)\n\t\tmstars \t= mstars_bulge + mstars_disk\n\t\t#vmax_halo \t\t\t= np.array(f['galaxies/vmax_subhalo'])#, dtype = df)\n\t\tvvir_hosthalo \t= np.array(f['galaxies/vvir_hosthalo'])#,dtype = df)\n\t\tvvir_subhalo\t\t= np.array(f['galaxies/vvir_subhalo'])#,dtype=df)\n\t\ts_hi \t= np.array(f['galaxies/s_hi'])#,dtype=df)\n\t\t\n\t\tmmol_disk \t= np.array(f['galaxies/mmol_disk'])#,dtype=dt)\n\t\tmmol_bulge \t= np.array(f['galaxies/mmol_bulge'])#,dtype=dt)\n\n\t\trgas_disk_arcsec\t= np.array(f['galaxies/rgas_disk_apparent'])#, dtype = df)\n\t\trgas_bulge_arcsec\t= np.array(f['galaxies/rgas_bulge_apparent'])#,dtype = df)\n\t\trstar_bulge_arcsec\t= np.array(f['galaxies/rstar_bulge_apparent'])#, dtype = df)\n\t\trstar_disk_arcsec\t= np.array(f['galaxies/rstar_disk_apparent'])#, dtype = df)\n\n\n\n\t\t#------------------------------------------------------------------------------------------------------------\n\n\n\n\t\t#R_S_halo\t\t\t= \tvmax_halo*10**3/10/67.51\n\t\t\n\t\tR_S_halo = G*mvir_hosthalo/vvir_hosthalo**2\n\t\t\n\t\tprint(R_S_halo)\n\t\t# print(vmax_halo[0:1000])\n\n\n\n\n\t\tc_disk \t= np.zeros(len(rstar_disk))\n\n\t\tc_bulge = np.zeros(len(rstar_bulge))\n\n\t\tc_HI = np.zeros(len(rgas_disk))\n\n\n\t\tfor i in range(len(rgas_disk)):\n\t\t\tif rgas_disk[i] == 0:\n\t\t\t\tc_HI[i] = 0\n\t\t\telse:\n\t\t\t\tc_HI[i] \t= \tR_S_halo[i]/(rgas_disk[i]/1.67)\n\n\t\t\tif rstar_disk[i] == 0:\n\t\t\t\tc_disk[i] = 0\n\t\t\telse:\n\t\t\t\tc_disk[i]\t=\tR_S_halo[i]/(rstar_disk[i]/1.67)\n\n\t\t\tif rstar_bulge[i] == 0:\n\t\t\t\tc_bulge[i] = 0\n\t\t\telse:\n\t\t\t\tc_bulge[i]\t=\tR_S_halo[i]/(1.7*rstar_bulge[i]/1.67)\n\n\n\n\n\n\t\tM_bulge \t\t\t= \tmstars_bulge + mgas_bulge \t\t\t\t\t# Total mass of the bulge\n\t\t\t\t\t\t\n\t\tM_disk \t\t\t\t= \tmstars_disk + mgas_disk\t\t\t\t\t\t# Total mass of the disk\n\n\t\tmass_gas \t\t\t= \tmgas_disk/mstars\n\n\t\tmass_galaxies \t\t= \tnp.log10(mstars)\n\n\t\tB_T \t\t\t\t=\tmstars_bulge/mstars\n\n\t\ttheta \t\t\t\t= np.sin(inclination*np.pi/180)\n\n\n\n\t\t\n\t\t#print('loop-length', loop_length)\n\n\t\tempty_HI = np.zeros(len(mstars))\n\n\t\t###########################################################################################################################################\n\t\t### FLUX selection\n\t\t#---------------------------------------------------------------------------------------------------------------------------------------------------\n\n\t\tflux_all \t\t\t=\tflux_catinella(matom_disk+matom_bulge, empty_HI, distance, zcos)\n\t\tindex_z\t\t\t\t=\tnp.where(zcos < 0.26)\n\t\tindex_z\t\t\t\t=\tindex_z[0]\n\t\tindex_z\t\t\t\t=\tindex_z.tolist()\n\n\n\t\t\n\n\t\tloop_length = len(mstars)\n\t\t#loop_length = 100\n\n\t\t#################################################################################################################################\n\n\t\tdef Line_emission_parallel(i):\n\t\t\t\n\t\t\tif i%200 == 0:\n\t\t\t\tprint(i)\n\n\t\t\tv_x, s_final, V_halo, V_disk, V_circ, V_bulge,V_HI ,W_peak, W_50 , W_20, s_high, s_50, s_20, V_circ_all, r_x 
\t=\t\\\n\t\t\tline_emission(R_S_halo[i],rstar_disk[i],\n\t\t\t\t\trgas_disk[i],rstar_bulge[i],mgas_disk[i],\n\t\t\t\t\tmstars_disk[i],mvir_subhalo[i],M_disk[i],\n\t\t\t\t\tM_bulge[i],c_halo[i],c_disk[i]\n\t\t\t\t\t,c_bulge[i],c_HI[i],theta[i])\n\n\t\t\tflux_lines\t=\tflux_catinella(matom_disk[i] + matom_bulge[i], empty_HI[i],distance[i],zcos[i])\n\n\t\t\treturn v_x, s_final, V_halo, V_disk, V_circ, V_bulge,V_HI ,W_peak, W_50 , W_20, s_high, s_50, s_20, V_circ_all, r_x, flux_lines\n\n\n\n\t\tv_x = {}\n\t\tr_x = {}\n\t\ts_final = {}\n\t\tV_circ_all = {}\n\n\n\n\t\tW_peak\t\t\t= np.zeros(shape=loop_length)\n\t\tW_50 = np.zeros(shape=loop_length)\n\t\tW_20 = np.zeros(shape=loop_length)\n\t\tV_halo = np.zeros(shape=loop_length)\n\t\tV_disk = np.zeros(shape=loop_length)\n\t\tV_circ = np.zeros(shape=loop_length) \n\t\tV_bulge = np.zeros(shape=loop_length) \n\t\tV_HI \t = np.zeros(shape=loop_length) \n\t\ts_high = np.zeros(shape=loop_length)\n\t\ts_50 = np.zeros(shape=loop_length)\n\t\ts_20 = np.zeros(shape=loop_length)\n\t\tflux_lines\t\t= np.zeros(shape=loop_length)\n\n\n\t\t##---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\t\t##### MPI Trial\n\t\t##---------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\t\t# task_list = index_z\n\n\t\t# rank = mpi4py.MPI.COMM_WORLD.Get_rank()\n\t\t# size = mpi4py.MPI.COMM_WORLD.Get_size()\n\n\t\t# for task,i in enumerate(task_list):\n\t\t# \tif task%size!=rank: continue\n\n\t\t# \tv_x, s_final, V_halo, V_disk, V_circ, V_bulge,V_HI ,W_peak, W_50 , W_20, s_high, s_50, s_20, V_circ_all, r_x \t=\t\\\n\t\t# \tline_emission(R_S_halo[i],rstar_disk[i],\n\t\t# \t\t\trgas_disk[i],rstar_bulge[i],mgas_disk[i],\n\t\t# \t\t\tmstars_disk[i],mvir_subhalo[i],M_disk[i],\n\t\t# \t\t\tM_bulge[i],c_halo[i],c_disk[i]\n\t\t# \t\t\t,c_bulge[i],c_HI[i],theta[i])\n\n\t\t# \tflux_lines\t=\tflux_catinella(matom_disk[i] + matom_bulge[i], empty_HI[i],distance[i],zcos[i])\n\n\n\t\t#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\t\t### Multiprocessing\n\t\t#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\t\tif __name__ == '__main__':\n\n\t\t\tpool = Pool()\n\t\t\ttotal_tasks = loop_length\n\t\t\ttasks = range(loop_length)\n\n\n\t\t\tv_x, s_final, V_halo, V_disk, V_circ, V_bulge,V_HI ,W_peak, W_50 , W_20, s_high, s_50, s_20, V_circ_all, r_x, flux_lines = zip(*pool.starmap(Line_emission_parallel,zip(tasks)))\n\t\t\t\n\t\t\tpool.close()\n\t\t\tpool.join()\n\n\t\t#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n\t\t# for i in range(loop_length):\n\t\t# \tif i%200 == 0:\n\t\t# \t\tprint(i)\n\n\t\t# \tv_x[i], s_final[i], V_halo[i], V_disk[i], V_circ[i], V_bulge[i],V_HI[i] ,W_peak[i], W_50[i] , W_20[i], s_high[i], s_50[i], s_20[i], V_circ_all[i], r_x[i] \t=\t\\\n\t\t# \tline_emission(R_S_halo[i],rstar_disk[i],\n\t\t# \t\t\trgas_disk[i],rstar_bulge[i],mgas_disk[i],\n\t\t# \t\t\tmstars_disk[i],mvir_subhalo[i],M_disk[i],\n\t\t# \t\t\tM_bulge[i],c_halo[i],c_disk[i]\n\t\t# 
\t\t\t,c_bulge[i],c_HI[i],theta[i])\n\n\t\t# \tflux_lines[i]\t=\tflux_catinella(matom_disk[i] + matom_bulge[i], empty_HI[i],distance[i],zcos[i])\n\n\n\n\n\n\t\thf = h5.File(path_write + name_hdf5 +'_%s.h5'%k, 'w')\n\n\n\t\tother_props_assigning(hf)\n\t\tempty_dataset(hf)\n\n\t\tfor i in range(loop_length):\n\t\t\tif v_x[i] == None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\thf[\"Emission_Line/v_x\"][i,] = v_x[i]\n\t\t\t\thf[\"Emission_Line/s_normalized\"][i,] = s_final[i]\n\t\t\t\thf[\"Velocity_Profile/r_x\"][i,] = r_x[i]\n\t\t\t\thf[\"Velocity_Profile/V_max_circ_profile\"][i,] = V_circ_all[i]\n\t\t\t\t\n\t\t\t\thf[\"Velocity_Calculated/V_halo\"][i] \t\t= V_halo[i]\n\t\t\t\thf[\"Velocity_Calculated/V_disk\"][i] \t\t= V_disk[i]\n\t\t\t\thf[\"Velocity_Calculated/V_bulge\"][i]\t\t= V_bulge[i]\n\t\t\t\thf[\"Velocity_Calculated/V_max_circ\"][i]\t\t= V_circ[i]\n\t\t\t\thf[\"Velocity_Calculated/V_HI\"][i]\t\t\t= V_HI[i]\n\t\t\t\thf[\"Velocity_Calculated/W_peak\"][i]\t\t\t= W_peak[i]\n\t\t\t\thf[\"Velocity_Calculated/W_50\"][i]\t\t\t= W_50[i]\n\t\t\t\thf[\"Velocity_Calculated/W_20\"][i]\t\t\t= W_20[i]\n\n\t\t\t\thf[\"Emission_Line/s_peak\"][i]\t\t\t\t= s_high[i]\n\t\t\t\thf[\"Emission_Line/s_20\"][i]\t\t\t\t\t= s_20[i]\n\t\t\t\thf[\"Emission_Line/s_50\"][i]\t\t\t\t\t= s_50[i]\n\n\n\t\thf.close()\n\n\n\n#'survey_lightcone_micro':wq\n\n\n\n\n","sub_path":"PhD_analysis_codes/Chapter_3/Emission_Lines_SHArk.py","file_name":"Emission_Lines_SHArk.py","file_ext":"py","file_size_in_byte":26373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508815939","text":"#!/usr/bin/env python3\n\n# ---------------------------\n# projects/collatz/Collatz.py\n# Copyright (C) 2016\n# Glenn P. Downing\n# ---------------------------\n\n# ------------\n# collatz_read\n# ------------\n\ndef collatz_read (s) :\n \"\"\"\n read two ints\n s a string\n return a list of two ints, representing the beginning and end of a range, [i, j]\n \"\"\"\n a = s.split()\n return [int(a[0]), int(a[1])]\n\n# ------------\n# collatz_eval\n# ------------\n\ndef collatz_eval (i, j) :\n \"\"\"\n i the beginning of the range, inclusive\n j the end of the range, inclusive\n return the max cycle length of the range [i, j]\n \"\"\"\n assert (i > 0)\n assert (j > 0)\n\n cache = [0] * 1000001 # Make a cache initialized with 0s\n first = i\n last = j\n max_cycle = 1\n\n # Make sure we take input range in the correct order (smaller value first, larger value second)\n if (i > j) :\n first = j\n last = i\n\n for n in range (first, last + 1) :\n cycle = 1\n temp = n # we will need to retain the original value (n) in case we need to update the cache\n old_cycle = cache[temp]\n\n # If the cycle length for n has already been found\n if (old_cycle != 0) :\n cycle = old_cycle\n\n else :\n while (temp > 1) :\n if (temp % 2 == 0) :\n temp = temp / 2\n else :\n temp = 3 * temp + 1\n cycle += 1\n # End while block\n \n assert (cycle > 0)\n cache[n] = cycle # Add new cycle length into the cache\n # End else block\n\n if (cycle > max_cycle) :\n max_cycle = cycle\n # End for loop\n\n return max_cycle\n\n# -------------\n# collatz_print\n# -------------\n\ndef collatz_print (w, i, j, v) :\n \"\"\"\n print three ints\n w a writer\n i the beginning of the range, inclusive\n j the end of the range, inclusive\n v the max cycle length\n \"\"\"\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")\n\n# -------------\n# collatz_solve\n# -------------\n\ndef collatz_solve (r, w) :\n \"\"\"\n r a reader\n w a writer\n 
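# --- Editorial aside on the Collatz.py record here; not part of the original dataset. ---
# collatz_eval rebuilds a 1,000,001-entry cache on every call, caches only the
# starting numbers, and uses float division (temp / 2) under Python 3. A sketch
# of a persistent integer-only counter that also memoizes intermediate values,
# assuming the same convention that n == 1 has cycle length 1:

_cycle_cache = {1: 1}

def cycle_length(n):
    # walk until we hit a cached value, remembering the path taken
    path = []
    while n not in _cycle_cache:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    base = _cycle_cache[n]
    # unwind so every intermediate value is cached for later calls
    for m in reversed(path):
        base += 1
        _cycle_cache[m] = base
    return base

# max over an inclusive range, mirroring collatz_eval(i, j):
# max(cycle_length(k) for k in range(min(i, j), max(i, j) + 1))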
\"\"\"\n for s in r :\n i, j = collatz_read(s)\n v = collatz_eval(i, j)\n collatz_print(w, i, j, v)\n","sub_path":"Collatz.py","file_name":"Collatz.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426936713","text":"import threading\nimport graphics\n\nwin = graphics.GraphWin('Mandelbrot', 300, 300)\n\nclass Artist(threading.Thread):\n \n def __init__(self, from_x, to_x, from_y, to_y, color):\n threading.Thread.__init__(self)\n self.daemon = True\n self.from_x = from_x\n self.to_x = to_x\n self.from_y = from_y\n self.to_y = to_y\n self.color = color\n\n def run(self):\n for x in range(self.from_x, self.to_x + 1):\n for y in range(self.from_y, self.to_y + 1):\n point = graphics.Point(x, y)\n point.setFill(self.color)\n point.draw(win)\n\nartists = [Artist(0, 150, 0, 150, 'orange'), Artist(150, 300, 150, 300, 'black')]\n\nfor art in artists:\n art.start()\n art.run()","sub_path":"Python/threads/parallelize.py","file_name":"parallelize.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269930869","text":"def outsideclininds(patient):\n ''' THE REAL CLINICALLY ANNOTATED AREAS '''\n #001\n if 'id001' in patient:\n ezinds = [26,27,32,37,54,70,78,82]\n if 'id002' in patient:\n # temporal region and the right hemisphere too\n ezinds = [3, 34, 38, 55, 56, 73]\n if 'id003' in patient:\n # occipital region\n ezinds = [7, 12, 24, 28, 35]\n if 'id004' in patient: \n # occipital region\n ezinds = [7,12,24,28,35]\n if 'id005' in patient: \n # \n ezinds = [3, 34, 17, 29]\n if 'id006' in patient: \n ezinds = []\n if 'id008' in patient:\n # in the frontal region\n ezinds = [74,75, 80, 82, 77]\n if 'id009' in patient:\n ezinds = [1, 6, 7, 30, 35]\n if 'id010' in patient:\n # from the left hemisphere\n ezinds = [3, 11, 26, 32, 37]\n if 'id011' in patient:\n ezinds = [62, 67, 74, 75, 80]\n if 'id012' in patient:\n ezinds = [3, 34, 37,39, 41]\n # 013\n if 'id013' in patient:\n # in the frontal region with rostral middlefrontal\n ezinds = [74,75, 80, 82, 77]\n # 014\n if 'id014' in patient:\n # both hemispheres in frontal region\n ezinds = [13, 25, 74, 60, 62]\n if 'id015' in patient:\n ezinds = [43, 54, 70, 72, 78, 82]\n return ezinds\n\ndef clinregions(patient):\n ''' THE REAL CLINICALLY ANNOTATED AREAS '''\n #001\n if 'id001' in patient:\n ezregions = ['ctx-rh-lateralorbitofrontal', 'ctx-rh-temporalpole']\n pzregions = ['ctx-rh-superiorfrontal', 'ctx-rh-rostralmiddlefrontal', 'ctx-lh-lateralorbitofrontal']\n if 'id002' in patient:\n ezregions = ['ctx-lh-lateraloccipital']\n pzregions = ['ctx-lh-inferiorparietal', 'ctx-lh-superiorparietal']\n if 'id003' in patient:\n ezregions = ['ctx-lh-insula']\n pzregions = ['Left-Putamen', 'ctx-lh-postcentral']\n if 'id004' in patient: \n ''' '''\n ezregions = ['ctx-lh-posteriorcingulate', 'ctx-lh-caudalmiddlefrontal', 'ctx-lh-superiorfrontal']\n pzregions = ['ctx-lh-precentral', 'ctx-lh-postcentral']\n if 'id005' in patient: \n ''' '''\n ezregions = ['ctx-lh-posteriorcingulate', 'ctx-lh-precuneus']\n pzregions = ['ctx-lh-postcentral', 'ctx-lh-superiorparietal']\n if 'id006' in patient: \n ''' '''\n ezregions = ['ctx-rh-precentral']\n pzregions = ['ctx-rh-postcentral', 'ctx-rh-superiorparietal']\n if 'id007' in patient: \n ''' '''\n ezregions = ['Right-Amygdala', 'ctx-rh-temporalpole', 'ctx-rh-lateralorbitofrontal']\n pzregions = ['Right-Hippocampus', 'ctx-rh-entorhinal', 
'ctx-rh-medialorbitofrontal',\n 'ctx-rh-inferiortemporal', 'ctx-rh-temporalpole', 'ctx-rh-lateralorbitofrontal'] # 008\n if 'id008' in patient:\n ezregions = ['Right-Amygdala', 'Right-Hippocampus']\n pzregions = ['ctx-rh-superiortemporal', 'ctx-rh-temporalpole', 'ctx-rh-inferiortemporal', 'ctx-rh-medialorbitofrontal', 'ctx-rh-lateralorbitofrontal']\n if 'id009' in patient:\n ezregions = ['ctx-rh-lingual', 'ctx-rh-parahippocampal']\n pzregions = ['ctx-rh-lateraloccipital', 'ctx-rh-fusiform', 'ctx-rh-inferiorparietal'] # rlocc, rfug, ripc\n if 'id010' in patient:\n \n ezregions = ['ctx-rh-medialorbitofrontal', 'ctx-rh-frontalpole', 'ctx-rh-rostralmiddlefrontal', 'ctx-rh-parsorbitalis'] # rmofc, rfp, rrmfg, rpor \n pzregions = ['ctx-rh-lateralorbitofrontal', 'ctx-rh-rostralmiddlefrontal',\n 'ctx-rh-superiorfrontal', 'ctx-rh-caudalmiddlefrontal'] # rlofc, rrmfc, rsfc, rcmfg\n if 'id011' in patient:\n ezregions = ['Right-Hippocampus', 'Right-Amygdala'] # rhi, ramg\n pzregions = ['Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',\n 'ctx-rh-insula', 'ctx-rh-entorhinal', 'ctx-rh-temporalpole'] # rth, rcd, rpu, rins, rentc, rtmp\n if 'id012' in patient:\n ezregions = ['Right-Hippocampus', 'ctx-rh-fusiform', 'ctx-rh-entorhinal', 'ctx-rh-temporalpole'] # rhi, rfug, rentc, rtmp\n pzregions = ['ctx-lh-fusiform', 'ctx-rh-inferiorparietal', 'ctx-rh-inferiortemporal',\n 'ctx-rh-lateraloccipital', 'ctx-rh-parahippocampal', 'ctx-rh-precuneus', 'ctx-rh-supramarginal'] # lfug, ripc, ritg, rloc, rphig, rpcunc, rsmg\n # 013\n if 'id013' in patient:\n ezregions = ['ctx-rh-fusiform']\n pzregions = ['ctx-rh-inferiortemporal','Right-Hippocampus','Right-Amygdala', \n 'ctx-rh-middletemporal','ctx-rh-entorhinal']\n # 014\n if 'id014' in patient:\n ezregions = ['Left-Amygdala', 'Left-Hippocampus', 'ctx-lh-entorhinal', 'ctx-lh-fusiform',\n 'ctx-lh-temporalpole','ctx-rh-entorhinal']\n pzregions = ['ctx-lh-superiortemporal', 'ctx-lh-middletemporal', 'ctx-lh-inferiortemporal',\n 'ctx-lh-insula', 'ctx-lh-parahippocampal']\n if 'id015' in patient:\n ezregions = ['ctx-rh-lingual', 'ctx-rh-lateraloccipital', 'ctx-rh-cuneus',\n 'ctx-rh-parahippocampal', 'ctx-rh-superiorparietal', 'ctx-rh-fusiform', 'ctx-rh-pericalcarine'] # rlgg, rloc, rcun, rphig, rspc, rfug, rpc\n pzregions = ['ctx-rh-parahippocampal', 'ctx-rh-superiorparietal', 'ctx-rh-fusiform'] # rphig, rspc, rfug\n return ezregions, pzregions\n","sub_path":"tvbsim/exp/clinregions.py","file_name":"clinregions.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192584010","text":"import pickle\nimport random\n\nfrom surprise import KNNBaseline\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise import dump\n\nfrom utils import rid_to_imdbid, imdbid_to_rid\n\n\n_, algo = dump.load('./knn.algo')\nprint('Loaded knn algorithm')\n\nrecommendations = {}\nwith open('recommendations.pickle', 'rb') as file:\n recommendations = pickle.load(file)\nprint('Loaded recommendations')\n\nuser_ratings = {}\nwith open('user_ratings.pickle', 'rb') as file:\n user_ratings = pickle.load(file) \nprint('Loaded user ratings')\n\n\ndef get_similar_movies(imdb_id, num):\n if imdb_id in imdbid_to_rid.keys():\n raw_id = algo.trainset.to_inner_iid(imdbid_to_rid[imdb_id])\n neighbors = algo.get_neighbors(raw_id, k=num)\n neighbors = (algo.trainset.to_raw_iid(inner_id)\n for inner_id in neighbors)\n neighbors = (rid_to_imdbid[rid]\n for rid in neighbors)\n else:\n neighbors = 
[]\n\n return neighbors\n\ndef get_recommendations(ratings): \n ratings = [(imdbid_to_rid[rating[0]], rating[1]) for rating in ratings]\n uid = get_similar_uid(ratings)\n\n return recommendations[uid]\n\ndef get_similar_uid(ratings):\n max_score = 0\n max_uid = 0\n\n for uid in user_ratings.keys():\n score = compute_score(ratings, user_ratings[uid])\n if score > max_score:\n max_score = score\n max_uid = uid\n\n if max_uid == 0:\n max_uid = random.choice(list(user_ratings.keys()))\n\n return max_uid\n\ndef compute_score(ratings1, ratings2):\n score = 0\n for r1 in ratings1:\n for r2 in ratings2:\n if r1[0] != r2[0]:\n continue\n dif = abs(r1[1] - r2[1])\n if dif <= 2:\n score += 3 - dif\n\n return score","sub_path":"RSAPI/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"14950876","text":"import numpy as np\nfrom collections import defaultdict\nfrom common.policy import create_greedy_policy, make_epsilon_greedy_policy\n\n\ndef mc_on_policy_prediction(policy, env, num_episodes, discount_factor=1.0):\n \"\"\"\n Monte Carlo prediction algorithm. Calculates the value function\n for a given policy using sampling.\n\n Args:\n policy: A function that maps an observation to action probabilities.\n env: OpenAI gym environment.\n num_episodes: Number of episodes to sample.\n discount_factor: Gamma discount factor.\n\n Returns:\n A dictionary that maps from state -> value.\n The state is a tuple and the value is a float.\n \"\"\"\n # The final value function\n V = defaultdict(float)\n\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n\n for _ in xrange(num_episodes):\n s = env.reset()\n\n episode = []\n while True:\n a = policy(s)\n s_prime, reward, done, info = env.step(a)\n episode.append((s, reward))\n\n if done:\n break\n\n s = s_prime\n\n state_set = set([step[0] for step in episode])\n for s in state_set:\n first_i = next(i for i, step in enumerate(episode) if step[0] == s)\n G = sum(step[1] * (discount_factor ** i) for i, step in enumerate(episode[first_i:]))\n\n returns_sum[s] += G\n returns_count[s] += 1.0\n V[s] = returns_sum[s] / returns_count[s]\n\n return V\n\n\ndef mc_on_policy_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1, Q_default=None):\n \"\"\"\n Monte Carlo Control using Epsilon-Greedy policies.\n Finds an optimal epsilon-greedy policy.\n\n Args:\n env: OpenAI gym environment.\n num_episodes: Number of episodes to sample.\n discount_factor: Gamma discount factor.\n epsilon: Chance the sample a random action. Float betwen 0 and 1.\n\n Returns:\n A tuple (Q, policy).\n Q is a dictionary mapping state -> action values.\n policy is a function that takes an observation as an argument and returns\n action probabilities\n \"\"\"\n\n # Keeps track of sum and count of returns for each state\n # to calculate an average. 
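# --- Editorial aside on the recommender.py record above; not part of the original dataset. ---
# compute_score compares every rating pair, O(len(ratings1) * len(ratings2)). The
# same rule (3 - |difference| per shared movie, only when the difference is at
# most 2) can use one dict lookup per movie, assuming each movie id appears at
# most once per list (the original's nested loop would count duplicates):

def compute_score_fast(ratings1, ratings2):
    lookup = {movie: rating for movie, rating in ratings2}
    score = 0
    for movie, rating in ratings1:
        if movie in lookup:
            dif = abs(rating - lookup[movie])
            if dif <= 2:
                score += 3 - dif
    return score

# e.g. compute_score_fast([('m1', 5), ('m2', 1)], [('m1', 4)]) == 2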
We could use an array to save all\n # returns (like in the book) but that's memory inefficient.\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n\n # The final action-value function.\n # A nested dictionary that maps state -> (action -> action-value).\n Q = defaultdict(Q_default if Q_default else lambda: np.zeros(env.action_space.n))\n\n # The policy we're following\n policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)\n\n for _ in xrange(num_episodes):\n episode = []\n\n s = env.reset()\n while True:\n a_probs = policy(s)\n a = np.random.choice(np.arange(len(a_probs)), p=a_probs)\n s_prime, reward, done, info = env.step(a)\n\n episode.append((s, a, reward))\n\n if done:\n break\n\n s = s_prime\n\n state_action_set = set([(s, a) for s, a, reward in episode])\n for (s, a) in state_action_set:\n first_i = next(i for i, episode_step in enumerate(episode) if episode_step[0] == s and episode_step[1] == a)\n G = sum(episode_step[2] * (discount_factor ** i) for i, episode_step in enumerate(episode[first_i:]))\n\n pair = (s, a)\n returns_sum[pair] += G\n returns_count[pair] += 1.0\n Q[s][a] = returns_sum[pair] / returns_count[pair]\n\n return Q, policy\n\n\ndef mc_off_policy_control_weighted(env, num_episodes, behavior_policy, discount_factor=1.0):\n \"\"\"\n Monte Carlo Control Off-Policy Control using Weighted Importance Sampling.\n Finds an optimal greedy policy.\n\n Args:\n env: OpenAI gym environment.\n num_episodes: Number of episodes to sample.\n behavior_policy: The behavior to follow while generating episodes.\n A function that given an observation returns a vector of probabilities for each action.\n discount_factor: Gamma discount factor.\n\n Returns:\n A tuple (Q, policy).\n Q is a dictionary mapping state -> action values.\n policy is a function that takes an observation as an argument and returns\n action probabilities. 
This is the optimal greedy policy.\n \"\"\"\n\n # The final action-value function.\n # A dictionary that maps state -> action values\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n C = defaultdict(lambda: np.zeros(env.action_space.n))\n\n # Our greedily policy we want to learn\n target_policy = create_greedy_policy(Q)\n\n for _ in xrange(num_episodes):\n episode = []\n s = env.reset()\n while True:\n a_probs = behavior_policy(s)\n a = np.random.choice(np.arange(len(a_probs)), p=a_probs)\n s_prime, reward, done, info = env.step(a)\n episode.append((s, a, reward))\n\n if done:\n break\n\n s = s_prime\n\n G = 0\n W = 1\n for s, a, reward in reversed(episode):\n G = discount_factor * G + reward\n C[s][a] += W\n Q[s][a] += W / C[s][a] * (G - Q[s][a])\n\n if a != np.argmax(target_policy(s)):\n break\n\n W *= 1.0 / behavior_policy(s)[a]\n\n return Q, target_policy\n","sub_path":"topics/mc/mc_methods.py","file_name":"mc_methods.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"221894927","text":"import sys\r\ndef CSV(syntax):\r\n syntax = syntax.strip()\r\n # print(syntax)\r\n csv = []\r\n ret = []\r\n\r\n counter = 1\r\n\r\n while counter < len(syntax):\r\n if syntax[counter].startswith(\"'\"):\r\n sp = syntax.split(\"'\")\r\n csv.append(sp)\r\n counter += 1\r\n # print(csv)\r\n if len(csv) > 0:\r\n for i in csv[1]:\r\n if i is not \"\":\r\n ret.append(i)\r\n\r\n # print(ret)\r\n return ret\r\n\r\nclass VirtualFile():\r\n def __init__(self,code):\r\n self.code = code\r\n def read(self):\r\n return \"\\n\".join(self.code)\r\n def readlines(self):\r\n return self.code\r\nclass Interpreter():\r\n def __init__(self,file,routine_name=\"\",start_line=0):\r\n if file != \"\":\r\n self.code = open(file)\r\n self.name = file if file != \"\" else routine_name\r\n self.maxm = 0\r\n self.line = start_line+1\r\n self.flags = {}\r\n self.memory = []\r\n self.variables = {}\r\n self.functions = {}\r\n self.subroutine = False\r\n self.subroutines = {}\r\n self.memory_spaces = {\r\n \"M1\": [],\r\n \"M2\": [],\r\n \"M3\": [],\r\n \"M4\": [],\r\n \"M5\": [],\r\n \"M6\": [],\r\n \"M7\": [],\r\n \"M8\": []\r\n }\r\n self.return_pointer = 0\r\n self.subroutinecode = []\r\n self.instruction_pointer = 0\r\n def loop(self):\r\n lines = self.code.readlines()\r\n ln = len(lines)\r\n while self.instruction_pointer < ln:\r\n line = lines[self.instruction_pointer]\r\n if not line.startswith(\"#\") :\r\n self.execute(self.breakStatement(line))\r\n self.line = self.line+1\r\n self.instruction_pointer += 1\r\n # print(line,end=\"\")\r\n # print()\r\n\r\n def breakStatement(self,statement):\r\n # print(statement)\r\n r = []\r\n e = True\r\n t = \"\"\r\n s = statement.strip()\r\n for c in s:\r\n if c == \"'\":\r\n e = not e\r\n t += c\r\n elif c == \" \" and e:\r\n if t != \"\":\r\n r.append(t)\r\n t = \"\"\r\n else:\r\n t += c\r\n r.append(t)\r\n return r\r\n def execute(self,arr):\r\n # print(arr)\r\n if arr[0] != \"\" :\r\n # print(\"Gotcha\")\r\n try:\r\n if not self.subroutine:\r\n self.functions[arr[0]](self,arr[1:])\r\n else:\r\n # print(\"~\"+arr[0]+\"~\")\r\n if arr[0] == \"end\":\r\n # print(\"End\")\r\n self.functions[arr[0]](self,arr[1:])\r\n else:\r\n self.subroutinecode.append(\" \".join(arr))\r\n except KeyError as e:\r\n print(e)\r\n print(\"Error at line \"+str(self.line)+\" : \"+arr[0]+\" is not a valid code [{routine}]\".format(routine=i.name))\r\n sys.exit()\r\n except Exception as e:\r\n print(\"Error 
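# --- Editorial aside on the mc_methods.py record above; not part of the original dataset. ---
# The weighted importance-sampling control loop walks each episode backwards,
# accumulating the return G and the weight W, and updating
#     C += W;  Q += (W / C) * (G - Q)
# A numeric sketch of that recurrence for a single (state, action) stream,
# assuming gamma = 1 and per-step action probabilities under both policies:

def weighted_is_update(rewards, behavior_probs, target_probs):
    Q, C = 0.0, 0.0
    G, W = 0.0, 1.0
    for r, b, t in zip(reversed(rewards), reversed(behavior_probs),
                       reversed(target_probs)):
        G += r                    # gamma = 1, so G accumulates raw rewards
        C += W
        Q += (W / C) * (G - Q)
        W *= t / b
        if W == 0.0:              # target policy never takes this action
            break
    return Q

# e.g. weighted_is_update([0, 1], [0.5, 0.5], [1.0, 1.0]) == 1.0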
at line \"+str(self.line)+\" : [{routine}] crashed with the following input {args}.\\nReason: {error}\".format(routine=i.name,args=arr,error=e))\r\n sys.exit()\r\n\r\n\r\n def loadFunction(self,name,f):\r\n self.functions[name] = f\r\n # pass\r\n def convertInt(self,i):\r\n r = 0\r\n try:\r\n r = int(i,10)\r\n except Exception as e:\r\n r = int(1,16)\r\n return r\r\n\r\n def resolveValue(self,a):\r\n try:\r\n try:\r\n return int(a,10)\r\n except Exception as e:\r\n return int(a,16)\r\n except Exception as e:\r\n pass\r\n if type(a) == int:\r\n return a\r\n elif a.startswith(\"@\"):\r\n return self.variables[a[1:]]\r\n elif a.startswith(\"%\"):\r\n return self.memory_spaces[a[1:]]\r\n elif a.startswith(\"'\"):\r\n return a[1:-1]\r\n elif a.startswith(\"$\"):\r\n return self.flags[a[1:]]\r\n elif a.startswith(\"^\"):\r\n return type(self.resolveValue(a[1:]))\r\n elif a.startswith(\"{\") and a.endswith(\"}\"):\r\n return len(self.resolveValue(a[1:-1].strip()))\r\n elif a.startswith(\"[\") and a.endswith(\"]\"):\r\n inner = a[1:-1].strip()\r\n arr = inner.split(\"#\")[0]\r\n index = inner.split(\"#\")[1]\r\n # print(\"IN={0}, ARR={1}, RES1={2}, RES={3}\".format(index,arr,self.resolveValue(arr),self.resolveValue(index)))\r\n return self.resolveValue(arr)[self.resolveValue(index)]\r\n\r\n def isDefinedSpace(self,v):\r\n # print(v)\r\n if len(v) == 0:\r\n return False\r\n for val in v:\r\n if val != 0:\r\n return False\r\n return True\r\n# print(CSV(\"hello 'world dudes'\"))\r\n\r\ni = Interpreter(sys.argv[1])\r\n\r\ndef start(i,arg):\r\n i.maxm = int(arg[0])\r\n # print(i.maxm)\r\n for m in range(i.maxm):\r\n i.memory.append(0)\r\n\r\ndef db(i,arg):\r\n i.variables[arg[0]] = i.resolveValue(arg[1].strip())\r\n\r\ndef flag(i,arg):\r\n i.flags[arg[0]] = arg[1]\r\n\r\ndef mov(i,arg):\r\n if arg[0].startswith(\"%\"):\r\n a1 = i.resolveValue(arg[1])\r\n # print(\"[mov] {}\".format(a1))\r\n i.memory_spaces[arg[0][1:]] = a1\r\n else:\r\n a1 = i.resolveValue(arg[1])\r\n \r\n i.variables[arg[0][1:]] = a1\r\n\r\ndef put(i,arg):\r\n size = 0\r\n try:\r\n size = int(arg[1],10)\r\n except Exception as e:\r\n size = int(arg[1],16)\r\n # print(size,i.maxm,arg[1])\r\n if size < i.maxm:\r\n i.memory[size] = i.resolveValue(arg[0])\r\n else:\r\n print(\"Error at line \"+str(i.line)+\" : Invalid memory address allocation [{routine}]\".format(routine=i.name))\r\n sys.exit()\r\n\r\ndef section(i,arg):\r\n i.subroutine = True\r\n i.subroutines[arg[0]] = Interpreter(\"\",routine_name=arg[0],start_line=i.line)\r\n\r\n\r\ndef end(i,arg):\r\n i.subroutines[arg[0]].code = VirtualFile(i.subroutinecode)\r\n i.subroutinecode = []\r\n i.subroutine = False\r\n\r\ndef dec(i,arg):\r\n sub(i,[arg[0],1])\r\n\r\ndef inc(i,arg):\r\n add(i,[arg[0],1])\r\n\r\ndef sub(i,arg):\r\n if arg[0].startswith(\"@\"):\r\n resArg1 = i.resolveValue(arg[0])\r\n resArg2 = i.resolveValue(arg[1])\r\n print(resArg1,resArg2)\r\n i.variables[arg[0][1:]] = resArg1-resArg2\r\n\r\ndef add(i,arg):\r\n if arg[0].startswith(\"@\"):\r\n resArg1 = i.resolveValue(arg[0])\r\n resArg2 = i.resolveValue(arg[1])\r\n print(resArg1,resArg2)\r\n if type(resArg1) is int and type(resArg2) is int:\r\n i.variables[arg[0][1:]] = resArg1+resArg2\r\n else:\r\n i.variables[arg[0][1:]] = str(resArg1)+str(resArg2)\r\n\r\ndef allocate(i,arg):\r\n i.variables[arg[1]] = []\r\n if arg[0] != \"%\":\r\n for m in range(i.resolveValue(arg[0])):\r\n i.variables[arg[1]].append(0)\r\n\r\n\r\n\r\ndef fetch_r(i,arg):\r\n v = i.subroutines[arg[0]].memory[127]\r\n Set(i,[arg[1],v])\r\n \r\ndef 
_print(i,arg):\r\n if type(i.memory[0]) == str:\r\n print(i.memory[0].replace(\"\\\\n\",\"\\n\"),end=\"\")\r\n else:\r\n print(i.memory[0],end=\"\")\r\n\r\ndef _println(i,arg):\r\n if type(i.memory[0]) == str:\r\n print(i.memory[0].replace(\"\\\\n\",\"\\n\"))\r\n else:\r\n print(i.memory[0])\r\n\r\ndef call(i,arg):\r\n for key in i.subroutines:\r\n if key != arg[0]:\r\n i.subroutines[arg[0]].subroutines[key] = i.subroutines[key]\r\n # print \"Calling\",arg[0]\r\n i.subroutines[arg[0]].functions = i.functions\r\n i.subroutines[arg[0]].variables = i.variables\r\n i.subroutines[arg[0]].memory_spaces = i.memory_spaces\r\n i.subroutines[arg[0]].flags = i.flags\r\n # print(i.subroutines[arg[0]].memory_spaces)\r\n # i.return_pointer = i.instruction_pointer\r\n i.subroutines[arg[0]].loop()\r\n # print \"Called\", arg[0]\r\n \r\ndef ret(i,arg):\r\n i.instruction_pointer = i.return_pointer\r\n # print(\"[ip]={}\".format(i.instruction_pointer))\r\n\r\ndef push(i,arg):\r\n a1 = i.resolveValue(arg[0])\r\n if i.isDefinedSpace(i.variables[arg[1][1:]]):\r\n print(\"Error at line \"+str(i.line)+\" : Cannot push to a defined memory space [{routine}]\".format(routine=i.name))\r\n sys.exit()\r\n else:\r\n i.variables[arg[1][1:]].append(a1)\r\n\r\ndef Set(i,arg):\r\n a1 = i.resolveValue(arg[1])\r\n \r\n i.variables[arg[0][1:]] = a1\r\n # print(i.variables[arg[0][1:]])\r\n\r\n\r\n\r\ndef _cmp(i,arg):\r\n v1 = i.resolveValue(arg[0])\r\n v2 = i.resolveValue(arg[1])\r\n\r\n i.flags[\"cmp\"] = True if v1==v2 else False \r\n\r\ndef jne(i,arg):\r\n try:\r\n # print(\"[jne] {}\".format(i.flags[\"cmp\"] == False))\r\n if i.flags[\"cmp\"] == False:\r\n # print(\"JNE SUCCESS\")\r\n call(i,arg)\r\n i.instruction_pointer -= 1\r\n except Exception as e:\r\n pass\r\n\r\ndef je(i,arg):\r\n try:\r\n if i.flags[\"cmp\"]:\r\n call(i,arg[0])\r\n except Exception as e:\r\n pass\r\n\r\ni.loadFunction(\"allocate\",allocate)\r\ni.loadFunction(\"section\",section)\r\ni.loadFunction(\"println\",_println)\r\ni.loadFunction(\"fetchr\",fetch_r)\r\ni.loadFunction(\"start\",start)\r\ni.loadFunction(\"print\",_print)\r\ni.loadFunction(\"push\",push)\r\ni.loadFunction(\"call\",call)\r\ni.loadFunction(\"put\",put)\r\ni.loadFunction(\"add\",add)\r\ni.loadFunction(\"sub\",sub)\r\ni.loadFunction(\"dec\",dec)\r\ni.loadFunction(\"mov\",mov)\r\ni.loadFunction(\"inc\",inc)\r\ni.loadFunction(\"end\",end)\r\ni.loadFunction(\"set\",Set)\r\ni.loadFunction(\"jne\",jne)\r\ni.loadFunction(\"ret\",ret)\r\ni.loadFunction(\"cmp\",_cmp)\r\ni.loadFunction(\"jz\",je)\r\ni.loadFunction(\"db\",db)\r\n\r\ni.loop()\r\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"39206950","text":"class Solution(object):\n def shortestWordDistance(self, words, word1, word2):\n \"\"\"\n :type words: List[str]\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n ans = len(words)\n \n i1, i2 = -1, -1 # index of last appearance of word1 and word2\n for i, curr in enumerate(words):\n if curr == word1:\n i1 = i\n if i2 != -1:\n ans = min(i1 - i2, ans)\n if curr == word2: # simply change elif to if from \"Shortest Word Distance.\"\n i2 = i\n if i1 != -1 and i2 != i1:\n ans = min(i2 - i1, ans)\n return ans\n \n\n\"\"\"\nGiven a list of words and two words word1 and word2, return the shortest distance between these two words in the list.\n\nword1 and word2 may be the same and they represent two individual words in the 
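# --- Editorial aside on the engine.py record above; not part of the original dataset. ---
# resolveValue dispatches on a sigil prefix: literal ints, @variable, $flag,
# 'string', {x} for length, [arr#i] for indexing. (Its convertInt also appears
# to contain a typo: int(1, 16) where int(i, 16) was presumably intended.)
# A condensed sketch of the same convention; int(token, 0) covers decimal and
# 0x-prefixed hex, whereas the original also retries bare base-16:

def resolve(token, variables, flags):
    try:
        return int(token, 0)
    except ValueError:
        pass
    if token.startswith('@'):
        return variables[token[1:]]
    if token.startswith('$'):
        return flags[token[1:]]
    if token.startswith("'") and token.endswith("'"):
        return token[1:-1]
    if token.startswith('{') and token.endswith('}'):
        return len(resolve(token[1:-1].strip(), variables, flags))
    if token.startswith('[') and token.endswith(']'):
        arr, idx = token[1:-1].split('#')
        return resolve(arr.strip(), variables, flags)[
            resolve(idx.strip(), variables, flags)]
    raise ValueError('unresolvable token: %r' % token)

# e.g. resolve('[@xs#1]', {'xs': [10, 20, 30]}, {}) == 20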
list.\n\nExample:\nAssume that words = [\"practice\", \"makes\", \"perfect\", \"coding\", \"makes\"].\n\nInput: word1 = “makes”, word2 = “coding”\nOutput: 1\nInput: word1 = \"makes\", word2 = \"makes\"\nOutput: 3\nNote:\nYou may assume word1 and word2 are both in the list.\n\"\"\"\n","sub_path":"0245. Shortest Word Distance III.py","file_name":"0245. Shortest Word Distance III.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401674348","text":"import sys\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport nistats\nimport copy\n#import nistats.first_level_model\nfrom nistats.first_level_model import * #first_level_models_from_bids\n#from nilearn import plotting\nfrom nistats.reporting import plot_design_matrix\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nfrom funcs import *\n\n\n# input arguments\ntry:\n if len(sys.argv) < 2:\n raise RuntimeError()\n subject_ids = sys.argv[1:]\n if len(subject_ids) == 1 and subject_ids[0] == '--all':\n subject_ids = None\nexcept RuntimeError:\n print('Usage:\\n\\t\\tpython nistats_glm.py ...'\n '\\n\\tOR\\n\\t\\tpython nistats_glm.py --all')\n quit()\n\n\nderivatives_prefix = DERIVS_DIR + 'derivatives_'\nspace_label = SPACE\ntasks = ['friend','number']\nnodes = range(N_NODES)\n\nout_dir = get_thresh_dir(GLM_DIR)\n\n# ## ----------------------- fmriprep 1.2.0 regressors ----------------------------\n# col_regressors_fixed = [\n# 'a_comp_cor_00', 'a_comp_cor_01',\n# 'a_comp_cor_02', 'a_comp_cor_03',\n# 'a_comp_cor_04', 'a_comp_cor_05',\n# 't_comp_cor_00', 't_comp_cor_01',\n# 't_comp_cor_02', 't_comp_cor_03',\n# 't_comp_cor_04', 't_comp_cor_05',\n# 'trans_x','trans_y','trans_z',\n# 'rot_x','rot_y','rot_z']\n# # include all columns that start with these prefixes\n# col_regressors_prefs_all = []\n# # across all 4 runs of the task, include the minimum number of columns that start with these prefixes\n# col_regressors_prefs_min = ['cosine', 'non_steady_state_outlier']\n# # delete TRs based on these columns\n\n# ----------------------- fmriprep 1.4.0 regressors ----------------------------\ncol_regressors_fixed = []\n# include all columns that start with these prefixes\ncol_regressors_prefs_all = [\n'csf', 'white_matter', 'global_signal',\n'trans_x','trans_y','trans_z',\n'rot_x','rot_y','rot_z']\n# across all 4 runs of the task, include the minimum number of columns that start with these prefixes\ncol_regressors_prefs_min = [] #['non_steady_state_outlier']\n# delete TRs based on these columns\n\n\n\nfor subj in subject_ids:\n print(\"Starting subject \"+str(subj))\n\n # exclude runs based on threshold\n exclude_runs(subj)\n\n derivatives_dir_sub = derivatives_prefix + subj + '/'\n if not os.path.exists(derivatives_dir_sub):\n os.makedirs(derivatives_dir_sub)\n if not os.path.exists(derivatives_dir_sub+subj):\n # move derivatives to derivatives folder\n print(\"moving \" +DERIVS_DIR+subj+' to '+derivatives_dir_sub)\n shutil.move(DERIVS_DIR+subj, derivatives_dir_sub)\n\n # will run on all subjects in derivatives_dir_sub, so all must be done with fmriprep\n for t, task_label in enumerate(tasks):\n print(\"Starting task \"+str(t+1)+\" of \"+str(len(tasks))+\": \" + task_label)\n print(\"Calculating first level model...\")\n models, models_run_imgs, models_events, models_confounds = \\\n first_level_models_from_bids(\n BIDS_DIR, task_label, space_label\n , derivatives_folder=derivatives_dir_sub\n )\n\n if sys.version_info[0] == 3:\n # 
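# --- Editorial aside on the Shortest Word Distance III record above; not part of the original dataset. ---
# The single pass keeps the last index of each word; because the second branch is
# an `if` rather than `elif`, the word1 == word2 case reuses i2 as the previous
# occurrence. A quick check of both cases, assuming the Solution class above is
# in scope:

words = ['practice', 'makes', 'perfect', 'coding', 'makes']
sol = Solution()
assert sol.shortestWordDistance(words, 'makes', 'coding') == 1
assert sol.shortestWordDistance(words, 'makes', 'makes') == 3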
note, need list() around zip in python 3 but not in python 2\n model_and_args = list(zip(models, models_run_imgs, models_events, models_confounds))\n elif sys.version_info[0] == 2:\n model_and_args = zip(models, models_run_imgs, models_events, models_confounds)\n\n print(len(model_and_args))\n # iterate through subjects\n for midx, (model, imgs, events, confounds) in enumerate(model_and_args):\n print(midx)\n # do not mask\n model.mask=False\n # get subject label\n subnum = model.subject_label\n subid = 'sub-' + subnum\n print(\"Analyzing subject \"+ subid)\n\n # write subject's output directory\n write_sub_dir = os.path.join(out_dir, subid)\n if not os.path.exists(write_sub_dir):\n os.makedirs(write_sub_dir)\n\n # select only relevant rows and columns from events files\n print(\"Setting up events file\")\n for r, e in enumerate(events):\n events[r] = e[e['trial_type']=='noncatch']\n events[r] = events[r].filter(['node','onset_corrected','duration'])\n events[r] = events[r].rename(index=str,\n # columns={\"trial_type\":\"noncatch\", \"node\":\"trial_type\"})\n columns={\"node\":\"trial_type\", \"onset_corrected\":\"onset\"})\n print(events[r].columns)\n #events[r] = events[r].filter(['trial_type','onset_corrected','duration'])\n \t#events[r] = events[r].rename(columns = {\"onset_corrected\": \"onset\"})\n\n # subselect regressors from each run's counfounds file\n print(\"Subselecting relevant regressors\")\n # find the minimum prefix columns\n # iterate through this subject's counfounds files\n for r, c in enumerate(confounds):\n # r = run number, c = confounds dataframe\n curr = []\n for p in col_regressors_prefs_min:\n add_cols = list(c.filter(regex='^'+p,axis=1).columns)\n print(\"Adding columns for prefix \" + p + \":\")\n print(add_cols)\n curr = curr + add_cols\n # only include columns that have been found in all confounds files\n if r == 0:\n col_regressors_min = copy.deepcopy(curr)\n else:\n col_regressors_min = [x for x in curr if x in col_regressors_min]\n # subselect columns\n confounds_copy=copy.deepcopy(confounds)\n for r, c in enumerate(confounds):\n # find columns that start with prefixes\n col_regressors_all = copy.deepcopy(col_regressors_fixed)\n col_regressors_all = col_regressors_all + col_regressors_min\n for p in col_regressors_prefs_all:\n add_cols = list(c.filter(regex='^'+p,axis=1).columns)\n print(\"Adding columns for prefix \" + p + \":\")\n print(add_cols)\n col_regressors_all = col_regressors_all + add_cols\n print(\"Selected confounds for run \" + str(r+1) + \":\")\n print(col_regressors_all)\n confounds_copy[r] = c.filter(col_regressors_all)\n # fill in all NaN with 0\n confounds_copy[r] = confounds_copy[r].fillna(0)\n\n # fit the GLM\n print(\"Running GLM\")\n #model1=deepcopy(model)\n model.fit(imgs, events, confounds_copy)\n\n # save design matrices for each run\n print(\"Saving design matrices\")\n for r, mat in enumerate(model.design_matrices_):\n design_matrix = mat\n plot_design_matrix(design_matrix)\n #plt.show()\n filename=write_sub_dir+'/'+subid+'_task-'+task_label+'_run-0'+str(r+1)+'_designmat.png'\n plt.savefig(filename)\n print('Run '+str(r)+' design matrix image saved: '+filename)\n\n # setup contrast vectors (all zeros)\n n_columns = design_matrix.shape[1]\n con_empty = np.zeros(n_columns)\n\n # compute each contrast of interest (one per node)\n print(\"Saving each node's contrast files\")\n for node_label in nodes:\n # create contrast vector\n con = copy.deepcopy(con_empty)\n con[node_label] = 1\n # for output options, see\n # 
https://nistats.github.io/modules/generated/nistats.first_level_model.FirstLevelModel.html\n t_map = model.compute_contrast(con\n , stat_type='t'\n , output_type='stat'\n )\n # save t map\n filename = '%s_task-%s_space-%s_stat-t_node-0%s.nii' % (subid, task_label, space_label, str(node_label))\n t_image_path = os.path.join(write_sub_dir, filename)\n nib.save(t_map, t_image_path)\n print('File ' + write_sub_dir + '/' + filename + ' saved.')\n\n\n # move subjects' folders back to fmriprep\n print(\"Moving subject's data from \" + derivatives_dir_sub + \" to \" + DERIVS_DIR)\n shutil.move(derivatives_dir_sub+subj, DERIVS_DIR)\n os.rmdir(derivatives_dir_sub)\n","sub_path":"code/glm.py","file_name":"glm.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406040673","text":"from datetime import datetime\nfrom typing import List, Tuple, Optional\n\nfrom mason.util.logger import logger\n\nclass Response:\n def __init__(self):\n self.responses: List[dict] = []\n self.warnings: List[str] = []\n self.info: List[str] = []\n self.debug: List[str] = []\n self.errors: List[str] = []\n self.status_code: int = 200\n self.data: List[dict] = []\n self.response = {}\n \n def merge(self, response: 'Response') -> 'Response':\n self.responses = self.responses + response.responses\n self.warnings = self.warnings + response.warnings\n self.info = self.info + response.info\n self.debug = self.debug + response.debug\n self.errors = self.errors + response.errors\n self.status_code = response.status_code\n self.data = self.data + response.data\n return self\n\n def add_timestamp(self, s: Optional[str] = None) -> str:\n if logger.log_level.level > 4:\n return s or \"\"\n else:\n if s:\n return f\"{datetime.now()}: {s}\"\n else:\n return f\"{datetime.now()}\"\n\n def errored(self):\n return not (len(self.errors) == 0)\n\n def add(self, key: str, value: list):\n self.response[key] = value\n\n def add_warning(self, warning: str, log: bool = True):\n warning = self.add_timestamp(warning)\n if log:\n logger.warning(warning)\n self.warnings.append(warning)\n\n def add_info(self, info: str, log: bool = True):\n info = self.add_timestamp(info)\n if log:\n logger.info(str(info))\n self.info.append(info)\n\n def add_debug(self, debug: str, log: bool = True):\n info = self.add_timestamp(debug)\n if log:\n logger.debug(str(debug))\n self.debug.append(debug)\n\n def add_error(self, error: str, log: bool = True):\n error = self.add_timestamp(error)\n if log:\n logger.error(error)\n self.errors.append(error)\n\n def add_response(self, response: dict, log: bool = False):\n if log:\n logger.debug(f\"Response {str(response)}\")\n response[\"timestamp\"] = self.add_timestamp()\n self.responses.append(response)\n\n def set_status(self, status: int):\n self.status_code = status\n\n def add_data(self, data: dict):\n self.data.append(data)\n\n def formatted(self) -> dict:\n returns = self.response\n \n if len(self.errors) > 0:\n returns['Errors'] = self.errors\n if len(self.info) > 0:\n returns['Info'] = self.info\n if len(self.warnings) > 0:\n returns['Warnings'] = self.warnings\n\n d = [i for i in self.data if len(i) > 0]\n if len(d) > 0:\n returns['Data'] = d # type: ignore\n\n if logger.log_level.debug():\n returns['_client_responses'] = self.responses # type: ignore\n\n return returns\n\n def with_status(self) -> Tuple[dict, int]:\n return (self.formatted(), 
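# --- Editorial aside on the glm.py record above; not part of the original dataset. ---
# Each node's contrast is a one-hot vector over the design-matrix columns: the 1
# selects that node's regressor for the t-test. A minimal sketch, assuming the
# node regressors occupy the leading columns as in the loop above:

import numpy as np

def node_contrast(n_columns, node_index):
    con = np.zeros(n_columns)
    con[node_index] = 1.0
    return con

# e.g. node_contrast(5, 2) -> array([0., 0., 1., 0., 0.])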
self.status_code)\n\n","sub_path":"mason/clients/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"603715560","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport copy\n\nclass Problem():\n def __init__ (self):\n self.input = open(\"13.in\",\"r\")\n self.inputContents = self.input.readlines() # remove the last newline\n self.size = (len(self.inputContents[0][:-1]), len(self.inputContents))\n print(\"Size = {}\".format(self.size))\n self.karte = np.zeros(self.size, dtype=int)\n self.zugen = dict()\n self.ticks = 0\n self.karteMapping = {\n \" \" : 0,\n \"-\" : 1,\n \">\" : 1,\n \"<\" : 1,\n \"|\" : 2,\n \"^\" : 2,\n \"v\" : 2,\n \"/\" : 3,\n \"\\\\\" : 4,\n \"+\" : 5\n }\n self.reverseKarteMapping = {\n 0 : \" \" ,\n 1 : \"-\" ,\n 2 : \"|\" ,\n 3 : \"/\" ,\n 4 : \"\\\\\",\n 5 : \"+\"\n }\n self.zugenMapping = {\n \"^\" : 0,\n \">\" : 90,\n \"v\" : 180,\n \"<\" : 270,\n \" \" : -1,\n \"-\" : -1,\n \"|\" : -1,\n \"/\" : -1,\n \"\\\\\" : -1,\n \"+\" : -1\n }\n self.reverseZugenMapping = {\n 0 : \"^\",\n 90 : \">\",\n 180 : \"v\",\n 270 : \"<\",\n }\n self.richtungMapping = {\n 0: np.array([ 0, -1]),\n 90: np.array([ 1, 0]),\n 180: np.array([ 0, 1]),\n 270: np.array([-1, 0])\n }\n self.intersectionMapping = { # we %3 the number of intersections\n 1: -90,\n 2: 0,\n 0: 90\n }\n\n y = 0\n for line in self.inputContents:\n x = 0\n for char in line[:-1]:\n self.karte[x, y] = self.karteMapping[char]\n if self.zugenMapping[char] > -1:\n self.zugen[(x, y)] = {\n \"dir\": self.zugenMapping[char],\n \"intersections\": 0\n }\n x += 1\n y += 1\n\n while self.tick():\n self.ticks += 1\n\n print(\"t = {}\\tLast train standing = {}\".format(self.ticks, list(self.zugen.keys())[0]))\n\n #while True:\n # self.tick()\n # if np.amax(self.zugen) == 5:\n # maxidx = np.argmax(self.zugen)\n # print(\"Trains Crashed at ({}, {})\".format(int(maxidx/150), maxidx%150)\n # break;\n\n def checkSafe(self, x, y):\n checkCoord = tuple(np.array([x, y]) + self.richtungMapping[self.zugen[x, y][\"dir\"]])\n # print(\"Checking if train at ({}, {}) is safe to move to {}\".format(x, y, checkCoord))\n return not(checkCoord in self.zugen)\n\n def moveZug (self, x, y):\n # Trains shall always face the direction of where they are going next (i.e. 
at bend, they will turn on the bend)\n travelInformation = self.zugen[x, y]\n newCoord = tuple(np.array([x, y]) + self.richtungMapping[travelInformation[\"dir\"]])\n del self.zugen[(x, y)]\n\n # Calculate new travelDir\n if self.karte[newCoord[0], newCoord[1]] == 5:\n # intersection\n travelInformation[\"intersections\"] += 1\n travelInformation[\"intersections\"] %= 3\n travelInformation['dir'] += self.intersectionMapping[travelInformation[\"intersections\"]]\n elif self.karte[newCoord[0], newCoord[1]] == 3:\n # the / char\n if travelInformation['dir'] == 0 or travelInformation['dir'] == 180:\n travelInformation['dir'] += 90\n else:\n travelInformation['dir'] -= 90\n elif self.karte[newCoord[0], newCoord[1]] == 4:\n # the \\ char\n if travelInformation['dir'] == 0 or travelInformation['dir'] == 180:\n travelInformation['dir'] -= 90\n else:\n travelInformation['dir'] += 90\n\n travelInformation['dir'] %= 360\n self.zugen[newCoord] = travelInformation\n\n def tick(self):\n # self.printMap()\n currentListOfZugen = copy.deepcopy(self.zugen)\n for y in range(self.size[1]):\n for x in range(self.size[0]):\n if (x, y) in currentListOfZugen and self.checkSafe(x, y):\n self.moveZug(x, y)\n elif (x, y) in currentListOfZugen:\n # Crashed\n neuCoord = tuple(np.array([x, y]) + self.richtungMapping[self.zugen[x, y][\"dir\"]])\n print(\"Crash at {}\".format(neuCoord))\n del self.zugen[(x, y)]\n try:\n del currentListOfZugen[(x, y)]\n except:\n pass\n\n del self.zugen[neuCoord]\n try:\n del currentListOfZugen[neuCoord]\n except:\n pass\n print(len(self.zugen), \"trains left\")\n\n if(len(self.zugen) == 1):\n return False\n return True\n\n def printMap(self):\n print(\"t = {}\".format(self.ticks))\n for y in range(self.size[1]):\n for x in range(self.size[0]):\n if (x, y) in self.zugen:\n print(self.reverseZugenMapping[self.zugen[(x, y)][\"dir\"]], end=\"\")\n else:\n print(self.reverseKarteMapping[self.karte[x, y]], end=\"\")\n print(\"\")\n\nProblem()\n","sub_path":"2018/day_13/13_2.py","file_name":"13_2.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"565430370","text":"import numpy as np\r\nimport jieba\r\nimport pkg_resources\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom gensim.models import Word2Vec\r\n\r\nclass Sen2Vec(object):\r\n \"\"\" A model transform sentences to vectors\r\n\r\n Parameters\r\n ----------\r\n model : gensim.models.Word2Vec\r\n a pre-trained word2vec model\r\n \r\n Attributes\r\n ----------\r\n model : gensim.models.Word2Vec\r\n \r\n size : int, number of dimensions \r\n \r\n idf_dict : dictionary, len = number of words\r\n the idf value of each word\r\n \"\"\"\r\n def __init__(self, model=None):\r\n self.model = model\r\n self.size = model.syn0.shape[1]\r\n\r\n self.idf_dict = None\r\n\r\n def fit(self, corpus):\r\n \"\"\"calculate the idf of all words\r\n \r\n Parameters\r\n ----------\r\n corpus : list of strings\r\n ex. 
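# --- Editorial aside on the 13_2.py record above; not part of the original dataset. ---
# Headings are degrees (0 up, 90 right, 180 down, 270 left) and each intersection
# advances a counter mod 3 that maps to a -90/0/+90 heading change, i.e. the
# left/straight/right cycle. A stripped-down sketch of just that turn logic:

TURN = {1: -90, 2: 0, 0: 90}  # counter % 3 -> heading change, as in the record

def next_heading(heading, intersections):
    intersections = (intersections + 1) % 3
    return (heading + TURN[intersections]) % 360, intersections

# a cart facing up turns left, then goes straight, then turns right:
h, n = 0, 0
h, n = next_heading(h, n)   # h == 270 (left)
h, n = next_heading(h, n)   # h == 270 (straight)
h, n = next_heading(h, n)   # h == 0   (right)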
['今天 天氣 如何', '什麼 是 資料 科學']\r\n \"\"\"\r\n\r\n corpus = [\" \".join(document) for document in corpus]\r\n \r\n cv = CountVectorizer(token_pattern='\\w+')\r\n\r\n doc_term_matrix = cv.fit_transform(corpus)\r\n df = doc_term_matrix.getnnz(axis=0)\r\n n_doc, n_term = doc_term_matrix.shape\r\n\r\n idf = np.log(1+n_doc/df)\r\n vocab = sorted(cv.vocabulary_, key=cv.vocabulary_.get)\r\n \r\n self.idf_dict = dict(zip(vocab, idf))\r\n\r\n def transform(self, sentence):\r\n \"\"\"transform a sentence to vectors\r\n \r\n Parameters\r\n ----------\r\n sentence : string\r\n ex. '今天天氣如何?'\r\n \r\n Returns\r\n -------\r\n vector : ndarray, shape=(size,)\r\n a vector that represents the given sentence\r\n \"\"\"\r\n\r\n tool = tokenizer()\r\n dict_path = pkg_resources.resource_filename('sen2vec', 'resource/my_dict.txt')\r\n tool.add_word_by_file(dict_path)\r\n token_words = tool.tokenize(sentence)\r\n\r\n vector = np.zeros(100)\r\n sum_weights = 0\r\n for word in token_words:\r\n if word in self.model and word in self.idf_dict:\r\n idf = self.idf_dict[word]\r\n vector += idf*self.model[word]\r\n\r\n sum_weights += idf\r\n\r\n return vector/sum_weights\r\n\r\nclass tokenizer(object):\r\n \"\"\" A model transform sentences to vectors\r\n \r\n Attributes\r\n ----------\r\n jieba : jieba\r\n https://github.com/fxsjy/jieba\r\n\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.jieba = jieba\r\n \r\n def tokenize(self, sentence=None):\r\n \"\"\"transform a sentence to list\r\n \r\n Parameters\r\n ----------\r\n sentence : string\r\n ex. '今天天氣如何?'\r\n \r\n Returns\r\n -------\r\n tokenized_sentence : list\r\n a list of strings that are tokenized by jieba\r\n \"\"\"\r\n return self.jieba.lcut(sentence.strip(), cut_all=False)\r\n\r\n def add_word(self, word=None):\r\n \"\"\"add extra word to jieba's dictionary\r\n \r\n Parameters\r\n ----------\r\n word : string\r\n \"\"\"\r\n self.jieba.add_word(word, freq=None, tag=None)\r\n\r\n def add_word_by_file(self, filename=None):\r\n \"\"\"add extra words in file to jieba's dictionary\r\n \r\n Parameters\r\n ----------\r\n filename : string\r\n \"\"\"\r\n with open(filename, 'r') as reader:\r\n lines = reader.read().splitlines()\r\n \r\n for word in lines:\r\n self.add_word(word)","sub_path":"sen2vec/sen2vec.py","file_name":"sen2vec.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"427031950","text":"##-------------------------------\n# Cap.10 - Arquivos e exceções\n# Python Crash Course\n# Eric Matthes\n# Autor: Washington Candeia\n# division.py, p.270\n##-------------------------------\n\nprint(\"Give two numbers, and I'll divide them\")\nprint(\"Enter 'q' to quit.\")\n\n# Laço while\nwhile True:\n first_number = input('First number: ')\n if first_number == 'q':\n break\n second_number = input('Second number: ')\n\n # Bloco try-except\n try:\n answer = int(first_number) / int(second_number)\n\n except ZeroDivisionError:\n print(\"You can't divide by zero.\")\n\n else:\n print(\"Answer: \" + str(answer))\n\n","sub_path":"10_Arquivos/division.py","file_name":"division.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270810555","text":"# -*- coding: utf-8 -*-\nimport flask\nfrom pythainlp.tokenize import word_tokenize\napp = flask.Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World!\"\n\n@app.route(\"/api/word_tokenize\", 
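# --- Editorial aside on the sen2vec.py record above; not part of the original dataset. ---
# transform() computes an idf-weighted average of word vectors:
#     v(sentence) = sum_w idf(w) * v(w) / sum_w idf(w)
# (Note the record hardcodes np.zeros(100) rather than using self.size, and
# divides by sum_weights without a zero guard.) A self-contained numeric sketch
# of the weighting with toy 2-d vectors:

import numpy as np

def idf_weighted_average(tokens, vectors, idf):
    total = np.zeros(2)
    weight_sum = 0.0
    for word in tokens:
        if word in vectors and word in idf:
            total += idf[word] * vectors[word]
            weight_sum += idf[word]
    return total / weight_sum if weight_sum else total

vecs = {'cat': np.array([1.0, 0.0]), 'sat': np.array([0.0, 1.0])}
idfs = {'cat': 3.0, 'sat': 1.0}
print(idf_weighted_average(['cat', 'sat'], vecs, idfs))  # [0.75 0.25]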
methods=['GET','POST'])\n@app.route(\"/api/v1/word_tokenize\", methods=['GET','POST'])\ndef word_tokenize_api():\n\tif flask.request.method == 'POST':\n\t\tsource = flask.request.values.get('source')\n\t\tengine = flask.request.values.get('engine')\n\telse:\n\t\tsource = flask.request.args.get('source')\n\t\tengine = flask.request.args.get('engine')\n\tif engine==None:\n\t\tengine='newmm'\n\treturn '|'.join(word_tokenize(source,engine=engine))\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525081072","text":"import os, sys\nimport tensorflow as tf\nimport glob\nsys.path.append(\"../\")\nfrom tensorflow.python.keras.backend import set_session\nfrom deepctr.models import DeepFM\nfrom deepctr.inputs import SparseFeat, VarLenSparseFeat, get_feature_names\n\n\nif __name__ == \"__main__\":\n input_type = \"textline\"\n num_epochs = 1\n batch_size = 1024\n max_varlen = 5\n num_train = 221102858\n num_test = 2233373\n # train_dir = \"../data/dsp_ctr/tfrecords.test/\"\n train_dir = \"../data/dsp_ctr/tfrecords/train/\"\n test_dir = \"../data/dsp_ctr/tfrecords/test/\"\n train_file_list = glob.glob(os.path.join(train_dir, \"*\"))\n test_file_list = glob.glob(os.path.join(test_dir, \"*\"))\n\n feat_name_list = []\n feat_dtype_dict = {\"label\": str}\n sparse_features = {}\n varlen_features = {}\n sparse_feat_col_ids = {}\n varlen_feat_col_ids = {}\n with open(\"../bin/field_feature.txt\", 'r') as ff:\n for line in ff.readlines():\n field_id, _, field_name, _, bucket_size, feature_class = line.strip().split(\"\\t\")\n if bucket_size == \"-1\":\n bucket_size = 1000\n feat_name_list.append(field_name)\n if feature_class == \"multi-cat\":\n feat_dtype_dict[field_name] = str\n varlen_features[field_name] = int(bucket_size)\n varlen_feat_col_ids[field_name] = int(field_id)\n else:\n feat_dtype_dict[field_name] = str\n sparse_features[field_name] = int(bucket_size)\n sparse_feat_col_ids[field_name] = int(field_id)\n\n features = {feat: tf.FixedLenFeature([], tf.int64) for feat in sparse_feat_col_ids.keys()}\n varlen_feats = {feat: tf.FixedLenFeature([5], tf.int64) for feat in varlen_feat_col_ids.keys()}\n features.update(varlen_feats)\n features.update({\"prediction_layer\": tf.FixedLenFeature([], tf.int64)})\n\n def parse_example(example, is_training=True):\n\n parsed_features = tf.parse_single_example(example, features)\n labels = {\"prediction_layer\": parsed_features[\"prediction_layer\"]}\n return parsed_features, labels\n\n\n def get_dataset(files, parse_function, num_parallel_calls=10, batch_size=256):\n print('Parsing', files)\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(lambda item: parse_function(item, is_training=True),\n num_parallel_calls=num_parallel_calls)\n # dataset = dataset.shuffle(buffer_size=batch_size*10, reshuffle_each_iteration=True)\n try:\n dataset = dataset.batch(batch_size, drop_remainder=True)\n except:\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n\n dataset = dataset.prefetch(batch_size * 10)\n # return dataset\n return dataset.make_one_shot_iterator()\n # iterator = dataset.make_one_shot_iterator()\n # feats, label = iterator.get_next()\n # # return iterator\n # return feats, label\n\n train_iter = get_dataset(train_file_list, parse_example, batch_size=batch_size)\n # test_dataset = get_dataset(test_file_list, parse_example, 
batch_size=batch_size)\n\n # 2.count #unique features for each sparse field and generate feature config for sequence feature\n fixlen_feature_columns = [SparseFeat(feat, bucket_size)\n for feat, bucket_size in sparse_features.items()]\n varlen_feature_columns = [VarLenSparseFeat(feat, bucket_size, max_varlen, 'mean')\n for feat, bucket_size in varlen_features.items()] # Notice : value 0 is for padding for sequence input feature\n linear_feature_columns = fixlen_feature_columns + varlen_feature_columns\n dnn_feature_columns = fixlen_feature_columns + varlen_feature_columns\n feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.9\n set_session(tf.Session(config=config))\n\n # 4.Define Model,compile and train\n model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')\n\n model.compile(\"adam\", \"binary_crossentropy\", metrics=['binary_crossentropy'], )\n history = model.fit(train_iter,\n steps_per_epoch=num_train // batch_size,\n epochs=num_epochs,\n )\n\n","sub_path":"examples/run_dsp_ctr_tfdataset.py","file_name":"run_dsp_ctr_tfdataset.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3961479","text":"\nfrom Circuit import Circuit\nfrom MuxAndDMux import Mux\n\n\nclass DFF(Circuit):\n\t\"\"\"a data flip-flop\n\n\tAttributes:\n\t\tinput_ (Boolean): the DFF's initial input\n\t\toutput (Boolean): the DFF's initial output\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.input_ = False\n\t\tself.output = False\n\n\tdef tick(self, clock):\n\t\t\"\"\"updates the DFF chip's output if the clock signal is true\n\n\t\tArgs:\n\t\t\tclock (Boolean): the clock signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tif clock:\n\t\t\tself.output = self.input_\n\n\tdef input(self, a):\n\t\t\"\"\"updates the DFF chip's input\n\n\t\tArgs:\n\t\t\ta (Boolean): the input signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tself.input_ = a\n\n\nclass Bit(Circuit):\n\t\"\"\"a single-bit register\n\n\tAttributes:\n\t\toutput (Boolean): the bit's initial output\n\t\tdff (DFF): the bit's constituent data flip-flop\n\t\tmux (Mux): the bit's constituent Mux chip\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.output = False\n\t\tself.dff = DFF()\n\t\tself.mux = Mux()\n\n\tdef tick(self, clock):\n\t\t\"\"\"updates the bit's output\n\n\t\tArgs:\n\t\t\tclock (Boolean): the clock signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tself.dff.tick(clock)\n\n\t\tself.output = self.dff.output\n\n\tdef input(self, a, load):\n\t\t\"\"\"updates the bit's input and load signal\n\n\t\tArgs:\n\t\t\ta (Boolean): the input signal\n\t\t\tload (Boolean): the load signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tself.mux.input(self.dff.output, a, load)\n\n\t\tself.dff.input(self.mux.output)\n\n\nclass Register(Circuit):\n\t\"\"\"a 16-bit register\n\n\tAttributes:\n\t\toutput (Boolean): the register's initial output\n\t\tbit1..bit16 (Bit): the 16-bit register's constituent bits\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.output = tuple([0] * 16)\n\t\tself.bit1, self.bit2, self.bit3, self.bit4, self.bit5, self.bit6, self.bit7, self.bit8, self.bit9, self.bit10, self.bit11, self.bit12, \\\n\t\tself.bit13, self.bit14, self.bit15, self.bit16 = Bit(), Bit(), Bit(), Bit(), Bit(), Bit(), \\\n\t\tBit(), Bit(), Bit(), Bit(), Bit(), Bit(), Bit(), Bit(), Bit(), Bit()\n\n\tdef tick(self, 
clock):\n\t\t\"\"\"updates the register's output\n\n\t\tArgs:\n\t\t\tclock (Boolean): the clock signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tself.bit1.tick(clock), self.bit2.tick(clock), \n\t\tself.bit3.tick(clock), self.bit4.tick(clock), \n\t\tself.bit5.tick(clock), self.bit6.tick(clock), \n\t\tself.bit7.tick(clock), self.bit8.tick(clock), \n\t\tself.bit9.tick(clock), self.bit10.tick(clock), \n\t\tself.bit11.tick(clock), self.bit12.tick(clock), \n\t\tself.bit13.tick(clock), self.bit14.tick(clock), \n\t\tself.bit15.tick(clock), self.bit16.tick(clock)\n\n\t\tself.output = (self.bit1.output, self.bit2.output, self.bit3.output, \n\t\t\tself.bit4.output, self.bit5.output, self.bit6.output, \n\t\t\tself.bit7.output, self.bit8.output, self.bit9.output, \n\t\t\tself.bit10.output, self.bit11.output, self.bit12.output, \n\t\t\tself.bit13.output, self.bit14.output, self.bit15.output, \n\t\t\tself.bit16.output)\n\n\tdef input(self, a, load):\n\t\t\"\"\"updates the register's input and load signal\n\n\t\tArgs:\n\t\t\ta[16] (Boolean): the input signals\n\t\t\tload (Boolean): the load signal\n\t\tReturns:\n\t\tRaises:\n\t\t\"\"\"\n\t\tself.bit1.input(a[0], load), self.bit2.input(a[1], load), \n\t\tself.bit3.input(a[2], load), self.bit4.input(a[3], load), \n\t\tself.bit5.input(a[4], load), self.bit6.input(a[5], load), \n\t\tself.bit7.input(a[6], load), self.bit8.input(a[7], load), \n\t\tself.bit9.input(a[8], load), self.bit10.input(a[9], load), \n\t\tself.bit11.input(a[10], load), self.bit12.input(a[11], load), \n\t\tself.bit13.input(a[12], load), self.bit14.input(a[13], load), \n\t\tself.bit15.input(a[14], load), self.bit16.input(a[15], load)\n","sub_path":"SequentialGates.py","file_name":"SequentialGates.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481505356","text":"import sys\nsys.path.append('..')\nimport manybody\n\n\n# model='sh-xxz-hz'\n# param=(1.0,-0.777,10.0)\n# manybody.models.quantum_name='sh-U1'\n\n\n# model='so-xxz-hz'\n# param=(1.0,0.777,0.46674)\n# manybody.models.quantum_name='so-U1'\n\n\n# model='ldsh-lr-xxz-hz'\n# param=(1.0,1.0,0.777,0.777,0.46674)\n# manybody.models.quantum_name='ldsh-U1'\n\nmodel='ldsh-lr-xxz-hzC'\nparam=(1.0,1.0,0.777,0.777,0.46674)\nmanybody.models.quantum_name='ldsh-U1comb'\n\n\n\n# model='ldsh-lr-xxz-hzN'\n# param=(1.0,1.0,0.777,0.777,0.46674)\n# manybody.models.quantum_name='ldsh-None'\n\nL=4\n\n\n\nmpo = manybody.mpoTools.suzuki_trotter_obc_exp(-0.03, model, param, L-1, False, True)\n\n\n","sub_path":"pycode/doc/tmporary.py","file_name":"tmporary.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125899787","text":"import sys\n\n#########################\n#\t\tparams\t\t\t#\n#########################\n\ntrain_folder = './data/'\ntrain_scenes=1\nimages_per_scene=100\ntarget_path = './data.txt'\n\nif len(sys.argv)>1:\n\ttrain_folder = sys.argv[1]\nif train_folder[-1] != '/':\n\ttrain_folder += '/'\n\nif len(sys.argv)>2:\n\ttrain_scenes = int(sys.argv[2])\nif len(sys.argv)>3:\n\timages_per_scene = int(sys.argv[3])\nif len(sys.argv)>4:\n\ttarget_path = sys.argv[4]\n################################3\n#\t\t\tdefs\t\t\t\t#\n#################################\ndef img_file_names(train_folder,i):\n\tfile_name = train_folder+ format(i, '08')\n\treturn 
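# --- Editorial aside on the SequentialGates.py record above; not part of the original dataset. ---
# A Bit holds its value while load is false and captures the input when load is
# true: the Mux picks between the DFF's current output and the new input, and the
# DFF commits on the clock tick. A behavioral sketch of that contract, assuming
# the Mux selects its second input when the selector is true (as the Hack Mux does):

class BitModel:
    """out(t+1) = in(t) if load(t) else out(t)"""
    def __init__(self):
        self.out = False
        self._next = False

    def input(self, a, load):
        self._next = a if load else self.out

    def tick(self):
        self.out = self._next

bit = BitModel()
bit.input(True, load=True); bit.tick()
assert bit.out is True    # captured the new value
bit.input(False, load=False); bit.tick()
assert bit.out is True    # held the stored value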
file_name+'.jpg',file_name+'s.jpg'\n\n#################################\n#\t\t\tCollect\t\t\t\t#\n#################################\nwith open(target_path,'w') as f:\n\ti = 0\n\tfor _ in range(train_scenes*images_per_scene):\n\t\timg_n,gt_n = img_file_names(train_folder,i)\n\t\tf.write(img_n+','+gt_n+'\\n')\n\t\ti += 1\n","sub_path":"Jeries/MM_libperception/Eval/gather_panda_file_names.py","file_name":"gather_panda_file_names.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209384381","text":"from tkinter import * #Tkinter library\r\nimport random #Used in Random Page Replacement Algorithm\r\nimport matplotlib.pyplot as plt #Plotted graph using matplotlib\r\nimport os #Link Theory.py with this page\r\n\r\n# Theory of PRA\r\ndef theory():\r\n file1 = 'Theory.py'\r\n os.system(file1)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Initializing the variables\r\ndef Initialize():\r\n global root\r\n global row\r\n global col\r\n global FaultRatio\r\n row = 0\r\n col = 1\r\n FaultRatio = 0\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# First In Out First Out Page Replacement Algorithm\r\ndef FIFO(pages, n, capacity, txt, animation):\r\n Initialize()\r\n global FaultRatio\r\n global col\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n\r\n if animation is True:\r\n new_window(txt, capacity)\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n continue\r\n s = set()\r\n front = 0\r\n indexes = []\r\n page_faults = 0\r\n fault = []\r\n for i in range(n):\r\n if (len(s) < ii):\r\n if (pages[i] not in s):\r\n s.add(pages[i])\r\n page_faults += 1\r\n fault.append(True)\r\n indexes.append(pages[i])\r\n else:\r\n fault.append(False)\r\n else:\r\n if (pages[i] not in s):\r\n s.remove(indexes[front])\r\n s.add(pages[i])\r\n indexes[front] = pages[i]\r\n page_faults += 1\r\n fault.append(True)\r\n front+=1\r\n if(front>ii-1):\r\n front=0\r\n else:\r\n fault.append(False)\r\n if ii == capacity:\r\n FaultRatio = float((page_faults) / n)\r\n dummy = indexes\r\n if animation is True:\r\n anime(capacity, pages[i], dummy, fault[i], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n col += 1\r\n fault_record.append(page_faults)\r\n hit_record.append(n-page_faults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Last IN First Out Page Replacement Algorithm\r\ndef LIFO(pages, n, capacity, txt, animation):\r\n Initialize()\r\n global col\r\n global FaultRatio\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n\r\n\r\n if animation is True:\r\n new_window(txt, capacity)\r\n\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n continue\r\n s = set()\r\n end_l = ii-1\r\n indexes = []\r\n page_faults = 0\r\n fault = []\r\n for i in range(n):\r\n if (len(s) < ii):\r\n if (pages[i] not in s):\r\n s.add(pages[i])\r\n page_faults += 1\r\n fault.append(True)\r\n indexes.append(pages[i])\r\n else:\r\n fault.append(False)\r\n else:\r\n if (pages[i] not in s):\r\n s.remove(indexes[end_l])\r\n s.add(pages[i])\r\n indexes[end_l] = pages[i]\r\n 
page_faults += 1\r\n fault.append(True)\r\n else:\r\n fault.append(False)\r\n if ii == capacity:\r\n FaultRatio = float((page_faults) / n)\r\n dummy = indexes\r\n if animation is True:\r\n anime(capacity, pages[i], dummy, fault[i], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n col += 1\r\n fault_record.append(page_faults)\r\n hit_record.append(n-page_faults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Least Recently Used Page Replacement Algorithm\r\ndef LRU(processList, n, capacity, txt, animation):\r\n Initialize()\r\n global FaultRatio\r\n global col\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n\r\n\r\n if animation is True:\r\n new_window(txt, capacity)\r\n\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n continue\r\n s = []\r\n fault = []\r\n st = []\r\n pageFaults = 0\r\n j = 0\r\n for i in processList:\r\n\r\n if i not in s:\r\n\r\n if (len(s) < ii):\r\n s.append(i)\r\n st.append(len(s)-1)\r\n\r\n else:\r\n ind = st.pop(0)\r\n s[ind] = i\r\n st.append(ind)\r\n\r\n pageFaults += 1\r\n fault.append(True)\r\n else:\r\n fault.append(False)\r\n st.append(st.pop(st.index(s.index(i))))\r\n if ii == capacity:\r\n FaultRatio = float((pageFaults)/n)\r\n dummy = s\r\n if animation is True:\r\n anime(capacity, processList[j], dummy, fault[j], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n j+=1\r\n col += 1\r\n fault_record.append(pageFaults)\r\n hit_record.append(n-pageFaults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Most Recently Used Page Replacement Algorithm\r\ndef MRU(processList, n, capacity, txt, animation):\r\n Initialize()\r\n global FaultRatio\r\n global col\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n\r\n\r\n if animation is True:\r\n new_window(txt, capacity)\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n continue\r\n j = 0\r\n s = []\r\n fault = []\r\n st = []\r\n pageFaults = 0\r\n last_used_index = 0\r\n for i in processList:\r\n\r\n if i not in s:\r\n\r\n if (len(s) < ii):\r\n s.append(i)\r\n st.append(len(s)-1)\r\n last_used_index = len(s)-1\r\n else:\r\n # ind = st.pop(0)\r\n s[last_used_index] = i\r\n st.append(last_used_index)\r\n\r\n pageFaults += 1\r\n fault.append(True)\r\n else:\r\n fault.append(False)\r\n for k in range(len(s)):\r\n if s[k] == i:\r\n last_used_index = k\r\n if ii == capacity:\r\n FaultRatio = float((pageFaults)/n)\r\n dummy = s\r\n if animation is True:\r\n anime(capacity, processList[j], dummy, fault[j], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n j+=1\r\n col += 1\r\n fault_record.append(pageFaults)\r\n hit_record.append(n-pageFaults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Optimal Page Replacement Algorithm\r\ndef Optimal(processList, n, capacity, txt, animation):\r\n Initialize()\r\n global FaultRatio\r\n global col\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n if animation is True:\r\n new_window(txt, capacity)\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n 
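# A compact LRU counterpart to the loop above, assuming the same inputs;
# OrderedDict.move_to_end maintains recency order, so the least recently used
# page is always at the front. The name count_lru_faults is illustrative.
from collections import OrderedDict

def count_lru_faults(pages, capacity):
    recency = OrderedDict()   # keys ordered least -> most recently used
    faults = 0
    for page in pages:
        if page in recency:
            recency.move_to_end(page)         # refresh recency on a hit
        else:
            faults += 1
            if len(recency) == capacity:
                recency.popitem(last=False)   # evict least recently used
            recency[page] = True
    return faults

assert count_lru_faults([1, 2, 3, 4, 1, 2, 5, 1, 2, 3, 4, 5], 3) == 10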
continue\r\n s = []\r\n fault = []\r\n pageFaults = 0\r\n occurance = [None for i in range(ii)]\r\n for i in range(n):\r\n if processList[i] not in s:\r\n if len(s) < ii:\r\n s.append(processList[i])\r\n else:\r\n for x in range(len(s)):\r\n if s[x] not in processList[i + 1:]:\r\n s[x] = processList[i]\r\n break\r\n else:\r\n occurance[x] = processList[i + 1:].index(s[x])\r\n else:\r\n s[occurance.index(max(occurance))] = processList[i]\r\n pageFaults += 1\r\n fault.append(True)\r\n else:\r\n fault.append(False)\r\n if ii == capacity:\r\n FaultRatio = float((pageFaults)/n)\r\n dummy = s\r\n if animation is True:\r\n anime(capacity, processList[i], dummy, fault[i], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n col += 1\r\n fault_record.append(pageFaults)\r\n hit_record.append(n-pageFaults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Random Page Replacement Algorithm\r\ndef Random(pages, n, capacity, txt, animation):\r\n Initialize()\r\n global FaultRatio\r\n global col\r\n global root\r\n global row\r\n\r\n fault_record = []\r\n hit_record = []\r\n frames = [c for c in range(capacity + 3)]\r\n\r\n if animation is True:\r\n new_window(txt, capacity)\r\n\r\n for ii in frames:\r\n if ii == 0:\r\n fault_record.append(0)\r\n hit_record.append(0)\r\n continue\r\n s = set()\r\n indexes = []\r\n page_faults = 0\r\n fault = []\r\n for i in range(n):\r\n if (len(s) < ii):\r\n if (pages[i] not in s):\r\n s.add(pages[i])\r\n page_faults += 1\r\n fault.append(True)\r\n indexes.append(pages[i])\r\n else:\r\n fault.append(False)\r\n else:\r\n randomIndex = random.randint(0, ii - 1)\r\n if (pages[i] not in s):\r\n s.remove(indexes[randomIndex])\r\n s.add(pages[i])\r\n indexes[randomIndex] = pages[i]\r\n page_faults += 1\r\n fault.append(True)\r\n else:\r\n fault.append(False)\r\n if ii == capacity:\r\n FaultRatio = float((page_faults) / n)\r\n dummy = indexes\r\n if animation is True:\r\n anime(capacity, pages[i], dummy, fault[i], FaultRatio, txt, n, frames, fault_record, hit_record)\r\n col += 1\r\n fault_record.append(page_faults)\r\n hit_record.append(n-page_faults)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# All functions used for the visualization of the algorithms:\r\n# Opens new window for the visualization\r\ndef new_window(txt, capacity):\r\n global root\r\n root = Tk()\r\n Basic_design(capacity)\r\n root.title(\"Visualisation Of Algorithm: \" + txt)\r\n root.geometry(\"1600x660\")\r\n\r\n# For spaces in between Frames and for better clarity and visibility\r\ndef empty_space():\r\n global root\r\n global row\r\n global col\r\n L = Label(root, text=\" \", height=\"1\", width=\"1\")\r\n L.grid(row=row, column=col)\r\n row += 1\r\n\r\n# If the reference string is too large then remove this function for better visibility\r\ndef build_EmptyLabel():\r\n global col\r\n global row\r\n MyLabel1= Label(root,text=\" \",padx=15,pady=10)\r\n MyLabel1.grid(row=1,column=col+1)\r\n col+=1\r\n\r\n# Basic layout design\r\ndef Basic_design(N):\r\n k=N\r\n\r\n RefStringLabel= Label(root,text=\"Reference String\")\r\n RefStringLabel.configure(font=(\"Century Gothic\", 15))\r\n RefStringLabel.grid(row=0,column=0,padx=20,pady=10)\r\n for i in range(N):\r\n mylabel= Label(root,text=\"Frame \"+str(k),pady=10,padx=20,fg=\"black\")\r\n mylabel.configure(font=(\"Century Gothic\", 15))\r\n mylabel.grid(row=i+1,column=0)\r\n k-=1\r\n 
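# A GUI-free sketch of Belady's optimal policy as intended by Optimal() above:
# on a fault with full frames, evict the resident page whose next use lies
# furthest in the future (treating "never used again" as furthest of all).
# count_optimal_faults is an illustrative name, not from the original file.
def count_optimal_faults(pages, capacity):
    frames, faults = [], 0
    for i, page in enumerate(pages):
        if page in frames:
            continue
        faults += 1
        if len(frames) < capacity:
            frames.append(page)
            continue
        future = pages[i + 1:]
        victim = max(frames,
                     key=lambda p: future.index(p) if p in future else float("inf"))
        frames[frames.index(victim)] = page
    return faults

assert count_optimal_faults([1, 2, 3, 4, 1, 2, 5, 1, 2, 3, 4, 5], 3) == 7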
FaultStringLabel= Label(root,text=\"Page Faults\")\r\n FaultStringLabel.configure(font=(\"Century Gothic\", 15))\r\n FaultStringLabel.grid(row=N+1,column=0,padx=20,pady=10)\r\n\r\n# Function to build the labels containing reference string\r\ndef cell(element):\r\n global root\r\n global row\r\n global col\r\n L = Label(root, text=element, padx=20,pady=10,bd=1,fg=\"green\",relief=SOLID,anchor=\"center\")\r\n L.configure(font=(\"Century Gothic\", 12))\r\n L.grid(row=row, column=col)\r\n row += 1\r\n\r\n\r\n# Label for the fault and hit ratio\r\ndef FrameRatio(FaultRatio, Frames, txt, frames, fault_record, hit_record):\r\n lenCol = int(Frames / 2)\r\n frame1 = LabelFrame(root, text=\" \"+txt+\" Page Fault Ratio \",pady=15, padx=10)\r\n frame1.configure(font=(\"Century Gothic\", 15))\r\n frame1.grid(row=Frames + 4, column=lenCol, columnspan=int(Frames))\r\n HitRatio = 1 - FaultRatio\r\n myLabel4 = Label(frame1, text=\" Hit Ratio: =\", fg=\"green\", bd=1, padx=10, pady=15, relief=FLAT)\r\n myLabel4.configure(font=(\"Century Gothic\", 14, 'bold'))\r\n myLabel4.grid(row=1, column=0)\r\n myLabel5 = Label(frame1, text=\"Fault Ratio: =\", fg=\"red\", bd=1, padx=10, pady=15, relief=FLAT)\r\n myLabel5.configure(font=(\"Century Gothic\", 14, 'bold'))\r\n myLabel5.grid(row=2, column=0)\r\n e2 = Label(frame1, text=str(round(HitRatio,11)), borderwidth=3)\r\n e2.configure(font=(\"Century Gothic\", 14))\r\n e2.grid(row=1, column=1)\r\n e3 = Label(frame1, text=str(round(FaultRatio,11)), borderwidth=3)\r\n e3.configure(font=(\"Century Gothic\", 14))\r\n e3.grid(row=2, column=1)\r\n Graph = Button(root, borderwidth=\"0\", text=\"Show Graph\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 15),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\", command=lambda: graphy(frames, fault_record, hit_record))\r\n Graph.grid(row=Frames+5, column=lenCol, columnspan=int(Frames), pady=18)\r\n Back = Button(root, borderwidth=\"0\", text=\"Back\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 15),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\", command=root.destroy)\r\n Back.grid(row=Frames+6, column=lenCol, columnspan=int(Frames))\r\n\r\n# Graph of hit and fault\r\ndef graphy(frames, fault_record, hit_record):\r\n fig = plt.figure(figsize=(8, 8), dpi=80)\r\n plt.subplot(2, 1, 1)\r\n plt.xlabel(\"No. Of Frames --->\")\r\n plt.ylabel(\"Fault --->\")\r\n plt.plot(frames, fault_record, marker='x', color=\"red\", ls=\"--\")\r\n plt.subplot(2, 1, 2)\r\n plt.xlabel(\"No. 
Of Frames --->\")\r\n plt.ylabel(\"Hit --->\")\r\n plt.plot(frames, hit_record, marker='o', color=\"green\")\r\n plt.show()\r\n\r\n# Main Animation Function\r\ndef anime(Frames, Page, Q, faultOrHit, FaultRatio, txt, n, frames, fault_record, hit_record):\r\n global root\r\n global row\r\n global col\r\n row = 0\r\n L = Label(root, text=Page, pady=10, fg=\"green\")\r\n L.configure(font=(\"Century Gothic\", 15))\r\n L.grid(row=row, column=col)\r\n row += 1\r\n ls = []\r\n ls = Q\r\n for i in range(Frames - len(ls)):\r\n empty_space()\r\n\r\n for i in reversed(ls):\r\n cell(i)\r\n\r\n build_EmptyLabel()\r\n\r\n if (faultOrHit == True):\r\n FaultOrHit1 = \"Fault\"\r\n L1 = Label(root, text=FaultOrHit1, fg=\"red\")\r\n L1.configure(font=(\"Century Gothic\", 12, 'bold'))\r\n L1.grid(row=row, column=col - 1)\r\n row += 1\r\n else:\r\n FaultOrHit1 = \"Hit\"\r\n L1 = Label(root, text=FaultOrHit1, font=\"Questrial\", fg=\"green\")\r\n L1.configure(font=(\"Century Gothic\", 12, 'bold'))\r\n L1.grid(row=row, column=col - 1)\r\n row += 1\r\n FrameRatio(FaultRatio, n, txt, frames, fault_record, hit_record)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Graph Function\r\ndef graph(noF, refString):\r\n plot_list=[]\r\n algos=[\"FIFO\",\"LIFO\",\"LRU\",\"MRU\",\"Optimal\",\"Random\"]\r\n dummy=0\r\n N = int(noF)\r\n pageR = list(refString.split(\" \"))\r\n n = len(pageR)\r\n\r\n Initialize()\r\n FIFO(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n Initialize()\r\n LIFO(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n Initialize()\r\n LRU(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n Initialize()\r\n MRU(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n Initialize()\r\n Optimal(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n Initialize()\r\n Random(pageR, n, N, None, False)\r\n dummy=FaultRatio\r\n plot_list.append(dummy)\r\n\r\n fig = plt.figure()\r\n plt.bar(algos, plot_list)\r\n plt.show()\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n# Visualise Button Command\r\ndef Visualise(option, noFrame, refString):\r\n noF = (int)(noFrame)\r\n pageR = list(map(int, refString.split(\" \")))\r\n N = len(pageR)\r\n\r\n txt = \"0\"\r\n if option == \"FIFO\":\r\n txt = \"First In First Out\"\r\n FIFO(pageR, N, noF, txt, True)\r\n\r\n elif option == \"LIFO\":\r\n txt = \"Last In First Out\"\r\n LIFO(pageR, N, noF, txt, True)\r\n\r\n elif option == \"LRU\":\r\n txt = \"Least Recently Used\"\r\n LRU(pageR, N, noF, txt, True)\r\n\r\n elif option == \"MRU\":\r\n txt = \"Most Recently Used\"\r\n MRU(pageR, N, noF, txt, True)\r\n\r\n elif option == \"Optimal PRA\":\r\n txt = \"Optimal PRA\"\r\n Optimal(pageR, N, noF, txt, True)\r\n\r\n elif option == \"Random PRA\":\r\n txt = \"Random PRA\"\r\n Random(pageR, N, noF, txt, True)\r\n\r\n\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# Main Page\r\nMenu = Tk()\r\nMenu.title(\"Page Replacement Algorithm\")\r\nMenu.overrideredirect(False)\r\n# Menu.iconbitmap(\"icon.ico\")\r\nMenu.geometry(\"800x750+0+0\")\r\nMenu.resizable(False, False)\r\n\r\nL1 = Label(bg=\"black\", text=\"Page Replacement Algorithm\", fg=\"white\", font=(\"Century Gothic\", 35), 
width=\"900\",\r\n height=\"1\").pack()\r\n\r\nF1 = Frame(bg=\"white\").pack()\r\n\r\nL2 = Label(F1, text=\"Choose Algorithm:\", font=(\"Century Gothic\", 18)).pack(pady=\"15\")\r\n\r\nvariable = StringVar()\r\nvariable.set(\"FIFO\") # default value\r\ndropDown = OptionMenu(F1, variable, \"FIFO\", \"LIFO\", \"LRU\", \"MRU\",\"Optimal PRA\", \"Random PRA\")\r\ndropDown.configure(borderwidth=\"0\", width=\"12\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 12),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\")\r\ndropDown.pack(pady=\"5\")\r\n\r\nL3 = Label(F1, text=\"Enter the no. of frames:\", font=(\"Century Gothic\", 18)).pack(pady=\"20\")\r\n\r\n# take input\r\nnoFrames = Entry(F1, width=\"15\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 15), bd=\"0\", justify=\"center\")\r\nnoFrames.pack()\r\n\r\nL4 = Label(F1, text=\"Enter Page Reference: \", font=(\"Century Gothic\", 18)).pack(pady=\"30\")\r\n\r\n# take input\r\npageRef = Entry(F1, bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 15), bd=\"0\", justify=\"center\")\r\npageRef.pack()\r\n\r\nL5 = Button(F1, borderwidth=\"0\", text=\"Visualise\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 18),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\",\r\n command=lambda: Visualise(variable.get(), noFrames.get(), pageRef.get())).pack(pady=\"25\")\r\n\r\nL6 = Button(F1, borderwidth=\"0\", text=\"Compare All Algorithms\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 18),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\", command=lambda: graph(noFrames.get(), pageRef.get())).pack()\r\n\r\nL7 = Button(F1, borderwidth=\"0\", text=\"Theory\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 18),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\", command=theory).pack(pady=\"25\")\r\n\r\nL8 = Button(F1, borderwidth=\"0\", text=\"Back\", bg=\"#e8e8e8\", fg=\"green\", font=(\"Century Gothic\", 18),\r\n activeforeground=\"black\", activebackground=\"#bbbfca\", command=Menu.destroy).pack()\r\nMenu.mainloop()\r\n","sub_path":"PageReplacement.py","file_name":"PageReplacement.py","file_ext":"py","file_size_in_byte":20593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"243043044","text":"def file__list(writeList,filename):\n with open(filename, 'w') as f:\n for item in writeList:\n f.write(\"%s\\n\" % item)\n\ndef list__file(filename):\n returnList = []\n with open(filename, 'r') as filehandle:\n for line in filehandle:\n line = line.rstrip('\\n')\n returnList.append(line)\n return returnList\n","sub_path":"ytil.py","file_name":"ytil.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18313980","text":"\"\"\"Version_0005\n\nRevision ID: 866551b68d82\nRevises: 1b356097d81b\nCreate Date: 2017-08-21 01:03:53.143224\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '866551b68d82'\ndown_revision = '1b356097d81b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('user_img', sa.String(length=64), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'user_img')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/866551b68d82_version_0005.py","file_name":"866551b68d82_version_0005.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"136294763","text":"from obd.iobddevice import IOBDDevice\nfrom measure.measure import Measure\nimport can\nimport os\nimport datetime\n\nclass OBDDevice(IOBDDevice):\n\t\"\"\"Mock Thermometer that always gives readings with MOCK_VALUE and MOCK_UNITS\"\"\"\n\n\tdef __init__(self):\n\t\tself._ready = False\n\t\tself.bus = None\n\t\tself.canlistener = None\n\n\n\tdef initialize(self):\n\t\tos.system('sudo ip link set can0 type can bitrate 125000 triple-sampling on')\n\t\tos.system('sudo ifconfig can0 up')\n\t\tcan.rc['interface'] = 'socketcan_native'\n\t\tself.bus = can.interface.Bus('can0')\n\t\tself._ready = True\n\t\n\t@property\n\tdef ready(self) -> bool:\n\t\treturn self._ready\n\n\tdef read_obd(self) -> Measure:\n\t\t\"\"\"This obd device to read CAN frames\"\"\"\n\t\tassert self._ready\n\t\ttime = datetime.datetime.now()\n\n\t\ttry:\n\t\t\tstream = self.bus.recv(timeout=2)\n\t\t\twhile(stream is not None):\n\t\t\t\tprint(\"Message recieved on {}\".format(self.bus.channel_info))\n\t\t\t\tprint(\"The Message recieved is:{}\".format(stream))\n\t\t\t\treturn stream\n\t\texcept can.CanError:\n\t\t\tprint(\"Message could not be recieved\")\n\n\tdef send_obd(self):\n\t\t\"\"\"This obd device to send CAN frames\"\"\"\n\t\tassert self._ready\n\t\ttime = datetime.datetime.now()\n\t\tmsg = can.Message(arbitration_id=0xc0ffee,\n\t\t\tdata=[0, 25, 0, 1, 3, 1, 4, 1],\n\t\t\textended_id=False)\n\t\ttry:\n\t\t\tself.bus.send(msg)\n\t\t\tprint(\"Message sent on {}\".format(self.bus.channel_info))\n\t\texcept can.CanError:\n\t\t\tprint(\"Message NOT sent\")\n\n\tdef close_bus(self):\n\t\tself.bus.shutdown()\n","sub_path":"software/openobd/obd/obddevice.py","file_name":"obddevice.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454856149","text":"import os\nimport shutil\nimport CapsGANModel\nimport Preproccessor\nimport numpy as np\nimport matplotlib\n\n# disable display setting\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\n\nNOISE_DIM = 100\nNUM_CHK_SAMPLES = 16\nOURPUT_IMG_DIR = 'output/'\n\nclass CaptchaGAN(object):\n\tdef __init__(self, image_shape):\n\t\tself.image_shape = image_shape\n\t\tself.Preproccessor = Preproccessor.Preprocessor(image_shape)\n\t\tself.CapsGANModel = CapsGANModel.CapsGANModel(image_shape)\n\t\tself.discriminator = self.CapsGANModel.discriminator_model()\n\t\tself.adversarial = self.CapsGANModel.adversarial_model()\n\t\tself.generator = self.CapsGANModel.generator()\n\n\t\tif os.path.exists(OURPUT_IMG_DIR):\n\t\t\tshutil.rmtree(OURPUT_IMG_DIR)\n\t\tos.makedirs(OURPUT_IMG_DIR)\n\n\tdef train(self, train_steps=2000, batch_size=256, save_interval=0):\n\t\tnoise_input = None\n\t\tif save_interval > 0:\n\t\t\tnoise_input = np.random.uniform(-1.0, 1.0, size=[NUM_CHK_SAMPLES, NOISE_DIM])\n\t\tfor i in range(train_steps):\n\t\t\timages_train, labels_train = self.Preproccessor.loadData(num_to_load=batch_size, shuffle=False)\n\t\t\tnoise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])\n\t\t\timages_fake = self.generator.predict(noise)\n\t\t\tx = np.concatenate((images_train, images_fake))\n\t\t\ty = np.ones([2*batch_size, 
1])\n\t\t\ty[batch_size:, :] = 0\n\t\t\td_loss = self.discriminator.train_on_batch(x, y)\n\n\t\t\ty = np.ones([batch_size, 1])\n\t\t\tnoise = np.random.uniform(-1.0, 1.0, size=[batch_size, NOISE_DIM])\n\t\t\ta_loss = self.adversarial.train_on_batch(noise, y)\n\t\t\tlog_mesg = \"%d: [D loss: %f, acc: %f]\" % (i, d_loss[0], d_loss[1])\n\t\t\tlog_mesg = \"%s [A loss: %f, acc: %f]\" % (log_mesg, a_loss[0], a_loss[1])\n\t\t\tprint(log_mesg)\n\t\t\tif save_interval>0:\n\t\t\t\tif (i+1)%save_interval==0:\n\t\t\t\t\tself.plot_images(save2file=True, samples=NUM_CHK_SAMPLES, noise=noise_input, step=(i+1))\n\n\tdef plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):\n\t\tif fake:\n\t\t\tif noise is None:\n\t\t\t\tnoise = np.random.uniform(-1.0, 1.0, size=[samples, NOISE_DIM])\n\t\t\telse:\n\t\t\t\tfilename = \"fake_%d.png\" % step\n\t\t\timages = self.generator.predict(noise)\n\t\telse:\n\t\t\tfilename = 'real.png'\n\t\t\timages, _ = self.Preproccessor.loadData(num_to_load=samples, shuffle=True)\n\n\t\tplt.figure(figsize=(10,10))\n\t\tfor i in range(samples):\n\t\t\tplt.subplot(4, 4, i+1)\n\t\t\timage = images[i, :, :, :]\n\t\t\timage = np.reshape(image, self.image_shape)\n\t\t\tplt.imshow(image)\n\t\t\tplt.axis('off')\n\t\tplt.tight_layout()\n\t\tif save2file:\n\t\t\tplt.savefig(OURPUT_IMG_DIR + filename)\n\t\t\tplt.close('all')\n\t\telse:\n\t\t\tplt.show()","sub_path":"CaptchaGAN.py","file_name":"CaptchaGAN.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"412754093","text":"class Solution:\n # 常数意义上比扫两遍更优, 虽然都是O(n)\n def middleNode(self, head: ListNode) -> ListNode:\n slow = fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n\n # 扫两遍的算法, 时间O(n)\n def my_solution(self, head):\n size, cur = 0, head\n while cur:\n cur = cur.next\n size += 1\n cur = head\n for _ in range(size // 2):\n cur = cur.next\n return cur","sub_path":"leetcode-en/876.py","file_name":"876.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36836352","text":"file = open('popular-names.txt', 'r')\nlines = [line.strip() for line in file.readlines()]\n\ndict_line = {}\n\nfor line in lines:\n if line[0] not in dict_line:\n dict_line[line[0]] = 1\n else:\n dict_line[line[0]] = dict_line[line[0]] + 1\n\nprint(sorted(dict_line.items(), key=lambda x: x[1], reverse=True))\n\n#uniq -c : add count at the beginning of line\n#cut -f 1 -d \" \" popular-names.txt | sort | uniq -c | sort -k 1r (-o [output file name])","sub_path":"Mana/chapter02/knock19.py","file_name":"knock19.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493271124","text":"import argparse\nimport uuid\nfrom confluent_kafka.admin import AdminClient\nfrom confluent_kafka.cimpl import Consumer, Producer\nfrom streaming_data_types import deserialise_pl72, serialise_pl72\n\nCHILDREN = \"children\"\n\nINST_NAMES = [\n \"LARMOR\",\n \"ALF\",\n \"DEMO\",\n \"IMAT\",\n \"MUONFE\",\n \"ZOOM\",\n \"IRIS\",\n \"IRIS_SETUP\",\n \"ENGINX_SETUP\",\n \"HRPD\",\n \"POLARIS\",\n \"VESUVIO\",\n \"ENGINX\",\n \"MERLIN\",\n \"RIKENFE\",\n \"SELAB\",\n \"EMMA-A\",\n \"SANDALS\",\n \"GEM\",\n \"MAPS\",\n \"OSIRIS\",\n \"INES\",\n \"TOSCA\",\n \"LOQ\",\n \"LET\",\n \"MARI\",\n \"CRISP\",\n \"SOFTMAT\",\n \"SURF\",\n \"NIMROD\",\n 
\"DETMON\",\n \"EMU\",\n]\n\n### In progress - this will only add certain static data to the nexus file. ###\n\ndef _create_group(name, nx_class):\n return {\n \"type\": \"group\",\n \"name\": name,\n CHILDREN: [],\n \"attributes\": [{\"name\": \"NX_class\", \"values\": nx_class}],\n }\n\n\ndef _create_dataset(name, values):\n return {\"type\": \"dataset\", \"name\": name, \"attributes\": [], \"values\": values}\n\n\ndef __add_source_info(instrument):\n source = _create_group(\"source\", \"NXsource\")\n source[CHILDREN].append(_create_dataset(\"name\", \"ISIS\"))\n source[CHILDREN].append(_create_dataset(\"probe\", \"neutrons\"))\n source[CHILDREN].append(_create_dataset(\"type\", \"Pulsed Neutron Source\"))\n instrument[CHILDREN].append(source)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Amend data to runinfo messages\")\n parser.add_argument(\"-b\", \"--broker\")\n args = parser.parse_args()\n broker = args.broker\n conf = {\"bootstrap.servers\": broker, \"group.id\": str(uuid.uuid4())}\n admin_client = AdminClient(conf)\n cons = Consumer(conf)\n prod = Producer(conf)\n topics = [topic + \"_runInfo\" for topic in INST_NAMES]\n print(f\"subscribing to {topics}\")\n cons.subscribe(topics=topics)\n while True:\n try:\n # SIGINT can't be handled when polling, limit timeout to 1 second.\n msg = cons.poll(1.0)\n if msg is None:\n continue\n message_topic = msg.topic()\n instrument_name = message_topic.split(\"_runInfo\")[0]\n des = deserialise_pl72(msg.value())\n\n structure = des.nexus_structure\n entry = _create_group(\"raw_data_1\", \"NXentry\")\n detector_1 = _create_group(\"detector_1\", \"NXdetector\")\n detector_1[CHILDREN].append(structure[\"entry\"][\"events\"])\n instrument = _create_group(\"instrument\", \"NXinstrument\")\n\n __add_source_info(instrument)\n\n entry[CHILDREN].append(detector_1)\n entry[CHILDREN].append(instrument)\n entry[CHILDREN].append(_create_dataset(\"beamline\", instrument_name))\n entry[CHILDREN].append(\n _create_dataset(\"name\", instrument_name)\n ) # these seem to be the same\n\n for i in range(8):\n monitor = _create_group(f\"monitor_{i}\", \"NXmonitor\")\n entry[CHILDREN].append(monitor)\n\n new_run_message = serialise_pl72(\n filename=des.filename,\n start_time=des.start_time,\n stop_time=des.stop_time,\n run_name=des.run_name,\n service_id=des.service_id,\n instrument_name=des.instrument_name,\n broker=des.broker,\n nexus_structure=str(entry),\n job_id=des.job_id\n )\n prod.produce(topic=\"ALL_runInfo\", value=new_run_message)\n print(f\"produced: {entry}\")\n except KeyboardInterrupt:\n break\n\n cons.close()\n","sub_path":"scripts/add_data_to_runinfo.py","file_name":"add_data_to_runinfo.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471335339","text":"import urllib\r\nimport base64\r\nimport rsa\r\nimport binascii\r\npostPara = {\r\n 'entry': 'weibo',\r\n 'gateway': '1',\r\n 'from': '',\r\n 'savestate': '7',\r\n 'userticket': '1',\r\n 'ssosimplelogin': '1',\r\n 'vsnf': '1',\r\n 'vsnval': '',\r\n 'su': encodedUserName,\r\n 'service': 'miniblog',\r\n 'servertime': serverTime,\r\n 'nonce': nonce,\r\n 'pwencode': 'rsa2',\r\n 'sp': encodedPassWord,\r\n 'encoding': 'UTF-8',\r\n 'prelt': '115',\r\n 'rsakv': rsakv, \r\n 'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',\r\n 'returntype': 'META'\r\n}\r\npostData = 
urllib.urlencode(postPara)#网络编码\r\nprint(postData);\r\n","sub_path":"python/urllib.py","file_name":"urllib.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209251329","text":"import pytest\n\nfrom joblib import Parallel, delayed\nfrom collections import defaultdict\nimport pandas as pd\nimport numpy as np\n\nimport pkg_resources, os\nfrom natsort import natsorted\n\nfrom io import StringIO\n\n# from helper.functions\n\nimport logging\nfrom rpy2.robjects import r, pandas2ri\npandas2ri.activate()\n\nfrom rpy2.robjects.packages import importr\nimportr(\"S4Vectors\")\nbioc = importr(\"GenomicRanges\")\n\nlength_of_rle = r(\"function (x) sum(runLength(x))\")\n\n\nfrom epic.scripts.overlaps.files_to_chromosome_coverage import (files_to_chromosome_coverage)\n\n__author__ = \"Endre Bakken Stovner https://github.com/endrebak/\"\n__license__ = \"MIT\"\n\n\ndef nucleotide_overlaps_per_file(all_files, nb_cpu):\n\n rles = files_to_chromosome_coverage(all_files, nb_cpu)\n\n nucleotide_overlaps = Parallel(n_jobs=nb_cpu)(delayed(_nucleotide_overlaps_per_file)(\n f, rles) for f in rles)\n\n print(nucleotide_overlaps)\n return pd.concat(nucleotide_overlaps).sort_values([\"Main\", \"Other\"]).reset_index(drop=True)\n\n\ndef _nucleotide_overlaps_per_file(bed_file, extended_rles):\n\n base_bed = bed_file.split(\"/\")[-1].split(\".\")[0]\n logging.info(\"Finding the number of nucleotides in \" + base_bed + \" overlapping other files.\")\n\n _find_overlaps = r(\"\"\"\n function(s, o) {\n runValue(s) = as.logical(runValue(s))\n runValue(o) = as.logical(runValue(o))\n sum(s & o)\n }\n \"\"\")\n\n _find_total = r(\" function(s) {runValue(s) = as.logical(runValue(s)); sum(s) }\")\n\n cvs = extended_rles[bed_file]\n\n rowdicts = []\n for f in extended_rles:\n\n base_bed_other = f.split(\"/\")[-1].split(\".\")[0]\n # print(\"base bed other\", base_bed_other)\n cvos = extended_rles[f]\n\n overlapping_chromosomes = set(cvs.keys()).intersection(cvos.keys())\n overlaps, total = 0, 0\n for c in overlapping_chromosomes:\n\n cv, cvo = cvs[c], cvos[c]\n\n fov = _find_overlaps(cv, cvo)[0]\n tot = _find_total(cv)[0]\n\n overlaps += fov\n total += tot\n\n ratio = overlaps/total\n rowdict = {\"Chromosome\": c, \"Main\": base_bed, \"Other\": base_bed_other, \"Overlaps\": ratio}\n rowdicts.append(rowdict)\n\n return pd.DataFrame.from_dict(rowdicts)[\"Chromosome Main Other Overlaps\".split()]\n","sub_path":"epic/scripts/overlaps/nucleotides_heatmap.py","file_name":"nucleotides_heatmap.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394576773","text":"'''\nRegister histology session for example mouse\nNote, we have defined: start_time = sample_imaging_date\n'''\n# Author: Gaelle Chapuis, Steven J. 
West\n\nimport datetime\nfrom oneibl.one import ONE\nimport ibllib.time\nimport numpy as np\nimport json\nfrom json import JSONEncoder\n\n# override deault method of JSONEncoder to implement custom NumPy JSON serialization.\n# see https://pynative.com/python-serialize-numpy-ndarray-into-json/\n\n\nclass NumpyArrayEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return JSONEncoder.default(self, obj)\n\n\n# Test first on dev alyx for example\none = ONE(base_url='https://dev.alyx.internationalbrainlab.org')\n\nsubject = 'CSHL028' # example\nTASK_PROTOCOL = 'SWC_Histology_Serial2P_v0.0.1'\n\n# Date-Time of imaging (example), change as needed\nsample_imaging_date = datetime.date(2020, 2, 1) # Format: y - m - d\nsample_reception_date = datetime.date(2020, 4, 1)\n\n# create elastix afffine transform numpy array:\narray = np.zeros((4, 4)) # UPDATE with correct transform!\nelastix_affine_transform = {\"elastix_affine_transform\": array}\n\njson_note = {\n 'sample_reception_date': ibllib.time.date2isostr(sample_reception_date),\n 'elastix_affine_transform': array,\n 'tilt': 0,\n 'yaw': 0,\n 'roll': 0,\n 'dv_scale': 1,\n 'ap_scale': 1,\n 'ml_scale': 1\n}\n\n# use dump() to properly encode np array:\njson_note = json.dumps(json_note, cls=NumpyArrayEncoder)\n\nses_ = {\n 'subject': subject,\n 'users': ['steven.west'],\n 'location': 'serial2P_01',\n 'procedures': ['Histology'],\n 'lab': 'mrsicflogellab',\n # 'project': project['name'],\n # 'type': 'Experiment',\n 'task_protocol': TASK_PROTOCOL,\n 'number': 1,\n 'start_time': ibllib.time.date2isostr(sample_imaging_date), # Saving only the date\n # 'end_time': ibllib.time.date2isostr(end_time) if end_time else None,\n # 'n_correct_trials': n_correct_trials,\n # 'n_trials': n_trials,\n 'json': json_note\n}\n\n# overwrites the session if it already exists\nses_date = ibllib.time.date2isostr(sample_imaging_date)[:10]\nses = one.alyx.rest('sessions', 'list', subject=subject, number=1,\n date_range=[ses_date, ses_date])\nif len(ses) > 0:\n one.alyx.rest('sessions', 'delete', ses[0]['url'])\n\nsession = one.alyx.rest('sessions', 'create', data=ses_)\n","sub_path":"examples/one/histology/create_histology_session_example.py","file_name":"create_histology_session_example.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"626304314","text":"import Tkinter\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport sys\n\nclass Snake(Tkinter.Tk):\n\n def __init__(self, box_perc=0.8, len_act_snake=30, size_act_box=1000, speed=1):\n\n # initializer tkinter app\n Tkinter.Tk.__init__(self)\n\n # time to wait until next decision\n self.time_to_wait = 0.0\n # number of trials done on snake\n self.trials = 0\n # number of times the snake has eaten itself\n self.eaten = 0\n # number of times the snake has made it out of the box or survived\n self.survived = 0\n # the percentage that the box will be as compared to the screen width/height\n self.box_perc = box_perc\n # length of the actual snake (in real life)\n self.len_act_snake=len_act_snake\n # size of the box the snake will be in\n self.size_act_box=size_act_box\n # speed of the snake (cm/sec)\n self.speed = speed\n # width/height that the tkinter app canvas will be\n self.width = Tkinter.Tk.winfo_screenwidth(self) * 0.9\n self.height = Tkinter.Tk.winfo_screenheight(self) * 0.9\n\n # times we randomly wait\n self.times_waiting = []\n\n self.next_step = 
None\n\n # if the snake eats itself or makes it out of the box, game_over=True\n self.game_over = False\n # the time between decisions\n self.time_interval = 2\n # initialize the board/canvas for simulation\n self.initialize_the_board()\n\n def initialize_the_board(self):\n '''\n a module that initializes the board right up until a random direction is chosen and the snake\n is ready to move\n :return:\n '''\n\n # start with game_over = False. This will switch if the game is over (out of box / eaten)\n self.game_over = False\n\n # initialize the tkinter canvas\n self.canvas = Tkinter.Canvas(self, width=self.width, height=self.height)\n\n # title of app\n self.title('Snake Simulation')\n\n # the snake box height (which also equals the width)\n self.box_height = self.box_perc * self.height\n\n # the dimensions of the box\n self.box_dim_x0 = (self.width - self.box_height) / 2\n self.box_dim_x1 = (self.width - self.box_height) / 2 + self.box_height\n self.box_dim_y0 = (self.height - self.box_height) / 2\n self.box_dim_y1 = (self.height - self.box_height) / 2 + self.box_height\n\n # the scale/percentage of the snake\n len_snake_perc = float(self.len_act_snake) / self.size_act_box\n\n # the length of the snake that will be used for the simulation\n self.len_snake = round(len_snake_perc * self.box_height, 4)\n\n # this is for altering the actual speed of the snake. This will tell me\n # based on the size of the snake, this is how long it should take given\n # the parameters inputted at the beginning.\n # if the actual size of the box gets smaller, than the snake will\n # inherently slow down to mimick the desired snake params\n self.time_to_travel_box = float(self.size_act_box) / 2 / self.speed\n self.dist_to_travel = self.box_height / 2\n\n # the time to sleep between moving the snake\n self.time_sleep = .00000001\n\n # the theoretical step per second a snake needs to make each iteration to make the\n # params given\n self.step_per_second = (float(self.dist_to_travel) / self.time_to_travel_box)\n\n # a direction dictionary\n self.dirDict = {\n 1: 'north',\n 2: 'south',\n 3: 'west',\n 4: 'east'\n }\n\n # opposite direction dictionary\n self.oppDirDict = {\n 1: 2,\n 2: 1,\n 3: 4,\n 4: 3\n }\n\n # resetting the snakeDict\n self.snakeDict = {}\n\n # the start of the head for x/y position\n self.head_x = self.width/2\n self.head_y = self.height/2\n\n # creating a rectangle that will be the box that the snake will be in\n self.canvas.create_rectangle(\n self.head_x-float(self.box_height)/2,\n self.head_y-float(self.box_height)/2,\n self.head_x+float(self.box_height)/2,\n self.head_y+float(self.box_height)/2)\n\n # the first_created_seg is used for knowing when the last segment of the snake is dead,\n # which segment (the first created on at the time) will need changed to have negative growth\n self.first_created_seg = None\n\n # a key for the segment of the snake that has 'neg' growth\n self.neg_key = None\n\n # the time difference between moves (due to deviations, I will adjust the step based on this difference)\n self.time_diff = 0.0\n\n # adding the first segment to the snake dict. 
From here on out, '2' key will be the first segment of the snake\n # or the 'head' segment\n self.snakeDict = {\n 2: {\n 'dir': 4,\n 'growth':'pos_neg',\n 'len': self.len_snake,\n 'coords': (self.head_x - self.len_snake, self.head_y, self.head_x, self.head_y)\n }\n }\n\n\n # creating our first line which will be the head segment\n self.canvas.create_line(self.snakeDict[2]['coords'])\n\n # creating our stats of the trials that will be displayed on the top\n eps = 0.0000001\n self.canvas.create_text(\n self.width / 2, 35,\n text='Number of trials: %s\\n'\n 'Number of times eaten: %s\\n'\n 'Number of times not eaten: %s\\n'\n 'Success percentage: %.6f'\n %(self.trials, self.eaten, self.survived, float(self.survived)/(self.trials+eps)))\n self.trials += 1\n\n # updating/packing canvas\n self.canvas.update()\n self.canvas.pack()\n\n # run the app in loop\n self.time_started = time.time()\n self.st = time.time()\n\n self.run()\n\n def get_random_exp_time(self):\n '''\n genrates a random exponential time with average of 5 seconds\n :return:\n '''\n self.time_to_wait = random.expovariate(1 / (5.0))\n self.times_waiting.append(self.time_to_wait)\n\n def delete_all_segs_after_trial(self, status):\n '''\n after a snake has either eaten itself or made it to the edge of the box,\n we need to delete all of the current segments from the current app\n :param status: the way in which the app needs to send (eaten, survived)\n :return:\n '''\n\n self.write_results()\n\n # delete all segs from the canvas\n for seg in self.canvas.find_all():\n self.canvas.delete(seg)\n\n # update to clear\n self.canvas.update()\n\n # print a statement on the result of the trial\n if status == 'survived':\n self.canvas.create_text(self.width / 2, self.height / 2, text='Alexa made it out of the box!')\n else:\n self.canvas.create_text(self.width / 2, self.height / 2, text='Alexa ate herself!')\n\n self.canvas.update()\n\n time.sleep(2)\n\n # delete the text\n for seg in self.canvas.find_all():\n self.canvas.delete(seg)\n\n # update and destroy canvas\n self.canvas.update()\n self.canvas.destroy()\n\n def check_for_intercept(self):\n '''\n checks if the head of the snake intercepts any segment of the snake. In general, this is how it works:\n Case: Snake head is heading south, and the segment of interest is heading west.\n 1. Was the previous y position of the snakes head less than the y position of the segment of interest\n and now greater than the y position of that segment (it crossed the plane)\n 2. Is the x position of the head between x0 and x1\n If these are true in general, then the snake has eaten itself. 
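        A distilled, self-contained form of that plane-crossing check (a hedged
        sketch; the names are illustrative, not attributes of this class):

            def crossed(prev, cur, plane):
                # True when a coordinate moved from one side of the plane to the other
                return (prev - plane) * (cur - plane) < 0

            assert crossed(3.0, 5.0, 4.0) and not crossed(3.0, 3.5, 4.0)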
If this is true, I will change the 'game_over'\n variable to True, to collect stats and purge the game\n :return:\n '''\n\n for seg in self.snakeDict.keys():\n if self.snakeDict[seg]['dir'] == 1:\n if self.snakeDict[2]['dir'] == 3:\n if \\\n (self.snakeDict[2]['prev_head_x'] > self.snakeDict[seg]['coords'][0]) & \\\n (self.head_x < self.snakeDict[seg]['coords'][0]):\n if (self.head_y < self.snakeDict[seg]['coords'][1]) & (self.head_y > self.snakeDict[seg]['coords'][3]):\n self.eaten += 1\n self.game_over=True\n\n if self.snakeDict[2]['dir'] == 4:\n if (self.snakeDict[2]['prev_head_x'] < self.snakeDict[seg]['coords'][0]) & (self.head_x > self.snakeDict[seg]['coords'][0]):\n if (self.head_y < self.snakeDict[seg]['coords'][1]) & (self.head_y > self.snakeDict[seg]['coords'][3]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[seg]['dir'] == 2:\n if self.snakeDict[2]['dir'] == 3:\n if (self.snakeDict[2]['prev_head_x'] > self.snakeDict[seg]['coords'][0]) & (self.head_x < self.snakeDict[seg]['coords'][0]):\n if (self.head_y < self.snakeDict[seg]['coords'][3]) & (self.head_y > self.snakeDict[seg]['coords'][1]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[2]['dir'] == 4:\n if (self.snakeDict[2]['prev_head_x'] < self.snakeDict[seg]['coords'][0]) & (self.head_x > self.snakeDict[seg]['coords'][0]):\n if (self.head_y < self.snakeDict[seg]['coords'][3]) & (self.head_y > self.snakeDict[seg]['coords'][1]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[seg]['dir'] == 3:\n if self.snakeDict[2]['dir'] == 1:\n if (self.snakeDict[2]['prev_head_y'] > self.snakeDict[seg]['coords'][1]) & (self.head_y < self.snakeDict[seg]['coords'][1]):\n if (self.head_x < self.snakeDict[seg]['coords'][0]) & (self.head_x > self.snakeDict[seg]['coords'][2]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[2]['dir'] == 2:\n if (self.snakeDict[2]['prev_head_y'] < self.snakeDict[seg]['coords'][1]) & (self.head_y > self.snakeDict[seg]['coords'][1]):\n if (self.head_x < self.snakeDict[seg]['coords'][0]) & (self.head_x > self.snakeDict[seg]['coords'][2]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[seg]['dir'] == 4:\n if self.snakeDict[2]['dir'] == 1:\n if (self.snakeDict[2]['prev_head_y'] > self.snakeDict[seg]['coords'][1]) & (self.head_y < self.snakeDict[seg]['coords'][1]):\n if (self.head_x < self.snakeDict[seg]['coords'][2]) & (self.head_x > self.snakeDict[seg]['coords'][0]):\n self.eaten += 1\n self.game_over = True\n\n if self.snakeDict[2]['dir'] == 2:\n if (self.snakeDict[2]['prev_head_y'] < self.snakeDict[seg]['coords'][1]) & (self.head_y > self.snakeDict[seg]['coords'][1]):\n if (self.head_x < self.snakeDict[seg]['coords'][2]) & (self.head_x > self.snakeDict[seg]['coords'][0]):\n self.eaten += 1\n self.game_over = True\n\n # if the game is over, purge the board and re-initialize\n if self.game_over:\n self.delete_all_segs_after_trial(status='eaten')\n self.initialize_the_board()\n\n def pick_random_dir(self):\n '''\n picks a random direction (integer between 1->4). 
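        (Equivalently, a hedged sketch with OPPOSITE mirroring self.oppDirDict:
        one can sample directly from the two legal turns instead of rejecting.

            OPPOSITE = {1: 2, 2: 1, 3: 4, 4: 3}

            def pick_turn(current):
                options = [d for d in (1, 2, 3, 4)
                           if d not in (current, OPPOSITE[current])]
                return random.choice(options)
        )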
It can NOT be in the opposite direction\n that the snake is currently going\n :return:\n '''\n\n # if it is the first time the snake is moving, it moves east\n if self.time_to_wait == 0.0:\n self.random_dir = 4\n else:\n opp_dir = self.oppDirDict[self.snakeDict[2]['dir']]\n while True:\n self.random_dir = random.randint(1, 4)\n if (self.random_dir != opp_dir) & (self.random_dir != self.snakeDict[2]['dir']):\n break\n\n self.get_random_exp_time()\n self.decision_made_time = time.time()\n\n def check_total_length(self):\n '''\n checks the total length of the snake. This is more of a work-around to some weird\n stepping issues where the snake will either shrink/grow during the stepping or moving process.\n If difference between the actual length of the snake and the desired length of the snake is greater\n than a step, I will adjust the last segment of the snake to make up the difference to keep the snake length\n consistent throughout the trial\n :return:\n '''\n\n self.total_length = 0.0\n for seg in self.snakeDict.keys():\n self.total_length += self.snakeDict[seg]['len']\n if (self.next_step != None) & (abs(self.total_length - self.len_snake) > 0.0):\n self.compensate_len()\n\n def compensate_len(self):\n '''\n compensates the length of the snake if ran from self.check_total_length module\n :return:\n '''\n\n seg = None\n # if there is a negative segment (the last segment), we will compensate that segment. Otherwise,\n # we will compensate the head segment\n for each in self.snakeDict.keys():\n if self.snakeDict[each]['growth'] == 'neg':\n seg = each\n if seg is None: seg = 2\n\n # difference between actual/theoretical\n compensation_length = self.len_snake-self.total_length\n\n\n curr_coords = list(self.snakeDict[seg]['coords'])\n dir = self.snakeDict[seg]['dir']\n\n # based on the direction, either add/subtract\n if dir == 1:\n curr_coords[1] = curr_coords[1] + compensation_length\n elif dir == 2:\n curr_coords[1] = curr_coords[1] - compensation_length\n elif dir == 3:\n curr_coords[0] = curr_coords[0] + compensation_length\n elif dir == 4:\n curr_coords[0] = curr_coords[0] - compensation_length\n\n self.snakeDict[seg]['coords'] = tuple(curr_coords)\n self.update_lens()\n self.update_frame()\n\n def update_frame(self):\n '''\n just a simple module to update the frame or tkinter app\n :return:\n '''\n\n for seg in self.snakeDict.keys():\n self.canvas.coords(seg, self.snakeDict[seg]['coords'])\n self.canvas.update()\n\n def update_lens(self):\n '''\n calculates the lengths of each segment based on dimensions\n :return:\n '''\n\n for seg in self.snakeDict.keys():\n\n x0 = self.snakeDict[seg]['coords'][0]\n y0 = self.snakeDict[seg]['coords'][1]\n x1 = self.snakeDict[seg]['coords'][2]\n y1 = self.snakeDict[seg]['coords'][3]\n\n if self.snakeDict[seg]['dir'] in [1,2]:\n self.snakeDict[seg]['len'] = round(abs(y1-y0),10)\n else:\n self.snakeDict[seg]['len'] = round(abs(x1-x0), 10)\n\n def update_after_decision(self):\n '''\n After a random direction is chosen, I update the snake dict to\n reflect metadata for each segment\n :return:\n '''\n\n\n # make the growth of the head segment positive (growing)\n self.snakeDict[2]['growth'] = 'pos'\n\n # if only the head segment exists (1 segment), create a new segment\n # that is the current head segment\n if len(self.snakeDict.keys()) == 1:\n\n # create line that is the head segment\n line = self.canvas.create_line(self.snakeDict[2]['coords'])\n self.snakeDict[2]['growth'] = 'pos'\n\n # copy the current dictionary data of the head seg to this new line\n 
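            # (note: .copy() below is a shallow dict copy, which is sufficient
            # here because every per-segment value is an int, float, str or
            # tuple, i.e. immutable)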
self.snakeDict[line] = self.snakeDict[2].copy()\n self.snakeDict[line]['growth'] = 'neg'\n\n # update coords\n self.canvas.coords(2, (self.head_x, self.head_y, self.head_x, self.head_y))\n\n\n # if there are two or more segments, add a segment based on the current head segment,\n # make the growth of this segment none (i.e., not growing or shrinking)\n elif len(self.snakeDict.keys()) >= 2:\n\n line = self.canvas.create_line(self.snakeDict[2]['coords'])\n\n self.snakeDict[line] = self.snakeDict[2].copy()\n self.snakeDict[line]['growth'] = 'none'\n\n self.canvas.coords(2, (self.head_x, self.head_y, self.head_x, self.head_y))\n\n self.snakeDict[2]['dir'] = self.random_dir\n self.snakeDict[2]['coords'] = (self.head_x, self.head_y, self.head_x, self.head_y)\n\n # update the current lengths of each segment\n self.update_lens()\n # check the total length that it is the same\n self.check_total_length()\n\n def update_coords(self):\n growth = 0.0\n '''\n this module updates the coordinates of each segment as the snake is moving based off of:\n 1. Direction\n 2. Growth\n 3. If the segment has run out of length (needs removed from canvas)\n :return:\n '''\n\n self.to_delete_seg = None\n self.get_next_step()\n\n for seg in self.snakeDict.keys():\n # get current coordinates of the segment\n x0, y0, x1, y1 = self.snakeDict[seg]['coords']\n\n if self.snakeDict[seg]['dir'] == 1:\n if self.snakeDict[seg]['growth'] == 'pos':\n y1 -= self.next_step\n growth += self.next_step\n elif self.snakeDict[seg]['growth'] == 'neg':\n y0 -= self.next_step\n growth -= self.next_step\n elif self.snakeDict[seg]['growth'] == 'pos_neg':\n y1 -= self.next_step\n y0 -= self.next_step\n if (y1 >= y0) & (seg != 2):\n self.to_delete_seg = seg\n\n if self.snakeDict[seg]['dir'] == 2:\n if self.snakeDict[seg]['growth'] == 'pos':\n y1 += self.next_step\n growth += self.next_step\n elif self.snakeDict[seg]['growth'] == 'neg':\n y0 += self.next_step\n growth -= self.next_step\n elif self.snakeDict[seg]['growth'] == 'pos_neg':\n y1 += self.next_step\n y0 += self.next_step\n if (y0 >= y1) & (seg != 2):\n self.to_delete_seg = seg\n\n if self.snakeDict[seg]['dir'] == 3:\n if self.snakeDict[seg]['growth'] == 'pos':\n x1 -= self.next_step\n growth += self.next_step\n elif self.snakeDict[seg]['growth'] == 'neg':\n x0 -= self.next_step\n growth -= self.next_step\n elif self.snakeDict[seg]['growth'] == 'pos_neg':\n x1 -= self.next_step\n x0 -= self.next_step\n if (x1 >= x0) & (seg != 2):\n self.to_delete_seg = seg\n\n if self.snakeDict[seg]['dir'] == 4:\n if self.snakeDict[seg]['growth'] == 'pos':\n x1 += self.next_step\n growth += self.next_step\n elif self.snakeDict[seg]['growth'] == 'neg':\n x0 += self.next_step\n growth -= self.next_step\n elif self.snakeDict[seg]['growth'] == 'pos_neg':\n x1 += self.next_step\n x0 += self.next_step\n if (x0 >= x1) & (seg != 2):\n self.to_delete_seg = seg\n\n if seg in self.snakeDict.keys(): self.snakeDict[seg]['coords'] = (x0, y0, x1, y1)\n\n self.update_lens()\n\n # update prev_head variables\n if seg == 2:\n self.snakeDict[2]['prev_head_x'] = self.head_x\n self.snakeDict[2]['prev_head_y'] = self.head_y\n self.head_x = x1\n self.head_y = y1\n\n if self.to_delete_seg:\n self.delete_dead_seg(self.to_delete_seg)\n # if there is a self.first_created_seg variable, delete from current dictionary\n if self.first_created_seg:\n self.snakeDict.pop(self.to_delete_seg)\n self.first_created_seg = None\n\n # if there is a self.neg_key, delete from current dictionary\n if self.neg_key:\n if self.neg_key in 
self.snakeDict.keys():\n self.snakeDict.pop(self.neg_key)\n self.neg_key = None\n\n self.check_total_length()\n\n def check_if_out_of_box(self):\n '''\n this module checks if the head has made it out of the box\n :return:\n '''\n\n\n if (self.head_x > self.box_dim_x1) | (self.head_x < self.box_dim_x0) | (self.head_x > self.box_dim_x1) | (self.head_x < self.box_dim_x0):\n self.survived += 1\n self.game_over = True\n self.delete_all_segs_after_trial(status='survived')\n\n self.initialize_the_board()\n\n if (self.head_y > self.box_dim_y1) | (self.head_y < self.box_dim_y0) | (self.head_y > self.box_dim_y1) | (self.head_y < self.box_dim_y0):\n self.survived += 1\n self.game_over = True\n self.delete_all_segs_after_trial(status='survived')\n\n self.initialize_the_board()\n\n def get_next_step(self):\n '''\n this adjusts the speed based on the time taken between steps. Since it depends on the computer\n and calculation speeds, I adjust the step based on time take. If it takes longer than time_sleep, then we\n increase the step, otherwise it is decreased\n :return:\n '''\n\n self.next_step = self.step_per_second * (time.time() - self.st)\n self.st = time.time()\n\n def write_results(self):\n with open('results.txt','w') as f:\n f.write('Summary Statistics:\\n')\n f.write('-------------------------\\n')\n f.write('Number of trials: %s\\n' %self.trials)\n f.write('Number of successes: %s\\n' % self.survived)\n f.write('Number of fails: %s\\n' % self.eaten)\n\n plt.hist(self.times_waiting)\n plt.xlabel('Times waited')\n plt.ylabel('Frequency')\n plt.title('Histogram of random times the snake waited before making a decision')\n plt.savefig('plot.png')\n plt.close()\n\n def run(self):\n '''\n A module that will loop over until the snake has eaten itself or made it out of the box\n :param self:\n :return:\n '''\n\n while self.game_over == False:\n\n if self.time_to_wait == 0.0:\n self.pick_random_dir()\n self.update_after_decision()\n\n\n self.update_coords()\n self.update_frame()\n self.check_if_out_of_box()\n self.check_for_intercept()\n\n # if the time has come to make another decision, it is done here\n if time.time() - self.decision_made_time >= self.time_to_wait:\n self.pick_random_dir()\n self.update_after_decision()\n\n def delete_dead_seg(self, to_delete_seg):\n '''\n this module deletes a dead segment (it has run out of length. If the length of the segments is\n 2 (head + 1 more), then I make the head a pos/neg growth meaning it length stays constant. 
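        (a sketch of that branch, with illustrative names:

            if len(segments) == 2:          # only the head plus its dying tail remain
                head['growth'] = 'pos_neg'  # translate at a fixed length
        )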
otherwise, we\n delete the dead segment and make the \"first created segment\" other than the deleted a negative growth\n :param self:\n :param to_delete_seg:\n :return:\n '''\n static_segs = []\n for seg in self.snakeDict.keys():\n if self.snakeDict[seg]['growth'] == 'none':\n static_segs.append(seg)\n if self.snakeDict[seg]['growth'] == 'neg':\n neg_key = seg\n\n if len(self.snakeDict.keys()) == 2:\n self.snakeDict[2]['growth'] = 'pos_neg'\n self.canvas.delete(neg_key)\n self.neg_key = neg_key\n\n else:\n self.neg_key = neg_key\n self.first_created_seg = min(static_segs)\n self.snakeDict[self.first_created_seg]['growth'] = 'neg'\n self.canvas.delete(to_delete_seg)\n\n\nif __name__ == \"__main__\":\n app = Snake(len_act_snake=100,speed=25)\n app.mainloop()\n\n# stole from stack: https://stackoverflow.com/questions/10039485/tkinter-runtimeerror-maximum-recursion-depth-exceeded\n# https://github.com/RageB\n# ll/tkinter-snake/blob/master/python_snake.py","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":24952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562955906","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\ngenero = (\n ('Hombre', 'Hombre'),\n ('Mujer', 'Mujer'),\n)\n\n\nclass UserModel(AbstractUser):\n email = models.EmailField(_('email address'), unique=True)\n avatar = models.ImageField(upload_to='plots', default='plots/default.png')\n location = models.CharField(max_length=140)\n gender = models.CharField(max_length=140, choices=genero)\n information = models.TextField()\n\n def __str__(self):\n return 'Usuario: {}'.format(self.username)\n\n def save(self, *args, **kwargs):\n try:\n this = UserModel.objects.get(id=self.id)\n if this.avatar != self.avatar:\n this.avatar.delete(save=False)\n except:\n pass\n super(UserModel, self).save(*args, **kwargs)\n\n\nclass Post(models.Model):\n user = models.ForeignKey(UserModel, on_delete=models.CASCADE)\n title = models.CharField(max_length=140)\n post_text = models.TextField()\n post_date = models.DateTimeField(editable=False)\n post_modified = models.DateTimeField(editable=False)\n\n def __str__(self):\n return (\n f'Post de {self.user.username:<40} - Titulo: {self.title:>60} - Ultima modificacion:'\n f' {self.post_date}'\n )\n\n def save(self, *args, **kwargs):\n if not self.id:\n self.post_date = timezone.now()\n self.post_modified = timezone.now()\n return super(Post, self).save(*args, **kwargs)\n","sub_path":"plots/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"608247400","text":"import json\nimport utils.response as res\nfrom config import dynamodb\n\ndef handler(event, context):\n\n try:\n\n # Get input from user.\n path_parameters = event.get('pathParameters')\n\n # Email\n email = path_parameters.get('email')\n\n # Check if Null.\n if (email is None): raise Exception(\"Email not found.\")\n\n # Get response from database.\n response = dynamodb.get_item(\n TableName=\"Transactions\",\n Key={ 'Email': { 'S': email } }\n ).get(\"Item\", {})\n \n if (response is None): return res.build(200, [])\n\n transactions = json.loads(response.get(\"Transactions\", {}).get('S', \"[]\"))\n\n return res.build(200, transactions)\n \n except Exception as e:\n\n print(str(e))\n\n return 
res.build(400, {})\n\n","sub_path":"functions/get-transactions.py","file_name":"get-transactions.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119350982","text":"\"\"\"\nUsage use with ./run-celery.sh\n\n\"\"\"\n\n\nimport signal\nimport subprocess\nfrom datetime import datetime\nfrom pathlib import Path\nfrom shutil import copyfile\n\nlogfile = 'logs/latest.log'\n\nprint('[scripts/celery.py] Restarted Celery')\nprint(f'[scripts/celery.py] Logging to {logfile}')\n# print('[scripts/celery.py] disk_usage', disk_usage(Path('logs/')))\n\nPath(logfile).touch()\nassert Path(logfile).is_file()\n\nwith open(logfile, 'w'):\n pass\n\n\ncommand_args = \\\n f'celery -E -A app.celery worker --loglevel=info -f {logfile}'.split(' ')\nproc = subprocess.Popen(command_args, shell=False)\n\ntry:\n proc.communicate()\nexcept KeyboardInterrupt:\n proc.send_signal(signal.SIGTERM)\n time = datetime.now()\n # rename the log file\n copyfile(logfile, f\"logs/log{time}.log\")\n exit(0)\n","sub_path":"scripts/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115219690","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport time\nimport MySQLdb\nimport MySQLdb.cursors\nfrom config.config import Config\nfrom utils.logger import Logger\n\n\nclass Database:\n\n def __init__(self, product):\n self.product = product\n self.config = Config().mysql(product.lower())\n self.cursor = None\n self.db = None\n self.open()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.close()\n\n def open(self):\n\n retry = 0\n\n while retry <= 3:\n\n try:\n self.db = MySQLdb.connect(self.config[\"host\"],\n self.config[\"user\"],\n self.config[\"pwd\"],\n self.config[\"schema\"],\n port=self.config[\"port\"],\n cursorclass=MySQLdb.cursors.DictCursor,\n charset=\"utf8\",\n connect_timeout=60,\n use_unicode=True)\n break\n\n except MySQLdb.OperationalError as e:\n retry += 1\n time.sleep(1)\n\n def close(self):\n self.db.close()\n\n def commit(self):\n if self.db.open:\n self.db.commit()\n # self.close()\n\n def rollback(self):\n if self.db.open:\n self.db.rollback()\n self.close()\n\n def last_id(self):\n return self.db.insert_id()\n\n def execute(self, query, args=tuple(), debug=False):\n\n if debug:\n Logger().debug({\"query\": re.sub(' +', ' ', query.replace(\"\\n\", \"\")), \"args\": args})\n\n retry = 0\n\n while retry <= 2:\n try:\n self.cursor = self.db.cursor()\n self.cursor.execute(query, args)\n return self.cursor\n except MySQLdb.OperationalError as e:\n retry += 1\n self.open()\n\n def get_attributes_from_database(self, table_name):\n\n query = \"SHOW FULL COLUMNS FROM %s\" % table_name\n\n cursor = self.execute(query)\n\n attributes = []\n for row in cursor.fetchall():\n attributes.append(row[\"Field\"])\n\n cursor.close\n return attributes\n\n\n","sub_path":"app/utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496507002","text":"#!/usr/bin/python\n\n\"\"\" \n This is the code to accompany the Lesson 3 (decision tree) mini-project.\n\n Use a Decision Tree to identify emails from the Enron corpus by author: \n Sara has label 0\n Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import 
time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\n\nminSamplesSplit=40\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train_1, features_test_1, labels_train_1, labels_test_1 = preprocess(selectPercentile=1)\n\n#when selectPercentile=10\nfeatures_train_10, features_test_10, labels_train_10, labels_test_10 = preprocess(selectPercentile=10)\n\n\nprint(\"number of features in training data when selectPercentile=1:\",len(features_train_1[0]))\n\nprint(\"number of features in training data when selectPercentile=10:\",len(features_train_10[0]))\n\n\n#########################################################\n### your code goes here ###\n\nfrom sklearn import tree\nclf_1=tree.DecisionTreeClassifier(min_samples_split=minSamplesSplit)\nclf_1=clf_1.fit(features_train_1, labels_train_1)\n\nacc1=clf_1.score(features_test_1,labels_test_1)\n\nclf_10=tree.DecisionTreeClassifier(min_samples_split=minSamplesSplit)\nclf_10=clf_10.fit(features_train_10, labels_train_10)\n\nacc10=clf_10.score(features_test_10,labels_test_10)\n\n#########################################################\n\nprint(\"accuracy when selectPercentile=1:\",acc1)\n\nprint(\"accuracy when selectPercentile=10:\",acc10)\n\n","sub_path":"decision_tree/dt_author_id.py","file_name":"dt_author_id.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"28567945","text":"# quiz game: the questions and their answers are stored in a text file\n#\nimport sys\ndef open_file(name_of_file, mode):\n try:\n the_file = open(name_of_file, mode, encoding='utf-8')\n print(\"file is open\")\n except IOError as e:\n print(\"could not open this file -\", e, \"error\")\n sys.exit()\n else:\n return the_file\ndef next_line(the_file):\n line = the_file.readline()\n line = line.replace(\"/\", \"\\n\")\n return line\ndef next_block(the_file):\n category = next_line(the_file)\n question = next_line(the_file)\n answer = []\n explanation = None\n for i in range(4):\n answer.append(next_line(the_file))\n correct = next_line(the_file)\n if correct:\n correct = correct[0]\n explanation = next_line(the_file)\n return category, question, answer, correct, explanation\ndef welcome(title):\n print(\"welcome\")\n print(title)\ndef main():\n quiz_file = open_file(\"quiz.txt\", \"r\")\n title = quiz_file.readline()\n welcome(title)\n score = 0\n category, question, answers, correct, explanation = next_block(quiz_file)\n while category:\n print(category)\n print(question)\n for i in range(4):\n print(answers[i], \"\\n\")\n answer = input(\"your answer - \")\n if answer == correct:\n print(\"yes\")\n score += 1\n else:\n print(\"no\")\n print(explanation)\n print(\"score:\", score)\n category, question, answers, correct, explanation = next_block(quiz_file)\n print(\"that was the final question, goodbye\")\n print(\"your score:\", score)\n quiz_file.close()\nmain()","sub_path":"Python/quiz/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"116711440","text":"# by sam crawford, the scope of this program is to pull data from a .csv file, weather temperatures in sitka, ak for the year 2014, then to create an x,y graph of highs, and lows, while shading the area between.\n# the graph is labeled with temp on 
y, and date on x for the entire year of 2014. Also this is the first real python app I have written since completing a course on python.\nimport csv #imports CSV module \nfrom datetime import datetime\n\nfrom matplotlib import pyplot as plt #imports plotting tools.\n\n#get dates, high and low temperatures from file\nfilename = 'sitka_weather_2014.csv'\nwith open(filename) as f: #Python opens up the file, then uses the csv module to extract data.\n reader = csv.reader(f)\n header_row = next(reader)\n\n dates, highs, lows = [], [], [] #From the various pieces of data, I chose date, High Temps, and Low Temps. I left the lists empty.\n for row in reader:\n try:\n current_date = datetime.strptime(row[0], \"%Y-%m-%d\") #The date-time module is used to convert the numerical date in the csv file, into a string stating the date.\n high = int(row[1])\n low = int(row[3])\n except ValueError:\n print(current_date, 'missing data') # I used this except statement to discard rows with missing data and still create the graph instead of raising an error.\n else:\n dates.append(current_date) \n highs.append(high) \n lows.append(low)\n\n#plot data\nfig = plt.figure(dpi=128, figsize=(10, 6)) # I defined a figure to be plotted using the matplotlib module, then defined the high temps day/temp to plot, and be red.\nplt.plot(dates, highs, c='red', alpha=0.5) # and the lows to be blue, then to fill in the space between the High Temp line and low temp line.\nplt.plot(dates, lows, c='blue', alpha=0.5)\nplt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)\n\n#Format the graph: give it a title, label the x and y axes and set font sizes, creating a cool x,y graph.\ntitle = \"Daily high and low temperatures - 2014\\nDeath Valley, CA\"\nplt.title(title, fontsize=20)\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel(\"Temperature (F)\", fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n\nplt.show()","sub_path":"death_valley.py","file_name":"death_valley.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"444055837","text":"#!/usr/bin/env python3\n\n# WS server example that synchronizes state across clients\n\nimport asyncio\nimport json\nimport logging\nimport websockets\nimport os\nimport base64\n\nlogging.basicConfig()\n\n\nUSERS = set()\n\nasync def enumerate_images(websocket):\n if USERS: # asyncio.wait doesn't accept an empty list\n filelist=list()\n for r,d,f in os.walk('images'):\n for file in f:\n filelist.append(file)\n message=json.dumps(filelist)\n print(message)\n await websocket.send(message)\n\n\nasync def change_image(message):\n if USERS: # asyncio.wait doesn't accept an empty list\n new_pic=message.split('|')[1]\n file=open('images/'+new_pic,'rb')\n file_content=file.read()\n encoded_string=base64.b64encode(file_content)\n #print(encoded_string)\n await asyncio.wait([user.send(encoded_string.decode(\"utf-8\")) for user in USERS])\n\n\nasync def register(websocket):\n USERS.add(websocket)\n print(USERS)\n #await notify_users()\n\n\nasync def unregister(websocket):\n USERS.remove(websocket)\n #await notify_users()\n\n\nasync def server(websocket, path):\n\n # register(websocket) sends user_event() to websocket\n #await register(websocket)\n try:\n async for message in websocket:\n print(message)\n if message=='fetch_images':\n print(\"Calling Fetch\")\n #generate list of images and send\n await enumerate_images(websocket)\n elif 
message=='register_client':\n await register(websocket)\n elif 'change|' in message:\n await change_image(message)\n\n finally:\n await unregister(websocket)\n\n\nstart_server = websockets.serve(server, \"192.168.19.190\", 6789)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n","sub_path":"picture_server.py","file_name":"picture_server.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"641542554","text":"# -*- encoding: utf-8 -*-\n\n__author__ = 'kotaimen'\n__date__ = '11/01/2017'\n\nimport click\n\nfrom ..utils import boto3_exception_handler\nfrom ...cli import stack\nfrom ...config import load_stack_config\nfrom .events import tail_stack_events\n\n\n@stack.command()\n@click.argument('config_file', type=click.Path(exists=True))\n@click.option('--timeout', '-t', type=click.IntRange(min=0, max=3600),\n default=300, help='wait time in seconds before exit')\n@click.option('--events', '-n', type=click.IntRange(min=0, max=100),\n default=0,\n help='number of latest stack events, 0 means fetch all '\n 'stack events')\n@click.pass_context\n@boto3_exception_handler\ndef tail(ctx, config_file, timeout, events):\n \"\"\"Print stack events and wait for updates (stop with CTRL+C)\n\n CONFIG_FILE Stack configuration file.\n \"\"\"\n session = ctx.obj['session']\n\n stack_config = load_stack_config(config_file)\n\n cfn = session.resource('cloudformation', region_name=stack_config['Region'])\n stack = cfn.Stack(stack_config['StackName'])\n\n tail_stack_events(session, stack, latest_events=events, time_limit=timeout)\n","sub_path":"awscfncli/commands/stack/tail.py","file_name":"tail.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344028347","text":"from PyQt4 import QtGui, QtCore\n\nclass DataTable(QtCore.QObject):\n cb_index_changed_signal = QtCore.pyqtSignal(QtGui.QWidget)\n def __init__(self, parent = None):\n QtCore.QObject.__init__(self)\n\n self.signalMapper = QtCore.QSignalMapper()\n self.signalMapper.mapped[QtGui.QWidget].connect(self.on_signalMapper_mapped)\n\n def insert_row_cb(self, table, cb_col):\n rows = table.rowCount()\n table.insertRow(rows)\n self.set_row_items_cb(table, cb_col)\n self.resize_rows(table)\n return table\n\n def set_row_items_cb(self, table, cb_col):\n cb = QtGui.QComboBox()\n cb.currentIndexChanged.connect(self.signalMapper.map)\n\n rows = table.rowCount()\n cols = table.columnCount()\n for col in range(cols):\n if col == cb_col:\n table.setCellWidget(rows - 1, cb_col, cb)\n cb.row = rows - 1\n cb.column = cb_col\n self.signalMapper.setMapping(cb, cb)\n else:\n table.setItem(rows - 1, col, QtGui.QTableWidgetItem(''))\n return table\n\n def on_signalMapper_mapped(self, cb):\n self.cb_index_changed_signal.emit(cb)\n","sub_path":"Code Examples/GUI Examples/dropdown/DataTable.py","file_name":"DataTable.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"510240574","text":"from tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import plot_model\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.regularizers import l2\n\ndef make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // 
divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\nclass ConvBlocks:\n @classmethod # Conv + BN\n def BNConv(cls, x_in, filters, kernel_size = (1,1), strides = (1,1), l2_weight = 1e-4):\n x = layers.Conv2D(filters = filters, kernel_size = kernel_size, strides = strides, \n padding = 'same', kernel_initializer='he_normal', kernel_regularizer = l2(l2_weight))(x_in)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU(max_value = 6.0)(x) \n return x\n\n @classmethod # Depthwise Conv + BN\n def DWBNConv(cls, x_in, depth_multiplier = 1, kernel_size = (3,3), strides = (1,1), l2_weight = 1e-4):\n x = layers.DepthwiseConv2D(kernel_size = (3,3), strides= strides, depth_multiplier=1, \n padding='same', kernel_initializer='he_normal', kernel_regularizer= l2(l2_weight))(x_in)\n x = layers.BatchNormalization()(x)\n x = layers.ReLU(max_value = 6.0)(x) \n return x\n\n @classmethod # Bottleneck block \n def Bottleneck(cls, x_in, expansion_factor, output_channel, strides = (1,1), l2_weight = 1e-4):\n input_channel = K.int_shape(x_in)[-1]\n x = cls.BNConv(x_in, input_channel*expansion_factor, l2_weight = l2_weight)\n x = cls.DWBNConv(x, strides = strides, l2_weight = l2_weight)\n x = cls.BNConv(x, output_channel, l2_weight = l2_weight)\n if K.int_shape(x_in) == K.int_shape(x):\n x = layers.Add()([x_in, x])\n return x\ndef TBPPlite512_body(x, n_classes = 1000, first_block_filters = 32, \n last_block_filters = 1280, alpha = 1.0, n_downsampling = 5, l2_weight = 1e-4):\n source_layers = []\n \n assert 0 <= n_downsampling <= 5 \n dim_reduction = [1]*(5-n_downsampling)+[2]*(n_downsampling)\n\n if first_block_filters is None:\n first_block_filters = make_divisible(32 * alpha, 8)\n if last_block_filters is None:\n if alpha > 1.0:\n last_block_filters = make_divisible(1280 * alpha, 8)\n else:\n last_block_filters = 1280\n \n # initial layer ( output 112)\n \n x = ConvBlocks.BNConv(x, first_block_filters, kernel_size = (3,3), \n strides = (dim_reduction[0],)*2, l2_weight = l2_weight)\n\n # Bottleneck layers\n \n #first (output 112) Bx1 \n x = ConvBlocks.Bottleneck(x, expansion_factor = 1, output_channel = int(16*alpha), \n strides=(1,1), l2_weight = l2_weight)\n #second (output 56) Bx2\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(24*alpha), \n strides=(dim_reduction[1],)*2, l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(24*alpha), \n strides=(1,1), l2_weight = l2_weight)\n #3rd (output 28) Bx3\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(32*alpha), \n strides=(dim_reduction[2],)*2, l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(32*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(32*alpha), \n strides=(1,1), l2_weight = l2_weight)\n\n source_layers.append(x) #38x38 for input 300\n \n #4th (output 14) Bx 4\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(64*alpha), \n strides=(dim_reduction[3],)*2, l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(64*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(64*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(64*alpha), \n strides=(1,1), 
l2_weight = l2_weight)\n\n source_layers.append(x) #19x19 or input 300\n \n #5th (output 14, no reduction ) Bx3\n \n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(96*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(96*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(96*alpha), \n strides=(1,1), l2_weight = l2_weight)\n \n source_layers.append(x) #19x19 or input 300\n \n #6th (output 7 ) Bx3\n \n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(160*alpha), \n strides=(dim_reduction[4],)*2, l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(160*alpha), \n strides=(1,1), l2_weight = l2_weight)\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(160*alpha), \n strides=(1,1), l2_weight = l2_weight)\n \n source_layers.append(x)#10x10 or input 300\n \n #7th( output 7) B x1\n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(320*alpha), \n strides=(1,1), l2_weight = l2_weight)\n source_layers.append(x) #10x10 for input 300\n \n x = ConvBlocks.Bottleneck(x, expansion_factor = 6, output_channel = int(1280*alpha), \n strides=(1,1), l2_weight = l2_weight)\n source_layers.append(x) #10x10 for input 300\n\n\n return source_layers","sub_path":"My_work/tbpplite_body.py","file_name":"tbpplite_body.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89876062","text":"# -*- coding: utf-8 -*-\nimport itertools\n\nfrom typing import TYPE_CHECKING\n\nimport rdflib\n\nfrom pyshacl.consts import SH_object, SH_path, SH_predicate, SH_subject, SH_this\nfrom pyshacl.rules.shacl_rule import SHACLRule\n\n\nif TYPE_CHECKING:\n from pyshacl.shape import Shape\n\n\nclass TripleRule(SHACLRule):\n __slots__ = (\"s\", \"p\", \"o\")\n\n def __init__(self, shape: 'Shape', rule_node: 'rdflib.term.Identifier'):\n \"\"\"\n\n :param shape:\n :type shape: Shape\n :param rule_node:\n :type rule_node: rdflib.term.Identifier\n \"\"\"\n super(TripleRule, self).__init__(shape, rule_node)\n my_subject_nodes = set(self.shape.sg.objects(self.node, SH_subject))\n if len(my_subject_nodes) < 1:\n raise RuntimeError(\"No sh:subject\")\n elif len(my_subject_nodes) > 1:\n raise RuntimeError(\"Too many sh:subject\")\n self.s = next(iter(my_subject_nodes))\n\n my_predicate_nodes = set(self.shape.sg.objects(self.node, SH_predicate))\n if len(my_predicate_nodes) < 1:\n raise RuntimeError(\"No sh:predicate\")\n elif len(my_predicate_nodes) > 1:\n raise RuntimeError(\"Too many sh:predicate\")\n self.p = next(iter(my_predicate_nodes))\n\n my_object_nodes = set(self.shape.sg.objects(self.node, SH_object))\n if len(my_object_nodes) < 1:\n raise RuntimeError(\"No sh:object\")\n elif len(my_object_nodes) > 1:\n raise RuntimeError(\"Too many sh:object\")\n self.o = next(iter(my_object_nodes))\n\n def get_nodes_from_node_expression(self, expr, focus_node, data_graph):\n if expr == SH_this:\n return [focus_node]\n elif isinstance(expr, (rdflib.URIRef, rdflib.Literal)):\n return [expr]\n elif isinstance(expr, rdflib.BNode):\n path_nodes = set(self.shape.sg.objects(expr, SH_path))\n if len(path_nodes) > 0:\n path_results = []\n for p in path_nodes:\n vals = self.shape.value_nodes_from_path(self.shape.sg, focus_node, p, data_graph)\n path_results.extend(vals)\n return path_results\n 
else:\n raise NotImplementedError(\"Unsupported expression s, p, or o, in SHACL TripleRule\")\n else:\n raise NotImplementedError(\"Unsupported expression s, p, or o, in SHACL TripleRule\")\n\n def apply(self, data_graph):\n focus_nodes = self.shape.focus_nodes(data_graph) # uses target nodes to find focus nodes\n applicable_nodes = self.filter_conditions(focus_nodes, data_graph)\n for a in applicable_nodes:\n s_set = self.get_nodes_from_node_expression(self.s, a, data_graph)\n p_set = self.get_nodes_from_node_expression(self.p, a, data_graph)\n o_set = self.get_nodes_from_node_expression(self.o, a, data_graph)\n new_triples = itertools.product(s_set, p_set, o_set)\n for i in iter(new_triples):\n data_graph.add(i)\n","sub_path":"pyshacl/rules/triple/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"276584786","text":"from datetime import datetime\nimport logging\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\n\nimport pandas\nimport toolz\n\nlogger = logging.getLogger(__name__)\n\ndefault_args = {\n 'owner': 'nicor88',\n 'start_date': datetime(2019, 2, 20),\n 'depends_on_past': False,\n 'provide_context': True\n}\n\ndag = DAG('my_second_dag',\n description='My second Airflow DAG',\n schedule_interval='*/15 * * * *',\n catchup=False,\n default_args=default_args)\n\n\ndef task_1(**kwargs):\n output = {'output': 'hello world 1', 'execution_time': str(datetime.now())}\n logger.info(output)\n logger.info(f'Pandas version: {pandas.__version__}')\n logger.info(f'Toolz version: {toolz.__version__}')\n return output\n\n\ndef task_2(**kwargs):\n ti = kwargs['ti']\n output_task_1 = ti.xcom_pull(key='return_value', task_ids='task_1')\n logger.info(output_task_1)\n return {'output': 'hello world 2', 'execution_time': str(datetime.now())}\n\n\ndef task_3(**kwargs):\n logger.info('Log from task 3')\n return {'output': 'hello world 3', 'execution_time': str(datetime.now())}\n\n\ndef task_4(**kwargs):\n logger.info('Log from task 4')\n return {'output': 'hello world 4', 'execution_time': str(datetime.now())}\n\n\nt1 = PythonOperator(\n task_id='task_1',\n dag=dag,\n python_callable=task_1\n)\n\nt2 = PythonOperator(\n task_id='task_2',\n dag=dag,\n python_callable=task_2\n)\n\nt3 = PythonOperator(\n task_id='task_3',\n dag=dag,\n python_callable=task_3\n)\n\nt4 = PythonOperator(\n task_id='task_4',\n dag=dag,\n python_callable=task_4\n)\n\nt5 = KubernetesPodOperator(\n task_id='task_5',\n dag=dag,\n in_cluster=True,\n is_delete_operator_pod=True,\n namespace='airflow',\n service_account_name='default',\n image='python:3.6',\n cmds=['python', '-c'],\n arguments=[\"print('hello world')\"],\n labels={'foo': 'bar'},\n name='airflow-k8s-pod-operator',\n get_logs=True\n)\n\nt1 >> [t2, t3] >> t4 >> t5\n","sub_path":"dags/my_second_dag.py","file_name":"my_second_dag.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66970132","text":"#!/usr/bin/env python3\n\nimport numpy as np\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import KFold\n\nfrom tools import prepare_signal_background, prepare_wXy, get_data_X, get_metrics\nfrom plotting import plot_sig_vs_bg, plot_decision_function, plot_data_decision_function\nfrom 
optimiser import Optimiser\n\n\ndata_file = 'data/data.root'\n\nsignal_trees = ['signal_tree_training;1', 'signal_tree_testing;1']\nbackground_trees = ['background_tree_training;1', 'background_tree_testing;1']\n\ndata_tree = 'Data;1'\n\nvariable_names = ['mll', 'dRll', 'pTll']\n\nplot_dir = 'plots'\n\n\ndef make_plots():\n signal, background = prepare_signal_background(\n data_file, signal_trees, background_trees, variable_names)\n\n # === make signal vs. bg plots ===\n\n plot_sig_vs_bg(signal, background, plot_dir)\n\n # === 5 fold cross-validation ===\n\n w, X, y = prepare_wXy(signal, background, variable_names)\n\n bdt = GradientBoostingClassifier(learning_rate=0.005,\n max_depth=5,\n n_estimators=100)\n\n kf = KFold(n_splits=2)\n\n bdt_output = []\n classes = []\n weights = []\n\n for train, test in kf.split(X):\n X_train, y_train, w_train = X[train], y[train], w[train]\n X_test, y_test, w_test = X[test], y[test], w[test]\n\n bdt.fit(X_train, y_train, w_train)\n\n bdt_output.append(\n bdt.decision_function(X_test)\n )\n\n classes.append(y_test)\n weights.append(w_test)\n\n bdt_output = np.concatenate(bdt_output)\n classes = np.concatenate(classes)\n weights = np.concatenate(weights)\n\n # === plot decision functions ===\n\n plot_decision_function(decision_function=bdt_output,\n classes=classes,\n weights=weights,\n plotting_dir=plot_dir)\n\n data_X = get_data_X(data_file, data_tree, variable_names)\n\n bdt.fit(X, y, w)\n data_decision_function = bdt.decision_function(data_X)\n\n plot_data_decision_function(data_decision_function, plot_dir)\n\n # === estimate the signal fraction ===\n\n tpr, tnr = get_metrics(bdt_output, classes, weights)\n\n n_data = len(data_decision_function)\n n_minus = sum(data_decision_function < 0)\n\n x = (n_minus / n_data - tnr) / (1 - tnr - tpr)\n\n print(\n ('\\n=========== Results ===========\\n'\n ' Signal event fraction\\n'\n ' x: %.3f' % x + '\\n'\n '===============================\\n')\n )\n\n\ndef optimise():\n signal, background = prepare_signal_background(\n data_file, signal_trees, background_trees, variable_names)\n\n w, X, y = prepare_wXy(signal, background, variable_names)\n\n hyperparameter_settings = {\n 'learning_rate': [0.001, 0.01]\n }\n\n optimiser = Optimiser(\n GradientBoostingClassifier, X, y,\n hyperparameter_settings=hyperparameter_settings,\n fixed_hyperparameters={'max_depth': 8, 'loss': 'deviance'},\n n_iterations=20\n )\n\n optimiser.run()\n\n\nif __name__ == '__main__':\n # optimise()\n make_plots()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108309433","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.exceptions import UndefinedMetricWarning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn import metrics\nimport pandas as pd\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.utils import shuffle\nimport logging\nimport warnings\nfrom sklearn.cluster import KMeans\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)\nwarnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)\nwarnings.filterwarnings(action='ignore', category=UserWarning)\nwarnings.filterwarnings(action='ignore', category=RuntimeWarning)\n\nclass Task(object):\n def __init__(self, taskname):\n self.name = taskname\n\n def _classfication(self, embedding, labels_np, 
split_ratio=0.7):\n labels_np = shuffle(labels_np)\n nodes = labels_np[:, 0]\n labels = labels_np[:, 1]\n\n lb = LabelBinarizer()\n labels = lb.fit_transform(labels)\n train_size = int(labels_np.shape[0] * split_ratio)\n features = embedding[nodes]\n\n train_x = features[:train_size, :]\n train_y = labels[:train_size, :]\n test_x = features[train_size:, :]\n test_y = labels[train_size:, :]\n clf = OneVsRestClassifier(\n LogisticRegression(class_weight='balanced', solver='liblinear', max_iter=3000,\n n_jobs=-1))\n\n clf.fit(train_x, train_y)\n y_pred = clf.predict_proba(test_x)\n y_pred = lb.transform(np.argmax(y_pred, 1))\n acc = np.sum(np.argmax(y_pred, 1) == np.argmax(test_y, 1)) / len(y_pred)\n eval_dict = {\n 'acc': acc,\n 'f1-micro': metrics.f1_score(np.argmax(test_y, 1), np.argmax(y_pred, 1),\n average='micro'),\n 'f1-macro': metrics.f1_score(np.argmax(test_y, 1), np.argmax(y_pred, 1),\n average='macro'),\n }\n # print(eval_dict)\n return eval_dict\n\n def classfication(self, embedding, labels_np, split_ratio=0.7, loop=1):\n eval_dict = {\n 'acc': 0.0,\n 'f1-micro': 0.0,\n 'f1-macro': 0.0,\n }\n for _ in range(loop):\n tmp_dict = self._classfication(embedding, labels_np, split_ratio)\n for key in tmp_dict.keys():\n eval_dict[key] += tmp_dict[key]\n for key in tmp_dict.keys():\n eval_dict[key] = round((1.0 * eval_dict[key]) / loop, 4)\n print(\"average performance:\")\n print(eval_dict)\n return eval_dict\n\n def _clustering(self, embedding, labels_np):\n labels_np = shuffle(labels_np)\n nodes = labels_np[:, 0]\n labels = labels_np[:, 1]\n features = embedding[nodes]\n kmeans_model = KMeans(n_clusters=max(labels) + 1, n_jobs=-1)\n\n lb = LabelBinarizer()\n labels = lb.fit_transform(labels)\n kmeans_model.fit(features)\n c_pred = kmeans_model.labels_\n\n eval_dict = {\n 'homogeneity': metrics.homogeneity_score(np.argmax(labels, 1), c_pred),\n 'completeness': metrics.completeness_score(np.argmax(labels, 1), c_pred),\n 'silhouette': metrics.silhouette_score(features, c_pred),\n }\n return eval_dict\n\n def clustering(self, embedding, labels_np, loop=10):\n eval_dict = {\n 'homogeneity': 0.0,\n 'completeness': 0.0,\n 'silhouette': 0.0,\n }\n for _ in range(loop):\n tmp_dict = self._clustering(embedding, labels_np)\n for key in tmp_dict.keys():\n eval_dict[key] += tmp_dict[key]\n for key in tmp_dict.keys():\n eval_dict[key] = round((1.0 * eval_dict[key]) / loop, 4)\n print(eval_dict)\n return eval_dict\n\n def _link_prediction(self, embed, edgeList, labels, split_ratio=0.7, method='Hadamard'):\n # lb = LabelBinarizer()\n # labels = lb.fit_transform(labels)\n # print(embed.shape)\n ft = np.zeros((len(edgeList), embed.shape[1]))\n for i in range(len(edgeList)):\n src = edgeList[i][0]\n tgt = edgeList[i][1]\n if method == 'Hadamard':\n ft[i] = embed[src, :] * embed[tgt, :]\n elif method == 'Average':\n ft[i] = np.add(embed[src, :], embed[tgt, :]) * 0.5\n train_size = int(len(edgeList) * split_ratio)\n labels = labels.reshape((-1))\n x_train = ft[:train_size, :]\n y_train = labels[:train_size]\n x_test = ft[train_size:, :]\n y_test = labels[train_size:]\n clf = LogisticRegression(class_weight='balanced', solver='liblinear', max_iter=5000)\n clf.fit(x_train, y_train)\n y_pred = clf.predict(x_test)\n y_score = clf.predict_proba(x_test)[:, -1]\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)\n eval_dict = {'auc': metrics.auc(fpr, tpr),\n 'pr': metrics.average_precision_score(y_test, y_score),\n 'f1': metrics.f1_score(y_test, y_pred),\n 'f1-micro': metrics.f1_score(y_test, y_pred, 
average='micro'),\n 'f1-macro': metrics.f1_score(y_test, y_pred, average='macro')}\n print(eval_dict)\n return eval_dict\n\n def link_prediction(self, embedding, edgeList, labels, split_ratio=0.7, method='Hadamard',\n loop=100):\n eval_dict = {'auc': 0.0, 'pr': 0.0, 'f1': 0.0, 'f1-micro': 0.0, 'f1-macro': 0.0}\n for _ in range(loop):\n tmp_dict = self._link_prediction(embedding, edgeList, labels, split_ratio=0.7,\n method=method)\n for key in tmp_dict.keys():\n eval_dict[key] += tmp_dict[key]\n for key in tmp_dict.keys():\n eval_dict[key] = round((1.0 * eval_dict[key]) / loop, 4)\n print('average performance')\n print(eval_dict)\n return eval_dict\n\n def stack_label(self, df):\n node = []\n label = []\n for each in df.values:\n line = each[1].split(' ')\n label.extend(line)\n node.extend([each[0]] * len(line))\n frame = pd.DataFrame({\"id\": node, \"label\": label})\n return frame\n\n def _k_precision(self, embedding, label, k, lbl):\n nodes = label[np.where(label[:, 1] == lbl)][:, 0]\n acc = 0.0\n for node in nodes:\n distance = {}\n for i in range(embedding.shape[0]):\n if i == node:\n continue\n distance[i] = np.linalg.norm(embedding[i] - embedding[node])\n distance = sorted(distance.items(), key=lambda x: x[1])\n distance = np.array(distance)[:k]\n acc += distance[np.isin(distance[:, 0], nodes)].shape[0] / k\n acc /= len(nodes) + 1e-10\n return round(acc, 4)\n\n def k_precision(self, embedding, lbl_path, k=50):\n label = pd.read_csv(lbl_path, header=None, sep=' ')\n label.columns = ['id', 'label']\n if 'imdb' in lbl_path or 'genes' in lbl_path:\n label = self.stack_label(label)\n label['label'] = label['label'].astype(int)\n label = label.values\n if 'imdb' in lbl_path:\n eval_dict = {\n 'precision': k,\n 'Directors_acc': self._k_precision(embedding, label, k, 1),\n 'Editors_acc': self._k_precision(embedding, label, k, 2),\n 'Producers_acc': self._k_precision(embedding, label, k, 3),\n 'Writers_acc': self._k_precision(embedding, label, k, 4),\n }\n elif 'genes' in lbl_path:\n eval_dict = {\n 'precision': k,\n 'Category 1': self._k_precision(embedding, label, k, 1),\n 'Category 2': self._k_precision(embedding, label, k, 2),\n 'Category 6': self._k_precision(embedding, label, k, 6),\n 'Category 9': self._k_precision(embedding, label, k, 9),\n 'Category 10': self._k_precision(embedding, label, k, 10),\n 'Category 11': self._k_precision(embedding, label, k, 11),\n 'Category 12': self._k_precision(embedding, label, k, 12),\n }\n elif 'acl' in lbl_path:\n eval_dict = {\n 'precision': k,\n 'Category 1': self._k_precision(embedding, label, k, 1),\n }\n else:\n eval_dict = {\n 'precision': k,\n 'bots_acc': self._k_precision(embedding, label, k, 1),\n 'admins_acc': self._k_precision(embedding, label, k, 2)\n }\n print(eval_dict)\n return eval_dict\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"187127958","text":"import datetime\n\nfrom presidio_analyzer import LocalRecognizer, \\\n Pattern, \\\n RecognizerResult, \\\n EntityRecognizer, \\\n AnalysisExplanation\n\n# Import 're2' regex engine if installed, if not- import 'regex'\ntry:\n import re2 as re\nexcept ImportError:\n import regex as re\n\n\nclass PatternRecognizer(LocalRecognizer):\n\n def __init__(self, supported_entity, name=None,\n supported_language='en', patterns=None,\n black_list=None, context=None, version=\"0.0.1\"):\n \"\"\"\n :param patterns: the list of patterns to detect\n :param 
black_list: the list of words to detect\n :param context: list of context words\n \"\"\"\n if not supported_entity:\n raise ValueError(\n \"Pattern recognizer should be initialized with entity\")\n\n if not patterns and not black_list:\n raise ValueError(\n \"Pattern recognizer should be initialized with patterns\"\n \" or with black list\")\n\n super().__init__(supported_entities=[supported_entity],\n supported_language=supported_language,\n name=name,\n version=version)\n if patterns is None:\n self.patterns = []\n else:\n self.patterns = patterns\n self.context = context\n\n if black_list:\n black_list_pattern = self.__black_list_to_regex(\n black_list)\n self.patterns.append(black_list_pattern)\n self.black_list = black_list\n else:\n self.black_list = []\n\n def load(self):\n pass\n\n # pylint: disable=unused-argument,arguments-differ\n def analyze(self, text, entities, nlp_artifacts=None, regex_flags=None):\n results = []\n\n if self.patterns:\n pattern_result = self.__analyze_patterns(text, regex_flags)\n\n if pattern_result and self.context:\n # try to improve the results score using the surrounding\n # context words\n enhanced_result = \\\n self.enhance_using_context(\n text, pattern_result, nlp_artifacts, self.context)\n results.extend(enhanced_result)\n elif pattern_result:\n results.extend(pattern_result)\n\n return results\n\n @staticmethod\n def __black_list_to_regex(black_list):\n \"\"\"\n Converts a list of word to a matching regex, to be analyzed by the\n regex engine as a part of the analyze logic\n\n :param black_list: the list of words to detect\n :return:the regex of the words for detection\n \"\"\"\n regex = r\"(?:^|(?<= ))(\" + '|'.join(black_list) + r\")(?:(?= )|$)\"\n return Pattern(name=\"black_list\", regex=regex, score=1.0)\n\n # pylint: disable=unused-argument, no-self-use, assignment-from-none\n def validate_result(self, pattern_text):\n \"\"\"\n Validates the pattern logic, for example by running\n checksum on a detected pattern.\n\n :param pattern_text: the text to validated.\n Only the part in text that was detected by the regex engine\n :return: A bool indicating whether the validation was successful.\n \"\"\"\n return None\n\n # pylint: disable=unused-argument, no-self-use, assignment-from-none\n def invalidate_result(self, pattern_text):\n \"\"\"\n Logic to check for result invalidation by running pruning logic.\n For example, each SSN number group should not consist of all the same\n digits.\n\n :param pattern_text: the text to validated.\n Only the part in text that was detected by the regex engine\n :return: A bool indicating whether the result is invalidated\n \"\"\"\n return None\n\n @staticmethod\n def build_regex_explanation(\n recognizer_name,\n pattern_name,\n pattern,\n original_score,\n validation_result):\n explanation = AnalysisExplanation(recognizer=recognizer_name,\n original_score=original_score,\n pattern_name=pattern_name,\n pattern=pattern,\n validation_result=validation_result)\n return explanation\n\n def __analyze_patterns(self, text, flags=None):\n \"\"\"\n Evaluates all patterns in the provided text, including words in\n the provided blacklist\n\n :param text: text to analyze\n :param flags: regex flags\n :return: A list of RecognizerResult\n \"\"\"\n flags = flags if flags else re.DOTALL | re.MULTILINE\n results = []\n for pattern in self.patterns:\n match_start_time = datetime.datetime.now()\n matches = re.finditer(\n pattern.regex,\n text,\n flags=flags)\n match_time = datetime.datetime.now() - match_start_time\n 
self.logger.debug('--- match_time[%s]: %s.%s seconds',\n pattern.name,\n match_time.seconds,\n match_time.microseconds)\n\n for match in matches:\n start, end = match.span()\n current_match = text[start:end]\n\n # Skip empty results\n if current_match == '':\n continue\n\n score = pattern.score\n\n validation_result = self.validate_result(current_match)\n description = self.build_regex_explanation(\n self.name,\n pattern.name,\n pattern.regex,\n score,\n validation_result\n )\n pattern_result = RecognizerResult(\n self.supported_entities[0],\n start,\n end,\n score,\n description)\n\n if validation_result is not None:\n if validation_result:\n pattern_result.score = EntityRecognizer.MAX_SCORE\n else:\n pattern_result.score = EntityRecognizer.MIN_SCORE\n\n invalidation_result = self.invalidate_result(current_match)\n if invalidation_result is not None and invalidation_result:\n pattern_result.score = EntityRecognizer.MIN_SCORE\n\n if pattern_result.score > EntityRecognizer.MIN_SCORE:\n results.append(pattern_result)\n\n return results\n\n def to_dict(self):\n return_dict = super().to_dict()\n\n return_dict[\"patterns\"] = [pat.to_dict() for pat in self.patterns]\n return_dict[\"black_list\"] = self.black_list\n return_dict[\"context\"] = self.context\n return_dict[\"supported_entity\"] = return_dict[\"supported_entities\"][0]\n del return_dict[\"supported_entities\"]\n\n return return_dict\n\n @classmethod\n def from_dict(cls, entity_recognizer_dict):\n patterns = entity_recognizer_dict.get(\"patterns\")\n if patterns:\n patterns_list = [Pattern.from_dict(pat) for pat in patterns]\n entity_recognizer_dict['patterns'] = patterns_list\n\n return cls(**entity_recognizer_dict)\n","sub_path":"presidio-analyzer/presidio_analyzer/pattern_recognizer.py","file_name":"pattern_recognizer.py","file_ext":"py","file_size_in_byte":7408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"65602","text":"from .config import Config\nfrom werkzeug.utils import secure_filename\nimport os\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in Config.ALLOWED_EXTENSIONS\n\ndef upload_file(file, username):\n moleculename = file.filename.split('.')[0]\n if allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(Config.UPLOAD_FOLDER,\n username, moleculename, 'run', filename))\n return True\n else:\n return False\n\ndef upload_file_ligante(file, ligitp, liggro, username):\n moleculename = file.filename.split('.')[0]\n ligantename = ligitp.filename.split('.')[0]\n pastaname = moleculename+'_'+ligantename\n if allowed_file(file.filename):\n if allowed_file(ligitp.filename):\n if allowed_file(liggro.filename):\n filemol = secure_filename(file.filename)\n fileitp = secure_filename(ligitp.filename)\n filegro = secure_filename(liggro.filename)\n file.save(os.path.join(Config.UPLOAD_FOLDER,\n username, pastaname, 'run', filemol))\n ligitp.save(os.path.join(Config.UPLOAD_FOLDER,\n username, pastaname, 'run', fileitp))\n liggro.save(os.path.join(Config.UPLOAD_FOLDER,\n username, pastaname, 'run', filegro))\n return True\n else:\n return False\n else:\n return False\n ","sub_path":"app/upload_file.py","file_name":"upload_file.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479439845","text":"import datetime\nimport logging\nimport pandas as pd\nimport os\nimport sys\nfrom .processors import LocustResourceProcessor\n\n\nclass PreProcessor:\n RESOURCES = ['LOCUST']\n\n def __init__(self, resource, time_formatter, *args, **kwargs):\n if resource not in PreProcessor.RESOURCES:\n logging.critical(\n 'Invalid Usage: Please assign a resource defined in '\n + 'PreProcessor.RESOURCES.')\n sys.exit(1)\n if resource == 'LOCUST':\n if 'distribution_filename' in kwargs \\\n and 'requests_filename' in kwargs:\n self.resource_processor = LocustResourceProcessor. \\\n LocustResourceProcessor(\n distribution_filename=kwargs['distribution_filename'],\n requests_filename=kwargs['requests_filename'])\n elif 'distribution_filename' in kwargs:\n self.resource_processor = LocustResourceProcessor. \\\n LocustResourceProcessor(\n distribution_filename=kwargs['distribution_filename'])\n elif 'requests_filename' in kwargs:\n self.resource_processor = LocustResourceProcessor. \\\n LocustResourceProcessor(\n requests_filename=kwargs['requests_filename'])\n else:\n self.resource_processor = LocustResourceProcessor. \\\n LocustResourceProcessor()\n self.time_formatter = time_formatter\n\n def process(self, reports_path):\n \"\"\"Performance Report as pandas DataFrame.\n\n Args:\n reports_path: directory containing the directories \\\n which hold the locust reports.\n\n Returns:\n reports [pandas.DataFrame]: Performance test reports with \\\n the following columns.\n 1. Name: test target.\n 2. # requests: number of requests.\n 3. 99%: 99%tile latency. Any %tile latency is available \\\n because you assign the key when plotting charts.\n 4. Median response time: 50%tile latency.\n 5. Average response time: ditto.\n 6. Min response time: ditto.\n 7. Max response time: ditto.\n 8. # failures: number of failures.\n 9. 
Requests/s: requests per second.\n 10. DateTime [pandas.TimeStamp]: date the test was executed.\n \"\"\"\n report_dirs = [f for f in os.listdir(reports_path) if os.path.isdir(\n os.path.join(reports_path, f))]\n\n reports_df = None\n for report_dir in report_dirs:\n tmp_df = self._process(reports_path, report_dir)\n if reports_df is None:\n reports_df = tmp_df\n else:\n reports_df = pd.concat([reports_df, tmp_df], ignore_index=True)\n\n return reports_df\n\n def _process(self, reports_path, report_dir):\n year, month, day, hour, minute, second = self.time_formatter.format(\n report_dir)\n report_df = self.resource_processor.process(reports_path + report_dir)\n report_df['DateTime'] = datetime.datetime(\n year=year, month=month, day=day,\n hour=hour, minute=minute, second=second)\n report_df.sort_values('DateTime', ascending=True, inplace=True)\n\n return report_df\n","sub_path":"src/python/scripts/modules/PreProcessor.py","file_name":"PreProcessor.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36742369","text":"#!/usr/bin/python3\n\"\"\"Script that starts a Flask web application\"\"\"\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\n\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef close(exc):\n \"\"\"Remove the current SQLAlchemy session.\"\"\"\n storage.close()\n\n\n@app.route('/states', strict_slashes=False)\ndef states_list():\n \"\"\"\n List of all State objects present in DBStorage sorted by name (A->Z)\n \"\"\"\n states = storage.all(\"State\")\n return render_template(\"7-states_list.html\", states_key=states)\n\n\n@app.route('/states/<id>', strict_slashes=False)\ndef states_id(id):\n \"\"\"\n List the states by id in order\n \"\"\"\n all_data = storage.all(\"State\").values()\n state = {}\n for data in all_data:\n if data.id == id:\n state = data\n break\n else:\n state = None\n return render_template(\"9-states.html\", state_key=state)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89601136","text":"import sys\nfrom collections import defaultdict\nimport logging\nlogger = logging.getLogger()\n\n\ndef setup_debug_logger():\n handler = logging.StreamHandler()\n global logger\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n\ndef read_lines(file_name):\n g = defaultdict(list)\n with open(file_name) as f:\n for line in f:\n # wrap in list() so the result can be indexed below (map objects are not subscriptable)\n nodes = list(map(int, line.strip().split(' ')))\n g[nodes[0]].append(nodes[1])\n logger.debug(\"graph nodes number is {0}\".format(len(g)))\n return g\n\n\ndef dfs(g, start_node, visited_nodes, trace):\n stack = []\n\n # add start node\n stack.extend(g[start_node])\n visited_nodes.add(start_node)\n trace.append(start_node)\n\n while stack:\n next_node = stack.pop() # differs from BFS, which would pop from the front\n if next_node not in visited_nodes:\n visited_nodes.add(next_node)\n for v in g[next_node]:\n if v not in visited_nodes:\n stack.append(v)\n trace.append(next_node)\n\n return trace\n\ndef dfs_loop(g):\n # loop through the whole graph to deal with isolated nodes\n # a set is more efficient than a list for remembering visited nodes\n visited_nodes, trace = set(), []\n\n for k in g.keys():\n if k not in visited_nodes:\n dfs(g, k, visited_nodes, trace)\n\n # logger.debug(\"trace: {0}\".format(trace))\n return 
trace\n\n\nif __name__ == \"__main__\":\n setup_debug_logger()\n if len(sys.argv) > 1:\n file_name = sys.argv[1]\n graph = read_lines(file_name)\n trace = dfs_loop(graph)\n print(\"DFS visited nodes number is {0}\".format(len(trace)))\n else:\n print(\"Please provide file name!\")\n","sub_path":"part1/4_scc/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235342703","text":"import datetime\nimport csv\nimport os\nfrom io import TextIOWrapper\nfrom django.core.serializers import serialize\nfrom ..models import Purchase, Business, Category\nfrom .func import to_dict\n\n\ndef jsonifyPurchases(purchases):\n '''\n returns dictionary width serializable values for the Purchase model.\n '''\n if purchases.model != Purchase:\n return None\n data = []\n for purchase in purchases:\n p = dict()\n p['model'] = 'finances.purchase'\n fields = dict()\n if type(purchase) == dict:\n p['pk'] = purchase['id']\n for k,v in purchase.items():\n if k == 'amount':\n fields[k] = float(v)\n elif k == 'date':\n fields[k] = v.isoformat()\n else:\n fields[k] = v\n elif type(purchase) == Purchase:\n p['pk'] = purchase.id\n for k,v in to_dict(purchase).items():\n if k == 'amount':\n fields[k] = float(v)\n elif k == 'date':\n fields[k] = v.isoformat()\n else:\n fields[k] = v\n p['fields'] = fields\n data.append(p)\n return {'purchases': data}\n\n\ndef prepExport(queryset):\n if queryset.model == Purchase:\n headers = [\n 'id', \n 'date', \n 'amount', \n 'business__id', \n 'business__name', \n 'category__id', \n 'category__name', \n 'desc',\n ]\n elif queryset.model == Category:\n headers = ['id', 'name', 'priority']\n elif queryset.model == Business:\n headers = ['id', 'name', 'spent', 'default_cat__id', 'default_cat__name']\n else:\n return \"unrecognized mode\"\n\n directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/exports/'\n filename = \"export_\" + str(datetime.datetime.today().timestamp()) + \".csv\"\n export = open(directory + filename, 'w', newline='', encoding='utf-8')\n export_csv = csv.writer(export)\n export_csv.writerow(headers)\n for item in queryset:\n row = []\n d = to_dict(item)\n for key in headers:\n key = key.split('__')\n if (len(key) > 1):\n row.append(d[key[0]][key[1]])\n else:\n row.append(d[key[0]])\n\n export_csv.writerow(row)\n\n export.close()\n return directory + filename \n\n\ndef importFile(filename, user):\n input_file = TextIOWrapper(filename.file, encoding=\"utf-8\")\n csv_reader = csv.reader(input_file)\n\n header_line = input_file.readline().split(',')\n if len(header_line) == 8:\n model = Purchase\n elif len(header_line) == 3:\n model = Category\n elif len(header_line) == 5:\n model = Business\n else:\n return {'result': 'error', 'message': 'incorrect import file format. incompatible header line.'}\n\n for row in csv_reader:\n obj = {'user': user}\n for i in range(0, len(header_line)):\n if '__name' in header_line[i]:\n continue\n if header_line[i] == 'category__id':\n # need to check if id exists first. 
maybe try/except otherwise Unique constraint error.\n try:\n c = Category.objects.get(pk=row[i])\n if c.name == row[i+1] and c.user == user:\n obj['category'] = c\n else:\n c, created = Category.objects.get_or_create(name=row[i+1], user=user)\n obj['category'] = c\n except Category.DoesNotExist:\n if Category.objects.filter(name=row[i+1], user=user).exists():\n c = Category.objects.get(name=row[i+1], user=user)\n else:\n c = Category.objects.create(pk=row[i], name=row[i+1], user=user, priority=1)\n obj['category'] = c\n continue\n elif header_line[i] == 'business__id': \n try:\n b = Business.objects.get(pk=row[i])\n if b.name == row[i+1] and b.user == user:\n obj['business'] = b\n else:\n b, created = Business.objects.get_or_create(name=row[i+1], user=user)\n obj['business'] = b\n except Business.DoesNotExist:\n if Business.objects.filter(name=row[i+1], user=user).exists():\n b = Business.objects.get(name=row[i+1], user=user)\n else:\n b = Business.objects.create(pk=row[i], name=row[i+1], user=user)\n obj['business'] = b\n continue\n obj[header_line[i].rstrip('\\n')] = row[i]\n #if model.objects.filter(pk=obj['id']).exists():\n if 'id' in obj.keys():\n obj.pop('id')\n model.objects.create(**obj)\n else:\t\n model.objects.create(**obj)\n\n return {'result': 'success1', 'message': 'items imported successfully'}\n\n\n","sub_path":"finances/bin/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"75272482","text":"'''-------------------------------------------------------------------------------------------------\nMODULE\n sars_reporting_functions\n\nDESCRIPTION\n Date : 2020-02-28\n Purpose : General functions for IT3 reports and FATCA/CRS\n Developer : Qaqamba Ntshobane\n Requester : Nhlanhleni Mchunu\n Department: PCG\n\n=======================================================================================================\nHISTORY:\n Date: Change No: Developer: Description: \n-------------------------------------------------------------------------------------------------------\n 2020-02-28 PCGDEV-325 Qaqamba Ntshobane Initial Design\n 2020-09-28 PCGDEV-540 Qaqamba Ntshobane Added BoUV function\n 2021-03-18 PCGDEV-686 Qaqamba Ntshobane Extended functions for FATCA/CRS report\n\nENDDESCRIPTION\n----------------------------------------------------------------------------------------------------'''\n\nimport acm\nimport math\nimport currentNominal\n\nfrom at_time import acm_date\nfrom collections import defaultdict\nfrom FUploaderFunctions import add_row, send_report\nfrom FUploaderParams import TODAY, ENV, EMAIL_SENDER, CALENDAR\n\nPROJECTED_AMOUNT = 'Cash Analysis Projected'\nMONEYFLOW_CS = acm.Calculations().CreateCalculationSpace('Standard', 'FMoneyFlowSheet')\nDEALSHEET_CS = acm.FCalculationMethods().CreateCalculationSpace(acm.GetDefaultContext(), 'FDealSheet')\n\n\ndef usd_rate(curr_to, date, *rest):\n\n currency = acm.FInstrument[curr_to.insid]\n return get_rate(currency, date)\n\n\ndef get_rate(currency, date):\n\n if acm.Time.DateToYMD(date)[1] in [3, 9]:\n date = acm.Time.DateAddDelta(date, 0, 0, -1)\n\n usd = acm.FInstrument['USD']\n usd_zar = usd.UsedPrice(date, 'ZAR', 'SPOT')\n curr_usd = usd.UsedPrice(date, currency, 'SPOT')\n\n if not curr_usd == 0.0:\n return float(usd_zar / curr_usd)\n\n set_global_variables(date)\n return float(1.0)\n\n\ndef ins_expires_in_period(ael_ins, end_period_date, *rest):\n\n ins = acm.FInstrument[ael_ins.insaddr]\n return 
ins_expires_in_period_(ins, end_period_date)\n\n\ndef ins_expires_in_period_(ins, end_period_date):\n\n end_period_date = fa_date(end_period_date)\n end_month = acm.Time.DateToYMD(end_period_date)[1]\n start_period_date = (acm.Time.DateAddDelta(end_period_date, 0, -12, 0) if end_month == 3 else \n acm.Time.DateAddDelta(end_period_date, 0, -6, 0))\n\n if ins.ExpiryDate():\n if ins.ExpiryDate().split(' ')[0] >= start_period_date:\n return 1\n return 0\n return 1\n\n\ndef get_nominal(ael_trade, date, *rest):\n\n if acm.Time.DateToYMD(date)[1] in [3, 9]:\n date = acm.Time.DateAddDelta(date, 0, 0, -1)\n\n trade = acm.FTrade[ael_trade.trdnbr]\n instrument = trade.Instrument()\n\n if instrument.InsType() == 'Deposit':\n leg = instrument.Legs()\n\n if not leg:\n return 0\n nom = currentNominal.currentNominal(leg[0], date, [trade])\n else:\n nom = trade.Nominal()\n\n if is_foreign_currency(trade):\n return nom * get_rate(trade.Currency().Name(), date)\n return nom\n\n\ndef get_bouv(ael_trade, date, *rest):\n\n trade_object = acm.FTrade[ael_trade.trdnbr]\n instrument = trade_object.Instrument()\n\n cash_end = get_cash_end(trade_object, date)\n val_end = get_val_end(trade_object, date)\n\n bouv = cash_end + val_end\n if math.isnan(bouv):\n return 0\n return bouv\n\n\ndef get_premium(ael_trade, date, *rest):\n\n trade = acm.FTrade[ael_trade.trdnbr]\n instrument = trade.Instrument()\n\n if instrument.InsType() in ['Deposit', 'CD']:\n prem = trade.FaceValue()\n else:\n prem = trade.Premium()\n\n if is_foreign_currency(trade):\n return prem * get_rate(trade.Currency().Name(), date)\n return prem\n\n\ndef sold(ael_trade, *rest):\n\n return not acm.FTrade[ael_trade.trdnbr].Bought()\n\n\ndef is_foreign_currency(ael_trade):\n\n return ael_trade.Currency().Name() != 'ZAR'\n\n\ndef get_closing_balance(trade, start_date, end_date):\n\n closing_balance = 0\n start_date = fa_date(start_date)\n end_date = fa_date(end_date)\n account_close_date = trade.Instrument().ExpiryDate().split(' ')[0]\n\n set_global_variables(start_date, end_date)\n cash_end = DEALSHEET_CS.CalculateValue(trade, 'Portfolio Cash End').Number()\n cash_balance = DEALSHEET_CS.CalculateValue(trade, 'Deposit balance').Number()\n accrued_interest = DEALSHEET_CS.CalculateValue(trade, 'Portfolio Accrued Interest').Number()\n\n if trade.Instrument().IsCallAccount():\n closing_balance = cash_balance + accrued_interest\n else:\n closing_balance = cash_end - accrued_interest\n\n remove_global_simulations()\n return amount(trade, end_date, closing_balance)\n\n\ndef fa_date(date):\n\n return CALENDAR.AdjustBankingDays(date, 0)\n\n\ndef get_period_dates(end_date):\n\n end_month = acm.Time.DateToYMD(end_date)[1]\n validate_date(end_month)\n\n start_date = (acm.Time.DateAddDelta(end_date, 0, -12, 0) if end_month in [3, 1] else \n acm.Time.DateAddDelta(end_date, 0, -6, 0))\n\n return start_date, acm.Time.DateAddDelta(end_date, 0, 0, -1)\n\n\ndef validate_date(end_month):\n\n if end_month not in [3, 9, 1]:\n raise Exception('Incorrect end of period month. 
Period end month should '\n 'be 3 or 9 (Mar or Sep) for IT3B or FATCA/CRS, and 1 (Jan) for VAT')\n\n\ndef get_instrument_start_date(trade):\n\n ins_start_date = trade.Instrument().StartDate()\n\n if not ins_start_date:\n return trade.ValueDay()\n return ins_start_date\n\n\ndef get_instrument_end_date(trade, end_date):\n\n ins_exp_date = trade.Instrument().ExpiryDate()\n end_date = fa_date(end_date)\n\n ins_end_date = (ins_exp_date if (ins_exp_date and ins_exp_date < end_date) else end_date)\n return str(ins_end_date)[:10]\n\n\ndef get_account_number(money_flow):\n\n if not money_flow:\n return 'N/A'\n\n money_flow = money_flow[0]\n \n if money_flow.CounterpartyAccount():\n return money_flow.CounterpartyAccount().Account()\n\n if money_flow.AcquirerAccount():\n return money_flow.AcquirerAccount().Account()\n return 'N/A'\n\n\ndef send_success_notification(email_addresses, report_type, report_name=None):\n\n report_name = report_name if report_name else 'Report'\n\n if email_addresses:\n row_info = ['SUCCESS', '{0} produced successfully. Check TradingManager folder'.format(report_name)]\n add_row('SUCCESS', row_info)\n send_report(email_addresses, EMAIL_SENDER,\n '{type} Report Status {today} - {env}'.format(type=report_type, today=TODAY, env=ENV),\n '{type} Report Status'.format(type=report_type))\n\n\ndef amount(trade, date, value):\n\n if isinstance(value, float):\n if is_foreign_currency(trade):\n return value * get_rate(trade.Currency().Name(), date)\n return value\n\n if math.isnan(value) or not hasattr(value, \"Number\"):\n return 0\n\n if is_foreign_currency(trade):\n return value.Number() * get_rate(trade.Currency().Name(), date)\n return value.Number()\n\n\ndef get_val_end(trade, date, set_global_vars=True):\n\n if set_global_vars:\n set_global_variables(fa_date(date))\n val_end = DEALSHEET_CS.CalculateValue(trade, 'Total Val End')\n\n return amount(trade, date, val_end)\n\n\ndef get_cash_end(trade, date, set_global_vars=True):\n\n if set_global_vars:\n set_global_variables(fa_date(date))\n cash_end = DEALSHEET_CS.CalculateValue(trade, 'Portfolio Cash End')\n\n return amount(trade, date, cash_end)\n\n\ndef get_accrued_interest(trade, start_date, end_date):\n\n set_global_variables(fa_date(end_date), fa_date(start_date))\n accrued_interest = DEALSHEET_CS.CalculateValue(trade, 'Portfolio Accrued Interest')\n\n return amount(trade, end_date, accrued_interest)\n\n\ndef get_settled_interest(trade, start_date, end_date):\n\n set_global_variables(fa_date(end_date), fa_date(start_date))\n settled_interest = DEALSHEET_CS.CalculateValue(trade, 'Portfolio Settled Interest')\n\n return amount(trade, end_date, settled_interest)\n\n\ndef get_projected_amount(moneyflow, end_date):\n\n projected_amount = MONEYFLOW_CS.CalculateValue(moneyflow, PROJECTED_AMOUNT)\n\n return amount(moneyflow, end_date, projected_amount)\n\n\ndef get_moneyflows(trade, start_date, end_date):\n\n moneyflows_dict = {}\n moneyflows = trade.MoneyFlows()\n payments = trade.Payments()\n\n for moneyflow in moneyflows:\n if start_date <= moneyflow.PayDay() < end_date and moneyflow.Type() != 'None':\n moneyflows_dict.setdefault(moneyflow.Type(), 0.0)\n\n amount = get_projected_amount(moneyflow, end_date)\n moneyflows_dict[moneyflow.Type()] += amount if amount < 0 else 0\n \n for payment in payments:\n if start_date <= payment.PayDay() < end_date and payment.Type() == 'Aggregated Forward Premium':\n moneyflows_dict.setdefault(payment.Type(), 0.0)\n \n amount = payment.Amount()\n moneyflows_dict[payment.Type()] += amount if amount 
< 0 else 0\n return moneyflows_dict\n\n\ndef set_global_variables(end_date, start_date='Inception'):\n\n if start_date == 'Inception':\n start_date = acm_date(start_date)\n\n global_simulations = [('Portfolio Profit Loss Start Date', 'Custom Date'),\n ('Portfolio Profit Loss Start Date Custom', start_date),\n ('Portfolio Profit Loss End Date', 'Custom Date'),\n ('Portfolio Profit Loss End Date Custom', end_date),\n ('Valuation Date', end_date),\n ('Valuation Parameter Date', end_date),\n ]\n\n for column, value in global_simulations:\n DEALSHEET_CS.SimulateGlobalValue(column, value)\n\n\ndef remove_global_simulations():\n\n DEALSHEET_CS.RemoveGlobalSimulation('Portfolio Profit Loss Start Date Custom')\n DEALSHEET_CS.RemoveGlobalSimulation('Portfolio Profit Loss Start Date')\n DEALSHEET_CS.RemoveGlobalSimulation('Portfolio Profit Loss End Date Custom')\n DEALSHEET_CS.RemoveGlobalSimulation('Portfolio Profit Loss End Date')\n\n DEALSHEET_CS.RemoveGlobalSimulation('Valuation Date')\n DEALSHEET_CS.RemoveGlobalSimulation('Valuation Parameter Date')\n\n","sub_path":"Python modules/sars_reporting_functions.py","file_name":"sars_reporting_functions.py","file_ext":"py","file_size_in_byte":10360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619967748","text":"import time\nimport logging\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\n\nCAT1_XPATH ='/html/body/div/div/div/div/div[1]/div[1]/div/div/form/div[1]/div/div/div/div/div/div[3]/div/div/div/div[1]/div/div[1]/div[1]/input[1]'\nCAT2_XPATH ='/html/body/div/div/div[1]/div/div[1]/div[1]/div/div/form/div[1]/div/div/div/div/div/div[3]/div/div/div/div[2]/div/div[1]/div[1]/input[1]'\nCAT3_XPATH =\"/html/body/div/div/div[1]/div/div[1]/div[1]/div/div/form/div[1]/div/div/div/div/div/div[3]/div/div/div/div[3]/div/div[1]/div[1]/input[1]\" \nCALENDAR_NEXT =\"/html/body/div/div/div/div/div[1]/div/div/div/div[1]/div/button[2]\"\n\n# カレンダーのxpath作成\ndef set_calendar_xpath(row, col):\n CALENDAR_SELECT =\"/html/body/div/div/div/div/div[1]/div/div/div/div[2]/div/div/div/div/table/tbody/tr[\" + str(row) + \"]/td[\" + str(col) + \"]\"\n logging.info('CALENDAR_SELECT %s', CALENDAR_SELECT)\n return CALENDAR_SELECT\n\nCALENDAR_DECIDE =\"/html/body/div/div/div/div/footer/div/div/div/button[2]\"\nCALENDAR_CONFIRM=\"/html/body/div/div/div[3]/div/div/div/div[3]/div/button\"\nCALENDAR_RESERVE=\"/html/body/div/div/div[1]/div/footer/div/div/div/button[2]\"\nDIALOG_MESSAGE =\"/html/body/div/div/div[4]/div/div/div/div/div\"\n\nclass Liff:\n\n def __init__(self, url, login_name, login_pw, headless):\n self.url = url\n self.login_name = login_name\n self.login_pw = login_pw\n self.calendar_row = 1 #1コマ目\n self.calendar_col = 3 #火曜日\n self.options = Options()\n if(headless):self.options.add_argument('--headless')\n self.driver = webdriver.Chrome(options=self.options)\n\n # ログイン\n def login(self):\n logging.info('ログイン開始')\n self.driver.get(self.url)\n time.sleep(2)\n id_box = self.driver.find_element_by_name('tid')\n pw_box = self.driver.find_element_by_name('tpasswd')\n id_box.send_keys(self.login_name)\n pw_box.send_keys(self.login_pw)\n pw_box.submit()\n logging.info('ログイン成功')\n\n def input_data(self):\n logging.info('帳票入力開始')\n logging.info('分類1入力')\n time.sleep(4)\n cat1_box = self.driver.find_element_by_xpath(CAT1_XPATH)\n cat1_box.send_keys('PNL1')\n cat1_box.send_keys(Keys.ENTER)\n time.sleep(0.5)\n\n logging.info('分類2入力')\n 
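A side note on the fixed time.sleep() pauses used throughout this Selenium script: an explicit wait polls for the element and resumes as soon as it appears, which is both faster and less flaky than sleeping a fixed interval. A minimal sketch against the Selenium 3 API style used here (the 10-second timeout is an arbitrary choice; self.driver and CAT1_XPATH are the names from the surrounding class):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Poll up to 10 s for the field instead of a fixed time.sleep(4).
wait = WebDriverWait(self.driver, 10)
cat1_box = wait.until(EC.presence_of_element_located((By.XPATH, CAT1_XPATH)))
cat1_box.send_keys('PNL1')
cat1_box.send_keys(Keys.ENTER)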
cat2_box = self.driver.find_element_by_xpath(CAT2_XPATH)\n cat2_box.send_keys('PNL2')\n cat2_box.send_keys(Keys.ENTER)\n time.sleep(0.5)\n\n logging.info('分類3を入力')\n cat3_box = self.driver.find_element_by_xpath(CAT3_XPATH)\n cat3_box.send_keys('PNL3')\n cat3_box.send_keys(Keys.ENTER)\n time.sleep(0.5)\n\n logging.info('「カレンダー設定に進む」をクリック')\n cat3_box.submit()\n time.sleep(4)\n\n def select_calendar(self):\n\n logging.info('カレンダーで次へを選択')\n btn_calendar_next = self.driver.find_element_by_xpath(CALENDAR_NEXT)\n btn_calendar_next.click()\n time.sleep(1)\n\n logging.info('カレンダーで日付を選択')\n btn_calendar_select = self.driver.find_element_by_xpath(set_calendar_xpath(self.calendar_row, self.calendar_col))\n logging.info('要素が表示されるように画面移動してクリック')\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", btn_calendar_select)\n btn_calendar_select.click()\n time.sleep(0.5)\n\n logging.info('カレンダーで日付を確定')\n btn_calendar_decide = self.driver.find_element_by_xpath(CALENDAR_DECIDE)\n btn_calendar_decide.click()\n time.sleep(0.5)\n\n logging.info('カレンダーで日付を確認')\n btn_calendar_confirm = self.driver.find_element_by_xpath(CALENDAR_CONFIRM)\n btn_calendar_confirm.click()\n time.sleep(0.5)\n\n logging.info('カレンダーで日付を予約確定')\n btn_calendar_reserve = self.driver.find_element_by_xpath(CALENDAR_RESERVE)\n btn_calendar_reserve.click()\n time.sleep(2)\n logging.info('カレンダー選択終了')\n\n def check_result(self):\n time.sleep(1)\n try:\n if(self.driver.find_element_by_xpath('//*[contains(text(), \"予約が完了しました\")]')):\n return 'row: ' + str(self.calendar_row) + ' col: ' +str(self.calendar_col) + 'コマで予約完了しました'\n except:\n pass\n\n try:\n if(self.driver.find_element_by_xpath('//*[contains(text(), \"予約済みです\")]')):\n logging.info('row: %s col: %s は予約状態のため1コマ先を予約します', self.calendar_row, self.calendar_col)\n self.driver.back()\n self.calendar_row += 1\n time.sleep(2)\n return '予約済みです'\n except:\n logging.error('帳票入力失敗')\n return '帳票入力失敗'\n\n\n def quit(self, logging_level):\n logging.info('All done')\n if(logging_level == 'DEBUG'): input('Input Enter Key...')\n self.driver.quit()","sub_path":"set/manipulate.py","file_name":"manipulate.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77678992","text":"from flask import Flask\nfrom flask import request\nfrom PIL import Image\nimport imagehash\nimport os\nfrom flask import render_template\n\napp = Flask(__name__)\nUPLOAD_FOLDER = '/home/sk/Documents/innovacer'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef upload_t():\n file = request.files['imagefile']\n if file:\n file_name = \"c\"\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n return 'OK'\n\n\n'''\ndef upload_file():\n img_f = Flask.request.files.get('imagefile')\n return hash_it(img_f)\n'''\n\n\n\n\n\n@app.route('/')\ndef index():\n return render_template('display.html')\n\n\n@app.route('/hello', methods = ['GET','POST'])\ndef hello():\n if request.method == 'POST':\n upload_t()\n\n hash = imagehash.average_hash(Image.open(\"c\"))\n return str(hash)\n\n\n\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300066535","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.base import clone\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import 
DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nfrom fairness_aware_classification.classifiers import AdaFairClassifier, \\\n AdaptiveWeightsClassifier, SMOTEBoostClassifier\nfrom fairness_aware_classification.datasets import *\nfrom fairness_aware_classification.utils import sensitive_mask_from_features\n\n\nif __name__ == \"__main__\":\n \n # Load the data\n data = COMPASDataset()\n dataset_name = type(data).__name__\n \n # Split the data\n X_train, X_test, y_train, y_test, s_train, s_test = train_test_split(\n data.X,\n data.y,\n data.sensitive,\n test_size=0.5,\n )\n \n # Select base classfiers for the meta-classifiers\n base_clf_af = lambda: DecisionTreeClassifier(max_depth=2)\n base_clf_aw = LogisticRegression(solver=\"liblinear\")\n base_clf_sb = lambda: LogisticRegression()\n \n # Single base classifier for the sake of comparison\n base_clf = clone(base_clf_aw)\n base_clf_name = type(base_clf).__name__\n \n # The criterion function `objective` should be customized\n # depending on the data. It should be maximized.\n af = AdaFairClassifier(50, base_clf_af)\n aw = AdaptiveWeightsClassifier(base_clf, data.objective)\n sb = SMOTEBoostClassifier(base_clf_sb, 4, 20, 500)\n \n # Get predictions of the base classifier\n base_clf.fit(X_train, y_train)\n y_pred_base = base_clf.predict(X_test)\n acc_base = accuracy_score(y_test, y_pred_base)\n obj_base = data.objective(y_test, y_pred_base, s_test)\n \n # Get prediction for the AdaFair classifier\n af.fit(X_train, y_train, s_train)\n y_pred_af = af.predict(X_test)\n acc_af = accuracy_score(y_test, y_pred_af)\n obj_af = data.objective(y_test, y_pred_af, s_test)\n \n # Get prediction for the weighted classifier\n aw.fit(X_train, y_train, s_train)\n y_pred_aw = aw.predict(X_test)\n acc_aw = accuracy_score(y_test, y_pred_aw)\n obj_aw = data.objective(y_test, y_pred_aw, s_test)\n \n # Get prediction for the SMOTEBoost classifier\n sb.fit(X_train, y_train)\n y_pred_sb = sb.predict(X_test)\n acc_sb = accuracy_score(y_test, y_pred_sb)\n obj_sb = data.objective(y_test, y_pred_sb, s_test)\n \n print(y_pred_base)\n print('*'*10)\n print(y_pred_af)\n print('*'*10)\n print(y_pred_aw)\n print('*'*10)\n print(y_pred_sb)\n print('*'*10)\n \n # Print results\n print(\"{}:\".format(base_clf_name))\n print(\"* Accuracy: {:.4f}\".format(acc_base))\n print(\"* Objective: {:.4f}\\n\".format(obj_base))\n print(\"AdaFairClassifier\")\n print(\"* Accuracy: {:.4f}\".format(acc_af))\n print(\"* Objective: {:.4f}\\n\".format(obj_af))\n print(\"AdaptiveWeightsClassifier\")\n print(\"* Accuracy: {:.4f}\".format(acc_aw))\n print(\"* Objective: {:.4f}\\n\".format(obj_aw))\n print(\"SMOTEBoostClassifier\")\n print(\"* Accuracy: {:.4f}\".format(acc_sb))\n print(\"* Objective: {:.4f}\\n\".format(obj_sb))\n \n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"279502434","text":"def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n\ntmp = \"tmp\"\nsu1 = 0\nnumbers = []\nfor i in range(3, 1000001):\n su1 = 0\n print(\"Checking \" + str(i))\n tmp = str(i)\n for o in range(0,len(tmp)):\n su1 = su1 + factorial(int(tmp[o]))\n if su1 == i:\n numbers.append(i)\n\nprint(numbers)\n\nsu2 = 0\n\nfor x in range(0,len(numbers)):\n su2 = su2 + 
int(numbers[x])\n\nprint(su2)","sub_path":"working/034.py","file_name":"034.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532428452","text":"\"\"\"\n Luigi tasks to take care of cross-reference the Specimen data with the tracking system reports.\n It also applies some sanitation functions to the data after dong the cross-reference operations.\n\n The general cross-reference process is:\n\n - Join the specimen information with the colony information\n - Generate the specimen allelic composition using the colony information\n - Override the data source and the project info for MGP and MGP Legacy colonies\n - Override 3i specimen project using the colony phenotyping consortium\n - Add production centre information when is missing using the colony information\n - Add life stage\n\n\"\"\"\nfrom typing import Any\n\nimport luigi\nfrom luigi.contrib.spark import PySparkTask\nfrom pyspark import SparkContext\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.functions import col, concat, lit, udf, when\nfrom pyspark.sql.types import StringType\n\nfrom impc_etl.config.constants import Constants\nfrom impc_etl.jobs.clean import ColonyCleaner\nfrom impc_etl.jobs.clean.specimen_cleaner import (\n EmbryoSpecimenCleaner,\n MouseSpecimenCleaner,\n)\nfrom impc_etl.jobs.transform.cross_ref_helper import (\n generate_allelic_composition,\n override_europhenome_datasource,\n)\nfrom impc_etl.workflow.config import ImpcConfig\n\n\nclass SpecimenCrossRef(PySparkTask):\n \"\"\"\n PysPark task to cross-reference Specimen data with tracking systems reports.\n This task depends on:\n\n - `impc_etl.jobs.clean.specimen_cleaner.SpecimenCleaner`\n (`impc_etl.jobs.clean.specimen_cleaner.MouseSpecimenCleaner`\n or `impc_etl.jobs.clean.specimen_cleaner.EmbryoSpecimenCleaner`)\n - `impc_etl.jobs.clean.colony_cleaner.ColonyCleaner`\n \"\"\"\n\n #: Name of the Spark task\n name = \"IMPC_Specimen_Cross_Reference\"\n\n #: Specimen type can be 'mouse' or 'embryo'\n specimen_type: luigi.Parameter = luigi.Parameter()\n\n #: Path of the output directory where the new parquet file will be generated.\n output_path = luigi.Parameter()\n\n def requires(self):\n \"\"\"\n Defines the luigi task dependencies\n \"\"\"\n specimen_cleaning_dep = (\n MouseSpecimenCleaner()\n if self.specimen_type == \"mouse\"\n else EmbryoSpecimenCleaner()\n )\n\n return [specimen_cleaning_dep, ColonyCleaner()]\n\n def output(self):\n \"\"\"\n Returns the full parquet path as an output for the Luigi Task\n (e.g. 
impc/dr15.2/parquet/mouse_specimen_cross_ref_parquet)\n \"\"\"\n return ImpcConfig().get_target(\n f\"{self.output_path}{self.specimen_type}_specimen_cross_ref_parquet\"\n )\n\n def app_options(self):\n \"\"\"\n Generates the options pass to the PySpark job\n \"\"\"\n return [\n self.input()[0].path,\n self.input()[1].path,\n self.specimen_type,\n self.output().path,\n ]\n\n def main(self, sc: SparkContext, *args: Any):\n \"\"\"\n Load the specified Specimen parquet and Colonies parquet, does some cross-reference between them\n and applies some data sanitation functions after.\n \"\"\"\n specimen_parquet_path = args[0]\n colonies_parquet_path = args[1]\n specimen_type = args[2]\n output_path = args[3]\n spark = SparkSession(sc)\n specimen_df = spark.read.parquet(specimen_parquet_path)\n colonies_df = spark.read.parquet(colonies_parquet_path)\n specimen_cross_ref_df = self.cross_reference_specimens(\n specimen_df, colonies_df, specimen_type\n )\n specimen_cross_ref_df.write.mode(\"overwrite\").parquet(output_path)\n\n def cross_reference_specimens(\n self, specimen_df: DataFrame, colonies_df: DataFrame, entity_type: str\n ) -> DataFrame:\n \"\"\"\n Cross-reference Specimen data with colony tracking report information.\n \"\"\"\n specimen_df = specimen_df.alias(\"specimen\")\n colonies_df = colonies_df.alias(\"colony\")\n\n # join the specimen information with the colony information\n specimen_df = specimen_df.join(\n colonies_df,\n (specimen_df[\"_colonyID\"] == colonies_df[\"colony_name\"]),\n \"left_outer\",\n )\n\n # generate the specimen allelic composition using the colony information\n # override 3i specimen project using the colony phenotyping consortium\n # override the data source and the project info for MGP and MGP Legacy colonies\n specimen_df = (\n specimen_df.transform(self.generate_specimen_allelic_composition)\n .transform(override_europhenome_datasource)\n .transform(self.override_3i_specimen_project)\n )\n specimen_df = specimen_df.withColumn(\n \"_productionCentre\",\n when(\n col(\"_productionCentre\").isNull(),\n when(\n col(\"_phenotypingCentre\").isNotNull(), col(\"_phenotypingCentre\")\n ).otherwise(col(\"colony.production_centre\")),\n ).otherwise(col(\"_productionCentre\")),\n )\n specimen_df = specimen_df.select(\n \"specimen.*\",\n \"_productionCentre\",\n \"allelicComposition\",\n \"colony.phenotyping_consortium\",\n )\n\n if entity_type == \"embryo\":\n specimen_df = specimen_df.transform(self.add_embryo_life_stage_acc)\n if entity_type == \"mouse\":\n specimen_df = specimen_df.transform(self.add_mouse_life_stage_acc)\n return specimen_df\n\n def generate_specimen_allelic_composition(\n self, specimen_df: DataFrame\n ) -> DataFrame:\n \"\"\"\n Takes in a specimen dataframe and adds the specimen allelic composition.\n \"\"\"\n generate_allelic_composition_udf = udf(\n generate_allelic_composition, StringType()\n )\n specimen_df = specimen_df.withColumn(\n \"allelicComposition\",\n generate_allelic_composition_udf(\n \"specimen._zygosity\",\n \"colony.allele_symbol\",\n \"colony.marker_symbol\",\n \"specimen._isBaseline\",\n \"specimen._colonyID\",\n ),\n )\n return specimen_df\n\n def override_3i_specimen_project(self, specimen_df: DataFrame):\n \"\"\"\n Takes in a Specimen dataframe and replaces the _'project'_\n value for the 3i specimens with the value for the _'phenotyping_consortium'_ coming\n from the colonies tracking report.\n \"\"\"\n specimen_df.withColumn(\n \"_project\",\n when(\n specimen_df[\"_dataSource\"] == \"3i\", 
col(\"phenotyping_consortium\")\n ).otherwise(\"_project\"),\n )\n return specimen_df\n\n def add_mouse_life_stage_acc(self, specimen_df: DataFrame):\n \"\"\"\n Adds life stage to mouse specimen dataframe.\n \"\"\"\n specimen_df = specimen_df.withColumn(\n \"developmental_stage_acc\", lit(\"EFO:0002948\")\n )\n specimen_df = specimen_df.withColumn(\n \"developmental_stage_name\", lit(\"postnatal\")\n )\n return specimen_df\n\n def add_embryo_life_stage_acc(self, specimen_df: DataFrame):\n \"\"\"\n Adds life stage to embryo specimen dataframe.\n \"\"\"\n efo_acc_udf = udf(self.resolve_embryo_life_stage, StringType())\n specimen_df = specimen_df.withColumn(\n \"developmental_stage_acc\", efo_acc_udf(\"_stage\")\n )\n specimen_df = specimen_df.withColumn(\n \"developmental_stage_name\", concat(lit(\"embryonic day \"), col(\"_stage\"))\n )\n return specimen_df\n\n def resolve_embryo_life_stage(self, embryo_stage):\n \"\"\"\n Resolves the life stage of an Embryo specimen using `impc_etl.config.constants.Constants.EFO_EMBRYONIC_STAGES`\n \"\"\"\n embryo_stage = str(embryo_stage).replace(\"E\", \"\")\n return (\n Constants.EFO_EMBRYONIC_STAGES[embryo_stage]\n if embryo_stage in Constants.EFO_EMBRYONIC_STAGES\n else \"EFO:\" + embryo_stage + \"NOT_FOUND\"\n )\n\n\nclass MouseSpecimenCrossRef(SpecimenCrossRef):\n #: Name of the Spark task\n name = \"IMPC_Mouse_Specimen_Cross_Reference\"\n specimen_type = \"mouse\"\n\n\nclass EmbryoSpecimenCrossRef(SpecimenCrossRef):\n #: Name of the Spark task\n name = \"IMPC_Embryo_Specimen_Cross_Reference\"\n specimen_type = \"embryo\"\n","sub_path":"impc_etl/jobs/transform/specimen_cross_ref.py","file_name":"specimen_cross_ref.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"2738872","text":"class Node(object):\n \"\"\"双向链表节点\"\"\"\n def __init__(self,item):\n self.elem = item\n self.next = None\n self.prev = None\n\nclass DoubleLinkList(object):\n \"\"\"双链表\"\"\" \n\n def __init__(self,node=None):\n self.__head = node\n\n def is_empty(self):\n \"\"\"链表是否为空\"\"\"\n return self.__head is None \n\n def length(self):\n \"\"\"链表长度\"\"\"\n # cur 游标,用来移动遍历节点\n cur = self.__head\n # count 记录数量\n count = 0\n while cur != None:\n count += 1\n cur = cur.next\n return count\n\n def travel(self):\n \"\"\"遍历整个链表\"\"\"\n cur = self.__head\n while cur != None:\n print(cur.elem,end=\" \")\n cur = cur.next\n\n def add(self,item):\n \"\"\"链表头部添加元素 头插法\"\"\"\n node = Node(item) \n node.next = self.__head\n self.__head = node\n node.next.prev = node\n\n def append(self,item):\n \"\"\"链表尾部添加元素 尾插法\"\"\"\n node = Node(item)\n if self.is_empty():\n self.__head = node\n else:\n cur = self.__head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur\n\n def insert(self,pos,item):\n \"\"\"指定位置添加元素\n :param pos 从0开始\n \"\"\"\n if pos <= 0:\n self.add(item)\n elif pos > (self.length()-1):\n self.append(item)\n else:\n cur = self.__head\n node = Node(item)\n count = 0\n while count < pos:\n count += 1\n cur = cur.next\n # 循环退出后,cur指向pos位置\n node.next = cur\n node.prev = cur.prev\n cur.prev.next = node\n cur.prev = node\n\n def remove(self,item):\n \"\"\"删除节点\"\"\"\n cur = self.__head\n while cur != None:\n if cur.elem == item:\n # 先判断此节点是否为头节点\n if cur == self.__head:\n self.__head = cur.next\n if cur.next:\n # 判断链表是否只有一个节点\n cur.next.prev = None\n else:\n cur.prev.next = cur.next\n if cur.next:\n cur.next.prev = cur.prev\n break\n else:\n cur = cur.next\n\n def 
search(self,item):\n \"\"\"查找节点是否存在\"\"\"\n cur = self.__head\n while cur != None:\n if cur.elem == item:\n return True\n else:\n cur = cur.next\n return False\n \n\nif __name__ == '__main__':\n dll = DoubleLinkList()\n\n # print(dll.is_empty())\n # print(dll.length())\n\n dll.append(1)\n # print(dll.is_empty())\n # print(dll.length())\n dll.append(2)\n dll.add(8)\n dll.append(3)\n dll.append(4)\n dll.append(5)\n dll.append(6)\n dll.insert(-1,9)\n dll.insert(3,10)\n dll.insert(10,200)\n dll.remove(100)\n dll.remove(9)\n dll.remove(200)\n dll.travel()\n\n\n","sub_path":"算法/suan_fa/线性表/double_link_list.py","file_name":"double_link_list.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"237773193","text":"#!/usr/bin/python3\n\n\"\"\"\nExits with non zero return code and error message (stderr) if input is False.\n\"\"\"\n\nimport sys\nimport json\n\n\ninput_string = sys.stdin.read()\n# input_string = \"Hello World!\"\nstatus = 0\n\nif not json.loads(input_string).get(\"data\"):\n status = \"Conditions to continue flow have not been met - the flow will now exit\"\n\nsys.exit(status)\n","sub_path":"services/quit if false/quit_if_false.py","file_name":"quit_if_false.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173819413","text":"\"\"\"\nLightbox\n------------------------\n\nThis plugin:\n\n- Enables automatic Lightbox for images\nTODO: Need to add a test.py for this plugin.\n\n\"\"\"\nimport os\nimport re\n\nfrom pelican import signals\nfrom pelican.contents import Content\nfrom bs4 import BeautifulSoup\n\n__LIGHTBOX2_VERSION__ = '2.8.2'\n__LIGHTBOX2_DEFAULT_SETTINGS = {\n 'LIGHTBOX2_PREFIX': 'lb-',\n 'LIGHTBOX2_SET': 'images',\n 'LIGHTBOX2_USE_THUMBNAILER': False,\n 'LIGHTBOX2_THUMBNAILER_SIZE': 'square',\n 'LIGHTBOX2_ADDJQUERY': False\n}\n\n\ndef lightbox2_initialize(p):\n \"\"\"\n create paths to lightbox2 on cdnjs.com and set default settings\n\n :param p: pelican instance\n :return: None\n \"\"\"\n # user can override lightbox2 version\n if not 'LIGHTBOX2_VERSION' in p.settings:\n p.settings['LIGHTBOX2_VERSION'] = __LIGHTBOX2_VERSION__\n\n # add jquery if user asked for it\n withjquery = '-plus-jquery' if p.settings.get(\n 'LIGHTBOX2_ADDJQUERY', False) else ''\n\n # construct the urls\n p.settings['LIGHTBOX2_JAVASCRIPT'] = 'https://cdnjs.cloudflare.com/ajax/libs/lightbox2/{0}/js/lightbox{1}.min.js' \\\n .format(p.settings['LIGHTBOX2_VERSION'], withjquery)\n p.settings['LIGHTBOX2_STYLESHEET'] = 'https://cdnjs.cloudflare.com/ajax/libs/lightbox2/{0}/css/lightbox.min.css' \\\n .format(p.settings['LIGHTBOX2_VERSION'])\n\n # create default settings\n for key, value in __LIGHTBOX2_DEFAULT_SETTINGS.items():\n if not key in p.settings:\n p.settings[key] = value\n\n\ndef lightbox2_wrap_imagetags(p):\n \"\"\" Wrap image tags in links to add Lightbox support\n\n Any image tag in the content with class={LBPREFIX}-{SETNAME} will be\n wrapped with an anchored href with Lightbox support. 
`LBPREFIX` is defined\n in the settings file as `LIGHTBOX_PREFIX` with a default of `'lb-'`.\n\n :param p: pelican instance\n :return: None\n \"\"\"\n\n lbprefix = p.settings['LIGHTBOX2_PREFIX']\n lbset = p.settings['LIGHTBOX2_SET']\n lbusethumbs = p.settings['LIGHTBOX2_USE_THUMBNAILER']\n lbthumbsize = p.settings['LIGHTBOX2_THUMBNAILER_SIZE']\n # pictures path for thumbnailer\n image_path = p.settings.get('IMAGE_PATH', 'pictures')\n\n if p._content is not None:\n content = p._content\n soup = BeautifulSoup(content, 'html.parser')\n\n # Wrap each image tag in an anchor with a link. Add the\n # attribute for the lightbox set to activate.\n if 'img' in content:\n for tag in soup('img'):\n\n # Skip if no class tag\n if not tag.has_attr('class'):\n continue\n\n for c in tag['class']:\n c.split(lbprefix)\n substr = c.split(lbprefix, 1)\n\n # If the first element of the split is empty then the prefix\n # is at the start of the string c. We also must check that\n # c is not empty.\n if c and not substr[0]:\n if substr[1]:\n gallery = substr[1]\n else:\n gallery = lbgallery\n\n link_wrapper = soup.new_tag(\"a\", href=tag['src'])\n # We have to add data-lightbox seperately b/c it fails\n # in the above as a seperate expression (- is a minus\n # sign)\n link_wrapper['data-lightbox'] = substr[1]\n\n # Set the title (ie: lightbox caption) to the alt-text\n if tag.has_attr('alt'):\n link_wrapper['title'] = tag['alt']\n\n # Set the title attribute as a caption, if the image is\n # wrapped in a figure\n fig = tag.find_parent('div', 'figure')\n if fig:\n caption = fig.findChild('p', 'caption')\n if caption:\n link_wrapper['title'] = caption.get_text()\n\n # use thumbnails if asked and picture is in IMAGE_PATH\n in_imagepath_regex = re.compile(\n r'^/*{0}/'.format(image_path))\n if lbusethumbs and in_imagepath_regex.match(tag['src']):\n thumb_size = tag.get('thumbsize', lbthumbsize)\n del tag['thumbsize']\n tag['src'] = in_imagepath_regex.sub('', tag['src'])\n head, tail = os.path.splitext(\n os.path.basename(tag['src']))\n tag['src'] = '/thumbnails/' + os.path.join(\n os.path.dirname(tag['src']), head+'_thumbnail_'+thumb_size+tail)\n\n tag.wrap(link_wrapper)\n\n break # So we only use the first class specified\n\n p._content = soup.decode()\n\n\ndef lightbox2_cdnhtml(generators):\n \"\"\"\n Add the html for the lightboxcdn to all generators\n\n :param generators: All the generators\n :return: None\n \"\"\"\n for generator in generators:\n generator.context['LIGHTBOX2CDN_JAVASCRIPT'] = '' \\\n .format(generator.settings['LIGHTBOX2_JAVASCRIPT'])\n generator.context['LIGHTBOX2CDN_STYLESHEET'] = '' \\\n .format(generator.settings['LIGHTBOX2_STYLESHEET'])\n\n\ndef register():\n signals.initialized.connect(lightbox2_initialize)\n signals.content_object_init.connect(lightbox2_wrap_imagetags)\n signals.all_generators_finalized.connect(lightbox2_cdnhtml)\n","sub_path":"pelican_lightbox2.py","file_name":"pelican_lightbox2.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89580959","text":"import numpy as np\nfrom scipy.stats import spearmanr\nfrom scipy import corrcoef\nfrom keras.models import load_model\nfrom sklearn.metrics import roc_curve, auc\nfrom datasets import load_data_test\nfrom optparse import OptionParser\n\n#testing script for spikefinder\n\ndef score(a, b, method, downsample=4):\n \"\"\"\n Estimate similarity score between two reslts.\n \"\"\"\n methods = {\n 'loglik': _loglik,\n 'info': _info,\n 
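A caveat about the dispatch table being built here: methods[method] yields a function object, so the branch further down that tests `if func=='info'` compares a function against a string and is always false, meaning the fps keyword is never passed. (Note also that _loglik and _info are listed in the table but not defined anywhere in this module.) Comparing the selected method name fixes the dispatch:

# Compare the chosen method name, not the looked-up function object.
if method == 'info':
    result.append(func(x, y, fps=100 / downsample))
else:
    result.append(func(x, y))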
'corr': _corr,\n 'auc': _auc,\n 'rank': _rank\n }\n if method not in methods.keys():\n raise Exception('scoring method not one of: %s' % ' '.join(methods.keys()))\n\n func = methods[method]\n\n result = [] \n for i in range(a.shape[0]):\n x = a[i,:]\n y = b[i,:]\n x = x[:len(spike_npt[k])]\n ml = min([len(x),len(y)])\n\n x = x[0:ml]\n y = y[0:ml]\n naninds = np.isnan(x) | np.isnan(y)\n x = x[~naninds]\n y = y[~naninds]\n x = _downsample(x, downsample)\n y = _downsample(y, downsample)\n\n ml = min([len(x),len(y)])\n\n x = x[0:ml]\n y = y[0:ml]\n\n if not len(x) == len(y):\n raise Exception('mismatched lengths %s and %s' % (len(x), len(y)))\n\n if func=='info':\n result.append(func(x, y,fps=100/downsample))\n else:\n result.append(func(x, y))\n\n return result\n\ndef _corr(x, y):\n return corrcoef(x, y)[0,1]\n\ndef _rank(x, y):\n return spearmanr(x, y).correlation\n\ndef _auc(x, y):\n fpr, tpr, thresholds = roc_curve(y>0,x)\n return auc(fpr,tpr)\n\ndef _downsample(signal, factor):\n \"\"\"\n Downsample signal by averaging neighboring values.\n @type signal: array_like\n @param signal: one-dimensional signal to be downsampled\n @type factor: int\n @param factor: this many neighboring values are averaged\n @rtype: ndarray\n @return: downsampled signal\n \"\"\"\n\n if factor < 2:\n return np.asarray(signal)\n\n return np.convolve(np.asarray(signal).ravel(), np.ones(factor), 'valid')[::factor]\n\n\n\n\n\n\ndef model_test(model, test_dataset):\n #model.load_weights('model/model_conv_11_5')\n test_ip = test_dataset['calcium signal padded']\n pred_test = model.predict(test_ip)\n gt_test = np.reshape(test_dataset['spikes train padded'],(test_ip.shape[0],-1))\n pred_test = np.reshape(pred_test,(test_ip.shape[0],-1))\n corrs = score(pred_test, gt_test, method='corr')\n corrs = np.asarray(corrs)\n ranks = score(pred_test, gt_test, method='rank')\n ranks = np.asarray(ranks)\n aucs = score(pred_test, gt_test, method='auc')\n aucs = np.asarray(aucs)\n measures = []\n for i in range(5):\n corre = np.mean(corrs[id_staked_t==i])\n #print(corre)\n ranke = np.mean(ranks[id_staked_t==i])\n #print(ranke)\n auce = np.mean(aucs[id_staked_t==i])\n #print(auce)\n measures.append([corre, ranke, auce])\n return measures\n\n\ndef correlation_coefficient_loss(y_true, y_pred):\n x = y_true\n y = y_pred\n mx = K.mean(x, axis=1,keepdims=True)\n my = K.mean(y, axis=1,keepdims=True)\n xm, ym = x-mx, y-my \n r_num = K.sum(xm*ym, axis=1)\n r_den = K.sqrt(K.sum(K.square(xm),axis=1) * K.sum(K.square(ym),axis=1))\n r = r_num / r_den\n r = K.maximum(K.minimum(r, 1.0), -1.0)\n return 1 - K.square(r)\n\nif __name__== '__main__':\n\n usage = 'USAGE: %prog model_path'\n parser = OptionParser(usage=usage)\n opts, args = parser.parse_args()\n\n if len(args) != 1:\n parser.usage += '\\n\\n' + parser.format_option_help()\n parser.error('Wrong number of arguments')\n \n model = args[0] #model file location\n test_dataset = load_data_test()\n id_staked_t = test_dataset['ids stacked']\n spike_npt = test_dataset['spikes train']\n\n m = load_model (model, compile=False )\n results = model_test (m, test_dataset)\n print(results)\n\n","sub_path":"test_s2s.py","file_name":"test_s2s.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"509218811","text":"#!/usr/bin/env python\nimport os, json, re, time\n\ndef writeTemp():\n\n\ttemp = os.popen(\"/opt/vc/bin/vcgencmd measure_temp\").read()\n\trawtemp = re.sub(r'[^0-9.]', \"\", temp)\n\ttimestamp = 
time.time()\n\n\n\twith open('data.json', 'a') as outfile: \n\t\tjson.dump({'temp' : rawtemp, 'time' : timestamp}, outfile)\n\n\t\ttime.sleep(300)\n\nwhile True: \n\twriteTemp()\n","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80274437","text":"import praw\nimport urllib\nfrom random import randint\nimport ctypes, os\n\nsubReddit = \"multiwall\"\ntag = \"Dual\"\n\nreddit = praw.Reddit(client_id='vXbBf3rtaMrYtw', client_secret='tczE75Ap7PxjR_kio5NBALDhiI4', user_agent='wallpaper_dual')\nhot_posts = reddit.subreddit(subReddit).top(limit=400)\narr = []\nfor x in hot_posts:\n if ('png' in x.url or 'jpg' in x.url) and x.link_flair_text==tag:\n arr.append(x)\n\nx = arr[randint(0, len(arr) - 1)]\ntry:\n fName = x.url.split('/')[-1]\n if fName not in os.listdir('.'):\n f = open(fName,'wb')\n print(\"Downloading Wallpaper\")\n f.write(urllib.request.urlopen(x.url).read())\n path = f.name\n f.close()\n else:\n print(\"Using previous file\")\n path = \"./\"+fName\nexcept Exception as e:\n print(\"error: \"+str(e))\n\nctypes.windll.user32.SystemParametersInfoW(20, 0, os.path.abspath(path), 3)\n","sub_path":"get_wallpaper.py","file_name":"get_wallpaper.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"504218724","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\nfile = open('fitness_price.csv' , 'w')\nwriter = csv.writer(file)\nwriter.writerow(['Item Name' , 'Company' , 'Price'])\npg_no = 1\nwhile(True):\n print(f\"scraping page : {pg_no} \")\n url = f\"https://www.netmeds.com/non-prescriptions/fitness/page/{pg_no}\"\n page = requests.get(url)\n soup = BeautifulSoup(page.text , 'html.parser')\n\n main_div = soup.find_all(\"div\", class_=\"row product-list\")[0] \n\n items = main_div.find_all(\"div\" , class_ = \"cat-item\")\n if len(items) is 0: break\n \n for item in items: \n price = item.find(\"span\" , id = \"final_price\").contents[0]\n company = item.find(\"span\" , class_ = \"drug-varients\").contents[0].replace(\"Mfr: \" , \"\")\n name = item.a['title']\n writer.writerow([name ,company , price])\n \n pg_no = pg_no + 1\n\nfile.close()\n # print(soup.find_all(\"div\", class_=\"cat-item\")[0])","sub_path":"Scraping/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"485255063","text":"from API.MenuBar.Edit.Edit import Edit\nfrom API.Utility.Util import Util\nfrom API.ComponentBox import ComponentBoxConst\nfrom API.Device.AccessPoint.AccessPoint import AccessPoint\n\nfrom API.Device.LinksysRouter.LinksysRouter import LinksysRouter\n\nfrom API.Toolbar.CommonToolsBar.CommonToolsBar import CommonToolsBar\nfrom API.Toolbar.MainToolBar.MainToolbarConst import MainToolbarConst\n\nutil = Util()\neditMenu = Edit()\ncommonToolsBar = CommonToolsBar()\n\nCopyAccessPoint0 = AccessPoint(ComponentBoxConst.DeviceModel.ACCESSPOINT, 100, 50, \"CopyAccess Point0\")\nCopyLinkSysRouter0 = LinksysRouter(ComponentBoxConst.DeviceModel.LINKSYS, 200, 50, \"CopyWireless Router0\")\n\nAccessPoint0 = AccessPoint(ComponentBoxConst.DeviceModel.ACCESSPOINT, 100, 100, \"Access Point0\")\nLinkSysRouter0 = LinksysRouter(ComponentBoxConst.DeviceModel.LINKSYS, 200,100, \"Wireless Router0\")\n\ndef main():\n util.init()\n 
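Two small fixes for the netmeds price scraper above: `if len(items) is 0` tests object identity and only works because CPython caches small integers, and the csv module documentation recommends opening output files with newline='' so that no blank rows appear on Windows. A sketch of the corrected skeleton (scrape_page is a hypothetical helper wrapping the requests/BeautifulSoup code shown above):

import csv

# newline='' stops the csv module from emitting blank rows on Windows.
with open('fitness_price.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Item Name', 'Company', 'Price'])
    pg_no = 1
    while True:
        rows = scrape_page(pg_no)  # hypothetical: returns [name, company, price] lists
        if not rows:               # emptiness check instead of `len(rows) is 0`
            break
        writer.writerows(rows)
        pg_no += 1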
copyPaste_deviceOnWorkspace()\n\n\n\ndef copyPaste_deviceOnWorkspace():\n AccessPoint0.create() \n LinkSysRouter0.create() \n \n util.clickOnSimulation()\n util.clickOnRealtime()\n \n AccessPoint0.select()\n AccessPoint0.clickConfigTab()\n AccessPoint0.config.selectInterface(\"Port 0\")\n AccessPoint0.config.interface.port0.bandwidth(None)\n AccessPoint0.config.interface.port0.duplex(None)\n AccessPoint0.config.interface.port0.portStatus(False)\n AccessPoint0.close()\n\n LinkSysRouter0.select()\n LinkSysRouter0.clickConfigTab()\n LinkSysRouter0.config.selectInterface(\"Wireless\")\n LinkSysRouter0.config.interface.wireless.ssid(\"LinksysRouter\")\n LinkSysRouter0.config.interface.wireless.wep()\n LinkSysRouter0.config.interface.wireless.wepkey(\"0123456789\")\n LinkSysRouter0.close()\n\n util.selectObjectsOnWorkspace(AccessPoint0.x, AccessPoint0.y)\n util.selectObjectsOnWorkspace(LinkSysRouter0.x, LinkSysRouter0.y)\n util.clickButton(MainToolbarConst.COPY)\n util.clickButton(MainToolbarConst.PASTE)\n \n CopyAccessPoint0.select()\n CopyAccessPoint0.clickConfigTab()\n CopyAccessPoint0.config.settings.check.displayName(\"CopyAccess Point0\")\n CopyAccessPoint0.config.selectInterface(\"Port 0\")\n CopyAccessPoint0.config.interface.port0.check.bandwidth('Auto', False)\n CopyAccessPoint0.config.interface.port0.check.duplex('Auto', False)\n CopyAccessPoint0.config.interface.port0.check.portStatus(False)\n CopyAccessPoint0.close()\n\n CopyLinkSysRouter0.select()\n CopyLinkSysRouter0.clickConfigTab()\n CopyLinkSysRouter0.config.settings.check.displayName(CopyLinkSysRouter0.displayName)\n CopyLinkSysRouter0.config.selectInterface(\"Wireless\")\n CopyLinkSysRouter0.config.interface.wireless.check.ssid(\"LinksysRouter\")\n CopyLinkSysRouter0.config.interface.wireless.check.wepkey(\"0123456789\")\n CopyLinkSysRouter0.close()","sub_path":"trunk/workspace/Squish/src/TestScript/UI/suite_UI_51/tst_UI_51_ToolBar_Copy_Paste_Wireless/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"94607676","text":"from itertools import izip\nimport networkx as nx\nimport sys\n\ndef usage():\n '''\n Displays usage information.\n '''\n print (\"USAGE: python word_ladder.py \")\n\ndef arg_check():\n '''\n Checks if enough arguments are passed to the program.\n '''\n if len(sys.argv) < 2:\n print (\"ERROR: Not enough arguments.\")\n usage()\n sys.exit(1)\n\ndef word_length(string):\n '''\n Finds the length of a word from the start of a string.\n '''\n n = 0\n for c in string:\n if c.isalpha():\n n += 1\n return n\n\ndef parse_input():\n '''\n Parses input file and returns set of words.\n '''\n filename = sys.argv[1]\n words = set()\n with open(filename) as f:\n for line in f:\n line = line.decode()\n if not line.startswith('*'):\n if not words:\n length = word_length(line)\n word = line[:length]\n words.add(word)\n\n return words\n\ndef remove_from(char, string):\n '''\n Removes the first instance of a character from str\n '''\n if char in string:\n string = list(string)\n string.remove(char)\n return ''.join(string)\n else:\n return False\n\ndef hamming1(str1, str2):\n '''\n Returns True if the hamming distance is 1.\n Returns False otherwise\n '''\n assert len(str1) == len(str2), \"String lengths are unequal: str1 = %s, str2 = %s\"%(str1,str2)\n unequal = 0\n for c1, c2 in izip(str1, str2):\n if c1 != c2:\n if unequal == 0:\n unequal += 1\n else:\n return False\n\n return True\n\ndef 
combination1(str1, str2):\n '''\n Returns True if str2 is a combination of the characters of str1 except for 1.\n Returns False otherwise.\n '''\n assert len(str1) == len(str2)\n unequal = 0\n og_str2 = str2\n for c in str1:\n if c in str2:\n str2 = remove_from(c, str2)\n\n elif unequal == 0:\n unequal += 1\n else:\n return False\n\n return True\n\ndef edges_from_combo(G, words):\n '''\n Adds edges to G between nodes that share all but 1 character in any combination.\n '''\n reached_letters = set()\n letter_indexes, suffixes = dict(), dict()\n index = -1\n for word in words:\n index += 1\n sorted_suffix = str(sorted(word[1:]))\n if sorted_suffix not in suffixes:\n suffixes[sorted_suffix] = set([index])\n\n else:\n G.add_edges_from((words[i], word) for i in suffixes[sorted_suffix])\n suffixes[sorted_suffix].add(index)\n\n if word[0] not in reached_letters:\n reached_letters.add(word[0])\n letter_indexes[word[0]] = index\n\n else:\n G.add_edges_from((w, word)\n for w in words[letter_indexes[word[0]]:index]\n if combination1(w[1:], word[1:]))\n\n G.add_edges_from((w, word)\n for w in words[:letter_indexes[word[0]]]\n if not G.has_edge(w, word) and combination1(w, word))\n\n return G\n\ndef edges_from_hamming(G, words):\n '''\n Adds edges to G between nodes that have a Hamming distance of 1.\n '''\n reached_letters = set()\n letter_indexes, suffixes = dict(), dict()\n index = -1\n for word in words:\n index += 1\n if word[1:] not in suffixes:\n suffixes[word[1:]] = set([index])\n\n else:\n G.add_edges_from((words[i], word) for i in suffixes[word[1:]])\n suffixes[word[1:]].add(index)\n\n if word[0] not in reached_letters:\n reached_letters.add(word[0])\n letter_indexes[word[0]] = index\n\n else:\n G.add_edges_from((w, word)\n for w in words[letter_indexes[word[0]]:index]\n if hamming1(w[1:], word[1:]))\n\n return G\n\ndef generate_graph(words):\n '''\n Generate graph of connected words.\n '''\n from string import ascii_lowercase as lowercase\n G = nx.Graph(name=\"words\")\n G.add_nodes_from(words)\n words = sorted(words)\n if len(sys.argv) > 2 and sys.argv[2] == \"-c\":\n G = edges_from_combo(G, words)\n else:\n G = edges_from_hamming(G, words)\n\n return G\n\ndef run_tests(examples, G):\n '''\n Finds the shortest path for given test cases.\n '''\n for (source, target) in examples:\n print(\"Shortest path between %s and %s is\"%(source, target))\n try:\n sp = nx.shortest_path(G, source, target)\n for n in sp:\n print(n)\n except nx.NetworkXNoPath:\n print(\"None\")\n\ndef print_stats(G):\n '''\n Prints stats of graph.\n '''\n print(\"Loaded %s\"%sys.argv[1])\n print(\"Two words are connected if they differ in one letter.\")\n print(\"Graph has %d nodes with %d edges\"\n %(nx.number_of_nodes(G), nx.number_of_edges(G)))\n print(\"%d connected components\" % nx.number_connected_components(G))\n\ndef run():\n '''\n Runs mains components of program.\n '''\n arg_check()\n words = parse_input()\n word_length = len(next(iter(words)))\n G = generate_graph(words)\n print_stats(G)\n\n if len(sys.argv) > 2 and sys.argv[2] == \"-c\":\n examples = [('chaos', 'order')]\n run_tests(examples, G)\n\n elif word_length == 5:\n examples = [('chaos', 'order'),\n ('nodes', 'graph'),\n ('moron', 'smart'),\n ('pound', 'marks')]\n run_tests(examples, G)\n\n elif word_length == 4:\n examples = [('cold', 'warm'),\n ('love', 'hate')]\n run_tests(examples, G)\n\n# ============================================================================ #\nif __name__ == \"__main__\":\n 
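A portability note on the helpers above: `from itertools import izip` exists only on Python 2; on Python 3 the built-in zip is already lazy, so the import fails. As written, hamming1 also returns True for identical strings (distance 0), not only for distance 1. A version-portable, strictly distance-1 check might look like this sketch:

try:
    from itertools import izip  # Python 2
except ImportError:
    izip = zip                  # Python 3: zip is already an iterator

def hamming_is_one(s1, s2):
    # True only when the Hamming distance is exactly 1.
    assert len(s1) == len(s2), "mismatched lengths: %s, %s" % (s1, s2)
    return sum(c1 != c2 for c1, c2 in izip(s1, s2)) == 1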
run()\n","sub_path":"lab7/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6998023","text":"import RPi.GPIO as GPIO #linha 1\nimport time #linha 2\n\n#========| FUNÇÃO ANDAR FRENTE |==============\ndef frente(list, velocidade):\n print(\"FUNÇÃO FRENTE\")\n\n GPIO.setmode(GPIO.BOARD) #linha 8\n GPIO.setup(36,GPIO.OUT) #linha 9\n GPIO.setup(35,GPIO.OUT) #linha 10\n \n pwm= GPIO.PWM(36,100) #linha 12\n pwm.start(0) #linha 13\n pwm.ChangeDutyCycle(velocidade) #linha 14\n \n pwm2 = GPIO.PWM(35,100) #linha 16\n pwm2.start(0) #linha 17\n pwm2.ChangeDutyCycle(velocidade) #linha 18\n\n for i in range (0, len(list)): \n print(\"Setando porta %d como GPIO\", list[i])\n GPIO.setup(list[i],GPIO.OUT) #linha 22\n\n GPIO.output(list[0], GPIO.LOW) #linha 24\n GPIO.output(list[1], GPIO.HIGH) #linha 25\n\n GPIO.output(list[2], GPIO.HIGH) #linha 27\n GPIO.output(list[3], GPIO.LOW) #linha 28\n\n time.sleep(2) #linha 30\n \n#========| FUNÇÃO ANDAR ATRAS |==============\ndef atras(list, velocidade):\n print(\"FUNÇÃO TRAS\")\n\n GPIO.setmode(GPIO.BOARD) #linha 36\n\n GPIO.setup(36,GPIO.OUT) #linha 38\n pwm = GPIO.PWM(36,100) #linha 39\n pwm.start(0) #linha 40\n pwm.ChangeDutyCycle(velocidade) #linha 41\n \n GPIO.setup(35,GPIO.OUT) #linha 43\n pwm2 = GPIO.PWM(35,100) #linha 44\n pwm2.start(0) #linha 45\n pwm2.ChangeDutyCycle(velocidade) #linha 46\n \n for i in range (0, len(list)):\n print(\"Setando porta %d como GPIO\", list[i])\n GPIO.setup(list[i],GPIO.OUT) #linha 50\n\n GPIO.output(list[0], GPIO.HIGH) #linha 52\n GPIO.output(list[1], GPIO.LOW) #linha 53\n\n GPIO.output(list[2], GPIO.LOW) #linha 55\n GPIO.output(list[3], GPIO.HIGH) #linha 56\n\n time.sleep(2)\n\n#=========| FUNÇÃO VIRAR ESQUERDA |==============\ndef direita(list, velocidade):\n print(\"FUNÇÃO VIRAR DIREITA\")\n\n GPIO.setmode(GPIO.BOARD) #linha 64\n \n GPIO.setup(36,GPIO.OUT) #linha 66\n pwm = GPIO.PWM(36,100) #linha 67\n pwm.start(0) #linha 68\n pwm.ChangeDutyCycle(velocidade) #linha 69\n\n for i in range (0, len(list)):\n print(\"Setando porta %d como GPIO\", list[i])\n GPIO.setup(list[i],GPIO.OUT) #linha 73\n \n time.sleep(2) #linha 75\n\n#========| FUNÇÃO VIRAR DIREITA |==============\ndef esquerda(list, velocidade):\n print(\"FUNÇÃO VIRAR ESQUERDA\")\n\n GPIO.setmode(GPIO.BOARD) #linha 81\n \n GPIO.setup(35,GPIO.OUT) #linha 83\n pwm2 = GPIO.PWM(35,100) #linha 84\n pwm2.start(0) #linha 85\n pwm2.ChangeDutyCycle(velocidade) #linha 86\n\n\n for i in range (0, len(list)):\n print(\"Setando porta %d como GPIO\", list[i])\n GPIO.setup(list[i],GPIO.OUT) #linha 91\n \n time.sleep(2)\n \n#========| FUNÇÃO LIMPAR |==============\ndef limpar():\n print(\"LIBERANDO PORTAS\")\n GPIO.cleanup() #linha 98","sub_path":"modulo_motores_teste (outra cópia).py","file_name":"modulo_motores_teste (outra cópia).py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"458205751","text":"import os\nimport pandas as pd\nimport glob as glob\nfrom datetime import date\nfrom next.io.pipelines import Pipeline\nfrom next.io.fileutilities import get_clean_headers_for_redshift, convert_shared_drive_path\n\nschema = 'safelite'\ntable = 'publisher_spend_reporting'\noverlap_keys = ['date', 'placement_id', 'site_id']\nrs_credentials = {'schema': 'safelite'}\ns3_credentials = {'prefix': 'nextengineering/client/safelite/publisher'}\n\npathname = 
'*.xlsx'\ntoday = date.today().strftime('%Y-%m-%d')\nfile_output = 'display_yahoo_spend_{}.csv'.format(today)\nfile_path = convert_shared_drive_path(r'K:\\Direct Response\\Direct Response Planning\\Safelite\\Weekly Reporting\\2020 Revamp\\Publisher Spend\\Yahoo')\n\ndcm_site_name = 'Yahoo'\ndcm_site_id = 31973\nmetric_list = ['impressions', 'clicks', 'spend']\ndim_list = ['date', 'advertiser_name', 'advertiser_id', 'campaign_name', 'campaign_id', 'placement_name', 'placement_id', 'site_name', 'site_id', 'impressions', 'clicks', 'spend']\n\ndef main():\n os.chdir(file_path)\n p = Pipeline(rs_credentials=rs_credentials, s3_credentials=s3_credentials)\n list_of_excel_files = glob.glob(pathname=pathname)\n\n for excel in list_of_excel_files:\n df = pd.read_excel(excel, encoding='ISO-8859-1')\n df.columns = get_clean_headers_for_redshift(df)\n df.rename(columns = {'day': 'date', 'spend_usd': 'spend'}, inplace=True)\n df['site_name'] = dcm_site_name \n df['site_id'] = dcm_site_id\n\n for metric in metric_list:\n if df[metric].dtype == 'O':\n df[metric] = df[metric].str.replace('$', '').str.replace(',', '').str.replace('-', '0').str.strip()\n df[metric] = df[metric].fillna(0)\n else:\n df[metric] = df[metric].fillna(0)\n \n df = df[dim_list]\n df.to_csv(file_output, index=False)\n p.local_to_redshift(file_path=file_output, schema=schema, table=table, overlap_keys=overlap_keys)\n os.remove(file_output)\n \n p.close()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Safelite/Historical Data/yahoo_historical_spend.py","file_name":"yahoo_historical_spend.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290775344","text":"\"\"\"\nTests for RecordType module and view\n\nNote: this module tests for rendering specifically for RecordType values, using\ntype description sitedata files, and as such duplicates some tests covered by\nmodule test_entitygenericedit.\n\"\"\"\n\n__author__ = \"Graham Klyne (GK@ACM.ORG)\"\n__copyright__ = \"Copyright 2014, G. Klyne\"\n__license__ = \"MIT (http://opensource.org/licenses/MIT)\"\n\nimport os\nimport unittest\n\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.http import QueryDict\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase # cf. 
https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions\nfrom django.test.client import Client\n\nfrom annalist.identifiers import RDF, RDFS, ANNAL\nfrom annalist import layout\nfrom annalist.models.site import Site\nfrom annalist.models.sitedata import SiteData\nfrom annalist.models.collection import Collection\nfrom annalist.models.entitydata import EntityData\nfrom annalist.models.recordtype import RecordType\nfrom annalist.models.recordtypedata import RecordTypeData\nfrom annalist.models.recordview import RecordView\nfrom annalist.models.recordlist import RecordList\n\nfrom annalist.views.recordtypedelete import RecordTypeDeleteConfirmedView\n\nfrom tests import TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir\nfrom tests import init_annalist_test_site\nfrom AnnalistTestCase import AnnalistTestCase\nfrom entity_testutils import (\n site_dir, collection_dir,\n site_view_url, collection_edit_url, \n collection_entity_view_url,\n collection_create_values,\n create_test_user\n )\nfrom entity_testtypedata import (\n recordtype_dir,\n recordtype_coll_url, recordtype_site_url, recordtype_url, recordtype_edit_url,\n recordtype_value_keys, recordtype_load_keys, \n recordtype_create_values, recordtype_values, recordtype_read_values,\n recordtype_entity_view_context_data, \n recordtype_entity_view_form_data, recordtype_delete_confirm_form_data\n )\nfrom entity_testentitydata import (\n entity_url, entitydata_edit_url, entitydata_list_type_url,\n default_fields, default_label, default_comment, error_label,\n layout_classes\n )\n\n# -----------------------------------------------------------------------------\n#\n# RecordType tests\n#\n# -----------------------------------------------------------------------------\n\nclass RecordTypeTest(AnnalistTestCase):\n \"\"\"\n Tests for RecordType object interface\n \"\"\"\n\n def setUp(self):\n init_annalist_test_site()\n self.testsite = Site(TestBaseUri, TestBaseDir)\n self.sitedata = SiteData(self.testsite)\n self.testcoll = Collection(self.testsite, \"testcoll\")\n return\n\n def tearDown(self):\n return\n\n def test_RecordTypeTest(self):\n self.assertEqual(RecordType.__name__, \"RecordType\", \"Check RecordType class name\")\n return\n\n def test_recordtype_init(self):\n t = RecordType(self.testcoll, \"testtype\", self.testsite)\n u = recordtype_coll_url(self.testsite, coll_id=\"testcoll\", type_id=\"testtype\")\n self.assertEqual(t._entitytype, ANNAL.CURIE.Type)\n self.assertEqual(t._entityfile, layout.TYPE_META_FILE)\n self.assertEqual(t._entityref, layout.META_TYPE_REF)\n self.assertEqual(t._entityid, \"testtype\")\n self.assertEqual(t._entityurl, u)\n self.assertEqual(t._entitydir, recordtype_dir(type_id=\"testtype\"))\n self.assertEqual(t._values, None)\n return\n\n def test_recordtype1_data(self):\n t = RecordType(self.testcoll, \"type1\", self.testsite)\n self.assertEqual(t.get_id(), \"type1\")\n self.assertEqual(t.get_type_id(), \"_type\")\n self.assertIn(\"/c/testcoll/_annalist_collection/types/type1/\", t.get_url())\n self.assertEqual(TestBaseUri + \"/c/testcoll/d/_type/type1/\", t.get_view_url())\n t.set_values(recordtype_create_values(type_id=\"type1\"))\n td = t.get_values()\n self.assertEqual(set(td.keys()), set(recordtype_value_keys()))\n v = recordtype_values(type_id=\"type1\")\n self.assertDictionaryMatch(td, v)\n return\n\n def test_recordtype2_data(self):\n t = RecordType(self.testcoll, \"type2\", self.testsite)\n self.assertEqual(t.get_id(), \"type2\")\n self.assertEqual(t.get_type_id(), \"_type\")\n 
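assertDictionaryMatch is inherited from AnnalistTestCase and is not shown in this module. These tests rely only on subset semantics -- every expected key present with an equal value -- so an equivalent helper could look like the sketch below (an assumption about the real implementation, which may also report richer diagnostics):

def assertDictionaryMatch(self, actual, expected):
    # Assumed semantics: each expected key must appear in `actual` with an
    # equal value; keys present only in `actual` are ignored.
    for key, value in expected.items():
        self.assertIn(key, actual)
        self.assertEqual(actual[key], value)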
self.assertIn(\"/c/testcoll/_annalist_collection/types/type2/\", t.get_url())\n self.assertEqual(TestBaseUri + \"/c/testcoll/d/_type/type2/\", t.get_view_url())\n t.set_values(recordtype_create_values(type_id=\"type2\"))\n td = t.get_values()\n self.assertEqual(set(td.keys()), set(recordtype_value_keys()))\n v = recordtype_values(type_id=\"type2\")\n self.assertDictionaryMatch(td, v)\n return\n\n def test_recordtype_create_load(self):\n t = RecordType.create(self.testcoll, \"type1\", recordtype_create_values(type_id=\"type1\"))\n td = RecordType.load(self.testcoll, \"type1\").get_values()\n v = recordtype_read_values(type_id=\"type1\")\n self.assertKeysMatch(td, v)\n self.assertDictionaryMatch(td, v)\n return\n\n def test_recordtype_default_data(self):\n t = RecordType.load(self.testcoll, \"Default_type\", altparent=self.testsite)\n self.assertEqual(t.get_id(), \"Default_type\")\n self.assertIn(\"/c/testcoll/_annalist_collection/types/Default_type\", t.get_url())\n self.assertEqual(t.get_type_id(), \"_type\")\n td = t.get_values()\n self.assertEqual(set(td.keys()), set(recordtype_load_keys(type_uri=True)))\n v = recordtype_read_values(type_id=\"Default_type\")\n v.update(\n { 'rdfs:label': 'Default record type'\n , 'rdfs:comment': 'Default record type, applied when no type is specified when creating a record.'\n , 'annal:uri': 'annal:Default_type'\n })\n self.assertDictionaryMatch(td, v)\n return\n\n# -----------------------------------------------------------------------------\n#\n# RecordTypeEditView tests\n#\n# -----------------------------------------------------------------------------\n\nclass RecordTypeEditViewTest(AnnalistTestCase):\n \"\"\"\n Tests for record type edit views\n \"\"\"\n\n def setUp(self):\n init_annalist_test_site()\n self.testsite = Site(TestBaseUri, TestBaseDir)\n self.testcoll = Collection.create(self.testsite, \"testcoll\", collection_create_values(\"testcoll\"))\n self.no_options = ['(no options)']\n self.view_options = sorted(\n [ vid for vid in self.testcoll.child_entity_ids(RecordView, self.testsite) \n if vid != \"_initial_values\"\n ])\n self.list_options = sorted(\n [ lid for lid in self.testcoll.child_entity_ids(RecordList, self.testsite) \n if lid != \"_initial_values\"\n ])\n # For checking Location: header values...\n self.continuation_url = TestHostUri + entitydata_list_type_url(coll_id=\"testcoll\", type_id=\"_type\")\n # Login and permissions\n create_test_user(self.testcoll, \"testuser\", \"testpassword\")\n self.client = Client(HTTP_HOST=TestHost)\n loggedin = self.client.login(username=\"testuser\", password=\"testpassword\")\n self.assertTrue(loggedin)\n return\n\n def tearDown(self):\n return\n\n # -----------------------------------------------------------------------------\n # Helpers\n # -----------------------------------------------------------------------------\n\n def _create_record_type(self, type_id, entity_id=\"testentity\"):\n \"Helper function creates record type entry with supplied type_id\"\n t = RecordType.create(self.testcoll, type_id, recordtype_create_values(type_id=type_id))\n d = RecordTypeData.create(self.testcoll, type_id, {})\n e = EntityData.create(d, entity_id, {})\n return (t, d, e)\n\n def _check_record_type_values(self, type_id, update=\"RecordType\"):\n \"Helper function checks content of record type entry with supplied type_id\"\n self.assertTrue(RecordType.exists(self.testcoll, type_id))\n t = RecordType.load(self.testcoll, type_id)\n self.assertEqual(t.get_id(), type_id)\n self.assertEqual(t.get_view_url(), 
TestHostUri + recordtype_url(\"testcoll\", type_id))\n v = recordtype_values(type_id=type_id, update=update)\n self.assertDictionaryMatch(t.get_values(), v)\n return t\n\n def _check_context_fields(self, response, \n type_id=\"(?type_id)\", \n type_label=\"(?type_label)\",\n type_help=\"(?type_help)\",\n type_uri=\"(?type_uri)\",\n type_view=\"Default_view\",\n type_list=\"Default_list\"\n ):\n r = response\n self.assertEqual(len(r.context['fields']), 7)\n # 1st field - Id\n type_id_help = (\n \"A short identifier that distinguishes this type from all other types in the same collection.\"\n )\n self.assertEqual(r.context['fields'][0]['field_id'], 'Type_id')\n self.assertEqual(r.context['fields'][0]['field_name'], 'entity_id')\n self.assertEqual(r.context['fields'][0]['field_label'], 'Id')\n self.assertEqual(r.context['fields'][0]['field_help'], type_id_help)\n self.assertEqual(r.context['fields'][0]['field_placeholder'], \"(type id)\")\n self.assertEqual(r.context['fields'][0]['field_property_uri'], \"annal:id\")\n self.assertEqual(r.context['fields'][0]['field_placement'].field, \"small-12 medium-6 columns\")\n self.assertEqual(r.context['fields'][0]['field_value_type'], \"annal:Slug\")\n self.assertEqual(r.context['fields'][0]['field_value'], type_id)\n self.assertEqual(r.context['fields'][0]['options'], self.no_options)\n # 2nd field - Label\n type_label_help = (\n \"Short string used to describe record type when displayed\"\n )\n self.assertEqual(r.context['fields'][1]['field_id'], 'Type_label')\n self.assertEqual(r.context['fields'][1]['field_name'], 'Type_label')\n self.assertEqual(r.context['fields'][1]['field_label'], 'Label')\n self.assertEqual(r.context['fields'][1]['field_help'], type_label_help)\n self.assertEqual(r.context['fields'][1]['field_placeholder'], \"(label)\")\n self.assertEqual(r.context['fields'][1]['field_property_uri'], \"rdfs:label\")\n self.assertEqual(r.context['fields'][1]['field_placement'].field, \"small-12 columns\")\n self.assertEqual(r.context['fields'][1]['field_value_type'], \"annal:Text\")\n self.assertEqual(r.context['fields'][1]['field_value'], type_label)\n self.assertEqual(r.context['fields'][1]['options'], self.no_options)\n # 3rd field - comment\n type_comment_help = (\n \"Descriptive text about a record type\"\n )\n type_comment_placeholder = (\n \"(type description)\"\n )\n self.assertEqual(r.context['fields'][2]['field_id'], 'Type_comment')\n self.assertEqual(r.context['fields'][2]['field_name'], 'Type_comment')\n self.assertEqual(r.context['fields'][2]['field_label'], 'Comment')\n self.assertEqual(r.context['fields'][2]['field_help'], type_comment_help)\n self.assertEqual(r.context['fields'][2]['field_placeholder'], type_comment_placeholder)\n self.assertEqual(r.context['fields'][2]['field_property_uri'], \"rdfs:comment\")\n self.assertEqual(r.context['fields'][2]['field_placement'].field, \"small-12 columns\")\n self.assertEqual(r.context['fields'][2]['field_value_type'], \"annal:Longtext\")\n self.assertEqual(r.context['fields'][2]['field_value'], type_help)\n self.assertEqual(r.context['fields'][2]['options'], self.no_options)\n # 4th field - URI\n type_uri_help = (\n \"Entity type URI\"\n )\n type_uri_placeholder = (\n \"(URI)\"\n )\n self.assertEqual(r.context['fields'][3]['field_id'], 'Type_uri')\n self.assertEqual(r.context['fields'][3]['field_name'], 'Type_uri')\n self.assertEqual(r.context['fields'][3]['field_label'], 'URI')\n self.assertEqual(r.context['fields'][3]['field_help'], type_uri_help)\n 
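The per-field assertions in _check_context_fields (field 3 continues just below) all repeat one pattern, so the same checks can be driven from a table. A sketch using only values already asserted in this method:

# (index, field_id, field_name, label, placeholder)
expected = [
    (0, 'Type_id',      'entity_id',    'Id',      '(type id)'),
    (1, 'Type_label',   'Type_label',   'Label',   '(label)'),
    (2, 'Type_comment', 'Type_comment', 'Comment', '(type description)'),
    (3, 'Type_uri',     'Type_uri',     'URI',     '(URI)'),
    ]
for i, field_id, name, label, placeholder in expected:
    self.assertEqual(r.context['fields'][i]['field_id'], field_id)
    self.assertEqual(r.context['fields'][i]['field_name'], name)
    self.assertEqual(r.context['fields'][i]['field_label'], label)
    self.assertEqual(r.context['fields'][i]['field_placeholder'], placeholder)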
self.assertEqual(r.context['fields'][3]['field_placeholder'], type_uri_placeholder)\n self.assertEqual(r.context['fields'][3]['field_property_uri'], \"annal:uri\")\n self.assertEqual(r.context['fields'][3]['field_placement'].field, \"small-12 columns\")\n self.assertEqual(r.context['fields'][3]['field_value_type'], \"annal:Identifier\")\n self.assertEqual(r.context['fields'][3]['field_value'], type_uri)\n self.assertEqual(r.context['fields'][3]['options'], self.no_options)\n # 5th field - view id\n type_view_id_help = (\n \"Default view id for this type\"\n )\n type_view_id_placeholder = (\n \"(view id)\"\n )\n self.assertEqual(r.context['fields'][4]['field_id'], 'Type_view')\n self.assertEqual(r.context['fields'][4]['field_name'], 'Type_view')\n self.assertEqual(r.context['fields'][4]['field_label'], 'Default view')\n self.assertEqual(r.context['fields'][4]['field_help'], type_view_id_help)\n self.assertEqual(r.context['fields'][4]['field_placeholder'], type_view_id_placeholder)\n self.assertEqual(r.context['fields'][4]['field_property_uri'], \"annal:type_view\")\n self.assertEqual(r.context['fields'][4]['field_placement'].field, \"small-6 columns\")\n self.assertEqual(r.context['fields'][4]['field_value_type'], \"annal:View\")\n self.assertEqual(r.context['fields'][4]['field_value'], type_view)\n self.assertEqual(r.context['fields'][4]['options'], self.view_options)\n # 6th field - list id\n type_list_id_help = (\n \"Default list id for this type\"\n )\n type_list_id_placeholder = (\n \"(list id)\"\n )\n self.assertEqual(r.context['fields'][5]['field_id'], 'Type_list')\n self.assertEqual(r.context['fields'][5]['field_name'], 'Type_list')\n self.assertEqual(r.context['fields'][5]['field_label'], 'Default list')\n self.assertEqual(r.context['fields'][5]['field_help'], type_list_id_help)\n self.assertEqual(r.context['fields'][5]['field_placeholder'], type_list_id_placeholder)\n self.assertEqual(r.context['fields'][5]['field_property_uri'], \"annal:type_list\")\n self.assertEqual(r.context['fields'][5]['field_placement'].field, \"small-6 columns\")\n self.assertEqual(r.context['fields'][5]['field_value_type'], \"annal:List\")\n self.assertEqual(r.context['fields'][5]['field_value'], type_list)\n self.assertEqual(r.context['fields'][5]['options'], self.list_options)\n return\n\n # -----------------------------------------------------------------------------\n # Form rendering tests\n # -----------------------------------------------------------------------------\n\n def test_get_form_rendering(self):\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.get(u+\"?continuation_url=/xyzzy/\")\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n # log.info(r.content)\n self.assertContains(r, \"Collection testcoll\")\n self.assertContains(r, \"
'_type' data in collection 'testcoll'\")\n # NOTE: the expected form-row HTML strings below were corrupted during text\n # extraction; the markup is unrecoverable, so only the surviving field labels\n # are kept as short placeholders.\n field_vals = default_fields(coll_id=\"testcoll\", type_id=\"_type\", entity_id=\"00000001\")\n formrow1 = \"\"\"(form-row markup for field 'Id' lost in extraction)\"\"\"%field_vals(width=6)\n formrow2 = \"\"\"(form-row markup for field 'Label' lost in extraction)\"\"\"%field_vals(width=12)\n formrow3 = \"\"\"(form-row markup for field 'Comment' lost in extraction)\"\"\"%field_vals(width=12)\n formrow4 = \"\"\"(form-row markup for field 'URI' lost in extraction)\"\"\"%field_vals(width=12)\n formrow5 = (\"\"\"(form-row markup for the default-view selector lost in extraction)
    \n \"\"\")%field_vals(width=12)\n self.assertContains(r, formrow1, html=True)\n self.assertContains(r, formrow2, html=True)\n self.assertContains(r, formrow3, html=True)\n self.assertContains(r, formrow4, html=True)\n self.assertContains(r, formrow5, html=True)\n return\n\n def test_get_new(self):\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.get(u+\"?continuation_url=/xyzzy/\")\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n # Test context\n type_url = collection_entity_view_url(coll_id=\"testcoll\", type_id=\"_type\", entity_id=\"00000001\")\n self.assertEqual(r.context['coll_id'], \"testcoll\")\n self.assertEqual(r.context['type_id'], \"_type\")\n self.assertEqual(r.context['entity_id'], \"00000001\")\n self.assertEqual(r.context['orig_id'], \"00000001\")\n self.assertEqual(r.context['entity_url'], type_url)\n self.assertEqual(r.context['entity_uri'], None)\n self.assertEqual(r.context['action'], \"new\")\n self.assertEqual(r.context['continuation_url'], \"/xyzzy/\")\n # Fields\n self._check_context_fields(r, \n type_id=\"00000001\",\n type_label=default_label(\"testcoll\", \"_type\", \"00000001\"),\n type_help=default_comment(\"testcoll\", \"_type\", \"00000001\"),\n type_uri=\"\"\n )\n return\n\n def test_get_copy(self):\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.get(u)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n # Test context (values read from test data fixture)\n type_url = collection_entity_view_url(coll_id=\"testcoll\", type_id=\"_type\", entity_id=\"Default_type\")\n self.assertEqual(r.context['coll_id'], \"testcoll\")\n self.assertEqual(r.context['type_id'], \"_type\")\n self.assertEqual(r.context['entity_id'], \"Default_type\")\n self.assertEqual(r.context['orig_id'], \"Default_type\")\n self.assertEqual(r.context['entity_url'], type_url)\n self.assertEqual(r.context['entity_uri'], None)\n self.assertEqual(r.context['action'], \"copy\")\n self.assertEqual(r.context['continuation_url'], \"\")\n # Fields\n self._check_context_fields(r, \n type_id=\"Default_type\",\n type_label=\"Default record type\",\n type_help=\"Default record type, applied when no type is specified when creating a record.\",\n type_uri=\"\"\n )\n return\n\n def test_get_copy_not_exists(self):\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"notype\", view_id=\"Type_view\")\n r = self.client.get(u)\n # log.info(r.content)\n self.assertEqual(r.status_code, 404)\n self.assertEqual(r.reason_phrase, \"Not found\")\n self.assertContains(r, \"Annalist error\", status_code=404)\n self.assertContains(r, \"
404: Not found\", status_code=404)\n err_label = error_label(\"testcoll\", \"_type\", \"notype\")\n self.assertContains(r, \"%s does not exist
    \"%(err_label), status_code=404)\n return\n\n def test_get_edit(self):\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.get(u)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n # Test context (values read from test data fixture)\n type_url = collection_entity_view_url(coll_id=\"testcoll\", type_id=\"_type\", entity_id=\"Default_type\")\n self.assertEqual(r.context['coll_id'], \"testcoll\")\n self.assertEqual(r.context['type_id'], \"_type\")\n self.assertEqual(r.context['entity_id'], \"Default_type\")\n self.assertEqual(r.context['orig_id'], \"Default_type\")\n self.assertEqual(r.context['entity_url'], type_url)\n self.assertEqual(r.context['entity_uri'], \"annal:Default_type\")\n self.assertEqual(r.context['action'], \"edit\")\n self.assertEqual(r.context['continuation_url'], \"\")\n # Fields\n self._check_context_fields(r, \n type_id=\"Default_type\",\n type_label=\"Default record type\",\n type_help=\"Default record type, applied when no type is specified when creating a record.\",\n type_uri=\"annal:Default_type\"\n )\n return\n\n def test_get_edit_not_exists(self):\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"notype\", view_id=\"Type_view\")\n r = self.client.get(u)\n # log.info(r.content)\n self.assertEqual(r.status_code, 404)\n self.assertEqual(r.reason_phrase, \"Not found\")\n self.assertContains(r, \"Annalist error\", status_code=404)\n self.assertContains(r, \"
404: Not found\", status_code=404)\n err_label = error_label(\"testcoll\", \"_type\", \"notype\")\n self.assertContains(r, \"%s does not exist
    \"%(err_label), status_code=404)\n return\n\n # -----------------------------------------------------------------------------\n # Form response tests\n # -----------------------------------------------------------------------------\n\n # -------- new type --------\n\n def test_post_new_type(self):\n self.assertFalse(RecordType.exists(self.testcoll, \"newtype\"))\n f = recordtype_entity_view_form_data(type_id=\"newtype\", action=\"new\", update=\"RecordType\")\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n # print r.content\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that new record type exists\n self._check_record_type_values(\"newtype\", update=\"RecordType\")\n return\n\n def test_post_new_type_cancel(self):\n self.assertFalse(RecordType.exists(self.testcoll, \"newtype\"))\n f = recordtype_entity_view_form_data(\n type_id=\"newtype\", action=\"new\", cancel=\"Cancel\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that new record type still does not exist\n self.assertFalse(RecordType.exists(self.testcoll, \"newtype\"))\n return\n\n def test_post_new_type_missing_id(self):\n f = recordtype_entity_view_form_data(action=\"new\", update=\"RecordType\")\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n # print r.content\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n # Test context\n expect_context = recordtype_entity_view_context_data(action=\"new\", update=\"RecordType\")\n self.assertDictionaryMatch(r.context, expect_context)\n return\n\n def test_post_new_type_invalid_id(self):\n f = recordtype_entity_view_form_data(\n type_id=\"!badtype\", orig_id=\"orig_type_id\", action=\"new\", update=\"RecordType\"\n )\n u = entitydata_edit_url(\"new\", \"testcoll\", \"_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n # Test context\n expect_context = recordtype_entity_view_context_data(\n type_id=\"!badtype\", orig_id=\"orig_type_id\", action=\"new\", update=\"RecordType\"\n )\n self.assertDictionaryMatch(r.context, expect_context)\n return\n\n # -------- copy type --------\n\n def test_post_copy_type(self):\n self.assertFalse(RecordType.exists(self.testcoll, \"copytype\"))\n f = recordtype_entity_view_form_data(\n type_id=\"copytype\", orig_id=\"Default_type\", action=\"copy\", update=\"RecordType\"\n )\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that new record type exists\n self._check_record_type_values(\"copytype\", update=\"RecordType\")\n return\n\n def test_post_copy_type_cancel(self):\n self.assertFalse(RecordType.exists(self.testcoll, \"copytype\"))\n f = recordtype_entity_view_form_data(\n type_id=\"copytype\", orig_id=\"Default_type\", action=\"copy\", cancel=\"Cancel\", update=\"RecordType\"\n )\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that target record type still does not exist\n self.assertFalse(RecordType.exists(self.testcoll, \"copytype\"))\n return\n\n def test_post_copy_type_missing_id(self):\n f = recordtype_entity_view_form_data(\n action=\"copy\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n expect_context = recordtype_entity_view_context_data(action=\"copy\", update=\"Updated RecordType\")\n self.assertDictionaryMatch(r.context, expect_context)\n return\n\n def test_post_copy_type_invalid_id(self):\n f = recordtype_entity_view_form_data(\n type_id=\"!badtype\", orig_id=\"Default_type\", action=\"copy\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"copy\", \"testcoll\", \"_type\", entity_id=\"Default_type\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n expect_context = recordtype_entity_view_context_data(\n type_id=\"!badtype\", orig_id=\"Default_type\", \n action=\"copy\", update=\"Updated RecordType\"\n )\n self.assertDictionaryMatch(r.context, expect_context)\n return\n\n # -------- edit type --------\n\n def test_post_edit_type(self):\n self._create_record_type(\"edittype\")\n self._check_record_type_values(\"edittype\")\n f = recordtype_entity_view_form_data(\n type_id=\"edittype\", orig_id=\"edittype\", \n action=\"edit\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"edittype\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that new record type exists\n self._check_record_type_values(\"edittype\", update=\"Updated RecordType\")\n return\n\n def test_post_edit_type_new_id(self):\n # Check logic applied when type is renamed\n (t, d1, e1) = self._create_record_type(\"edittype1\", entity_id=\"typeentity\")\n self.assertTrue(RecordType.exists(self.testcoll, \"edittype1\"))\n self.assertFalse(RecordType.exists(self.testcoll, \"edittype2\"))\n self.assertTrue(RecordTypeData.exists(self.testcoll, \"edittype1\"))\n self.assertFalse(RecordTypeData.exists(self.testcoll, \"edittype2\"))\n self.assertTrue(EntityData.exists(d1, \"typeentity\"))\n self._check_record_type_values(\"edittype1\")\n f = recordtype_entity_view_form_data(\n type_id=\"edittype2\", orig_id=\"edittype1\", \n action=\"edit\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"edittype1\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that new record type exists and old does not\n self.assertFalse(RecordType.exists(self.testcoll, \"edittype1\"))\n self.assertTrue(RecordType.exists(self.testcoll, \"edittype2\"))\n self._check_record_type_values(\"edittype2\", update=\"Updated RecordType\")\n # Check that type data directory has been renamed\n self.assertFalse(RecordTypeData.exists(self.testcoll, \"edittype1\"))\n self.assertTrue(RecordTypeData.exists(self.testcoll, \"edittype2\"))\n self.assertFalse(EntityData.exists(d1, \"typeentity\"))\n d2 = RecordTypeData.load(self.testcoll, \"edittype2\")\n self.assertTrue(EntityData.exists(d2, \"typeentity\"))\n return\n\n def test_post_edit_type_cancel(self):\n self._create_record_type(\"edittype\")\n self._check_record_type_values(\"edittype\")\n f = recordtype_entity_view_form_data(\n type_id=\"edittype\", orig_id=\"edittype\", \n action=\"edit\", cancel=\"Cancel\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"edittype\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertEqual(r['location'], self.continuation_url)\n # Check that target record type still does not exist and unchanged\n self._check_record_type_values(\"edittype\")\n return\n\n def test_post_edit_type_missing_id(self):\n self._create_record_type(\"edittype\")\n self._check_record_type_values(\"edittype\")\n # Form post with ID missing\n f = 
recordtype_entity_view_form_data(\n action=\"edit\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"edittype\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n # Test context for re-rendered form\n expect_context = recordtype_entity_view_context_data(action=\"edit\", update=\"Updated RecordType\")\n self.assertDictionaryMatch(r.context, expect_context)\n # Check original data is unchanged\n self._check_record_type_values(\"edittype\")\n return\n\n def test_post_edit_type_invalid_id(self):\n self._create_record_type(\"edittype\")\n self._check_record_type_values(\"edittype\")\n # Form post with invalid ID\n f = recordtype_entity_view_form_data(\n type_id=\"!badtype\", orig_id=\"edittype\", action=\"edit\", update=\"Updated RecordType\"\n )\n u = entitydata_edit_url(\"edit\", \"testcoll\", \"_type\", entity_id=\"edittype\", view_id=\"Type_view\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.reason_phrase, \"OK\")\n self.assertContains(r, \"
Problem with record type identifier
    \")\n # Test context\n expect_context = recordtype_entity_view_context_data(\n type_id=\"!badtype\", orig_id=\"edittype\", \n action=\"edit\", update=\"Updated RecordType\"\n )\n self.assertDictionaryMatch(r.context, expect_context)\n # Check original data is unchanged\n self._check_record_type_values(\"edittype\")\n return\n\n# -----------------------------------------------------------------------------\n#\n# ConfirmRecordTypeDeleteTests tests for completion of record deletion\n#\n# -----------------------------------------------------------------------------\n\nclass ConfirmRecordTypeDeleteTests(AnnalistTestCase):\n \"\"\"\n Tests for record type deletion on response to confirmation form\n \"\"\"\n\n def setUp(self):\n init_annalist_test_site()\n self.testsite = Site(TestBaseUri, TestBaseDir)\n self.testcoll = Collection.create(self.testsite, \"testcoll\", collection_create_values(\"testcoll\"))\n # Login and permissions\n create_test_user(self.testcoll, \"testuser\", \"testpassword\")\n self.client = Client(HTTP_HOST=TestHost)\n loggedin = self.client.login(username=\"testuser\", password=\"testpassword\")\n self.assertTrue(loggedin)\n return\n\n def tearDown(self):\n return\n\n def test_CollectionActionViewTest(self):\n self.assertEqual(RecordTypeDeleteConfirmedView.__name__, \"RecordTypeDeleteConfirmedView\", \"Check RecordTypeDeleteConfirmedView class name\")\n return\n\n # NOTE: test_collection checks the appropriate response from clicking the delete button, \n # so here only need to test completion code.\n def test_post_confirmed_remove_type(self):\n t = RecordType.create(self.testcoll, \"deletetype\", recordtype_create_values(\"deletetype\"))\n self.assertTrue(RecordType.exists(self.testcoll, \"deletetype\"))\n # Submit positive confirmation\n u = TestHostUri + recordtype_edit_url(\"delete\", \"testcoll\")\n f = recordtype_delete_confirm_form_data(\"deletetype\")\n r = self.client.post(u, f)\n self.assertEqual(r.status_code, 302)\n self.assertEqual(r.reason_phrase, \"FOUND\")\n self.assertEqual(r.content, \"\")\n self.assertMatch(r['location'], \n \"^\"+TestHostUri+\n collection_edit_url(\"testcoll\")+\n r\"\\?info_head=.*&info_message=.*deletetype.*testcoll.*$\"\n )\n # Confirm deletion\n self.assertFalse(RecordType.exists(self.testcoll, \"deletetype\"))\n return\n\n# End.\n","sub_path":"src/annalist_root/annalist/tests/test_recordtype.py","file_name":"test_recordtype.py","file_ext":"py","file_size_in_byte":37947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520430501","text":"\"\"\"\nProblem Statement\nThis exercise is Part 1 of 4 of the birthday data exercise series. The other exercises are: Part 2, Part 3, and Part 4.\n\nFor this exercise, we will keep track of when our friend’s birthdays are, and be able to find that information based on their name.\nCreate a dictionary (in your file) of names and birthdays. When you run your program it should ask the user to enter a name, and\nreturn the birthday of that person back to them. The interaction should look something like this:\n\nHappy coding!\n\"\"\"\ndef main():\n birthday_dict = {}\n birthday_dict['Amit Shetty'] = '01/01/1991'\n birthday_dict['Amrita Sharma'] = '02/02/1992'\n birthday_dict['Amrit Shagun'] = '03/03/1993'\n birthday_dict['Anut Shana'] = '04/04/1994'\n # print(birthday_dict)\n\n print(\"Welcome to the birthday dictionary. 
We know the birthdays of:\\n\")\n for i in birthday_dict.keys():\n print(i)\n\n name_to_check = input(\"Who's birthday do you want to look up?\\n\").strip()\n if name_to_check in birthday_dict.keys():\n print(\"{}'s birthday is on {}\".format(name_to_check, birthday_dict[name_to_check]))\n else:\n print('Name entered does not match any records. Enter the full name as shown earlier.')\n\nif __name__ == '__main__':\n main()","sub_path":"PracticePythonOrg/Solutions/33_Birthday_Dict.py","file_name":"33_Birthday_Dict.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"168858025","text":"#!/usr/bin/env python\n\nLIMIT = 101\n\nflags = [False] * LIMIT\n\nfor i in range(2, LIMIT):\n if flags[i]:\n continue\n\n print(i, end=' ')\n for j in range(2 * i, LIMIT, i):\n flags[j] = True\n\nprint()\n","sub_path":"sieve_answer.py","file_name":"sieve_answer.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67382034","text":"# coding=utf-8\nimport os\nimport os.path\nimport shutil\nimport re\nimport subprocess\nimport threading\nimport time\n\nfrom django.conf import settings\n\nfrom main.uploader import utils\n\ncurrent_chunked = dict()\nthread_lock = threading.Lock()\n\n\ndef handle_upload(f, fileattrs):\n dest = os.path.join(settings.UPLOAD_DIRECTORY, fileattrs[\"qquuid\"], fileattrs[\"qqfilename\"])\n path = dest\n if fileattrs[\"qqtotalparts\"] is None:\n os.makedirs(os.path.dirname(dest))\n chunked = False\n elif int(fileattrs[\"qqtotalparts\"]) <= 1:\n os.makedirs(os.path.dirname(dest))\n chunked = False\n else:\n chunked = True\n dest = os.path.join(settings.CHUNKS_DIRECTORY, fileattrs[\"qquuid\"], fileattrs[\"qqfilename\"].replace(\".\", \"_\"),\n str(fileattrs[\"qqpartindex\"]))\n thread_lock.acquire()\n if fileattrs[\"qquuid\"] not in current_chunked:\n os.makedirs(os.path.dirname(dest))\n current_chunked[fileattrs[\"qquuid\"]] = 1\n else:\n current_chunked[fileattrs[\"qquuid\"]] += 1\n thread_lock.release()\n utils.save_upload(f, dest)\n if chunked:\n if fileattrs[\"qqtotalparts\"] - 1 == fileattrs[\"qqpartindex\"]:\n while True:\n if current_chunked[fileattrs[\"qquuid\"]] == fileattrs[\"qqtotalparts\"]:\n os.makedirs(os.path.dirname(path))\n utils.combine_chunks(fileattrs[\"qqtotalparts\"], os.path.dirname(dest), path)\n shutil.rmtree(os.path.dirname(os.path.dirname(dest)))\n del current_chunked[fileattrs[\"qquuid\"]]\n break\n else:\n print(\"[Information] Waiting For Asynchronous Upload To Finish\")\n time.sleep(0.5)\n else:\n return {\"success\": True}\n filetype = utils.get_filetype(path)\n if filetype == \"unknown\":\n shutil.rmtree(os.path.dirname(path))\n return {\"success\": False, \"preventRetry\": True, \"error\": \"该文件的类型不可识别,请上传一个有效的FLAC/WAV/ALAC文件。\"}\n filename_without_ext = re.search(r\"(.+)\\.(.+)$\", fileattrs[\"qqfilename\"]).group(1)\n if filetype == \"WAV\":\n new_path = path\n else:\n new_filename = filename_without_ext + \".wav\"\n new_path = os.path.join(settings.UPLOAD_DIRECTORY, fileattrs[\"qquuid\"], new_filename)\n ffmpeg = subprocess.Popen([settings.FFMPEG, \"-y\", \"-i\", path, new_path], stderr=subprocess.PIPE)\n ffmpeg.wait()\n if ffmpeg.returncode != 0:\n return {\"success\": False, \"preventRetry\": True, \"error\": ffmpeg.stderr.read()}\n metadata = utils.get_metadata(path, filename_without_ext, filetype)\n result = {\"success\": True}\n result.update(metadata)\n 
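# The upload response merges the \"success\" flag with whatever metadata\n # utils.get_metadata extracted from the uploaded audio file before returning it.\n 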
return result\n","sub_path":"main/uploader/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"27688379","text":"#import the libary for the project\nfrom gtts import gTTS as tts\nimport os\n\n#setup the save folder directory\nos.system('cls' if os.name == 'nt' else 'clear')\nprint('Paste your folder path directory that you want to save your file')\nos.chdir(input('save to : '))\nos.system('cls' if os.name == 'nt' else 'clear')\n\n#welcome text\nprint('Welcome to text to spleech program !')\nprint('Check supported language list and language code here : https://cloud.google.com/text-to-speech/docs/voices')\n\n#select language for the spleech synthesizer\ninpLang = input('Language = ')\n\n#input source text for spleech synthesizer\ninpText = input('Input your word here : ')\n\n#input filename\nfilename = input('Filename : ')\n\n#generate and save audio file from the source text as .mp3 format\nprint('Generating...')\nto_spleech = tts(text = inpText, lang = inpLang)\nto_spleech.save(filename + '.mp3')\n\n#print out success messeage\nprint('Successfully generated audio file !')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476191547","text":"import telebot\nimport re\nimport os\nimport operator\nimport ephem\nimport json\nimport csv\nfrom datetime import datetime, date\nfrom collections import Counter\n\n\nbot = telebot.TeleBot(os.environ.get(\"TELETOKEN\"))\n\n\ndef writelog(message):\n timestamp = datetime.fromtimestamp(message.date).strftime('%Y-%m-%d %H:%M:%S')\n line = '{0} {1}: \\'{2}\\'\\n'.format(timestamp, message.from_user.username, message.text)\n with open('log', 'a') as logger:\n logger.write(line)\n\n\ndef writecsv(message):\n date = datetime.fromtimestamp(message.date).strftime('%Y-%m-%d')\n time = datetime.fromtimestamp(message.date).strftime('%H:%M:%S')\n log = [{'date': date, 'time': time, 'username': message.from_user.username, 'content': message.text}]\n with open('log.csv', 'a', encoding='utf-8') as logger:\n fields = [field for field in log[0].keys()]\n writer = csv.DictWriter(logger, fields, delimiter=';')\n if not os.path.isfile('log.csv'):\n writer.writeheader()\n writer.writerow(log[0])\n\n\ndef init_phrasebook():\n with open('answers.json', 'r') as json_data:\n return json.load(json_data)\n\n\ndef words_to_digits(expression):\n tokens = {'один': '1',\n 'два': '2',\n 'три': '3',\n 'четыре': '4',\n 'пять': '5',\n 'шесть': '6',\n 'семь': '7',\n 'восемь': '8',\n 'девять': '9',\n 'плюс': '+',\n 'минус': '-',\n 'умножить': '*',\n 'разделить': '/'\n }\n for token, value in tokens.items():\n expression = expression.replace(token, value)\n expression = expression.replace('и', '.')\n pattern = '(?:(?![0-9.\\-*+=\\/]).)*'\n expression = re.sub(pattern, '', expression)\n return expression + '='\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n writecsv(message)\n reply = bot.reply_to(message, 'Init1')\n writecsv(reply)\n reply\n\n\n@bot.message_handler(commands=['count'])\ndef count_words(message):\n count = message.text.split(' ')\n bot.reply_to(message, len(count[1:]))\n\n\n@bot.message_handler(func=lambda message: message.text.rstrip()[-1] == '=')\n# FIXME: Приоритет знаков\ndef calculate(message):\n expression = message.text.replace(' ', '')\n tokens = {'+': operator.add,\n 
'-': operator.sub,\n '*': operator.mul,\n '/': operator.truediv\n }\n pattern = '([0-9.]|\\d[+\\-*/]?)+(?<=\\d)='\n if re.fullmatch(pattern, expression) is None:\n bot.reply_to(message, 'Похоже, вы допустили ошибку! Проверьте выражение и попробуйте ещё раз!')\n return\n digits = re.findall('[0-9.]+|\\d+', expression)\n digits = [float(x) for x in reversed(digits)]\n operators = re.findall('[+\\-*/]', expression)\n for op in operators:\n result = tokens[op](digits.pop(), digits.pop())\n if not digits:\n bot.reply_to(message, result)\n digits.append(result)\n\n\n@bot.message_handler(func=lambda message: message.text.lower().startswith('сколько будет'))\ndef words_calculator(message):\n message.text = words_to_digits(message.text.lower())\n calculate(message)\n\n\n@bot.message_handler(func=lambda message: 'полнолуние' in message.text.lower())\ndef next_newmoon(message):\n date = re.sub('[А-я]+\\s+', '', message.text)\n if not date:\n bot.reply_to(message, 'Не забудьте написать дату!')\n reply = ephem.next_new_moon(date)\n bot.reply_to(message, reply)\n\n\n@bot.message_handler(func=lambda message: 'нового года' in message.text.lower())\ndef newyear_countdown(message):\n today = date.today()\n newyear = date(today.year + 1, 1, 1)\n result = newyear - today\n result = result.days\n if result % 100 in range(5, 21) or result % 100 == 0:\n days = 'дней'\n if result % 10 in range(5, 9) or result % 10 == 0:\n days = 'дней'\n if result % 10 == 1:\n days = 'день'\n if result % 10 in range(2, 5):\n days = 'дня'\n bot.reply_to(message, \"До нового {0} года осталось {1} {2}.\".format(newyear.year, result, days))\n\n\n@bot.message_handler(func=lambda message: 'осталось до' in message.text.lower())\ndef date_countdown(message):\n today = date.today()\n user_date = re.sub('(?!\\-)\\D+', '', message.text)\n if not user_date:\n bot.reply_to(message, 'Пожалуйста, введите дату в формате дд-мм-гггг')\n return\n user_date = datetime.strptime(user_date, '%d-%m-%Y').date()\n result = user_date - today\n result = result.days\n if result < 0:\n bot.reply_to(message, 'Введённая дата уже прошла!')\n return\n if result % 100 in range(5, 21) or result % 100 == 0:\n days = 'дней'\n if result % 10 in range(5, 9) or result % 10 == 0:\n days = 'дней'\n if result % 10 == 1:\n days = 'день'\n if result % 10 in range(2, 5):\n days = 'дня'\n bot.reply_to(message, 'До {0} осталось {1} {2}'.format(user_date, result, days))\n\n\n@bot.message_handler(func=lambda message: re.match('\\d+, \\d+', message.text))\ndef top_names(message):\n month, year = message.text.split(', ')\n names = list()\n with open('girl_names.csv', 'r') as file:\n reader = csv.reader(file, delimiter=';')\n for row in reader:\n if year in row and month in row:\n names.append(row[-3])\n result = Counter(names).most_common(5)\n answer = str()\n for name in result:\n answer += name[0] + '\\n'\n if not answer:\n bot.reply_to(message, 'К сожалению, никаких имён нет')\n return\n bot.reply_to(message, answer)\n\n\n@bot.message_handler(func=lambda message: re.sub('\\W+', '', message.text.lower()) in ANSWERS)\ndef answer(message):\n phrase = re.sub('\\W+', '', message.text)\n bot.reply_to(message, ANSWERS.get(phrase.lower()))\n\n\nif __name__ == '__main__':\n ANSWERS = init_phrasebook()\n bot.polling()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"444400718","text":"# CARLINI-WAGNER L0 ATTACK\n\nimport argparse\nimport torch\nfrom 
torch.autograd import Variable\n# import main\n#from main import LeNet, View\nfrom torchvision import datasets, transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport random\nimport pandas as pd\n\n# import the CW L2 attack\n# import cwL2attack\nfrom cwL2attack import f, atanh, update, cw_L2_attack\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='Carlini-Wagner L0 Attack on MNIST data')\nparser.add_argument('--data-dir', type=str, default='./data',metavar='DIR')\nparser.add_argument('--targeted', type=bool, default=True, metavar='TARG') # whether or not to do a targeted attack\nparser.add_argument('--attack-mnist', type=bool, default=True, metavar='AM') # whether or not to perform the attack on the MNIST test images\nparser.add_argument('--show-images', type=bool, default=False, metavar='SHOW') # whether or not to visualize the attack on one MNIST test image\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n# Print out arguments to the log\nprint('Carlini-Wagner L0 Attack on MNIST data')\nfor p in vars(args).items():\n print(' ',p[0]+': ',p[1])\nprint('\\n')\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\n# define the model\n#model = torch.load('mnist-trained-model.pt')\n#model.eval()\n#if args.cuda:\n# model = model.cuda()\n\ndef cw_L0_attack(model, x, y, targeted):\n \"\"\"\n Input:\n x -> initial, unperturbed image\n y -> target label (if targeted=True), initial label (if targeted=False)\n\n Output\n x_adv -> the adversarial image\n iterations -> the number of rounds of L2 attacks we had to perform\n success -> whether or not the attack was successful\n \"\"\"\n\n shape = x.shape\n success = True\n\n J = [j for j in range(shape[0]*shape[1]*shape[2]*shape[3])]\n J_comp = [] # the compliment of the set J\n\n iteration = 0\n while len(J) > 0:\n x_adv, adv_label, c = cw_L2_attack(model, x, y, gamma=0.999999, c_lower=3.2768, c_init=6.5536, c_upper=10e10, b_steps=25, max_iter=100, targeted=targeted)\n\n # fix the pixels we do not wish to attack/modify\n for j in J_comp:\n (x_adv.view(-1))[j] = (x.view(-1))[j]\n\n # store the current adversarial label, we want to eventually return it\n adv_label_old = adv_label\n\n # find the adversarial label\n adv_label = model(x_adv).argmax().item()\n\n # terminate the L0 attack if the L2 attack fails\n if targeted == True and adv_label != y:\n break\n if targeted == False and adv_label == y:\n break\n\n # if L2 attack is still successful, remove a pixel from the allowed set\n x_adv = Variable(x_adv.data, requires_grad=True)\n delta = x_adv - x\n objective = torch.sum(torch.pow(x_adv - x, 2)) + c*f(model, x_adv, y, targeted)\n objective.backward()\n g = x_adv.grad.data\n values = [(g.data.view(-1))[j]*(delta.data.view(-1))[j] for j in J]\n j_ast = (torch.tensor(values)).argmin().item()\n del J[j_ast]\n J_comp.append(j_ast)\n\n #print(iteration, \", adversarial label =\", adv_label)\n\n iteration+=1\n\n if len(J) == 0:\n success = False\n\n return x_adv, adv_label_old\n\ndef attack(model, loader, num_images, targeted, save):\n images_perturbed = 0\n l1, l2, linf, imgs, lbls = [], [], [], [], []\n for batch_idx, (images, labels) in enumerate(loader):\n for i, (x, y) in enumerate(zip(images, labels)):\n if args.cuda:\n x = x.cuda()\n if targeted:\n target_label = random.randint(0,9)\n while target_label == y: # make sure target label isnt the predicted label\n target_label = 
random.randint(0,9)\n x_pert, adv_label = cw_L0_attack(model=model, x=x.unsqueeze(0), y=target_label, targeted=targeted)\n else:\n x_pert, adv_label = cw_L0_attack(model=model, x=x.unsqueeze(0), y=y, targeted=targeted)\n images_perturbed += 1\n\n l1.append(torch.norm(x - x_pert, 1).item())\n l2.append(torch.norm(x - x_pert, 2).item())\n linf.append(torch.norm(x - x_pert, float('inf')).item())\n\n imgs.append(x_pert)\n lbls.append(adv_label)\n\n if images_perturbed == num_images:\n dists = pd.DataFrame({'l1': np.array(l1),\n 'l2': np.array(l2),\n 'linf': np.array(linf)})\n if save:\n imgs = {'perturbed': np.array(imgs),\n 'labels': np.array(lbls)}\n else:\n imgs = None\n return dists, imgs\n","sub_path":"cwL0attack_oneimg.py","file_name":"cwL0attack_oneimg.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117179492","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import User\nfrom events.models import EventCategory, Event \nfrom events.views import EventListView\nfrom .forms import CreateUserForm, LoginForm\n\n\n\n\n\n\n@login_required(login_url='login')\ndef dashboard(request):\n user = User.objects.count()\n event_ctg = EventCategory.objects.count()\n event = Event.objects.count()\n complete_event = Event.objects.filter(status='completed').count()\n events = Event.objects.all()\n context = {\n 'user': user,\n 'event_ctg': event_ctg,\n 'event': event,\n 'complete_event': complete_event,\n 'events': events\n }\n return render(request, 'dashboard.html', context)\n\n\n\ndef registerPage(request):\n if request.user.is_authenticated:\n return redirect('home')\n else:\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user=form.save()\n username = form.cleaned_data.get('username')\n groupe=Group.objects.get(name='part')\n user.groups.add(groupe)\n return redirect('login')\n context = {'form': form}\n return render(request, 'register.html', context)\n\ndef homepart_page(request):\n model_event = Event.objects.all().values()\n context = {\n 'events': model_event,\n \n }\n \n return render(request, 'homepart.html',context)\n\ndef login_page(request):\n forms = LoginForm()\n if request.method == 'POST':\n forms = LoginForm(request.POST)\n if forms.is_valid():\n username = forms.cleaned_data['username']\n password = forms.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user.is_superuser:\n return redirect('http://127.0.0.1:8000/admin/')\n elif user.groups.filter(name='part').exists() :\n login(request, user)\n return redirect('homepart')\n elif user.groups.filter(name='org').exists() :\n login(request, user)\n return redirect('dashboard')\n \n context = {\n 'form': forms\n }\n return render(request, 'login.html', context)\n\ndef logut_page(request):\n logout(request)\n return redirect('login')","sub_path":"Eventize/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"382293655","text":"from typing import List\n\nfrom loguru import 
logger\n\nfrom uzen.models.matches import Match\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.matches import MatchResult\nfrom uzen.services.rule_matcher import RuleMatcher\nfrom uzen.tasks import AbstractTask\n\n\nclass MatchinbgTask(AbstractTask):\n def __init__(self, snapshot: Snapshot):\n self.snapshot = snapshot\n\n async def _process(self):\n logger.debug(\"Start matching job...\")\n\n snapshot_ = await Snapshot.get(id=self.snapshot.id).prefetch_related(\"_scripts\")\n matcher = RuleMatcher(snapshot_)\n results: List[MatchResult] = await matcher.scan()\n\n matches = [\n Match(\n snapshot_id=self.snapshot.id,\n rule_id=result.rule_id,\n script_id=result.script_id,\n matches=[match.dict() for match in result.matches],\n )\n for result in results\n ]\n await Match.bulk_create(matches)\n\n logger.debug(f\"Snapshot {self.snapshot.id} matches with {len(matches)} rule(s)\")\n logger.debug(\"Matching job is finished\")\n\n @classmethod\n async def process(cls, snapshot: Snapshot):\n instance = cls(snapshot)\n return await instance._process()\n","sub_path":"uzen/tasks/matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42376079","text":"# Copyright: 2013 Bastian Blank \n# License: BSD 2-clause, see LICENSE for details.\n\nfrom pygments.lexers import get_all_lexers\n\nfrom . import blueprint\n\n\n@blueprint.route('/')\ndef index():\n from flask import render_template\n contenttypes = []\n for lexer_info in get_all_lexers():\n contenttypes.extend(lexer_info[3])\n return render_template('index.html',\n contenttypes=contenttypes)\n","sub_path":"bepasty/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244271618","text":"# Time: O(nlogn)\n# Space: O(1)\n\n# 910\n# Given an array A of integers, for each integer A[i]\n# we need to choose either x = -K or x = K, and add x to A[i] (only once).\n#\n# After this process, we have some array B.\n#\n# Return the smallest possible difference\n# between the maximum value of B and the minimum value of B.\n#\n# Example 1:\n#\n# Input: A = [1], K = 0\n# Output: 0\n# Explanation: B = [1]\n# Example 2:\n#\n# Input: A = [0,10], K = 2\n# Output: 6\n# Explanation: B = [2,8]\n# Example 3:\n#\n# Input: A = [1,3,6], K = 3\n# Output: 3\n# Explanation: B = [4,6,3]\n#\n# Note:\n# - 1 <= A.length <= 10000\n# - 0 <= A[i] <= 10000\n# - 0 <= K <= 10000\n\n# Solution:\n# Intuition is increase smaller A[i] (go up) and decrease larger A[i] (go down).\n# Formalize this concept: if A[i] < A[j], we don't need to consider when A[i] goes down\n# while A[j] goes up. 
Because the interval (A[i]+K, A[j]-K) is a subset of (A[i]-K, A[j]+K).\n#\n# For sorted A, *say A[i] is the largest i that goes up.* We don't care A[j]-K for 0<=j 0:\n qtdcam = len(self.filaCaminhoes)\n self.numeroentidades.append(qtdcam) #quantidade de caminhoes na fila naquele momento\n caminhao = self.filaCaminhoes.popleft()\n caminhao.adicionaTempoAtendimentoCarregamento(tempoatendimento) #Ja calcula tempo na fila\n tempocarga = self.calc.calcula(self.funcao, self.args)\n temposaida = tempoatendimento + tempocarga\n caminhao.adicionaTempoSaidaCarregamento(temposaida) #Ja calcula o tempo de carga junto\n self.tempoOcupacao += tempocarga\n self.qtdCarregados += 1\n self.proxCarregamento = temposaida\n return caminhao\n else:\n print('Nao ha caminhoes na fila para carregar')\n return None\n\n def getMenorNumeroEntidadeNaFila(self):\n \"\"\"\n Busca e retorna o menor numero de entidades na fila registrado.\n @return menor: menor numero de entidades na fila registrado ate o momento.\n \"\"\"\n menor = 0\n if len(self.numeroentidades) > 0:\n menor = min(self.numeroentidades)\n return menor\n\n def getMedioNumeroEntidadeNaFila(self):\n \"\"\"\n Busca e retorna tamanho medio de entidades na fila registrado ate o momento.\n @return menor: tamanho medio de entidades na fila registrado ate o momento.\n \"\"\"\n medio = 0\n if len(self.numeroentidades) > 0:\n medio = sum(self.numeroentidades)/len(self.numeroentidades)\n return medio\n\n def getMaiorNumeroEntidadeNaFila(self):\n \"\"\"\n Busca e retorna o maior numero de entidades na fila registrado.\n @return menor: maior numero de entidades na fila registrado ate o momento.\n \"\"\"\n maior = 0\n if len(self.numeroentidades) > 0:\n maior = max(self.numeroentidades)\n return maior\n\n def getTempoOcupacao(self):\n \"\"\"\n Busca o tempo de ocupacao do recurso.\n @return tempo: tempo de ocupacao do recurso.\n \"\"\"\n tempo = self.tempoOcupacao\n return tempo\n\n def getQtdCarregados(self):\n \"\"\"\n Busca quantos caminhoes foram carregados\n @return qtd: quantidade de caminhoes ja carregados.\n \"\"\"\n qtd = self.qtdCarregados\n return qtd\n\n def getProxCarregamento(self):\n \"\"\"\n Retorna o proximo tempo disponivel livre para carregar.\n @return tempo: proximo tempo disponivel livre para carregar.\n \"\"\"\n tempo = self.proxCarregamento\n return tempo\n\n def adicionaCaminhao(self, cam):\n \"\"\"\n Adiciona o caminhao cam a lista de caminhoes esperando pra serem carregados.\n @param cam: caminhao a ser adicionado na lista.\n \"\"\"\n self.filaCaminhoes.append(cam)\n return\n","sub_path":"src/carregador.py","file_name":"carregador.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653333803","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport util\nimport pandas as pd\n\n\ndef plot_key(key, results_path, fig_path):\n fig, axes = plt.subplots(4, 5, sharey=True, sharex=True)\n\n means = pd.read_csv(\n results_path.joinpath(\"theta_accuracy_stimulus.csv\"))\\\n .set_index(['stimulus', 'flipped', 'model'])\\\n .groupby(level='model').get_group(key)\n\n for i, (stim, sdf) in enumerate(means.groupby(level='stimulus')):\n ax = axes.flat[i]\n\n for flipped, stats in sdf.groupby(level='flipped'):\n lower = stats['median'] - stats['lower']\n upper = stats['upper'] - stats['median']\n ax.errorbar(\n stats['modtheta'], stats['median'],\n yerr=[lower, upper],\n label=flipped, lw=3)\n\n ax.set_xlim(-10, 190)\n ax.set_xticks([0, 60, 120, 
180])\n ax.set_xlabel(\"Rotation\")\n util.clear_right(ax)\n util.clear_top(ax)\n util.outward_ticks(ax)\n ax.set_title(\"Stim %s\" % stim)\n\n fig.set_figheight(8)\n fig.set_figwidth(10)\n\n plt.draw()\n plt.tight_layout()\n\n pths = [fig_path.joinpath(\"accuracy_stimuli_%s.%s\" % (key, ext))\n for ext in ('png', 'pdf')]\n for pth in pths:\n util.save(pth, close=False)\n return pths\n\n\ndef plot(results_path, fig_path):\n pths = []\n for key in ['exp', 'hc', 'bq', 'bqp']:\n pths.extend(plot_key(key, results_path, fig_path))\n return pths\n\n\nif __name__ == \"__main__\":\n util.make_plot(plot)\n","sub_path":"analysis/plots/accuracy_stimuli.py","file_name":"accuracy_stimuli.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421486665","text":"from datetime import date, datetime\n\nfrom .converters import serialize_value\nfrom .exceptions import MissingConverterError\n\n\ndef py2xml(obj, root=True, root_name='object'):\n \"\"\"Renders an ojbect as XML.\n\n :param bool root: Boolean value indicating if the obj is the XML root.\n :param str root_name: XML tag name.\n :raises MissingConverterError: if no converter is found for the object.\n \"\"\"\n if isinstance(obj, dict):\n return dict2xml(obj, root, root_name)\n elif isinstance(obj, (list, tuple)):\n return list2xml(obj, root, root_name)\n elif isinstance(obj, (int, float, basestring, datetime, date,\n type(None))):\n return convert_property(obj, root_name)\n\n raise MissingConverterError(\n 'No converter for type: %s' % type(obj).__name__\n )\n\n\ndef dict2xml(obj, root=True, root_name='object'):\n \"\"\"Renders a dict object to XML.\n\n :param dict obj: Dict object.\n :param bool root: Boolean value indicating if the obj is the XML root.\n :param str root_name: XML tag name.\n \"\"\"\n output = []\n addline = output.append\n\n if root is True:\n addline('')\n\n addline('<%s type=\"dict\">' % root_name)\n for name, value in obj.items():\n addline(py2xml(value, root=False, root_name=name))\n addline('' % root_name)\n\n return '\\n'.join(output)\n\n\ndef list2xml(obj, root=True, root_name='object'):\n \"\"\"Renders a list object to XML.\n\n :param list obj: List object.\n :param bool root: Boolean value indicating if the obj is the XML root.\n :param str root_name: XML tag name.\n \"\"\"\n output = []\n addline = output.append\n\n if root is True:\n addline('')\n\n addline('<%s type=\"list\">' % root_name)\n for value in obj:\n addline(py2xml(value, root=False, root_name='element'))\n addline('' % root_name)\n\n return '\\n'.join(output)\n\n\ndef convert_property(obj, root_name='property'):\n \"\"\"Renders any base type objects to XML.\n\n :param obj: Object to be rendered as xml.\n :type obj: int, float, basestring, datetime, date or None\n :param str root_name: XML tag name.\n \"\"\"\n value = serialize_value(obj)\n xml = '<{name} type=\"{type}\">{value}'.format(\n name=root_name,\n type=obj.__class__.__name__,\n value=xml_escape(value),\n )\n return xml\n\n\ndef xml_escape(value):\n \"\"\"Creates a XML safe value. 
Useful for richtext (html) values.\"\"\"\n value = value.replace('&', '&')\n value = value.replace('\"', '"')\n value = value.replace('\\'', ''')\n value = value.replace('<', '<')\n value = value.replace('>', '>')\n return value\n\n__all__ = ['py2xml', 'dict2xml', 'list2xml', 'convert_property']\n","sub_path":"py2xml/to_xml.py","file_name":"to_xml.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327905470","text":"from flask import current_app, g\nfrom flask.cli import with_appcontext\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndef getDatabaseConnection(databaseString):\n \"\"\"Attempt connection to the database\"\"\"\n \n sqlsession = None\n\n try:\n sqlengine = sqlalchemy.create_engine(databaseString)\n SQLSession = sessionmaker(bind=sqlengine)\n sqlsession = SQLSession()\n\n print(\"Connection to \" + databaseString + \" successfull\")\n logging.info(\"Connection to \" + databaseString + \" successfull\")\n except Exception as e:\n logging.error(\"Error in connection to the database\")\n logging.error(traceback.format_exc())\n print(\"Error in connection to the database\")\n\n return sqlsession\n\n\ndef get_db():\n if 'db' not in g:\n\n #connectionString = current_app.config['SQLALCHEMY_DATABASE_URI']\n #g.db = getDatabaseConnection(connectionString)\n\n g.db = SQLAlchemy(current_app)\n\n '''\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n '''\n\n return g.db\n\n\ndef close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()\n\n\ndef init_app(app):\n app.teardown_appcontext(close_db)\n g.db = SQLAlchemy(app)\n #app.cli.add_command(get_db)","sub_path":"code/webServiceTest/iHvac/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476606501","text":"from pymongo import MongoClient\nimport re \n\n#client = MongoClient(config.mongoConfigs['host'], config.mongoConfigs['port'], j=True)\n#db = client[config.mongoConfigs['db']] \n\nclient = MongoClient('mongodb://10.237.26.159:27017/')\ndb = client['eventwise_media-db'] \n\ndef modifyName(entity):\n titles=[]\n titlePattern= '(Mr\\.?|Ms\\.?|Mrs\\.?|Dr\\.?|Prof\\.?|prof\\.?|Sir|Shri)\\s+'\n wordPattern='.*(Chairman|Principal|General|Commissioner|Minister|Ambassador|Superintendent|Commander|Justice|Speaker|Professor|President|Mayor|Inspector|Police|Officer|Collector|(t|T)rustee|Dean|Magistrate|Director|Secretary|Judge|Engineer|Governor|Chancellor|Vice-Chancellor)(\\s+of)?'\n compiledTitle=re.compile(titlePattern)\n compiledWord=re.compile(wordPattern)\n\n name=entity['stdName']\n matchedTitle=compiledTitle.match(name)\n \n if matchedTitle:\n t=matchedTitle.group(0)\n name=name.replace(t,'')\n \n title=''\n m=compiledWord.match(name)\n \n if m:\n title=m.group(0)\n name=name.replace(title,'')\n \n entity['stdName']=name.strip()\n \n if not (title.strip()=='' or (title in titles)):\n titles.append(title)\n \n #clean aliases\n for i in range(len(entity['aliases'])):\n name=entity['aliases'][i]\n matchedTitle=compiledTitle.match(name)\n if matchedTitle:\n t=matchedTitle.group(0)\n name=name.replace(t,'')\n \n title=''\n m=compiledWord.match(name)\n \n if m:\n title=m.group(0)\n name=name.replace(title,'')\n \n entity['aliases'][i]=name.strip()\n \n if not (title.strip()=='' or (title in titles)):\n titles.append(title)\n\n 
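# Titles stripped from stdName and from each alias are accumulated in the\n # titles list; the block below attaches them to the entity, each tagged with\n # the entity's first article id.\n 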
\n if titles and not entity.get('title'):\n entity['title']=[]\n \n for title in titles:\n entity['title'].append({'text':title,'articleIds':[entity['articleIds'][0]]})\n\ndef extractPersonEntity(article,matchedEntity,entityJson):\n modifyName(entityJson)\n \n articleText=article['text']\n articleId=article['_id']\n \n enText=entityJson['stdName'] #assumption that the stdName contains the name extracted from the first line and cleaned by modifyName method\n enText=enText.replace('(','')\n enText=enText.replace(')','')\n \n #add associated entities with count \n assEn=filter(lambda x:x['type'] in ['Person','Company','Organization'] and x['name']!=matchedEntity['name'],article['entities'])\n shrtAssEn=[{'text':a['name'],'count':1} for a in assEn]\n entityJson['associatedEntities']=shrtAssEn\n\n #title\n leftTitlePattern='(([A-Z]([\\w\\.\\-]*)(\\s+))((([A-Z]([\\w\\.\\-(),]*))|(for|of|the|and|an|a))(\\s+))*?)(,\\s+)?'+enText\n rightPattern=enText+',(([\\s]+)([A-Z]([\\w\\.]*)(\\s+))((([A-Z]([\\w\\.\\-,]*))|(for|of|the|and|an|at|a))(\\s*))+)'\n weakLeftPattern='(said\\s+([\\w,\\-().]+\\s+){1,5}'+enText+')'\n tenWords='([\\w,\\-.()]+\\s+){0,5}'+enText+'\\s*([\\w,\\-.()]+\\s*){0,5}'\n \n m = re.search(leftTitlePattern,articleText) or re.search(rightPattern,articleText) or re.search(weakLeftPattern,articleText) or re.search(tenWords,articleText)\n \n title=m.group(0).strip() if m else None\n if title:\n if not entityJson.get('title'):\n entityJson['title']=[]\n \n tFound=False\n for t in entityJson.get('title'):\n if title == t['text'].strip():\n tFound=True\n break\n \n if not tFound:\n entityJson['title'].append({\n 'text':title,\n 'articleIds':[articleId]\n }\n ) \n return entityJson\n \ndef save(entityJson):\n print(\"Saving...\")\n collection = db['farmers_opinion_unresolved']\n collection.insert_one(entityJson)\n\ndef extract():\n collection = db['farmers_opinion']\n #cursor=collection.find({'$and':[{'entities':{'$exists':True}},{'extracted':{'$exists':False}}]})#.limit(1000)\n cursor=collection.find({'entities':{'$exists':True}})#.limit(1000)\n for article in cursor:\n entities=article['entities']\n articleId=article['_id']\n print(articleId)\n try:\n for matchedEntity in entities:\n if matchedEntity['type'] not in ['Person','City','Country','Continent','Company','Organization','ProvinceOrState']:\n continue\n print('processing: ',matchedEntity['name'])\n entityJson={\n 'stdName':matchedEntity['name'],\n 'type':matchedEntity['type'],\n 'aliases':matchedEntity['aliases'],\n 'articleIds':[articleId],\n 'resolved':False\n }\n if matchedEntity.get('resolutions') and matchedEntity['type']!='Person':\n entityJson['resolutions']=matchedEntity['resolutions']\n\n if matchedEntity['type']=='Person':\n extractPersonEntity(article,matchedEntity,entityJson)\n save(entityJson)\n except Exception:\n print(\"Some error!!\\n\\n\")\n continue\n #collection.update_one({'_id':articleId},{'$set':{'extracted':True}})\n cursor.close() \n \nif __name__=='__main__':\n # while(1):\n extract() \n \n client.close() \n \n","sub_path":"Entity Resolution/Mass Media to Mass Media/extract_entities_oc_aadhar_opinion.py","file_name":"extract_entities_oc_aadhar_opinion.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553359186","text":"import unittest\n\nfrom freezegun import freeze_time\nfrom rest_framework import status, test\n\nfrom waldur_core.structure.tests import factories as structure_factories\nfrom 
waldur_core.structure.tests import fixtures as structure_fixtures\nfrom waldur_mastermind.marketplace import models, plugins\nfrom waldur_mastermind.marketplace.tests import factories, fixtures\nfrom waldur_mastermind.marketplace.tests import utils as test_utils\n\n\nclass CustomerResourcesFilterTest(test.APITransactionTestCase):\n def setUp(self):\n self.fixture1 = structure_fixtures.ServiceFixture()\n self.customer1 = self.fixture1.customer\n self.offering = factories.OfferingFactory(customer=self.customer1)\n self.resource1 = factories.ResourceFactory(\n offering=self.offering, project=self.fixture1.project\n )\n\n self.fixture2 = structure_fixtures.ServiceFixture()\n self.customer2 = self.fixture2.customer\n\n def list_customers(self, has_resources):\n list_url = structure_factories.CustomerFactory.get_list_url()\n self.client.force_authenticate(self.fixture1.staff)\n if has_resources:\n return self.client.get(list_url, {'has_resources': has_resources}).data\n else:\n return self.client.get(list_url).data\n\n def test_list_customers_with_resources(self):\n self.assertEqual(1, len(self.list_customers(True)))\n\n def test_list_all_customers(self):\n self.assertEqual(2, len(self.list_customers(False)))\n\n\nclass ServiceProviderFilterTest(test.APITransactionTestCase):\n def setUp(self):\n self.fixture1 = structure_fixtures.ServiceFixture()\n self.service_provider1 = self.fixture1.customer\n self.offering1 = factories.OfferingFactory(customer=self.service_provider1)\n self.resource1 = factories.ResourceFactory(\n offering=self.offering1, project=self.fixture1.project\n )\n\n self.fixture2 = structure_fixtures.ServiceFixture()\n self.service_provider2 = self.fixture2.customer\n factories.OfferingFactory(customer=self.service_provider2)\n\n def list_customers(self, service_provider_uuid):\n list_url = structure_factories.CustomerFactory.get_list_url()\n self.client.force_authenticate(self.fixture1.staff)\n return self.client.get(\n list_url, {'service_provider_uuid': service_provider_uuid}\n ).data\n\n def test_list_offering_customers(self):\n customers = self.list_customers(self.service_provider1.uuid.hex)\n self.assertEqual(1, len(customers))\n self.assertEqual(customers[0]['uuid'], self.resource1.project.customer.uuid.hex)\n\n def test_list_is_empty_if_offering_does_not_have_customers(self):\n self.assertEqual(0, len(self.list_customers(self.service_provider2.uuid.hex)))\n\n def test_filter_customer_keyword(self):\n list_url = factories.ServiceProviderFactory.get_list_url()\n provider_1 = factories.ServiceProviderFactory()\n factories.ServiceProviderFactory()\n provider_1.customer.name = 'It is test_name.'\n provider_1.customer.abbreviation = 'test abbr'\n provider_1.customer.save()\n self.client.force_authenticate(self.fixture1.staff)\n\n response = self.client.get(list_url, {'customer_keyword': 'test_name'})\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(1, len(response.data))\n self.assertEqual(response.data[0]['uuid'], provider_1.uuid.hex)\n\n response = self.client.get(list_url, {'customer_keyword': 'abbr'})\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(1, len(response.data))\n self.assertEqual(response.data[0]['uuid'], provider_1.uuid.hex)\n\n\nclass ResourceFilterTest(test.APITransactionTestCase):\n def setUp(self):\n with freeze_time('2020-01-01'):\n self.fixture = structure_fixtures.UserFixture()\n self.resource_1 = factories.ResourceFactory(\n backend_metadata={\n 'external_ips': ['200.200.200.200', 
'200.200.200.201'],\n 'internal_ips': ['192.168.42.1', '192.168.42.2'],\n },\n backend_id='backend_id',\n )\n\n with freeze_time('2021-01-01'):\n factories.ResourceFactory(backend_id='other_backend_id')\n\n self.url = factories.ResourceFactory.get_list_url()\n\n def test_backend_id_filter(self):\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url, {'backend_id': 'backend_id'})\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['uuid'], self.resource_1.uuid.hex)\n\n def test_backend_metadata_filter(self):\n self.client.force_authenticate(self.fixture.staff)\n # check external IP lookup\n response = self.client.get(self.url, {'query': '200.200.200.200'})\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['uuid'], self.resource_1.uuid.hex)\n\n # check internal IP lookup\n response = self.client.get(self.url, {'query': '192.168.42.1'})\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['uuid'], self.resource_1.uuid.hex)\n\n def test_field_filter(self):\n self.client.force_authenticate(self.fixture.staff)\n\n response = self.client.get(self.url, {'field': ['state', 'offering']})\n self.assertTrue(all([len(fields) == 2 for fields in response.data]))\n\n def test_filter_created(self):\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url)\n self.assertEqual(len(response.data), 2)\n response = self.client.get(self.url, {'created': '2021-01-01'})\n self.assertEqual(len(response.data), 1)\n\n\nclass FilterByScopeUUIDTest(test.APITransactionTestCase):\n def setUp(self):\n plugins.manager.register(\n offering_type='TEST_TYPE',\n create_resource_processor=test_utils.TestCreateProcessor,\n )\n self.fixture = fixtures.MarketplaceFixture()\n self.fixture.offering.type = 'TEST_TYPE'\n self.fixture.offering.save()\n self.url = factories.ResourceFactory.get_list_url()\n self.scope = structure_factories.TestNewInstanceFactory()\n\n def test_scope_uuid_filter(self):\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url, {'query': self.scope.uuid.hex})\n self.assertEqual(len(response.data), 0)\n\n self.fixture.resource.scope = self.scope\n self.fixture.resource.save()\n response = self.client.get(self.url, {'query': self.scope.uuid.hex})\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['uuid'], self.fixture.resource.uuid.hex)\n\n\nclass OrderFilterTest(test.APITransactionTestCase):\n def setUp(self):\n self.fixture = fixtures.MarketplaceFixture()\n self.url = factories.OrderFactory.get_list_url()\n\n def test_order_items_type_filter_positive(self):\n user = self.fixture.staff\n self.client.force_authenticate(user)\n response = self.client.get(self.url, {'type': 'Create'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 1)\n\n def test_order_items_type_filter_negative(self):\n self.fixture.order_item.type = models.RequestTypeMixin.Types.UPDATE\n self.fixture.order_item.save()\n user = self.fixture.staff\n self.client.force_authenticate(user)\n response = self.client.get(self.url, {'type': 'Create'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 0)\n\n\nclass CategoryFilterTest(test.APITransactionTestCase):\n def setUp(self):\n self.fixture = fixtures.MarketplaceFixture()\n self.offering = self.fixture.offering\n self.offering.state = models.Offering.States.ACTIVE\n 
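# the category filter tests below expect this ACTIVE offering to be the one counted\n        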
self.offering.save()\n self.category = self.offering.category\n self.customer = self.offering.customer\n self.url = factories.CategoryFactory.get_list_url()\n factories.CategoryFactory()\n\n def test_customer_uuid_filter_positive(self):\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url, {'customer_uuid': self.customer.uuid.hex})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 1)\n self.assertEqual(response.data[0]['uuid'], self.category.uuid.hex)\n self.assertEqual(response.data[0]['offering_count'], 1)\n\n def test_customer_uuid_filter_negative(self):\n new_customer = structure_factories.CustomerFactory()\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url, {'customer_uuid': new_customer.uuid.hex})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 0)\n\n @unittest.skip('Temporary disable till counters are fixed')\n def test_customer_uuid_filter_with_offering_state_positive(self):\n self.client.force_authenticate(self.fixture.staff)\n self.offering.state = 1\n self.offering.save()\n response = self.client.get(\n self.url,\n {'customer_uuid': self.customer.uuid.hex, 'customers_offerings_state': 1},\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 1)\n self.assertEqual(response.data[0]['uuid'], self.category.uuid.hex)\n self.assertEqual(response.data[0]['offering_count'], 1)\n\n def test_customer_uuid_filter_with_offering_state_negative(self):\n new_customer = structure_factories.CustomerFactory()\n self.client.force_authenticate(self.fixture.staff)\n self.offering.state = 2\n self.offering.save()\n response = self.client.get(\n self.url,\n {'customer_uuid': new_customer.uuid.hex, 'customers_offerings_state': 1},\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 0)\n\n @unittest.skip('Temporary disable till counters are fixed')\n def test_offering_count_if_shared_is_passed(self):\n factories.OfferingFactory(\n category=self.category,\n customer=self.customer,\n state=models.Offering.States.ACTIVE,\n shared=False,\n )\n url = factories.CategoryFactory.get_url(self.category)\n\n self.client.force_authenticate(self.fixture.staff)\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['offering_count'], 2)\n\n response = self.client.get(url, {'shared': True})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['offering_count'], 1)\n\n response = self.client.get(url, {'shared': False})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['offering_count'], 1)\n\n def test_category_has_shared(self):\n self.offering.shared = False\n self.offering.save()\n\n self.client.force_authenticate(self.fixture.staff)\n response = self.client.get(self.url, {'has_shared': True})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 0)\n\n self.offering.shared = True\n self.offering.save()\n\n response = self.client.get(self.url, {'has_shared': True})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n\nclass PlanComponentFilterTest(test.APITransactionTestCase):\n def setUp(self):\n self.fixture_1 = fixtures.MarketplaceFixture()\n self.fixture_2 = 
fixtures.MarketplaceFixture()\n        self.fixture_1.offering.shared = True\n        self.fixture_1.offering.state = models.Offering.States.ACTIVE\n        self.fixture_1.offering.save()\n        self.fixture_2.offering.shared = True\n        self.fixture_2.offering.state = models.Offering.States.ACTIVE\n        self.fixture_2.offering.save()\n        self.url = factories.PlanComponentFactory.get_list_url()\n\n    def test_offering_uuid_filter(self):\n        self.client.force_authenticate(self.fixture_1.staff)\n        response = self.client.get(self.url)\n        self.assertEqual(len(response.json()), 2)\n        response = self.client.get(\n            self.url,\n            {'offering_uuid': self.fixture_1.offering.uuid.hex},\n        )\n        self.assertEqual(len(response.json()), 1)\n","sub_path":"src/waldur_mastermind/marketplace/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":12791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"232111681","text":"import PikaStdLib\nimport STM32\n\ntime = STM32.Time()\nadc1 = STM32.ADC()\n\nadc1.init()\nadc1.setPin('PA1')\nadc1.enable()\n\nwhile True:\n    val = adc1.read()\n    print('adc1 value:')\n    print(val)\n    time.sleep_ms(500)\n","sub_path":"examples/ADC/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"2632189","text":"from scipy import io\nfname = \"data/aut-avn.mat\"\ncontent = io.loadmat(fname, struct_as_record=True)\nXt = content['X']\ny = content['Y']\nprint(type(Xt))\nprint(type(y))\nres = y.T @ y\nprint(res)\n","sub_path":"LSDA/wtf.py","file_name":"wtf.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"398566757","text":"import tensorflow as tf\nimport time\nfrom model.CNN import CNN\nfrom utils.MNISTLoader import MNISTLoader\n\n\nnum_batches = 400\nbatch_size = 50\nlearning_rate = 0.001\n\n\n@tf.function\ndef train_one_step(X, y, optimizer):\n    with tf.GradientTape() as tape:\n        y_pred = model(X)\n        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true = y, y_pred = y_pred)\n        loss = tf.reduce_mean(loss)\n        # note: TensorFlow's built-in tf.print() is used here because @tf.function does not support Python's built-in print\n        tf.print(\"loss\", loss)\n    grads = tape.gradient(loss, model.variables)\n    optimizer.apply_gradients(grads_and_vars = zip(grads, model.variables))\n\n\nif __name__ == \"__main__\":\n    # data\n    data_loader = MNISTLoader()\n    # model\n    model = CNN()\n    # optimizer\n    optimizer = tf.keras.optimizers.Adam(learning_rate = learning_rate)\n    # model training\n    start_time = time.time()\n\n    for batch_index in range(num_batches):\n        X, y = data_loader.get_batch(batch_size)\n        train_one_step(X, y, optimizer)\n\n    end_time = time.time()\n    print(end_time - start_time)\n","sub_path":"src/src_tensorflow/tensorflow_keras_src/MNIST_tf_function.py","file_name":"MNIST_tf_function.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"62402007","text":"import json\nimport arcpy\nimport arcgis\nimport urllib\nfrom arcgis.gis import GIS\nfrom arcgis.mapping import WebMap\nimport requests, datetime, time, smtplib, urllib3,os\nfrom datetime import timedelta\n\ngis = GIS(\"https://www.arcgis.com\", \"AlishaD_ess\", \"ESRI@1994\")\ntargetMapItem = gis.content.get('ff87c91da4254455849500a72fa2dd2d')\n\npolygon_fs = 
arcpy.FeatureSet()\npolygon_fs.load(\"https://services.arcgis.com/Wl7Y1m92PbjtJs5n/arcgis/rest/services/Thane_Mapboun/FeatureServer/5/query?where=1%3D1&outFields=*&returnGeometry=true&f=json\")\n\n\nbase_URL = \"https://services.arcgis.com/Wl7Y1m92PbjtJs5n/arcgis/rest/services/Thane_Mapboun/FeatureServer/replace\"\nlayer_list=[0,1,2,3,4]\nfor x in layer_list:\n point_url= urllib.parse.urljoin(base_URL,str(x)+'/query?where=1%3D1&outFields=*&returnGeometry=true&f=json')\n point_fs = arcpy.FeatureSet()\n point_fs.load(point_url)\n print(point_url)\n\n names=[]\n ward_nums=[]\n Email=[]\n with arcpy.da.SearchCursor(polygon_fs,['SHAPE@','Ward_Name','Ward_No','Email']) as cursor1:\n for row in cursor1:\n row1=row[0]\n name=row[1]\n num=row[2]\n Email.append(row[3])\n with arcpy.da.SearchCursor(point_fs,['SHAPE@']) as cursor2:\n for row in cursor2:\n if row[0].within(row1):\n names.append(name)\n ward_nums.append(num)\n break\n fs_animals= targetMapItem.layers[int(x)]\n test=[]\n fl_animals=fs_animals.query()\n all_features = fl_animals.features\n print(names)\n print(ward_nums)\n for a,b,c in zip(all_features,names,ward_nums):\n a.attributes['Ward_name']=b\n a.attributes['Ward_num']=c\n test.append(a)\n fs_animals.edit_features(updates= test)\n uniqueID = 'OBJECTID' # i.e. OBJECTID\n dateField = 'CreationDate' # Date field to query\n ward='Ward_name'\n #Email='Email'\n hoursValue = 3 # Number of hours to check when a feature was added\n\n # Create empty list for uniqueIDs\n oidList = []\n for name1 in names:\n query= \"Ward_name=\"+\"\\'\"+str(name1)+\"\\'\"\n # Send the POST request:\n URL= urllib.parse.urljoin(base_URL,str(x)+'/query')\n payload = {'f': 'pjson', 'where': query, 'outfields' : '{0},{1},{2}'.format(uniqueID, dateField,ward), 'returnGeometry' : 'false'}\n data=requests.get(URL,params=payload).json()\n # Loop through the features in the extracted JSON:\n for feat in data['features']:\n createDate = feat['attributes'][dateField]\n createDate = int(str(createDate)[0:-3])\n t = datetime.datetime.now() - timedelta(hours=hoursValue)\n t = time.mktime(t.timetuple())\n if createDate > t:\n oidList.append(feat['attributes'][uniqueID])\n print(oidList)\n continue\n # Email Info to send the email\n # Set the email ID:\n for email in Email:\n print(email)\n fromEmail = 'thanamunicipalcorp@gmail.com' # Email sender\n toEmail= email\n smtpServer = 'smtp.gmail.com'\n portNumber = 465 # SMTP Server port\n FROM = fromEmail\n TO = [toEmail]\n SUBJECT = 'New Issue Reported'\n TEXT = \"\\n Hello, \\n \\n New issues with numbers {0} were reported for your department. 
\\n \\n Regards, \\n Municipality Team\".format(oidList)\n        message = \"\"\"From: %s\nTo: %s\nSubject: %s\n\n%s\n\"\"\" % (FROM, \", \".join(TO), SUBJECT, TEXT)\n        gmail_user='abc@gmail.com' #Your emailid\n        gmail_password='P@ssword' #your password\n        # # If new features exist, send email\n        if len(oidList) > 0:\n            smtpObj = smtplib.SMTP_SSL(host=smtpServer, port=portNumber)\n            smtpObj.login(gmail_user, gmail_password)\n            smtpObj.sendmail(FROM, TO, message)\n            print(\"Successfully sent email\")\n            smtpObj.quit()\n            break\n        else:\n            print(\"No email sent\")\n            break\n    test.clear()\n    names.clear()\n    oidList.clear()\n","sub_path":"SpatialQueryAndSendEmail/ForWardens.py","file_name":"ForWardens.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"406743669","text":"import sys\nimport json\n\ndef straightDistance(x1,y1,x2,y2):\n    distance = ((x1-x2)**2 + (y1-y2)**2)**0.5\n    return distance\n\ndef taxicabDistance(xA,yA,xB,yB):\n    distance = abs(xA-xB) + abs(yA-yB)\n    return distance\n\ndef mykwargs(argv):\n    '''\n    Processes argv list into plain args and kwargs.\n    Just easier than using a library like argparse for small things.\n    Example:\n        python file.py arg1 arg2 arg3=val1 arg4=val2 -arg5 -arg6 --arg7\n    Would create:\n        args[arg1, arg2, -arg5, -arg6, --arg7]\n        kargs{arg3 : val1, arg4 : val2}\n\n    Params with dashes (flags) can now be processed separately\n    Shortfalls:\n        spaces between k=v would result in bad params\n    Returns:\n        tuple (args,kargs)\n    '''\n    args = []\n    kargs = {}\n\n    for arg in argv:\n        if '=' in arg:\n            key,val = arg.split('=')\n            kargs[key] = val\n        else:\n            args.append(arg)\n    return args,kargs\n\ndef load_colors(infile):\n    with open(infile,'r') as f:\n        data = f.read()\n    colors = json.loads(data)\n    return colors\n","sub_path":"Assignments/P01.1/helper_module.py","file_name":"helper_module.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"542098925","text":"# -*- coding: utf-8 -*-\nimport datetime\n\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.crypto import get_random_string\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.hashers import make_password\nfrom tools.fields import EncryptedCharField\n\n\n@python_2_unicode_compatible\nclass AppToken(models.Model):\n    \"\"\"\n    The app authorization token model.\n    \"\"\"\n    key = models.CharField(_(\"Key\"), max_length=255, blank=True, )\n    app = models.CharField(_(\"App\"), max_length=200)\n    created = models.DateTimeField(_(\"Created\"), auto_now_add=True)\n    expire_time = models.DateTimeField(_(\"Expire time\"))\n\n    class Meta:\n        abstract = 'frame.app_authentication.apptoken' not in settings.INSTALLED_APPS\n        verbose_name = _(\"AppToken\")\n        verbose_name_plural = _(\"AppTokens\")\n\n    def save(self, *args, **kwargs):\n        if not self.key:\n            self.key = self.generate_key()\n            self.expire_time = timezone.now() + datetime.timedelta(seconds=settings.APP_TOKEN_EXPIRE)\n        else:\n            if self.expire_time < timezone.now():\n                self.key = self.generate_key()\n                self.expire_time = timezone.now() + datetime.timedelta(seconds=settings.APP_TOKEN_EXPIRE)\n\n        return super(AppToken, self).save(*args, **kwargs)\n\n\n    def generate_key(self):\n        return get_random_string(length=40)\n\n    def __str__(self):\n        return 
self.key","sub_path":"WiseEyeIAMService/frame/app_authentication/apptoken.py","file_name":"apptoken.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"190676527","text":"# -*- coding: utf-8 -*-\r\nimport datetime\r\nimport json\r\nimport time\r\nimport traceback\r\nfrom tkinter import *\r\nimport urllib\r\n# response handlers for the submit actions\r\nfrom tkinter.ttk import Combobox\r\nfrom urllib.parse import urlparse\r\nimport tkinter\r\n\r\nimport requests\r\n\r\nfrom TcpData.MyPyMysql import MyPyMysql\r\nfrom TcpData.StunumInfo import StunumInfo\r\nfrom Tools.TimeTools import get_time_delta, strTimeToDatetime, datetime_2string, generate_timestamp, getPreMintime, \\\r\n    string_2datetime, strA_2strB\r\n\r\n# photo ids for the class-record logs\r\n\r\nphotos = []\r\n\r\n# event handler\r\ndef btn_setSQL(e):\r\n    ip = e_ip.get()\r\n    port = int(e_port.get())\r\n    username = e_name.get()\r\n    password = e_pwd.get()\r\n    db = e_sql.get()\r\n    # number of rows inserted\r\n    try:\r\n        st = MyPyMysql(ip, port, username, password, db,get_stunum()) # instantiate the helper with the required arguments\r\n        # st = MyPyMysql('192.168.1.186', 3306, 'root', '123456', 'jp_test',get_stunum()) # instantiate the helper with the required arguments\r\n        # number of rows inserted\r\n        result = st.counts\r\n        global photos\r\n        photos = st.imgs\r\n    except Exception as e:\r\n        # write (refresh) the result into the text area\r\n        text_result.delete(0.0, END)\r\n        text_result.insert(1.0, traceback.format_exc())\r\n    else:\r\n        # write (refresh) the result into the text area\r\n        text_result.delete(0.0, END)\r\n        text_result.insert(1.0, result)\r\n        text_result.insert(2.0, photos)\r\n\r\n# class-record (log) request\r\ndef btn_submitOp(e):\r\n\r\n    # build the request url\r\n    url = get_postUrl(\"classrecord\")\r\n\r\n\r\n    # build the json payload\r\n    stu = get_stunum()\r\n    stu = stu.__dict__\r\n    stu[\"recnum\"] = str(e_recnum.get())\r\n    stu[\"duration\"] = e_duration.get()\r\n    del stu['time']\r\n    re_data = {\"mileage\": 80,\r\n               \"total\": 10,\r\n               \"part1\": 1,\r\n               \"part2\": 2,\r\n               \"part3\": 3,\r\n               \"part4\": 4,\r\n               \"platformvalidtime\": 100\r\n               }\r\n    stu.update(re_data)\r\n    # subjcode comes from the combobox as a string, so its first character must be compared as a string\r\n    if stu[\"subjcode\"][0] == '2' or stu[\"subjcode\"][0] == '4':\r\n        del stu['carnum']\r\n        del stu['simunum']\r\n    if stu[\"subjcode\"][0] == '3':\r\n        del stu['carnum']\r\n    if len(photos) == 0:\r\n        btn_setSQL(e)\r\n    if len(photos) == 1:\r\n        stu[\"photo1\"] = stu[\"photo3\"] = stu[\"photo2\"] = str(photos[0])\r\n    if len(photos) != 1 and len(photos) != 0:\r\n        stu[\"photo1\"] = stu[\"simunum\"]+str(photos[0])\r\n        stu[\"photo3\"] = stu[\"simunum\"]+str(photos[-1])\r\n        stu[\"photo2\"] = stu[\"simunum\"]+str(photos[1])\r\n    data=json.dumps(stu,ensure_ascii=False,indent=2)\r\n    send_post(url,data)\r\n\r\n# stage-training request\r\ndef btn_submitTra(e):\r\n\r\n    # build the request url\r\n    url = get_postUrl(\"stagetrainningtime\")\r\n    tra_record_list = {}\r\n    inscode = entry_inscode.get()\r\n    subject = cvJ.get()\r\n    stunum = entry_stunum.get()\r\n\r\n    tra_record_list[\"inscode\"] = str(inscode)\r\n    tra_record_list[\"subject\"] = str(subject)\r\n    tra_record_list[\"stunum\"] = str(stunum)\r\n    re_data = {\"duration\": 201,\r\n               \"examresult\": \"70\",\r\n               \"mileage\": 80,\r\n               \"pdfid\": 9,\r\n               \"esignature\":\"esignature\",\r\n               \"totaltime\": 200,\r\n               \"vehicletime\": 40,\r\n               \"classtime\": 30,\r\n               \"simulatortime\": 20,\r\n               \"networktime\": 10,\r\n               \"rectype\": 4,\r\n               \"commitFlag\": \"2\",\r\n               \"recarray\": [{\r\n                   \"rnum\": \"123456789012345600005\"\r\n               }]\r\n               }\r\n    tra_record_list.update(re_data)\r\n    data=json.dumps(tra_record_list,ensure_ascii=False,indent=2)\r\n    send_post(url,data)\r\n\r\n# graduation request\r\ndef btn_submitGra(e):\r\n    graduation_list = {}\r\n    # build the request url\r\n    url = get_postUrl(\"graduation\")\r\n    autinscode = 
entry_inscode.get()\r\n stunum = entry_stunum.get()\r\n graduation_list[\"gracertnum\"] = str(stunum)\r\n graduation_list[\"stunum\"] = str(stunum)\r\n graduation_list[\"autinscode\"] = str(autinscode)\r\n re_data = {\"grantdate\":\"20200110\",\"pdfid\":3,\"esignature\":\"esignature\"}\r\n graduation_list.update(re_data)\r\n data=json.dumps(graduation_list,ensure_ascii=False,indent=2)\r\n send_post(url,data)\r\n\r\ndef get_stunum():\r\n inscode = entry_inscode.get()\r\n stunum = entry_stunum.get()\r\n starttime = e_startDay.get()\r\n endtime = e_endDay.get()\r\n coachnum = entry_coachnum.get()\r\n carnum = entry_carnum.get()\r\n simunum = entry_devnum.get()\r\n subjcode = cv.get()\r\n time = e_time.get()\r\n latitude = e_lat.get()\r\n longitude = e_longitude.get()\r\n platnum = e_platnum.get()\r\n is_theory_image = 0\r\n #非实车是否有分钟学时及过程照片,buss_theory_image,\r\n if cv_devlevel.get() == \"非实车有\":\r\n is_theory_image = 1\r\n return StunumInfo(inscode,stunum,starttime,endtime,coachnum,carnum,simunum,subjcode,time,latitude,longitude,platnum,is_theory_image)\r\n\r\n#拼接url\r\ndef get_postUrl(part):\r\n url = \"?v=1.0.0.e1&ts=1477034912021&sign=6117D6F0C2A2237AE80FB0A8BA090D3CB39BDB81C10587FD443E9F1C818D0ACF7B85EA24D935D005081D4F1F10E4B7D3F1ED49F827E743F69ADE9A6F582C910AC577D81C1176AF318A0AFAF2C858095AF675D6D5836D7D0A881DC5A45C8367D70D9DB38C18BD6CA81E99B469EB40CA91D5A4AF9BB257590FCAFF643F58E95A8BD5C1EACD497D28E0CEC45E195B5BC15B1F30E553C206FC30C732F904BBF7B8F56BF31DA429E85595E90182B8B8AC072DE0A9ABCE4B6DB174D0FC7AF6CA631CF968983A89C565E626FD5F653B0E491CA11439422BA82C5020FD21381F9334E74FE1B2C867476BBEC0E32C03D31CE488EA28D484DE1DC608E6EDBA11C856A861BA&user=\"\r\n e_dzurl_data = str(e_dzurl.get())+ part\r\n e_user_data = str(e_user.get())\r\n return e_dzurl_data + url + e_user_data\r\n\r\n#发送post请求\r\ndef send_post(url,data):\r\n headers = {'content-type': 'application/json'}\r\n try:\r\n r = requests.post(url=url.encode(\"utf-8\"),data=data,timeout=60,headers = headers)\r\n r.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n except requests.RequestException as e:\r\n text_result.delete(0.0, END)\r\n text_result.insert(1.0, e)\r\n else:\r\n #将结果输出(更新)到文本域\r\n result =r.text +\"\\n\" + url + \"\\n\"+data+\"\\n\"\r\n text_result.delete(0.0, END)\r\n text_result.insert(1.0, result)\r\n#####创建窗口#####\r\nwin = Tk()\r\nwin.title(\"通用请求\")\r\nwin.geometry('800x500')\r\n\r\n#####创建控件#####\r\n#第一行 inscode\r\nlbl_inscode = Label(win, text=\"inscode:\")\r\nlbl_stunum = Label(win, text=\"stunum:\")\r\nlbl_coachnum = Label(win, text=\"coachnum:\")\r\nlbl_carnum = Label(win, text=\"carnum:\")\r\n\r\nentry_inscode = Entry(win, width=18)\r\nentry_stunum = Entry(win, width=18)\r\nentry_coachnum = Entry(win, width=18)\r\nentry_carnum = Entry(win, width=18)\r\n\r\nlbl_inscode.grid(row=0, column=0, sticky=W, pady=5, padx=5)\r\nentry_inscode.grid(row=0, column=1, sticky=W)\r\nlbl_stunum.grid(row=0, column=2, sticky=W, pady=5, padx=5)\r\nentry_stunum.grid(row=0, column=3, sticky=W)\r\nlbl_coachnum.grid(row=0, column=4, sticky=W, pady=5, padx=5)\r\nentry_coachnum.grid(row=0, column=5, sticky=W)\r\nlbl_carnum.grid(row=0, column=6, sticky=W, pady=5, padx=5)\r\nentry_carnum.grid(row=0, column=7, sticky=W)\r\n\r\n\r\n#第2行 日期控件\r\n\r\ne_startDay = tkinter.Variable()\r\ne_endDay = tkinter.Variable()\r\nlbl_startDay = Label(win, text=\"starttime:\")\r\nlbl_endDay = Label(win, text=\"endtime:\")\r\nentry_startDay = Entry(win,textvariable=e_startDay, width=18)\r\nentry_endDay = Entry(win,textvariable=e_endDay, 
width=18)\r\nlbl_startDay.grid(row=1, column=0, sticky=W, pady=5, padx=5)\r\nentry_startDay.grid(row=1, column=1, sticky=W)\r\nlbl_endDay.grid(row=1, column=2, sticky=W, pady=5, padx=5)\r\nentry_endDay.grid(row=1, column=3, sticky=W)\r\ndt = datetime.datetime.now()\r\ne_endDayT = datetime_2string(dt=dt,fmt='%Y%m%d%H%M%S')\r\ne_startDayT = getPreMintime(dt,60)\r\ne_startDay.set(strA_2strB(e_startDayT))\r\ne_endDay.set(e_endDayT)\r\n\r\ne_duration = tkinter.Variable()\r\nlbl_duration = Label(win, text=\"duration:\")\r\nentry_duration = Entry(win,textvariable=e_duration,width=18)\r\nlbl_duration.grid(row=1, column=4, sticky=W, pady=5, padx=5)\r\nentry_duration.grid(row=1, column=5, sticky=W)\r\ne_duration.set(\"60\")\r\n\r\nlbl_devnum = Label(win, text=\"simunum:\")\r\nentry_devnum = Entry(win, width=18)\r\nlbl_devnum.grid(row=1, column=6, sticky=W, pady=5, padx=5)\r\nentry_devnum.grid(row=1, column=7, sticky=W)\r\n\r\n\r\n# row 3: dropdown selector\r\nlbl_subjcode = Label(win, text=\"subjcode:\")\r\n# bind variable\r\ncv = tkinter.StringVar()\r\n\r\ncom = Combobox(win, textvariable=cv)\r\n# set dropdown options\r\ncom[\"value\"] = (\"2211360000\",\"4211360000\",\"3212360000\",\"1212360000\",\"3213360000\",\"1213360000\",\"2214360000\",\"4214360000\")\r\n\r\n# set default value\r\ncom.current(0)\r\n# bind event\r\ndef func(event):\r\n    # print(com.get())\r\n    print(cv.get())\r\ncom.bind(\"<<ComboboxSelected>>\", func)\r\nlbl_subjcode.grid(row=2, column=0, sticky=W, pady=5, padx=5)\r\ncom.grid(row=2, column=1, sticky=W)\r\n# photo interval\r\ne_time = tkinter.Variable()\r\nlbl_time = Label(win, text=\"照片间隔:\")\r\nentry_time = Entry(win,textvariable=e_time, width=18)\r\nlbl_time.grid(row=2, column=2, sticky=W, pady=5, padx=5)\r\nentry_time.grid(row=2, column=3, sticky=W)\r\ne_time.set(\"15\")\r\n\r\n# latitude\r\ne_lat = tkinter.Variable()\r\nlbl_lat = Label(win, text=\"lat:\")\r\nentry_lat = Entry(win,textvariable=e_lat, width=18)\r\nlbl_lat.grid(row=2, column=4, sticky=W, pady=5, padx=5)\r\nentry_lat.grid(row=2, column=5, sticky=W)\r\ne_lat.set(\"28.264879\")\r\n\r\n# longitude\r\ne_longitude = tkinter.Variable()\r\nlbl_longitude = Label(win, text=\"longitude:\")\r\nentry_longitude = Entry(win,textvariable=e_longitude, width=18)\r\nlbl_longitude.grid(row=2, column=6, sticky=W, pady=5, padx=5)\r\nentry_longitude.grid(row=2, column=7, sticky=W)\r\ne_longitude.set(\"117.170653\")\r\n\r\n\r\n# row 4: user\r\nlbl_user = Label(win, text=\"user:\")\r\nlbl_user.grid(row=3, column=0, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_user = tkinter.Variable()\r\nentry_user = Entry(win, textvariable=e_user,width=18)\r\nentry_user.grid(row=3, column=1, sticky=W)\r\ne_user.set(\"1598BAE7757\")\r\n\r\n# row 4: platnum\r\nlbl_platnum = Label(win, text=\"platnum:\")\r\nlbl_platnum.grid(row=3, column=2, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_platnum = tkinter.Variable()\r\nentry_platnum = Entry(win, textvariable=e_platnum,width=18)\r\nentry_platnum.grid(row=3, column=3, sticky=W)\r\ne_platnum.set(\"A0002\")\r\n\r\n# row 4: DBip\r\nlbl_ip= Label(win, text=\"DBip:\")\r\nlbl_ip.grid(row=3, column=4, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_ip = tkinter.Variable()\r\nentry_ip = Entry(win, textvariable=e_ip,width=18)\r\nentry_ip.grid(row=3, column=5, sticky=W)\r\ne_ip.set(\"localhost\")\r\n\r\n# row 4: port\r\nlbl_port = Label(win, text=\"DBport:\")\r\nlbl_port.grid(row=3, column=6, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_port = tkinter.Variable()\r\nentry_port = Entry(win, textvariable=e_port,width=18)\r\nentry_port.grid(row=3, column=7, sticky=W)\r\ne_port.set(\"3306\")\r\n\r\n# row 5: database\r\nlbl_para = Label(win, 
text=\"数据库:\")\r\nlbl_para.grid(row=4, column=0, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_sql = tkinter.Variable()\r\nentry_para = Entry(win, textvariable=e_sql,width=18)\r\nentry_para.grid(row=4, column=1, sticky=W)\r\ne_sql.set(\"jp_neimengnew2_test\")\r\n\r\n# row 4: username\r\nlbl_name = Label(win, text=\"用户名:\")\r\nlbl_name.grid(row=4, column=2, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_name = tkinter.Variable()\r\nentry_name = Entry(win, textvariable=e_name,width=18)\r\nentry_name.grid(row=4, column=3, sticky=W)\r\ne_name.set(\"root\")\r\n\r\n# row 4: password\r\nlbl_pwd = Label(win, text=\"密码:\")\r\nlbl_pwd.grid(row=4, column=4, sticky=W, pady=5, padx=5)\r\n# bind variable\r\ne_pwd = tkinter.Variable()\r\nentry_pwd = Entry(win, textvariable=e_pwd,width=18)\r\nentry_pwd.grid(row=4, column=5, sticky=W)\r\ne_pwd.set(\"123456\")\r\n\r\n\r\n# stage selector\r\ncv_devlevel = tkinter.StringVar()\r\ndevlevel = Combobox(win, textvariable=cv_devlevel,width = 5)\r\n# set dropdown options\r\ndevlevel[\"value\"] = (\"非实车无\",\"非实车有\")\r\n# set default value\r\ndevlevel.current(0)\r\n# bind event\r\ndef funcDevlevel(event):\r\n    print(cv_devlevel.get())\r\ndevlevel.bind(\"<<ComboboxSelected>>\", funcDevlevel)\r\ndevlevel.grid(row=4, column=6, sticky=W)\r\n\r\n\r\n# row 6: build process photos and per-minute study hours\r\nbtn_sql = Button(win, text=\"构造数据\",bg = \"PaleGreen\")\r\nbtn_sql.bind('<Button-1>', btn_setSQL)\r\nbtn_sql.grid(row=4, column=7, sticky=W, padx=5,pady=5)\r\n\r\n\r\n\r\n\r\n# row 6: request address\r\ne_dzurl = tkinter.Variable()\r\nlbl_dzurl = Label(win, text=\"请求:\")\r\nlbl_dzurl.grid(row=5, column=0, sticky=W, pady=5, padx=5)\r\nentry_url = Entry(win, width=50,textvariable=e_dzurl)\r\nentry_url.grid(row=5, column=1,columnspan=4, sticky=W)\r\ne_dzurl.set(\"http://localhost/gjxx-interface-web/\")\r\n\r\n# bind variable\r\ne_recnum = tkinter.Variable()\r\nentry_recnum = Entry(win, textvariable=e_recnum,width=18)\r\nentry_recnum.grid(row=5, column=5, sticky=W)\r\ne_recnum.set(34001)\r\n\r\nbtn_rec_period = Button(win, text=\"日志请求\",bg = \"PaleGreen\")\r\nbtn_rec_period.bind('<Button-1>', btn_submitOp)\r\nbtn_rec_period.grid(row=5, column=7, sticky=W, padx=5,pady=5)\r\n\r\n\r\n# stage selector\r\ncvJ = tkinter.StringVar()\r\ncomJ = Combobox(win, textvariable=cvJ,width = 5)\r\n# set dropdown options\r\ncomJ[\"value\"] = (\"1\",\"2\",\"3\",\"4\")\r\n# set default value\r\ncomJ.current(0)\r\n# bind event\r\ndef funcJ(event):\r\n    # print(com.get())\r\n    print(cvJ.get())\r\ncomJ.bind(\"<<ComboboxSelected>>\", funcJ)\r\ncomJ.grid(row=6, column=6, sticky=W)\r\n\r\nbtn_tra_record = Button(win, text=\"阶段请求\",bg = \"PaleGreen\")\r\nbtn_tra_record.bind('<Button-1>', btn_submitTra)\r\nbtn_tra_record.grid(row=6, column=7, sticky=W, padx=5,pady=5)\r\n\r\nbtn_graduation = Button(win, text=\"结业请求\",bg = \"PaleGreen\")\r\nbtn_graduation.bind('<Button-1>', btn_submitGra)\r\nbtn_graduation.grid(row=7, column=7, sticky=W, padx=5,pady=5)\r\n\r\n# row 6: recnum\r\nlbl_recnum = Label(win, text=\"recnum:\")\r\nlbl_recnum.grid(row=5, column=4, sticky=W, pady=5, padx=5)\r\n\r\n\r\n# row 5: result output\r\ntext_result = Text(win, width=100, height=25)\r\ntext_result.grid(row=6,rowspan=4, column=0, columnspan=6, sticky=W, padx=10)\r\n\r\n# # row 6: misc\r\nLabel(win, text=\"-- by lele\").grid(row=6, column=7, sticky=SE, padx=10, pady=10)\r\n\r\nwin.mainloop()\r\n\r\n","sub_path":"jp_data/TcpData/ApplicationJs.py","file_name":"ApplicationJs.py","file_ext":"py","file_size_in_byte":14032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"427682162","text":"class maxheap:\n    def __init__(self):\n        self.a = [None] * 50\n        self.count = 0\n    ### heapify\n    def arrayheap(self,a):\n        for i in range(0, len(a)):\n            self.a[i + 1] = a[i]\n        
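# note: despite the class name, the comparisons in heapify()/bottomtopbalance() maintain\n        # a MIN-heap in a 1-indexed array (a[0] is unused), so extractmax() returns the minimum\n        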
self.count = len(a)\n        for i in range(int(self.count / 2), 0, -1):\n            self.heapify(i)\n    def heapify(self,i):\n        if (i <= self.count) and ((2*i) <= self.count):\n            if (self.a[2 * i + 1] != None) and (self.a[2 * i] > self.a[2 * i + 1]):\n                if self.a[i] > self.a[2 * i + 1]:\n                    self.a[i],self.a[2*i+1] = self.a[2 * i+1],self.a[i]\n                    self.heapify(2*i+1)\n            elif self.a[2*i] != None:\n                if self.a[i] > self.a[2 * i]:\n                    self.a[i],self.a[2*i] = self.a[2 * i],self.a[i]\n                    self.heapify(2*i)\n    def insertheap(self,k):\n        self.count=self.count+1\n        self.a[self.count]=k\n        self.bottomtopbalance(self.count)\n    def bottomtopbalance(self,i):\n        if (self.a[i//2] != None) and (self.a[i//2] > self.a[i] ):\n            self.a[i],self.a[i//2]=self.a[i//2],self.a[i]\n            self.bottomtopbalance(i//2)\n        else:\n            return\n    def extractmax(self):\n        temp=self.a[1]\n        self.a[1]=self.a[self.count]\n        self.a[self.count ] = None\n        self.count=self.count-1\n        self.heapify(1)\n        return temp\n    def display(self):\n        print(\"heap contains\",self.a)\n    def maximum(self):\n        print(\"the minimum is\",self.a[1])\ndef main():\n    a = [1, 2, 7, 8, 3, 4, 9, 70, 13, 15, 20]\n    d = maxheap()\n    d.arrayheap(a)\n    d.insertheap(5)\n    print(\"the extracted min element \",d.extractmax())\n    print(\"the extracted min element \",d.extractmax())\n    d.display()\n    d.maximum()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"3/dsa/heaps/minheap.py","file_name":"minheap.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"204311106","text":"from socket import *\nfrom time import ctime\n\nimport re\nimport requests\nimport smtplib\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nimport json\nimport datetime\n\ndef get_form():\n    url = \"http://172.28.10.66/login.asp\"\n\n    headers = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363\"\n    }\n\n    respone = requests.get(url, headers=headers)\n\n    data = respone.content.decode()\n    pattern = re.compile(\"var userCode = '(.*)'\")\n    result = pattern.findall(data)\n    return result[0]\n\ndef get_record():\n    login_url = \"http://172.28.10.66/confirm.asp\"\n    record_url = \"http://172.28.10.66/hrinfo/attendance/viewRecord.asp\"\n    headers = {\n        \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18363\"\n    }\n\n    # the session object persists cookies automatically\n    session = requests.session()\n    # log in programmatically\n    str_form = get_form()  # fetch the dynamic form token\n    login_form_data = {\n        \"RedirectURL\": \"\",\n        \"push_type\": \"1\",\n        \"userName\"+str_form: \"xh3033\",\n        \"password\": \"vbZqYmg5\"\n    }\n\n    login_response = session.post(login_url, data=login_form_data, headers=headers)\n    with open(\"confirm.html\", \"w\", encoding=\"utf-8\") as f:\n        f.write(login_response.content.decode())\n    #2 after a successful login the session carries a valid cookie for the next request\n    data = session.get(record_url, headers=headers).content.decode()\n    pattern = re.compile('(2020+.*)')\n    result = pattern.findall(data)\n    print(result)\n\n    with open(\"index.html\", \"w\", encoding=\"utf-8\") as f:\n        f.write(data)\n    return result\n\ndef time_handle(result):\n    time_list = []\n    for time in result:\n        date_time = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\")\n        time_list.append(date_time)\n    # today_record = time_list[0].split(\" \")\n    return \"近两天打卡记录如下\" + \"\\n\" + result[0] + \"\\n\" + result[1] + \"\\n\" + result[2] + \"\\n\" + result[3]\n\n    # time_compare = time_list[1].split(\":\")\n    
# result_str = \"获取失败\"\n    # if (int(time_compare[0]) < 9 and int(time_compare[1]) <= 30) or (int(time_compare[0]) >= 17 and int(time_compare[1]) >= 30):\n    #     result_str = time_list[0] + \" 打卡成功\"\n    # else:\n    #     result_str = time_list[0] + \" 打卡失败\"\n    # return time_list[0] + \" 打卡记录:\" + \"\\n\" + result[0] + \"\\n\" + result[1]\n\n\n\ndef sendEmail(title, content):\n\n    message = MIMEText(content, 'plain', 'utf-8')  # content, format, encoding\n    message['From'] = \"{}\".format(sender)\n    message['To'] = \",\".join(receivers)\n    message['Subject'] = title\n\n    try:\n        smtpObj = smtplib.SMTP_SSL(mail_host, 465)  # send over SSL; the port is usually 465\n        smtpObj.login(mail_user, mail_pass)  # authenticate\n        smtpObj.sendmail(sender, receivers, message.as_string())  # send\n        print(\"mail has been sent successfully.\")\n    except smtplib.SMTPException as e:\n        print(\"send failed!\")\n        print(e)\n\ndef send_email2(SMTP_host, from_account, from_passwd, to_account, subject, content):\n    email_client = smtplib.SMTP(SMTP_host)\n    email_client.login(from_account, from_passwd)\n    # create msg\n    msg = MIMEText(content, 'plain', 'utf-8')\n    msg['Subject'] = Header(subject, 'utf-8')  # subject\n    msg['From'] = from_account\n    msg['To'] = to_account\n    email_client.sendmail(from_account, to_account, msg.as_string())\n\n    email_client.quit()\n\ndef get_access_token():\n    \"\"\"\n    Fetch the WeChat global API access token (valid for two hours by default).\n    If the daily request quota is not a concern, simply caching the token is enough.\n    \"\"\"\n    result = requests.get(\n        url=\"https://api.weixin.qq.com/cgi-bin/token\",\n        params={\n            \"grant_type\": \"client_credential\",\n            \"appid\": \"wxb1a20486d2d6bbf5\",\n            \"secret\": \"b4f84fbf2667cfa6b8069a8e945d2130\",\n            # \"secret\": \"4d1918bc868f815713ef0e1e42f1ff28\",\n        }\n    ).json()\n\n    if result.get(\"access_token\"):\n        access_token = result.get('access_token')\n    else:\n        access_token = None\n    return access_token\n\ndef get_openid():\n    access_token = get_access_token()\n    response = requests.get(\n        url=\"https://api.weixin.qq.com/cgi-bin/user/get\",\n        params={\n            'access_token': access_token,\n            'next_openid': \"\"\n        }\n    )\n    print(response.json())\n\ndef sendmsg(openid, msg):\n\n    access_token = get_access_token()\n    body = {\n        \"touser\": openid,\n        \"msgtype\": \"text\",\n        \"text\": {\n            \"content\": msg\n        }\n    }\n    response = requests.post(\n        url=\"https://api.weixin.qq.com/cgi-bin/message/custom/send\",\n        params={\n            'access_token': access_token\n        },\n        data=bytes(json.dumps(body, ensure_ascii=False), encoding='utf-8')\n    )\n    # the returned code tells whether the send succeeded (error details can also be looked up by code)\n    result = response.json()\n    print(result)\n\n\nif __name__ == '__main__':\n\n\n\n    #------ email sending -------\n    # mail_host = \"smtp.163.com\"  # SMTP server\n    # mail_user = \"xuhan9191991@163.com\"  # username\n    # mail_pass = \"9191991\"  # authorization password, not the login password\n    # sender = 'xuhan9191991@163.com'  # sender address (use the full address, otherwise sending fails)\n    # receivers = ['1589420163@qq.com']  # recipients; can be your QQ mailbox or any other mailbox\n    #\n    # content = time_data[0]\n    # title = result_str  # email subject\n    # sendEmail(title, content)\n    # receiver = '***'\n    # send_email2(mail_host, mail_user, mail_pass, receiver, title, content)\n\n    host = ''\n    port = 12345\n    buffsize = 2048\n    ADDR = (host,port)\n\n    tctime = socket(AF_INET,SOCK_STREAM)\n    tctime.bind(ADDR)\n    tctime.listen(3)\n\n    while True:\n        print('Wait for connection ...')\n        tctimeClient,addr = tctime.accept()\n        print(\"Connection from :\",addr)\n\n        while True:\n            data = tctimeClient.recv(buffsize).decode()\n            if not data:\n                break\n            print(data)\n\n            if data == \"1\":\n                form_data = get_form()\n                time_data = get_record()\n                result_str = time_handle(time_data)\n                sendmsg('oiCxq59xZe_klq_WkwHcfbbhIBB4', result_str)\n\n            tctimeClient.send(('[%s] 
%s' % (ctime(),data)).encode())\n        # close the client socket only after the peer disconnects, then loop back to accept()\n        tctimeClient.close()","sub_path":"arcvideo/test_socket.py","file_name":"test_socket.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"400894331","text":"import os\nimport yaml\nimport pandas as pd\nimport numpy as np\n\n\ndef read_yaml(x):\n    with open(x, \"r\") as con:\n        config = yaml.safe_load(con)\n    return config\n\n\ndef write_yaml(x, f):\n    with open(f, \"w\") as con:\n        yaml.safe_dump(x, con)\n\n\nif __name__ == \"__main__\":\n\n    # read environment variables\n    path_repo = os.environ.get(\"PATH_REPO\")\n\n    # read config\n    config = read_yaml(f\"{path_repo}/exercises/h02/config.yaml\")\n    path_data = config[\"path\"]\n    path_processed = f\"{path_data}/processed\"\n    os.makedirs(path_processed, exist_ok=True)\n\n    # load data\n    products = pd.read_parquet(f\"{path_data}/products.parquet\")\n    baskets = pd.read_parquet(f\"{path_data}/baskets.parquet\")\n    orders = pd.read_parquet(f\"{path_data}/orders.parquet\")\n    prediction_index = pd.read_parquet(f\"{path_data}/prediction_index.parquet\")\n\n    # the target is a binary variable derived from `y` (days to next order). we predict\n    # days to next order so we shift `days_since_prior_order` by 1 order. we can construct\n    # multiple training samples for each user. the prediction target is the last\n    # observation for each shopper (where y is not available).\n    #\n    # we sort orders so we can simply shift by `-1` in the next step\n    # first and last values for `days_since_prior_order` are NaN\n    orders = orders.sort_values([\"user_id\", \"order_number\"], ascending=True)\n    # verify that first `days_since_prior_order` values for all shoppers are NaN\n    # and that the last `days_since_prior_order` values for all shoppers are not NaN\n    # this is a prerequisite for simply shifting by -1, regardless of `user_id`\n    assert np.all(orders.groupby(\"user_id\").days_since_prior_order.head(1).isnull())\n    assert np.all(orders.groupby(\"user_id\").days_since_prior_order.tail(1).notnull())\n    # define the target variable\n    orders[\"y_int_l1\"] = (orders[\"days_since_prior_order\"] <= 14).astype(int)\n    orders[\"y\"] = orders[\"days_since_prior_order\"].shift(-1)\n    orders[\"y_int\"] = (orders[\"y\"] <= 14).astype(int)\n\n    # add number of products per basket\n    n_product_order = (\n        baskets.groupby(\"order_id\")[[\"product_id\"]].nunique().reset_index()\n    )\n    n_product_order.rename(columns={\"product_id\": \"size\"}, inplace=True)\n    orders = orders.merge(n_product_order, on=\"order_id\", how=\"left\").reset_index(\n        drop=True\n    )\n\n    # inverse order counter (used in building training data)\n    orders[\"order_number_inv\"] = (\n        orders.groupby(\"user_id\").order_number.transform(max) - orders[\"order_number\"]\n    )\n\n    # shorten variable names\n    orders.rename(columns={\"days_since_prior_order\": \"dspo\"}, inplace=True)\n\n    # build training data\n    # we create training/testing samples for each value of `order_number_inv`, starting\n    # with 0. `order_number_inv=0` is our test set and `order_number_inv>0` is our\n    # training/validation set.\n    # not all shoppers have observations for a given `o`, this depends on length of their\n    # order history. also note that we keep all data prior to `o`, regardless of the\n    # length of the order history. this means that we use a different amount of data in\n    # building features across shoppers. 
our reason for doing this is that we do not have\n    # trending features (in this case we need to either normalize or use a constant time\n    # window) and that more data yields more accurate feature values for the given\n    # statistics. we avoid leakage by only using data available at each given point in\n    # time (i.e., before the next order). consider replacing min/max by percentiles (e.g.,\n    # 10% and 90%) to deal with outliers.\n    # this preprocessing introduces three hyperparameters:\n    # - the number of observations used for constructing our feature data `o`\n    # - the lags used in feature computation `l`\n    # - the feature set (e.g., base variables, aggregation functions)\n    x_list = []\n    y_list = []\n    L = config[\"data\"][\"L\"]\n    # code below assumes that first value is the full window (and the largest lag)\n    # a bit hacky... but does the job ;)\n    assert L[0] > orders.order_number_inv.max()\n    assert np.all(L[1:] < orders.order_number_inv.max())\n    # loop through lags and build features\n    for o in range(0, config[\"data\"][\"O\"]):\n\n        data_sets = []\n\n        # loop over selected lags\n        for i, l in enumerate(L):\n\n            orders_o_l = orders[\n                (orders[\"order_number_inv\"] >= o)\n                & (orders[\"order_number_inv\"] < (o + l))\n            ]\n\n            # for first iteration, include `last` to get the latest values of the feature\n            # variables\n            if i == 0:\n                features_o_l = orders_o_l.groupby(\"user_id\").agg(\n                    {\n                        \"dspo\": [\"last\", \"mean\", \"min\", \"max\"],\n                        \"size\": [\"last\", \"mean\", \"min\", \"max\"],\n                        \"y_int_l1\": [\"last\", \"mean\"],\n                    }\n                )\n            else:\n                features_o_l = orders_o_l.groupby(\"user_id\").agg(\n                    {\n                        \"dspo\": [\"mean\", \"min\", \"max\"],\n                        \"size\": [\"mean\", \"min\", \"max\"],\n                        \"y_int_l1\": [\"mean\"],\n                    }\n                )\n\n            # flatten column names\n            features_o_l.columns = [\"_\".join(c) for c in features_o_l.columns.values]\n\n            # add lag to column names\n            if i > 0:\n                features_o_l.columns = [f\"{c}_l{l}\" for c in features_o_l.columns]\n\n            data_sets.append(features_o_l)\n\n        # merge data sets\n        data_o = data_sets[0]\n        for i in range(1, len(data_sets)):\n            data_o = data_o.merge(data_sets[i], on=\"user_id\")\n\n        # additional (derived) features\n        # add 1 because `dspo_mean` can be 0 (and we can't divide by 0)\n        data_o[\"ratio_dspo\"] = data_o[\"dspo_last\"] / (1 + data_o[\"dspo_mean\"])\n        data_o[\"ratio_size\"] = data_o[\"size_last\"] / data_o[\"size_mean\"]\n        data_o[\"trend_dspo\"] = data_o[\"dspo_mean\"] - data_o[f\"dspo_mean_l{min(L)}\"]\n        data_o[\"trend_size\"] = data_o[\"size_mean\"] - data_o[f\"size_mean_l{min(L)}\"]\n\n        # write to list\n        data_o[\"order_number_inv\"] = o\n        x_list.append(data_o)\n        y_list.append(\n            orders[orders[\"order_number_inv\"] == o][\n                [\"user_id\", \"order_number_inv\", \"y_int\"]\n            ]\n        )\n\n    # data set for making final predictions\n    x_predict = x_list[0].reset_index()\n\n    # train and test data\n    x_df = pd.concat(x_list[1:]).reset_index()\n    y_df = pd.concat(y_list[1:])\n\n    # save processed data sets\n    x_predict.to_parquet(f\"{path_processed}/x_predict.pt\")\n    x_df.to_parquet(f\"{path_processed}/x_df.pt\")\n    y_df.to_parquet(f\"{path_processed}/y_df.pt\")\n","sub_path":"exercises/h02/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"471925754","text":"#!/usr/bin/env python\nimport os\nimport cv2\nimport sys\nimport time\nimport rospy\nimport roslib\nroslib.load_manifest('perception_camera')\nimport argparse\nfrom viz import Draw\nfrom tracker import 
Tracker\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom perception_camera.msg import CameraObstacle\n\npublish_topic_name = rospy.get_param(\"image_obstacle_publish_topic\")\npb_path = rospy.get_param(\"pb_path\")\ncam_cfg_path = rospy.get_param(\"cam_cfg_path\")\nsub_image_topic = rospy.get_param(\"sub_image_topic\")\n\nbridge = CvBridge()\npub = rospy.Publisher(publish_topic_name, CameraObstacle, queue_size=10)\ntrack = Tracker(\n pb_path = pb_path, \n cam_cfg_path=cam_cfg_path, \n score_threshold = 0.5)\n\ndef callBack(data):\n start_time = time.time()\n cv_image = bridge.imgmsg_to_cv2(data, \"bgr8\")\n\n tracking_results, frame = track.begins_tracking(cv_image)\n\n camera_obstacle = CameraObstacle()\n if tracking_results is not None and tracking_results.shape[0] > 0:\n for tracking_result in tracking_results:\n camera_obstacle.xmin.append(tracking_result[0])\n camera_obstacle.ymin.append(tracking_result[1])\n camera_obstacle.xmax.append(tracking_result[2])\n camera_obstacle.ymax.append(tracking_result[3])\n camera_obstacle.confidence.append(tracking_result[4])\n camera_obstacle.object_class.append(int(tracking_result[5]))\n camera_obstacle.x.append(tracking_result[6])\n camera_obstacle.y.append(tracking_result[7])\n camera_obstacle.w.append(tracking_result[8])\n camera_obstacle.h.append(tracking_result[9])\n camera_obstacle.header.stamp = data.header.stamp\n pub.publish(camera_obstacle)\n prev_time=time.time()\n print(\"============================= camera python =================================\")\n print(prev_time - start_time)\n print(\"============================= camera python =================================\")\n\n fps = 1.0\n result_image = frame\n if tracking_results is not None and tracking_results.shape[0] > 0:\n drawer = Draw(frame, tracking_results, fps)\n result_image = drawer.bboxes()\n cv2.imshow(\"img\",result_image)\n cv2.waitKey(1)\n\ndef main():\n \n rospy.init_node('perception_camera_image', anonymous=True)\n rospy.Subscriber(sub_image_topic, Image, callBack)\n rospy.spin()\n\n pass\n\nif __name__ == '__main__':\n main()\n \n\n ","sub_path":"src/perception/perception_camera/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100977661","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# ./woa_processing.py -s noaa -c nsst-dif-500 --dateini 2019-01-01 --dateend 2019-05-01 --directory /home/vnc/Tmp/oceaner/boyas/\n# ./woa_processing.py -s noaa -c nsst-dif-500 --dateini 1985-01-01 --dateend 2005-12-31 --directory /home/vnc/Tmp/oceaner/boyas/\n# ./woa_processing.py -i /mnt/kaitain/data/woa/raw/temperature/2005_2017/woa18_A5B7_t12_01.nc -v t_an -md 2 -c p-m-dst -s noaa -m 750 -dir /mnt/kaitain/data/woa/temperature -di 2005-12-01 -de 2017-12-31\nimport re\nimport os\nimport sys\nimport csv\nimport numpy as np\nimport netCDF4\nimport argparse\nimport datetime\nimport calendar\nimport cftime\n\nfrom afai.db import check_nc, add_nc, check_mapserver, add_mapserver_file, check_geotiff, add_geotiff, get_files\n\nfrom esgf.utilities import delete_file, mapfile_template, get_image_bounds, get_image_stats, execute_command\n\nfrom esgf.processing import convert_to_rgb, create_generic_lut, save_geotiff, save_array, save_data, \\\n fill_data, get_mask, get_etopo1_mask, get_raw_data, create_relief, fill_no_data, get_view\n\nfrom esgf.image import create_kmz, create_png\n\nfrom 
get_values import get_latlon_data\n\n\nnp.warnings.filterwarnings('ignore')\n\n\ndef valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n msg = \"Fecha en formato no valido: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1', 's', 'si'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0', 'no'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef csv_filename(id_bouy, sensor, composition, directory, date_ini, date_end):\n filename_out = os.path.join(directory, composition, sensor + '_buoy_' + str(\n id_bouy) + '_' + date_ini + '_' + date_end + '.csv')\n return filename_out\n\n\ndef add_to_db(product_date, product_end, composition, sensor, variable, filename_out, filename_map, filename_rlf, mapserver_url):\n\n rid = check_nc(os.path.basename(filename_out))\n\n if not rid:\n rid = add_nc(product_date.strftime(\"%Y\"), product_date.strftime(\"%V\"), product_date.strftime(\"%j\"),\n composition, sensor, 'L4', variable, os.path.basename(filename_out), filename_out,\n product_date.strftime(\"%Y-%m-%d\"), url=None, month=product_date.strftime(\"%m\"),\n product_end=product_end.strftime(\"%Y-%m-%d\"))\n\n x_min, y_min, x_max, y_max = get_image_bounds(filename_out)\n\n if not check_mapserver(rid):\n add_mapserver_file(rid, filename_map, mapserver_url + \"?map=\" + filename_map + \"&\", x_min, y_min, x_max, y_max)\n else:\n print(\"\\033[93mWARNING\\033[0m: Mapfile on database [%s]\" % str(rid))\n\n if not check_geotiff(rid, os.path.basename(filename_out)):\n minv, maxv, meanv, std_dev, bit_depth, projection, pix_res, no_data = get_image_stats(filename_out)\n\n add_geotiff(rid, os.path.basename(filename_out), filename_out, 'GTiff',\n projection, bit_depth, pix_res, minv, maxv, meanv, std_dev, no_data,\n x_min, y_min, x_max, y_max)\n else:\n print(\"\\033[93mWARNING\\033[0m: Relief on database [%s]\" % str(rid))\n\n else:\n print(\"\\033[93mWARNING\\033[0m: GTiff on database [%s]\" % str(rid))\n\n\ndef get_month_range(img_date):\n r = calendar.monthrange(int(img_date.strftime(\"%Y\")), int(img_date.strftime(\"%m\")))\n\n i = datetime.datetime(int(img_date.strftime(\"%Y\")), int(img_date.strftime(\"%m\")), 1)\n e = datetime.datetime(int(img_date.strftime(\"%Y\")), int(img_date.strftime(\"%m\")), r[1])\n\n return i, e\n\n\nepilog = \"\"\"\n\"\"\"\nparsr = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)\n\nparsr.add_argument('-i', '--filename_in', required=False, type=str, help='Archivo de entrada.')\nparsr.add_argument('-d', '--debug', required=False, type=str2bool, nargs='?', help='Activa debug.')\nparsr.add_argument('-di', '--dateini', required=False, help='', type=valid_date)\nparsr.add_argument('-de', '--dateend', required=False, help='', type=valid_date)\nparsr.add_argument('-md', '--maxdistance', required=False, type=int, help='')\nparsr.set_defaults(maxdistance=1)\nparsr.add_argument('-c', '--composition', required=True, type=str, help='')\nparsr.add_argument('-s', '--sensor', required=True, type=str, help='')\nparsr.add_argument('-v', '--variable', required=False, type=str, help='')\nparsr.add_argument('-m', '--depth', required=False, type=int, help='')\nparsr.add_argument('-dir', '--directory', required=False, type=str, help='')\nparsr.add_argument('-gt', '--lut', required=False, type=str2bool, help='Usar LUT generico')\nparsr.set_defaults(lut=False)\n\nopts = 
parsr.parse_args()\n\n\ndef get_arr_info(arr):\n\n    nrows, ncols = arr.shape\n\n    xres = (xmax - xmin) / float(ncols)\n    yres = (ymax - ymin) / float(nrows)\n\n    geot = (xmin, xres, 0, ymax, 0, -yres)\n\n    return geot, nrows, ncols\n\n\nif opts.composition in ['p-m-dst']:\n#if opts.filename_in and opts.composition and opts.variable and opts.directory:\n\n    #if opts.composition in ['p-m-nsst']:\n    if opts.filename_in and opts.composition and opts.variable and opts.directory \\\n            and opts.depth and opts.dateini and opts.dateend:\n\n        mapserver_url = 'https://simar.conabio.gob.mx:8443/cgi-bin/mapserv'\n\n        if not opts.filename_in:\n            print(\"\\033[91mERROR\\033[0m: File not found [%s].\" % opts.filename_in)\n            sys.exit()\n\n        var_name = opts.variable.lower()\n\n        filename_in = opts.filename_in\n        filename_in = os.path.realpath(filename_in)\n        dirname, basename = os.path.split(filename_in)\n\n        ncfile = netCDF4.Dataset(filename_in, 'r')\n\n        filename_in = re.sub('_[0-9]{6}-[0-9]{6}', '', filename_in)\n\n        var_data = ncfile.variables[var_name]\n\n        lats = ncfile.variables['lat']\n        lons = ncfile.variables['lon']\n        time = ncfile.variables['time']\n\n        date_product = cftime.num2date(time[:][0], time.units, '360_day')\n\n        # opts.dateini/opts.dateend were already converted to datetime by valid_date,\n        # so parsing them again with strptime would raise a TypeError\n        date_end = opts.dateend\n        date_ini = opts.dateini\n\n        xmin, ymin, xmax, ymax = [lons[:].min(), lats[:].min(), lons[:].max(), lats[:].max()]\n\n        filename_mask = \"satmo_proclim_mask.tif\"\n        filename_mask = os.path.realpath(filename_mask)\n\n        data = np.array(var_data[0][:])\n\n        arr = None\n        if opts.depth == 500:\n            arr = data[37]\n        elif opts.depth == 750:\n            arr = data[42]\n        elif opts.depth == 1000:\n            arr = data[47]\n        else:\n            print(\"Wrong depth\")\n            sys.exit()\n\n        if arr.any():\n            dirname, basename = os.path.split(filename_in)\n\n            dirname_out = os.path.join(opts.directory, date_ini.strftime(\"%Y\") + \"_\" + date_end.strftime(\"%Y\"))\n            if not os.path.isdir(dirname_out):\n                os.makedirs(dirname_out)\n\n            dr = date_ini.strftime(\"%Y%j\") + date_end.strftime(\"%Y%j\")\n\n            filename_out = os.path.join(dirname_out, \"NOAA%s_WOA2018_%s_M%s.tif\" % (dr, str(opts.composition).upper() + str(opts.depth),\n                                                                                   date_product.strftime(\"%m\")))\n            filename_png = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \".png\")\n            filename_kmz = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \".kmz\")\n            filename_kmt = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_tmp.kmz\")\n            filename_tmp = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_tmp.tif\")\n            filename_map = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \".map\")\n            filename_rlf = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_relief.tif\")\n            filename_fll = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_fll.tif\")\n            filename_sub = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_sub.tif\")\n            filename_msk = os.path.join(dirname_out, os.path.splitext(filename_out)[0] + \"_msk.tif\")\n\n            geot, nrows, ncols = get_arr_info(arr)\n\n            delete_file(filename_tmp)\n\n            save_geotiff(filename_tmp, arr, ncols, nrows, geot, no_datav=9.96921E36)\n\n            if os.path.isfile(filename_tmp):\n                print(\"\\033[92mPROCESSED\\033[0m: File created %s\" % filename_tmp)\n\n                cmd = ['gdal_edit.py', '-a_srs', 'EPSG:4326', '-a_nodata', '-32767', filename_tmp]\n                execute_command(cmd, opts.debug)\n\n                delete_file(filename_fll)\n\n                cmd = ['gdal_fillnodata.py', '-md', str(opts.maxdistance), filename_tmp, 
filename_fll]\n execute_command(cmd, opts.debug)\n\n if os.path.isfile(filename_fll):\n\n cmd = ['gdal_edit.py', '-a_srs', 'EPSG:4326', '-a_nodata', '-32767', filename_fll]\n execute_command(cmd, opts.debug)\n\n delete_file(filename_sub)\n\n cmd = ['gdalwarp', '-r', 'cubic', '-ts', '6401', '3201', '-te', '-123', '1', '-59', '33',\n '-srcnodata', '-32767', '-dstnodata', '-32767', '-co', 'TILED=YES', '-co', 'COMPRESS=LZW',\n filename_fll, filename_sub]\n execute_command(cmd, opts.debug)\n\n if os.path.isfile(filename_sub):\n\n delete_file(filename_msk)\n\n mask = get_mask(filename_mask)\n mask_etopo1 = get_etopo1_mask(depth=opts.depth)\n\n arr, ndv, cols, rows, trans, proj = get_raw_data(filename_sub)\n\n arr_rst = np.multiply(arr, mask_etopo1)\n arr_rst = np.multiply(arr_rst, mask)\n\n save_data(arr_rst, ndv, trans, proj, cols, rows, filename_out)\n\n del arr_rst\n del arr\n\n filename_lut = None\n\n if var_name == 't_an':\n filename_lut = \"luts/color-sst-depth.txt\"\n filename_lut = os.path.realpath(filename_lut)\n\n if os.path.isfile(filename_out):\n\n delete_file(filename_rlf)\n\n create_relief(filename_out, filename_rlf, filename_lut)\n\n if os.path.isfile(filename_rlf):\n cmd = ['gdal_edit.py', '-a_srs', 'EPSG:4326', '-a_nodata', '0', filename_rlf]\n execute_command(cmd, opts.debug)\n\n x_min, y_min, x_max, y_max = get_image_bounds(filename_out)\n\n mapfile = open(filename_map, 'w')\n mapfile.write(\n mapfile_template(mapserver_url, filename_rlf, filename_map, x_min, y_min, x_max, y_max))\n mapfile.close()\n\n if os.path.isfile(filename_rlf):\n create_png('1985', '01', opts.composition, var_name, filename_rlf, filename_png,\n date_ini=date_ini, date_end=date_end, date_product=date_product, depth=opts.depth)\n\n if os.path.isfile(filename_out) and os.path.isfile(filename_rlf):\n create_kmz('1985', '01', opts.composition, var_name, filename_out, filename_rlf, filename_kmz, filename_kmt)\n\n add_to_db(date_ini, date_end, opts.composition + str(opts.depth), opts.sensor, var_name, filename_out, filename_map, filename_rlf,\n mapserver_url)\n\n if not opts.debug:\n\n delete_file(filename_fll)\n delete_file(filename_tmp)\n delete_file(filename_sub)\n else:\n print(\"\\033[91mERROR\\033[0m: Variables not set!\")\n\nelif re.search(\"^nsst-dif-\", opts.composition) and opts.dateini and opts.dateend:\n\n date_ini = datetime.datetime.strptime(str(opts.dateini), \"%Y-%m-%d %H:%M:%S\")\n date_ini = date_ini.strftime(\"%Y-%m-%d\")\n\n date_end = datetime.datetime.strptime(str(opts.dateend), \"%Y-%m-%d %H:%M:%S\")\n date_end = date_end.strftime(\"%Y-%m-%d\")\n\n from db import MyDb\n\n depth = re.findall('\\d+', opts.composition)\n\n if not str(depth[0]) in ['500', '750', '1000']:\n print(\"Wrong depth [%s]\" % str(depth[0]))\n sys.exit()\n\n db = MyDb()\n buoys_list = db.get_buoys_type(type=\"OCR\")\n\n for b in buoys_list:\n\n print(\"Processing [%s] [%s]\" % (str(b['id']), b['code']))\n\n file_array = db.get_files('day', 'ghrsst', opts.dateini, opts.dateend)\n\n filename_out = csv_filename(b['id'], 'noaa', 'noaa_nsst-dif-' + str(depth[0]),\n opts.directory, date_ini, date_end)\n\n if not os.path.exists(os.path.dirname(filename_out)):\n os.makedirs(os.path.dirname(filename_out))\n\n writer = csv.writer(open(os.path.realpath(filename_out), 'w'))\n writer.writerow([\"Latitude: \" + str(b['latitude'])])\n writer.writerow([\"Longitude: \" + str(b['longitude'])])\n writer.writerow([\"Description: \" + str(b['description'])])\n writer.writerow([\"rid\", \"date\", \"value\"])\n\n for (i, f) in 
enumerate(file_array):\n if os.path.isfile(f['path']):\n refe = db.get_file_yearmonth('p-m-dst' + str(depth[0]), 'noaa',\n f['product_date'].strftime(\"%Y\"), int(f['product_date'].strftime(\"%m\")))\n if refe:\n orig = get_latlon_data(f['path'], str(b['latitude']), str(b['longitude']))\n noaa = get_latlon_data(refe['path'], str(b['latitude']), str(b['longitude']))\n dif = float(orig) - float(noaa)\n writer.writerow([f['rid'],\n f['product_date'],\n str(dif)])\n\n print(\"Buoy [%s] with file [%s]\" % (b['id'], os.path.basename(f['path'])))\n\nelif re.search(\"^max-m-nsst-dif-\", opts.composition):\n #print(opts.composition)\n depth = re.findall('\\d+', opts.composition)\n print(depth)\n\nelif re.search(\"^min-m-nsst-dif-\", opts.composition):\n #print(opts.composition)\n depth = re.findall('\\d+', opts.composition)\n print(depth)\n\nelse:\n print(\"\\033[93mWARNING\\033[0m: Composition not valid!\")","sub_path":"pysimar/woa_processing.py","file_name":"woa_processing.py","file_ext":"py","file_size_in_byte":14564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58201252","text":"import itertools\n\nclass IntcodeComputer():\n\n # map defining the recognized opcodes\n # and how many parameters they take\n NPARAM = {1:3, 2:3, 3:1, 4:1, 5:2, 6:2, 7:3, 8:3, 9:1, 99:0}\n\n # initialize with the program to be run\n def __init__(self, intcode, inputs=None, storeOutputs=False):\n self.intcode = intcode[:]\n self.pointer = 0\n self.halt = False\n self.wait = False\n self.relbase = 0\n\n self.setInputs(inputs)\n self.setStoreOutputs(storeOutputs)\n\n # wrapper for setting inputs, used by the constructor or the user\n def setInputs(self, inputs):\n if inputs is not None:\n self.inputs = list(reversed(inputs))\n else:\n self.inputs = None\n\n # wrapper for programmatically adding inputs\n # inputs are consumed by popping off a stack, so\n # adding inputs must be done by adding to the front\n # use this when the computer is in the WAIT state before running again\n def addInput(self, code):\n if self.inputs is not None:\n self.inputs.insert(0, code)\n\n # wrapper for storing outputs, used by the constructor or the user\n def setStoreOutputs(self, flag=True):\n self.storeOutputs = flag\n if self.storeOutputs:\n self.outputs = []\n\n # run the computer from pointer 0 with the stored program\n # the WAIT boolean PAUSES execution if there are not enough inputs\n # the only time WAIT is currently True is if there is a pending input\n # so assert that there's at least one input before resetting WAIT and continuing\n def run(self):\n if self.wait:\n assert(len(self.inputs) > 0)\n self.wait = False\n while not self.halt and not self.wait:\n self.compute()\n\n # for debugging purposes\n # print the current pointer, the halt and wait booleans, the inputs and outputs\n # inputs should be represented in reverse order so that inputs can be popped on and off\n def inspectState(self):\n print('Pointer: {:3d} Halt: {:5s} Wait: {:5s} RelBase: {} Inputs: {} Outputs: {}'.format(\n self.pointer,\n str(self.halt),\n str(self.wait),\n self.relbase,\n self.inputs,\n self.outputs,\n ))\n\n\n # abstract the access and write so that it can also allocate more memory if necessary\n # could have been done more cutely with __index__, I suppose. 
anyway,\n # this is so that a program can access / write to memory beyond the size of the program itself\n # whenever accessing or writing to a position, make sure that the position exists\n # the method for doing this is simple: add memory till, given a position\n # then all access and writes can proceed without out-of-bound errors\n # access: p ~ intcode[p]; p, q ~ intcode[p:q], but with memory addition\n def access(self, p, q=None):\n self.addMemoryTill(p)\n if q is None:\n return self.intcode[p]\n else:\n self.addMemoryTill(q)\n return self.intcode[p:q]\n \n # write: p, value ~ intcode[p] = value\n def write(self, p, value):\n self.addMemoryTill(p)\n self.intcode[p] = value\n\n # increase allocated memory until p is accessible\n def addMemoryTill(self, p):\n while p > len(self.intcode)-1:\n self.intcode.append(0)\n\n\n # given a pointer, cut up the opcode and get the modes\n # Suppose the opcode is 01102: this is opcode 2, with the params in mode 1 1 0\n # so get 110 by doing 01102 // 100 ( = 11), right justifying with 0s ( = 011), and reversing\n def getModes(self, p):\n opcode = self.access(p) % 100\n if opcode not in IntcodeComputer.NPARAM:\n raise Exception('{} is not a valid op code'.format(opcode))\n parcodes = str(self.access(p) // 100).rjust(IntcodeComputer.NPARAM[opcode], '0')\n modes = list( reversed([ int(i) for i in parcodes ]) )\n return modes\n\n # implementation of parameter modes\n # don't use for write positions; instead, use getAddress()\n # mode 0: position mode , return access(p)\n # mode 1: immediate mode, return p\n # mode 2: relative mode , return access(p + relbase)\n def getValue(self, p, mode):\n if mode == 0:\n return self.access(p)\n elif mode == 1:\n return p\n elif mode == 2:\n return self.access(p + self.relbase)\n else:\n raise Exception(f'Unknown (read) parameter mode {mode}')\n\n # implementation of parameter mode 2 for write addresses\n # mode 0: position mode, return o\n # mode 2: relative mode, return o + relbase\n def getAddress(self, o, mode):\n if mode == 0:\n return o\n elif mode == 2:\n return o + self.relbase\n else:\n raise Exception(f'Unknown (write) parameter mode {mode}')\n\n # main computer\n # processes the intcode at the current pointer\n def compute(self):\n\n p = self.pointer\n\n # opcode is the last two digits of intcode[p]\n # modes are the parameter modes, in the correct order (and of the correct length of parameters)\n # parameters are just intcode[p+1] through intcode[p+1 + nParams]\n # newp is where the pointer should move to; this is usually p + nParams + 1\n # compute it first, and reset it if the instruction demands it\n\n opcode = self.access(p) % 100\n modes = self.getModes(p)\n params = self.access(p+1, p+IntcodeComputer.NPARAM[opcode]+1)\n newp = p + IntcodeComputer.NPARAM[opcode] + 1\n\n # add\n if opcode == 1:\n x , y , o = params\n mx, my, mo = modes\n self.write(self.getAddress(o, mo), self.getValue(x, mx) + self.getValue(y, my))\n\n # multiply\n elif opcode == 2:\n x , y , o = params\n mx, my, mo = modes\n self.write(self.getAddress(o, mo), self.getValue(x, mx) * self.getValue(y, my))\n\n # input\n elif opcode == 3:\n o = params[0]\n mo = modes [0]\n\n # get input from the command line if inputs is None\n if self.inputs is None:\n i = input('Provide input: ')\n\n # get input by popping off the inputs list if it exists\n else:\n\n # consume the next available input\n if len(self.inputs) > 0:\n i = self.inputs.pop()\n\n # if there aren't any, pause execution\n # the run loop will break when this wait boolean is True\n 
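# (illustrative usage sketch; the five-value program is hypothetical, not from this repo)\n                # a typical driver for this wait/resume protocol:\n                #   cpu = IntcodeComputer([3, 0, 4, 0, 99], inputs=[], storeOutputs=True)\n                #   cpu.run()         # blocks on opcode 3 and returns with cpu.wait == True\n                #   cpu.addInput(42)  # satisfy the pending read\n                #   cpu.run()         # resumes in place; cpu.outputs == [42], cpu.halt == True\n                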
# immediately end the computation\n # the next time run is called, wait will be reset to False\n # no pointers have changed, no modifications were made\n # so execution will resume from where it last left off\n else:\n self.wait = True\n return\n\n self.write(self.getAddress(o, mo), int(i))\n\n # output\n elif opcode == 4:\n x = params[0]\n mx = modes [0]\n\n # print output or store the outputs\n value = self.getValue(x, mx)\n if not self.storeOutputs:\n print(value)\n else:\n self.outputs.append(value)\n\n # jump if true\n elif opcode == 5:\n t , v = params\n mt, mv = modes\n if self.getValue(t, mt) != 0:\n newp = self.getValue(v, mv)\n\n # jump if false\n elif opcode == 6:\n t , v = params\n mt, mv = modes\n if self.getValue(t, mt) == 0:\n newp = self.getValue(v, mv)\n\n # less than\n elif opcode == 7:\n a , b , o = params\n ma, mb, mo = modes\n if self.getValue(a, ma) < self.getValue(b, mb):\n self.write(self.getAddress(o, mo), 1)\n else:\n self.write(self.getAddress(o, mo), 0)\n\n # equal to\n elif opcode == 8:\n a , b , o = params\n ma, mb, mo = modes\n if self.getValue(a, ma) == self.getValue(b, mb):\n self.write(self.getAddress(o, mo), 1)\n else:\n self.write(self.getAddress(o, mo), 0)\n\n # relative base adjust\n elif opcode == 9:\n x = params[0]\n mx = modes [0]\n value = self.getValue(x, mx)\n self.relbase += value\n\n # halt\n elif opcode == 99:\n self.pointer = newp\n self.halt = True\n return\n\n # error\n else:\n raise Exception(f'{opcode} is not a valid op code')\n\n self.pointer = newp\n self.halt = False","sub_path":"2019/aoc19/intcode.py","file_name":"intcode.py","file_ext":"py","file_size_in_byte":8748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518469722","text":"\"\"\"\nClone of 2048 game.\n\"\"\"\nimport poc_2048_gui\nimport random\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\n# Offsets for computing tile indices in each direction.\n# DO NOT MODIFY this dictionary.\nOFFSETS = {UP: (1, 0),\n DOWN: (-1, 0),\n LEFT: (0, 1),\n RIGHT: (0, -1)}\n\n\ndef merge(line):\n \"\"\"\n More clear with a better subscription\n \"\"\"\n len_line = len(line)\n\n # merge the line\n # boolen line to check if this num has been merged\n bool_line = [0] * len_line\n # a merged line\n merged_line = []\n # process of merging\n var_i = 0\n for var_j in range(len_line):\n if line[var_j] != 0:\n # when merged_line is empty\n # just push the first non-zero num to there\n if var_i == 0:\n merged_line.append(line[var_j])\n var_i += 1\n # check if the last merged_num equals to current_num\n # check if the last merged_num have been merged or folded\n elif merged_line[var_i - 1] == line[var_j] and\\\n bool_line[var_i - 1] == 0:\n merged_line[var_i - 1] += line[var_j]\n bool_line[var_i - 1] = 1\n # else fold the last last merged_num\n # and add a new merged_num\n else:\n merged_line.append(line[var_j])\n bool_line[var_i - 1] = 1\n var_i += 1\n\n # add zero to the end of merged_line\n for var_i in range(len_line):\n if var_i >= len(merged_line):\n merged_line.append(0)\n # end\n return merged_line\n\n# the test\n# print(merge([0, 0, 2, 3, 4, 3, 0, 2, 2, 2]))\n\n\nclass TwentyFortyEight:\n \"\"\"\n Class to run the game logic.\n \"\"\"\n\n def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n\n # Initiate the 2D grid\n self._grid = [[0 for dummy_i in range(\n self._grid_width)] for dummy_j in range(\n self._grid_height)]\n\n self._intial_tiles = {\n UP: [(0, i) 
for i in range(grid_width)],\n DOWN: [(grid_height - 1, i) for i in range(grid_width)],\n LEFT: [(i, 0) for i in range(grid_height)],\n RIGHT: [(i, grid_width - 1) for i in range(grid_height)],\n }\n self._move_helper_func = {\n UP: self.helper_merge_up,\n DOWN: self.helper_merge_down,\n LEFT: self.helper_merge_left,\n RIGHT: self.helper_merge_right,\n }\n self._flag_changed = False\n self.reset()\n\n def reset(self):\n \"\"\"\n Reset the game so the grid is empty.\n \"\"\"\n for var_i in range(self._grid_height):\n for var_j in range(self._grid_width):\n self._grid[var_i][var_j] = 0\n\n self.new_tile()\n self.new_tile()\n\n def __str__(self):\n \"\"\"\n Return a string representation of the grid for debugging.\n \"\"\"\n res_str = \"[\"\n for var_i in range(self._grid_height):\n if var_i == 0:\n res_str += \"[\"\n else:\n res_str += \" [\"\n for var_j in range(self._grid_width):\n if var_j < self._grid_width - 1:\n res_str += str(self._grid[var_i][var_j]) + \", \"\n else:\n res_str += str(self._grid[var_i][var_j])\n if var_i < self._grid_height - 1:\n res_str += \"]\\n\"\n else:\n res_str += \"]\"\n res_str += \"]\"\n return res_str\n\n def get_grid_height(self):\n \"\"\"\n Get the height of the board.\n \"\"\"\n return self._grid_height\n\n def get_grid_width(self):\n \"\"\"\n Get the width of the board.\n \"\"\"\n return self._grid_width\n\n def helper_merge_up(self, temp_list_new, ele):\n \"\"\"\n Move the merged list to the original grid, UP direction\n \"\"\"\n for var_i in range(self.get_grid_height()):\n if self._flag_changed is False and\\\n self._grid[var_i][ele[1]] != temp_list_new[var_i]:\n self._flag_changed = True\n self.set_tile(var_i, ele[1], temp_list_new[var_i])\n\n def helper_merge_down(self, temp_list_new, ele):\n \"\"\"\n Move the merged list to the original grid, DOWN direction\n \"\"\"\n for var_i in range(self.get_grid_height()):\n if self._flag_changed is False and\\\n self._grid[self.get_grid_height() -\n var_i - 1][ele[1]] != temp_list_new[var_i]:\n self._flag_changed = True\n self.set_tile(\n self.get_grid_height() - var_i - 1,\n ele[1], temp_list_new[var_i])\n\n def helper_merge_left(self, temp_list_new, ele):\n \"\"\"\n Move the merged list to the original grid, LEFT direction\n \"\"\"\n for var_i in range(self.get_grid_width()):\n if self._flag_changed is False and\\\n self._grid[ele[0]][var_i] != temp_list_new[var_i]:\n self._flag_changed = True\n self.set_tile(ele[0], var_i, temp_list_new[var_i])\n\n def helper_merge_right(self, temp_list_new, ele):\n \"\"\"\n Move the merged list to the original grid, RIGHT direction\n \"\"\"\n for var_i in range(self.get_grid_width()):\n if self._flag_changed is False and\\\n self._grid[ele[0]][self.get_grid_width() -\n var_i - 1] != temp_list_new[var_i]:\n self._flag_changed = True\n self.set_tile(\n ele[0], self.get_grid_width() -\n var_i - 1, temp_list_new[var_i])\n\n def move(self, direction):\n \"\"\"\n Move all tiles in the given direction and add\n a new tile if any tiles moved.\n \"\"\"\n self._flag_changed = False\n for ele in self._intial_tiles[direction]:\n temp_index_list = []\n temp_list = []\n temp_index_list.append(ele)\n while temp_index_list[-1][0] >= 0 and\\\n temp_index_list[-1][0] < self.get_grid_height() and\\\n temp_index_list[-1][1] >= 0 and\\\n temp_index_list[-1][1] < self.get_grid_width():\n temp_list.append(\n self.get_tile(\n temp_index_list[-1][0], temp_index_list[-1][1]))\n temp_index_list.append(\n (temp_index_list[-1][0] + OFFSETS[direction][0],\n temp_index_list[-1][1] + 
OFFSETS[direction][1]))\n temp_list_new = merge(temp_list)\n # make the merged tiles into the original ones\n self._move_helper_func[direction](temp_list_new, ele)\n # If any tile moved, gen a new tile\n if self._flag_changed:\n self.new_tile()\n\n def new_tile(self):\n \"\"\"\n Create a new tile in a randomly selected empty\n square. The tile should be 2 90% of the time and\n 4 10% of the time.\n \"\"\"\n # Count the num of zero nums\n non_zero_count = 0\n for var_i in range(self._grid_height):\n for var_j in range(self._grid_width):\n if self._grid[var_i][var_j] == 0:\n non_zero_count += 1\n\n # Generate a random num according to the whole the size of zero nums\n random_choice = random.randrange(0, non_zero_count)\n\n var_k = 0\n generated_new_tile = False\n\n # Iterate the whole grid to find the random place\n for var_i in range(self._grid_height):\n for var_j in range(self._grid_width):\n if generated_new_tile is False and\\\n self._grid[var_i][var_j] == 0:\n if var_k == random_choice:\n # Choose to put which of 2 or 4\n random_num = random.randrange(100)\n if random_num < 90:\n self._grid[var_i][var_j] = 2\n else:\n self._grid[var_i][var_j] = 4\n # Already generated a new tile\n generated_new_tile = True\n # count the zero nums to certain the place\n var_k += 1\n\n def set_tile(self, row, col, value):\n \"\"\"\n Set the tile at position row, col to have the given value.\n \"\"\"\n self._grid[row][col] = value\n\n def get_tile(self, row, col):\n \"\"\"\n Return the value of the tile at position row, col.\n \"\"\"\n return self._grid[row][col]\n\npoc_2048_gui.run_gui(TwentyFortyEight(4, 4))\nprint(TwentyFortyEight(2, 2).__str__())\n","sub_path":"Computer_Fundamentals/Principles_Computing/Week.2/2048_full.py","file_name":"2048_full.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"184955680","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n@author: Pieter Huycke\nGitHub: phuycke\n\"\"\"\n\n\n#%%\n\n# import: general and scikit-learn specific\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.datasets import make_circles\n\n#%%\n\n# general plotting parameters\nsns.set(style = \"ticks\", \n color_codes = True)\n\n#%%\n\n# make the dataset\ncoord, label = make_circles(n_samples = 150,\n noise = .075, \n factor = 0.5, \n random_state = 1)\ncoord += 4\n\n# make it a Pandas DataFrame\ndf = pd.DataFrame({'Feature 1': coord[:,0], \n 'Feature 2': coord[:,1],\n 'Class': label})\n\n# remap the labels\nmymap = {0: 'Flower A', 1: 'Flower B'}\ndf = df.applymap(lambda s: mymap.get(s) if s in mymap else s)\n\n#%%\n\n# plot the data\nax = sns.scatterplot(x = df['Feature 1'], \n y = df['Feature 2'], \n hue = df['Class'],\n markers = [\"o\", \"s\"],\n palette = sns.color_palette('colorblind', 2))\n\n# Put the legend out of the figure\nplt.legend(bbox_to_anchor = (1.05, 1), \n loc = 2, \n borderaxespad = 0.)\n\n#%%\n\n# reshape label, and put together\nlabel = np.reshape(label, (150, 1))\ndataset = np.concatenate((coord, label),axis=1)\n\ndel coord, label\n\n#%%\n\n# save the data in a NumPy array\npath = r'C:\\Users\\pieter\\Downloads\\GitHub\\modeling-master\\AY 2019 - 2020\\Lesson 10\\Practical session\\exercises'\nnp.save(os.path.join(path, 'dataset_novice.npy'),\n dataset)","sub_path":"AY 2019 - 2020/Lesson 10/Practical 
session/solutions/preproc/ch5_circular_preproc.py","file_name":"ch5_circular_preproc.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"101364703","text":"#!/usr/bin/env python\n\nimport os\nimport sys \nimport json\nfrom collections import OrderedDict\n\nlib_path = os.path.abspath('../../mdwf_lib')\nsys.path.append(lib_path)\nimport mdwf_functions as mdwf\n\n\"\"\" A python script to help setup the optimization phase of a MD simulation.\"\"\"\n\njobid = sys.argv[1]\njobtype = sys.argv[2]\n\ndef main():\n    # open and modify local job details file. \n    ljdf_t = mdwf.read_local_job_details_file(\".\", \"local_job_details.json\")\n    ljdf_t['CurrentJobId'] = jobid\n    ljdf_t['JobStatus'] = 'submitted'\n    if \"opt\" in jobtype:\n        ljdf_t[\"RunCountDown\"] = ljdf_t[\"TotalRuns\"]\n\n\n\n    with open(\"local_job_details.json\", 'w') as outfile:\n        json.dump(ljdf_t, outfile, indent=2)\n    outfile.close()\n\n\n    # check Ok for job to run:\n#    mdwf.check_disk_quota(ljdf[\"Account\"],ljdf[\"DiskSpaceCutoff\"])\n#    mdwf.check_for_pausejob()\n    \n    # initialize job counter if optimize flag set. \n    \nif __name__ == \"__main__\":\n    main()\n\n\n'''\n#!/usr/bin/env python\n\n#!/usr/bin/env python\n\nimport sys\nfrom ../mdwf_lib import mdwf_functions as mdwf\n\n\"\"\" A python script to post process data files after a MD simulation.\"\"\"\n\njobid = argv[2]\njobtype = argv[3]\n\ndef main():\n\n    # initialize job counter if optimize flag set. \n    if not \"optimize\" in jobtype:\n        ljdf[\"RunCountDown\"] = ljdf[\"TotalRuns\"]\n\n\n\n\nif __name__ == \"__main__\":\n    main()\n\n'''\n\n","sub_path":"Setup_and_Config/prejob_processing.temp.py","file_name":"prejob_processing.temp.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"250038811","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA look at how we can read .csv files with pandas,\nusing stock data for Icelandair over the last few years.\nThen we try a little plotting with the matplotlib package.\n@author: helgi\n\"\"\"\n\nimport datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# read the file fetched from Keldan: https://www.keldan.is/market/shares/ICEAIR \niceair_data = pd.read_csv(\"iceair.csv\")\n\n# look at what the first few rows of the data look like\nprint(iceair_data.head())\n\n# What else can we learn about our dataset?\nprint(iceair_data.info())\n\n# rename the columns to something more convenient\niceair_data.columns = ['DateTime', 'Verd', 'Magn']\n\n# Convert the DateTime column into a proper datetime:\niceair_data['DateTime'] = pd.to_datetime(iceair_data['DateTime'])\n\n# drop missing values (NaN) with .dropna()\niceair_data = iceair_data.dropna()\n\n# Now check what the data looks like...\nprint(iceair_data.head())\nprint(iceair_data.info())\n# ...much better!\n\n
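# (illustrative aside; 'tiny' is a hypothetical mini-frame, not part of the original script)\n# the pd.to_datetime call above is what makes date comparisons like the WOW filter further\n# below work, because the column becomes dtype datetime64:\n#   tiny = pd.DataFrame({'DateTime': pd.to_datetime(['2019-03-27', '2019-03-29']), 'Verd': [8.0, 7.5]})\n#   tiny[tiny['DateTime'] > datetime.datetime(2019, 3, 28)]  # keeps only the 2019-03-29 row\n\n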
## Several ways to access a column, e.g. the DateTime column:\n# print(iceair_data['DateTime'])\n# print(iceair_data.DateTime)\n# print(iceair_data.loc[:,'DateTime'])\n\n### A quick look at some statistics\n\n# What is the highest price Icelandair has reached?\nprint(\"Highest price:\", max(iceair_data['Verd']))\n\n# What is the average volume over the whole period?\nprint(\"Average volume:\", iceair_data['Magn'].mean())\n\n# We can also filter our dataset:\n# look only at the data since WOW air went bankrupt\nwow_hrun_date = datetime.datetime(2019, 3, 28)\niceair_since_wow_hrun = iceair_data[iceair_data['DateTime'] > wow_hrun_date]\n\n# -------- Plotting with matplotlib --------\n\n\n# Plot the Icelandair share price for the last 5 years\niceAirStock = plt.plot(iceair_data['DateTime'],iceair_data['Verd'])\nplt.show()\n\n# Plot the Icelandair share price since WOW went bankrupt\niceAirStockSinceWOW = plt.plot(iceair_since_wow_hrun['DateTime'],\n                               iceair_since_wow_hrun['Verd'])\nplt.show()\n","sub_path":"Vika_4/iceair.py","file_name":"iceair.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436663539","text":"import time\nstartTime=time.clock()\nimport sys\nfrom msTools import *\nfrom fvTools import *\nimport random\nimport allel\nimport numpy as np\nimport math\n\n'''usage eg:\npMisPol=`python2 stairwayPlotToPMisPol.py AOM.meru_mela.sfs.sp.summary`\npython2 testing_convert_to_FVs.py spNeut.msOut.gz 2L,2R,3L,3R 5000 11 0.25 $pMisPol AOM_partial_stats.txt Anopheles-gambiae-PEST_CHROMOSOMES_AgamP3.accessible.fa anc.meru_mela.fa ./ spNeut.msOut.fvec\n'''\n\nif len(sys.argv)!=12:\n    sys.exit(\"usage:\\npython2 testing_convert_to_FVs.py trainingDataFileName chrArmsForMasking subWinSize numSubWins unmaskedFracCutoff pMisPol partialStatAndDafFileName maskFileName ancestralArmFaFileName statDir fvecFileName\\n\")\nelse:\n    trainingDataFileName, chrArmsForMasking, subWinSize, numSubWins, unmaskedFracCutoff, pMisPol, partialStatAndDafFileName, maskFileName, ancestralArmFaFileName, statDir, fvecFileName = sys.argv[1:]\n\nsubWinSize,numSubWins = int(subWinSize),int(numSubWins)\nassert subWinSize>0 and numSubWins>1\ntotalPhysLen=(subWinSize*numSubWins)\ntrainingDataFileObj,sampleSize,numInstances=openMsOutFileForSequentialReading(trainingDataFileName)\nchrArmsForMasking=chrArmsForMasking.split(\",\")\nunmaskedFracCutoff,pMisPol=float(unmaskedFracCutoff),float(pMisPol)\nif unmaskedFracCutoff>1.0:\n    sys.exit(\"unmaskedFracCutoff must lie within [0, 1].\\n\")\nif pMisPol>1.0:\n    sys.exit(\"pMisPol must lie within [0, 1].\\n\")\nstandardizationInfo=readStatsDafsComputeStandardizationBins(partialStatAndDafFileName,nBins=50,pMisPol=pMisPol)\n
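# (explanatory sketch; 'flips' and 'nSnps' are illustrative names, not from this script)\n# pMisPol models ancestral-state mis-polarization: in the main loop further below, each\n# segregating site is flipped (0 <-> 1 on every haplotype) with probability pMisPol,\n# which is equivalent to a per-site draw like\n#   flips = np.random.binomial(1, pMisPol, nSnps)  # 1 -> flip that site on all haplotypes\n# so e.g. pMisPol=0.05 corrupts roughly 5% of sites, mimicking empirical polarization error.\n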
if maskFileName.lower() in [\"none\", \"false\"]:\n    unmaskedFracCutoff=1.0\n    unmasked=[True]*totalPhysLen\n    sys.stderr.write(\"Warning: not doing any masking! (i.e. mask.fa file for the chr arm with all masked sites N'ed out, or at least the reference with Ns, is not provided)\\n\")\n    maskFileName=False\nelse:\n    if ancestralArmFaFileName.lower() in [\"none\", \"false\"]:\n        maskData=readMaskDataForTraining(maskFileName,totalPhysLen,subWinSize,chrArmsForMasking,shuffle=True,cutoff=unmaskedFracCutoff)\n    else:\n        maskData=readMaskAndAncDataForTraining(maskFileName,ancestralArmFaFileName,totalPhysLen,subWinSize,chrArmsForMasking,shuffle=True,cutoff=unmaskedFracCutoff)\n    if len(maskData) < numInstances:\n        sys.stderr.write(\"Warning: only found %d masking windows passing the cutoff but %d instances are needed; sampling masks with replacement.\\n\" %(len(maskData), numInstances))\n        drawWithReplacement=True\n    else:\n        drawWithReplacement=False\n\n# NOTE: the original text between 'len(maskData)' above and the tail of getSnpIndicesInSubWins\n# below was lost to extraction garbling. The definitions of subWinBounds, statNames, header and\n# statVals here are a conservative reconstruction inferred from how those names are used later\n# in this script; the exact statistic list and messages in the original may have differed.\nimport scipy.stats  # scipy.stats.skew/kurtosis/mstats.mode are used below but no import survives\nsubWinBounds=[]\nfor i in range(numSubWins):\n    subWinStart=i*subWinSize+1\n    subWinBounds.append((subWinStart, subWinStart+subWinSize-1))\nsummaryNames=[\"Mean\",\"Median\",\"Mode\",\"Lower95%\",\"Lower50%\",\"Upper50%\",\"Upper95%\",\"Max\",\"Var\",\"SD\",\"Skew\",\"Kurt\"]\nstatNames=[\"iHS\",\"nSL\"]+[\"%s-%s\" %(base, summary) for base in [\"HAF\",\"HAFunique\",\"phi\",\"kappa\",\"SFS\",\"SAFE\"] for summary in summaryNames]\nheader=\"\\t\".join([\"%s_win%d\" %(statName, i) for statName in statNames for i in range(numSubWins)])\nstatVals={}\nfor statName in statNames:\n    statVals[statName]=[]\n\ndef getSnpIndicesInSubWins(subWinBounds, positions):\n    snpIndicesInSubWins=[]\n    for subWinIndex in range(numSubWins):\n        snpIndicesInSubWins.append([])\n    subWinIndex=0\n    for i in range(len(positions)):\n        while not (positions[i]>=subWinBounds[subWinIndex][0] and positions[i]<=subWinBounds[subWinIndex][1]):\n            subWinIndex+=1\n        snpIndicesInSubWins[subWinIndex].append(i)\n    return snpIndicesInSubWins\nquantiles={\"Lower95%\":2.5,\"Lower50%\":25,\"Upper50%\":75,\"Upper95%\":97.5}\nif statDir.lower() in [\"none\",\"false\",\"default\"]:\n    statDir='./'\nstatFiles=[]\nfor subWinIndex in range(numSubWins):\n    statFileName='/'.join((\"%s/%s.subWin%d.stats\" %(statDir, trainingDataFileName.split(\"/\")[-1].replace(\".msOut.gz\",\"\"), subWinIndex)).split('//'))\n    statFiles.append(open(statFileName,\"w\"))\n    statFiles[-1].write(\"\\t\".join(statNames)+\"\\n\")\nif fvecFileName.lower() in [\"none\",\"false\",\"default\"]:\n    fvecFileName=\"%s.fvec\" %trainingDataFileName.split(\"/\")[-1].replace(\".msOut.gz\",\"\")\nfvecFile=open(fvecFileName,\"w\")\nfvecFile.write(header+\"\\n\")\n\nfor instanceIndex in range(numInstances):\n    hapArrayIn,positionArray=readNextMsRepToHaplotypeArrayIn(trainingDataFileObj,sampleSize,totalPhysLen)\n    for statName in statNames:\n        statVals[statName].append([])\n    if maskFileName:\n        if drawWithReplacement:\n            unmasked=random.choice(maskData)\n        else:\n            unmasked=maskData[instanceIndex]\n        assert len(unmasked)==totalPhysLen\n    snpIndicesToKeep=[x for x in range(len(positionArray)) if unmasked[positionArray[x]-1]]  # computed outside the 'if' so the no-mask case (unmasked all True) is handled too\n    if len(snpIndicesToKeep)==0:\n        for subWinIndex in range(numSubWins):\n            for statName in statNames:\n                appendStatValsForMonomorphic(statName,statVals,instanceIndex,subWinIndex)\n            statFiles[subWinIndex].write(\"\\t\".join([str(statVals[statName][instanceIndex][subWinIndex]) for statName in statNames])+\"\\n\")\n    else:\n        haps=allel.HaplotypeArray(hapArrayIn,dtype='i1').subset(sel0=snpIndicesToKeep)\n        if pMisPol>0:\n            misPolarizeCorrectionIndex=np.random.binomial(1,pMisPol,len(haps))\n            for i in range(len(misPolarizeCorrectionIndex)):\n                if misPolarizeCorrectionIndex[i]==1:\n                    for j in range(len(haps[i])):\n                        if haps[i][j]==0:\n                            haps[i][j]=1\n                        else:\n                            haps[i][j]=0\n        genos=haps.to_genotypes(ploidy=2)\n        alleleCounts=genos.count_alleles()\n        positions=[positionArray[x] for x in snpIndicesToKeep]\n        precomputedStats={}\n        dafs=alleleCounts[:,1]/float(sampleSize)\n        ihsVals=allel.stats.selection.ihs(haps,positions,use_threads=False,include_edges=False)\n        nonNanCount=[x for x in np.isnan(ihsVals)].count(False)\n        nonInfCount=[x for x in np.isinf(ihsVals)].count(False)\n        sys.stderr.write(\"number of iHS scores: %d (%d non-nan; %d non-inf)\\n\" %(len(ihsVals),nonNanCount,nonInfCount))\n        if nonNanCount==0:\n            precomputedStats[\"iHS\"]=[]\n            for subWinIndex in range(numSubWins):\n                precomputedStats[\"iHS\"].append([])\n        else:\n            ihsVals=standardize_by_allele_count_from_precomp_bins(ihsVals,dafs,standardizationInfo[\"iHS\"])\n            precomputedStats[\"iHS\"]=windowVals(ihsVals,subWinBounds,positions,keepNans=False,absVal=True)\n        nslVals=allel.stats.selection.nsl(haps,use_threads=False)\n        nonNanCount=[x for x in np.isnan(nslVals)].count(False)\n        sys.stderr.write(\"number of nSL scores: %d (%d non-nan)\\n\" 
%(len(nslVals),nonNanCount))\n if nonNanCount==0:\n precomputedStats[\"nSL\"]=[]\n for subWinIndex in range(numSubWins):\n precomputedStats[\"nSL\"].append([])\n else:\n nslVals=standardize_by_allele_count_from_precomp_bins(nslVals,dafs,standardizationInfo[\"nSL\"])\n precomputedStats[\"nSL\"]=windowVals(nslVals,subWinBounds,positions,keepNans=False,absVal=True)\n snpIndicesInSubWins=getSnpIndicesInSubWins(subWinBounds,positions)\n for subWinIndex in range(numSubWins):\n subWinStart,subWinEnd = subWinBounds[subWinIndex]\n unmaskedFrac=unmasked[subWinStart-1:subWinEnd].count(True)/float(subWinSize)\n assert unmaskedFrac>=unmaskedFracCutoff\n if len(snpIndicesInSubWins[subWinIndex])==0:\n for statName in statNames:\n appendStatValsForMonomorphic(statName,statVals,instanceIndex,subWinIndex)\n else:\n hapsInSubWin=haps.subset(sel0=snpIndicesInSubWins[subWinIndex])\n for statName in [x for x in statNames if x[0:3]!=\"HAF\" and x[0:3]!=\"phi\" and x[0:3]!=\"kap\" and x[0:3]!=\"SFS\" and x[0:3]!=\"SAF\"]:\n calcAndAppendStatVal(alleleCounts, positions, statName, subWinStart, subWinEnd, statVals, instanceIndex, subWinIndex, hapsInSubWin, unmasked, precomputedStats)\n haplotypes={}\n for i in range(len(hapsInSubWin[0])):\n haplotype=[hapsInSubWin[x][i] for x in range(len(hapsInSubWin))]\n haplotype=\"\".join(str(x) for x in haplotype)\n if haplotype in haplotypes:\n haplotypes[haplotype].append(i)\n else:\n haplotypes[haplotype]=[i]\n HAF=[]\n HAFunique={}\n for i in haplotypes:\n HAFunique[i]=0\n for j in range(len(hapsInSubWin)):\n if hapsInSubWin[j][haplotypes[i][0]]==1:\n HAFunique[i]+=sum([hapsInSubWin[j][x] for x in range(len(hapsInSubWin[j]))])\n for j in range(len(haplotypes[i])):\n HAF.append(HAFunique[i])\n phi=[]\n kappa=[]\n SAFE=[]\n for i in range(len(hapsInSubWin)):\n phi.append(0)\n kappa.append([])\n phiDenom=0\n for j in haplotypes:\n phi[i]+=(int(list(j)[i])*HAFunique[j]*len(haplotypes[j]))\n if int(list(j)[i])==1 and HAFunique[j] not in kappa[i] and HAFunique[j]!=0:\n kappa[i].append(HAFunique[j])\n phiDenom+=(HAFunique[j]*len(haplotypes[j]))\n phi[i]/=float(phiDenom)\n kappa[i]=len(kappa[i])/float(len(set([HAFunique[x] for x in HAFunique if HAFunique[x]!=0])))\n if dafs[snpIndicesInSubWins[subWinIndex]][i]==0 or dafs[snpIndicesInSubWins[subWinIndex]][i]==1:\n SAFE.append(0.0)\n else:\n SAFE.append((phi[i]-kappa[i])/float(math.sqrt(dafs[snpIndicesInSubWins[subWinIndex]][i]*(1-dafs[snpIndicesInSubWins[subWinIndex]][i]))))\n for i in [\"HAF\", \"HAFunique\", \"phi\", \"kappa\", \"SFS\", \"SAFE\"]:\n if i==\"SFS\":\n windowStats=dafs[snpIndicesInSubWins[subWinIndex]]\n elif i==\"HAFunique\":\n windowStats=[eval(i)[x] for x in eval(i)]\n else:\n windowStats=eval(i)\n statVals[i+\"-Mean\"][instanceIndex].append(np.mean(windowStats))\n statVals[i+\"-Median\"][instanceIndex].append(np.median(windowStats))\n if(len(np.unique(windowStats,return_counts=True)[1])==1):\n statVals[i+\"-Mode\"][instanceIndex].append(windowStats[0])\n else:\n if(sorted(np.unique(windowStats,return_counts=True)[1])[-1]!=sorted(np.unique(windowStats,return_counts=True)[1])[-2]):\n statVals[i+\"-Mode\"][instanceIndex].append(scipy.stats.mstats.mode(windowStats)[0][0])\n else:\n mode=min(windowStats)\n for j in range(1,51):\n if len([x for x in windowStats if x>=(min(windowStats)+(j*((max(windowStats)-min(windowStats))/50))) and x<(min(windowStats)+((j+1)*((max(windowStats)-min(windowStats))/50)))]) >= len([x for x in windowStats if x>=mode and x<(mode+((max(windowStats)-min(windowStats))/50))]):\n 
mode=min(windowStats)+(j*((max(windowStats)-min(windowStats))/50))\n statVals[i+\"-Mode\"][instanceIndex].append(mode+((max(windowStats)-min(windowStats))/100))\n for j in quantiles:\n statVals[i+\"-\"+j][instanceIndex].append(np.percentile(windowStats,quantiles[j]))\n statVals[i+\"-Max\"][instanceIndex].append(max(windowStats))\n statVals[i+\"-Var\"][instanceIndex].append(np.var(windowStats))\n statVals[i+\"-SD\"][instanceIndex].append(np.std(windowStats))\n statVals[i+\"-Skew\"][instanceIndex].append(scipy.stats.skew(windowStats))\n statVals[i+\"-Kurt\"][instanceIndex].append(scipy.stats.kurtosis(windowStats))\n statFiles[subWinIndex].write(\"\\t\".join([str(statVals[statName][instanceIndex][subWinIndex]) for statName in statNames])+\"\\n\")\n outVec=[]\n for statName in statNames:\n outVec+=normalizeFeatureVec(statVals[statName][instanceIndex])\n fvecFile.write(\"\\t\".join([str(x) for x in outVec])+\"\\n\")\n\ncloseMsOutFile(trainingDataFileObj)\nfor subWinIndex in range(numSubWins):\n statFiles[subWinIndex].close()\nfvecFile.close()\nsys.stderr.write(\"total time spent calculating summary statistics and generating feature vectors: %g secs\\n\" %(time.clock()-startTime))\n\n","sub_path":"testing_convert_to_FVs.py","file_name":"testing_convert_to_FVs.py","file_ext":"py","file_size_in_byte":12229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362987584","text":"from pygame.sprite import Group, Sprite\nfrom utils.settings import *\nimport pygame\nfrom pygame import Rect\nimport json\n\n\nclass TerrainGroup(Group):\n \"\"\"\n A group of sprites representing the terrain\n At the base layer\n Every thing will drawn onto this\n \"\"\"\n\n layer = 0\n\n def __init__(self, width_matrix, height_matrix, sprite_matrix, *sprites):\n super(TerrainGroup, self).__init__(*sprites)\n self.sprite_matrix = sprite_matrix\n self.width_matrix = width_matrix\n self.height_matrix = height_matrix\n\n @classmethod\n def load(cls, level_matrix, tile_cache):\n \"\"\"\n Load all the json file tiles as Terrain(Sprite)\n And return them as a SpriteGroup -> TerrainGroup\n :return TerrainGroup\n \"\"\"\n raw_tiles = {}\n height_matrix = len(level_matrix)\n width_matrix = len(level_matrix[0])\n sprite_matrix = [[None for x in range(width_matrix)] for y in range(height_matrix)]\n sprites = []\n\n with open(TILES_FILENAME, 'r', encoding='utf8') as file:\n content = file.read()\n tiles = json.loads(content)\n for tile in tiles:\n raw_tiles[tile['id']] = {\n 'name': tile['name'],\n 'image': tile['image']\n }\n\n offset_x = 0\n offset_y = 0\n\n for y, line in enumerate(level_matrix):\n for x, case in enumerate(line):\n rect = Rect(x * TILE_WIDTH + offset_x, y * TILE_HEIGHT + offset_y, TILE_WIDTH, TILE_HEIGHT)\n sprite = TerrainSprite(rect, tile_cache.get_image(raw_tiles[case]['image']))\n sprite_matrix[y][x] = sprite\n sprites.append(sprite)\n\n return cls(width_matrix, height_matrix, sprite_matrix, sprites)\n\n def update(self, camera):\n \"\"\"call the update method of every member sprite\n Calls the update method of every member sprite. 
All arguments that\n were passed to this method are passed to the Sprite update function.\n \"\"\"\n for s in self.sprites():\n s.update(camera)\n\n def action_replace_case(self, scene):\n \"\"\"\n Replace at the cursor pos the case by the case_selected\n Then reload the terrain\n \"\"\"\n tile = scene.drawing_scene_mode.selected_tile\n\n raw_cursor_x, raw_cursor_y = pygame.mouse.get_pos()\n cursor_x = raw_cursor_x - scene.camera.rect.x\n cursor_y = raw_cursor_y - scene.camera.rect.y\n\n # We check if the cursor is in the map\n if 0 <= cursor_x < TILE_WIDTH * self.width_matrix and 0 <= cursor_y < TILE_HEIGHT * self.height_matrix:\n case_x = cursor_x // TILE_WIDTH\n case_y = cursor_y // TILE_HEIGHT\n offset_x = scene.camera.offset_x\n offset_y = scene.camera.offset_y\n\n sprite_to_change = self.sprite_matrix[case_y][case_x]\n sprite_to_change.rect = Rect(case_x * TILE_WIDTH + offset_x, case_y * TILE_HEIGHT + offset_y, TILE_WIDTH, TILE_HEIGHT)\n sprite_to_change.image = scene.tile_cache.get_image(tile['image'])\n\n def action_save_level(self, editor_scene):\n \"\"\"\n Save the level on disk\n \"\"\"\n #TODO\n pass\n # json_level = json.dumps(editor_scene.matrix_level)\n # with open('maps/level_1.json', 'w') as file:\n # file.write(json_level)\n\n\nclass TerrainSprite(Sprite):\n \"\"\"\n A Sprite representing the terrain\n Both image and rect are used to display the sprite\n \"\"\"\n\n def __init__(self, rect, image):\n super(TerrainSprite, self).__init__()\n self.image = image\n self.rect = rect\n\n def update(self, camera):\n \"\"\"\n We need to update the sprites if we move the camera\n :param camera: Camera\n \"\"\"\n self.rect = self.rect.move(camera.last_move)\n","sub_path":"layers/terrain.py","file_name":"terrain.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96672518","text":"# from app import app\nimport urllib.request,json\nfrom .models import Source\n# from .models import source\nfrom .models import Article\nfrom datetime import datetime\n\n# Source = source.Source\n\n# Article = article.Article\n\n\n# Getting api key\napi_key = None\n# Getting the news base url\nbase_url = None\nbase_article_url = None\n\ndef configure_request(app):\n global api_key,base_url,base_article_url\n api_key = app.config['NEWS_API_KEY']\n base_url = app.config['NEWS_API_BASE_URL']\n base_article_url = app.config[\"ARTICLES_API_BASE_URL\"]\n# Getting api key\n# api_key = app.config['NEWS_API_KEY']\n\n# Getting the news base url\n# base_url = app.config[\"NEWS_API_BASE_URL\"]\n# base_article_url = app.config[\"ARTICLES_API_BASE_URL\"]\n\ndef get_sources(source):\n '''\n Function that gets the json response to our url request\n '''\n get_source_url = base_url.format(api_key)\n\n with urllib.request.urlopen(get_source_url) as url:\n get_sources_data = url.read()\n get_sources_response = json.loads(get_sources_data)\n\n source_results = None\n\n if get_sources_response['sources']:\n source_results_list = get_sources_response['sources']\n source_results = process_results(source_results_list)\n\n\n return source_results\n\ndef process_results(source_news_list):\n '''\n Function that processes the source result and transform them to a list of Objects\n\n Args:\n source_list: A list of dictionaries that contain source details\n\n Returns :\n source_results: A list of source objects\n '''\n source_results = []\n for source_item in source_news_list:\n id = source_item .get('id')\n name = source_item .get('name')\n 
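# (illustrative sample; the field values are hypothetical) each source_item is one dict\n        # from the News API 'sources' array, shaped roughly like:\n        #   {'id': 'bbc-news', 'name': 'BBC News', 'description': '...', 'url': 'http://www.bbc.co.uk/news',\n        #    'category': 'general', 'country': 'gb'}\n        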
description = source_item .get('description')\n url = source_item .get('url')\n category = source_item .get('category')\n country = source_item .get('country')\n\n source_object = Source(id,name,description,url,category,country)\n source_results.append(source_object)\n\n return source_results\n\ndef getting_articles(id):\n getting_article_url = base_article_url.format(id,api_key)\n with urllib.request.urlopen(getting_article_url) as url:\n article_news_data = url.read()\n article_news_response = json.loads(article_news_data)\n\n article_news_results = None\n # if article_news_response:\n # author = article_news_response.get('author')\n # title = article_news_response.get('title')\n # description = article_news_response.get('description')\n # url = article_news_response.get('url')\n # image = article_news_response.get('urlToImage')\n # publishedAt = article_news_response.get('publishedAt')\n # content = article_news_response.get('content')\n\n # article_object = Article(author,title,description,url,image,publishedAt,content)\n\n if article_news_response['articles']:\n article_news_list = article_news_response['articles']\n article_news_results = process_articles(article_news_list)\n\n\n return article_news_results\n\ndef process_articles(articles_list):\n '''\n Function that processes the articles results and transform them to a list of Objects\n Args:\n article_list: A list of dictionaries that contain sources details\n Returns:\n article_results: A list of sources objects\n '''\n articles_object = []\n for article_response in articles_list:\n # id = article_response.get('id')\n author = article_response.get('author')\n title = article_response.get('title')\n description = article_response.get('description')\n url = article_response.get('url')\n urlToImage = article_response.get('urlToImage')\n publishedAt = article_response.get('publishedAt')\n content = article_response.get('content')\n dates = datetime.strptime(publishedAt, '%Y-%m-%dT%H:%M:%SZ')\n date = dates.strftime('%Y.%m.%d')\n articles_result = Article( author, title, description, url, urlToImage, date, content)\n articles_object.append(articles_result)\n return articles_object\n \n\n\n\n\n\n# def get_sources(category):\n# '''\n# Function that gets the json response to our url request\n# '''\n# get_source_url = base_url.format(category,api_key)\n\n# with urllib.request.urlopen(get_source_url) as url:\n# get_sources_data = url.read()\n# get_sources_response = json.loads(get_sources_data)\n\n# source_results = None\n\n# if get_sources_response['sources']:\n# source_results_list = get_sources_response['sources']\n# source_results = process_results(source_results_list)\n\n\n# return source_results\n\n# def process_results(source_news_list):\n# '''\n# Function that processes the source result and transform them to a list of Objects\n\n# Args:\n# source_list: A list of dictionaries that contain source details\n\n# Returns :\n# source_results: A list of source objects\n# '''\n# source_results = []\n# for source_item in source_news_list:\n# id = source_item .get('id')\n# name = source_item .get('name')\n# description = source_item .get('description')\n# url = source_item .get('url')\n# category = source_item .get('category')\n# country = source_item .get('country')\n\n# source_object = Source(id,name,description,url,category,country)\n# source_results.append(source_object)\n\n# return 
source_results\n","sub_path":"app/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"582662890","text":"import json\nimport os\nimport random\nimport shutil\nimport threading\nfrom pathlib import Path\n\nfrom PIL import Image\nfrom django.db import transaction\n\nfrom MryangService import ServiceHelper\nfrom MryangService.mpath import MediaPath\nfrom MryangService.utils import EmailUtil\nfrom Mryang_App.DBHelper import MediaHelp\n\n# TODO: also delete every file/folder under src that no longer exists. Worth extracting into a shared helper?\nfrom Mryang_App.models import Media, MPath, Dir\nfrom frames import yutils, ypath, logger, Globals, TmpUtil\nfrom frames.xml import XMLBase\n\nFFMPEG_KEY = 'FFMPEG_KEY'\nFFPROBE_KEY = 'FFPROBE_KEY'\nmovie_config = XMLBase.list_cfg_infos('media_info') # XMLMedia.get_infos()\nsrc_root = movie_config.dir_root\nffmpeg_tools = str(TmpUtil.input_note(FFMPEG_KEY, 'Enter the path to the ffmpeg binary (see link_gitProj_files.txt for the download):\\n'))\nffprobe_tools = str(\n    TmpUtil.input_note(FFPROBE_KEY, 'Enter the path to the ffprobe binary (see link_gitProj_files.txt for the download):\\n'))\nmulit_audio_dir = movie_config.base_info.mulit_audio_dir\n\nlock = threading.Lock()\n\n\ndef src_dbs():\n    return MediaPath.pdc().src_list\n\n\ndef media_root(dir_root):\n    return ypath.join(dir_root, src_root)\n\n\ndef desc_path(media_db: Media):\n    return ypath.join(media_root(media_db.desc_mpath.path), media_db.desc_path)\n\n\n# check that the stored state still matches what is on disk\ndef check_media_db_state(media_db: Media):\n    if media_db.state == MediaHelp.STATE_AUDIO_FINISH:\n        if not os.path.exists(media_db.abs_path):\n            media_db.state = MediaHelp.STATE_INIT\n    if media_db.state == MediaHelp.STATE_COMPRESS_HLS or media_db.state == MediaHelp.STATE_VIDEO_TS or media_db.state == MediaHelp.STATE_VIDOE_COMPRESS_FINISH:\n        if not os.path.exists(media_db.abs_path) or media_db.desc_mpath is None:\n            media_db.state = MediaHelp.STATE_INIT\n        desc = desc_path(media_db) # ypath.join(media_db.desc_mpath.path, media_db.desc_path)\n        if not os.path.exists(desc):\n            media_db.state = MediaHelp.STATE_AUDIO_FINISH\n\n\n# create Dir records for the folders on disk\ndef gen_dir():\n    def db_dir_exist(db_dirs):\n        # delete DB rows whose directory no longer exists.\n        exist_pic_dirs = {}\n        for pic_db in db_dirs:\n            digout = False\n            for src_db in src_dbs():\n                if media_root(src_db.path) in pic_db.abs_path and os.path.isdir(pic_db.abs_path):\n                    exist_pic_dirs[pic_db.abs_path] = pic_db\n                    digout = True\n                    break\n            if not digout:\n                pic_db.delete()\n        return exist_pic_dirs\n\n    def get_db_dirs():\n        for dir in src_dbs():\n            ypath.del_none_dir(media_root(dir.path))\n        all_pic_dirs = Dir.objects.filter(type=yutils.M_FTYPE_MOIVE)\n        db_dirs = db_dir_exist(all_pic_dirs)\n        return db_dirs\n\n    def folder_call(folder_list, is_root):\n        if is_root:\n            return\n        # if is_root:\n        #     if folder_list.path not in exist_pic_dirs:\n        #         exist_pic_dirs[folder_list.path] = ServiceHelper.create_dir(exist_pic_dirs, folder_list,\n        #                                                                     yutils.M_FTYPE_MOIVE)\n        #     return\n        save_list = []\n        for dir in folder_list:\n            if dir.path not in exist_media_dirs:\n                db_dir = ServiceHelper.create_dir(exist_media_dirs, dir,\n                                                  yutils.M_FTYPE_MOIVE, save_it=False)\n                exist_media_dirs[dir.path] = db_dir\n                save_list.append(db_dir)\n        # PicHelper.handle_files_md5(src_file, dir_md5)\n        for save in save_list:\n            save.save()\n        pass\n\n    exist_media_dirs = get_db_dirs()\n    for src_db in src_dbs():\n        # ypath.path_res(src_dir)\n        ypath.ergodic_folder(media_root(src_db.path), folder_call_back=folder_call)\n    return exist_media_dirs\n\n\n
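# (illustrative note; the example paths are hypothetical) handle_meida_db_exists below\n# compares DB rows against a normalized on-disk listing: Path.rglob('*.*') plus\n# as_posix()/ypath.convert_path yields forward-slash paths, so the membership test\n# against Media.abs_path is an exact string comparison, e.g.\n#   [p.as_posix() for p in Path('media/src').rglob('*.*')]\n#   -> ['media/src/movies/a.mp4', 'media/src/movies/b.mkv']\n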
# delete DB records whose local files have disappeared\ndef handle_meida_db_exists():\n    posix_file_list = []\n    for dir in src_dbs():\n        posix_file_list.extend(\n            [ypath.convert_path(file.as_posix()) for file in Path(media_root(dir.path)).rglob('*.*')])\n    all_media_set = Media.objects.all()\n    with transaction.atomic():\n        for media_db in all_media_set:\n            if media_db.abs_path not in posix_file_list:\n                media_db.delete()\n\n\n# fetch or build the DB record for a media file (Media, MPath)\ndef get_media_mpath_db(src_db, file_path: str, mdirs):\n    # ypath.convert_path(src.replace(self.src_root, '')),\n    # str(file.as_posix()), exist_media_dirs, self.desc_root\n    if os.path.isdir(file_path):\n        return None\n    if not yutils.is_movie(file_path):\n        return None\n    file_path = ypath.convert_path(file_path)\n    media_db_query = Media.objects.filter(abs_path=file_path)\n    cur_media_db = None\n    if len(media_db_query) > 0:\n        cur_media_db = media_db_query[0]\n        return cur_media_db\n    else:\n        # target = ypath.decompose_path(\n        #     file, str(src_root), str(convert_root), exten='.mp4')\n        media_db = Media()\n        if (os.path.dirname(file_path) in mdirs):\n            media_db.src_dir = mdirs[os.path.dirname(file_path)] # media_db.src_mpath.dir_id\n        else:\n            logger.info('this file has no parent folder record: ' + file_path)\n            return None\n        media_db.abs_path = file_path\n        media_db.state = MediaHelp.STATE_INIT\n        media_db.file_name = os.path.basename(file_path)\n        media_db.src_mpath = src_db\n        media_db.desc_path = ypath.del_exten(file_path.replace(media_root(src_db.path), '')) + '.mp4'\n\n        # media_db.nginx_path = target.replace(str(convert_root.as_posix()), '')\n        # create_db_list.append(media_db)\n        # media_db.folder_key = dm_dict[os.path.dirname(file_path)]\n        return media_db\n\n\n# probe the source with ffprobe and normalize the audio track\ndef analysis_audio_info(media_db: Media, src_db):\n    def movie_info_res(cmdlist, _):\n        if len(cmdlist) <= 0:\n            modify_state(media_db, MediaHelp.STATE_AUDIO_FINISH)\n            return\n        jsonbean = json.loads(''.join(cmdlist))\n        if 'streams' not in jsonbean.keys():\n            modify_state(media_db, MediaHelp.STATE_SRC_ERROR)\n            return\n        streamlist = jsonbean['streams']\n        format = jsonbean['format']\n        media_db.md5 = yutils.get_md5(media_db.abs_path)\n        media_db.duration = int(float(format['duration']))\n        # cur_file_info['duration'] =\n        media_db.size = int(format['size'])\n        if len(streamlist) <= 0:\n            modify_state(media_db, MediaHelp.STATE_AUDIO_FINISH)\n            return\n        audio_streams = []\n        decode_map = ''\n        for stream_item in streamlist:\n            if stream_item['codec_type'] == 'audio':\n                logger.info(str(stream_item))\n                audio_streams.append(stream_item)\n            else:\n                if stream_item['codec_type'] == 'video':\n                    # this is a video stream: record the video info\n                    media_db.codec_type = stream_item['codec_name']\n                    media_db.codec_long_name = stream_item['codec_long_name']\n                    media_db.width = int(stream_item['width'])\n                    media_db.height = int(stream_item['height'])\n                    try:\n                        media_db.r_frame_rate = round(eval(stream_item['r_frame_rate']))\n                        media_db.avg_frame_rate = round(eval(stream_item['avg_frame_rate']))\n                    except ZeroDivisionError:\n                        media_db.r_frame_rate = 0\n                        media_db.avg_frame_rate = 0\n\n                decode_map += ' -map 0:' + str(stream_item['index'])\n\n        
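# (illustrative, heavily trimmed; not actual captured output) the ffprobe JSON parsed\n        # above has roughly this shape, which the track selection below picks through:\n        #   {\"streams\": [{\"index\": 0, \"codec_type\": \"video\", \"codec_name\": \"h264\", \"width\": 1920, ...},\n        #                {\"index\": 1, \"codec_type\": \"audio\", \"codec_name\": \"aac\", \"tags\": {\"title\": \"国语\"}}],\n        #    \"format\": {\"duration\": \"5400.041000\", \"size\": \"1234567890\"}}\n        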
# multiple audio tracks: prefer a Mandarin ('国语'/'中语') title tag when choosing which to keep\n        digout = False\n        if len(audio_streams) > 1:\n            for audio_stream in audio_streams:\n                if 'tags' in audio_stream and 'title' in audio_stream['tags'] \\\n                        and ('国语' == audio_stream['tags']['title'] or '中语' == audio_stream['tags']['title']):\n                    decode_map += ' -map 0:' + str(audio_stream['index'])\n                    media_db.audio_long_name = audio_stream['codec_long_name']\n                    media_db.audio_name = audio_stream['codec_name']\n                    digout = True\n                    break\n        else:\n            if len(audio_streams) == 1:\n                media_db.audio_long_name = audio_streams[0]['codec_long_name']\n                media_db.audio_name = audio_streams[0]['codec_name']\n                logger.info('this video has only one audio track, no conversion needed: ' + media_db.abs_path)\n                modify_state(media_db, MediaHelp.STATE_AUDIO_FINISH)\n                return\n\n        if not digout:\n            if len(audio_streams) >= 1:\n                media_db.audio_long_name = audio_streams[0]['codec_long_name']\n                media_db.audio_name = audio_streams[0]['codec_name']\n            # media_db.audio_long_name = stream_item['codec_long_name']\n            # media_db.audio_name = stream_item['codec_name']\n            for audio_stream in audio_streams:\n                decode_map += ' -map 0:' + str(audio_stream['index'])\n            # out_content += str(index) + ':' + str(audio_stream) + '\\n'\n            # index += 1\n            # select_audio = len(audio_streams)\n            # while len(audio_streams) <= select_audio or select_audio < 0:\n            #     select_audio = int(input(out_content + 'select an audio track: '))\n\n        with lock:\n            mulit_audio_path = ypath.join(MediaPath.src(), mulit_audio_dir)\n            desc_mulit_path = ypath.decompose_path(media_db.abs_path, src_db.path, str(mulit_audio_path))\n\n            out_file = desc_mulit_path + '.chi' + ypath.file_exten(media_db.abs_path)\n            ypath.create_dirs(desc_mulit_path)\n            if os.path.exists(out_file):\n                os.remove(out_file)\n            logger.info(out_file)\n            copy_cmd = ffmpeg_tools + ' -i \\\"' + media_db.abs_path + '\\\"' + decode_map + ' -vcodec copy -acodec copy \\\"' + out_file + '\\\"'\n            yutils.process_cmd(copy_cmd, done_call=rm_on_audio_copy, param=(media_db.abs_path, out_file, desc_mulit_path))\n\n        if MediaHelp.is_err(media_db.state):\n            return\n\n    # after the audio remux finishes: archive the source file in the multi-audio dir and move the remuxed file into its place.\n    def rm_on_audio_copy(_, files):\n        shutil.move(files[0], files[2])\n        shutil.move(files[1], files[0])\n        modify_state(media_db, MediaHelp.STATE_AUDIO_FINISH)\n\n    if media_db.state >= MediaHelp.STATE_AUDIO_FINISH:\n        logger.info('audio track already converted for this file: ' + media_db.abs_path)\n        return\n    logger.info('starting conversion: ' + media_db.abs_path)\n    yutils.process_cmd(\n        ffprobe_tools + ' \\\"' + media_db.abs_path + '\\\" -print_format json -show_format -show_streams',\n        done_call=movie_info_res)\n\n\n# transcode the video\ndef compress_media(media_db: Media):\n    if MediaHelp.is_err(media_db.state):\n        return\n    if media_db.desc_mpath is None:\n        with lock:\n            media_db.desc_mpath = MediaPath.pdc().search_by_abs_path(MediaPath.desc(), is_src=False)\n        pass\n    d_abs_path = desc_path(media_db)  # ypath.join(media_db.desc_mpath.path, media_db.desc_path)\n    # if the overwrite switch is on, rebuild the target from the DB regardless of whether it already exists.\n    if media_db.state < MediaHelp.STATE_COMPRESS_HLS:\n        # marked as not yet transcoded\n        if os.path.exists(d_abs_path):\n            if Globals.MEDIA_SERVICE_COVER_DESC:\n                os.remove(d_abs_path)\n            else:\n                logger.info('MediaService: target exists, so just update the DB state')\n                modify_state(media_db, MediaHelp.STATE_COMPRESS_HLS)\n    else:\n        if not os.path.exists(d_abs_path):  # state says transcoding finished but the target file is missing: re-transcode\n            modify_state(media_db, MediaHelp.STATE_AUDIO_FINISH)\n    if media_db.state >= MediaHelp.STATE_COMPRESS_HLS:\n        logger.info('this file has already been transcoded: ' + media_db.abs_path)\n        return\n    if os.path.exists(d_abs_path):\n        os.remove(d_abs_path)\n    ypath.create_dirs(d_abs_path)\n    # media_db.nginx_path = target\n    
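# (sketch of the decision below; these example commands mirror the ones assembled at runtime)\n    # h264 sources are remuxed, or simply symlinked when they are already .mp4 with aac/mp3\n    # audio; everything else is fully re-encoded:\n    #   ffmpeg -i in.mkv -vcodec copy -acodec copy out.mp4   # container swap, no re-encode\n    #   ffmpeg -i in.avi out.mp4                             # full re-encode\n    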
if media_db.codec_type == 'h264':\n        logger.info('this video is an h264 stream, it can be copied directly: ' + media_db.abs_path)\n        can_audio_copy = (media_db.audio_name == 'aac' or media_db.audio_name == 'mp3')\n        if not can_audio_copy:\n            EmailUtil.send('audio needs manual checking for this file: ' + media_db.abs_path + ' \\nmedia_db.audio_name:' + media_db.audio_name)\n            # input('audio needs manual checking for this file: ' + media_db.abs_path)\n        if media_db.abs_path.endswith('.mp4') and can_audio_copy:\n            os.symlink(media_db.abs_path, d_abs_path)\n        else:\n            # remux here: copy the video stream, transcode the audio only if needed\n            audio_cmd = '-acodec copy' if can_audio_copy else '-acodec mp3'\n            # audio_cmd = '-acodec copy'\n            yutils.process_cmd(\n                ffmpeg_tools + ' -i \\\"' + media_db.abs_path + '\\\" -vcodec copy ' + audio_cmd + ' \\\"' + d_abs_path + '\\\"')\n\n    else:\n        # '\\\"%s\\\" -i \\\"%s\\\" \\\"%s\\\"' % (ffmpeg_tools, src_path, target)\n        logger.info('this video is not h264, re-encoding: ' + media_db.abs_path)\n        yutils.process_cmd('\\\"%s\\\" -i \\\"%s\\\" \\\"%s\\\"' % (ffmpeg_tools, media_db.abs_path, d_abs_path))\n    if not os.path.exists(d_abs_path):\n        logger.error('source file error: %s' % d_abs_path)\n        modify_state(media_db, MediaHelp.STATE_SRC_ERROR)\n    else:\n        modify_state(media_db, MediaHelp.STATE_COMPRESS_HLS)\n\n\ndef create_ts(media_db: Media):\n    # d_abs_path = ypath.join(media_root(media_db), media_db.desc_path)\n    desc_root = media_db.desc_mpath.path\n    media_ts_dir = ypath.join(desc_root, movie_config.ts_info.ts_dir, media_db.desc_path)\n    if media_db.state >= MediaHelp.STATE_VIDEO_TS and os.path.isdir(media_ts_dir):\n        logger.info('this video has already been segmented: ' + media_db.abs_path)\n        return\n    if os.path.exists(media_ts_dir):\n        shutil.rmtree(media_ts_dir)\n    ypath.create_dirs(media_ts_dir, True)\n    media_desc_path = desc_path(media_db)  # ypath.join(desc_path(media_db), media_db.desc_path)\n    m3u8_file = ypath.join(media_ts_dir, movie_config.ts_info.u8name)\n    cmd = '\\\"' + ffmpeg_tools + '\\\" -i \\\"' + media_desc_path + \\\n          '\\\" -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list \\\"' + \\\n          m3u8_file + '\\\" -segment_time 30 \\\"' + media_ts_dir + '/%05d.ts\\\"'\n    yutils.process_cmd(cmd)\n    modify_state(media_db, MediaHelp.STATE_VIDEO_TS)\n    # m3u8_path = ypath.join(media_ts_dir, movie_config.ts_info.u8name)\n\n    # media_db.m3u8_path = ypath.join(media_db.desc_path, movie_config.ts_info.u8name)\n    print(desc_root)\n\n\n# generate a full frame grab plus a cropped thumbnail\ndef create_thum(media_db: Media):\n    if MediaHelp.is_err(media_db.state):\n        return\n    desc_root = media_db.desc_mpath.path\n    media_tum_root = ypath.join(desc_root,\n                                movie_config.img_info.img_root)  # TmpUtil.desc() / movie_config.img_info.img_root\n    # convert_root\n    # if media_db.state >= MediaHelp.STATE_VIDEO_THUM:\n    #     logger.info('thumbnails already generated for: ' + media_db.abs_path)\n    #     return\n    target_img_dir = ypath.join(media_tum_root, media_db.desc_path)\n    ypath.create_dirs(target_img_dir)\n    desc = ypath.join(target_img_dir, movie_config.img_info.img)\n    desc_thum = ypath.join(target_img_dir, movie_config.img_info.thum)\n    if os.path.exists(desc) and os.path.exists(desc_thum):\n        logger.info('no thumbnail crop needed, it already exists: %s' % desc)\n        return\n    # aspect ratio used for the thumbnail crop\n    thum_percent = int(movie_config.base_info.thum_w) / int(movie_config.base_info.thum_h)\n\n    max_thum_time = int(movie_config.base_info.max_thum_time)\n    min_thum_time = int(movie_config.base_info.min_thum_time)\n\n
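    # (worked example of the clamped randint below) with min_thum_time=10, max_thum_time=300\n    # and a 45 s clip: low = 10 (duration > 10), high = 45 (duration < 300, clamped to duration),\n    # so r_time = random.randint(10, 45) and the frame is grabbed at '-ss 00:00:<r_time>'.\n    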
ypath.create_dirs(desc)\n\n r_time = random.randint(min_thum_time if media_db.duration > min_thum_time else 0,\n max_thum_time if media_db.duration > max_thum_time else media_db.duration)\n d_abs_path = ypath.join(media_root(desc_root), media_db.desc_path)\n cmd = ffmpeg_tools + ' -i \\\"' + d_abs_path + '\\\" -y -vframes 1 -ss 00:00:' + str(\n r_time) + ' -f image2 \\\"' + desc + '\\\"'\n yutils.process_cmd(cmd)\n if not os.path.exists(desc):\n return\n img = Image.open(desc)\n w, h = img.size\n crop_img = img.crop(yutils.crop_size(w, h, thum_percent))\n crop_img.save(desc_thum)\n\n\ndef modify_state(media_db, state):\n media_db.state = state\n media_db.save()\n","sub_path":"MrYangServer/MryangService/video/VideoHelper.py","file_name":"VideoHelper.py","file_ext":"py","file_size_in_byte":16529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421992442","text":"import logging\n\nfrom flask import abort\nfrom sqlalchemy import literal_column\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom config.db_lib import db_session\nfrom models import (\n Activator,\n Application,\n Notification,\n NotificationActivator,\n NotificationActivatorSchema,\n NotificationApplicationDeployment,\n NotificationApplicationDeploymentSchema,\n NotificationSchema,\n NotificationSolutionDeployment,\n NotificationSolutionDeploymentSchema,\n NotificationTeam,\n NotificationTeamSchema,\n NotificationType,\n Solution,\n Team,\n)\nfrom tb_houston_service import security\nfrom tb_houston_service.extendedSchemas import ExtendedNotificationSchema\nfrom tb_houston_service.tools import ModelTools\n\nlogger = logging.getLogger(\"tb_houston_service.notification\")\n\n\ndef read_all(\n typeId=None, isRead=None, isActive=None, page=None, page_size=None, sort=None\n):\n logger.debug(\"read_all: %s\", typeId)\n with db_session() as dbs:\n # pre-process sort instructions\n if sort is None:\n notifications_query = dbs.query(Notification).order_by(\n Notification.lastUpdated + \" desc\"\n )\n else:\n try:\n sort_inst = [si.split(\":\") for si in sort]\n orderby_arr = []\n for si in sort_inst:\n si1 = si[0]\n if len(si) > 1:\n si2 = si[1]\n else:\n si2 = \"asc\"\n orderby_arr.append(f\"{si1} {si2}\")\n # print(\"orderby: {}\".format(orderby_arr))\n notifications_query = dbs.query(Notification).order_by(\n literal_column(\", \".join(orderby_arr))\n )\n except SQLAlchemyError as e:\n logger.warning(\"Exception: %s\", e)\n notifications_query = dbs.query(Notification).order_by(\n Notification.lastUpdated + \" desc\"\n )\n\n user = security.get_valid_user_from_token(dbsession=dbs)\n if not user:\n abort(404, \"No valid user found!\")\n toUserId = user.id\n notifications_query = notifications_query.filter(\n (typeId is None or Notification.typeId == typeId),\n (toUserId is None or Notification.toUserId == toUserId),\n (isRead is None or Notification.isRead == isRead),\n (isActive is None or Notification.isActive == isActive),\n )\n\n # do limit and offset last\n if page is None or page_size is None:\n notifications = notifications_query.all()\n else:\n notifications = notifications_query.limit(page_size).offset(\n page * page_size\n )\n\n for n in notifications:\n n.type = (\n dbs.query(NotificationType)\n .filter(NotificationType.id == n.typeId)\n .one_or_none()\n )\n if n.typeId == 1:\n n.details = (\n dbs.query(NotificationActivator)\n .filter(\n n.id == NotificationActivator.notificationId,\n Activator.id == NotificationActivator.activatorId,\n )\n .one_or_none()\n )\n elif 
n.typeId == 2:\n n.details = (\n dbs.query(NotificationTeam)\n .filter(\n n.id == NotificationTeam.notificationId,\n Team.id == NotificationTeam.teamId,\n )\n .one_or_none()\n )\n elif n.typeId == 3:\n n.details = (\n dbs.query(NotificationApplicationDeployment)\n .filter(\n n.id == NotificationApplicationDeployment.notificationId,\n Application.id\n == NotificationApplicationDeployment.applicationId,\n )\n .one_or_none()\n )\n elif n.typeId == 4:\n n.details = (\n dbs.query(NotificationSolutionDeployment)\n .filter(\n n.id == NotificationSolutionDeployment.notificationId,\n Solution.id == NotificationSolutionDeployment.solutionId,\n )\n .one_or_none()\n )\n schema = ExtendedNotificationSchema(many=True)\n data = schema.dump(notifications)\n return data, 200\n\n\ndef create(notification, typeId, dbsession):\n # if id is zero or None (null), we create a a new notification otherwise\n # we update an existing notification.\n oid = notification.get(\"id\", None)\n logger.debug(\"oid: %s\", oid)\n notification[\"typeId\"] = typeId\n notification[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n logger.debug(\"create notification: %s\", notification)\n\n if not oid:\n # Insert\n if notification.get(\"isActive\", None) is None:\n notification[\"isActive\"] = True\n if notification.get(\"isRead\", None) is None:\n notification[\"isRead\"] = False\n if notification.get(\"typeId\") == 1:\n tmp_notification = {}\n tmp_notification[\"typeId\"] = notification.get(\"typeId\")\n tmp_notification[\"message\"] = notification.get(\"message\")\n tmp_notification[\"isActive\"] = notification.get(\"isActive\", True)\n tmp_notification[\"isRead\"] = notification.get(\"isRead\")\n tmp_notification[\"importance\"] = notification.get(\"importance\")\n tmp_notification[\"toUserId\"] = notification.get(\"toUserId\")\n tmp_notification[\"fromUserId\"] = notification.get(\"fromUserId\")\n tmp_notification[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n aSchema = NotificationSchema()\n new_notification = aSchema.load(tmp_notification, session=dbsession)\n dbsession.add(new_notification)\n dbsession.flush()\n naSchema = NotificationActivatorSchema()\n notificationActivator = {}\n notificationActivator[\"notificationId\"] = new_notification.id\n notificationActivator[\"activatorId\"] = notification.get(\"activatorId\")\n notificationActivator[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n notificationActivator[\"isActive\"] = notification.get(\"isActive\", True)\n logger.debug(\"notificationActivator: %s\", notificationActivator)\n new_na = naSchema.load(notificationActivator, session=dbsession)\n dbsession.add(new_na)\n elif notification.get(\"typeId\") == 2:\n tmp_notification = {}\n tmp_notification[\"typeId\"] = notification.get(\"typeId\")\n tmp_notification[\"message\"] = notification.get(\"message\")\n tmp_notification[\"isActive\"] = notification.get(\"isActive\", True)\n tmp_notification[\"isRead\"] = notification.get(\"isRead\")\n tmp_notification[\"importance\"] = notification.get(\"importance\")\n tmp_notification[\"toUserId\"] = notification.get(\"toUserId\")\n tmp_notification[\"fromUserId\"] = notification.get(\"fromUserId\")\n tmp_notification[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n aSchema = NotificationSchema()\n new_notification = aSchema.load(tmp_notification, session=dbsession)\n dbsession.add(new_notification)\n dbsession.flush()\n naSchema = NotificationTeamSchema()\n notificationTeam = {}\n notificationTeam[\"notificationId\"] = new_notification.id\n 
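# the remaining link-row fields mirror the incoming payload\n            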
notificationTeam[\"teamId\"] = notification.get(\"teamId\")\n notificationTeam[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n notificationTeam[\"isActive\"] = notification.get(\"isActive\", True)\n new_na = naSchema.load(notificationTeam, session=dbsession)\n dbsession.add(new_na)\n elif notification.get(\"typeId\") == 3:\n tmp_notification = {}\n tmp_notification[\"typeId\"] = notification.get(\"typeId\")\n tmp_notification[\"message\"] = notification.get(\"message\")\n tmp_notification[\"isActive\"] = notification.get(\"isActive\", True)\n tmp_notification[\"isRead\"] = notification.get(\"isRead\")\n tmp_notification[\"importance\"] = notification.get(\"importance\")\n tmp_notification[\"toUserId\"] = notification.get(\"toUserId\")\n tmp_notification[\"fromUserId\"] = notification.get(\"fromUserId\")\n tmp_notification[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n aSchema = NotificationSchema()\n new_notification = aSchema.load(tmp_notification, session=dbsession)\n dbsession.add(new_notification)\n dbsession.flush()\n nadSchema = NotificationApplicationDeploymentSchema()\n notificationApplicationDeployment = {}\n notificationApplicationDeployment[\"notificationId\"] = new_notification.id\n notificationApplicationDeployment[\"applicationId\"] = notification.get(\n \"applicationId\"\n )\n notificationApplicationDeployment[\n \"lastUpdated\"\n ] = ModelTools.get_utc_timestamp()\n notificationApplicationDeployment[\"isActive\"] = notification.get(\n \"isActive\", True\n )\n new_na = nadSchema.load(\n notificationApplicationDeployment, session=dbsession\n )\n dbsession.add(new_na)\n elif notification.get(\"typeId\") == 4:\n tmp_notification = {}\n tmp_notification[\"typeId\"] = notification.get(\"typeId\")\n tmp_notification[\"message\"] = notification.get(\"message\")\n tmp_notification[\"isActive\"] = notification.get(\"isActive\", True)\n tmp_notification[\"isRead\"] = notification.get(\"isRead\")\n tmp_notification[\"importance\"] = notification.get(\"importance\")\n tmp_notification[\"toUserId\"] = notification.get(\"toUserId\")\n tmp_notification[\"fromUserId\"] = notification.get(\"fromUserId\")\n tmp_notification[\"lastUpdated\"] = ModelTools.get_utc_timestamp()\n aSchema = NotificationSchema()\n new_notification = aSchema.load(tmp_notification, session=dbsession)\n dbsession.add(new_notification)\n dbsession.flush()\n nsdSchema = NotificationSolutionDeploymentSchema()\n notificationSolutionDeployment = {}\n notificationSolutionDeployment[\"notificationId\"] = new_notification.id\n notificationSolutionDeployment[\"solutionId\"] = notification.get(\n \"solutionId\"\n )\n notificationSolutionDeployment[\n \"lastUpdated\"\n ] = ModelTools.get_utc_timestamp()\n notificationSolutionDeployment[\"isActive\"] = notification.get(\n \"isActive\", True\n )\n new_na = nsdSchema.load(notificationSolutionDeployment, session=dbsession)\n dbsession.add(new_na)\n else:\n logger.error(\n \"Unknown notification type, the transaction will be rolled back for this notification!\"\n )\n dbsession.rollback()\n else:\n # Update\n aSchema = NotificationSchema()\n if notification.get(\"typeId\") == 1:\n notification.pop(\"typeId\")\n updated_notification = aSchema.load(notification, session=dbsession)\n dbsession.merge(updated_notification)\n dbsession.flush()\n notificationActivator = (\n dbsession.query(NotificationActivator)\n .filter(NotificationActivator.notificationId == updated_notification.id)\n .one()\n )\n notificationActivator.lastUpdated = ModelTools.get_utc_timestamp()\n 
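# keep the link row's audit fields in sync with the parent notification\n            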
notificationActivator.isActive = notification.get(\n \"isActive\", notificationActivator.isActive\n )\n dbsession.merge(notificationActivator)\n elif notification.get(\"typeId\") == 2:\n notification.pop(\"typeId\")\n updated_notification = aSchema.load(notification, session=dbsession)\n dbsession.merge(updated_notification)\n dbsession.flush()\n notificationTeam = (\n dbsession.query(NotificationTeam)\n .filter(NotificationTeam.notificationId == updated_notification.id)\n .one()\n )\n notificationTeam.lastUpdated = ModelTools.get_utc_timestamp()\n notificationTeam.isActive = notification.get(\n \"isActive\", notificationTeam.isActive\n )\n dbsession.merge(notificationTeam)\n elif notification.get(\"typeId\") == 3:\n notification.pop(\"typeId\")\n updated_notification = aSchema.load(notification, session=dbsession)\n dbsession.merge(updated_notification)\n dbsession.flush()\n notificationApplicationDeployment = (\n dbsession.query(NotificationApplicationDeployment)\n .filter(\n NotificationApplicationDeployment.notificationId\n == updated_notification.id\n )\n .one()\n )\n notificationApplicationDeployment.lastUpdated = (\n ModelTools.get_utc_timestamp()\n )\n notificationApplicationDeployment.isActive = notification.get(\n \"isActive\", notificationApplicationDeployment.isActive\n )\n dbsession.merge(notificationApplicationDeployment)\n elif notification.get(\"typeId\") == 4:\n notification.pop(\"typeId\")\n updated_notification = aSchema.load(notification, session=dbsession)\n dbsession.merge(updated_notification)\n dbsession.flush()\n notificationSolutionDeployment = (\n dbsession.query(NotificationSolutionDeployment)\n .filter(\n NotificationSolutionDeployment.notificationId\n == updated_notification.id\n )\n .one()\n )\n notificationSolutionDeployment.lastUpdated = ModelTools.get_utc_timestamp()\n notificationSolutionDeployment.isActive = notification.get(\n \"isActive\", notificationSolutionDeployment.isActive\n )\n dbsession.merge(notificationSolutionDeployment)\n else:\n logger.error(\n \"typeId is missing, the transaction will be rolled back for this notification!\"\n )\n dbsession.rollback()\n logger.debug(\"processed: %s\", notification)\n return notification\n\n\ndef create_all(\n notificationListDetails,\n typeId,\n isRead=None,\n isActive=None,\n page=None,\n page_size=None,\n sort=None,\n):\n logger.debug(\"create_all: %s\", notificationListDetails)\n with db_session() as dbs:\n for n in notificationListDetails:\n create(n, typeId, dbsession=dbs)\n\n (data, resp_code) = read_all(\n typeId=typeId,\n isRead=isRead,\n isActive=isActive,\n page=page,\n page_size=page_size,\n sort=sort,\n )\n logger.debug(\"data: %s, resp_code: %s\", data, resp_code)\n return data, 201\n\n\ndef meta(typeId=None, isRead=None, isActive=None):\n \"\"\"\n Responds to a request for /api/notificationsMeta/.\n\n :param activator:\n :return: total count of notifications\n \"\"\"\n with db_session() as dbs:\n user = security.get_valid_user_from_token(dbsession=dbs)\n if not user:\n abort(404, \"No valid user found!\")\n toUserId = user.id\n count = (\n dbs.query(Notification)\n .filter(\n (typeId is None or Notification.typeId == typeId),\n (toUserId is None or Notification.toUserId == toUserId),\n (isRead is None or Notification.isRead == isRead),\n (isActive is None or Notification.isActive == isActive),\n )\n .count()\n )\n data = {\"count\": count}\n return data, 200\n\n\n# service functions\ndef delete(oid, dbsession):\n logger.debug(\"delete: %s\", oid)\n na = (\n 
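# fetch the active activator link row for this notification, if any\n        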
dbsession.query(NotificationActivator)\n        .filter(\n            NotificationActivator.notificationId == oid, NotificationActivator.isActive\n        )\n        .one_or_none()\n    )\n    if na:\n        na.isActive = False\n        na.lastUpdated = ModelTools.get_utc_timestamp()\n\n    nt = (\n        dbsession.query(NotificationTeam)\n        .filter(NotificationTeam.notificationId == oid, NotificationTeam.isActive)\n        .one_or_none()\n    )\n    if nt:\n        nt.isActive = False\n        nt.lastUpdated = ModelTools.get_utc_timestamp()\n\n    nad = (\n        dbsession.query(NotificationApplicationDeployment)\n        .filter(\n            NotificationApplicationDeployment.notificationId == oid,\n            NotificationApplicationDeployment.isActive,\n        )\n        .one_or_none()\n    )\n    if nad:\n        nad.isActive = False\n        nad.lastUpdated = ModelTools.get_utc_timestamp()\n\n    nsd = (\n        dbsession.query(NotificationSolutionDeployment)\n        .filter(\n            NotificationSolutionDeployment.notificationId == oid,\n            NotificationSolutionDeployment.isActive,\n        )\n        .one_or_none()\n    )\n    if nsd:\n        nsd.isActive = False\n        nsd.lastUpdated = ModelTools.get_utc_timestamp()\n\n    n = (\n        dbsession.query(Notification)\n        .filter(Notification.id == oid, Notification.isActive)\n        .one_or_none()\n    )\n    if n:\n        n.isActive = False\n        n.lastUpdated = ModelTools.get_utc_timestamp()\n\n\ndef dismiss(\n    fromUserId,\n    dbsession,\n    activatorId=None,\n    teamId=None,\n    applicationId=None,\n    solutionId=None,\n):\n    if activatorId:\n        ns = (\n            dbsession.query(Notification)\n            .filter(\n                NotificationActivator.notificationId == Notification.id,\n                NotificationActivator.isActive,\n                Notification.isActive,\n                Notification.fromUserId == fromUserId,\n            )\n            .all()\n        )\n        for n in ns:\n            delete(n.id, dbsession)\n    if teamId:\n        ns = (\n            dbsession.query(Notification)\n            .filter(\n                NotificationTeam.notificationId == Notification.id,\n                NotificationTeam.isActive,\n                Notification.isActive,\n                Notification.fromUserId == fromUserId,\n            )\n            .all()\n        )\n        for n in ns:\n            delete(n.id, dbsession)\n    if applicationId:\n        ns = (\n            dbsession.query(Notification)\n            .filter(\n                NotificationApplicationDeployment.notificationId == Notification.id,\n                NotificationApplicationDeployment.isActive,\n                Notification.isActive,\n                Notification.fromUserId == fromUserId,\n            )\n            .all()\n        )\n        for n in ns:\n            delete(n.id, dbsession)\n    if solutionId:\n        ns = (\n            dbsession.query(Notification)\n            .filter(\n                NotificationSolutionDeployment.notificationId == Notification.id,\n                NotificationSolutionDeployment.isActive,\n                Notification.isActive,\n                Notification.fromUserId == fromUserId,\n            )\n            .all()\n        )\n        for n in ns:\n            delete(n.id, dbsession)\n","sub_path":"tb_houston_service/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":19657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"595319321","text":"from telebot import *\nfrom db import Product\n\n\nTOKEN = \"1883154030:AAGF21aoXyK3TYzLvlrRrOqxKUG6rObEG2U\"\n\nbot = TeleBot(TOKEN)\nproducts = Product(\"product.sql\")\n# products.create_table()\n\nname = \"\"\namount = \"\"\nprice = \"\"\ncategory = \"\"\n\n@bot.message_handler(commands=['start'])\ndef greeting(message):\n    bot.send_message(message.chat.id, \"Напишите что хотите сделать\")\n\n@bot.message_handler(commands=['create'])\ndef create_product(message):\n    bot.send_message(message.chat.id, \"Введите название товара:\")\n    bot.register_next_step_handler(message, ask_amount)\n\n@bot.message_handler(commands=['show'])\ndef show_products(message):\n    data = products.select_products()\n    for product in data:\n        temp = \"ID: \" + str(product[0]) + \"\\n\" + \"Name: \" + product[1] + \"\\n\" + 
\"Amount: \" + str(product[2]) + \"\\n\" + \"Price: \" + str(product[3]) + \"\\n\" + \"Category: \" + product[4]\n bot.send_message(message.chat.id, temp)\n\n\n\ndef ask_amount(message):\n global name\n name = message.text\n bot.send_message(message.chat.id, \"Введите колво товара:\")\n bot.register_next_step_handler(message, ask_price)\n\ndef ask_price(message):\n global amount\n amount = int(message.text)\n bot.send_message(message.chat.id, \"Введите цену товара:\")\n bot.register_next_step_handler(message, ask_category)\n\n\ndef ask_category(message):\n global price\n price = int(message.text)\n bot.send_message(message.chat.id, \"Введите категорию товара:\")\n bot.register_next_step_handler(message, insert_product)\n\ndef insert_product(message):\n global category\n category = message.text\n insert_final()\n\n\ndef insert_final():\n global name, price, amount, category\n ID = len(products.select_products()) + 1\n products.insert_product(ID, name, amount, price, category)\n data = products.select_products()\n print(data)\n name = \"\"\n amount = \"\"\n price = \"\"\n category = \"\"\n\n\nbot.polling()","sub_path":"bot_db/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"338019884","text":"from pylab import * \nimport numpy as np\nimport xarray as xr\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nfrom matplotlib.ticker import StrMethodFormatter, NullFormatter\n#from IPython.display import set_matplotlib_formats\n#set_matplotlib_formats('png', 'pdf')\nimport glob\nimport os\n\n\n### load datasets ###\n\nos.chdir(\"/nfs3m/archive/sfa_cache09/users/g00/imitevsk/E2.1_CO2_runs/pytropd/output\")\nt_05 = xr.open_dataset('temp_0.5_surface.nc')\nt_1 = xr.open_dataset('temp_1_surface.nc')\nt_15 = xr.open_dataset('temp_1.5_surface.nc')\nt_2 = xr.open_dataset('temp_2_surface.nc')\nt_3 = xr.open_dataset('temp_3_surface.nc')\nt_4 = xr.open_dataset('temp_4_surface.nc')\nt_5 = xr.open_dataset('temp_5_surface.nc')\nt_6 = xr.open_dataset('temp_6_surface.nc')\nt_7 = xr.open_dataset('temp_7_surface.nc')\nt_8 = xr.open_dataset('temp_8_surface.nc')\nos.chdir(\"/nfs3m/archive/sfa_cache09/users/g00/imitevsk/E2.1_CO2_runs/pytropd/plots_figures\")\n\ndef supplemental_fig_ice_extent():\n\t\"\"\"\n\tPlots Supplemental Figure 2 (sea-ice extent) \n\t\"\"\"\n\t\n\tx = np.array([0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 8])\n\tx = np.log(x) * 5.35\n\n\t### sea-ice area where ice content is greater than 15% \t\n\tsie_05 = t_05.ice.where(t_05.ice > 15) / 100\n\tsie_1 = t_1.ice.where(t_1.ice > 15) / 100\n\tsie_15 = t_15.ice.where(t_15.ice > 15) / 100\n\tsie_2 = t_2.ice.where(t_2.ice > 15) / 100\n\tsie_3 = t_3.ice.where(t_3.ice > 15) / 100\n\tsie_4 = t_4.ice.where(t_4.ice > 15) / 100\n\tsie_5 = t_5.ice.where(t_5.ice > 15) / 100\n\tsie_6 = t_6.ice.where(t_6.ice > 15) / 100\n\tsie_7 = t_7.ice.where(t_7.ice > 15) / 100\n\tsie_8 = t_8.ice.where(t_8.ice > 15) / 100\n\t\n\tdef y_sie(l_s, l_e):\n\t\t\"\"\"\n\t\tSea-ice extent in equilibrium\n\t\t\n\t\t:Input:\n\t\t - *l_s* (int) - starting latitude \n\t\t - *l_e* (int) - ending latitude\n\t\t:Output:\n\t\t - *y_sie* (ndarray) - sea-ice extent at 0.5,1,1.5,2,3,4,5,6,7,8xCO2\n\t\t\"\"\"\n\t\ty_sie = np.array([\t\n\t\t\t(sie_05 * t_05.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_1 * t_1.axyp).sel(year=slice(1950,2000), 
lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_15 * t_15.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_2 * t_2.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_3 * t_3.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_4 * t_4.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_5 * t_5.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_6 * t_6.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_7 * t_7.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ,\n\t\t\t(sie_8 * t_8.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean() / 1e12 ])\n\t\treturn y_sie\n\t\n\tdef y_sie_error(l_s, l_e):\n\t\t\"\"\"\n\t\tSea-ice extent annual 1 \\sigma in equilibrium \n\t\t\n\t\t:Input:\n\t\t - *l_s* (int) - starting latitude \n\t\t - *l_e* (int) - ending latitude\n\t\t:Output:\n\t\t - *y_sie_error* (ndarray) - sea-ice extent 1 \\sigma at 0.5,1,1.5,2,3,4,5,6,7,8xCO2\n\t\t\"\"\"\n\t\ty_sie_error = np.array([\n\t\t\t((sie_05 * t_05.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_1 * t_1.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_15 * t_15.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_2 * t_2.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_3 * t_3.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_4 * t_4.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_5 * t_5.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_6 * t_6.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_7 * t_7.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std(),\n\t\t\t((sie_8 * t_8.axyp).sel(year=slice(1950,2000), lat=slice(l_s,l_e)).sum(dim=['lat','lon']).mean('month') / 1e12).std() ])\n\t\treturn y_sie_error\n\t\n\tfig = plt.figure()\n\tfig.set_figwidth(fig.get_figwidth() * 1)\n\tfig.set_figheight(fig.get_figheight() * 1)\n\t\t\t\n\taxes = fig.add_subplot(1,1,1)\n\taxes.errorbar(x, y_sie(0,90), y_sie_error(0,90), linewidth = 3, marker='o', markersize=12, capsize = 8, color = 'red', label='NH')\n\taxes.errorbar(x, y_sie(-90,0), y_sie_error(-90,0), linewidth = 3, marker='o', markersize=12, capsize = 8, color = 'blue', label='SH')\t\n\tplt.xlabel('Radiative Forcing (W/m$^2$)', fontsize=20)\n\tplt.ylabel('10$^6$ km$^2$', fontsize=18)\n\tplt.title(\"Sea-ice extent\", fontsize = 20)\n\taxes.legend(loc = 0)\n\tplt.xticks([-4,0,4,8,12], fontsize = 15)\n\tplt.yticks([2.5,7.5,12.5,17.5], fontsize = 15)\n\t\t\n\tplt.tight_layout()\n\tplt.savefig('supplemental_fig_ice_extent.pdf')\n\tplt.show()\n\t\nsupplemental_fig_ice_extent() 
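# entry point: draws NH/SH sea-ice extent vs. radiative forcing and saves the PDF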
\n","sub_path":"sf_sea_ice_extent.py","file_name":"sf_sea_ice_extent.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108337781","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom unittest import TestLoader, TextTestRunner, TestSuite\n\nfrom tests.test_simple_functionality import SimpleFunctionalityTC, ValidationTC\n\n\ndef testSuiteFromTCs(*tcs):\n loader = TestLoader()\n for tc in tcs:\n # skip AxiLiteEndpointTC because we need one to test original methods\n # from SimTestCase\n tc._multiprocess_can_split_ = True\n loadedTcs = [loader.loadTestsFromTestCase(tc) for tc in tcs]\n suite = TestSuite(loadedTcs)\n return suite\n\n\nsuite = testSuiteFromTCs(\n SimpleFunctionalityTC,\n ValidationTC\n)\n\nif __name__ == '__main__':\n runner = TextTestRunner(verbosity=2)\n\n try:\n from concurrencytest import ConcurrentTestSuite, fork_for_tests\n useParallerlTest = True\n except ImportError:\n # concurrencytest is not installed, use regular test runner\n useParallerlTest = False\n\n if useParallerlTest:\n # Run same tests across 4 processes\n concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests())\n runner.run(concurrent_suite)\n else:\n runner.run(suite)\n","sub_path":"tests/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"129310077","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('opencv-python-foreground.mp4')\nfgbg = cv2.BackgroundSubtractorMOG()\n\nwhile True:\n ret, frame = cap.read()\n fgmask = fgbg.apply(frame)\n\n cv2.imshow('original',frame)\n cv2.imshow('fg',fgmask)\n\n k=cv2.waitKey(30) & 0xFF\n if k==27:\n break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"OpenCv13.py","file_name":"OpenCv13.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308607840","text":"from sklearn.linear_model import LinearRegression\r\nfrom sklearn import datasets\r\nfrom sklearn.datasets import load_diabetes\r\nfrom sklearn import preprocessing\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.externals import joblib\r\n\r\nMODEL_NAME = 'Linear.m'\r\n\r\n\r\nclass Linear(object):\r\n def __init__(self):\r\n pass\r\n \r\n def create_model(self,x,y):\r\n model = LinearRegression()\r\n model.fit(x,y)\r\n coef = model.coef_\r\n intercept = model.intercept_\r\n score = model.score(x, y)\r\n self.save_model(model)\r\n return coef,intercept,score\r\n \r\n def get_data(self):\r\n x, y = datasets.make_regression(n_samples=300, n_features=3, n_targets=1, noise=5)\r\n # plt.figure()\r\n # plt.scatter(x,y)\r\n # plt.show()\r\n return x, y\r\n \r\n def set_train_split(self):\r\n pass\r\n \r\n def pre_procession(self,data):\r\n return preprocessing.scale(data)\r\n \r\n def save_model(self,model):\r\n joblib.dump(model,MODEL_NAME)\r\n \r\n def restore_model(self):\r\n return joblib.load(MODEL_NAME)\r\n \r\n def main(self):\r\n x,y = self.get_data()\r\n # x = self.pre_procession(x)\r\n # y = self.pre_procession(y)\r\n coef, intercept, score = self.create_model(x, y)\r\n print('==== coef ====')\r\n print(coef, )\r\n print('==== intercept ====')\r\n print(intercept)\r\n print('==== score ====')\r\n print(score)\r\n\r\n\r\nrun = Linear()\r\nrun.main()\r\n# ==== coef ====\r\n# [0.39072646 0.29663471 0.54071077 0.09709144 0.40517252 0.3021027\r\n# 0.21999778 
0.05294022 0.21614554 0.42508272]\r\n# ==== intercept ====\r\n# 9.965950341549611e-17\r\n# ==== score ====\r\n# 0.9992915426769156\r\n\r\n","sub_path":"code/linear_regression/linear_demo1.py","file_name":"linear_demo1.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532429641","text":"from mwxml import Dump\nfrom mwtypes.files import reader\n\nfrom wikiwho.wikiwho import Wikiwho\n\n\ndef process_xml_dump(xml_file_path):\n # more info about reading xml dumps: https://github.com/mediawiki-utilities/python-mwxml\n dump = Dump.from_file(reader(xml_file_path))\n for page in dump:\n wikiwho = Wikiwho(page.title)\n wikiwho.analyse_article_from_xml_dump(page)\n break # process only first page\n return wikiwho\n\nif __name__ == '__main__':\n # link to xml dumps: https://dumps.wikimedia.org/enwiki/\n xml_file_path = 'path/to/xml'\n wikiwho_obj = process_xml_dump(xml_file_path)\n print(wikiwho_obj.title)\n print(wikiwho_obj.ordered_revisions)\n","sub_path":"WikiWho/examples/process_xml_dump.py","file_name":"process_xml_dump.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326205684","text":"#### VBSjjlnu semileptonic samples and configs\n\n# Mainly 2017 as a inclusive starting point\nvbsjjlnu_samples_bkg = ['WmTo2J_ZTo2L_QCD','WmToLNu_WmTo2J_QCD','WmToLNu_ZTo2J_QCD','WpTo2J_WmToLNu_QCD','WpTo2J_ZTo2L_QCD',\n 'WpToLNu_WpTo2J_QCD','WpToLNu_ZTo2J_QCD','ZTo2L_ZTo2J_QCD','WpToLNu_WmTo2J_QCD',\n 'WWW','WWZ','WZZ','ZZZ','WWG',\n 'WLNuJJ_EWK','EWKZ2Jets_ZToLL_M-50',\n 'DYJetsToLL_M-5to50-LO',\n 'DYJetsToLL_M-10to50-LO', 'DYJetsToLL_M-10to50-LO_ext1', \n 'DYJetsToLL_M-50-LO', 'DYJetsToLL_M-50-LO_ext1', \n 'DYJetsToLL_M-50','DYJetsToLL_M-50_ext1',\n 'DYJetsToLL_M-4to50_HT-100to200','DYJetsToLL_M-4to50_HT-100to200_ext1',\n 'DYJetsToLL_M-4to50_HT-200to400','DYJetsToLL_M-4to50_HT-200to400_ext1',\n 'DYJetsToLL_M-4to50_HT-400to600','DYJetsToLL_M-4to50_HT-400to600_ext1',\n 'DYJetsToLL_M-4to50_HT-600toInf','DYJetsToLL_M-4to50_HT-600toInf_ext1',\n 'DYJetsToLL_M-50_HT-70to100', #---> missing XS\n 'DYJetsToLL_M-50_HT-100to200',\n 'DYJetsToLL_M-50_HT-200to400','DYJetsToLL_M-50_HT-200to400_ext1',\n 'DYJetsToLL_M-50_HT-400to600_ext1','DYJetsToLL_M-50_HT-400to600_ext2',\n 'DYJetsToLL_M-50_HT-600to800',\n 'DYJetsToLL_M-50_HT-800to1200',\n 'DYJetsToLL_M-50_HT-1200to2500',\n 'DYJetsToLL_M-50_HT-2500toInf',\n 'ST_t-channel_top', 'ST_t-channel_antitop',\n 'ST_tW_antitop','ST_tW_top','ST_tW_antitop_ext1','ST_tW_top_ext1',\n 'ST_s-channel','ST_s-channel_ext1',\n 'TTTo2L2Nu',\n 'TTToSemiLeptonic',\n 'TTWjets','TTWjets_ext1',\n 'TTZjets','TTZjets_ext1',\n 'WJetsToLNu-LO', 'WJetsToLNu-LO_ext1',\n 'WJetsToLNu_HT70_100',\n 'WJetsToLNu_HT100_200',\n 'WJetsToLNu_HT200_400',\n 'WJetsToLNu_HT400_600',\n 'WJetsToLNu_HT600_800',\n 'WJetsToLNu_HT800_1200',\n 'WJetsToLNu_HT1200_2500',\n 'WJetsToLNu_HT2500_inf',\n 'WJetsToLNu_Pt50to100',\n 'WJetsToLNu_Pt100to250',\n 'WJetsToLNu_Pt250to400',\n 'WJetsToLNu_Pt400to600',\n 'WJetsToLNu_Pt600toInf'\n \n # 'QCD_Pt-15to20_MuEnrichedPt5','QCD_Pt-20to30_MuEnrichedPt5','QCD_Pt-30to50_MuEnrichedPt5',\n # 'QCD_Pt-50to80_MuEnrichedPt5','QCD_Pt-80to120_MuEnrichedPt5','QCD_Pt-120to170_MuEnrichedPt5',\n # 'QCD_Pt-170to300_MuEnrichedPt5','QCD_Pt-20to30_EMEnriched','QCD_Pt-30to50_EMEnriched','QCD_Pt-50to80_EMEnriched'\n ]\n\n\nvbsjjlnu_samples_bkg_2016 = [\n 'DYJetsToLL_M-5to50_HT-70to100', # not exist 2016 
DYJetsToLL_M-5to50 on das\n 'DYJetsToLL_M-5to50_HT-100to200',\n 'DYJetsToLL_M-5to50_HT-100to200_ext1',\n 'DYJetsToLL_M-5to50_HT-200to400',\n 'DYJetsToLL_M-5to50_HT-200to400_ext1',\n 'DYJetsToLL_M-5to50_HT-400to600',\n 'DYJetsToLL_M-5to50_HT-400to600_ext1',\n 'DYJetsToLL_M-5to50_HT-600toinf_ext1',\n 'DYJetsToLL_M-5to50_HT-600toinf',\n 'DYJetsToLL_M-10to50', 'DYJetsToLL_M-10to50_ext1',\n 'DYJetsToLL_M-50',\n 'DYJetsToLL_M-50_HT-70to100',\n 'DYJetsToLL_M-50_HT-100to200',\n 'DYJetsToLL_M-50_HT-100to200_ext1',\n 'DYJetsToLL_M-50_HT-200to400',\n 'DYJetsToLL_M-50_HT-200to400_ext1',\n 'DYJetsToLL_M-50_HT-400to600',\n 'DYJetsToLL_M-50_HT-400to600_ext1',\n 'DYJetsToLL_M-50_HT-600to800',\n 'DYJetsToLL_M-50_HT-800to1200',\n 'DYJetsToLL_M-50_HT-1200to2500',\n 'DYJetsToLL_M-50_HT-2500toinf',\n 'TTWJetsToLNu_ext2',\n\n 'WJetsToLNu', 'WJetsToLNu_ext2', 'WJetsToLNu-LO_ext2',\n 'WJetsToLNu_HT100_200_ext2', 'WJetsToLNu_HT200_400_ext2', 'WJetsToLNu_HT400_600_ext1',\n 'WJetsToLNu_HT600_800_ext1', 'WJetsToLNu_HT800_1200_ext1', 'WJetsToLNu_HT1200_2500_ext1',\n 'WJetsToLNu_HT2500_inf_ext1',\n 'WJetsToLNu_Pt100To250_ext4', 'WJetsToLNu_Pt250To400_ext4', 'WJetsToLNu_Pt400To600_ext4', 'WJetsToLNu_Pt600ToInf_ext4',\n 'WJetsToLNu_Wpt100t0200_ext1','WJetsToLNu_Wpt200toInf_ext1', \n]\n\n\nvbsjjlnu_samples_bkg_2018 = [ 'DYJetsToLL_M-50_ext2', 'TTWJetsToLNu', 'TTToSemiLeptonic_ext3']\n\n\nvbsjjlnu_samples_bkg += vbsjjlnu_samples_bkg_2016\nvbsjjlnu_samples_bkg += vbsjjlnu_samples_bkg_2018\n\nvbsjjlnu_samples_signal = [ 'WmTo2J_ZTo2L','WmToLNu_WmTo2J','WmToLNu_ZTo2J','WpTo2J_WmToLNu','WpTo2J_ZTo2L',\n 'WpToLNu_WpTo2J', 'WpToLNu_ZTo2J','ZTo2L_ZTo2J','WpToLNu_WmTo2J',\n 'WmTo2J_ZTo2L_aQGC', 'WmToLNu_WmTo2J_aQGC','WmToLNu_ZTo2J_aQGC','WpTo2J_WmToLNu_aQGC','WpTo2J_ZTo2L_aQGC',\n 'WpToLNu_WpTo2J_aQGC', 'WpToLNu_ZTo2J_aQGC','ZTo2L_ZTo2J_aQGC','WpToLNu_WmTo2J_aQGC', ]\n\nvbsjjlnu_samples_data2016 = ['SingleElectron_Run2016B-Nano1June2019_ver2-v1','SingleElectron_Run2016C-Nano1June2019-v1',\n 'SingleElectron_Run2016D-Nano1June2019-v1','SingleElectron_Run2016E-Nano1June2019-v1',\n 'SingleElectron_Run2016F-Nano1June2019-v1','SingleElectron_Run2016G-Nano1June2019-v1', \n 'SingleElectron_Run2016H-Nano1June2019-v1','SingleMuon_Run2016B-Nano1June2019_ver2-v1',\n 'SingleMuon_Run2016C-Nano1June2019-v1','SingleMuon_Run2016D-Nano1June2019-v1',\n 'SingleMuon_Run2016E-Nano1June2019-v1', 'SingleMuon_Run2016F-Nano1June2019-v1',\n 'SingleMuon_Run2016G-Nano1June2019-v1', 'SingleMuon_Run2016H-Nano1June2019-v1']\nvbsjjlnu_samples_data2017 = ['SingleElectron_Run2017B-Nano1June2019-v1','SingleElectron_Run2017C-Nano1June2019-v1',\n 'SingleElectron_Run2017D-Nano1June2019-v1','SingleElectron_Run2017E-Nano1June2019-v1',\n 'SingleElectron_Run2017F-Nano1June2019-v1','SingleMuon_Run2017B-Nano1June2019-v1',\n 'SingleMuon_Run2017C-Nano1June2019-v1','SingleMuon_Run2017D-Nano1June2019-v1',\n 'SingleMuon_Run2017E-Nano1June2019-v1','SingleMuon_Run2017F-Nano1June2019-v1']\nvbsjjlnu_samples_data2018 = ['SingleMuon_Run2018A-Nano25Oct2019-v1','SingleMuon_Run2018B-Nano25Oct2019-v1',\n 'SingleMuon_Run2018C-Nano25Oct2019-v1','SingleMuon_Run2018D-Nano25Oct2019_ver2-v1',\n 'EGamma_Run2018A-Nano25Oct2019-v1','EGamma_Run2018B-Nano25Oct2019-v1',\n 'EGamma_Run2018C-Nano25Oct2019-v1','EGamma_Run2018D-Nano25Oct2019_ver2-v1']\n\n\nvbsjjlnu_preselection_mc_2016 = '\"nLepton>=1 && Lepton_pt[0]>30 \\\n && ( Lepton_isTightElectron_mva_90p_Iso2016[0] > 0.5 \\\n || Lepton_isTightMuon_cut_Tight80x[0] > 0.5 ) \\\n && Alt$(Lepton_pt[1],0)<=10 && Alt$(Lepton_isLoose[1],1)> 0.5 \\\n && ( 
Alt$(Lepton_isTightElectron_mva_90p_Iso2016[1], 0) < 0.5 \\\n && Alt$(Lepton_isTightMuon_cut_Tight80x[1],0) < 0.5 ) \\\n \"'\nvbsjjlnu_preselection_data_2016 = '\"nLepton>=1 && Lepton_pt[0]>30 \\\n && Alt$(Lepton_pt[1],0)<=10 && Alt$(Lepton_isLoose[1],1)> 0.5 \\\n && ( Alt$(Lepton_isTightElectron_mva_90p_Iso2016[1], 0) < 0.5 \\\n && Alt$(Lepton_isTightMuon_cut_Tight80x[1],0) < 0.5 ) \\\n \"'\n\nvbsjjlnu_preselection_mc_2017 = '\"nLepton>=1 && Lepton_pt[0]>30 \\\n && ( Lepton_isTightElectron_mvaFall17V1Iso_WP90[0] > 0.5 \\\n || Lepton_isTightMuon_cut_Tight_HWWW[0] > 0.5 ) \\\n && Alt$(Lepton_pt[1],0)<=10 && Alt$(Lepton_isLoose[1],1)> 0.5 \\\n && ( Alt$(Lepton_isTightElectron_mvaFall17V1Iso_WP90[1], 0) < 0.5 \\\n && Alt$(Lepton_isTightMuon_cut_Tight_HWWW[1],0) < 0.5 ) \\\n \"'\n\nvbsjjlnu_preselection_data_2017 = '\"nLepton>=1 && Lepton_pt[0]>30 \\\n && Alt$(Lepton_pt[1],0)<=10 && Alt$(Lepton_isLoose[1],1)> 0.5 \\\n && ( Alt$(Lepton_isTightElectron_mvaFall17V1Iso_WP90[1], 0) < 0.5 \\\n && Alt$(Lepton_isTightMuon_cut_Tight_HWWW[1],0) < 0.5 ) \\\n \"'\n\nvbsjjlnu_preselection_mc_2018 = vbsjjlnu_preselection_mc_2017\nvbsjjlnu_preselection_data_2018 = vbsjjlnu_preselection_data_2017\n","sub_path":"NanoGardenerFrameworks/HWWSemilepHM/20200304_dennis_step/samples/VBSjjlnu_samples.py","file_name":"VBSjjlnu_samples.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"157919991","text":"# Given an array nums of size n, return the majority element. \n# \n# The majority element is the element that appears more than ⌊n / 2⌋ times. \n# You may assume that the majority element always exists in the array. \n# \n# \n# Example 1: \n# Input: nums = [3,2,3]\n# Output: 3\n# Example 2: \n# Input: nums = [2,2,1,1,1,2,2]\n# Output: 2\n# \n# \n# Constraints: \n# \n# \n# n == nums.length \n# 1 <= n <= 5 * 10⁴ \n# -2³¹ <= nums[i] <= 2³¹ - 1 \n# \n# \n# \n# Follow-up: Could you solve the problem in linear time and in O(1) space? \n# Related Topics Array Hash Table Divide and Conquer Sorting Counting 👍 6610 👎 291\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n def majorityElement(self, nums: List[int]) -> int:\n count = 0\n candidate = None\n\n for num in nums:\n if count == 0:\n candidate = num\n count += (1 if num == candidate else -1)\n return candidate\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n#\n# s = Solution()\n# print(s.majorityElement([3, 2, 3]))\n","sub_path":"leetcode/editor/en/[169]Majority Element.py","file_name":"[169]Majority Element.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"174590377","text":"\"\"\"URL Configuration for Game app\"\"\"\n\nfrom django.urls import path\n\n# pylint: disable=relative-beyond-top-level, invalid-name\nfrom . 
import views\n\napp_name = \"orders\"\nurlpatterns = [\n path(\n '',\n views.api_overview,\n name='api_overview'\n ),\n\n path(\n 'rating/seller/',\n views.SellerRatingList.as_view(),\n # name='all_seller_review'\n name='all_seller_ratings'\n ),\n\n path(\n 'rating/game/',\n views.GameRatingList.as_view(),\n # name='all_games_review_for_given_seller'\n name='all_game_ratings'\n ),\n\n path(\n 'rating/seller//',\n views.SellerRating.as_view(),\n name='seller_rating'\n ),\n\n path(\n 'rating/game//',\n views.GameRating.as_view(),\n name='game_rating'\n ),\n\n path(\n 'rating/seller//game/',\n views.all_games_rating_for_given_seller,\n name='all_games_rating_for_given_seller'\n ),\n\n path(\n 'rating/game//seller/',\n views.all_sellers_rating_for_given_game,\n name='all_sellers_rating_for_given_game'\n ),\n\n path(\n 'rating/seller//game/',\n views.game_rating_for_given_seller,\n name='game_rating_for_given_seller'\n ),\n\n path(\n 'rating/game//seller/',\n views.seller_rating_for_given_game,\n name='seller_rating_for_given_game'\n ),\n\n path(\n 'review/order/',\n views.OrderReviewList.as_view(),\n name='all_orders_review'\n ),\n path(\n 'review/order//',\n views.OrderReviewDetail.as_view(),\n name='detail_order_review'\n ),\n\n path(\n 'order//',\n views.ChangeOrderRequirements.as_view(),\n name='update_order_requirements'\n ),\n\n\n\n]\n","sub_path":"orders/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214611585","text":"#coding = utf-8\n#for python3.*\nimport dnsconf # 导入配置\nimport json\nimport urllib.request\nimport urllib.parse\n\ndm = dnsconf.dm\ntoken = dnsconf.token\nmd = dnsconf.md\n\n\ndef http_post(_url, _payload):\n try:\n _payload = urllib.parse.urlencode(_payload)\n _payload = _payload.encode('utf-8')\n rq = urllib.request.Request(_url)\n rq.add_header(\"Content-Type\",\n \"application/x-www-form-urlencoded;charset=utf-8\")\n rs = urllib.request.urlopen(rq, _payload)\n rs = rs.read().decode('utf-8')\n return rs\n except:\n return []\n\n\ndef getip(_mod=0):\n if _mod == 0:\n try:\n page = urllib.request.urlopen('http://ip.3322.net', timeout=10)\n data = page.read()\n data = data.decode('utf8')\n if (data.count('.') != 3):\n page = urllib.request.urlopen('http://ip.cip.cc', timeout=10)\n data = page.read()\n data = data.decode('utf8')\n\n def getmidstr(content, startStr, endStr):\n startIndex = content.index(startStr)\n if startIndex >= 0:\n startIndex += len(startStr)\n endIndex = content.index(endStr)\n return content[startIndex:endIndex]\n\n return (getmidstr(data, \"\", \"\\n\"))\n except:\n return '#网络故障'\n elif _mod == 1:\n try:\n import socket\n def get_local_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('baidu.com', 80))\n ip = s.getsockname()[0]\n s.close()\n except:\n ip = 'N/A'\n return ip\n return get_local_ip()\n except:\n return '#网络故障'\n\n\ndef dnsip(_sub, _domain):\n url = 'https://dnsapi.cn/Record.list'\n payload = {\"login_token\": token, \"format\": \"json\", \"domain\": _domain}\n rs = http_post(url, payload)\n rs = json.loads(rs)\n rcd = rs['status']['code']\n rs = rs.get('records')\n\n _urt = []\n if rcd == '1':\n for i in rs:\n if i['name'] == _sub and i['type'] == 'A':\n _rt = i['value']\n _dmid = i['id']\n _urt = [i['name'], _domain, _dmid, _rt]\n return _urt\n\n\ndef dnsset(_subdm2, _dm, _rid, _ip):\n url = 'https://dnsapi.cn/Record.Modify'\n playload = {\"login_token\": token, 
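# note: record_line is hard-coded to 默认 (Chinese for default), the provider's default routing line\n                 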
\"sub_domain\": _subdm2, \"domain\": _dm, \"record_type\": \"A\",\n \"record_id\": _rid, \"value\": _ip, \"format\": \"json\", \"record_line\": \"默认\"}\n rs = http_post(url, playload)\n rs = json.loads(rs)\n rs = rs.get('status')\n\n rs = rs['code']\n _rt = ''\n if rs == '1':\n _rt = '#设置成功'\n elif rs == '8':\n _rt = '#域名不存在'\n elif rs == '10004':\n _rt = '#token错误'\n elif rs == '104':\n _rt = '#Domain record already exists'\n else:\n _rt = '#其他错误'\n return _rt\n\n\ndef main():\n lip = getip(md)\n if len(lip) > 5:\n if len(dm) > 0:\n for i in dm:\n p = dnsip(i[0], i[1])\n if p != []:\n if p[3] == lip:\n print(i[0]+'.'+i[1]+' '+lip+' #ip相同无需更新')\n else:\n t = dnsset(p[0], p[1], p[2], lip)\n print(i[0]+'.'+i[1]+' '+p[3]+'-->'+lip+' '+t)\n else:\n print(i[0]+'.'+i[1]+' #请查看设置的域名是否已经在DNSpod上添加好')\n else:\n print('#配置文件有误!')\n else:\n print(lip)\n\nif __name__ == '__main__':\n main()","sub_path":"ddnspod.py","file_name":"ddnspod.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269627276","text":"import requests # requests 라이브러리 설치 필요\n\n# requests 를 사용해 요청(Request)하기\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\nresponse_data = requests.get('http://www.bluesman.co.kr/exec/front/Product/SubCategory',headers=headers)\n# 응답(response) 데이터인 json을 쉽게 접근할 수 있게 만들어 city_air 에 담고\nbrand_list = response_data.json()\n\n\nfor brand in brand_list:\n if (brand['parent_cate_no'] >= 99 and brand['parent_cate_no'] <= 200):\n print(brand['name'], brand['param'])\n\n#pont color red는 강조하는 문법\n#처음에는 서울시 미세먼지 api하는 것처럼 가져왔지만, 막혔었음! -> header(크롤링 막는 걸 속이는 방법)을 사용하니깐 뚫렸다~\n","sub_path":"shop_list/sh_bluesman.py","file_name":"sh_bluesman.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290329849","text":"import json\nfrom helper import*\nimport random\n\n\nclass Car:\n\n\tdef __init__(self, car, model, max_speed):\n\t\tself.car = car\n\t\tself.model = model\n\t\tself.max_speed = max_speed\n\n\tdef __str__(self):\n\t\treturn \"{} {} with max_speed {}\".format(self.car, self.model, self.max_speed)\n\n\t\n\nclass Driver:\n\n\tdef __init__(self, name, car):\n\t\tself.name = name\n\t\tself.car = car\n\n\tdef __str__(self):\n\t\treturn \"{} drives {}\".format(self.name, self.car)\n\n\tdef get_name(self):\n\t\treturn self.name\n\n\n\nclass Race:\n\n\tdef __init__(self, drivers):\n\t\tself.drivers = drivers\n\t\tself.points = [8, 6, 4]\n\n\n\n\tdef result(self):\n\t\tcrashed = []\n\t\tnot_crashed = []\n\n\t\tfor person in self.drivers:\n\t\t\tif random.randint(0, 1) == 1:\n\t\t\t\tcrashed.append(person)\n\t\t\telse:\n\t\t\t\tnot_crashed.append(person)\n\n\t\trandom.shuffle(not_crashed)\n\t\tz = zip(not_crashed, self.points)\n\t\n\n\t\treturn crashed, dict(z)\n\t\n\n\t\t\n\n\nclass Championship:\n\n\tdef __init__(self, name, races_count, race):\n\t\tself.name = name\n\t\tself.races_count = races_count\n\t\tself.race = race\n\n\n\n\tdef save_points_after_final_race(self):\n\t\t\n\t\tresult_for_the_race = {}\n\t\tfinal_result = {}\n\t\tfor race in range(1, self.races_count + 1):\n\t\t\tresult = {}\n\t\t\tresult[\"Crashed\"],result[\"Not_Crashed\"] = self.race.result() \n\n\t\t\tprint(\"RACE #{}\".format(race))\n\t\t\tprint(\".............START................\")\n\t\t\tfor not_crashed in 
result[\"Not_Crashed\"]:\n\t\t\t\tprint(not_crashed, \"-\", result[\"Not_Crashed\"][not_crashed])\n\t\t\tfor crashed in result[\"Crashed\"]:\n\t\t\t\tprint(\"Unfortunately, {} has crashed.\".format(crashed))\n\t\t\tprint(\"..................................\")\n\t\t\tresult_for_the_race[\"Race #{}\".format(race)] = [result]\n\t\t\n\t\tfinal_result[self.name] = result_for_the_race\n\t\tupdate_json(\"result.json\", final_result)\n\t\n\t\treturn \"Race is saved.\"\n\n\n\n\tdef count_the_points(self):\n\t\tdict_drivers = read_json(\"result.json\")\n\t\tresult = {}\n\t\tfor race in dict_drivers[self.name]:\n\t\t\tfor name in dict_drivers[self.name][race][0][\"Not_Crashed\"]:\n\t\t\t\tnew_driver = name\n\t\t\t\tif new_driver not in result:\n\t\t\t\t\tresult[new_driver] = dict_drivers[self.name][race][0][\"Not_Crashed\"][name]\n\t\t\t\telse:\n\t\t\t\t\tpoints = result[new_driver] + dict_drivers[self.name][race][0][\"Not_Crashed\"][name]\n\t\t\t\t\tresult[new_driver] = points\n\n\t\treturn result\n\n\n\t\n\tdef total_championship_standings(self):\n\t\td = self.count_the_points()\n\t\tfor x in sorted(d, key=d.get, reverse=True):\n\t\t\tprint(x ,d[x])\n\t\treturn \" \"\n\n\n\t\n\nclass CLI:\n\n\tdef __init__(self, race):\n\t\tself.race = race\n\t\t\n\n\tdef messageHello(self):\n\t\treturn \"Hello! Please, call command with the proper argument:\"\n\n\n\n\tdef start(self):\n\t\tprint(self.messageHello())\n\n\t\twhile True:\n\t\t\tcommand = input(\"Enter command: \")\n\t\t\ttry:\n\t\t\t\tif command == \"exit\":\n\t\t\t\t\tbreak\n\n\t\t\t\tif command == \"start\":\n\t\t\t\t\tparameter1_input = input(\"Enter race name: \")\n\t\t\t\t\tparameter2_input = input(\"Enter race count: \")\n\t\t\t\t\tc = Championship(parameter1_input, int(parameter2_input), self.race)\n\t\t\t\t\tprint(c.save_points_after_final_race())\n\n\t\t\t\tif command == \"standings\":\n\t\t\t\t\tprint(c.total_championship_standings())\n\t\t\texcept:\n\t\t\t\tprint(\"Invalid command.\")\n\t\n\ndef main():\n\n\tcars = read_json(\"cars.json\")\n\tlist_of_drivers = []\n\n\tfor person in cars[\"people\"]:\n\t\tc = Car(person[\"car\"], person[\"model\"],person[\"max_speed\"])\n\t\td = Driver(person[\"name\"], c)\n\t\tlist_of_drivers.append(d.get_name())\n\n\n\tr = Race(list_of_drivers)\n\tcli = CLI(r)\n\tcli.start()\n\t\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"week3/4-Car-Racing/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639435376","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# author: syaofox@gmail.com\nimport logging\nfrom time import sleep, time\n\nimport requests\nfrom PyQt5.QtCore import QObject\n\nfrom requests import RequestException\n\nimport utils\nfrom models.cache import DownCache\nfrom models.videostation_api import clearMeta\n\n\nclass BaseSpider:\n searchStartUrl = ''\n needLogin = False\n isLogined=False\n\n def __init__(self, name):\n self.name = name\n self.cache = DownCache(table_name='spider_cache')\n self.RequestSession = requests.session()\n self.RequestSession.headers.update({'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'})\n self.urls = set()\n self.oldurls = set()\n self.logger = logging.getLogger('spider:{}'.format(name))\n self.stoped = False\n\n def getTitlekeyStr(self, meta):\n if '标题' in meta['meta']:\n return '标题'\n if '集标题' in meta['meta']:\n return '集标题'\n if '电视节目标题' 
in meta['meta']:\n return '电视节目标题'\n return ''\n\n def getDateTitleStr(self, meta):\n if '发布日期' in meta['meta']:\n return '发布日期'\n\n if '发布日期(集)' in meta['meta']:\n return '发布日期(集)'\n\n if '发布日期(电视节目)' in meta['meta']:\n return '发布日期(电视节目)'\n\n return ''\n\n #检测是否登陆\n def checkLogin(self, res=None):\n pass\n #获得验证码图像\n def refreshVerifCode(self, url):\n pass\n #获取登陆参数\n def getVerifInfo(self):\n pass\n #登陆\n def login(self, account, psw, code, shash, referer):\n pass\n\n def parseSearchHtml(self, respone, meta):\n if not respone or not meta:\n return\n meta = clearMeta(meta)\n yield meta\n\n def search(self, keyword, meta):\n self.stoped = False\n if keyword.startswith('http'):\n meta['head']['dital_url'] = keyword\n for each in self.dital(meta,'','',True):\n yield each\n else:\n keyword = keyword.replace('/', ' ')\n self.add_urls(self.searchStartUrl.format(keyword))\n while self.has_url():\n if self.stoped:\n break\n url = self.get_urls()\n res = self.getPage(url)\n if res:\n for each in self.parseSearchHtml(res, meta):\n yield each\n else:\n utils.Log.addLog('搜索失败')\n\n def setMetaValue(self,meta,key,subkey,value,fillNull):\n\n if subkey in meta[key]:\n if fillNull:\n if not meta[key][subkey]:\n meta[key][subkey] = value\n else:\n meta[key][subkey] = value\n else:\n meta[key][subkey] = value\n\n def parseDital(self, respone, meta, season,episode,isSearch=False,fillNull=False):\n if not respone or not meta:\n return\n meta = clearMeta(meta)\n yield meta\n\n def dital(self, meta=None,season=None, episode=None, isSearch=False,fillNull=False):\n self.stoped = False\n\n url = meta['head']['dital_url']\n if not url:\n return\n res = self.getPage(url)\n if res:\n for each in self.parseDital(res, meta,season,episode, isSearch,fillNull):\n yield each\n\n # def login(self):\n # return True\n\n def clear(self):\n self.urls.clear()\n self.oldurls.clear()\n\n def add_urls(self, url, fource=False):\n if not url:\n return\n if (url not in self.oldurls and url not in self.urls) or fource:\n self.urls.add(url)\n\n def get_urls(self):\n if self.has_url():\n url = self.urls.pop()\n self.oldurls.add(url)\n return url\n else:\n return None\n\n def has_url(self):\n return len(self.urls) > 0\n\n def getPage(self, url, retry=0, params=None, headers=None, ignoreCache=False, ignoreRedirect=False):\n if self.stoped:\n return\n if not url:\n utils.Log.addLog('get请求页面出错,url为空')\n return\n try:\n if not ignoreCache:\n res = self.cache.get_cache(url)\n if res:\n return res\n proxies = {\n 'http': '127.0.0.1:9743',\n }\n if headers:\n self.RequestSession.headers.update(headers)\n res = self.RequestSession.get(url, timeout=utils.GET_PAGE_TIME_OUT, params=params, allow_redirects=not ignoreRedirect,proxies=proxies)\n sleep(utils.getRandomInterVal())\n if ignoreRedirect and res.status_code in [301, 302]:\n return None\n if res.status_code == 200:\n self.RequestSession.headers.update({'referer': res.url, 'Referer': res.url})\n if not ignoreCache:\n if utils.IMG_CACHE_KEEP_INFINITE_TIME and utils.ImageEditor.IsValidImage(res.content):\n self.cache.save_cache(url, res, 0, 0)\n else:\n self.cache.save_cache(url, res, time())\n return res\n elif 400 <= res.status_code <= 499:\n return None\n else:\n if retry >= utils.GET_PAGE_RETRY_MAX:\n utils.Log.addLog('get请求页面出错,重试达到最大数', url)\n return None\n else:\n retry += 1\n utils.Log.addLog('get请求页面出错{},5秒后重试{}……'.format(url, retry))\n sleep(5)\n return self.getPage(url, retry)\n\n except RequestException:\n if retry >= utils.GET_PAGE_RETRY_MAX:\n utils.Log.addLog('get请求页面出错,重试达到最大数', 
url)\n                return None\n            else:\n                retry += 1\n                utils.Log.addLog('get请求页面出错{},5秒后重试{}……'.format(url, retry))\n                sleep(5)\n                return self.getPage(url, retry)\n\n    def postPage(self, url, retry=0, data=None):\n        if not url or not data:\n            utils.Log.addLog('get请求页面出错:url:', url, 'data:', data)\n            return\n\n        try:\n\n            res = self.RequestSession.post(url, timeout=utils.GET_PAGE_TIME_OUT, data=data)\n            sleep(utils.getRandomInterVal())\n            if res.status_code == 200:\n                self.RequestSession.headers.update({'referer': res.url, 'Referer': res.url})\n            else:\n                if retry >= utils.GET_PAGE_RETRY_MAX:\n                    utils.Log.addLog('get请求页面出错,重试达到最大数', url)\n                    return None\n                else:\n                    retry += 1\n                    utils.Log.addLog('get请求页面出错{},5秒后重试{}……'.format(url, retry))\n                    sleep(5)\n                    return self.getPage(url, retry)\n        except RequestException:\n            if retry >= utils.GET_PAGE_RETRY_MAX:\n                utils.Log.addLog('get请求页面出错,重试达到最大数', url)\n                return None\n            else:\n                retry += 1\n                utils.Log.addLog('get请求页面出错{},5秒后重试{}……'.format(url, retry))\n                sleep(5)\n                return self.getPage(url, retry)\n\nif __name__ == '__main__':\n    x = BaseSpider('a')\n    x.search('a',{})","sub_path":"spiders/base_spider.py","file_name":"base_spider.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"15255372","text":"import csv\nimport networkx as nx\nimport math\nimport matplotlib.pyplot as plt\n\nNX_GRAPH = \"<class 'networkx.classes.graph.Graph'>\"  # str(nx.Graph), used by the type checks below\n\ndef get_subway_graph(csv_dir, Klass):\n    is_personal_graph_class = True\n    if str(Klass) == NX_GRAPH:\n        is_personal_graph_class = False\n    G = Klass()\n    # \"line\",\"name\",\"colour\",\"stripe\"\n    lines = {}\n    with open(csv_dir+'/lines.csv', 'r') as csvfile:\n        creader = csv.reader(csvfile)\n        next(creader, None)\n        for row in creader:\n            lines[int(row[0])] = {\"name\": row[1], \"color\": row[2], \"stripe\": row[3], \"line\":int(row[0])}\n\n    with open(csv_dir+'/connections.csv', 'r') as csvfile:\n        creader = csv.reader(csvfile)\n        next(creader, None)\n\n        if is_personal_graph_class:\n            for row in creader:\n                G.add_edge(int(row[0]), int(row[1]), attr_dict=lines[int(row[2])])\n        else:\n            for row in creader:\n                G.add_edge(int(row[0]), int(row[1]), name=lines[int(row[2])]['name'],\n                           color=lines[int(row[2])]['color'],\n                           stripe=lines[int(row[2])]['stripe'],\n                           line=lines[int(row[2])]['line'])\n\n    with open(csv_dir+'/stations.csv', 'r') as csvfile:\n        creader = csv.reader(csvfile)\n        next(creader, None)\n        if is_personal_graph_class:\n            for row in creader:\n                G.node[int(row[0])] = {\"latitude\": float(row[1]),\n                                       \"longitude\": float(row[2]),\n                                       \"name\": row[3],\n                                       \"display_name\": row[4],\n                                       \"zone\": float(row[5]),\n                                       \"total_lines\": int(row[6]),\n                                       \"rail\": row[7]\n                                       }\n        else:\n            for row in creader:\n                G.add_node(int(row[0]), latitude=float(row[1]),\n                           longitude=float(row[2]),\n                           name=row[3],\n                           display_name=row[4],\n                           zone=float(row[5]),\n                           total_lines=int(row[6]),\n                           rail=row[7])\n\n    for node1, node2 in G.edges():\n        norm = math.sqrt(\n            (G.node[node1]['longitude'] - G.node[node2]['longitude'])**2 +\n            (G.node[node1]['latitude'] - G.node[node2]['latitude'])**2\n        )\n        if is_personal_graph_class:\n            G.edge[node1][node2].update({'distance': norm})\n        else:\n            G.add_edge(node1, node2, distance=norm)\n\n    return G, lines\n\ndef draw_subway_graph(G, lines, figsize=(10,6), show_labels=False):\n    plt.figure(figsize=figsize)\n    plt.axis('off')\n    if str(type(G)) != NX_GRAPH:\n        G2 = graph2nx(G)\n    else:\n        G2 = G\n    pos = {x: (G2.node[x]['longitude'], G2.node[x]['latitude']) for x in G2.node.keys()}\n    nx.draw_networkx_nodes(G2, \n                           pos, \n                           node_size=1,\n                           
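# stations are drawn as tiny dots; pass show_labels=True to overlay station names\n                           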
)\n    if show_labels:\n        nx.draw_networkx_labels(G2,pos,\n                            {x: G2.node[x]['name'] for x in G2.nodes()},font_size=4)\n\n    if str(type(G)) == NX_GRAPH:\n        for line in lines.keys():\n            nx.draw_networkx_edges(\n                G2,\n                pos,\n                edgelist=[x for x in G2.edges() if G.edges[x[0],x[1]]['line'] == line],\n                edge_color=\"#\"+lines[line]['color'],\n            )\n    else:\n        for line in lines.keys():\n            nx.draw_networkx_edges(\n                G2,\n                pos,\n                edgelist=[x for x in G2.edges() if G.edge[x[0]][x[1]]['line'] == line],\n                edge_color=\"#\"+lines[line]['color'],\n            )\n\n    plt.show()\n\n\ndef graph2nx(gr):\n    G = nx.Graph()\n    for node1 in gr.edge.keys():\n        for node2, value in gr.edge[node1].items():\n            G.add_edge(node1, node2, **value)\n\n    for node, value in gr.node.items():\n        G.add_node(node, **value)\n    \n    return G\n\ndef dijkstra_tester(graph_class,dijkstra_method,origen=10,destino=235):\n    # Graph loading using NetworkX Graph library\n    nxG, lines_nxG = get_subway_graph('csv', nx.Graph)\n    # Graph loading using my Graph library\n    G, lines_G = get_subway_graph('csv', graph_class)\n    # Graph in NetworkX format converting from my graph\n    G2nx = graph2nx(G)\n\n    # Get shortest path for all 3 graphs\n    real_path = nx.dijkstra_path(nxG, origen, destino, 'distance')\n    my_path = dijkstra_method(G, origen, destino)\n    my_maybe_path = nx.dijkstra_path(G2nx, origen, destino, 'distance')\n\n    real_distance = 0\n    for i in range(len(real_path) - 1):\n        real_distance += (nxG[real_path[i]][real_path[i + 1]]['distance'])\n\n    my_maybe_distance = 0\n    for i in range(len(my_maybe_path) - 1):\n        my_maybe_distance += (G[my_maybe_path[i]][my_maybe_path[i + 1]]['distance'])\n\n    print(\"##### DISTANCES #####\")\n    print((\"Real : %.10f\" % real_distance))\n    print((\"Mine : %.10f\" % my_path['distance']))\n    print((\"Mine? : %.10f\" % my_maybe_distance))\n\n    print(\"\\n##### PATHS #####\")\n    print(\"Real : \" + str(real_path))\n    print(\"Mine : \" + str(my_path['path']))\n    print(\"Mine?: \" + str(my_maybe_path))\n\n    if my_path['path'] == real_path:\n        print(\"\\nTodo correcto!\")\n    elif my_path['path'] == my_maybe_path and my_path['path'] != real_path:\n        print(\"\\nLibreria de grafos incorrecta\")\n    else:\n        print(\"\\nDijkstra incorrecto\")\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"5919133","text":"import getpost\nfrom getpost import headers\nimport unittest\nimport requests\nimport re\nfrom unittest import TestCase as Base\nfrom unittest import skipIf\nfrom datetime import datetime\n\nfrom models import User, Post\nfrom models import testsession as session\nfrom models import init_testdb as init_db\n\nSKIP = True\nreason = 'Done'\nskip = skipIf(SKIP, reason)\n\n\nclass ModelTestCase(Base):\n\n    def setUp(self):\n        init_db()\n\n    @skip\n    def test_user(self):\n        u = User(1, 'a')\n        session.add(u)\n        session.commit()\n        user = session.query(User).filter(User.name == 'a').first()\n        self.assertTrue(user.user_id == 1)\n\n    @skip\n    def test_post(self):\n        time = datetime(1, 1, 1)\n        p = Post(1, 1, time)\n        session.add(p)\n        session.commit()\n        post = session.query(Post).filter(Post.user_zid == 1).first()\n        self.assertTrue(post.user_id == 1)\n\n    @skip\n    def test_orm(self):\n        aas = session.query(User).filter(User.user_id == 1).first()\n        user_id = 1\n        test = session.query(User).filter(User.user_id == user_id)\n        print('\\n', aas.name, aas.post_times)\n        users = session.query(User).all()\n        posts = session.query(Post).all()\n        print(len(users), len(posts))\n\n# 
@skipIf(SKIP==True, reason)\n\n\nclass GetPostTestCase(Base):\n \"\"\"docstring for GetPostTestCase\"\"\"\n\n def setUp(self):\n self.base_url = 'http://bbs.hexun.com/futures/board_46_all_1_d.html'\n self.headers = {\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0 \"\n }\n self.page = requests.get(self.base_url, headers=self.headers).text\n\n @skip\n def test_get_post_hrefs(self):\n hrefs = getpost.get_post_hrefs(self.page)\n for h in hrefs:\n print(h)\n try:\n post_page = requests.get(h, headers=self.headers).text\n except:\n continue\n\n @skip\n def test_next_page(self):\n while True:\n i = getpost.next_page(self.page)\n print(i)\n if not i:\n break\n self.page = requests.get(i, headers=self.headers).text\n\n def test_run(self):\n pass\n\n\nclass SendMsgTestCase(Base):\n base_url = r'http://bbs.hexun.com/funds/board_30_all_1_d.html'\n html = requests.get(base_url, headers=headers).text\n pattern = r'http://bbs.hexun.com[/\\w]*board_\\d*_all_1_d.html'\n boards = re.findall(pattern, html)\n print('Find {} boards.'.format(len(boards)))\n u = \"http://bbs.hexun.com/futures/board_46_all_1_d.html\"\n print(u in boards)\n\n\nunittest.main()\n\n#python \"C:\\Program Files\\Python36\\Lib\\site-packages\\pywrap.py\" D:\\python3code\\hexun\\client.py","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342158646","text":"# create a 300x300 canvas.\n# create a line drawing function that takes 2 parameters:\n# the x and y coordinates of the line's starting point\n# and draws a line from that point to the center of the canvas.\n# fill the canvas with lines from the edges, every 20 px, to the center.\n\nfrom tkinter import *\n\nroot = Tk()\n\ncanvas = Canvas(root, width='300', height='300' ,bg='black')\ncanvas.pack()\n\nw = 300\nh = 300\n\ndef line(x,y):\n center_1 = w/2\n center_2 = h/2\n green_line = canvas.create_line(x, y, center_1, center_2, fill='green')\n\nfor n in range(16):\n x = n * 20\n y = n * 20\n line(x,0)\n line(x,300)\n line(0,y)\n line(300,y)\n\nroot.mainloop()\n","sub_path":"week-04/day-03/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"92077115","text":"# Copyright 2017 reinforce.io. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport importlib\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorforce import TensorForceError\n\n\nepsilon = 1e-6\n\n\ndef prod(xs):\n p = 1\n for x in xs:\n p *= x\n return p\n\n\ndef shape(x, unknown=-1):\n return tuple(unknown if dims is None else dims for dims in x.get_shape().as_list())\n\n\ndef cumulative_discount(rewards, terminals, discount):\n if discount == 0.0:\n return rewards\n cumulative = 0.0\n for n, (reward, terminal) in reversed(list(enumerate(zip(rewards, terminals)))):\n if terminal:\n cumulative = 0.0\n cumulative = reward + cumulative * discount\n rewards[n] = cumulative\n return rewards\n\n\ndef np_dtype(dtype):\n if dtype == 'float' or dtype == float:\n return np.float32\n elif dtype == 'int' or dtype == int:\n return np.int32\n elif dtype == 'bool' or dtype == bool:\n return np.bool_\n else:\n raise TensorForceError()\n\n\ndef tf_dtype(dtype):\n if dtype == 'float' or dtype == float:\n return tf.float32\n elif dtype == 'int' or dtype == int:\n return tf.int32\n else:\n raise TensorForceError()\n\n\ndef function(f, predefined=None):\n if predefined is not None and f in predefined:\n return predefined[f]\n module_name, function_name = f.rsplit('.', 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)\n\n\n\n# def make_function(data, fk):\n# \"\"\"\n# Take data dict and convert string function reference with key `fk` to a real function reference, using\n# `fk`_args as *args and `fk`_kwargs as **kwargs, removing these keys from the data dict.\n\n# :param data: data dict\n# :param fk: string function key\n# :return: boolean\n# \"\"\"\n# fn = data.get(fk)\n\n# if fn is None:\n# return True\n# elif callable(fn):\n# return True\n# else:\n# args_val = \"{}_args\".format(fk)\n# kwargs_val = \"{}_kwargs\".format(fk)\n\n# args = data.pop(args_val, None)\n# kwargs = data.pop(kwargs_val, None)\n\n# func = get_function(fn)\n\n# if args is None and kwargs is None:\n# # If there are no args and no kwargs, just return the function reference\n# data[fk] = func\n# return True\n\n# # Otherwise, call the function\n# if args is None:\n# args = []\n# if kwargs is None:\n# kwargs = {}\n\n# data[fk] = func(*args, **kwargs)\n# return True\n\n\n\n\n# def repeat_action(environment, action, repeat_action=1):\n# \"\"\"\n# Repeat action `repeat_action_count` times. 
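\n#\n# (Aside, not part of the original file: a small worked example of the\n# cumulative_discount helper defined above --\n#     cumulative_discount([1.0, 1.0, 1.0], [False, False, True], 0.5)\n# modifies the list in place and returns [1.75, 1.5, 1.0].)\n#     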
Cumulate reward and return last state.\n#\n# :param environment: Environment object\n# :param action: Action to be executed\n# :param repeat_action: How often to repeat the action\n# :return: result dict\n# \"\"\"\n# if repeat_action <= 0:\n# raise ValueError('repeat_action lower or equal zero')\n#\n# reward = 0.\n# terminal_state = False\n# for count in xrange(repeat_action):\n# result = environment.execute_action(action)\n#\n# state = result['state']\n# reward += result['reward']\n# terminal_state = terminal_state or result['terminal_state']\n# info = result.get('info', None)\n#\n# return dict(state=state,\n# reward=reward,\n# terminal_state=terminal_state,\n# info=info)\n#\n\n\n\n\n\n\n\n# preprocessors = {\n# 'concat': preprocessing.Concat,\n# 'grayscale': preprocessing.Grayscale,\n# 'imresize': preprocessing.Imresize,\n# 'maximum': preprocessing.Maximum,\n# 'normalize': preprocessing.Normalize,\n# 'standardize': preprocessing.Standardize\n# }\n#\n#\n# def build_preprocessing_stack(config):\n# stack = preprocessing.Stack()\n#\n# for preprocessor_conf in config:\n# preprocessor_name = preprocessor_conf[0]\n#\n# preprocessor_params = []\n# if len(preprocessor_conf) > 1:\n# preprocessor_params = preprocessor_conf[1:]\n#\n# preprocessor_class = preprocessors.get(preprocessor_name, None)\n# if not preprocessor_class:\n# raise ConfigError(\"No such preprocessor: {}\".format(preprocessor_name))\n#\n# preprocessor = preprocessor_class(*preprocessor_params)\n# stack += preprocessor\n#\n# return stack\n\n\n\n# def create_agent(agent_type, config, scope='prefixed_scope'):\n# \"\"\"\n# Create agent instance by providing type as a string parameter.\n#\n# :param agent_type: String parameter containing agent type\n# :param config: Dict containing configuration\n# :param scope: Scope prefix used for distributed tensorflow scope separation\n# :return: Agent instance\n# \"\"\"\n# agent_class = agents.get(agent_type)\n#\n# if not agent_class:\n# raise TensorForceError(\"No such agent: {}\".format(agent_type))\n#\n# return agent_class(config, scope)\n\n\n# def get_default_config(agent_type):\n# \"\"\"\n# Get default configuration from agent by providing type as a string parameter.\n#\n# :param agent_type: String parameter containing agent type\n# :return: Default configuration dict\n# \"\"\"\n# agent_class = agents.get(agent_type)\n#\n# if not agent_class:\n# raise TensorForceError(\"No such agent: {}\".format(agent_type))\n#\n# return Configuration(agent_class.default_config), Config(agent_class.model_ref.default_config)\n\n\n#\n# agents = {\n# 'RandomAgent': RandomAgent,\n# 'DQNAgent': DQNAgent,\n# 'NAFAgent': NAFAgent,\n# 'TRPOAgent': TRPOAgent,\n# 'VPGAgent': VPGAgent,\n# 'DQFDAgent': DQFDAgent,\n# }\n","sub_path":"tensorforce/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"491298575","text":"#http://practice.geeksforgeeks.org/problems/bottom-view-of-binary-tree/1\n\nfrom collections import defaultdict\nimport sys\n\nclass Node:\n def __init__(self,val):\n self.right=None\n self.data=int(val)\n self.left=None\n\n\nclass Btree:\n def __init__(self):\n self.root=None\n\n def findInsert(self,root,val,newval,pos):\n temp=root\n if temp.data==int(val):\n if pos=='L':\n temp.left=Node(newval)\n return True\n else:\n temp.right=Node(newval)\n return True\n if temp.left:\n flagL=self.findInsert(temp.left,val,newval,pos)\n else:\n flagL=False\n if temp.right and not flagL:\n 
flagR=self.findInsert(temp.right,val,newval,pos)\n        else:\n            flagR=False\n        return flagL or flagR\n\n    def insert(self,s):\n        for i in range(0,len(s),3):\n            self.findInsert(self.root,s[i],s[i+1],s[i+2])\n\n\n\ndef bottomView(root,hmap,pos):\n    hmap[pos]=root.data\n    if root.left:\n        bottomView(root.left,hmap,pos-1)\n    if root.right:\n        bottomView(root.right,hmap,pos+1)\n\n\n\n\n\nif __name__ == '__main__':\n    t=int(input())\n    while(t>0):\n        n=int(input())\n        s=list(input().strip().split(' '))\n        bt=Btree()\n        bt.root=Node(s[0])\n        bt.insert(s)\n        hmap=defaultdict()\n        bottomView(bt.root,hmap,0)\n        h=sorted(hmap.items())\n        for item in h:\n            print(item[1],'',end='')\n        print()\n        t-=1\n","sub_path":"Tree/Bottom View of Binary Tree.py","file_name":"Bottom View of Binary Tree.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"501659268","text":"import numpy as np\n\n# number of hours studying, number of hours sleeping\n# inputs\nx = np.array(([2,9],[1,5],[3,6]),dtype=float)\n\n# exam score out of 100\ny = np.array(([92],[86],[89]),dtype=float)\n\nx_predicted = np.array(([4,8]), dtype=float)\n\n# unit scaling\nscale_x = x/np.amax(x,axis=0)\nscale_y = y/100\nscale_x_predicted = x_predicted/np.max(x_predicted,axis=0)\n\n#print(scale_x,\"\\n\")\n#print(scale_y,\"\\n\")\n#print(scale_x_predicted,\"\\n\")\n\n\n\n# parameters\nInputSize = 2\nOutputSize = 1\nHiddenSize = 3\n# weights\nw1 = np.random.randn(InputSize,HiddenSize)\nw2 = np.random.randn(HiddenSize,OutputSize)\n\ndef forward(x,w1,w2):\n    z = np.dot(x,w1)\n    z2 = sigmoid(z)\n    z3 = np.dot(z2, w2)\n    o = sigmoid(z3)\n    return o\n\ndef sigmoid(s):\n    return 1/(1+np.exp(-s))\n\ndef sigmoidPrime(s):\n    return s*(1 - s)\n\ndef backward(x,y,o,w1,w2):\n    # recompute the hidden activation; forward() does not return it\n    z2 = sigmoid(np.dot(x, w1))\n\n    o_error = y - o\n    o_delta = o_error * sigmoidPrime(o)\n\n    z2_error = o_delta.dot(w2.T)\n    z2_delta = z2_error * sigmoidPrime(z2)\n\n    w1 += x.T.dot(z2_delta)\n    w2 += z2.T.dot(o_delta)\n\ndef train(x,y,w1,w2):\n    o = forward(x,w1,w2)\n    backward(x,y,o,w1,w2)\n\ndef predict(x_predicted,w1,w2):\n    print(\"Predicted data based on trained weights: \")\n    print(\"Input (scaled): \\n\" + str(x_predicted))\n    print(\"Output: \\n\" + str(forward(x_predicted,w1,w2)))\n\ndef saveweights(w1,w2):\n    np.savetxt(\"w1.txt\", w1, fmt=\"%s\")\n    np.savetxt(\"w2.txt\", w2, fmt=\"%s\")\n\n\n#neu_net = Neural_Network()\n#o = neu_net.forward(x)\n#print(\"predicted output: \\n\" + str(o))\n#print(\"Actual output: \\n\" + str(y)) \n\n#print(neu_net.forward(x),\"\\n\")\n#print( ( y - neu_net.forward(x) )/100 )\n\nfor i in range(10000): # trains the NN 10,000 times\n    print(\"Input: \\n\" + str(scale_x))\n    print(\"Actual Output: \\n\" + str(scale_y))\n    print(\"Predicted Output: \\n\" + str(forward(scale_x,w1,w2)))\n    print(\"Loss: \" + str( np.mean( np.square( scale_y - forward( scale_x,w1,w2 ) ) ) ) ) # mean sum squared loss\n    print(\"\\n\")\n    train(scale_x, scale_y,w1,w2)\n\n#neu_net.saveweights()\n#neu_net.predict()","sub_path":"neural_network/N_N_test_1.py","file_name":"N_N_test_1.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"433951803","text":"import random\nimport time\nprint('Welcome to QQ'.center(50,'*'))\nprint('Please register your QQ number first'.center(50,' '))\nwhile True:\n\ta = input('Please enter the QQ number to register: ')\n\tb = input('Please enter the password for this QQ number: ')\n\t# check whether the input is valid\n\tif len(a) == 11 and a.startswith('1') and len(b) >= 6:\n\t\tprint('Input is valid')\n\t\tbreak  # exit this loop once the input is valid
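\n\t\t# (Aside, an illustrative sketch, not in the original script: the same\n\t\t# validation rules could be wrapped in a small helper --\n\t\t# def is_valid_signup(qq, pwd):\n\t\t#     return len(qq) == 11 and qq.startswith('1') and len(pwd) >= 6\n\t\t# )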
\n\telse:\n\t\tprint('Invalid input, please try again')\n\nprint('Sending you a verification code...'.center(50,' '))\nc = random.randint(1000,9999)\ntime.sleep(3)\nprint('Your verification code is %d' % c)\nwhile True:\n\tf = int(input('Please enter your verification code: '))\n\tif f == c:\n\t\tprint('Registration succeeded')\n\t\tbreak\n\telse:\n\t\tprint('Wrong verification code, please try again')\nprint('Please log in to QQ'.center(50,'-'))\nwhile True:\n\td = input('Please enter your QQ number')\n\te = input('Please enter your QQ password')\n\tif d == a and b == e:\n\t\tprint('Login succeeded')\n\t\tbreak\n\telse:\n\t\tprint('Login failed, please try again')\nprint('*'*50)\n# registration and login finished, enter the menu\nlist = []  # define an empty list of friends\ndef gn():  # menu function\n\tprint('1: add a friend'.center(50,' '))\n\tprint('2: find a friend'.center(50,' '))\n\tprint('3: modify a friend'.center(50,' '))\n\tprint('4: delete a friend'.center(50,' '))\n\tprint('5: show all friends and their info'.center(50,' '))\n\tprint('6: quit'.center(50,' '))\ndef xz():  # choose a menu item\n\twhile True:\n\t\ta = int(input('Please choose a function'))\n\t\tif a == 1:\n\t\t\tadd()  # call the add function\n\t\t\tprint('-'*50)\n\t\tif a == 2:\n\t\t\tfind()  # call the find function\n\t\t\tprint('-'*50)\n\t\tif a == 3:\n\t\t\txg()  # call the modify function\n\t\t\tprint('-'*50)\n\t\tif a == 4:\n\t\t\tsc()  # call the delete function\n\t\t\tprint('-'*50)\n\t\tif a == 5:\n\t\t\txs()  # call the show-all function\n\t\t\tprint('-'*50)\n\t\tif a == 6:\n\t\t\tprint('Goodbye, welcome back next time'.center(50,'*'))\n\t\t\tbreak\ndef add():  # add a friend\n\tl = {}\n\twhile True:\n\t\tb = input('Please enter the name of the friend to add')\n\t\t# keep asking until the name satisfies the rule below\n\t\tif len(b) <= 4:\n\t\t\tbreak\n\t\telse:\n\t\t\tprint('Invalid input, please try again')\n\twhile True:\n\t\tc = input('Please enter the phone number of the friend')\n\t\t# keep asking until the phone number satisfies the rule below\n\t\tif len(c) == 11 and c.startswith('1'):\n\t\t\tbreak\n\t\telse:\n\t\t\tprint('Invalid input, please try again')\n\twhile True:\n\t\td = input('Please enter the address of the friend')\n\t\t# keep asking until the address satisfies the rule below\n\t\tif len(d) <= 6:\n\t\t\tbreak\n\t\telse:\n\t\t\tprint('Invalid input, please try again')\n\t# put the fields into a dict\n\tl['b'] = b\n\tl['c'] = c\n\tl['d'] = d\n\tlist.append(l)  # append it to the list\n\tprint('Added successfully')\n\ndef find():  # find a friend\n\tb1 = input('Please enter the name to look up')\n\tflag = False  # assume the name is not in the list\n\tfor i in list:  # walk through the dicts in the list\n\t\tif i['b'] == b1:  # if the name is found, print the record\n\t\t\tprint('Name: %s\\nPhone: %s\\nAddress: %s' % (i['b'],i['c'],i['d']))\n\t\t\tflag = True  # mark it as found\n\t\t\tbreak\n\tif flag == False:\n\t\tprint('No such name')\n\n\n\ndef xg():  # modify a friend\n\tb2 = input('Please enter the name to modify')\n\tflag = False  # assume the name is not in the list\n\tfor i in list:\n\t\tif i['b'] == b2:\n\t\t\tflag = True\n\t\t\twhile True:\n\t\t\t\tprint('1: change the name')\n\t\t\t\tprint('2: change the phone number')\n\t\t\t\tprint('3: change the address')\n\t\t\t\tprint('4: quit modifying')\n\t\t\t\tj = int(input('Please choose what to modify'))\n\t\t\t\tif j == 1:\n\t\t\t\t\tb = input('Please enter the new name')\n\t\t\t\t\ti['b'] = b\n\t\t\t\t\tprint('Modified successfully')\n\t\t\t\t\tbreak\n\t\t\t\telif j == 2:\n\t\t\t\t\tc = input('Please enter the new phone number')\n\t\t\t\t\ti['c'] = c\n\t\t\t\t\tprint('Modified successfully')\n\t\t\t\t\tbreak\n\t\t\t\telif j == 3:\n\t\t\t\t\td = input('Please enter the new address')\n\t\t\t\t\ti['d'] = d\n\t\t\t\t\tprint('Modified successfully')\n\t\t\t\t\tbreak\n\t\t\t\telif j == 4:\n\t\t\t\t\tprint('Quit modifying')\n\t\t\t\t\tbreak\n\t\t\tbreak\n\tif flag == False:\n\t\tprint('No such name to modify')\n\n\ndef sc():  # delete a friend\n\tb3 = input('Please enter the name to delete')\n\tfor position, i in enumerate(list):\n\t\tif i['b'] == b3:\n\t\t\tlist.pop(position)  # remove the dict at this index of the list\n\t\t\tprint('Deleted successfully')\n\t\t\tbreak\n\telse:\n\t\tprint('No such name to delete')\n\n\n\n\ndef xs():  # show everything\n\tprint('Name\\tPhone\\t\\tAddress')\n\tfor i in list:\n\t\tprint(i['b']+'\\t'+i['c']+'\\t'+i['d'])\n\n\n\n\n\ngn()  # call the menu function\nxz()  # call the chooser function\n","sub_path":"答辩项目/01-答辩项目.py","file_name":"01-答辩项目.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"366821934","text":"\n\nclass OrganizationJSON(object):\n\n    RELATIONSHIPS = [\n        'current_team', 'offices', 'headquarters', 'categories', 
'news', 'funding_rounds', 'websites']\n\n RELATIONSHIP_FIELDS = {}\n RELATIONSHIP_FIELDS['current_team'] = [\n 'first_name', 'last_name', 'title', 'path']\n RELATIONSHIP_FIELDS['offices'] = [\n 'name', 'street_1', 'street_2', 'city', 'region', 'country_code']\n RELATIONSHIP_FIELDS['headquarters'] = [\n 'name', 'street_1', 'street_2', 'city', 'region', 'country_code']\n RELATIONSHIP_FIELDS['categories'] = ['name']\n RELATIONSHIP_FIELDS['news'] = ['posted_on', 'url', 'title']\n RELATIONSHIP_FIELDS['funding_rounds'] = ['name', 'path']\n RELATIONSHIP_FIELDS['websites']=['url','title']\n\n ORG_BOOL_FIELDS = ['is_closed', 'role_company', 'role_investor']\n ORG_INT_FIELDS = ['total_funding_usd', 'number_of_employees']\n ORG_STRING_FIELDS = ['permalink', 'primary_role', 'name',\n 'short_description', 'description', 'homepage_url', 'email_address']\n\n def __init__(self, init_json_block):\n self._data = {}\n self._raw=init_json_block\n init_source = init_json_block['data']['properties']\n for field in OrganizationJSON.ORG_BOOL_FIELDS:\n if field in init_source:\n self._data[field] = (init_source[field] == 'true')\n else:\n self._data[field] = False\n\n for field in OrganizationJSON.ORG_STRING_FIELDS:\n if field in init_source:\n self._data[field] = init_source[field]\n else:\n self._data[field] = ''\n\n for field in OrganizationJSON.ORG_INT_FIELDS:\n if field in init_source:\n self._data[field] = int(init_source[field])\n else:\n self._data[field] = 0\n for field in OrganizationJSON.RELATIONSHIPS:\n self._data[field] = []\n\n def is_closed(self):\n return self._data['is_closed']\n\n def get_funding(self):\n return self._data['total_funding_usd']\n\n def has_funding_round(self):\n if 'funding_rounds' in self._data:\n return len(self._data['funding_rounds'])>0\n else:\n return False\n\n\n def get_field(self,f_string):\n if f_string in self._data:\n return self._data[f_string]\n else:\n return u''\n\n def add_relationship(self, field, field_json_list):\n self._data[field] = field_json_list\n\n def get_all_data(self):\n return self._data\n\n def get_people(self):\n if 'current_team' in self._data:\n return self._data['current_team']\n\n def get_rounds(self):\n if 'funding_rounds' in self._data:\n return self._data['funding_rounds']\n\n def __str__(self):\n return str(self._data)\n","sub_path":"organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6952049","text":"from django.shortcuts import render\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom datetime import datetime\nfrom content.models import *\nfrom django.views.generic import TemplateView, ListView\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\n\ncourses = Course.objects.all()\nspeakers = Speaker.objects.all()\narticles = Article.objects.all()\nstudents = Student.objects.all()\nreviews = Review.objects.all()\nenrolles = Enrollee.objects.all()\n\ncategories = Category.objects.all()\n\nyear = datetime.today().year\nyear_work = year - 2012\n\ndef index(request):\n\treturn render(\n\t\trequest,\n\t\t'index.html',\n\t\t{\n\t\t\t'title': 'Online IT school',\n\t\t\t'courses': courses,\n\t\t\t'coursecn': courses.count(),\n\t\t\t'studentcn': '322',\n\t\t\t'reviewcn': reviews.count(),\n\t\t\t'year_work': year_work,\n\t\t\t'articles': articles,\n\t\t\t'reviews': reviews,\n\t\t\t'year': year,\n\t\t}\n\t)\n\ndef 
course(request):\n\treturn render(\n\t\trequest,\n\t\t'course.html',\n\t\t{\n\t\t\t'title': 'Courses',\n\t\t\t'subtitle': 'Make Your Success a Priority',\n\t\t\t'courses': courses,\n\t\t\t'coursecn': courses.count(),\n\t\t\t'year': year,\n\t\t\t'categories': categories,\n\t\t\t'reviews': reviews,\n\t\t}\n\t)","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"229047306","text":"from multiprocessing import Pool\nimport os\nimport time\ndef run(**kw):\n    time.sleep(1)\n    print(\"process id:\", os.getpid(), kw)\nif __name__ == '__main__':\n    pool = Pool(5)\n    for i in range(10):\n        pool.apply(func=run,kwds={\"count\": i+1})\n    pool.close()\n    pool.join()","sub_path":"day0218/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"478731160","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom ..items import TencentItem\nfrom urllib import parse\nimport requests\n# 1. Import RedisSpider from scrapy_redis\nfrom scrapy_redis.spiders import RedisSpider\n\n# 2. Inherit from the RedisSpider class\nclass TencentSpider(RedisSpider):\n    name = 'tencent'\n    allowed_domains = ['careers.tencent.com']\n    one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1566266592644&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword={}&pageIndex={}&pageSize=10&language=zh-cn&area=cn'\n    two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1566266695175&postId={}&language=zh-cn'\n    # Build the first URL to crawl\n    user_input = input('Please enter a job type: ')\n    user_input = parse.quote(user_input)\n    page_one_url = one_url.format(user_input,1)\n    # 3. Remove start_urls\n    # 4. Set the redis_key below so scrapy_redis can seed the first request
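\n    # (Aside, an assumed operational note, not in the original spider: with\n    # scrapy_redis the crawl is started by pushing a start URL onto this key\n    # from outside the process, e.g. in redis-cli:\n    #     lpush tencent:spider <page_one_url>\n    # )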
\n    redis_key = 'tencent:spider'\n\n    def parse(self,response):\n        # get the total number of pages: total\n        total = self.get_total(self.user_input)\n        for index in range(1,11):\n            url = self.one_url.format(self.user_input,index)\n            yield scrapy.Request(\n                url = url,\n                callback = self.parse_one_page\n            )\n    # get the total page count\n    def get_total(self,user_input):\n        url = self.one_url.format(user_input,1)\n        html = requests.get(url=url).json()\n        total = html['Data']['Count'] // 10 + 1\n\n        return total\n\n    def parse_one_page(self, response):\n        html = response.text\n        html = json.loads(html)\n        for job in html['Data']['Posts']:\n\n            post_id = job['PostId']\n            url = self.two_url.format(post_id)\n            yield scrapy.Request(\n                url = url,\n                callback = self.parse_two_page\n            )\n\n    # parse the detail (second-level) page\n    def parse_two_page(self,response):\n        item = TencentItem()\n        html = json.loads(response.text)['Data']\n        item['job_name'] = html['RecruitPostName']\n        item['job_type'] = html['CategoryName']\n        item['job_duty'] = html['Responsibility']\n        item['job_require'] = html['Requirement']\n        item['job_address'] = html['LocationName']\n        item['job_time'] = html['LastUpdateTime']\n\n        yield item\n\n\n\n\n\n\n\n\n\n","sub_path":"xiaojian/xiaojian/forth_phase/spider/day10/spider_day10_note_course/day10/Tencent_redis_key/Tencent/spiders/tencent.py","file_name":"tencent.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"518489392","text":"\"\"\"\nBinary Tree Preorder Traversal\n- Link: https://leetcode.com/explore/learn/card/data-structure-tree/134/traverse-a-tree/928/\n\"\"\"\n\n\n# Approach 1: Recursive\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def preorderTraversal(self, root: TreeNode) -> List[int]:\n        ans = []\n        if not root:\n            return ans\n        self.helper(root, ans)\n        return ans\n\n    def helper(self, node: TreeNode, ans: List[int]):\n        ans.append(node.val)\n        if node.left:\n            self.helper(node.left, ans)\n        if node.right:\n            self.helper(node.right, ans)\n","sub_path":"leetcode/traversal/binary_tree_preorder_traversal.py","file_name":"binary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"644094594","text":"# -*- coding:utf-8 -*-\n# crawl web page info - home page\n\nimport threading\nimport time\nimport parsetask\n\n# IOM parsing\nclass IOMThread(threading.Thread):\n    def run(self):\n        while True:\n            IOM = parsetask.LoginIOM()\n            # IOM.login()\n            # print(IOM.login())\n            if IOM.login() is not None:\n                IOM.fillList()\n                time.sleep(60)\n            else:\n                return False\n\ndef startParse():\n    # IOM work-order parsing\n    parseIOM = IOMThread()\n    parseIOM.start()\n\nif __name__ == '__main__':\n    startParse()","sub_path":"zhihu/newone.py","file_name":"newone.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"178214570","text":"class Stack:\n    def __init__(self):\n        self.items = []\n\n    def isEmpty(self):\n        return self.items == []\n\n    def push(self, item):\n        self.items.append(item)\n\n    def pop(self):\n        return self.items.pop()\n\n    def top(self):\n        return self.items[-1]\n\n    def size(self):\n        return len(self.items)\n    \n    def length(self):\n        return len(self.items)\n\n\ndef isOperator(value):\n    string = \"+-*/^\"\n    if value in string:\n        return True\n    else:\n        return False\n    \ndef eval_postfix(exp):\n    stack = 
Stack()\n    digits = \"0123456789\"\n    result = None\n    for char in exp:\n        try:\n            result = None\n            if char in digits:\n                stack.push(int(char))\n            elif isOperator(char) and not stack.isEmpty():\n                op1 = stack.pop()\n                op2 = stack.pop()\n                if char ==\"+\":\n                    result = op2 + op1\n                elif char ==\"-\":\n                    result = op2 - op1\n                elif char ==\"*\":\n                    result = op2 * op1\n                elif char ==\"/\":\n                    result = op2 / op1\n                elif char ==\"^\":\n                    result = op2 ** op1  # '^' denotes exponentiation here; Python's ^ is bitwise XOR\n                \n                if result is not None:\n                    stack.push(result)\n                else:\n                    pass\n        except:\n            return -1\n    \n    return stack.pop()\n    \nexp = input() \nprint(eval_postfix(exp))","sub_path":"Stack/eval_postfix_or_-1.py","file_name":"eval_postfix_or_-1.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"391401747","text":"# dcm2gibbs\n# created by Anonto Zaman, translated to python by Kirthi Kumar\n# Conversion of direction cosine matrix to 3-1-2 gibbs vector\n# Output is a row vector with components representing rotations about the\n# 3-1-2 axes respectively\n\nimport numpy as np\nimport math\n\ndef dcm2gibbs(dcm):\n    # allocate the 1x3 output row vector\n    g = np.zeros((1, 3))\n    # indices shifted from the original 1-based (MATLAB-style) form to 0-based\n    g[0, 0] = -1 * math.atan(dcm[1, 0] / dcm[1, 1])\n    g[0, 1] = math.asin(dcm[1, 2])\n    g[0, 2] = math.atan(dcm[0, 2] / dcm[2, 2])\n    return g\n","sub_path":"reference/Python_Methods/dcm2gibbs.py","file_name":"dcm2gibbs.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"487875784","text":"import socket\nimport threading\n\nbind_ip = '0.0.0.0'\nbind_port = 9999\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((bind_ip, bind_port))\nserver.listen(5)\n\nprint (' - Listening on {0}:{1}'.format(bind_ip, bind_port))\n\n# Client handling thread\ndef handle_client(client_socket):\n\t# Print data sent by client\n\trequest = client_socket.recv(1024)\n\tprint (' - Received: {0}'.format(request))\n\t# Return packet (bytes, since sockets send bytes in Python 3)\n\tclient_socket.send(b'ACK!')\n\tclient_socket.close()\n\ndef main():\n\twhile True:\n\t\tclient, addr = server.accept()\n\t\tprint (' - Accepted connection from: {0}:{1}'.format(addr[0], addr[1]))\n\t\t# Handle received data in its own thread\n\t\tclient_handler = threading.Thread(target=handle_client, args=(client, ))\n\t\tclient_handler.start()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Network Programming/Basic_TCPServer.py","file_name":"Basic_TCPServer.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"94625980","text":"from __future__ import division  # This is only for Python 2.7\nimport os\n\nROOT_DIR = os.path.dirname(os.path.dirname(__file__))\n\nPATHS = {}\n\nPATHS['MESHES'] = os.path.join(ROOT_DIR, \"meshes/\")\nPATHS['SNAPSHOTS'] = os.path.join(ROOT_DIR, \"output/snapshots/\")\nPATHS['PLOTS'] = os.path.join(ROOT_DIR, \"output/plots/\")\n\nif not os.path.isdir(PATHS['SNAPSHOTS']):\n    os.makedirs(PATHS['SNAPSHOTS'])\n\nif not os.path.isdir(PATHS['PLOTS']):\n    os.makedirs(PATHS['PLOTS'])\n\nif not os.path.isdir(PATHS['MESHES']):\n    print('\\nThe \"meshes\" folder is missing from the project folder.')\n    ans = ''\n    while ans not in ['A', 'S']:\n        ans = input('Abort (A) or create a Symlink (S)?\\n').upper()\n\n    if ans == 'S':\n        print('\\nPlease input the real path of your world data for creating a symlink:')\n        inp_path = ''\n        while True:\n            inp_path = input().strip('\\'\\\"')\n            if 
os.path.isdir(inp_path):\n break\n else:\n print(\"\\nInvalid path. Please try again.\\n\")\n os.symlink(inp_path, PATHS['MESHES'].rstrip('/'))\n del inp_path, ans\n else:\n exit()\n\ndel os\n\nDEF_DISPSIZE = (960, 720)\n","sub_path":"navig/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395067729","text":"import torch\nimport ipdb\nfrom copy import deepcopy\nimport random\nimport requests\nimport json\nfrom itertools import (takewhile, repeat, islice)\n\n\ndef modify_sentence(ids, min_change=2, prob=0.1, k=2):\n def _random_deletion(rids):\n num_deletion = max(min_change, int(prob*len(rids)))\n delete_idx = random.sample(range(len(rids)), num_deletion)\n n_ids = [rids[i] for i in range(len(rids)) if i not in delete_idx]\n return n_ids\n def _random_swap(rids):\n num_swap = max(min_change, int(prob*len(rids)))\n swap_idx = [random.sample(range(len(rids)), 2) for _ in range(num_swap)]\n n_ids = deepcopy(rids)\n for i, j in swap_idx:\n n_ids[i], n_ids[j] = n_ids[j], n_ids[i]\n return n_ids\n def _random_duplicate(rids):\n # 1-gram or 2-gram\n num_duplicate = max(min_change, int(prob*len(rids)))\n duplicate_idx = random.sample(range(len(rids)-1), num_duplicate)\n n_rids = []\n for idx, i in enumerate(rids):\n if idx in duplicate_idx:\n if random.random() > 0.5:\n # 2-gram\n n_rids.extend([rids[idx], rids[idx+1], rids[idx], rids[idx+1]])\n else:\n n_rids.extend([rids[idx], rids[idx]])\n else:\n n_rids.append(i)\n return n_rids\n rest = []\n for _ in range(k):\n rids = _random_deletion(ids)\n rids = _random_swap(rids)\n rids = _random_duplicate(rids)\n rest.append(rids)\n return rest\n\n\ndef truncate_pair_with_other_ids(cids, rids, tcids, trids, scids, srids, max_length):\n # change the cids and rids in place\n max_length -= 3 # [CLS], [SEP], [SEP]\n while True:\n l = len(cids) + len(rids)\n if l <= max_length:\n break\n if len(cids) > 2 * len(rids):\n cids.pop(0)\n tcids.pop(0)\n scids.pop(0)\n else:\n rids.pop()\n trids.pop()\n srids.pop()\n\n\ndef truncate_pair_with_labels(cids, cids_labels, rids, max_length, rids_labels=None):\n # change the cids and rids in place\n max_length -= 3 # [CLS], [SEP], [SEP]\n while True:\n l = len(cids) + len(rids)\n if l <= max_length:\n break\n if len(cids) > 2 * len(rids):\n cids.pop(0)\n cids_labels.pop(0)\n else:\n rids.pop()\n if rids_labels:\n rids_labels.pop()\n\n\ndef truncate_pair(cids, rids, max_length):\n # change the cids and rids in place\n max_length -= 3 # [CLS], [SEP], [SEP]\n while True:\n l = len(cids) + len(rids)\n if l <= max_length:\n break\n if len(cids) > 2 * len(rids):\n cids.pop(0)\n else:\n rids.pop()\n\n\ndef truncate_pair_two_candidates(cids, rids1, rids2, max_length, sids=None):\n max_length -= 4 # [CLS] ctx [SEP] rids1 [SEP] rids2 [SEP]\n while True:\n l = len(cids) + len(rids1) + len(rids2)\n if l <= max_length:\n break\n if len(cids) > len(rids1) + len(rids2):\n cids.pop(0)\n if sids:\n sids.pop(0)\n elif len(rids1) > len(rids2):\n rids1.pop()\n else:\n rids2.pop()\n\n\ndef generate_mask(ids, pad_token_idx=0):\n '''generate the mask matrix of the ids, default padding token idx is 0'''\n mask = torch.ones_like(ids)\n mask[ids == pad_token_idx] = 0.\n return mask\n # attn_mask_index = ids.nonzero().tolist() # [PAD] IS 0\n # attn_mask_index_x, attn_mask_index_y = [i[0] for i in attn_mask_index], [i[1] for i in attn_mask_index]\n # attn_mask = torch.zeros_like(ids)\n # attn_mask[attn_mask_index_x, 
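\n    # (Aside, a worked example of generate_mask above, not in the original\n    # file: generate_mask(torch.tensor([[5, 6, 0]])) returns tensor([[1, 1, 0]]),\n    # i.e. zeros exactly at the pad_token_idx positions.)\n    # 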
attn_mask_index_y] = 1\n    # return attn_mask\n\n\ndef to_cuda(*args):\n    '''map the tensor on cuda device'''\n    if not torch.cuda.is_available():\n        return args\n    tensor = []\n    for i in args:\n        i = i.cuda()\n        tensor.append(i)\n    return tensor\n\n\ndef mask_sentence(\n        ids, min_mask_num, max_mask_num, masked_lm_prob, \n        special_tokens=[], mask=-1, vocab_size=21128,\n    ):\n    '''change the ids, and return the mask_label'''\n    num_valid = len([i for i in ids if i not in special_tokens])\n    num_mask = max(\n        min_mask_num,\n        min(\n            int(masked_lm_prob * num_valid),\n            max_mask_num,\n        )\n    )\n\n    mask_pos = [idx for idx, i in enumerate(ids) if i not in special_tokens]\n    mask_idx = random.sample(mask_pos, num_mask)\n    mask_label = []\n    for idx, i in enumerate(ids):\n        if idx in mask_idx:\n            ratio = random.random()\n            if ratio < 0.8:\n                ids[idx] = mask\n            elif ratio < 0.9:\n                # random change\n                ids[idx] = random.choice(list(range(vocab_size)))\n            mask_label.append(i)\n        else:\n            mask_label.append(-1)\n    return mask_label\n\n# ========== dual-bert ========== #\ndef length_limit(ids, max_len):\n    '''the first token must be [CLS]'''\n    if len(ids) > max_len:\n        ids = [ids[0]] + ids[-(max_len-1):]\n    return ids\n\ndef length_limit_res(ids, max_len, sep=0):\n    '''the last token must be [SEP], and the first token must be [CLS]'''\n    if len(ids) > max_len:\n        ids = ids[:max_len-1] + [sep]\n    return ids\n\n# ======== Evaluation Perturbation ========== # \ndef delete(ids, tids, delete_ratio=0.15, min_delete_num=2, special_tokens=[]):\n    delete_num = max(\n        min_delete_num,\n        min(\n            len(ids),\n            int(len(ids) * delete_ratio),\n        )\n    )\n    delete_idx = [i for i in range(len(ids)) if ids[i] not in special_tokens]\n    delete_idx = random.sample(delete_idx, delete_num)\n\n    new_ids, delete_label, new_tids = [], [], []\n    # compare positions (idx) against delete_idx, which holds positions, not token values\n    for idx, i in enumerate(ids):\n        if idx not in delete_idx:\n            new_ids.append(i)\n            delete_label.append(-1)\n        else:\n            delete_label.append(len(new_ids))\n    pert_label = [-1 if i == -1 else 0 for i in delete_label]\n    return new_ids, delete_label, pert_label\n\ndef duplicate(ids, duplicate_ratio=0.15, min_duplicate_num=2, special_tokens=[]):\n    duplicate_num = max(\n        min_duplicate_num,\n        min(\n            len(ids),\n            int(len(ids) * duplicate_ratio),\n        )\n    )\n    duplicate_idx = [i for i in range(len(ids)) if ids[i] not in special_tokens]\n    duplicate_idx = random.sample(duplicate_idx, duplicate_num)\n\n    new_ids, duplicate_label = [], []\n    # compare positions (idx) against duplicate_idx, which holds positions\n    for idx, i in enumerate(ids):\n        if idx not in duplicate_idx:\n            new_ids.append(i)\n            duplicate_label.append(-1)\n        else:\n            num = random.choice([2, 3, 4])\n            new_ids.extend([i] * num)\n            duplicate_label.extend([len(new_ids)-i_ for i_ in range(num)])\n    pert_label = [-1 if i == -1 else 1 for i in duplicate_label]\n    return new_ids, duplicate_label, pert_label\n\n\ndef replacement(ids, replace_ratio=0.15, min_replace_num=2, vocab_size=0, special_tokens=[]):\n    replace_num = max(\n        min_replace_num,\n        min(\n            len(ids),\n            int(len(ids) * replace_ratio),\n        )\n    )\n    replace_idx = [i for i in range(len(ids)) if ids[i] not in special_tokens]\n    replace_idx = random.sample(replace_idx, replace_num)\n\n    new_ids, replace_label = [], []\n    # compare positions (idx) against replace_idx, which holds positions\n    for idx, i in enumerate(ids):\n        if idx not in replace_idx:\n            new_ids.append(i)\n            replace_label.append(-1)\n        else:\n            # random replace\n            new_ids.append(random.choice(range(vocab_size)))\n            replace_label.append(i)\n    pert_label = [-1 if i == -1 else 2 for i in replace_label]\n    return new_ids, replace_label, pert_label\n\n\ndef mask_sentence_only_mask(\n    ids, min_mask_num, max_mask_num, masked_lm_prob, \n    special_tokens=[], mask=-1, vocab_size=21128,\n    ):\n    '''change the ids, 
and return the mask_label'''\n num_valid = len([i for i in ids if i not in special_tokens])\n num_mask = max(\n min_mask_num,\n min(\n int(masked_lm_prob * num_valid),\n max_mask_num,\n )\n )\n mask_pos = [idx for idx, i in enumerate(ids) if i not in special_tokens]\n mask_idx = random.sample(mask_pos, num_mask)\n mask_label = []\n for idx, i in enumerate(ids):\n if idx in mask_idx:\n ids[idx] = mask\n mask_label.append(i)\n else:\n mask_label.append(-1)\n return mask_label\n\n# ========== context augmentation ========== #\ndef sentence_shuffle(context_utterances):\n if len(context_utterances) == 1:\n return context_utterances\n else:\n random_idx = list(range(len(context_utterances)))\n while True:\n random.shuffle(random_idx)\n if random_idx[-1] != len(context_utterances) - 1:\n break\n context_utterances = [context_utterances[i] for i in random_idx]\n return context_utterances\n\ndef token_shuffle(context_utterances):\n for i in range(len(context_utterances)):\n random.shuffle(context_utterances[i])\n return context_utterances\n\ndef sentence_deletion(context_utterances):\n if len(context_utterances) == 1:\n return context_utterances\n else:\n random_idx = random.choice(range(len(context_utterances)-1))\n context_utterances = [context_utterances[i] for i in range(len(context_utterances)) if i != random_idx]\n return context_utterances\n\ndef replace_last_utterance(context_utterances, pool):\n response = random.choice(pool)['rids']\n response = response[1:-1]\n context_utterances[-1] = response\n return context_utterances\n\ndef random_insert_before_context(context_utterances, pool):\n u = random.choice(random.choice(pool)['cids'])\n context_utterances.insert(0, u)\n return context_utterances\n\ndef random_insert_context(context_utterances, pool):\n u = random.choice(random.choice(pool)['cids'])\n idx = random.choice(range(len(context_utterances)))\n context_utterances.insert(idx, u)\n return context_utterances\n\n\n# texsmart chinese tokenization\ndef texsmart_segmentation(engine, text, useful_pos_tag=None):\n output = engine.parse_text(text)\n seg_sentence = []\n for each_word in output.phrases():\n # if each_word.tag in useful_pos_tag:\n seg_sentence.append(each_word.str)\n return seg_sentence\n\n# count lines of the large file\ndef iter_count(file_name):\n buffer = 1024 * 1024\n with open(file_name) as f:\n buf_gen = takewhile(lambda x: x, (f.read(buffer) for _ in repeat(None)))\n return sum(buf.count('\\n') for buf in buf_gen)\n\n# iter load the lines\ndef load_lines_chunk(file, num_lines):\n next_n_lines = list(islice(file, num_lines))\n return next_n_lines\n","sub_path":"easynlp/dataloader/util_func.py","file_name":"util_func.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"600854945","text":"# -*- coding: utf-8 -*-\n\nfrom tourapi.detail import TourAPI, get_string\nfrom tourapi.config import ServiceKey, Languages\nfrom mysql_config import MysqlHost, MysqlUser, MysqlPass, MysqlDB\nfrom mysql_lib import update_common, update_intro, update_info, update_image\nimport pymysql\nimport json\n\n\ndef isError(detail):\n if ('resultCode' in detail): # API Error\n print(\"API Error: \", detail)\n return True\n else:\n return False\n\nconn = pymysql.connect(host = MysqlHost, user = MysqlUser, password = MysqlPass, db = MysqlDB)\ncurs = conn.cursor()\n\nfor language in Languages:\n print(language)\n # if language[\"code\"] == \"Eng\":\n # continue\n\n api = TourAPI(ServiceKey, 
language[\"code\"])\n\n query = \"SELECT id, typeId, title FROM content WHERE language=%s AND isModified=1\"\n curs.execute(query, (language[\"code\"]))\n rows = curs.fetchall()\n for row in rows:\n print(row)\n id = row[0]\n type_id = row[1]\n title = row[2]\n\n common = api.get_detail_common(id)\n if (isError(common)):\n break\n else:\n update_common(curs, id, common)\n\n intro = api.get_detail_intro(id, type_id)\n if (isError(intro)):\n break\n else: \n update_intro(curs, id, intro)\n\n info = api.get_detail_info(id, type_id)\n if (isError(info)):\n break\n update_info(curs, id, info)\n\n images = api.get_detail_image(id, False)\n if (isError(images)):\n break\n else:\n update_image(curs, id, is_food=0, image=images)\n\n # 음식점이면 음식 이미지 추가\n if type_id == 39 or type_id == 82: # 39는 국문, 82는 외국어\n foodImages = api.get_detail_image(id, True)\n if (isError(images)):\n break\n else:\n update_image(curs, id, is_food=1, image=foodImages)\n\n update_sql = \"UPDATE content SET isModified = 0 WHERE id = %s\"\n curs.execute(update_sql, (id))\n conn.commit()\n\nconn.close()\n","sub_path":"00_update.py","file_name":"00_update.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38704848","text":"#!/usr/bin/env python\n\"\"\"\nRuns pylint on all contained python files in this directory, printint out\nnice colorized warnings/errors without all the other report fluff\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom pylint.lint import Run\n\n__author__ = \"Matthew 'MasterOdin' Peveler\"\n__license__ = \"The MIT License (MIT)\"\n\nIGNORE_FOLDERS = [\".git\", \".idea\", \"__pycache__\"]\n\n\ndef run_runner():\n \"\"\"\n Runs pylint on all python files in the current directory\n \"\"\"\n\n pylint_files = get_files_from_dir(os.curdir)\n print(\"pylint running on the following files:\")\n for pylint_file in pylint_files:\n print(pylint_file)\n print(\"----\")\n Run(pylint_files)\n\n\ndef get_files_from_dir(current_dir):\n \"\"\"\n Recursively Walk through a directory and get all python files and then walk\n through any potential directories that are found off current directory,\n so long as not within IGNORE_FOLDERS\n :return: all python files that were found off current_dir\n \"\"\"\n files = []\n for dir_file in os.listdir(current_dir):\n if current_dir != \".\":\n file_path = current_dir + dir_file\n else:\n file_path = dir_file\n if os.path.isfile(file_path):\n file_split = os.path.splitext(dir_file)\n if len(file_split) == 2 and file_split[0] != \"\" \\\n and file_split[1] == '.py':\n print(file_path)\n files.append(file_path)\n elif os.path.isdir(dir_file) and dir_file not in IGNORE_FOLDERS:\n files += get_files_from_dir(dir_file+\"/\")\n return files\n\nif __name__ == \"__main__\":\n run_runner()\n","sub_path":"pylint_runner.py","file_name":"pylint_runner.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"449673021","text":"import os\n\nimport click\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom model import Model, dataset\n\n\n@click.command()\n@click.help_option(\"-h\", \"--help\")\n@click.option(\"--epochs\", type=click.INT, default=20, help=\"Number of epochs.\")\n@click.option(\"--window\", type=click.INT, default=22, help=\"Number of window size.\")\n@click.option(\"--step\", 
type=click.INT, default=1, help=\"Step size.\")\ndef train_model(epochs=20, window=22, step=1):\n    if not os.path.isdir(\"./model\"):\n        os.mkdir(\"./model\")\n\n    dataset_tr = dataset(task=\"train\", window_size=window, step_size=step)\n    dataset_ev = dataset(task=\"evaluate\", window_size=window, step_size=step)\n    model = Model(dataset_tr.features())\n    print(\n        f\"model built! It will train on {len(dataset_tr)} seqs and evaluate on {len(dataset_ev)} seqs\"\n    )\n    print(model)\n    print(f\"Trainable Parameters: {sum(p.numel() for p in model.parameters())}\\n\")\n\n    opt = optim.Adam(model.parameters(), lr=1e-4)\n    criterion = nn.CrossEntropyLoss()\n\n    d_loader_tr = DataLoader(\n        dataset=dataset_tr, shuffle=True, num_workers=2, batch_size=128\n    )\n\n    d_loader_ev = DataLoader(\n        dataset=dataset_ev, shuffle=True, num_workers=2, batch_size=1024\n    )\n\n    losses = {\"train\": [], \"eval\": []}\n    best_loss = 1e9\n    no_improve = 0\n    for epoch in range(epochs):\n        with tqdm(d_loader_tr, f\"Epoch-#{epoch + 1}\") as training:\n            model.train()\n            losses_tr = []\n            for s, batch in enumerate(training):\n                opt.zero_grad()\n\n                out = model(batch[\"x\"])\n\n                loss = criterion(out, batch[\"y\"].view(-1))\n                loss.backward()\n                opt.step()\n\n                training.set_description(f\"Epoch-#{epoch + 1} Loss={loss.item():.4f}\")\n                losses_tr.append(loss.item())\n            losses[\"train\"].append(np.mean(losses_tr))\n\n        with torch.no_grad():\n            model.eval()\n            losses_ev = []\n            for s, batch in enumerate(tqdm(d_loader_ev, \"Evaluating\")):\n                out = model(batch[\"x\"])\n                loss = criterion(out, batch[\"y\"].view(-1))\n                losses_ev.append(loss.item())\n            losses[\"eval\"].append(np.mean(losses_ev))\n\n        print(\n            f\"Epoch-#{epoch + 1} AVG training loss: {losses['train'][-1]:.4f} / evaluation loss: {losses['eval'][-1]:.4f}\"\n        )\n        if losses[\"eval\"][-1] <= best_loss:\n            print(\n                f\"Evaluation loss improved from {best_loss} to {losses['eval'][-1]}, saving best model.\\n\"\n            )\n            best_loss = losses[\"eval\"][-1]\n            torch.save(model.state_dict(), \"./model/model\")\n            no_improve = 0\n        else:\n            no_improve += 1\n            print(f\"No improvement for {no_improve} epochs.\")\n\n        if no_improve == 10:\n            print(\"No improvement for 10 consecutive epochs. 
Early Stop!\")\n break\n\n print(\"Training finished!\")\n\n\nif __name__ == \"__main__\":\n train_model()\n","sub_path":"exam3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"243891094","text":"# Name\n# Author\n# Date\n\n# This is one-hidden layer neural network for facial recognition\n# We will use softmax for multi-class classification\n# Warning: this program requires much more RAM than your previous files since we will use all classes\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ANN1FacialUtil import getData, softmax, cost2, y2indicator, error_rate, relu\nfrom sklearn.utils import shuffle\n\n\nclass ANN(object):\n def __init__(self, M):\n \"\"\"\n\n :param M: number of hidden unit in the hidden layer\n \"\"\"\n self.M = M # this is the property for the object\n\n # function for learning\n def fit(self, X, Y, learning_rate=10e-7, reg=10e-7, epochs=7000, show_fig=False):\n X, Y = shuffle(X, Y) # make sure you get random data by using shuffle\n\n # Make training and validation data set\n Xvalid, Yvalid = X[-1000:], Y[-1000:] # Takes last thousand elements and assigns to X and Y valid\n X, Y = X[:-1000], Y[:-1000] # Takes all samples up UNTIL last 1000\n\n N, D = X.shape\n K = len(set(Y[:, 0]))\n T = y2indicator(Y)\n\n # W1 is weight matrix of hidden layer\n # We want to make sure the variance of W1 is 1/D so that my weight is small\n # Self.W1 makes W1 instance variable for object\n # We initialize W1 randomly using normal distribution\n\n self.W1 = np.random.randn(D, self.M) / np.sqrt(D)\n self.b1 = np.zeros((1, self.M))\n\n # W2 is weight matrix for output layer\n\n self.W2 = np.random.randn(self.M, K) / np.sqrt(self.M)\n self.b2 = np.zeros((1, K))\n\n costs = []\n best_validation_error = 1\n # perform epochs number of iterations for gradient descent\n for i in range(epochs):\n # A1 is activation for first hidden layer.\n # A2 is activation for output layer.\n\n A2, A1 = self.forward(X)\n dZ2 = A2 - T\n dW2 = A1.T.dot(dZ2)\n db2 = dZ2.sum(axis=0, keepdims=True)\n\n # purpose of reg*self.W2 is to make sure the weight is small in order to\n # prevent over fitting\n self.W2 = self.W2 - learning_rate * (dW2 + reg*self.W2)\n self.b2 = self.b2 - learning_rate * (db2 + reg*self.b2)\n\n dA1 = dZ2.dot(self.W2.T)\n # 1 - A1 * A1 is derivative of tanh\n dZ1 = dA1 * (1 - A1*A1)\n dW1 = X.T.dot(dZ1)\n db1 = dZ1.sum(axis=0, keepdims=True)\n self.W1 = self.W1 - learning_rate * (dW1 + reg*self.W1)\n self.b1 = self.b1 - learning_rate * (db1 + reg*self.b1)\n\n # check our cross validation results\n if i % 10 == 0:\n # _ means I do not care about this value\n # A2 is output, A1 is activation\n\n pYvalid, _ = self.forward(Xvalid)\n c = cost2(Yvalid, pYvalid) # cost 2 is used for multi-class classification.\n costs.append(c)\n # testResults is rank one array\n # testResults is nx1 prediction vector containing predicted class\n testResults = np.argmax(pYvalid, axis=1)\n # I want testResults to be an nx1 column vector\n testResults = np.reshape(testResults, (testResults.shape[0], 1))\n e = error_rate(Yvalid, testResults)\n print(\"i: \", i, \"cost: \", c, \"error: \", e, \"best_error: \", best_validation_error)\n if e < best_validation_error:\n best_validation_error = e\n\n # after the for loop\n print(\"best_validation_error: \", best_validation_error)\n\n if show_fig:\n plt.plot(costs)\n plt.show()\n\n\n def forward(self, X):\n \"\"\"\n\n :param X: N x D 
matrix\n :return: N x K matrix normalized matrix (A2) and N x M activation matrix for first\n hidden layer\n \"\"\"\n\n Z1 = X.dot(self.W1) + self.b1\n A1 = np.tanh(Z1) # A1 is the activation value for first hidden layer using tanh function\n\n # A2 is the activation for the output layer (prediction value)\n Z2 = A1.dot(self.W2) + self.b2\n A2 = softmax(Z2) # softmax is used for multi-class classification\n return A2, A1\n\n def predict(self, X):\n \"\"\"\n\n :param X: NxK input matrix\n :return: Nx1 prediction vector\n \"\"\"\n pY, _ = self.forward(X)\n pred = np.argmax(pY, axis=1)\n pred = np.reshape(pred, (pred.shape[0], 1))\n return pred\n\n def score(self, X, Y):\n \"\"\"\n\n :param X: training set which is 2d array\n :param Y: vector of target\n :return: accuracy of model\n \"\"\"\n prediction = self.predict(X)\n return 1 - error_rate(Y, prediction)\n\n\ndef main():\n print(\"ANN for facial expression\")\n X, Y = getData() # get the data for all classes\n model = ANN(200) # 200 hidden unit\n model.fit(X, Y, reg=0, show_fig=True)\n print('model accuracy: ', model.score(X, Y))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ANNFacial.py","file_name":"ANNFacial.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"136703981","text":"class Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n result = []\n path = []\n def dfs(start, s):\n if s == target:\n result.append(path[:])\n return\n \n for i in range(start, len(candidates)):\n if s + candidates[i] > target:\n break\n \n if i == start or candidates[i] != candidates[i - 1]:\n path.append(candidates[i])\n dfs(i + 1, s + candidates[i])\n path.pop()\n dfs(0, 0)\n return result\n","sub_path":"0001-0100/0040-Combination Sum II/0040-Combination Sum II.py","file_name":"0040-Combination Sum II.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"316504237","text":"# encoding:utf-8\r\n# import cv2\r\n# img = cv2.imread('1234.jpg',1)\r\n# crop_img = img[1:520, 200:500]\r\n# cv2.imshow(\"image\", crop_img)\r\n# cv2.imshow(\"images\", img)\r\n# cv2.waitKey(0)\r\n\r\n# def FindColorFools(a, b, c, d, img):\r\n# length = len(d)\r\n# for subscript, i in enumerate(d):\r\n# for x in range(2):\r\n# if x == 0:\r\n# crop_img = img[a[1]:b[1], i:d[i + 1]]\r\n# subscript += 1\r\n# else:\r\n# crop_img = img[b[1]:c[1], i:d[i + 1]]\r\n# subscript += length\r\n# # img = cv2.imread(\"1234.png\")\r\n# # crop_img = img[187:302, 1:229]\r\n# hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)\r\n# cimg = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\r\n# # 设定蓝色的阈值\r\n# # lower_blue=np.array([110,50,50])\r\n# # upper_blue=np.array([130,255,255])\r\n# # lower_blue = np.array([26, 43, 46])\r\n# # upper_blue = np.array([34, 255, 255])\r\n# lower_blue = np.array([0, 130, 50])\r\n# upper_blue = np.array([34, 255, 255])\r\n# # lower_blue = np.array([30, 4, 219])\r\n# # upper_blue = np.array([51, 150, 255])\r\n#\r\n# # lower_blue = np.array([20, 241, 129])\r\n# # upper_blue = np.array([25, 255, 157])\r\n# # 根据阈值构建掩模\r\n# mask = cv2.inRange(hsv, lower_blue, upper_blue)\r\n# res = cv2.bitwise_and(crop_img, crop_img, mask=mask)\r\n# img, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\r\n#\r\n# area_max_contour = 0\r\n# # if contours:\r\n# # a = contours[0]\r\n# # x,y,w,h = 
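\r\n# (Aside, not in the original notes: cv2.boundingRect returns the upright\r\n# bounding box of a contour as (x, y, w, h); for the points (0, 0), (10, 5)\r\n# and (4, 9) it returns (0, 0, 11, 10).)\r\n#     x,y,w,h = 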
cv2.boundingRect(a)\r\n# #\r\n# # for cnt in contours:\r\n# # contour_area_temp = np.fabs(cv2.contourArea(cnt))\r\n# # x, y, w, h = cv2.boundingRect(cnt)\r\n# # # print x,y,w,h\r\n# # # 轮廓外接矩形宽长相差不超过指定像素(根据实际情况调整)\r\n# # if np.fabs(w - h) < 80:\r\n# # # 找寻满足条件的最大外接矩形面积的轮廓\r\n# # if contour_area_temp > area_max_contour:\r\n# # area_max_contour = contour_area_temp\r\n# # a = cnt\r\n# # # print a\r\n# # # (x, y) 轮廓a的外接矩形左上角的点 w为宽 h为高\r\n# # x,y,w, h = cv2.boundingRect(a)\r\n# # # print x,y,w,h\r\n# # # print '123455655'\r\n# # if w+h>100:\r\n# # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n#\r\n# if contours:\r\n# a = contours[0]\r\n# x, y, w, h = cv2.boundingRect(a)\r\n#\r\n# for cnt in contours:\r\n# # contour_area_temp = np.fabs(cv2.contourArea(cnt))\r\n# x, y, w, h = cv2.boundingRect(cnt)\r\n# print x, y, w, h\r\n# if w + h > 5:\r\n# cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n# cv2.imshow('%d' % subscript, crop_img)\r\n# # print x,y,w,h\r\n# # 轮廓外接矩形宽长相差不超过指定像素(根据实际情况调整)\r\n# # if np.fabs(w - h) < 80:\r\n# # # 找寻满足条件的最大外接矩形面积的轮廓\r\n# # if contour_area_temp > area_max_contour:\r\n# # area_max_contour = contour_area_temp\r\n# # a = cnt\r\n# # print a\r\n# # (x, y) 轮廓a的外接矩形左上角的点 w为宽 h为高\r\n# # x,y,w, h = cv2.boundingRect(a)\r\n# # print x,y,w,h\r\n# # print '123455655'\r\n#\r\n#\r\n#\r\n# # cv2.HoughCircles(cimg, cv2.HOUGH_STANDARD, 1, 20, circles, param1=50, param2=30, minRadius=0, maxRadius=0)\r\n# # circles1 = cv2.HoughCircles(mask, cv2.HOUGH_GRADIENT, 1,\r\n# # 100, param1=100, param2=30, minRadius=200, maxRadius=300)\r\n# # print type(circles1)\r\n# # if circles1 is not None:\r\n# # circles = circles1[0, :, :] # 提取为二维\r\n# # circles = np.uint16(np.around(circles)) # 四舍五入,取整\r\n# # for i in circles[:]:\r\n# # # draw the outer circle\r\n# # cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)\r\n# # # draw the center of the circle\r\n# # cv2.circle(frame, (i[0], i[1]), 2, (0, 0, 255), 3)\r\n# # # 对原图像和掩模进行位运算\r\n#\r\n# # 显示图像\r\n# # cv2.imshow('frame', crop_img)\r\n# # cv2.imshow('mask', mask)\r\n# # cv2.imshow('res', res)\r\n# # crop_img = photo[a:b, c:d]\r\n#\r\n# # print crop_img\r\n# # cv2.imshow(\"photo\", crop_img)\r\n# cv2.waitKey(0)\r\n# import cv2\r\n# import numpy as np\r\n# from matplotlib import pyplot as plt\r\n# import threading\r\n# import Queue\r\n# # img = cv2.imread('IMG_3779.JPG', 0)\r\n# # img2 = img.copy()\r\n# # template = cv2.imread('1403.jpg', 0)\r\n# # w, h = template.shape[::-1]\r\n# # print w, h\r\n# # # All the 6 methods for comparison in a list\r\n# # methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\r\n# # 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\r\n# # # methods = ['cv2.TM_SQDIFF_NORMED']\r\n# # for meth in methods:\r\n# # img = img2.copy()\r\n# # #exec 语句用来执行储存在字符串或文件中的Python 语句。\r\n# # # 例如,我们可以在运行时生成一个包含Python 代码的字符串,然后使用exec 语句执行这些语句。\r\n# # #eval 语句用来计算存储在字符串中的有效Python 表达式\r\n# # method = eval(meth)\r\n# # # Apply template Matching\r\n# # res = cv2.matchTemplate(img, template, method)\r\n# # # print res\r\n# # min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\r\n# # # 使用不同的比较方法,对结果的解释不同\r\n# # # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\r\n# # if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\r\n# # top_left = min_loc\r\n# # else:\r\n# # top_left = max_loc\r\n# # print top_left\r\n# # # print max_loc\r\n# # print max_val\r\n# # print min_val\r\n# # # cv2.minMaxLoc()\r\n# # bottom_right = (top_left[0] + w, top_left[1] + h)\r\n# # cv2.rectangle(img, 
top_left, bottom_right, 255, 2)\r\n# # plt.subplot(121),plt.imshow(template, cmap = 'gray')\r\n# # plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\r\n# # plt.subplot(122),plt.imshow(img,cmap = 'gray')\r\n# # plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\r\n# # plt.suptitle(meth)\r\n# # plt.show()\r\n# # q = Queue.Queue()\r\n# # def Threads(sub):\r\n# # print \"Thread-%d\" % sub\r\n# # q.put(([1,2,3], sub))\r\n# #\r\n# # def ThreadMain():\r\n# # threads = []\r\n# # for i in range(5):\r\n# # threadings = threading.Thread(target=Threads, args=(i,))\r\n# # threads.append(threadings)\r\n# # return threads\r\n# #\r\n# # if __name__ == \"__main__\":\r\n# # threads = ThreadMain()\r\n# # for t in threads:\r\n# # t.start()\r\n# # while not q.empty():\r\n# # print q.get()[0][1]\r\n#\r\n# # str = raw_input()\r\n# #\r\n# # str = str.split(\" \")\r\n# #\r\n# # print len(str[len(str) - 1])\r\n# # str = raw_input()\r\n# # st = raw_input()\r\n# #\r\n# # print str.count(st)\r\n# # while 1:\r\n# # for i in range(10):\r\n# # if i == 0:\r\n# # break\r\n# # print 123\r\n# # counts = raw_input()\r\n# # lists = []\r\n# # for i in range(int(counts)):\r\n# # str = raw_input()\r\n# # if str in lists:\r\n# # pass\r\n# # else:\r\n# # for x in range(len(lists))\r\n# # lists.append(str)\r\n# # print lists\r\n# #\r\n# #\r\n# # for i in range(len(lists)):\r\n# # print lists[i]\r\n# # delsign = []\r\n# # addnumber = []\r\n# # sp = [300, 5200]\r\n# # newline = [740, 1440, 2140, 2240, 3540, 4240]\r\n# # subscript = 3\r\n# # signs = [1, 0, 1, 3, 1, 1]\r\n# # breadth = 100\r\n# # for i in range(subscript):\r\n# # newline[i] = abs(newline[subscript] - (subscript - i) * breadth)\r\n# # for i in range(subscript + 1, len(newline)):\r\n# # # 第三版本竖线识别修改部分\r\n# # if float((newline[i] - newline[subscript])) / breadth < (i - subscript) + 0.6:\r\n# # # 该判断是为了防止新添竖线超过图像最大横坐标\r\n# # if newline[subscript] + (i - subscript) * breadth > sp[1]:\r\n# # # newline[i] = sp[1] - 5\r\n# # delsign.append(newline[i])\r\n# # else:\r\n# # newline[i] = newline[subscript] + (i - subscript) * breadth\r\n# # else:\r\n# # if float((newline[i] - newline[subscript])) / breadth < (i - subscript) + 1.0:\r\n# # adds = ((newline[i] - newline[subscript]) / breadth) + 1\r\n# # addnumber.append(i)\r\n# # newline[i] = newline[subscript] + adds * breadth\r\n# # if newline[i] > sp[1]:\r\n# # del newline[i]\r\n# # del signs[i]\r\n# # else:\r\n# # xx = float((newline[i] - newline[subscript])) / breadth - (i - subscript)\r\n# # yy = int(xx)\r\n# # zz = xx - yy\r\n# # if zz > 0.6:\r\n# # n = yy\r\n# # else:\r\n# # n = yy - 1\r\n# # for x in range(n):\r\n# # addnumber.append(i + x)\r\n# # if zz > 0.6:\r\n# # adds = ((newline[i] - newline[subscript]) / breadth) + yy + 1\r\n# # else:\r\n# # adds = ((newline[i] - newline[subscript]) / breadth) + yy\r\n# # newline[i] = newline[subscript] + adds * breadth\r\n# # print newline[i]\r\n# #\r\n# # if addnumber:\r\n# # for i in addnumber:\r\n# # if newline[subscript] + (i - subscript) * breadth < sp[1]:\r\n# # newline.insert(i, newline[subscript] + (i - subscript) * breadth)\r\n# #\r\n# # print newline\r\n#\r\n# # lists = [1, 2, 4, 5]\r\n# # insert = [2, 3]\r\n# # if insert:\r\n# # for x in range(len(insert)):\r\n# # insert[x] += x\r\n# # print insert\r\n# # for i in insert:\r\n# # lists.insert(i, i + 1)\r\n# # for x in range(len(insert)):\r\n# # insert[x] -= x\r\n# # for i in insert:\r\n# # del lists[i]\r\n# #\r\n# # print lists\r\n#\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as 
plt\r\nimport time\r\nimport threading\r\nimport Queue\r\n\r\ndef ImagePartition(original_image, number):\r\n    img = cv2.imread(original_image)\r\n    sp = img.shape\r\n    # print sp[1]\r\n    # print (2 * sp[0] / 3)\r\n    # edges = cv2.Canny(img, 100, 150)\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    edges = cv2.Canny(gray, 50, 150, apertureSize=3)\r\n    # edge = edges.copy()\r\n    # lines = cv2.HoughLines(edges, 1, np.pi/180, 100)\r\n\r\n    # find Hough line segments\r\n    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, 10, 0)\r\n\r\n    # list of horizontal lines\r\n    transverseline = []\r\n    # list of vertical lines\r\n    verticalline = []\r\n\r\n    for i in lines:\r\n        for x1, y1, x2, y2 in i:\r\n            # if y1 < 500:\r\n            if abs(y1 - y2) < 10:\r\n                if abs(x1 - x2) > 15:\r\n                    if (1 * sp[0] / 3) < y1 < (3 * sp[0] / 4):\r\n                        if 250 < x1:\r\n                            transverseline.append([x1, y1, x2, y2])\r\n            if abs(x1 - x2) < 10:\r\n                if abs(y1 - y2) > 15:\r\n                    if y1 < (2 * sp[0] / 3):\r\n                        verticalline.append([x1, y1, x2, y2])\r\n            # cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\r\n\r\n    for i in transverseline:\r\n        cv2.line(img, (i[0], i[1]), (i[2], i[3]), (0, 255, 0), 2)\r\n\r\n    plt.subplot(121), plt.imshow(img, cmap='gray')\r\n    plt.title('Original Image'), plt.xticks([]), plt.yticks([])\r\n    # plt.subplot(122), plt.imshow(edges, cmap='gray')\r\n    # plt.title('Edge Image'), plt.xticks([]), plt.yticks([])\r\n    # plt.subplot(122), plt.imshow(imgs, cmap='gray')\r\n    # plt.title('Third Image'), plt.xticks([]), plt.yticks([])\r\n    plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n    ImagePartition(\"IMG_20170310_142125.jpg\", 1)","sub_path":"123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":12506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"270355","text":"\"\"\"backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', include('core.urls')),\n\n    #rest_framework_jwt_views\n    path('api-token-auth/', obtain_jwt_token),\n    path('api-token-refresh/', refresh_jwt_token),\n    path('api-token-verify/', verify_jwt_token),\n\n    #SIGNUP, LOGIN, LOGOUT endpoint\n    path('rest-auth/', include('rest_auth.urls')), #LOGIN(username or email, password), LOGOUT(token will be deleted)\n    path('rest-auth/registration/', include('rest_auth.registration.urls')), #SIGNUP (username, email, password1, password2)\n\n]\n","sub_path":"backend/backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"528056435","text":"\"\"\"\nPlotting of 3D arrays in 2D plots\n\"\"\"\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport numpy.ma as ma\nimport seaborn as sns\nfrom plotting.base import plotting_base\n\n\ndef heatmap_plot(data, **kwargs):\n    \"\"\"\n    Heat map plot using seaborn heatmap\n\n    Parameters\n    ----------\n    data : ndarray | pandas.DataFrame\n        ndarray of heatmap values or\n        pandas DataFrame of heat map values with tick labels as index\n        and column labels\n    kwargs : dict\n        kwargs for seaborn.heatmap and plotting_base\n\n    See Also\n    --------\n    seaborn.heatmap : plotting function\n\n    plotting.base.plotting_base : plotting base\n    \"\"\"\n    def plot_func(axis, data, **kwargs):\n        sns.heatmap(data, ax=axis, **kwargs)\n\n    plotting_base(plot_func, data, **kwargs)\n\n\ndef add_colorbar(axis, cf, ticks, size, padding,\n                 location='right', label=None, lines=None, fontsize=14):\n    \"\"\"\n    Add a colorbar legend to given axis\n\n    Parameters\n    ----------\n    axis : matplotlib.axis\n        Axis object to add colorbar to\n    cf : matplotlib.cm.ScalarMappable\n        Contour set or colormap mappable to use for colors\n    ticks : list\n        list of tick values\n    size : tuple\n        colorbar size\n    padding : float\n        how much to pad around the colorbar\n    location : str, optional\n        Location of colorbar, by default 'right'\n    label : str, optional\n        Label for colorbar, by default None\n    lines : list, optional\n        list of lines to add, by default None\n    fontsize : int, optional\n        fontsize for label\n        tick size = fontsize -2\n        by default 14\n    \"\"\"\n    divider = make_axes_locatable(axis)\n\n    caxis = divider.append_axes(location, size=size,\n                                pad=padding)\n\n    if location in ['top', 'bottom']:\n        orientation = 'horizontal'\n    else:\n        orientation = 'vertical'\n\n    cbar = plt.colorbar(cf, ticks=ticks, cax=caxis,\n                        orientation=orientation,\n                        ticklocation=location)\n\n    cbar.ax.tick_params(labelsize=fontsize - 2)\n\n    if label is not None:\n        cbar.set_label(label, size=fontsize)\n\n    if lines is not None:\n        cbar.add_lines(lines)\n\n\ndef contour_plot(data, **kwargs):\n    \"\"\"\n    Create a contoured colormap from data shape = (n, 3)\n\n    Parameters\n    ----------\n    data : ndarray\n        n X 3 array of data to plot of form (x, y, c)\n    figsize : tuple, optional\n        Figure size, by default (8, 6)\n    fontsize : int, optional\n        Labels font size, by default 14\n    zlim : tuple, optional\n        z / c limits (zmin, zmax), by default None\n    major_spacing : float, optional\n        space between major contours, by default None\n    minor_spacing : float, optional\n        space between minor contours, by default None\n    contour_width : int, optional\n        contour line width, by default 1\n    contour_color : str, optional\n        contour line color, by default 'k'\n    opacity : float, optional\n        opacity of colormap, by default 1.\n    colorbar : bool, optional\n        Display color bar, by default True\n    colorbar_location : str, optional\n        Location of colorbar, by default 'right'\n    colorbar_label : str, optional\n        Colorbar label, by default None\n    colorbar_lines : bool, optional\n        Plot lines on colorbar, by default True\n    colorbar_ticks : int, optional\n        Number of colorbar ticks, by default None\n    colormap : str, optional\n        colormap style, by default 'jet'\n    kwargs : dict\n        kwargs for plotting_base\n\n    See Also\n    --------\n    matplotlib.pyplot.contour : plotting function\n    matplotlib.pyplot.contourf : plotting function\n\n    plotting.base.plotting_base : plotting base\n    \"\"\"\n    def plot_func(axis, data, figsize=(8, 6), fontsize=14, zlim=None,\n                  major_spacing=None, minor_spacing=None, contour_width=1,\n                  contour_color='k', opacity=1., colorbar=True,\n                  colorbar_location='right', colorbar_label=None,\n                  colorbar_lines=True, colorbar_ticks=None, colormap='jet'):\n\n        assert len(data) == 3, 'Data must be of shape (x, y, c)'\n\n        x, y, z = data\n        z_m = ma.masked_invalid(z)\n\n        a_ratio = z.shape\n        a_ratio = a_ratio[1] / a_ratio[0]\n\n        if isinstance(figsize, (int, float)):\n            figsize = [figsize * a_ratio, figsize]\n        else:\n            figsize = max(figsize)\n            figsize = [figsize * a_ratio, figsize]\n\n        if zlim is None:\n            zmin, zmax = np.nanmin(z), np.nanmax(z)\n        else:\n            zmin, zmax = zlim\n\n        if major_spacing is None:\n            major_spacing = (zmax - zmin) / 10\n        if minor_spacing is None:\n            minor_spacing = major_spacing / 10\n\n        cl_levels = np.arange(zmin, zmax + major_spacing, major_spacing)\n        cf_levels = np.arange(zmin, zmax + minor_spacing, minor_spacing)\n\n        if colorbar_ticks is None:\n            l_levels = cl_levels[::2]\n        else:\n            l_levels = (zmax - zmin) / colorbar_ticks\n            l_levels = np.arange(zmin, zmax + l_levels, l_levels)\n\n        orientation = 'vertical'\n        if colorbar_location in ['top', 'bottom']:\n            orientation = 'horizontal'\n\n        cf = plt.contourf(x, y, z_m, alpha=opacity, levels=cf_levels,\n                          extend='both', antialiased=True)\n\n        if contour_color is not None:\n            cl = plt.contour(cf, levels=cl_levels, colors=(contour_color,),\n                             linewidths=(contour_width,))\n\n        if colormap is not None:\n            cf.set_cmap(colormap)\n\n        if colorbar:\n            cbar_padding = 0.1\n            if colorbar_location in ['top', 'bottom']:\n                figsize[1] += figsize[1] / 10\n                cbar_size = figsize[0] / 20\n            else:\n                figsize[0] += figsize[0] / 10\n                cbar_size = figsize[1] / 20\n\n            divider = make_axes_locatable(axis)\n\n            caxis = divider.append_axes(colorbar_location, size=cbar_size,\n                                        pad=cbar_padding)\n\n            cbar = plt.colorbar(cf, ticks=l_levels, cax=caxis,\n                                orientation=orientation,\n                                ticklocation=colorbar_location)\n\n            cbar.ax.tick_params(labelsize=fontsize - 2)\n\n            if colorbar_label is not None:\n                cbar.set_label(colorbar_label, size=fontsize)\n\n            if colorbar_lines:\n                if contour_color is not None:\n                    cbar.add_lines(cl)\n\n    plotting_base(plot_func, data, **kwargs)\n\n\ndef colorbar(zlim, ticks=None, lines=None, line_color='k', linewidth=1,\n             colormap='jet', extend='neither', ticklocation='right',\n             fontsize_other=18, label=None, fontsize_label=21, figsize=6,\n             dpi=100, showfig=True, filename=None):\n\n    \"\"\"\n    Create colorbar\n\n    Parameters\n    ----------\n    zlim : tuple\n        List or tuple indicating zmin and zmax.\n    ticks : int\n        Number of ticks to label.\n    lines : int\n        Number of lines to draw on colorbar.\n    line_color : str\n        Color of lines drawn on colorbar.\n    linewidth : int\n        Line width for each line drawn on colorbar.\n    colormap : str\n        Color scheme for colorbar.\n    extend : str\n        Direction to extend colors beyond zmin and zmax.\n    ticklocation : str\n        Orientation of colorbar and location of tick marks.\n    fontsize_other : int\n        Font size of tick numbers.\n    label : str\n        Label for colorbar\n    fontsize_label : int\n        Font size of label.\n    figsize : int or tuple\n        Width and height of figure\n    dpi : int\n        DPI resolution of figure.\n    showfig : bool\n        Whether to show figure.\n    filename : str\n        Name of file/path to save the figure to.\n    \"\"\"\n\n    a_ratio = 20\n\n    if isinstance(figsize, (list, tuple)):\n        figsize = max(figsize)\n\n    if ticklocation in ['right', 'left']:\n        figsize = (figsize / a_ratio, figsize)\n        orientation = 'vertical'\n    else:\n        figsize = (figsize, figsize / a_ratio)\n        orientation = 'horizontal'\n\n    if ticks is not None:\n        ticks = (zlim[1] - zlim[0]) / ticks\n        ticks = np.arange(zlim[0], zlim[1] + ticks, ticks)\n\n    fig = plt.figure(figsize=figsize, dpi=dpi)\n    axis = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n\n    norm = mpl.colors.Normalize(vmin=zlim[0], vmax=zlim[1])\n\n    # accept a colormap name (str) as well as a Colormap instance\n    cb = mpl.colorbar.ColorbarBase(axis, cmap=plt.get_cmap(colormap), norm=norm,\n                                   orientation=orientation, extend=extend,\n                                   ticks=ticks, ticklocation=ticklocation)\n    cb.ax.tick_params(labelsize=fontsize_other)\n\n    if label is not None:\n        cb.set_label(label, size=fontsize_label)\n\n    if lines is not None:\n        lines = (zlim[1] - zlim[0]) / lines\n        lines = np.arange(zlim[0], zlim[1] + lines, lines)\n        cb.add_lines(lines, colors=(line_color,) * len(lines),\n                     linewidths=(linewidth,) * len(lines))\n\n    if filename is not None:\n        plt.savefig(filename, dpi=dpi, transparent=True,\n                    bbox_inches='tight')\n\n    if showfig:\n        plt.show()\n\n    plt.close()\n","sub_path":"plotting/colormaps.py","file_name":"colormaps.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"560583676","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.  
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv, orm\nimport openerp.addons.decimal_precision as dp\nfrom openerp.tools.translate import _\nfrom openerp import tools\nfrom datetime import datetime\n\nclass stock_inventory_entry(osv.osv_memory):\n    _name = \"stock.inventory.entry\"\n    _description = \"Create account entry based on inventory\"\n    _columns = {\n        'period_id' : fields.many2one('account.period', '会计期间', required=True),\n        'journal_id': fields.many2one('account.journal', '账簿', required=True ),\n        'account_id' : fields.many2one('account.account', '会计科目', required=True),\n    }\n    _defaults = {\n\n    }\n\n    def default_get(self, cr, uid, fields, context):\n        \"\"\" To get default values for the object.\n        @param self: The object pointer.\n        @param cr: A database cursor\n        @param uid: ID of the user currently logged in\n        @param fields: List of fields for which we want default values\n        @param context: A standard dictionary\n        @return: A dictionary of fields with values.\n        \"\"\"\n        res = super(stock_inventory_entry, self).default_get(cr, uid, fields, context=context)\n        period_id = self.pool.get('account.period').find(cr, uid, datetime.now(), context=context)[0]\n        res['period_id'] = period_id\n        account_ids = self.pool.get('account.account').search(cr, uid, [('name','ilike','损溢')], context=context)\n        res['account_id'] = account_ids and account_ids[0]\n\n        try:\n            model, journal_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock_account', 'stock_journal')\n            res['journal_id'] = journal_id\n        except (orm.except_orm, ValueError):\n            pass\n        return res\n\n    def create_inventory_entry(self, cr, uid, ids, context=None):\n        inventory_obj = self.pool.get('stock.inventory')\n        wiz = self.browse(cr, uid, ids[0])\n        period_id = wiz.period_id.id\n        journal_id = wiz.journal_id.id\n        account_id = wiz.account_id.id\n        inventory_ids = context.get('active_ids', False)\n        inventory_obj.create_inventory_entry(cr, uid, inventory_ids, account_id, period_id, journal_id, context = context)\n        \n        return {}\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"ems_mian_dan/stock_account_cn/wizard/stock_inventory_entry.py","file_name":"stock_inventory_entry.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"314040598","text":"import requests\nimport json\nimport slackweb\nimport settings\nimport datetime\nfrom pytz import timezone\n\nAPI_KEY = settings.APIKEY\nWEBHOOKURL = settings.WEBHOOKURL\n\ndef WeatherSendToSlack():\n    BASE_URL = \"http://api.openweathermap.org/data/2.5/forecast?\"\n    payload = {\n        'q': 'tokyo',\n        'cnt': '21',\n        'units': 'metric',\n        'appid': API_KEY\n    }\n    # the API key is already passed via the 'appid' query parameter in payload\n    r = requests.get(BASE_URL, params=payload)\n    result = r.json()\n    slack = slackweb.Slack(url=WEBHOOKURL)\n    weatherResult = []\n    for text in result['list']:\n        dateTime = timezone('Asia/Tokyo').localize(datetime.datetime.fromtimestamp(text['dt']))\n        dateTime = str(dateTime)[5:16]\n        tempNow = str(text['main']['temp'])[0:4]\n        weather = text['weather'][0]['description']\n        rainfall= '0'\n        if 'rain' in text and '3h' in text['rain']:\n            rainfall = text['rain']['3h']\n        message = dateTime + ' ' + tempNow + '℃' + ' ' + weather + ' ' + str(rainfall) + 'mm'\n        weatherResult.append(message)\n    weatherResult = '\\n'.join(weatherResult)\n\n    attachments = []\n    attachment = {\"title\": \"per 3hour forecast\", \"color\": \"#2eb886\", \n                  \"pretext\": 
\"push the more button\", \"text\": weatherResult, \n \"mrkdwn_in\": [\"text\", \"pretext\"]}\n attachments.append(attachment)\n slack.notify(attachments=attachments)\n\ndef init():\n WeatherSendToSlack()\n\ninit()","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460875769","text":"#coding=utf8\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nfrom model.model_utils import Registrable\nfrom model.encoder.functions import scaled_exp, div_by_z, src_dot_dst\n\nclass ScoreFunction(nn.Module):\n\n def __init__(self, hidden_size, mlp=1, method='biaffine'):\n super(ScoreFunction, self).__init__()\n assert method in ['dot', 'bilinear', 'affine', 'biaffine']\n self.mlp = int(mlp)\n self.hidden_size = hidden_size // self.mlp\n if self.mlp > 1: # use mlp to perform dim reduction\n self.mlp_q = nn.Sequential(nn.Linear(hidden_size, self.hidden_size), nn.Tanh())\n self.mlp_s = nn.Sequential(nn.Linear(hidden_size, self.hidden_size), nn.Tanh())\n self.method = method\n if self.method == 'bilinear':\n self.W = nn.Linear(self.hidden_size, self.hidden_size)\n elif self.method == 'affine':\n self.affine = nn.Linear(self.hidden_size * 2, 1)\n elif self.method == 'biaffine':\n self.W = nn.Linear(self.hidden_size, self.hidden_size)\n self.affine = nn.Linear(self.hidden_size * 2, 1)\n\n def forward(self, context, node):\n \"\"\"\n @args:\n context(torch.FloatTensor): num_nodes x hidden_size\n node(torch.FloatTensor): num_nodes x hidden_size\n @return:\n scores(torch.FloatTensor): num_nodes\n \"\"\"\n if self.mlp > 1:\n context, node = self.mlp_q(context), self.mlp_s(node)\n if self.method == 'dot':\n scores = (context * node).sum(dim=-1)\n elif self.method == 'bilinear':\n scores = (context * self.W(node)).sum(dim=-1)\n elif self.method == 'affine':\n scores = self.affine(torch.cat([context, node], dim=-1)).squeeze(-1)\n elif self.method == 'biaffine':\n scores = (context * self.W(node)).sum(dim=-1)\n scores += self.affine(torch.cat([context, node], dim=-1)).squeeze(-1)\n else:\n raise ValueError('[Error]: Unrecognized score function method %s!' 
% (self.method))\n return scores\n\n@Registrable.register('without_pruning')\nclass GraphOutputLayer(nn.Module):\n\n def __init__(self, args):\n super(GraphOutputLayer, self).__init__()\n self.hidden_size = args.gnn_hidden_size\n\n def forward(self, inputs, batch):\n \"\"\" Re-scatter data format:\n inputs: sum(q_len + t_len + c_len) x hidden_size\n outputs: bsize x (max_q_len + max_t_len + max_c_len) x hidden_size\n \"\"\"\n outputs = inputs.new_zeros(len(batch), batch.mask.size(1), self.hidden_size)\n outputs = outputs.masked_scatter_(batch.mask.unsqueeze(-1), inputs)\n if self.training:\n return outputs, batch.mask, torch.tensor(0., dtype=torch.float).to(outputs.device)\n else:\n return outputs, batch.mask\n\n@Registrable.register('with_pruning')\nclass GraphOutputLayerWithPruning(nn.Module):\n\n def __init__(self, args):\n super(GraphOutputLayerWithPruning, self).__init__()\n self.hidden_size = args.gnn_hidden_size\n self.graph_pruning = GraphPruning(self.hidden_size, args.num_heads, args.dropout, args.score_function)\n\n def forward(self, inputs, batch):\n outputs = inputs.new_zeros(len(batch), batch.mask.size(1), self.hidden_size)\n outputs = outputs.masked_scatter_(batch.mask.unsqueeze(-1), inputs)\n\n if self.training:\n g = batch.graph\n question = inputs.masked_select(g.question_mask.unsqueeze(-1)).view(-1, self.hidden_size)\n schema = inputs.masked_select(g.schema_mask.unsqueeze(-1)).view(-1, self.hidden_size)\n loss = self.graph_pruning(question, schema, g.gp, g.node_label)\n return outputs, batch.mask, loss\n else:\n return outputs, batch.mask\n\nclass GraphPruning(nn.Module):\n\n def __init__(self, hidden_size, num_heads=8, feat_drop=0.2, score_function='affine'):\n super(GraphPruning, self).__init__()\n self.hidden_size = hidden_size\n self.node_mha = DGLMHA(hidden_size, hidden_size, num_heads, feat_drop)\n self.node_score_function = ScoreFunction(self.hidden_size, mlp=2, method=score_function)\n self.loss_function = nn.BCEWithLogitsLoss(reduction='sum')\n\n def forward(self, question, schema, graph, node_label):\n node_context = self.node_mha(question, schema, graph)\n node_score = self.node_score_function(node_context, schema)\n loss = self.loss_function(node_score, node_label)\n return loss\n\nclass DGLMHA(nn.Module):\n \"\"\" Multi-head attention implemented with DGL lib\n \"\"\"\n def __init__(self, hidden_size, output_size, num_heads=8, feat_drop=0.2):\n super(DGLMHA, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.num_heads = num_heads\n self.d_k = self.hidden_size // self.num_heads\n self.affine_q, self.affine_k, self.affine_v = nn.Linear(self.output_size, self.hidden_size),\\\n nn.Linear(self.hidden_size, self.hidden_size, bias=False), nn.Linear(self.hidden_size, self.hidden_size, bias=False)\n self.affine_o = nn.Linear(self.hidden_size, self.output_size)\n self.feat_dropout = nn.Dropout(p=feat_drop)\n\n def forward(self, context, node, g):\n q, k, v = self.affine_q(self.feat_dropout(node)), self.affine_k(self.feat_dropout(context)), self.affine_v(self.feat_dropout(context))\n with g.local_scope():\n g.nodes['schema'].data['q'] = q.view(-1, self.num_heads, self.d_k)\n g.nodes['question'].data['k'] = k.view(-1, self.num_heads, self.d_k)\n g.nodes['question'].data['v'] = v.view(-1, self.num_heads, self.d_k)\n out_x = self.propagate_attention(g)\n return self.affine_o(out_x.view(-1, self.hidden_size))\n\n def propagate_attention(self, g):\n # Compute attention score\n g.apply_edges(src_dot_dst('k', 'q', 'score'))\n 
g.apply_edges(scaled_exp('score', math.sqrt(self.d_k)))\n        # Update node state\n        g.update_all(fn.src_mul_edge('v', 'score', 'v'), fn.sum('v', 'wv'))\n        g.update_all(fn.copy_edge('score', 'score'), fn.sum('score', 'z'), div_by_z('wv', 'z', 'o'))\n        out_x = g.nodes['schema'].data['o']\n        return out_x","sub_path":"PSP/model/encoder/graph_output.py","file_name":"graph_output.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"616834697","text":"import numpy as np\nfrom scipy.linalg import lu, solve_triangular\n\n\ndef bordered_lu(A, tilde_ell, tilde_u):\n    \"\"\"Implementation of a bordered LU decomposition.\"\"\"\n\n    n = tilde_ell.shape[0]\n\n    w = solve_triangular(tilde_ell, A[:-1,-1], lower=True)\n    ell = solve_triangular(tilde_u, A[-1,:-1], trans='T')\n    ut = A[-1, -1] - np.dot(w, ell)\n\n    L = np.block([\n        [tilde_ell, np.zeros((n, 1))],\n        [ell.reshape((1, n)), 1]\n    ])\n\n    U = np.block([\n        [tilde_u, w.reshape((n, 1))],\n        [np.zeros((1, n)), ut]\n    ])\n\n    return L, U\n\ndef recursive_lu(A):\n    \"\"\"Implementation of a recursive LU decomposition.\"\"\"\n\n    if A.shape == (1, 1):\n        # Scalar case\n        return np.atleast_2d(1), A\n\n    tilde_ell, tilde_u = recursive_lu(\n        np.atleast_2d(A[:-1, :-1])\n    )\n    return bordered_lu(A, tilde_ell, tilde_u)\n\nn = 20\nrand = np.random.RandomState(0)\n\nA = rand.randn(n, n)\nb = np.ones(n, dtype='float64')\n\n# Example solution with Scipy\nP, L, U = lu(A)\nx_compare = solve_triangular(\n    U, solve_triangular(L, P.T @ b, lower=True))\n# Print the residual\nprint(f\"Residual with Scipy LU: {np.linalg.norm(b - A @ x_compare)}\")\n\nL_recurse, U_recurse = recursive_lu(A)\nx_recurse = solve_triangular(\n    U_recurse, solve_triangular(L_recurse, b, lower=True))\nprint(f\"Residual with recursive LU: {np.linalg.norm(b - A @ x_recurse)}\")\n","sub_path":"mysol.py","file_name":"mysol.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"45858747","text":"import argparse\nimport os\nimport sys\n\nparent_dir = os.path.abspath(\".\")\nsys.path.append(parent_dir)\n\nimport pandas as pd\n\nfrom lib import mysql_utils\n\nINTERSECTIONS = [5083, 5082, 6081, 5091, 5072, 6082, 6083]\nINTERSECTION_OR_BOOLEAN = \" OR \".join([\"IntersectionID = {}\".format(intersection) for intersection in INTERSECTIONS])\nMIN_RELEVANT_DATE_SUBQUERY = \\\n    \"SELECT MAX(t.EndDate) FROM (\\\n        SELECT MIN(EndDate) AS EndDate FROM phase_time_2017\\\n        WHERE {}\\\n        GROUP BY IntersectionID\\\n    ) AS t\"\\\n    .format(INTERSECTION_OR_BOOLEAN)\n#PHASE_TIMINGS_QUERY = \\\n#    \"SELECT IntersectionID, EndDate, EndTime, PhaseActualGreenTime FROM phase_time_2017 \\\n#    WHERE ({})\\\n#    AND (EndDate > ({})\\\n#    OR EndTime > (\\\n#    SELECT MAX(t2.EndTime) FROM (SELECT Min(EndTime) AS EndTime FROM phase_time_2017\\\n#    WHERE {}\\\n#    AND EndDate IN ({})\\\n#    GROUP BY IntersectionID) AS t2\\\n#    )\\\n#    );\"\\\n#    .format(INTERSECTION_OR_BOOLEAN, MIN_RELEVANT_DATE_SUBQUERY, INTERSECTION_OR_BOOLEAN, MIN_RELEVANT_DATE_SUBQUERY)\nDETECTOR_INVENTORY_QUERY = \\\n    \"SELECT IF(SensorID < 10, CONCAT(IntersectionID, 0, SensorID), CONCAT(IntersectionID, SensorID)) AS Sensor,\\\n    IntersectionID, SensorID, Direction, Movement FROM detector_inventory\\\n    WHERE {}\"\\\n    .format(INTERSECTION_OR_BOOLEAN)\n\n\n\ndef generate_graph_connections(detector_inventory, edges, phases, phase_plans, plan_name):\n    edges.loc[:, \"Distance\"] = 0\n\n    for i in range(edges.shape[0]):\n        sensor_from = 
edges.iloc[i, 0]\n        sensor_to = edges.iloc[i, 1]\n        intersection_from, direction_from = detector_inventory.loc[str(sensor_from), [\"IntersectionID\", \"Direction\"]]\n        intersection_to, direction_to = detector_inventory.loc[str(sensor_to), [\"IntersectionID\", \"Direction\"]]\n\n        if intersection_from == intersection_to:\n            edges.iloc[i, 2] = 1\n            continue\n\n        edge_phases = phases[(phases[\"From\"] == direction_from) & (phases[\"To\"] == direction_to)][\"Phase\"].values\n        edge_plans = phase_plans[phase_plans[\"Intersection\"] == intersection_from]\n        edge_plans = edge_plans if not plan_name else edge_plans[edge_plans[\"PlanName\"] == plan_name]\n        total_hours = 0.0\n        edge_green_time_fraction = 0.0\n\n        for j in range(edge_plans.shape[0]):\n            plan = edge_plans.iloc[j]\n            plan_cycle = plan[\"Cycle\"]\n            green_times = plan[\"PhasePlannedGreenTime\"].split(\";\")\n            yr_times = plan[\"PhasePlannedYRTime\"].split(\";\")\n            phase_weight = plan[\"EndTime\"] - plan[\"StartTime\"]\n            total_hours += phase_weight\n\n            for k in edge_phases:\n                phase_green_time = int(green_times[k-1])\n                phase_yr_time = int(yr_times[k-1])\n\n                edge_green_time_fraction += phase_weight * (phase_green_time + phase_yr_time) / plan_cycle\n\n        edge_green_time_fraction = edge_green_time_fraction / total_hours if total_hours > 0 else 0\n        edges.loc[edges.index[i], \"Distance\"] = edge_green_time_fraction\n\n    return edges\n\ndef add_self_edges(detector_list, graph_connections):\n    columns = graph_connections.columns\n    for detector in detector_list:\n        graph_connections = graph_connections.append(pd.Series(dict(zip(columns, [detector, detector, 1]))),\n                                                     ignore_index=True)\n\n    return graph_connections\n\n\n\ndef main(args):\n    plan_name = args.plan_name\n    detector_list = args.detector_list\n    distances_path = args.distances_path\n\n    #phase_timings = mysql_utils.execute_query(PHASE_TIMINGS_QUERY)\n    detector_inventory = mysql_utils.execute_query(DETECTOR_INVENTORY_QUERY)\n\n    # if phase_timings == None or detector_inventory == None:\n    if detector_inventory is None:\n        sys.exit()\n\n    # phase_timings = pd.DataFrame(phase_timings, columns=[\"IntersectionID\", \"EndDate\", \"EndTime\", \"PhaseTimings\"])\n    detector_inventory = pd.DataFrame(detector_inventory,\n                                      columns=[\"Sensor\", \"IntersectionID\", \"SensorID\", \"Direction\", \"Movement\"])\n    detector_inventory.set_index(\"Sensor\", inplace=True)\n    edges = pd.read_csv(\"data/inputs/model/edges.csv\", header=None)\n    phases = pd.read_csv(\"data/inputs/model/phases.csv\")\n    phase_plans = pd.read_csv(\"data/inputs/model/phase_plans.csv\")\n\n    # detector_list = detector_inventory.index.values\n    # print(detector_list)\n    # if args.detector_list:\n    #     with open(\"data/inputs/model/sensors_advanced.txt\", \"w\") as f:\n    #         f.write(\",\".join(detector_list))\n\n    relevant_edges = edges[edges[0].isin(detector_list) & edges[1].isin(detector_list)].copy()\n\n    graph_connections = generate_graph_connections(detector_inventory, relevant_edges, phases, phase_plans, plan_name)\n    graph_connections = add_self_edges(detector_list, graph_connections)\n\n    if args.distances_path:\n        graph_connections.to_csv(distances_path, header=[\"from\", \"to\", \"cost\"], index=False)\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--plan_name\", help=\"name of plan: E, P1, P2, or P3\")\n    parser.add_argument(\"--detector_list\", \"--dl\", nargs=\"+\", help=\"list of sensors to generate connections for\")\n    parser.add_argument(\"--distances_path\", help=\"output file for distances, if one is generated\")\n    
args = parser.parse_args()\n\n    main(args)\n","sub_path":"scripts/generate_graph_connections.py","file_name":"generate_graph_connections.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"437407218","text":"from sklearn.datasets import make_moons, make_circles, make_classification\r\nfrom sklearn.cluster import KMeans\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport matplotlib.animation as animation\r\n# set the plot style\r\nmpl.style.use('fivethirtyeight')\r\n# define the xy grid used to draw the contour plot\r\n\r\nX = np.random.random([1000, 2])\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\r\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\r\nxx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),\r\n                     np.arange(y_min, y_max, 0.1))\r\n# predict the labels over the grid\r\nclt = KMeans(n_clusters=3, random_state=6)\r\nclt.fit(X)\r\nZ = clt.predict(np.c_[xx.ravel(), yy.ravel()])\r\nZ = Z.reshape(xx.shape)\r\nyp = clt.predict(X)\r\nax.contourf(xx, yy, Z, alpha=.8)\r\n# draw the scatter plot\r\nax.scatter(X[:, 0], X[:, 1], c=yp, edgecolors='k')\r\n\r\ndef update(num, img, X, xx, yy):\r\n    #X = np.random.random([1000, 2])\r\n    clt = KMeans(n_clusters=3, random_state=num*10)\r\n    clt.fit(X)\r\n    Z = clt.predict(np.c_[xx.ravel(), yy.ravel()])\r\n    Z = Z.reshape(xx.shape)\r\n    yp = clt.predict(X)\r\n    img.cla()\r\n    img.text(0.0, 0.6, \"p=%f\"%num)\r\n    img.contourf(xx, yy, Z, alpha=.8)\r\n    # draw the scatter plot\r\n    img.scatter(X[:, 0], X[:, 1], c=yp, edgecolors='k')\r\n\r\nline_ani = animation.FuncAnimation(fig, update, 200000, fargs=(ax, X, xx, yy),\r\n                                   interval=50, blit=False)\r\nplt.axis(\"equal\")\r\nplt.show()","sub_path":"1.27_courses/cluster/Kmeans-random.py","file_name":"Kmeans-random.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"564284293","text":"n= int(input())\r\nsum1 = 0\r\nwhile True:\r\n    r= n%10#7\r\n    n = n//10#0\r\n    sum1 = sum1+r**2#49\r\n    if(n == 0):\r\n        n = sum1#n = 49 now\r\n        sum1 = 0\r\n        if(1<=n<=9):\r\n            break\r\nif n==1:\r\n    print(\"happy\")\r\nelse:\r\n    print(\"not a happy number\")\r\n","sub_path":"happy_num.py","file_name":"happy_num.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"508637656","text":"'''\n437. Path Sum III\n\nYou are given a binary tree in which each node contains an integer value.\n\nFind the number of paths that sum to a given value.\n\nThe path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).\n\nThe tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.\n\nExample:\n\nroot = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8\n\n      10\n     /  \\\n    5   -3\n   / \\    \\\n  3   2   11\n / \\   \\\n3  -2   1\n\nReturn 3. The paths that sum to 8 are:\n\n1. 5 -> 3\n2. 5 -> 2 -> 1\n3. 
-3 -> 11\n'''\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass BruteForce_Solution(object):\n    def pathSum(self, root, sum):\n        \"\"\"\n        :type root: TreeNode\n        :type sum: int\n        :rtype: int\n        \"\"\"\n        def __dfs(root, target):  # count downward paths starting at this node\n            if not root:\n                return\n            if root.val == target:\n                self.cnt[0] += 1\n            __dfs(root.left, target - root.val)\n            __dfs(root.right, target - root.val)\n\n        def __start_from(root):  # use each node as a new start\n            if not root:\n                return\n            __dfs(root, sum)\n            __start_from(root.left)\n            __start_from(root.right)\n\n        self.cnt = [0]  # initialize the counter once, outside the recursion\n        __start_from(root)\n        return self.cnt[0]\n\nimport collections\nclass Two_sum_Solution(object):\n    def pathSum(self, root, sum):\n        \"\"\"\n        :type root: TreeNode\n        :type sum: int\n        :rtype: int\n        \"\"\"\n        self.cnt = 0\n        def __dfs(root, target, so_far, memo):\n            if not root:\n                return\n            print(memo)\n            remaining = so_far + root.val - target\n            print(\"node:\", root.val, \"sum:\", target, \"so far:\", so_far, \"what's left:\", remaining)\n            if remaining in memo:\n                self.cnt += memo[remaining]\n                print(memo[remaining])\n            memo[so_far + root.val] += 1\n            print(root.val)\n            print(memo)\n            # each node is traversed only once; use memo to record prefix sums\n            __dfs(root.right, target, so_far + root.val, memo)\n            __dfs(root.left, target, so_far + root.val, memo)\n            memo[so_far + root.val] -= 1  # cannot reuse paths (unique)\n        memo = collections.defaultdict(int)\n        memo[0] = 1\n        __dfs(root, sum, 0, memo)\n        return self.cnt\n\n\nif __name__ == \"__main__\":\n    root = TreeNode(8)\n    root.left = TreeNode(5)\n    root.right = TreeNode(-3)\n    root.right.right = TreeNode(11)\n    root.left.left = TreeNode(3)\n    root.left.right = TreeNode(8)\n    root.left.right.right = TreeNode(1)\n    root.left.left.left = TreeNode(3)\n    root.left.left.right = TreeNode(-2)\n\n    #root = TreeNode(1)\n    #root.right = TreeNode(2)\n    #root.right.right = TreeNode(3)\n    #root.right.right.right = TreeNode(4)\n    #root.right.right.right.right = TreeNode(5)\n\n    #root = TreeNode(1)\n    #root.left = TreeNode(-2)\n    #root.right = TreeNode(-3)\n    #root.right.left = TreeNode(-2)\n    #root.left.left = TreeNode(1)\n    #root.left.right = TreeNode(3)\n    #root.left.left.left = TreeNode(-1)\n\n    res = BruteForce_Solution().pathSum(root, 8)\n    print(res)\n","sub_path":"437_pathSum3.py","file_name":"437_pathSum3.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"407325219","text":"assignments = []\n\n\ndef assign_value(values, box, value):\n    \"\"\"\n    Please use this function to update your values dictionary!\n    Assigns a value to a given box. 
If it updates the board, record it.\n    \"\"\"\n    values[box] = value\n    if len(value) == 1:\n        assignments.append(values.copy())\n    return values\n\n\nrows = 'ABCDEFGHI'\ncols = '123456789'\n\n\ndef cross(a, b):\n    \"\"\"Cross product of elements in a and elements in b.\"\"\"\n    return [s + t for s in a for t in b]\n\n\nboxes = cross(rows, cols)\nunitlist = []\nunits = dict()\npeers = dict()\n\n\ndef init(diagonal=True):\n    \"\"\"\n    Init internal variables to solve sudoku with specified parameters like if it should be diagonal or not\n    :param diagonal: True if solving sudoku should have diagonal constraints\n    :return: None\n    \"\"\"\n\n    row_units = [cross(r, cols) for r in rows]\n    column_units = [cross(rows, c) for c in cols]\n    square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')]\n\n    global unitlist\n    if diagonal:\n        # To solve diagonal sudoku we add 2 units for two diagonals into units collection\n        diag_units = [[a + b for (a, b) in zip(list(rows), list(cols))]] + [\n            [a + b for (a, b) in zip(list(rows), list(cols[::-1]))]]\n        unitlist = row_units + column_units + square_units + diag_units\n    else:\n        unitlist = row_units + column_units + square_units\n\n    global units\n    units = dict((s, [u for u in unitlist if s in u]) for s in boxes)\n\n    global peers\n    peers = dict((s, set(sum(units[s], [])) - {s}) for s in boxes)\n\n\ndef eliminate_from_boxes_collection(values, boxes_collection, digit):\n    \"\"\"Removes digit from all boxes in boxes_collection\"\"\"\n    for peer in boxes_collection:\n        assign_value(values, peer, values[peer].replace(digit, ''))\n\n\ndef find_twins(values):\n    \"\"\"Find all twins in the sudoku, without taking the unit/peers structure into account\n\n    Args:\n        values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n    Returns:\n        dict keyed by the twin value, whose values are the sets of boxes that share that twin value\"\"\"\n    # dict with the twin value as key and a set of boxes as value\n    twins = {}\n    for v, box in [(values[box], box) for box in values.keys() if len(values[box]) == 2]:\n        if v in twins:\n            twins[v].add(box)\n        else:\n            twins[v] = {box}\n    # Remove boxes which have a 2-length value, but no pair\n    return {v: boxs for (v, boxs) in twins.items() if len(boxs) > 1}\n\n\ndef eliminate_twins(values, twins, unit):\n    \"\"\"Eliminate twins from the unit\n\n    :param values: whole sudoku state\n    :param twins: dict with all twins, key is twins values, value is twins boxes set\n    :param unit: unit to search and eliminate twin values from\n    :return: new values state\n    \"\"\"\n    for i in range(0, len(unit)):\n        box1 = unit[i]\n        box1_value = values[box1]\n        if box1_value in twins:\n            for j in range(i + 1, len(unit)):\n                box2 = unit[j]\n                if box2 in twins[box1_value]:\n                    # even if the value of this box has already been modified by another loop iteration - it's still fine\n                    # we could continue to safely consider them as twins\n                    for digit in box1_value:\n                        other_unit_boxes = [box for box in unit if box not in {box1, box2}]\n                        eliminate_from_boxes_collection(values, other_unit_boxes, digit)\n    return values\n\n\ndef naked_twins(values):\n    \"\"\"Eliminate values using the naked twins strategy.\n    Args:\n        values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n    Returns:\n        the values dictionary with the naked twins eliminated from peers.\n    \"\"\"\n\n    # Find all instances of naked twins\n    twins = find_twins(values)\n\n    # Eliminate the naked twins as possibilities for their peers\n    for unit in unitlist:\n        values = eliminate_twins(values, twins, unit)\n\n    return values\n\n\ndef grid_values(grid):\n    \"\"\"\n    Convert grid into a dict of {square: char} with '123456789' for empties.\n    Args:\n        grid(string) - A grid in string form.\n    Returns:\n        A grid in dictionary form\n            Keys: The boxes, e.g., 'A1'\n            Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.\n    \"\"\"\n    chars = []\n    digits = '123456789'\n    for c in grid:\n        if c in digits:\n            chars.append(c)\n        if c == '.':\n            chars.append(digits)\n    assert len(chars) == 81\n    return dict(zip(boxes, chars))\n\n\ndef display(values):\n    \"\"\"\n    Display the values as a 2-D grid.\n    Args:\n        values(dict): The sudoku in dictionary form\n    \"\"\"\n    width = 1 + max(len(values[s]) for s in boxes)\n    line = '+'.join(['-' * (width * 3)] * 3)\n    for r in rows:\n        print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\n                      for c in cols))\n        if r in 'CF':\n            print(line)\n    print()\n\n\ndef eliminate(values):\n    \"\"\"\n    Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all\n    its peers.\n    Input: A sudoku in dictionary form.\n    Output: The resulting sudoku in dictionary form.\n    \"\"\"\n    solved_values = [box for box in values.keys() if len(values[box]) == 1]\n    for box in solved_values:\n        digit = values[box]\n        eliminate_from_boxes_collection(values, peers[box], digit)\n    return values\n\n\ndef only_choice(values):\n    \"\"\"\n    Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value\n    to this box.\n    Input: A sudoku in dictionary form.\n    Output: The resulting sudoku in dictionary form.\n    \"\"\"\n    for unit in unitlist:\n        for digit in '123456789':\n            dplaces = [box for box in unit if digit in values[box]]\n            if len(dplaces) == 1:\n                assign_value(values, dplaces[0], digit)\n    return values\n\n\ndef reduce_puzzle(values):\n    \"\"\"\n    Iterate eliminate(), only_choice() and naked_twins() if required.\n    If at some point, there is a box with no available values, return False.\n    If the sudoku is solved, return the sudoku.\n    If after an iteration of both functions, the sudoku remains the same, return the sudoku.\n    :param values: A sudoku in a dictionary form.\n    :return: The resulting sudoku in dictionary form.\n    \"\"\"\n    stalled = False\n    while not stalled:\n        assert isinstance(values, dict)\n        solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n        values = eliminate(values)\n        values = only_choice(values)\n        assert isinstance(values, dict)\n\n        solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n        stalled = solved_values_before == solved_values_after\n        # use an expensive naked twins only if stalled with basic methods\n        if stalled:\n            values = naked_twins(values)\n            solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n            stalled = solved_values_before == solved_values_after\n\n        if len([box for box in values.keys() if len(values[box]) == 0]):\n            return False\n    return values\n\n\ndef search(values):\n    \"\"\"Using depth-first search and propagation, try all possible values.\"\"\"\n    assert isinstance(values, dict)\n\n    values = reduce_puzzle(values)\n    if values is False:\n        return False  # Failed earlier\n    assert isinstance(values, dict)\n\n    if all(len(values[s]) == 1 for s in boxes):\n        return values  # Solved!\n    # Choose one of the unfilled squares with the fewest possibilities\n    n, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n    # Now use recurrence to solve each one of the resulting sudokus, and return the first attempt that succeeds\n    for value in values[s]:\n        new_sudoku = values.copy()\n        new_sudoku[s] = value\n        attempt = search(new_sudoku)\n        if attempt:\n            return attempt\n\n\ndef solve(grid, diagonal=True):\n    \"\"\"\n    Find the solution to a Sudoku grid.\n    :param grid: string serialized representation of a sudoku grid\n        Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n    :param diagonal: if you want to solve a diagonal sudoku or a general one\n    :return: The dictionary representation of the final sudoku grid. False if no solution exists.\n    \"\"\"\n    init(diagonal)\n    return search(grid_values(grid))\n\n\nif __name__ == '__main__':\n    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n    display(solve(diag_sudoku_grid))\n\n    try:\n        from visualize import visualize_assignments\n\n        visualize_assignments(assignments)\n    except SystemExit:\n        pass\n    except:\n        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"168545268","text":"stats=open(\"StatsPlayers.txt\",\"r+\")\n\n\n\n\ndef player_stats():\n    stats.seek(0)\n\n    List=stats.readline()\n\n    List=List.split(\",\")\n\n    List=List[:-1]\n\n\n    for i in range(0,len(List)):\n        List[i]=int(List[i])\n\n\n    Ind1=int(len(List)/3)\n\n    Ind2=0\n\n    Ind3=3\n\n    Res1=[]\n\n    while Ind1>0:\n        Counter=3\n        Res1.append([])\n\n        while Counter>0:\n\n            Res1[-1].append(List[Ind2])\n\n            Ind2+=1\n\n            Counter-=1\n\n        Ind1-=1\n\n\n    return Mat1(Res1)\n\ndef Mat1(List):\n\n    Ind=3\n\n    Ind2=0\n\n    Mat=[]\n\n    while Ind:\n\n        Mat.append([])\n\n        Counter=7\n\n        while Counter>0:\n            Counter-=1\n\n            Mat[-1].append(List[Ind2])\n\n            Ind2+=1\n        Ind-=1\n\n    return Mat\n\n\n\ndef player_adding(Team,Player,Ind):\n    Mat=player_stats()\n\n    Mat[Team][Player][Ind]+=1\n\n    player_rewrite(Mat)\n\n    return player_stats()\n\ndef player_rewrite(List):\n\n    rewritable=\"\"\n\n    for fil in List:\n        for col in fil:\n            for ele in col:\n\n                rewritable+=str(ele)\n\n                rewritable+=\",\"\n\n    stats.seek(0)\n\n    stats.write(rewritable)\n\n    stats.seek(0)\n\n\n    return stats.readline()\n\n\ndef player_reset():\n\n    List=player_stats()\n\n    rewritable=\"\"\n\n    for fil in List:\n        for col in fil:\n            for ele in col:\n                for i in range(0,len(str(ele))):\n\n                    rewritable+=str(0)\n\n                    rewritable+=\",\"\n\n\n    stats.seek(0)\n\n    stats.write(rewritable)\n\n    stats.seek(0)\n\n\n    return player_stats()\n","sub_path":"ProyectoFinal/PlayerStats.py","file_name":"PlayerStats.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"319739216","text":"'''\n@Description: \n@Author: 妄想\n@Date: 2020-06-25 13:00:50\n@LastEditTime: 2020-06-25 16:01:15\n@LastEditors: 妄想\n'''\nfrom util.config import Config\nfrom util.models import MySpider\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\nclass MysqlPipeline(object):\n    \n    def __init__(self):\n        engine = create_engine(Config.SQLALCHEMY_DATABASE_URI)\n        DBSession = sessionmaker(bind=engine)\n        self.session = DBSession()\n    \n    def process_item(self, item, spider):\n        model = MySpider(\n            url=item['url'],\n            size=item['size']\n        )\n        self.session.add(model)\n        self.session.commit()\n        print('New item {} added to DB.'.format(item['url']))\n        return item\n    \n    def close_spider(self, 
spider):\n try:\n self.session.commit()\n except:\n self.session.rollback()\n raise\n finally:\n self.session.close()\n","sub_path":"codes/distributed-crawler-leo6033/myspider-salve/myspider/mysql_pipeline.py","file_name":"mysql_pipeline.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"616330057","text":"class Student(object):\n def __init__(self):\n self.function_exercises = set()\n self.function_percent = 0\n\n self.condition_exercises = set()\n self.condition_percent = 0\n\n self.iteration_exercises = set()\n self.iteration_percent = 0\n\n self.iteration_intro_exercises = set()\n self.intro_iteration_percent = 0\n\n self.variables_intro_exercises = set()\n self.intro_variables_percent = 0\n\n self.objects_intro_exercises = set()\n self.intro_objects_percent = 0\n\n self.functions_intro_exercises = set()\n self.intro_functions_percent = 0\n\n self.name = \"\"\n self.date_submitted = None\n self.replit_points = 0\n self.replit_profile = \"\"\n self.codecademy_percent = 0\n self.codecademy_profile = \"\"\n\n def __repr__(self):\n return (\n f'{self.name}\\n'\n f'Codecademy: {self.codecademy_percent}%\\n'\n f'Repl.it Intro to Variables: {self.intro_variables_percent:.1f}\\n'\n f'Repl.it Intro to Arrays: {self.intro_iteration_percent:.1f}\\n'\n f'Repl.it Intro to Objects: {self.intro_objects_percent:.1f}\\n'\n f'Repl.it Intro to Functions: {self.intro_functions_percent:.1f}\\n'\n f'Repl.it Conditions: {self.condition_percent:.1f}\\n'\n f'Repl.it Iteration: {self.iteration_percent:.1f}\\n'\n f'Repl.it Functions: {self.function_percent:.1f}\\n'\n )","sub_path":"Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272168086","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nfrom resources import DEFAULT_DECK_NA, DEFAULT_BET, DEFAULT_CARDS, DEFAULT_SCORE, DEFAULT_BUDGET, NUM_PLAYERS, NUM_DECKS, \\\n DEFAULT_DECK_LEN, BET_MIN\nfrom random import sample\nfrom typing import NewType\nfrom typing import List, Tuple\nfrom copy import deepcopy, copy\nfrom utility import add_cards, add_cards_dealer\nimport colours as col\nimport time\n\n\n# UWAGA poniższy kod korzysta z pewnych założeń, których spełnienie jest konieczne do poprawnego działania programu;\n# 1. Gracze mają różne imiona.\n# 2. Nie można użyć split wiecej niż raz na 'rundę' (rozdzielonych kart nie można ponownie rozdzielić)\n# 3. Od razu mam rozwiązanie dla wielu graczy,w game tworzona jest lista obiektów player\n# 4. Zakładam, że defaultowe wartości gry wynoszą odpowiednio:\n# score = 0, player.cards = [[]], bet = 10, dealer.cards = [[]], imie = \"player{numer gracza}\"\n# budget = 200, liczba graczy = 1, liczba talii = 1\n#\n# 5. 
Jest problem z draw - potrzeba instancji klasy game żeby istniała talia na której draw wykonuje operacje\n\ndef create_player_names():\n names = []\n for i in range(NUM_PLAYERS):\n names.append(f\"player {i + 1}\")\n return names\n\n\n# TWORZY TALIĘ DO GRY\n\ndef create_deck():\n return sample(NUM_DECKS * DEFAULT_DECK_NA, NUM_DECKS * DEFAULT_DECK_LEN)\n\n\ndef score_without_aces(cards):\n aces_with_indexes = {}\n override_cards = []\n score = 0\n for index, card in enumerate(cards):\n condition = card[1] == 0\n if condition:\n aces_with_indexes[index] = card ## WRÓCIĆ WAŻNE!!!!\n else:\n _, points, _ = card\n score += points\n override_cards.append(card)\n return score, aces_with_indexes, override_cards\n\n\ndef print_stat(player):\n stats = \"\\n\"\n stats += col.MAGENTA + f\"{player.name}\" + col.WHITE + \"'s turn \" + col.MAGENTA + \"(˵ ͡° ͜ʖ ͡°˵)\\n\" \\\n + col.WHITE + f\"\\n\\tscore: {player.score}\\n\\tbet: {player.bet}\\n\\tbudget: {player.budget}\\n\"\n print(stats)\n\n\nDECK = deepcopy(create_deck())\n\n\n# FUNKCJA DO TWORZENIA LISTY GRACZY\n\n\ndef create_players(scores, players_cards, bets, players_names, budgets):\n players = []\n for i in range(NUM_PLAYERS):\n players.append(Player(cards=deepcopy(players_cards[i]), score=scores[i], bet=bets[i],\n budget=budgets[i], name=players_names[i]))\n return players\n\n\nCard = NewType(\"Card\", Tuple[str, int, str])\nCards = NewType(\"Cards\", List[Card])\n\n\n# GŁÓWNA KLASA KTÓRA ZAWIERA WSZYSTKIE DANE O GRZE\n# INFORMACJE PRZECHOWYWANE SĄ W POSTACI LIST (ROZWIĄZAUJE TO PROBLEM GRY DLA WIELU GRACZY)\n\n\nclass Game:\n\n def __init__(self, scores: List[int] = None, players_cards: List[Cards] = None,\n players_bets: List[float] = None, dealer_cards: List[Cards] = None,\n players_names: List[str] = None, budgets=None):\n self.__scores = scores if scores is not None else copy(NUM_PLAYERS * DEFAULT_SCORE)\n self.__bets = players_bets if players_bets is not None else copy(NUM_PLAYERS * DEFAULT_BET)\n self.__players_cards = players_cards if players_cards is not None else copy(NUM_PLAYERS * DEFAULT_CARDS)\n self.__players_names = players_names if players_names is not None else create_player_names()\n self.__budgets = budgets if budgets is not None else copy(NUM_PLAYERS * DEFAULT_BUDGET)\n\n self.pl_stood = []\n self.pl_broken = []\n self.pl_busted = []\n self.pl_split = []\n\n self.shared_budgets = {}\n self.dealer = Dealer(dealer_cards) if dealer_cards is not None else Dealer(DEFAULT_CARDS[0], 0)\n self.player_list = create_players(scores=self.__scores, budgets=self.__budgets,\n players_cards=self.__players_cards, bets=self.__bets,\n players_names=self.__players_names)\n\n def __str__(self) -> str:\n report = \"\"\n for index, player in enumerate(self.player_list):\n report += f\"\\nPlayer {index + 1}. 
data:\\ncards: {player.cards}\\n\" \\\n f\"score: {player.score}\\nbet: {player.bet}\\nbudget: {player.budget}\\nname: {player.name}\\n\"\n\n return f\"Created {len(self.player_list)} player(s):\" + report\n\n def run_game(self):\n raise NotImplementedError\n\n def print_table(self):\n beg = \"________________________________________________________\\n\" \\\n \"|\\n\" \\\n \"| Dealer's cards:\\n\"\n\n cards = \"\" + add_cards_dealer(self.dealer.cards) + \"| \\n| \"\n for player in self.player_list:\n cards += f\"{player.name}'s cards:\\n\"\n cards += add_cards(player.cards) + \"| \"\n cards += \"\\n|_______________________________________\\n\" if len(self.pl_stood) != 0 else \"\\n\"\n for player_st in self.pl_stood:\n cards += f\"| {player_st.name}'s cards:\\n\"\n cards += add_cards(player_st.cards) + \"| \\n\"\n\n end = \"|_______________________________________________________\\n\"\n\n print(beg + cards + end)\n\n def print_table_final_round(self):\n beg = \"________________________________________________________\\n\" \\\n \"|\\n\" \\\n \"| Dealer's cards:\\n\"\n\n cards = \"\" + add_cards(self.dealer.cards) + \"| \\n| \"\n for player in self.player_list:\n cards += f\"{player.name}'s cards:\\n\"\n cards += add_cards(player.cards) + \"| \"\n cards += \"\\n|_______________________________________\\n\" if len(self.pl_stood) != 0 else \"\\n\"\n for player_st in self.pl_stood:\n cards += f\"| {player_st.name}'s cards:\\n\"\n cards += add_cards(player_st.cards) + \"| \\n\"\n\n end = \"|_______________________________________________________\\n\"\n\n print(beg + cards + end)\n\n # PONIŻSZE METODY SĄ DO DOPRACOWANIA / OBGADANIA Z KIMŚ KTO OGARNIA PĘTLĘ GRY\n\n # Poziom rundy\n\n def run_game_loop(self):\n return len(self.pl_broken) != NUM_PLAYERS\n\n def run_round_loop(self):\n return len(self.pl_busted + self.pl_stood) - len(self.pl_split) != NUM_PLAYERS\n\n def final_round(self):\n print(\"All players either lost or chose to stand!\\n\")\n time.sleep(3)\n self.calculate_and_verify_scores()\n self.print_table_final_round()\n self.dealer.draw_until_17_or_higher()\n self.print_table_final_round()\n self.calculate_round_outcome()\n\n def next_round(self):\n #self.print_table()\n self.calculate_and_verify_scores()\n self.player_action_loop()\n\n def first_round(self) -> None:\n for player in self.player_list:\n player.draw_hand()\n self.dealer.draw_hand()\n self.subtract_bets_from_budgets()\n self.print_table()\n self.calculate_and_verify_scores()\n self.player_action_loop()\n self.split_if_flagged()\n\n def player_action_loop(self):\n for player in self.player_list:\n print(player)\n print_stat(player)\n self.round_menu(player)\n\n def calculate_round_outcome(self):\n for player in self.pl_stood:\n difference = abs(self.dealer.score - 21) - abs(player.score - 21)\n if difference > 0:\n player.win()\n print(f\"{player.name} won this round.\\n\")\n elif difference == 0:\n player.loss()\n print(f\"{player.name} lost this round.\\n\")\n else:\n player.budget += player.bet\n print(f\"{player.name} had the same score as dealer.\\n\")\n\n def calculate_and_verify_scores(self):\n for index, player in enumerate(self.player_list):\n player.add_points()\n if player.score > 21:\n print(col.RED + f\"{player.name} has busted! 
(score > 21)\" + col.WHITE)\n self.pl_busted.append(self.player_list.pop(index))\n\n # Poziom gry\n\n def check_if_can_aff_new_round(self):\n override_pllist = []\n for player in self.player_list:\n if player.budget >= BET_MIN:\n player.budget -= player.bet\n override_pllist.append(player)\n else:\n player.budget = 0\n print(f\"{player.name} can't afford a new bet and is out of game!\")\n self.pl_broken.append(player)\n self.player_list = override_pllist\n\n def round_menu(self, player):\n can_use = [0]\n if player.can_hit():\n col_code = col.GREEN\n can_use.append(1)\n else:\n col_code = col.RED\n print(col_code + \"\\t\\t1) hit\" + col.WHITE)\n if player.can_double_down():\n col_code = col.GREEN\n can_use.append(2)\n else:\n col_code = col.RED\n print(col_code + \"\\t\\t2) double down\" + col.WHITE)\n if player.can_split():\n col_code = col.GREEN\n can_use.append(3)\n else:\n col_code = col.RED\n print(col_code + \"\\t\\t3) split\" + col.WHITE)\n if can_insure(dealer=self.dealer):\n col_code = col.GREEN\n can_use.append(4)\n else:\n col_code = col.RED\n print(col_code + \"\\t\\t4) insure\" + col.WHITE)\n print(col.GREEN + \"\\t\\t0) stand\\n\" + col.WHITE)\n run = True\n override_pllist = []\n while run:\n c = input(\"I choose: \")\n if c == \"1\" and 1 in can_use:\n player.hit()\n run = False\n override_pllist.append(player)\n elif c == \"2\" and 2 in can_use: # TRZEBA BEDZIE TO POPRAWIC\n player.double_down()\n run = False\n override_pllist.append(player)\n elif c == \"3\" and 3 in can_use:\n player.split()\n run = False\n override_pllist.append(player)\n elif c == \"4\" and 4 in can_use:\n player.insure()\n run = False\n override_pllist.append(player)\n elif c == \"0\":\n player.stand()\n run = False\n self.pl_stood.append(player)\n else:\n print(\"Invalid input, please try again.\")\n self.player_list = override_pllist\n\n def subtract_bets_from_budgets(self): # OBLICZA BUDŻET PO ODJĘCIU ZAKŁĄDU (PRZY WEJŚCIU DO NOWEJ RUNDY)\n for player in self.player_list:\n player.budget -= player.bet\n\n # def check_budgets(self): # SPRAWDZA CZY GRACZA STAĆ NA WEJŚCIE DO NOWEJ RUNDY\n # for player in self.player_list:\n # if player.budget <= player.bet: # POWINNO BYĆ <= min_bet (zakład ma jakąś minimalną wartość)\n # Player.can_enter_new_round = False\n # print(f\"{player.name} is broken!\")\n\n # TEJ FUNCKJI UŻYWA METODA DO REALIZACJI SPLIT'U, PRZYJMUJE ONA GRACZA I JEGO INDEX NA LIŚCIE GRACZY A NASTĘPNIE\n # ROZDZIELA GO NA \"2 RĘCE\" CZYLI DZIELI BUDŻET, KARTY I ZMIENIA IMIONA.\n\n def create_hands(self, player, player_index):\n self.shared_budgets[f\"{player.name} shared budget\"] = SharedBudget(player.budget, player_index)\n first_hand = Player(cards=[deepcopy(player.cards[0])], score=deepcopy(player.score), bet=deepcopy(player.bet),\n budget=0, name=f\"{player.name}'s first hand\")\n second_hand = Player(cards=[deepcopy(player.cards[1])], score=deepcopy(player.score), bet=deepcopy(player.bet),\n budget=0, name=f\"{player.name}'s second hand\")\n first_hand.budget = self.shared_budgets[f\"{player.name} shared budget\"]\n second_hand.budget = self.shared_budgets[f\"{player.name} shared budget\"]\n return [first_hand, second_hand]\n\n # TUTAJ ZASTOSOWAŁEM TAKIE SŁABE ROZWIĄZANIE, JEŚLI GRACZ CHCE SPLITOWAĆ TO USTAWIA U NIEGO WARTOŚĆ POLA\n #\n\n def split_if_flagged(self):\n for player_index, player in enumerate(self.player_list):\n if player.do_split:\n hands = self.create_hands(player, player_index)\n self.pl_split.append(self.player_list.pop(player_index))\n 
self.player_list.insert(player_index, hands[0])\n self.player_list.insert(player_index + 1, hands[1])\n\n else:\n pass\n\n\nclass SharedBudget: # KLASA UŻYWANA DO TWORZENIA WSPÓŁDZIELONYCH BUDŻETÓW (DLA SPLITU)\n def __init__(self, budget, index):\n self.budget = budget\n self.index = index\n\n # PONIŻSZE METODY IMPLEMENTUJĄ OPERATORY +, -, *, -=, +=, <, >, <=, >=, ==\n\n def __add__(self, other):\n return self.budget + other\n\n def __lt__(self, other):\n return self.budget < other\n\n def __gt__(self, other):\n return self.budget > other\n\n def __le__(self, other):\n return self.budget <= other\n\n def __ge__(self, other):\n return self.budget >= other\n\n def __sub__(self, other):\n return self.budget - other\n\n def __isub__(self, other):\n return self.budget - other\n\n def __iadd__(self, other):\n return self.budget + other\n\n def __mul__(self, other):\n return self.budget * other\n\n def __eq__(self, other):\n return self.budget == other\n\n def __str__(self):\n return f\"Shared Budget of player {self.index + 1} budget: {self.budget}\"\n\n def change_budget(self, x): # NIE WIEM CZY TO POTRZEBNE LEPIEJ ODWOŁYWAĆ SIĘ BEZPOŚREDNIO DO POLA ?\n self.budget = x\n\n\nclass Entity(object): # KLASA MACIERZYSTA DLA KLAS PLAYER I DEALER\n\n def __init__(self, cards=None, score=0):\n self.cards = cards if cards is not None else []\n self.score = score\n\n def draw(self):\n self.cards.append(DECK.pop(0))\n\n def draw_hand(self):\n self.draw()\n self.draw()\n\n\ndef can_insure(dealer):\n if dealer.cards[0][0] == \"Ace\":\n return True\n else:\n return False\n\n\nclass Player(Entity):\n\n def __init__(self, cards: list, score: int, bet: float, name: str, budget):\n super().__init__(cards=cards, score=score)\n self.bet = bet\n self.name = name\n self.budget = budget\n self.insurance = 0\n self.do_split: bool = False #\n self.had_hit: bool = False #\n self.had_split: bool = False # TE ZMIENNE PRZECHOWUJĄ INFORMACJE O RUCHACH GRACZA TZN\n self.had_stood: bool = False # JAKICH METOD UŻYŁ POTRZEBNE DO SPRAWDZANIA CZY NP GRACZ MOŻE\n self.had_doubled: bool = False # SPLITOWAĆ (MOŻLIWE TYLKO W \"1\" TURZE)\n self.can_enter_new_round = True # jeśli false gracz przegrywa\n\n def __str__(self):\n report = \"Player __str__ called\"\n report += f\"\\nPlayer data:\\ncards: {self.cards}\\n\" \\\n f\"score: {self.score}\\nbet: {self.bet}\\nbudget: {self.budget}\\nname: {self.name}\\n\"\n\n return report\n\n # USTAWIA WARTOŚĆ ZAKŁADU / PATRZĄC Z PERSPEKTYWY CZASU TA FUNKCJA JEST DO WYWALENIA DO POLA BĘDZIE SIĘ\n # / ODWOŁYWAĆ BEZPOŚREDNIO PRZEZ PLAYER.BET = NOWA_WARTOŚĆ\n\n def add_points(self):\n self.score, aces_to_assign_value, self.cards = score_without_aces(self.cards)\n if len(aces_to_assign_value) != 0:\n print(f\"{self.name} have {len(aces_to_assign_value)} aces.\\n\")\n print(\"\\t1) 1 point\\n\")\n print(\"\\t2) 11 points\\n\")\n run = True\n value = 11\n cards_to_insert = {}\n for loop, index_ace in enumerate(aces_to_assign_value.items()):\n index, ace = index_ace\n _, _, colour = ace\n while run:\n print(\"Your current score is \" + col.GREEN + f\"{self.score}\\n\" + col.WHITE)\n print(f\"Choose the value of the {loop + 1} ace\\n\")\n choice = input(\"I choose: \")\n if choice == \"1\":\n value = 1\n run = False\n if choice == \"2\":\n value = 11\n run = False\n else:\n print(\"Invalid input number! 
Please try again.\")\n self.score += value\n cards_to_insert[index] = (\"Ace\", value, colour)\n print(f\"Your final score is {self.score}\")\n for index, card in cards_to_insert.items():\n self.cards.insert(index, card)\n\n def set_bet(self, new_bet=None):\n if new_bet is None:\n new_bet = input(col.GREEN + \"Input new bet value: \\n\")\n\n if self.budget >= new_bet >= BET_MIN:\n self.bet = new_bet\n else:\n print(col.RED + \"You cannot set your bet to that value\\n\")\n\n def win(self):\n self.budget += 2 * self.bet\n self.cards = []\n self.insurance = 0\n self.do_split: bool = False\n self.had_hit: bool = False\n self.had_split: bool = False\n self.had_stood: bool = False\n self.had_doubled: bool = False\n self.can_enter_new_round = True\n\n def loss(self):\n if self.insurance:\n self.budget += 2 * self.insurance\n self.cards = []\n self.insurance = 0\n self.do_split: bool = False\n self.had_hit: bool = False\n self.had_split: bool = False\n self.had_stood: bool = False\n self.had_doubled: bool = False\n self.can_enter_new_round = True\n\n def r_draw(self):\n self.budget += self.bet\n self.cards = []\n self.insurance = 0\n self.do_split: bool = False\n self.had_hit: bool = False\n self.had_split: bool = False\n self.had_stood: bool = False\n self.had_doubled: bool = False\n self.can_enter_new_round = True\n\n # METODY SPRAWDZAJĄCE:\n\n def can_hit(self):\n if not self.had_doubled and not self.had_stood:\n return True\n else:\n return False\n\n # SPRAWDZA CZY GRACZ MOŻE UŻYĆ SPLIT\n def can_split(self):\n if len(self.cards) >= 2:\n _, card1, _ = self.cards[0]\n _, card2, _ = self.cards[1]\n\n if card1 == card2 and not self.had_hit and not self.had_split and not self.had_stood:\n return True\n else:\n return False\n\n # SPRAWDZA CZY GRACZ MOŻE UŻYĆ DOUBLE DOWN\n def can_double_down(self):\n if not self.had_hit and not self.had_stood and not self.had_doubled:\n return True\n else:\n return False\n\n # SPRAWDZA CZY GRACZ MOŻE UŻYĆ INSURANCE\n\n # ZWYKŁA FUNKCJA\n\n # METODY WŁAŚCIWE\n\n def hit(self):\n if self.can_hit():\n self.draw()\n self.had_hit = True\n else:\n print(col.RED + 'After doubling down you cannot draw any more cards')\n\n def stand(self):\n self.had_stood = True\n\n def double_down(self):\n if self.can_double_down():\n self.set_bet(2 * self.bet)\n self.hit()\n self.had_doubled = True\n else:\n print('You cannot double down')\n\n def split(self): # USTAWIA FLAGĘ\n if self.can_split():\n self.do_split = True\n else:\n print('You cannot split')\n\n def insure(self):\n self.insurance = 0.5 * self.bet\n self.budget -= self.insurance\n\n\nclass Dealer(Entity):\n\n def __init__(self, cards: List[Cards] = None, score=None):\n super().__init__(cards=cards, score=score)\n\n def add_points(self):\n self.score, aces_with_indexes, self.cards = score_without_aces(self.cards)\n for index, card in aces_with_indexes.items():\n _, point, colour = card\n if self.score <= 10:\n point = 11\n else:\n point = 1\n new_card = \"Ace\", point, colour\n self.score += point\n self.cards.insert(index, new_card)\n\n def draw_until_17_or_higher(self):\n self.add_points()\n print(f\"Dealer's score is {self.score}\\n\")\n time.sleep(5)\n if self.score < 17:\n print(\"dealer draws!\\n\")\n time.sleep(3)\n self.draw()\n self.add_points()\n self.draw_until_17_or_higher()\n else:\n pass\n print(f\"Dealer's final score is 
{self.score}\\n\")\n","sub_path":"GameLogic/classes_console.py","file_name":"classes_console.py","file_ext":"py","file_size_in_byte":20436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602262714","text":"import logging\nimport pytest\n\nimport app\nfrom Api.apiFactory import ApiFactory\n\n@pytest.mark.run(order=0)\nclass TestUser:\n def test_get_token(self):\n \"\"\"Get a token\"\"\"\n res = ApiFactory.get_user_api().get_token_api()\n # Log the request URL, request parameters and response data\n logging.info(\"Request URL: {}\".format(res.url))\n logging.info(\"Response data: {}\".format(res.json()))\n # Print the token\n print(\"Token obtained: {}\".format(res.json()))\n # Assert the status code\n assert res.status_code == 200\n # Assert the token\n assert \"token\" in res.json()\n # Save the token\n app.headers[\"token\"] = res.json().get(\"token\")\n\n def test_verify_token(self):\n \"\"\"Verify the token\"\"\"\n res = ApiFactory.get_user_api().token_verify_api()\n # Log the request URL, request parameters and response data\n logging.info(\"Request URL: {}\".format(res.url))\n logging.info(\"Response data: {}\".format(res.json()))\n # Assert the status code\n assert res.status_code == 200\n # Assert isValid\n assert res.json().get(\"isValid\") is True\n\n def test_get_adress_info(self):\n \"\"\"Get the address info\"\"\"\n res = ApiFactory.get_user_api().get_adress_info_api()\n # Log the request URL, request parameters and response data\n logging.info(\"Request URL: {}\".format(res.url))\n logging.info(\"Response data: {}\".format(res.json()))\n # Assert the status code\n assert res.status_code == 200\n # Assert name, mobile, province, country, detail\n assert res.json().get(\"name\") == \"大王\"\n assert res.json().get(\"mobile\") == \"13888888888\"\n assert res.json().get(\"province\") == \"上海市\"\n assert res.json().get(\"country\") == \"浦东新区\"\n assert res.json().get(\"detail\") == \"111号\"\n","sub_path":"Scripts/testuser.py","file_name":"testuser.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"163916263","text":"import unittest\nimport imp\nimport os\n\n\njump = imp.load_source('jump', 'jump')\nprocess_paths = jump.process_paths\nparse_options = jump.parse_options\nos.remove('jumpc')\n\n\nclass MockOpts(object):\n pass\n\n\nclass Jump_TC(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_named_paths_file(self):\n '''\n named_paths_file(): can exercise\n '''\n npf = jump.named_paths_file()\n self.assertIsInstance(npf, str)\n\n def test_abspath(self):\n '''\n abspath(): can exercise\n '''\n tests = [\n '.',\n '../../blort',\n '~/foo/bar',\n ]\n for p in tests:\n ap = jump.abspath(p)\n self.assertIsInstance(ap, str)\n\n def test_exit_info(self):\n '''\n exit_info()\n '''\n opts = MockOpts()\n opts.cd = False\n # Success code.\n code = 0\n msg = 'hi'\n ei = jump.exit_info(opts, code, msg)\n exp = dict(code = code, stderr = None, stdout = msg)\n self.assertEqual(ei, exp)\n # Fail code, in non-cd mode.\n code = 1\n ei = jump.exit_info(opts, code, msg)\n exp = dict(code = code, stderr = msg, stdout = None)\n self.assertEqual(ei, exp)\n # Fail code, in cd mode.\n code = 1\n opts.cd = True\n ei = jump.exit_info(opts, code, msg)\n exp = dict(code = code, stderr = msg, stdout = '.')\n self.assertEqual(ei, exp)\n\n def test_json_functions(self):\n '''\n JSON functions\n '''\n # Setup some named path info.\n f = 'tmp/jump_test.json'\n d1 = dict(\n foo = '/foo/bar/blah',\n blort = '/tmp/blort',\n )\n # Remove temp file if it exists.\n OP = os.path\n if OP.isfile(f):\n os.remove(f)\n self.assertFalse(OP.isfile(f))\n # Save the named paths.\n jump.save_named_paths(f, d1)\n self.assertTrue(OP.isfile(f))\n # 
Load them and make sure they equal the original.\n d2 = jump.load_named_paths(f)\n self.assertEqual(d1, d2)\n # Clean up.\n os.remove(f)\n self.assertFalse(OP.isfile(f))\n\n def test_parse_options(self):\n '''\n parse_options()\n '''\n # Invalid options.\n args = '--hello --add x y'.split()\n opts, code, msg = parse_options(args)\n self.assertTrue(code)\n self.assertIn('Unrecognized arguments', msg)\n\n # Help.\n args = '--help --add a b'.split()\n opts, code, msg = parse_options(args)\n self.assertFalse(code)\n self.assertEqual(msg, jump.usage())\n\n # Version.\n args = '--version --add q r'.split()\n opts, code, msg = parse_options(args)\n self.assertFalse(code)\n self.assertIn(jump.__version__, msg)\n\n # Conflicting options.\n tests = [\n '--add a b -rm x',\n '--mv a b foo',\n ]\n for t in tests:\n args = t.split()\n opts, code, msg = parse_options(args)\n self.assertTrue(code)\n self.assertIn('Conflicting arguments', msg)\n\n # Regular usages.\n tests = [\n '--add a b',\n '--rm a',\n '--mv a b',\n 'foo',\n '',\n ]\n for t in tests:\n args = t.split()\n opts, code, msg = parse_options(args)\n self.assertFalse(code)\n self.assertFalse(msg)\n\n def test_process_paths_add(self):\n '''\n process_paths(add)\n '''\n # Basic add.\n args = '--add a /foo/bar'.split()\n _, k, v = args\n opts = parse_options(args)[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertTrue(save)\n self.assertFalse(code)\n self.assertFalse(msg)\n self.assertEqual(paths2[k], v)\n self.assertNotEqual(paths1, paths2)\n\n # With --literal.\n args = '--add a ../foo/bar --literal'.split()\n _, k, v, _ = args\n opts = parse_options(args)[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertTrue(save)\n self.assertFalse(code)\n self.assertFalse(msg)\n self.assertEqual(paths2[k], v)\n self.assertNotEqual(paths1, paths2)\n\n def test_process_paths_rm(self):\n '''\n process_paths(rm)\n '''\n # Successful.\n args = '--rm a'.split()\n _, k = args\n opts = parse_options(args)[0]\n paths1 = dict(a = 'hi')\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertTrue(save)\n self.assertFalse(code)\n self.assertFalse(msg)\n self.assertEqual(paths2, {})\n self.assertNotEqual(paths1, paths2)\n\n # Bad key.\n args = '--rm a'.split()\n _, k = args\n opts = parse_options(args)[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertFalse(save)\n self.assertTrue(code)\n self.assertTrue(msg)\n self.assertIn('Name not found', msg)\n self.assertEqual(paths2, {})\n\n def test_process_paths_mv(self):\n '''\n process_paths(mv)\n '''\n # Successful.\n args = '--mv a b'.split()\n _, k1, k2 = args\n opts = parse_options(args)[0]\n v = 'hi'\n paths1 = dict(a = v)\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertTrue(save)\n self.assertFalse(code)\n self.assertFalse(msg)\n self.assertEqual(paths2[k2], v)\n self.assertEqual(paths1[k1], paths2[k2])\n self.assertNotIn(k1, paths2)\n self.assertNotEqual(paths1, paths2)\n\n # Bad key.\n args = '--mv a b'.split()\n _, k1, k2 = args\n opts = parse_options(args)[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertFalse(save)\n self.assertTrue(code)\n self.assertTrue(msg)\n self.assertIn('Name not found', msg)\n self.assertEqual(paths1, paths2)\n\n def test_process_paths_name(self):\n '''\n process_paths(name)\n '''\n # Successful.\n k, v = 'a b'.split()\n opts = parse_options([k])[0]\n paths1 = {k : v}\n paths2, save, code, msg = process_paths(opts, 
paths1)\n self.assertFalse(save)\n self.assertFalse(code)\n self.assertEqual(msg, v)\n\n # Bad key.\n k = 'a'\n opts = parse_options([k])[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertFalse(save)\n self.assertTrue(code)\n self.assertTrue(msg)\n self.assertIn('Name not found', msg)\n\n def test_process_paths_all_names(self):\n '''\n process_paths(all_names)\n '''\n # Successful.\n opts = parse_options([])[0]\n paths1 = dict(a = 'hi', b = 'bye')\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertFalse(save)\n self.assertFalse(code)\n for k, v in paths2.items():\n self.assertIn(k, msg)\n self.assertIn(v, msg)\n\n # No names.\n opts = parse_options([])[0]\n paths1 = {}\n paths2, save, code, msg = process_paths(opts, paths1)\n self.assertFalse(save)\n self.assertFalse(code)\n self.assertIn('No names', msg)\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339817193","text":"\"\"\"Test for the base entity class.\"\"\"\nfrom poptimizer.data.domain import entity\n\n\nclass Entity(entity.BaseEntity):\n \"\"\"Test implementation of a domain-model entity.\"\"\"\n\n def __init__(self, attr_value):\n \"\"\"Stores a single attribute created during initialisation.\"\"\"\n super().__init__()\n self.test_attr = attr_value\n\n\ndef test_entity():\n \"\"\"Checks that state-change tracking works.\n\n - clean after creation\n - dirty after an attribute change\n - clean after the state is cleared\n \"\"\"\n test_odj = Entity(4)\n\n assert not test_odj.is_dirty()\n\n test_odj.test_attr = 5\n\n assert test_odj.is_dirty()\n\n test_odj.clear()\n\n assert not test_odj.is_dirty()\n","sub_path":"poptimizer/data/domain/tests/test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"82823421","text":"# Build the neural network; change the network parameters here\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nEPS = 0.003 # initialisation range for the output layer\r\n\r\ndef fanin_init(size, fanin=None):\r\n # fan-in initialisation; size is the number of neurons in the previous layer\r\n fanin = fanin or size[0]\r\n v = 1. 
/ np.sqrt(fanin)\r\n return torch.Tensor(size).uniform_(-v, v)\r\n\r\n\r\nclass NET(torch.nn.Module): # define the neural network\r\n def __init__(self, state_dim, action_dim):\r\n # outputs q(s, a) for every action\r\n super(NET, self).__init__()\r\n self.fc1 = nn.Linear(state_dim, 32) # change the network structure here\r\n self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())\r\n self.fc2 = nn.Linear(32, 32)\r\n self.fc2.weight.data = fanin_init(self.fc2.weight.data.size())\r\n self.fc3 = nn.Linear(32, action_dim)\r\n self.fc3.weight.data.uniform_(-EPS, EPS)\r\n\r\n def forward(self, state): # [m, state_dim] -> [m, action_dim] (table of q-values)\r\n x = F.relu(self.fc1(state))\r\n x = F.relu(self.fc2(x))\r\n action_values = self.fc3(x) # no activation on the output layer\r\n return action_values # return q for every action a\r\n\r\n\r\n\r\n","sub_path":"Python_dc-dc/old_v/alg/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"236767880","text":"import serial\nimport time\nimport threading\n\nexit_event = threading.Event()\n\ndef serSend():\n ser = serial.Serial('COM11',115200, timeout=0.1)\n while True:\n print('send:This is data for test')\n ser.write('This is data for test'.encode())\n time.sleep(1)\n if exit_event.is_set():\n break\n print('send thread terminated...')\n\n\nif __name__ == '__main__':\n t = threading.Thread(target=serSend)\n t.start()\n\n ser = serial.Serial('COM12',115200, timeout=0.1)\n try:\n while True:\n msg = ser.read(100)\n print(msg)\n time.sleep(0.1)\n except KeyboardInterrupt:\n print('KeyboardInterrupt...')\n exit_event.set()\n t.join()\n print('recv thread terminated...')\n \n\n","sub_path":"code4testing/sendGPS.py","file_name":"sendGPS.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"72134082","text":"import matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom neural_hw3 import dlnet\n\n# Data input\n\ndata = pd.read_csv('wdbc.data',header=None)\ndata.iloc[:,1].replace('B', 0,inplace=True)\ndata.iloc[:,1].replace('M', 1,inplace=True)\ndata = data.astype(float)\ndata.head(3)\nscaled_data=data\nnames = data.columns[1:13]\nscaler = MinMaxScaler()\nscaled_data = scaler.fit_transform(data.iloc[:,1:13])\nscaled_data = pd.DataFrame(scaled_data, columns=names)\n\n# Scale data\nx=scaled_data.iloc[:,2:13].values.transpose()\ny=data.iloc[:,1].values.transpose()\ny = np.array([y])\n\n# Create lists for output storage\ny_actual = []\npredictions = []\naccuracy = []\ntotal_accuracy = []\nloss_list = []\nincorrects = []\nincorrect_counter = 0\n\n# leave-one-out cross validation\nfor i in range(scaled_data.shape[0]):\n \n # Remove one sample from training set\n x_train = np.delete(x, i, axis=1)\n y_train = np.delete(y, i, axis=1)\n \n # Create neural net class\n nn = dlnet(x_train, y_train)\n final_loss = nn.gd(x_train, y_train)\n \n # Make k=1 testing set\n x_test = np.array([np.array(x[:,i])]).transpose()\n y_test = np.array([np.array(y[:,i])]).transpose()\n \n # Prediction, return prediction and accuracy\n pred_prob,pred_comp = nn.pred(x_test, y_test)\n \n # Add to lists\n predictions.append(pred_prob[0][0])\n y_actual.append(y_test[0][0])\n accuracy.append(pred_comp[0][0])\n \n # Check for incorrect predictions\n if pred_comp[0][0] != y_test[0][0]:\n incorrect_counter = incorrect_counter + 1\n incorrects.append(i)\n total_accuracy.append(100*((i+1)-incorrect_counter)/(i+1))\n 
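# Per-cycle bookkeeping: final_loss comes from training on the n-1 retained samples, and the plot below tracks the running accuracy over all leave-one-out cycles completed so far.\n 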
loss_list.append(final_loss[0][0])\n# print(\"accuracy: \", accuracy)\n# print(\"predictions: \",predictions)\n# print(\"y actual: \",y_actual)\n \n# Plot and output\n plt.plot(total_accuracy, 'o', color='black')\n axes = plt.gca()\n axes.set_ylim([0,100])\n plt.title(\"Neural net accuracy through leave-1-out cycle # \"+str(i))\n plt.ylabel('% Accuracy')\n plt.xlabel('leave-one-out cycles')\n plt.show()\n print(\"Cycle number: \", i)\n print(\"average loss: \", sum(loss_list)/len(loss_list))\n print(\"Total accuracy: \",total_accuracy[i])\n print(\"Incorrect predictions, sample #: \", incorrects)\n","sub_path":"pdf/ML_files/loo_wdbc_kearney.py","file_name":"loo_wdbc_kearney.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290034556","text":"from socket import *\nimport json\nfrom kazoo import client as kz_client\nimport logging\nimport os,time\nlogging.basicConfig()\nmy_client = kz_client.KazooClient(hosts='127.0.0.1:2181')\n\ndef my_listener(state):\n\tif state == kz_client.KazooState.CONNECTED:\n\t\tprint(\"Client connected !\")\n\nmy_client.add_listener(my_listener)\nmy_client.start()\nprint((\"*\"*30)+\"SERVER\"+(\"*\")*30)\nchildren=my_client.get_children(\"/server/\")\nchildren.sort()\n#print(children)\nmsdata=my_client.get(\"/server/\"+children[0])\n#print(msdata[0])\nmslist=msdata[0].split(\"=>\")\nserverName = mslist[0]\nserverPort = int(mslist[1])\nprint(serverName,serverPort)\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName,serverPort))\nserver_id=\"JR$AI\"\nserver_port=\"14000\"\nserver_name=\"0.0.0.0\"\nserver_port1=\"$14000\"\nserver_name1=\"$0.0.0.0\"\nserver_id1=\"AI\"\nsbind1=server_id+\"=>\"+server_name+server_name1+\"=>\"+server_port+server_port1\nclientSocket.send(sbind1)\nsbind=server_id+\"=>\"+server_name+\"=>\"+server_port\nsignal = clientSocket.recv(1024)\n\n#print(signal)\nclientSocket.close()\nsentence=[]\nskey=\"\"\nsvalue=\"\"\nf=0\n\nsdic={}\nsdic1={}\n\n\n\n@my_client.ChildrenWatch(\"/server\")\ndef wlis(children):\n\tglobal server_port\n\tmsflag=0\n\t#print(children)\n\tfor i in children:\n\t\tif(\"ms\" in i):\n\t\t\tmsflag=1\n\tif(msflag==0): \n\t\telection=my_client.Election(\"/server/\",\"server\")\n\t\tprint(election.lock.contenders()[0])\n\t\tsc=election.lock.contenders()[0].split(\"=>\")\n\t\tif(sc[-1]==server_port):\n\t\t\tprint(\"smallest\")\n\t\t\tos.system(\"gnome-terminal -x python nms.py\")\n\t\t\ttime.sleep(2)\n\t\telse:\n\t\t\ttime.sleep(5)\n\t\tos.system(\"gnome-terminal -x python ns2.py\")\n\t\tos._exit(2)\n\tmsflag=0\n\n\nno_ch=my_client.get_children(\"/server/\")\nfor i in range(0,len(no_ch)):\n\tif(\"server3\" in no_ch[i]):\n\t\tdata=my_client.get(\"/server/\"+no_ch[i])\n\t\tprint(data[0])\n\t\treplist=data[0].split(\"=>\")\n\t\tserverName = replist[1]\n\t\tserverPort = int(replist[2])\n\t\tprint(serverName,serverPort)\n\t\tclientSocket = socket(AF_INET, SOCK_STREAM)\n\t\ttry:\n\t\t\tclientSocket.connect((serverName,serverPort))\n\t\t\tsentence=server_name+\"=>\"+server_port+\"=>\"+\"ret\"\n\t\t\tclientSocket.send(sentence)\n\t\t\trstatus=clientSocket.recv(30000)\n\t\t\tprint(\"GOT DATA FROM REPLICA\")\n\t\t\t#print(rstatus)\n\t\t\tdata_load=json.loads(rstatus)\n\t\t\t#print(data_load)\n\t\t\tclientSocket.close()\n\t\texcept:\n\t\t\tpass\n\t\tsdic.update(data_load)\n\t\tprint(\"sdic\",sdic)\n\t\tprint(\"RECEIVED DATA:\",sdic)\n\nrf=0\nif(signal==\"signal\"):\n\tprint(\"SERVER TO ZOOKEEPER INITIALISATION 
\")\n\tmy_client.create(\"/server/server2\",sbind,ephemeral=True,sequence=True)\n\tserverSocket = socket(AF_INET,SOCK_STREAM)\n\tserverSocket.setsockopt(SOL_SOCKET,SO_REUSEADDR, 1)\n\tserverSocket.bind((server_name,int(server_port)))\n\tserverSocket.listen(1)\n\tprint(\"The server is ready to receive\")\n\twhile 1:\n\t\tconnectionSocket, addr = serverSocket.accept()\n\t\t#print(\"cs\",connectionSocket)\n\t\tsentence = connectionSocket.recv(1024)\n\t\tsentence1=sentence.split(\"=>\")\n\t\tskey=sentence1[0]\n\t\tsvalue=sentence1[1]\n\t\ttry:\n\t\t\tf=sentence1[2]\n\t\texcept:\n\t\t\tf=\"\"\n\t\t#print(sentence)\n\t\t#print(f)\n\t\tif(f==\"ret\"):\n\t\t\tprint(\"SENDING REPLICA CONTENT\")\n\t\t\tdata_dic=json.dumps(sdic1)\n\t\t\tconnectionSocket.send(data_dic)\n\t\t\tconnectionSocket.close()\n\t\t\tcontinue\n\t\t\t\n\t\tk=ord(skey[0])\n\t\tif(k>=ord(server_id[0]) and k<=ord(server_id[1])):\n\t\t\tif(svalue==\"\"):\n\t\t\t\tconnectionSocket.send(sdic[skey])\n\t\t\telse:\n\t\t\t\tsdic[skey]=json.loads(svalue)\n\t\t\t\tconnectionSocket.send(\"Received\")\n\t\t\t\tno_ch=my_client.get_children(\"/server/\")\n\t\t\t\tfor i in range(0,len(no_ch)):\n\t\t\t\t\tif(\"server3\" in no_ch[i]):\n\t\t\t\t\t\tdata=my_client.get(\"/server/\"+no_ch[i])\n\t\t\t\t\t\t#print(data[0])\n\t\t\t\t\t\treplist=data[0].split(\"=>\")\n\t\t\t\t\t\tserverName = replist[1]\n\t\t\t\t\t\tserverPort = int(replist[2])\n\t\t\t\t\t\tprint(\"REPLICA SERVER ADDRESS\")\n\t\t\t\t\t\tprint(serverName,serverPort)\n\t\t\t\t\t\tclientSocket = socket(AF_INET, SOCK_STREAM)\n\t\t\t\t\t\tclientSocket.connect((serverName,serverPort))\n\t\t\t\t\t\tsentence=skey+\"=>\"+svalue+\"=>\"+\"s\"\n\t\t\t\t\t\tprint(\"-\"*40)\n\t\t\t\t\t\tprint(\"SENDING SEVER DATA TO REPLICA\")\n\t\t\t\t\t\tprint(\"-\"*40)\n\t\t\t\t\t\tclientSocket.send(sentence)\n\t\t\t\t\t\tclientSocket.close()\n\t\t\tprint(\"SERVER DATA\")\n\t\t\tprint(sdic)\n\t\t\tprint(\"-\"*40)\n\t\telif(k>=ord(server_id1[0]) and k<=ord(server_id1[1])):\n\t\t\tchildren=my_client.get_children(\"/server/\")\n\t\t\tchildren.sort()\n\t\t\tchildren=children[1:]\n\t\t\t#print(children)\n\t\t\tif(f!=\"s\"):\n\t\t\t\tprint(\"if\")\n\t\t\t\tfor i in children:\n\t\t\t\t\tif(\"ms\" not in i):\n\t\t\t\t\t\tchild_data=my_client.get(\"/server/\"+i)\n\t\t\t\t\t\tcdlist=child_data[0].split(\"=>\")\n\t\t\t\t\t\tcd_id=cdlist[0].split(\"$\")\n\t\t\t\t\t\t#server_list.append(cd_id[0])\n\t\t\t\t\t\tif(k>=ord(cd_id[0][0]) and k<=ord(cd_id[0][1])):\n\t\t\t\t\t\t\trf=0\n\t\t\t\t\t\t\tconnectionSocket.send(\"404\")\n\t\t\t\t\t\t\tconnectionSocket.close()\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trf=1\n\t\t\t\tif(rf==1):\n\t\t\t\t\tprint(\"QUERYING FROM REPLICA DATA\")\n\t\t\t\t\tif(svalue==\"\"):\n\t\t\t\t\t\tprint(skey)\n\t\t\t\t\t\tconnectionSocket.send(sdic1[skey])\n\t\t\t\t\telse:\n\t\t\t\t\t\t#print(\"here\")\n\t\t\t\t\t\tsdic1[skey]=json.loads(svalue)\n\t\t\t\t\t\tconnectionSocket.send(\"FROM REPLICA \")\t\t\t\t\n\t\t\telse:\n\t\t\t\tif(svalue==\"\"):\n\t\t\t\t\t#print(skey)\n\t\t\t\t\tconnectionSocket.send(sdic1[skey])\n\t\t\t\telse:\n\t\t\t\t\t#print(\"here\")\n\t\t\t\t\tsdic1[skey]=json.loads(svalue)\n\t\t\t\t\tconnectionSocket.send(\"FROM REPLICA \")\n\t\t\t\tprint(\"-\"*40)\n\t\t\t\tprint(\"REPLICA DATA STORED IN 
SERVER\")\n\t\t\t\tprint(sdic1)\n\t\t\t\tprint(\"-\"*40)\n\t\telse:\n\t\t\tconnectionSocket.send(\"404\")\n\t\t\tconnectionSocket.close()\n\n\n\n\n","sub_path":"ns2.py","file_name":"ns2.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"427484846","text":"import os\nimport time\n\nfrom drltr.infrastructure.rl_trainer import RL_Trainer\nfrom drltr.agents.dqn_agent import DQNAgent\nfrom drltr.infrastructure.dqn_utils import get_env_kwargs\n\n\nclass Q_Trainer(object):\n\n def __init__(self, params):\n self.params = params\n\n train_args = {\n 'num_agent_train_steps_per_iter': params['num_agent_train_steps_per_iter'],\n 'num_critic_updates_per_agent_update': params['num_critic_updates_per_agent_update'],\n 'train_batch_size': params['batch_size'],\n 'double_q': params['double_q'],\n }\n\n env_args = get_env_kwargs(params['env_name'], params['lunar_arch'])\n\n self.agent_params = {**train_args, **env_args, **params}\n\n self.params['agent_class'] = DQNAgent\n self.params['agent_params'] = self.agent_params\n self.params['train_batch_size'] = params['batch_size']\n self.params['env_wrappers'] = self.agent_params['env_wrappers']\n\n self.rl_trainer = RL_Trainer(self.params)\n\n def run_training_loop(self):\n self.rl_trainer.run_training_loop(\n self.agent_params['num_timesteps'],\n collect_policy = self.rl_trainer.agent.actor,\n eval_policy = self.rl_trainer.agent.actor,\n )\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env_name', default='PongNoFrameskip-v4',\n choices=('PongNoFrameskip-v4',\n 'LunarLander-v2')\n )\n\n parser.add_argument('--ep_len', type=int, default=200)\n parser.add_argument('--exp_name', type=str, default='todo')\n\n parser.add_argument('--eval_batch_size', type=int, default=1000)\n\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1)\n parser.add_argument('--num_critic_updates_per_agent_update', type=int, default=1)\n parser.add_argument('--double_q', action='store_true')\n\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--use_gpu', '-gpu', action='store_true')\n parser.add_argument('--which_gpu', '-gpu_id', default=0)\n parser.add_argument('--scalar_log_freq', type=int, default=int(1e4))\n\n parser.add_argument('--save_params', action='store_true')\n\n # For specifying the number of nodes in the fully connected layer of\n # lunar lander neural network\n parser.add_argument('--lunar_arch1', type=int, default=64)\n parser.add_argument('--lunar_arch2', type=int, default=64)\n\n args = parser.parse_args()\n\n # convert to dictionary\n params = vars(args)\n params['video_log_freq'] = -1 # This param is not used for DQN\n\n params['lunar_arch'] = (params['lunar_arch1'], params['lunar_arch2'])\n ##################################\n ### CREATE DIRECTORY FOR LOGGING\n ##################################\n\n logdir_prefix = 'dqn_'\n if args.double_q:\n logdir_prefix += 'double_q_'\n\n data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data')\n\n if not (os.path.exists(data_path)):\n os.makedirs(data_path)\n\n logdir = logdir_prefix + args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join(data_path, logdir)\n params['logdir'] = logdir\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n print(\"\\n\\n\\nLOGGING TO: \", logdir, \"\\n\\n\\n\")\n\n 
trainer = Q_Trainer(params)\n trainer.run_training_loop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"drltr/scripts/run_hw3_dqn.py","file_name":"run_hw3_dqn.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"649914630","text":"import os\nimport cv2\nimport numpy as np\nimport queue\nfrom math import pi\n\n\ndef bfs(img, x, y, direction, r):\n visit = set([(x, y)])\n q = queue.Queue()\n q.put((x, y))\n height, width = img.shape[:2]\n while not q.empty():\n i, j = q.get()\n for ii, jj in direction:\n if ii + i >= 0 and ii + i < height and jj + j >= 0 and jj + j < width and not (\n ii + i,\n jj + j) in visit and (img[i + ii, j + jj, 0] < 60\n and img[i + ii, j + jj, 1] < 60\n and img[i + ii, j + jj, 2] < 60):\n q.put((ii + i, jj + j))\n visit.add((ii + i, jj + j))\n if len(visit) > np.around(pi * r**2) * 1.5:\n return False, visit\n return True, visit\n\n\ndef detect_circle(image):\n dst = cv2.GaussianBlur(image, (13, 15), 15) # apply a Gaussian blur; adjusting the kernel ksize also makes detection work\n gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)\n circles = cv2.HoughCircles(gray,\n cv2.HOUGH_GRADIENT,\n 1,\n 200,\n param1=50,\n param2=20,\n minRadius=50,\n maxRadius=250)\n # 1,200,50,20,50,250\n # 1,50,50,10,0,30\n height, width = image.shape[:2]\n if circles is not None and len(circles) > 0:\n circles = np.uint16(np.around(circles)) # np.around rounds the values; cast to integers\n direction = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n for circle in circles[0, :]:\n x, y, r = circle\n print('circle=' + str(circle))\n if y < 0 or y >= height or x < 0 or x >= width:\n continue\n q = queue.Queue()\n q.put((y, x))\n visit = set([(y, x)])\n while not q.empty():\n i, j = q.get()\n if image[i, j, 0] < 60 and image[i, j, 1] < 60 and image[\n i, j, 2] < 60:\n f, v = bfs(image, i, j, direction, r)\n if f:\n for k in v:\n image[k[0], k[1]] = (150, 150, 150)\n break\n else:\n for ii, jj in direction:\n if ii + i >= 0 and ii + i < height and jj + j >= 0 and jj + j < width and not (\n ii + i, jj + j) in visit and (\n ii + i - y)**2 + (jj + j - x)**2 <= r**2:\n q.put((ii + i, jj + j))\n visit.add((ii + i, jj + j))\n print('circle detected')\n return image\n\n\ndef detect_circle_demo(image):\n dst = cv2.GaussianBlur(image, (13, 15), 15) # apply a Gaussian blur; adjusting the kernel ksize also makes detection work\n gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)\n circles = cv2.HoughCircles(gray,\n cv2.HOUGH_GRADIENT,\n 1,\n 200,\n param1=50,\n param2=20,\n minRadius=50,\n maxRadius=250)\n # 1,200,50,20,50,250\n # 1,50,50,10,0,30\n height, width = image.shape[:2]\n if circles is not None and len(circles) > 0:\n circles = np.uint16(np.around(circles)) # np.around rounds the values; cast to integers\n for i in circles[0, :]:\n print('circle=' + str(i))\n cv2.circle(image, (i[0], i[1]), i[2], (0, 0, 255), 2)\n cv2.circle(image, (i[0], i[1]), 2, (255, 0, 0), 2) # circle centre\n print('circle detected')\n return image\n\n\nif __name__ == '__main__':\n images_path = 'C:/Users/jf/Desktop/1'\n output_path = 'C:/Users/jf/Desktop/2'\n images_dir = os.listdir(images_path)\n for image in images_dir:\n src = cv2.imread(images_path + '/' + image)\n src_circle = cv2.imread(images_path + '/' + image)\n cv2.imwrite(output_path + '/' + image.replace('.JPG', '_2.JPG'),\n detect_circle(src))\n cv2.imwrite(output_path + '/' + image.replace('.JPG', '_circle.JPG'),\n detect_circle_demo(src_circle))\n","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
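The circle.py record above couples cv2.HoughCircles with a BFS flood fill that greys out dark blobs inside each detected circle. For reference, a minimal sketch of just the detection step; the function name and image path are placeholders of mine, and the blur/Hough parameters simply mirror the values used in the record rather than canonical settings:

import cv2
import numpy as np

def find_circles(image_path):
    # The Hough gradient method is noise-sensitive, so blur before detecting.
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(cv2.GaussianBlur(image, (13, 15), 15), cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=200,
                               param1=50, param2=20, minRadius=50, maxRadius=250)
    if circles is None:
        return []
    # Each detection is (x_center, y_center, radius), rounded to integers.
    return list(np.uint16(np.around(circles))[0, :])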
+{"seq_id":"379661459","text":"import tensorflow as tf\n\nfrom common.models.audio_bottom_blocks import get_Identity_block\nfrom common.models.audio_core_blocks import get_ResNet38_PANN_block\nfrom common.models.variational_audio_core_blocks import get_VariationalResNet38_PANN_block\nfrom common.models.embedding_pooling import get_attention_global_pooling\nfrom common.models.variational_embedding_pooling import get_variational_attention_global_pooling\n\n\ndef get_model(name_to_metadata,\n model_configuration):\n # The below configuration stuff are defined in the YAML files.\n bottom_model = model_configuration[\"bottom_model\"]\n bottom_model_configuration = model_configuration[\"bottom_model_configuration\"]\n\n core_model = model_configuration[\"core_model\"]\n core_model_configuration = model_configuration[\"core_model_configuration\"]\n\n input_type_list = model_configuration[\"input_type_list\"]\n y_pred_names = model_configuration[\"output_type_list\"]\n\n global_pooling = model_configuration[\"global_pooling\"]\n global_pooling_configuration = model_configuration[\"global_pooling_configuration\"]\n\n if \"bayesian\" in model_configuration.keys():\n if \"use_logit_vars\" in model_configuration[\"bayesian\"].keys():\n use_logit_vars = model_configuration[\"bayesian\"][\"use_logit_vars\"]\n else:\n use_logit_vars = False\n else:\n use_logit_vars = False\n\n # The below list is required for initialising a Keras model.\n input_layer_list = list()\n for input_type in input_type_list:\n input_layer_list.append(tf.keras.Input(shape=name_to_metadata[input_type][\"numpy_shape\"]))\n\n # I like splitting the model in 3 blocks.\n\n # A) The bottom block, that performs simple processes on the input data: e.g., concatenating modalities.\n custom_objects = dict()\n\n bottom_kl_loss = 0.0\n if bottom_model == \"Identity\":\n net_train,\\\n net_test = get_Identity_block(input_layer_list,\n bottom_model_configuration)\n else:\n raise ValueError(\"Invalid\")\n\n # B) The core model, this is where it gets deep.\n core_kl_loss = 0.0\n if core_model == \"ResNet38_PANN\":\n net_train, \\\n net_test = get_ResNet38_PANN_block(net_train,\n net_test,\n core_model_configuration)\n elif core_model == \"VariationalResNet38_PANN\":\n net_train, \\\n net_test, \\\n net_train_var, \\\n net_test_var, \\\n kl_loss = get_VariationalResNet38_PANN_block(net_train,\n net_test,\n core_model_configuration)\n core_kl_loss = core_kl_loss + kl_loss\n else:\n raise ValueError(\"Invalid core_model type.\")\n\n # C) For sequential data (audio, speech, video, text), we may need to perform a pooling of the features from all\n # sequence frames, into 1 \"frame\" that summarises the entire sequence.\n pool_kl_loss = 0.0\n if global_pooling == \"Attention\":\n prediction_train, \\\n prediction_test = get_attention_global_pooling(net_train,\n net_test,\n y_pred_names,\n global_pooling_configuration)\n elif global_pooling == \"VariationalAttention\":\n prediction_train, \\\n prediction_test, \\\n kl_loss = get_variational_attention_global_pooling(net_train,\n net_test,\n net_train_var,\n net_test_var,\n y_pred_names,\n global_pooling_configuration)\n\n pool_kl_loss = pool_kl_loss + kl_loss\n else:\n raise ValueError(\"Invalid global_pooling type.\")\n\n if len(y_pred_names) == 1:\n y_pred_name = y_pred_names[0]\n if use_logit_vars:\n keras_model_train = tf.keras.Model(inputs=input_layer_list, outputs=tf.concat([prediction_train[y_pred_name],\n prediction_train[y_pred_name + \"_var\"]],\n axis=1))\n keras_model_test = 
tf.keras.Model(inputs=input_layer_list, outputs=tf.concat([prediction_train[y_pred_name],\n prediction_train[y_pred_name + \"_var\"]],\n axis=1))\n else:\n keras_model_train = tf.keras.Model(inputs=input_layer_list, outputs=prediction_train[y_pred_name])\n keras_model_test = tf.keras.Model(inputs=input_layer_list, outputs=prediction_test[y_pred_name])\n else:\n raise NotImplementedError\n\n keras_model_test.summary()\n\n kl_loss = bottom_kl_loss + core_kl_loss + pool_kl_loss\n\n other_outputs = dict()\n other_outputs[\"kl_loss\"] = kl_loss\n\n return prediction_train, prediction_test, keras_model_train, keras_model_test, other_outputs, custom_objects\n","sub_path":"src/common/architecture.py","file_name":"architecture.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36245955","text":"import urllib.request\nimport re\n\ndef SaveImg(url, name):\n res = urllib.request.Request(url)\n res.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36')\n\n page = urllib.request.urlopen(res)\n img = page.read()\n\n with open(name, \"wb\") as f:\n f.write(img)\n\n\nurl = 'https://tieba.baidu.com/p/5106584395'\n\nres = urllib.request.Request(url)\nres.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36')\n\ncont = urllib.request.urlopen(res)\n\n#print(cont.geturl())\n\npage = cont.read()\n\nhtml = page.decode('utf-8')\n\nwith open(\"PageContent.txt\", \"w\", encoding='utf-8') as f:\n f.write(html)\n\nimglist = re.findall(r'\")\n q_section_path = [item.strip() for item in q_section_path]\n q_section_path = [item for item in q_section_path if item!=\"\"]\n section_name = q_section_path[-1]\n for iter_sec in corpus_dict:\n iter_sec_content = corpus_dict[iter_sec]\n if section_name == iter_sec_content['title']:\n iter_sec_path = iter_sec_content['section_hierarchy'].split(\">\")\n iter_sec_path = [corpus_dict[item]['title'] for item in iter_sec_path]\n if iter_sec_path[0:3] == q_section_path[0:3]:\n return iter_sec_content['id']\n return None #if nothing matches\n\ndef get_acc(df_q_feats, df_node_feats, corpus_dict, qna_list, topK = 5):\n correct = 0\n total = df_q_feats.shape[0]\n\n q_arr = np.asarray(df_q_feats)\n node_arr = np.asarray(df_node_feats[:][:-1])\n\n cos_sim_mat = cosine_similarity(q_arr, node_arr)\n # print(cos_sim_mat.shape)\n\n # max_sim = np.argmax(cos_sim_mat, axis = 1)\n max_sim = np.argsort(cos_sim_mat, axis = 1)[:,-topK:][:, ::-1]\n # print(max_sim.shape)\n\n for idx in range(total):\n pred_sections = [\"section_{}\".format(max_sim[idx][k]) for k in range(topK)]\n actual_section = match_path_with_section(qna_list[idx]['Section Hierarchy'], corpus_dict)\n if actual_section in pred_sections:\n # print(idx)\n correct+=1\n print(correct, correct/total)\n return max_sim\n\ndef add_qid(q_dict, indices):\n q_list = q_dict['All_Questions']\n for idx in range(len(q_list)):\n q_list[idx]['qid'] = \"q_\" + str(indices[idx]) #len of q_dict and indices are the same\n\n q_dict['All_Questions'] = q_list\n return q_dict\n\ndef get_split(indices, q_section_dict, max_sim_mat, qna_list, corpus_dict):\n sent1 = []\n sent2 = []\n labels = []\n for idx in indices:\n\n actual_sec_id = q_section_dict[\"q_\" + str(idx)].replace(\"section_\", \"\")\n # idx = int(idx.replace(\"q_\", \"\"))\n actual_sec_id = int(actual_sec_id)\n for sec_id in 
max_sim_mat[idx].tolist():\n sent1.append(qna_list[idx]['Question'])\n sent2.append(corpus_dict[\"section_{}\".format(sec_id)]['t5_para'])\n if sec_id == actual_sec_id:\n labels.append(1)\n else:\n labels.append(0)\n df = pd.DataFrame(columns = ['sentence_1', 'sentence_2', 'label'])\n df['sentence_1'] = sent1\n df['sentence_2'] = sent2\n df['label'] = labels\n print(df.head())\n print(df.shape)\n print((np.asarray(labels)==1).sum())\n return df\n\ndef get_split_for_rc(stage, relation_df, stage_q_dict, corpus_dict):\n sent1 = []\n sent2 = []\n labels = []\n # qids = []\n relation_df.reset_index(drop = True, inplace = True)\n for i in range(relation_df.shape[0]) :\n labels.append(relation_df['2'][i])\n temp = relation_df['1'][i]\n temp = temp.split(\"|\")\n # if temp[0] not in qids:\n # qids.append(temp[0]) #store uniqs\n # print(stage_q_dict)\n sent1.append(stage_q_dict[int(temp[0].replace(\"{}_Q\".format(stage), \"\"))][\"QUESTION_TEXT\"])\n sent2.append(corpus_dict[temp[1]]['text'][int(temp[2])])\n\n df = pd.DataFrame(columns = ['sentence_1', 'sentence_2', 'label'])\n df['sentence_1'] = sent1\n df['sentence_2'] = sent2\n df['label'] = labels\n print(df.head())\n print(df.shape)\n return df\n\ndef get_split_for_rc_tokwise(stage, relation_df, stage_q_dict, corpus_dict):\n sent1 = []\n sent2 = []\n labels = []\n qid_q = OrderedDict()\n qid_labels = OrderedDict()\n qid_section_dict = OrderedDict()\n # qids = []\n relation_df.reset_index(drop = True, inplace = True)\n for i in range(relation_df.shape[0]) :\n # labels.append(relation_df['2'][i])\n temp = relation_df['1'][i]\n temp = temp.split(\"|\")\n # if temp[0] not in qids:\n # qids.append(temp[0]) #store uniqs\n # print(stage_q_dict)\n qid = temp[0]\n q = stage_q_dict[int(temp[0].replace(\"{}_Q\".format(stage), \"\"))][\"QUESTION_TEXT\"]\n ans_sent = corpus_dict[temp[1]]['text'][int(temp[2])]\n ans_sent = remove_punct(ans_sent)\n if qid not in qid_section_dict:\n gt_ans = stage_q_dict[int(qid.replace(\"{}_Q\".format(stage), \"\"))][\"ANSWER\"]\n qid_q[qid] = q\n qid_section_dict[qid] = [ans_sent]\n if ans_sent in remove_punct(gt_ans):\n qid_labels[qid] = [1]\n else:\n qid_labels[qid] = [0]\n else:\n qid_section_dict[qid].append(ans_sent)\n if ans_sent in remove_punct(gt_ans):\n qid_labels[qid].append(1)\n else:\n qid_labels[qid].append(0)\n for qid in qid_section_dict:\n sent1.append(qid_q[qid])\n sent2.append(qid_section_dict[qid])\n labels.append(qid_labels[qid])\n\n df = pd.DataFrame(columns = ['sentence_1', 'sentence_2', 'label'])\n df['sentence_1'] = sent1\n df['sentence_2'] = sent2\n df['label'] = labels\n print(df.head())\n print(df.shape)\n return df\n\ndef get_dataset(df, tokenizer):\n sentences_1 = df.sentence_1.values\n sentences_2 = df.sentence_2.values\n labels = df.label.values\n input_ids = []\n attention_masks = []\n token_type_ids = []\n\n # For every sentence...\n for sent_idx in tqdm(range(len(sentences_1))):\n # inp = sentences_1[sent_idx] + '[SEP]'+ sentences_2[sent_idx]\n # `encode_plus` will:\n # (1) Tokenize the sentence.\n # (2) Prepend the `[CLS]` token to the start.\n # (3) Append the `[SEP]` token to the end.\n # (4) Map tokens to their IDs.\n # (5) Pad or truncate the sentence to `max_length`\n # (6) Create attention masks for [PAD] tokens.\n encoded_dict = tokenizer(\n sentences_1[sent_idx], # Input to encode.\n sentences_2[sent_idx],\n add_special_tokens = True, # To Add '[CLS]' and '[SEP]'\n max_length = 512, # Pad & truncate all sentences.\n truncation = True,\n pad_to_max_length = True,\n 
return_attention_mask = True, # Construct attn. masks.\n return_token_type_ids = True,\n return_tensors = 'pt', # Return pytorch tensors.\n )\n \n # Add the encoded sentence to the list. \n input_ids.append(encoded_dict['input_ids'])\n\n token_type_ids.append(encoded_dict['token_type_ids'])\n \n # And its attention mask (simply differentiates padding from non-padding).\n attention_masks.append(encoded_dict['attention_mask'])\n\n # Convert the lists into tensors.\n input_ids = torch.cat(input_ids, dim=0)\n token_type_ids = torch.cat(token_type_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n labels = torch.tensor(labels)\n\n # Print sentence 0, now as a list of IDs.\n print('Original: ', sentences_1[0], sentences_2[0])\n print('Token IDs:', input_ids[0])\n\n print(input_ids.shape, token_type_ids.shape, attention_masks.shape, labels.shape)\n\n # Combine the training inputs into a TensorDataset.\n dataset = TensorDataset(input_ids, token_type_ids, attention_masks, labels)\n return dataset\n\ndef get_dataset_tokwise(df, tokenizer):\n sentences_1 = df.sentence_1.values\n sentences_2 = df.sentence_2.values\n init_labels = df.label.values\n labels = []\n input_ids = []\n attention_masks = []\n token_type_ids = []\n\n # For every sentence...\n for sent_idx in tqdm(range(len(sentences_1))):\n temp_labels = []\n # inp = sentences_1[sent_idx] + '[SEP]'+ sentences_2[sent_idx]\n # `encode_plus` will:\n # (1) Tokenize the sentence.\n # (2) Prepend the `[CLS]` token to the start.\n # (3) Append the `[SEP]` token to the end.\n # (4) Map tokens to their IDs.\n # (5) Pad or truncate the sentence to `max_length`\n # (6) Create attention masks for [PAD] tokens.\n sentences = [sentences_1[sent_idx]] + sentences_2[sent_idx]\n compound = \" \" + \" \".join(sentences) + \"\"\n encoded_dict = tokenizer(\n compound,\n add_special_tokens = False, # To Add '[CLS]' and '[SEP]'\n max_length = 512, # Pad & truncate all sentences.\n truncation = True,\n pad_to_max_length = True,\n return_attention_mask = True, # Construct attn. masks.\n return_token_type_ids = True,\n return_tensors = 'pt', # Return pytorch tensors.\n )\n inp_ = encoded_dict['input_ids'].view(-1).tolist()\n # print(inp_)\n count_sep = 0\n count_tok = 0\n for idx, item in enumerate(inp_):\n count_tok += 1\n if item == 2 and inp_[idx - 1] == 2 and count_sep == 0:\n temp_labels.extend([2]*count_tok)\n count_tok = 0\n count_sep += 1\n elif (item == 2 and inp_[idx - 1] == 2) or (item == 2 and inp_[idx + 1] == 1):\n temp_labels.extend([init_labels[sent_idx][count_sep - 1]]*count_tok)\n count_tok = 0\n count_sep += 1\n temp_labels.extend([3]*count_tok)\n # Add the encoded sentence to the list. 
\n input_ids.append(encoded_dict['input_ids'])\n\n token_type_ids.append(encoded_dict['token_type_ids'])\n \n # And its attention mask (simply differentiates padding from non-padding).\n attention_masks.append(encoded_dict['attention_mask'])\n\n labels.append(temp_labels)\n\n # Convert the lists into tensors.\n print(encoded_dict['input_ids'].shape)\n print(np.array(labels).shape)\n input_ids = torch.cat(input_ids, dim=0)\n token_type_ids = torch.cat(token_type_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n labels = torch.tensor(labels)\n\n # Print sentence 0, now as a list of IDs.\n # print('Original: ', sentences_1[0], sentences_2[0])\n print('Token IDs:', input_ids[0])\n\n print(input_ids.shape, token_type_ids.shape, attention_masks.shape, labels.shape)\n\n # Combine the training inputs into a TensorDataset.\n dataset = TensorDataset(input_ids, token_type_ids, attention_masks, labels)\n return dataset\n\ndef get_qna_list(q_folderpath):\n start = 0\n\n with open(os.path.join(q_folderpath, \"train_annotation.json\")) as f:\n train_q_dict = json.load(f)\n len_train = len(train_q_dict['All_Questions'])\n train_indices = [i for i in range(start, start + len_train)]\n train_q_dict = add_qid(train_q_dict, train_indices)\n\n start += len_train\n\n with open(os.path.join(q_folderpath, \"valid_annotation.json\")) as f:\n valid_q_dict = json.load(f)\n len_valid = len(valid_q_dict['All_Questions'])\n valid_indices = [i for i in range(start, start + len_valid)]\n valid_q_dict = add_qid(valid_q_dict, valid_indices)\n\n start += len_valid\n\n with open(os.path.join(q_folderpath, \"test_annotation.json\")) as f:\n test_q_dict = json.load(f)\n len_test = len(test_q_dict['All_Questions'])\n test_indices = [i for i in range(start, start + len_test)]\n test_q_dict = add_qid(test_q_dict, test_indices)\n\n qna_list = []\n qna_list.extend(train_q_dict['All_Questions'])\n qna_list.extend(valid_q_dict['All_Questions'])\n qna_list.extend(test_q_dict['All_Questions'])\n return qna_list, train_indices, valid_indices, test_indices\n\ndef get_tfidf_vectorizer(corpus_dict):\n all_docs = []\n for section in corpus_dict:\n all_docs.append(corpus_dict[section]['t5_para'])\n from sklearn.feature_extraction.text import TfidfVectorizer\n\n vectorizer = TfidfVectorizer(max_df=.75, min_df=1, stop_words='english', use_idf=True, norm=None, sublinear_tf=True)\n vectorizer.fit(all_docs)\n return vectorizer\n\ndef get_tfidf_vector(str_, tfidf_vec):\n return tfidf_vec.transform([str_]).toarray().reshape((-1,)).tolist()\n\ndef get_section_features(corpus_dict, tfidf_vec=None):\n vec_list = []\n sec_list = []\n for section in corpus_dict:\n sec_list.append(section)\n temp = corpus_dict[section]\n temp = temp['t5_para']\n temp = get_tfidf_vector(temp, tfidf_vec)\n vec_list.append(temp)\n # print(section)\n vecs = np.asarray(vec_list)\n print(vecs.shape)\n col_names = [\"f{}\".format(i) for i in range(1, vecs.shape[1] + 1)]\n index = sec_list\n df = pd.DataFrame(data = vecs, index = index, columns = col_names)\n return df\n\ndef get_q_features(qna_list, tfidf_vec=None):\n vec_list = []\n q_list = [] \n for i in range(len(qna_list)):\n q_list.append(\"q_{}\".format(i))\n temp = qna_list[i]['Question']\n temp = get_tfidf_vector(temp, tfidf_vec)\n vec_list.append(temp)\n # print(i)\n vecs = np.asarray(vec_list)\n col_names = [\"f{}\".format(i) for i in range(1, vecs.shape[1] + 1)]\n index = q_list\n df = pd.DataFrame(data = vecs, index = index, columns = col_names)\n return df\n\ndef get_q_section_dict(qna_list, 
corpus_dict):\n q_section_dict = {}\n\n for idx, item in tqdm(enumerate(qna_list)):\n section = match_path_with_section(item['Section Hierarchy'], corpus_dict)\n if section is None:\n print(\"Wrong - {}\".format(item['qid']))\n print(qna_list[idx]['Section Hierarchy'])\n else:\n q_section_dict[item['qid']] = section\n\n return q_section_dict","sub_path":"MTL_scripts/MTL_utils.py","file_name":"MTL_utils.py","file_ext":"py","file_size_in_byte":15262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117456630","text":"\"\"\"\n42. Trapping Rain Water\nGiven n non-negative integers representing an elevation map where each bar has width 1, compute how much rainwater the bars can trap after it rains.\n\n\n\nThe elevation map above is represented by the array [0,1,0,2,1,0,1,3,2,1,2,1]; in this case 6 units of rainwater can be trapped (the blue area marks the water). Thanks to Marcos for contributing the figure.\n\nExample:\n\nInput: [0,1,0,2,1,0,1,3,2,1,2,1]\nOutput: 6\n\"\"\"\n\n\nfrom typing import List\n\n\ndef trap(height: List[int]) -> int:\n # initialise some variables: the result and the array length\n ans = 0\n size = len(height)\n # outer loop: visit every position\n for i in range(size):\n leftmax = 0 # track the highest bar to the left of this position\n rightmax = 0 # track the highest bar to the right of this position\n for j in range(i, size): # find and record the tallest bar to the right of the current position\n rightmax = max(height[j], rightmax)\n for j in range(i, -1, -1): # find and record the tallest bar to the left of the current position\n leftmax = max(height[j], leftmax)\n # work out how much water this position holds and add it to the result\n ans = ans + min(leftmax, rightmax) - height[i]\n return ans\n\n\nif __name__ == '__main__':\n j = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]\n print(trap(j))\n\n\ndef trap1(height: List[int]) -> int:\n if not height:\n return 0\n n = len(height)\n left = 0\n right = n - 1\n ans = 0\n l_max = height[0]\n r_max = height[n - 1]\n while left <= right:\n l_max = max(l_max, height[left])\n r_max = max(r_max, height[right])\n if l_max < r_max:\n ans += l_max - height[left]\n left += 1\n elif l_max >= r_max:\n ans += r_max - height[right]\n right -= 1\n return ans\n\n\nif __name__ == '__main__':\n j = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]\n print(trap1(j))\n","sub_path":"old-zgd/42.接雨水.py","file_name":"42.接雨水.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"408321890","text":"import pandas as pd\nimport subprocess\n\n\ndef ratio(x, y, z, w):\n return w[0]*x+w[1]*y+w[2]*z\n\n\ndef is_black(sa):\n global blacklist\n\n for ln in range(len(blacklist)):\n if sa in blacklist.get_value(ln, \"sa\"):\n return True\n return False\n\n\ndef arg_warning(sa):\n global warning\n\n for ln in range(len(warning)):\n if sa in warning.get_value(ln, \"sa\"):\n return ln\n return -1\n\n\nmx = 0\nwarn = 0\natt = 0\nw_list = []\nwith open(\"init\criteria.txt\", 'r') as file:\n for line in file:\n if \"warning\" in line:\n warn = float(line[line.find(\":\") + 2:])\n if \"attack\" in line:\n att = float(line[line.find(\":\") + 2:])\n if \"parameter\" in line:\n i = float(line[line.find(\":\") + 2:])\n w_list.append(i)\n if \"max\" in line:\n mx = int(line[line.find(\":\") + 2:])\n\np1 = subprocess.Popen(\"python classifier.py\", shell=True)\np2 = subprocess.Popen(\"python cluster.py\", shell=True)\np3 = subprocess.Popen(\"python coworker.py\", shell=True)\n\np1.wait()\np2.wait()\np3.wait()\n\nclassification = pd.read_csv(\"result_class.csv\")\ncluster = pd.read_csv(\"result_cluster.csv\")\nworker = pd.read_csv(\"result_co.csv\")\n\nres = classification[['sa', 'da', 'te']]\nres = res.assign(res=ratio(classification.as_matrix(columns=['res1']), cluster.as_matrix(columns=['res2']) , worker.as_matrix(columns=['res3']), w_list))\n\nwarning = pd.read_csv(\"result/warning.csv\")\nblacklist = pd.read_csv(\"result/blacklist.csv\")\n\nfor i in 
range(len(res)):\n nm = res.get_value(i, 'res')\n if not is_black(res.get_value(i, 'sa')):\n if nm >= att:\n black = pd.DataFrame([[res.get_value(i, 'sa')]], columns=['sa'])\n blacklist = blacklist.append(black, ignore_index=True)\n elif nm >= warn:\n s = str(res.get_value(i, 'sa')).replace(\" \",\"\")\n lists = warning.as_matrix(columns=[\"sa\"]).tolist()\n\n if s not in lists:\n wrn = pd.DataFrame([[res.get_value(i, 'sa'), 1]], columns=['sa', 'level'])\n warning = warning.append(wrn, ignore_index=True)\n else:\n k = lists.index(s)\n level = warning.get_value(k, 'level')\n if level == mx - 1:\n black = pd.DataFrame([[warning.get_value(k, 'sa')]], columns=['sa'])\n blacklist = blacklist.append(black, ignore_index=True)\n warning = warning.drop(k, axis=0)\n else:\n warning.set_value(k, 'level', level + 1)\n\n\nattacks = res[res['res'] >= warn].copy()\nattacks.drop([\"res\"], axis=1, inplace=True)\nattacks.to_csv(\"result/attack.csv\", index=False)\nwarning.to_csv(\"result/warning.csv\", index=False)\nblacklist.to_csv(\"result/blacklist.csv\", index=False)\n","sub_path":"scripts/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"84014854","text":"#!flask/bin/python\n# -*- coding: utf-8 -*-\n\nimport re\nimport operator\nimport functools\n\nfrom app import app\nfrom app.wordlists import *\n\ndef de_hyphen_non_coded_words(word):\n if word.find(\"-\"):\n for coded_word in hyphenated_coded_words:\n if word.startswith(coded_word):\n return [word]\n return word.split(\"-\")\n return [word]\n\ndef clean_up_text(messy_text):\n text = re.sub(\"[\\\\s]\", \" \", messy_text, 0, 0)\n text = re.sub(u\"[\\.\\t\\,“”‘’<>\\*\\?\\!\\\"\\[\\]\\@\\':;\\(\\)\\./&]\", \" \", text, 0, 0)\n text = re.sub(u\"[—–]\", \"-\", text, 0, 0)\n return text.lower()\n\nclass Text:\n\n def __init__(self, ad_text):\n self.ad_text = ad_text\n self.analyse()\n\n def analyse(self):\n word_list = self.clean_up_word_list()\n self.extract_coded_words(word_list)\n self.assess_coding()\n\n def clean_up_word_list(self):\n word_list = filter(lambda x: x, clean_up_text(self.ad_text).split(\" \"))\n return functools.reduce(operator.concat, map(de_hyphen_non_coded_words, word_list))\n\n def extract_coded_words(self, advert_word_list):\n words, count = self.find_and_count_coded_words(advert_word_list,\n masculine_coded_words)\n self.masculine_coded_words, self.masculine_word_count = words, count\n words, count = self.find_and_count_coded_words(advert_word_list,\n feminine_coded_words)\n self.feminine_coded_words, self.feminine_word_count = words, count\n\n def find_and_count_coded_words(self, advert_word_list, gendered_word_list):\n gender_coded_words = [word for word in advert_word_list\n for coded_word in gendered_word_list\n if word.startswith(coded_word)]\n return (\",\").join(gender_coded_words), len(gender_coded_words)\n\n def assess_coding(self):\n coding_score = self.feminine_word_count - self.masculine_word_count\n if coding_score == 0:\n self.coding = \"neutral\"\n elif coding_score > 3:\n self.coding = \"strongly feminine-coded\"\n elif coding_score > 0:\n self.coding = \"feminine-coded\"\n elif coding_score < -3:\n self.coding = \"strongly masculine-coded\"\n else:\n self.coding = \"masculine-coded\"\n","sub_path":"app/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
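The text.py record above decides whether a job ad reads masculine- or feminine-coded by prefix-matching each ad word against gendered word lists imported from app.wordlists (not included in this record). A small self-contained illustration of that counting logic, using made-up two-entry stem lists of mine in place of the real ones:

# Toy stems; the real lists live in app.wordlists and are far longer.
masc_stems = ["confiden", "lead"]
fem_stems = ["support", "collaborat"]

def count_coded(words, stems):
    # A word counts once per stem it starts with, mirroring find_and_count_coded_words.
    hits = [w for w in words for s in stems if w.startswith(s)]
    return ",".join(hits), len(hits)

ad_words = "seeking a confident leader who supports collaborative work".split()
print(count_coded(ad_words, masc_stems))  # ('confident,leader', 2)
print(count_coded(ad_words, fem_stems))   # ('supports,collaborative', 2)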
+{"seq_id":"78618875","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20200409\n# Problem link : https://leetcode.com/problems/backspace-string-compare/\n################################################################\n\ndef stringy(S: str) -> str:\n stack = []\n for c in S:\n if c !='#':\n stack.append(c)\n elif stack:\n stack.pop()\n return ''.join(stack)\n\nclass Solution:\n def backspaceCompare(self, S: str, T: str) -> bool:\n return stringy(S) == stringy(T)\n","sub_path":"30_day_challenge_2020_April/844_backspace_string_compare_day9.py","file_name":"844_backspace_string_compare_day9.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438636034","text":"from django.shortcuts import redirect,render\r\nfrom django.contrib.auth import login,logout,authenticate\r\nfrom .forms import *\r\nfrom .models import *\r\nfrom django.http import HttpResponse\r\n\r\n# Create your views here.\r\ndef HomePage(request):\r\n context={}\r\n return render(request,'Quiz/HomePage.html',context)\r\n\r\ndef home(request):\r\n if request.method == 'POST':\r\n print(request.POST)\r\n questions=QuesModel.objects.all()\r\n score=0\r\n wrong=0\r\n correct=0\r\n total=0\r\n for q in questions:\r\n total+=1\r\n print(request.POST.get(q.question))\r\n print(q.ans)\r\n print()\r\n if q.ans == request.POST.get(q.question):\r\n score+=10\r\n correct+=1\r\n else:\r\n wrong+=1\r\n percent = score/(total*10) *100\r\n context = {\r\n 'score':score,\r\n 'time': request.POST.get('timer'),\r\n 'correct':correct,\r\n 'wrong':wrong,\r\n 'percent':percent,\r\n 'total':total\r\n }\r\n return render(request,'Quiz/result.html',context)\r\n else:\r\n questions=QuesModel.objects.all()\r\n context = {\r\n 'questions':questions\r\n }\r\n return render(request,'Quiz/home.html',context)\r\n\r\ndef addQuestion(request): \r\n if request.user.is_staff:\r\n form=addQuestionform()\r\n if(request.method=='POST'):\r\n form=addQuestionform(request.POST)\r\n if(form.is_valid()):\r\n form.save()\r\n return redirect('/')\r\n context={'form':form}\r\n return render(request,'Quiz/addQuestion.html',context)\r\n else: \r\n return redirect('home') \r\n\r\ndef registerPage(request):\r\n if request.user.is_authenticated:\r\n return redirect('home') \r\n else: \r\n form=createuserform()\r\n if request.method=='POST':\r\n form=createuserform(request.POST)\r\n if form.is_valid() :\r\n user=form.save()\r\n return redirect('login')\r\n context={\r\n 'form':form,\r\n }\r\n return render(request,'Quiz/register.html',context)\r\n\r\ndef loginPage(request):\r\n if request.user.is_authenticated:\r\n return redirect('home')\r\n else:\r\n if request.method==\"POST\":\r\n username=request.POST.get('username')\r\n password=request.POST.get('password')\r\n user=authenticate(request,username=username,password=password)\r\n if user is not None:\r\n login(request,user)\r\n return redirect('/')\r\n context={}\r\n return render(request,'Quiz/login.html',context)\r\n\r\ndef logoutPage(request):\r\n logout(request)\r\n return redirect('/')\r\n\r\n","sub_path":"Quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"8490162","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 
2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/philippos/Desktop/Dev_OpenSource/transitionMatrix/tests/test_estimators.py\n# Compiled at: 2018-10-22 07:00:59\n# Size of source mod 2**32: 2803 bytes\nimport unittest, pandas as pd, transitionMatrix as tm\nfrom transitionMatrix import source_path\nfrom transitionMatrix.estimators import cohort_estimator as es\nfrom transitionMatrix.estimators import aalen_johansen_estimator as aj\nACCURATE_DIGITS = 2\n\nclass TestSimpleEstimator(unittest.TestCase):\n pass\n\n\nclass TestCohortEstimator(unittest.TestCase):\n\n def test_cohort_estimator_counts(self):\n dataset_path = source_path + 'datasets/'\n data = pd.read_csv(dataset_path + 'synthetic_data5.csv')\n event_count = data[(data['Timestep'] < 4)]['ID'].count()\n description = [('0', 'Stage 1'), ('1', 'Stage 2'), ('2', 'Stage 3')]\n myState = tm.StateSpace(description)\n sorted_data = data.sort_values(['ID', 'Timestep'], ascending=[True, True])\n myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})\n result = myEstimator.fit(sorted_data)\n self.assertEqual(event_count, myEstimator.counts)\n\n\nclass TestAalenJohansenEstimator(unittest.TestCase):\n __doc__ = '\\n Test the estimation of a simple 2x2 transition matrix with absorbing state\\n\\n .. note: The result is subject to sampling error! Ensure the required accuracy corresponds to the input data size\\n\\n '\n\n def test_aalenjohansen_simple_transitions(self):\n dataset_path = source_path + 'datasets/'\n data = pd.read_csv(dataset_path + 'synthetic_data8.csv')\n sorted_data = data.sort_values(['Time', 'ID'], ascending=[True, True])\n description = [('0', 'G'), ('1', 'B')]\n myState = tm.StateSpace(description)\n myEstimator = aj.AalenJohansenEstimator(states=myState)\n labels = {'Timestamp': 'Time', 'From_State': 'From', 'To_State': 'To', 'ID': 'ID'}\n result = myEstimator.fit(sorted_data, labels=labels)\n self.assertAlmostEqual(result[(0, 0, -1)], 0.5, places=ACCURATE_DIGITS, msg=None, delta=None)\n self.assertAlmostEqual(result[(0, 1, -1)], 0.5, places=ACCURATE_DIGITS, msg=None, delta=None)\n self.assertEqual(result[(1, 0, -1)], 0.0)\n self.assertEqual(result[(1, 1, -1)], 1.0)","sub_path":"pycfiles/transitionMatrix-0.4.0-py3.4/test_estimators.cpython-34.py","file_name":"test_estimators.cpython-34.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639496196","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nfor subset in `seq 1 9`\ndo\npython -W ignore infinite_generator_3D_sukrit_MRI_brain_classification.py \\\n--fold $subset \\\n--scale 32 \\\n--data ../ATLAS_data \\\n--save generated_cubes_MRI_classification\ndone\n\"\"\"\n\n# In[1]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\n#import keras\n#print(\"Keras = {}\".format(keras.__version__))\n#import tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\nimport string\nimport sys\nimport math\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport SimpleITK as sitk\nimport nibabel as nib\n\nfrom tqdm import tqdm\nfrom sklearn import metrics\nfrom optparse import OptionParser\nfrom glob import glob\nfrom skimage.transform import resize\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom skimage import measure, morphology\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D\n\nsys.setrecursionlimit(40000)\n\nparser = 
OptionParser()\n\nparser.add_option(\"--fold\", dest=\"fold\", help=\"fold of subset\", default=None, type=\"int\")\nparser.add_option(\"--input_rows\", dest=\"input_rows\", help=\"input rows\", default=64, type=\"int\")\nparser.add_option(\"--input_cols\", dest=\"input_cols\", help=\"input cols\", default=64, type=\"int\")\nparser.add_option(\"--input_deps\", dest=\"input_deps\", help=\"input deps\", default=32, type=\"int\")\nparser.add_option(\"--crop_rows\", dest=\"crop_rows\", help=\"crop rows\", default=64, type=\"int\")\nparser.add_option(\"--crop_cols\", dest=\"crop_cols\", help=\"crop cols\", default=64, type=\"int\")\nparser.add_option(\"--data\", dest=\"data\", help=\"the directory of LUNA16 dataset\", default=None, type=\"string\")\nparser.add_option(\"--save\", dest=\"save\", help=\"the directory of processed 3D cubes\", default=None, type=\"string\")\nparser.add_option(\"--scale\", dest=\"scale\", help=\"scale of the generator\", default=32, type=\"int\")\n(options, args) = parser.parse_args()\nfold = options.fold\n\nseed = 1\nrandom.seed(seed)\n\nassert options.data is not None\nassert options.save is not None\nassert options.fold >= 0 and options.fold <= 9\n\nif not os.path.exists(options.save):\n os.makedirs(options.save)\n\nclass setup_config():\n '''\n hu_max = 1000.0\n hu_min = -1000.0\n '''\n\n hu_max = 100\n hu_min = 0\n \n HU_thred = (33 - hu_min) / (hu_max - hu_min)\n def __init__(self, \n input_rows=None, \n input_cols=None,\n input_deps=None,\n crop_rows=None, \n crop_cols=None,\n len_border=None,\n len_border_z=None,\n scale=None,\n DATA_DIR=None,\n train_fold=[0,1,2,3,4],\n valid_fold=[5,6],\n test_fold=[7,8,9],\n len_depth=None,\n lung_min=0.7,\n lung_max=1.0,\n save_samples = True\n ):\n self.input_rows = input_rows\n self.input_cols = input_cols\n self.input_deps = input_deps\n self.crop_rows = crop_rows\n self.crop_cols = crop_cols\n self.len_border = len_border\n self.len_border_z = len_border_z\n self.scale = scale\n self.DATA_DIR = DATA_DIR\n self.train_fold = train_fold\n self.valid_fold = valid_fold\n self.test_fold = test_fold\n self.len_depth = len_depth\n self.lung_min = lung_min\n self.lung_max = lung_max\n self.save_samples = save_samples\n\n def display(self):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n\n\n\nconfig = setup_config(input_rows=options.input_rows,\n input_cols=options.input_cols,\n input_deps=options.input_deps,\n crop_rows=options.crop_rows,\n crop_cols=options.crop_cols,\n scale=options.scale,\n len_border=20,\n len_border_z=32,\n len_depth=3,\n lung_min=0.1,\n lung_max=1.0,\n DATA_DIR=options.data,\n save_samples = False\n )\nconfig.display()\n\ndef plot_3d(image, save_img_dir, threshold):\n\n # Position the scan upright, \n # so the head of the patient would be at the top facing the camera\n p = image.astype(np.uint8) #.transpose(2,1,0)\n\n try:\n verts, faces = measure.marching_cubes_classic(p, threshold)\n\n fig = plt.figure(figsize=(10, 10))\n ax = Axes3D(fig)\n #ax = fig.add_subplot(111, projection='3d')\n\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\n mesh = Poly3DCollection(verts[faces], alpha=0.70)\n face_color = [0.45, 0.45, 0.75]\n mesh.set_facecolor(face_color)\n ax.add_collection3d(mesh)\n\n ax.set_xlim(0, p.shape[0])\n ax.set_ylim(0, p.shape[1])\n ax.set_zlim(0, p.shape[2])\n\n plt.savefig(save_img_dir)\n 
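# NOTE: rendering is best-effort; any failure in marching cubes or matplotlib is caught by the bare except below and the sample's render is simply skipped\n 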
return\n except:\n return\n\ndef infinite_generator_from_one_volume(config, img_array, segmentation_mask):\n size_x, size_y, size_z = img_array.shape\n if size_z-config.input_deps-config.len_depth-1-config.len_border_z < config.len_border_z:\n return None, None\n \n img_array[img_array < config.hu_min] = config.hu_min\n img_array[img_array > config.hu_max] = config.hu_max\n img_array = 1.0*(img_array-config.hu_min) / (config.hu_max-config.hu_min)\n\n segmentation_mask = (segmentation_mask - np.min(segmentation_mask))/(np.max(segmentation_mask) - np.min(segmentation_mask))\n segmentation_mask = segmentation_mask.astype(int)\n print ('Segmentation True Shape', segmentation_mask.shape, 'Segmentation Image max: ', np.max(segmentation_mask), 'Segmentation Image min: ', np.min(segmentation_mask), ' Segmentation Image mean: ', np.mean(segmentation_mask))\n\n\n slice_set = np.zeros((config.scale, config.input_rows, config.input_cols, config.input_deps), dtype=float)\n slice_set_segmentation = np.zeros((config.scale, config.input_rows, config.input_cols, config.input_deps), dtype=float)\n cube_labels = []\n\n num_pair = 0\n cnt = 0\n while True:\n cnt += 1\n if cnt > 50 * config.scale and num_pair == 0:\n return None, None\n elif cnt > 50 * config.scale and num_pair > 0:\n return np.array(slice_set[:num_pair]), np.array(cube_labels[:num_pair])\n\n print ('num_pair', num_pair)\n \n start_x = random.randint(0+config.len_border, size_x-config.crop_rows-1-config.len_border)\n start_y = random.randint(0+config.len_border, size_y-config.crop_cols-1-config.len_border)\n start_z = random.randint(0+config.len_border_z, size_z-config.input_deps-config.len_depth-1-config.len_border_z)\n \n crop_window = img_array[start_x : start_x+config.crop_rows,\n start_y : start_y+config.crop_cols,\n start_z : start_z+config.input_deps+config.len_depth]\n\n crop_window_segmentation_mask = segmentation_mask[start_x : start_x+config.crop_rows,\n start_y : start_y+config.crop_cols,\n start_z : start_z+config.input_deps+config.len_depth]\n\n if config.crop_rows != config.input_rows or config.crop_cols != config.input_cols:\n crop_window = resize(crop_window, \n (config.input_rows, config.input_cols, config.input_deps+config.len_depth), \n preserve_range=True)\n\n crop_window_segmentation_mask = resize(crop_window_segmentation_mask, \n (config.input_rows, config.input_cols, config.input_deps+config.len_depth), \n preserve_range=True)\n \n t_img = np.zeros((config.input_rows, config.input_cols, config.input_deps), dtype=float)\n d_img = np.zeros((config.input_rows, config.input_cols, config.input_deps), dtype=float)\n\n for d in range(config.input_deps):\n for i in range(config.input_rows):\n for j in range(config.input_cols):\n for k in range(config.len_depth): #check in a depth of d+k around the 2D image if any pixel exceeds the threshold. If it doesn't (break statement not executed), then put the values from slice (d+k-1) into the pixel. \n if crop_window[i, j, d+k] >= config.HU_thred:\n t_img[i, j, d] = crop_window[i, j, d+k]\n d_img[i, j, d] = k #stores the depth from which the (i,j)th pixel in slice d has been taken.\n break\n if k == config.len_depth-1:\n d_img[i, j, d] = k\n \n d_img = d_img.astype('float32') #pixels taken from their own slice will have d_img[i, j, d] = 0. 
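The two rescaling lines below turn that depth into a weight in [0, 1], where 1.0 marks voxels that came from their own slice.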
\n d_img /= (config.len_depth - 1)\n d_img = 1.0 - d_img\n \n cube_vols = config.input_rows * config.input_cols * config.input_deps\n mask_ratio = float(np.sum(segmentation_mask))/segmentation_mask.size #fraction of voxels which have a lesion in the original volume.\n\n if np.sum(d_img) > config.lung_max * cube_vols: #just ensures that not too many pixels are taken from slices far away than their original slice.\n print (np.sum(d_img), config.lung_max * config.input_rows * config.input_cols * config.input_deps)\n continue\n\n mask = crop_window_segmentation_mask[:,:,:config.input_deps]\n\n if np.sum(t_img) < config.lung_min * cube_vols or np.sum(t_img) > (1- config.lung_min) * cube_vols: #check if there is enough information in the cube.\n print ('Not enough information in cube ', num_pair, 'max:', cube_vols, 'information: ', np.sum(t_img))\n continue\n\n if np.sum(mask) > mask_ratio * cube_vols and np.sum(cube_labels) < config.scale/2: #check if there is enough pixels from the affected region in the cube. Also don't let too many cubes have the same label.\n print ('Cube ', num_pair, 'has enough lesioned pixels. Present is:' , np.sum(mask), 'minimum is ', mask_ratio * cube_vols, 'mask_ratio', mask_ratio)\n cube_labels.append(1)\n elif np.sum(cube_labels) > config.scale/2:\n print ('Too many cubes have lesions. Skipping this one', np.sum(cube_labels))\n continue\n else:\n cube_labels.append(0)\n \n slice_set[num_pair] = crop_window[:,:,:config.input_deps]\n slice_set_segmentation[num_pair] = crop_window_segmentation_mask[:,:,:config.input_deps]\n \n if config.save_samples:\n\n final_sample = slice_set[num_pair] #* 255\n final_sample = final_sample.astype(np.uint8)\n\n file_name = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(10)]) + '.png'\n image_path = os.path.join(options.save, 'MRI_cube_samples')\n \n if not os.path.exists(image_path):\n os.makedirs(image_path) \n \n plot_3d(final_sample, os.path.join(image_path, file_name), threshold = np.min(final_sample))\n\n\n \n #imageio.imwrite(, final_sample)\n\n print (\"************ Wrote Images *****************\")\n\n num_pair += 1\n if num_pair == config.scale:\n break\n return np.array(slice_set), np.array(cube_labels)\n\n\ndef get_self_learning_data(fold, config):\n slice_set = []\n all_cube_labels = []\n sites = fold\n\n for site in sites:\n subjects = os.listdir(os.path.join(config.DATA_DIR, 'Site' + str(site)))\n for subject in tqdm(subjects):\n session_dir = os.path.join(config.DATA_DIR, 'Site' + str(site), subject)\n if os.path.isdir(session_dir):\n sessions = os.listdir(session_dir)\n for session in sessions:\n data_folder = os.path.join(session_dir, session)\n\n ###patient_data = load_scan(data_folder)\n \n subject_file_name = subject + '_t1w_deface_stx.nii.gz'\n img = nib.load(os.path.join(data_folder, subject_file_name))\n img_array = img.get_fdata()\n \n segmentation_true_file_name = subject + '_LesionSmooth_stx.nii.gz'\n segmentation_true = nib.load(os.path.join(data_folder, segmentation_true_file_name))\n segmentation_true_array = segmentation_true.get_fdata()\n\n #img_array = img_array.transpose(2, 1, 0)\n print ('Image Shape', img_array.shape, 'Image max: ', np.max(img_array), 'Image min: ', np.min(img_array), 'Image mean: ', np.mean(img_array))\n\n print ('Segmentation True Shape', segmentation_true_array.shape, 'Segmentation Image max: ', np.max(segmentation_true_array), 'Segmentation Image min: ', np.min(segmentation_true_array), ' Segmentation Image mean: ', 
np.mean(segmentation_true_array))\n\n x, cube_labels = infinite_generator_from_one_volume(config, img_array, segmentation_true_array)\n \n if x is not None and cube_labels is not None:\n slice_set.extend(x)\n all_cube_labels.extend(cube_labels)\n\n '''\n for index_subset in fold:\n luna_subset_path = os.path.join(config.DATA_DIR, \"subset\"+str(index_subset))\n file_list = glob(os.path.join(luna_subset_path, \"*.mhd\"))\n \n for img_file in tqdm(file_list):\n \n itk_img = sitk.ReadImage(img_file) \n img_array = sitk.GetArrayFromImage(itk_img)\n img_array = img_array.transpose(2, 1, 0)\n \n x = infinite_generator_from_one_volume(config, img_array)\n if x is not None:\n slice_set.extend(x)\n '''\n return np.array(slice_set), np.array(all_cube_labels)\n\n\nprint(\">> Fold {}\".format(fold))\ncube, all_cube_labels = get_self_learning_data([fold], config)\nprint(\"cube: {} | {:.2f} ~ {:.2f}\".format(cube.shape, np.min(cube), np.max(cube)))\nprint(\"all_cube_labels: {} | {:.2f} ~ {:.2f}\".format(all_cube_labels.shape, np.min(all_cube_labels), np.max(all_cube_labels)))\n\nnp.save(os.path.join(options.save, \n \"batch_classification_MRI_labels_\"+str(config.scale)+\n \"_\"+str(config.input_rows)+\n \"x\"+str(config.input_cols)+\n \"x\"+str(config.input_deps)+\n \"_\"+str(fold)+\".npy\"), cube)\n\nnp.save(os.path.join(options.save, \n \"batch_classification_MRI_true_labels_\"+str(config.scale)+\n \"_\"+str(config.input_rows)+\n \"x\"+str(config.input_cols)+\n \"x\"+str(config.input_deps)+\n \"_\"+str(fold)+\".npy\"), all_cube_labels)","sub_path":"create_cubes/infinite_generator_3D_sukrit_MRI_brain_classification.py","file_name":"infinite_generator_3D_sukrit_MRI_brain_classification.py","file_ext":"py","file_size_in_byte":15097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483441250","text":"from django.test import TestCase\nfrom .models import Image, Profile\nfrom django.contrib.auth.models import User\nimport os\n# Create your tests here.\n\nclass TestImage(TestCase):\n def setUp(self):\n self.user = User.objects.create_user(username = \"username\", email = \"e@e.com\", password = \"password\")\n self.profile= Profile(user = self.user)\n self.img = Image(name =\"test\", caption = \"haha\", likes = 0, profile = self.profile)\n\n \n def test_instance(self):\n self.user.save()\n self.profile.save()\n self.assertTrue(isinstance(self.img, Image))\n\n def test_save_image(self):\n self.user.save()\n self.profile.save()\n self.img.save_image()\n imgs = Image.objects.all()\n self.assertTrue(len(imgs) > 0)\n\n def test_delete_image(self):\n self.user.save()\n self.profile.save()\n self.img.save_image()\n self.img.delete_image()\n images = Image.objects.all()\n self.assertTrue(len(images) == 0)\n\n def test_update_image(self):\n cwd = os.getcwd()\n self.user.save()\n self.profile.save()\n self.img.save_image()\n self.assertTrue(self.img.image == None)\n self.img = self.img.update_image(\"HI\", f'{cwd}/media/home.png')\n self.img.save_image()\n img =Image.objects.filter(caption='HI')\n self.assertTrue(len(img)>0)\n def tearDown(self):\n User.objects.all().delete()\n Profile.objects.all().delete()\n Image.objects.all().delete()\n\nclass TestProfile(TestCase):\n\n def setUp(self):\n self.user = User.objects.create_user(username = \"username\", email = \"e@e.com\", password = \"password\")\n self.profile= Profile(bio = \"new me\", user = self.user)\n # self.img = Image(name =\"test\", caption = \"haha\", likes = 0, profile = self.profile)\n \n def 
test_instance(self):\n self.user.save()\n self.profile.save()\n self.assertTrue(isinstance(self.profile, Profile))\n\n def test_save_profile(self):\n self.user.save()\n self.profile.save()\n profs = Profile.objects.all()\n self.assertTrue(len(profs) > 0)\n\n def test_search_users(self):\n self.user.save()\n self.profile.save()\n user = Profile.search_users(\"username\")\n self.assertTrue(len(user) > 0)\n\n def test_delete_profile(self):\n self.user.save()\n self.profile.save()\n self.profile.delete_profile()\n profs = Profile.objects.all()\n self.assertTrue(len(profs) == 0)\n\n def test_update_profile(self):\n cwd = os.getcwd()\n self.user.save()\n self.profile.save()\n self.assertTrue(self.profile.dp == None)\n self.profile.update_profile(f'{cwd}/media/home.png',\"changed\")\n profs = Profile.objects.filter(bio = \"changed\")\n self.assertTrue(len(profs) > 0)\n\n def tearDown(self):\n User.objects.all().delete()\n self.assertTrue(len(User.objects.all()) == 0)\n Profile.objects.all().delete()\n self.assertTrue(len(Profile.objects.all()) == 0)\n\n","sub_path":"people/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277255917","text":"n = int(input())\n\nst = 0\ntemp_n = n\nwhile(temp_n != 1):\n\ttemp_n /= 2\n\tst += 1\ncount_game = 0\nfor i in range(st):\n\tcount_game = count_game + 2 ** i\n#print(count_game)\nlst, com_number, new_lst, total_com = [], [], [], []\n\ncom_number = [i + 1 for i in range(n)]\n\nfor item in range(count_game):\n \tlst.append(int(input()))\npos = 0\n#print(lst)\n#print(com_number)\ntotal_com = com_number.copy()\nwhile (n != 1):\n\tn = int(n / 2)\n\tfor i in range(n):\n\t\tif lst[pos + i] == 1:\n\t\t\tnew_lst.append(total_com[2 * i])\n\t\telse:\n\t\t\tnew_lst.append(total_com[2 * i + 1])\n\ttotal_com = new_lst.copy()\n\tprint(total_com)\n\tnew_lst.clear()\n\tpos += n\nprint('Command number - ', total_com[0])\n\n","sub_path":"mun_18_3.py","file_name":"mun_18_3.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401290509","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Lead, MktgMessage, Slider\n\nclass MktgMessageAdmin(admin.ModelAdmin):\n list_display = [\"__unicode__\", \"start_date\",\"end_date\",\"active\", \"featured\"]\n class Meta:\n model = MktgMessage\n\nadmin.site.register(MktgMessage,MktgMessageAdmin)\n\nadmin.site.register(Lead)\n\nclass SliderAdmin(admin.ModelAdmin):\n list_display = [\"__unicode__\",\"order\",\"start_date\",\"end_date\",\"active\", \"featured\"]\n list_editable = [\"order\", \"end_date\"]\n class Meta:\n model = Slider\n\nadmin.site.register(Slider, SliderAdmin)\n","sub_path":"mktg/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"226018541","text":"\n\nfrom xai.brain.wordbase.nouns._junk import _JUNK\n\n#calss header\nclass _JUNKS(_JUNK, ):\n\tdef __init__(self,): \n\t\t_JUNK.__init__(self)\n\t\tself.name = \"JUNKS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"junk\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_junks.py","file_name":"_junks.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112543412","text":"import 
App\nimport GlobalPropertyTemplates\n# Setting up local templates.\n#################################################\nTractors = App.WeaponSystemProperty_Create(\"Tractor Beams\")\n\nTractors.SetMaxCondition(14000.000000)\nTractors.SetCritical(0)\nTractors.SetTargetable(0)\nTractors.SetPrimary(1)\nTractors.SetPosition(0.000000, -0.700000, -0.400000)\nTractors.SetPosition2D(64.000000, 80.000000)\nTractors.SetRepairComplexity(1.500000)\nTractors.SetDisabledPercentage(0.750000)\nTractors.SetRadius(0.250000)\nTractors.SetNormalPowerPerSecond(400.000000)\nTractors.SetWeaponSystemType(Tractors.WST_TRACTOR)\nTractors.SetSingleFire(1)\nTractors.SetAimedWeapon(0)\nkFiringChainString = App.TGString()\nkFiringChainString.SetString(\"\")\nTractors.SetFiringChainString(kFiringChainString)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(Tractors)\n#################################################\nAftTractor = App.TractorBeamProperty_Create(\"Tractor A\")\n\nAftTractor.SetMaxCondition(2000.000000)\nAftTractor.SetCritical(0)\nAftTractor.SetTargetable(1)\nAftTractor.SetPrimary(1)\nAftTractor.SetPosition(0.000000, -1.920000, 0.080000)\nAftTractor.SetPosition2D(59.000000, 110.000000)\nAftTractor.SetRepairComplexity(1.500000)\nAftTractor.SetDisabledPercentage(0.750000)\nAftTractor.SetRadius(0.250000)\nAftTractor.SetDumbfire(0)\nAftTractor.SetWeaponID(0)\nAftTractor.SetGroups(0)\nAftTractor.SetDamageRadiusFactor(0.300000)\nAftTractor.SetIconNum(0)\nAftTractor.SetIconPositionX(0.000000)\nAftTractor.SetIconPositionY(0.000000)\nAftTractor.SetIconAboveShip(1)\nAftTractor.SetFireSound(\"Tractor Beam\")\nAftTractor.SetMaxCharge(5.000000)\nAftTractor.SetMaxDamage(100000.000000)\nAftTractor.SetMaxDamageDistance(114.000000)\nAftTractor.SetMinFiringCharge(3.000000)\nAftTractor.SetNormalDischargeRate(1.000000)\nAftTractor.SetRechargeRate(2.000000)\nAftTractor.SetIndicatorIconNum(0)\nAftTractor.SetIndicatorIconPositionX(0.000000)\nAftTractor.SetIndicatorIconPositionY(0.000000)\nAftTractorForward = App.TGPoint3()\nAftTractorForward.SetXYZ(0.000000, -1.000000, 0.000000)\nAftTractorUp = App.TGPoint3()\nAftTractorUp.SetXYZ(0.000000, 0.000000, 1.000000)\nAftTractor.SetOrientation(AftTractorForward, AftTractorUp)\nAftTractor.SetArcWidthAngles(-0.698132, 0.698132)\nAftTractor.SetArcHeightAngles(-0.698132, 0.698132)\nAftTractor.SetTractorBeamWidth(0.300000)\nAftTractor.SetTextureStart(0)\nAftTractor.SetTextureEnd(0)\nAftTractor.SetTextureName(\"data/Textures/Tactical/TractorBeam.tga\")\nkColor = App.TGColorA()\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nAftTractor.SetOuterShellColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nAftTractor.SetInnerShellColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nAftTractor.SetOuterCoreColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nAftTractor.SetInnerCoreColor(kColor)\nAftTractor.SetNumSides(12)\nAftTractor.SetMainRadius(0.075000)\nAftTractor.SetTaperRadius(0.000000)\nAftTractor.SetCoreScale(0.450000)\nAftTractor.SetTaperRatio(0.200000)\nAftTractor.SetTaperMinLength(1.000000)\nAftTractor.SetTaperMaxLength(5.000000)\nAftTractor.SetLengthTextureTilePerUnit(0.250000)\nAftTractor.SetPerimeterTile(1.000000)\nAftTractor.SetTextureSpeed(0.200000)\nAftTractor.SetTextureName(\"data/Textures/Tactical/TractorBeam.tga\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(AftTractor)\n#################################################\nShuttleBay = App.ObjectEmitterProperty_Create(\"Shuttle Bay\")\n\nShuttleBayForward = 
App.TGPoint3()\nShuttleBayForward.SetXYZ(0.000000, -1.000000, 0.000000)\nShuttleBayUp = App.TGPoint3()\nShuttleBayUp.SetXYZ(0.000000, 0.000000, 1.000000)\nShuttleBayRight = App.TGPoint3()\nShuttleBayRight.SetXYZ(-1.000000, 0.000000, 0.000000)\nShuttleBay.SetOrientation(ShuttleBayForward, ShuttleBayUp, ShuttleBayRight)\nShuttleBayPosition = App.TGPoint3()\nShuttleBayPosition.SetXYZ(0.000000, -1.960000, 0.080000)\nShuttleBay.SetPosition(ShuttleBayPosition)\nShuttleBay.SetEmittedObjectType(ShuttleBay.OEP_SHUTTLE)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ShuttleBay)\n#################################################\nPeregrine = App.ShipProperty_Create(\"Peregrine\")\n\nPeregrine.SetGenus(1)\nPeregrine.SetSpecies(105)\nPeregrine.SetMass(220600.000000)\nPeregrine.SetRotationalInertia(3309000000000.000000)\nPeregrine.SetShipName(\"Peregrine\")\nPeregrine.SetModelFilename(\"data/Models/Ships/Nebula.nif\")\nPeregrine.SetDamageResolution(10.000000)\nPeregrine.SetAffiliation(0)\nPeregrine.SetStationary(0)\nPeregrine.SetAIString(\"FedAttack\")\nPeregrine.SetDeathExplosionSound(\"g_lsDeathExplosions\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(Peregrine)\n#################################################\nForwardTractor = App.TractorBeamProperty_Create(\"Tractor F\")\n\nForwardTractor.SetMaxCondition(2000.000000)\nForwardTractor.SetCritical(0)\nForwardTractor.SetTargetable(1)\nForwardTractor.SetPrimary(1)\nForwardTractor.SetPosition(0.000000, 2.200000, 0.080000)\nForwardTractor.SetPosition2D(70.000000, 110.000000)\nForwardTractor.SetRepairComplexity(1.500000)\nForwardTractor.SetDisabledPercentage(0.750000)\nForwardTractor.SetRadius(0.250000)\nForwardTractor.SetDumbfire(0)\nForwardTractor.SetWeaponID(0)\nForwardTractor.SetGroups(0)\nForwardTractor.SetDamageRadiusFactor(0.300000)\nForwardTractor.SetIconNum(0)\nForwardTractor.SetIconPositionX(0.000000)\nForwardTractor.SetIconPositionY(0.000000)\nForwardTractor.SetIconAboveShip(1)\nForwardTractor.SetFireSound(\"Tractor Beam\")\nForwardTractor.SetMaxCharge(5.000000)\nForwardTractor.SetMaxDamage(100000.000000)\nForwardTractor.SetMaxDamageDistance(114.000000)\nForwardTractor.SetMinFiringCharge(3.000000)\nForwardTractor.SetNormalDischargeRate(1.000000)\nForwardTractor.SetRechargeRate(2.000000)\nForwardTractor.SetIndicatorIconNum(0)\nForwardTractor.SetIndicatorIconPositionX(0.000000)\nForwardTractor.SetIndicatorIconPositionY(0.000000)\nForwardTractorForward = App.TGPoint3()\nForwardTractorForward.SetXYZ(0.000000, 1.000000, 0.000000)\nForwardTractorUp = App.TGPoint3()\nForwardTractorUp.SetXYZ(0.000000, 0.000000, 1.000000)\nForwardTractor.SetOrientation(ForwardTractorForward, ForwardTractorUp)\nForwardTractor.SetArcWidthAngles(-0.698132, 0.698132)\nForwardTractor.SetArcHeightAngles(-0.698132, 0.698132)\nForwardTractor.SetTractorBeamWidth(0.300000)\nForwardTractor.SetTextureStart(0)\nForwardTractor.SetTextureEnd(0)\nForwardTractor.SetTextureName(\"data/Textures/Tactical/TractorBeam.tga\")\nkColor = App.TGColorA()\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nForwardTractor.SetOuterShellColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nForwardTractor.SetInnerShellColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 1.000000)\nForwardTractor.SetOuterCoreColor(kColor)\nkColor.SetRGBA(0.400000, 0.400000, 1.000000, 
1.000000)\nForwardTractor.SetInnerCoreColor(kColor)\nForwardTractor.SetNumSides(12)\nForwardTractor.SetMainRadius(0.075000)\nForwardTractor.SetTaperRadius(0.000000)\nForwardTractor.SetCoreScale(0.450000)\nForwardTractor.SetTaperRatio(0.200000)\nForwardTractor.SetTaperMinLength(1.000000)\nForwardTractor.SetTaperMaxLength(5.000000)\nForwardTractor.SetLengthTextureTilePerUnit(0.250000)\nForwardTractor.SetPerimeterTile(1.000000)\nForwardTractor.SetTextureSpeed(0.200000)\nForwardTractor.SetTextureName(\"data/Textures/Tactical/TractorBeam.tga\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ForwardTractor)\n#################################################\nViewscreenForward = App.PositionOrientationProperty_Create(\"ViewscreenForward\")\n\nViewscreenForwardForward = App.TGPoint3()\nViewscreenForwardForward.SetXYZ(0.000000, 1.000000, 0.000000)\nViewscreenForwardUp = App.TGPoint3()\nViewscreenForwardUp.SetXYZ(0.000000, 0.000000, 1.000000)\nViewscreenForwardRight = App.TGPoint3()\nViewscreenForwardRight.SetXYZ(1.000000, 0.000000, 0.000000)\nViewscreenForward.SetOrientation(ViewscreenForwardForward, ViewscreenForwardUp, ViewscreenForwardRight)\nViewscreenForwardPosition = App.TGPoint3()\nViewscreenForwardPosition.SetXYZ(0.000000, 2.200000, 0.100000)\nViewscreenForward.SetPosition(ViewscreenForwardPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenForward)\n#################################################\nViewscreenBack = App.PositionOrientationProperty_Create(\"ViewscreenBack\")\n\nViewscreenBackForward = App.TGPoint3()\nViewscreenBackForward.SetXYZ(0.000000, -1.000000, 0.000000)\nViewscreenBackUp = App.TGPoint3()\nViewscreenBackUp.SetXYZ(0.000000, 0.000000, 1.000000)\nViewscreenBackRight = App.TGPoint3()\nViewscreenBackRight.SetXYZ(-1.000000, 0.000000, 0.000000)\nViewscreenBack.SetOrientation(ViewscreenBackForward, ViewscreenBackUp, ViewscreenBackRight)\nViewscreenBackPosition = App.TGPoint3()\nViewscreenBackPosition.SetXYZ(0.000000, -2.000000, 0.100000)\nViewscreenBack.SetPosition(ViewscreenBackPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenBack)\n#################################################\nViewscreenLeft = App.PositionOrientationProperty_Create(\"ViewscreenLeft\")\n\nViewscreenLeftForward = App.TGPoint3()\nViewscreenLeftForward.SetXYZ(-1.000000, 0.000000, 0.000000)\nViewscreenLeftUp = App.TGPoint3()\nViewscreenLeftUp.SetXYZ(0.000000, 0.000000, 1.000000)\nViewscreenLeftRight = App.TGPoint3()\nViewscreenLeftRight.SetXYZ(0.000000, 1.000000, 0.000000)\nViewscreenLeft.SetOrientation(ViewscreenLeftForward, ViewscreenLeftUp, ViewscreenLeftRight)\nViewscreenLeftPosition = App.TGPoint3()\nViewscreenLeftPosition.SetXYZ(0.000000, 2.200000, 0.100000)\nViewscreenLeft.SetPosition(ViewscreenLeftPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenLeft)\n#################################################\nViewscreenRight = App.PositionOrientationProperty_Create(\"ViewscreenRight\")\n\nViewscreenRightForward = App.TGPoint3()\nViewscreenRightForward.SetXYZ(1.000000, 0.000000, 0.000000)\nViewscreenRightUp = App.TGPoint3()\nViewscreenRightUp.SetXYZ(0.000000, 0.000000, 1.000000)\nViewscreenRightRight = App.TGPoint3()\nViewscreenRightRight.SetXYZ(0.000000, -1.000000, 0.000000)\nViewscreenRight.SetOrientation(ViewscreenRightForward, ViewscreenRightUp, ViewscreenRightRight)\nViewscreenRightPosition = App.TGPoint3()\nViewscreenRightPosition.SetXYZ(0.000000, 2.200000, 
0.100000)\nViewscreenRight.SetPosition(ViewscreenRightPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenRight)\n#################################################\nViewscreenUp = App.PositionOrientationProperty_Create(\"ViewscreenUp\")\n\nViewscreenUpForward = App.TGPoint3()\nViewscreenUpForward.SetXYZ(0.000000, 0.000000, 1.000000)\nViewscreenUpUp = App.TGPoint3()\nViewscreenUpUp.SetXYZ(0.000000, -1.000000, 0.000000)\nViewscreenUpRight = App.TGPoint3()\nViewscreenUpRight.SetXYZ(1.000000, 0.000000, 0.000000)\nViewscreenUp.SetOrientation(ViewscreenUpForward, ViewscreenUpUp, ViewscreenUpRight)\nViewscreenUpPosition = App.TGPoint3()\nViewscreenUpPosition.SetXYZ(0.000000, 2.200000, 0.100000)\nViewscreenUp.SetPosition(ViewscreenUpPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenUp)\n#################################################\nViewscreenDown = App.PositionOrientationProperty_Create(\"ViewscreenDown\")\n\nViewscreenDownForward = App.TGPoint3()\nViewscreenDownForward.SetXYZ(0.000000, 0.000000, -1.000000)\nViewscreenDownUp = App.TGPoint3()\nViewscreenDownUp.SetXYZ(0.000000, 1.000000, 0.000000)\nViewscreenDownRight = App.TGPoint3()\nViewscreenDownRight.SetXYZ(1.000000, 0.000000, 0.000000)\nViewscreenDown.SetOrientation(ViewscreenDownForward, ViewscreenDownUp, ViewscreenDownRight)\nViewscreenDownPosition = App.TGPoint3()\nViewscreenDownPosition.SetXYZ(0.000000, 2.200000, 0.100000)\nViewscreenDown.SetPosition(ViewscreenDownPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ViewscreenDown)\n#################################################\nFirstPersonCamera = App.PositionOrientationProperty_Create(\"FirstPersonCamera\")\n\nFirstPersonCameraForward = App.TGPoint3()\nFirstPersonCameraForward.SetXYZ(1.000000, 0.000000, 0.000000)\nFirstPersonCameraUp = App.TGPoint3()\nFirstPersonCameraUp.SetXYZ(0.000000, 0.000000, 1.000000)\nFirstPersonCameraRight = App.TGPoint3()\nFirstPersonCameraRight.SetXYZ(0.000000, -1.000000, 0.000000)\nFirstPersonCamera.SetOrientation(FirstPersonCameraForward, FirstPersonCameraUp, FirstPersonCameraRight)\nFirstPersonCameraPosition = App.TGPoint3()\nFirstPersonCameraPosition.SetXYZ(0.000000, 2.200000, 0.100000)\nFirstPersonCamera.SetPosition(FirstPersonCameraPosition)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(FirstPersonCamera)\n#################################################\nDorsalPhaser = App.PhaserProperty_Create(\"Phaser D\")\n\nDorsalPhaser.SetMaxCondition(1000.000000)\nDorsalPhaser.SetCritical(0)\nDorsalPhaser.SetTargetable(1)\nDorsalPhaser.SetPrimary(1)\nDorsalPhaser.SetPosition(0.000000, 0.840000, 0.550000)\nDorsalPhaser.SetPosition2D(18.000000, 30.000000)\nDorsalPhaser.SetRepairComplexity(3.000000)\nDorsalPhaser.SetDisabledPercentage(0.750000)\nDorsalPhaser.SetRadius(0.250000)\nDorsalPhaser.SetDumbfire(0)\nDorsalPhaser.SetWeaponID(5)\nDorsalPhaser.SetGroups(0)\nDorsalPhaser.SetDamageRadiusFactor(0.200000)\nDorsalPhaser.SetIconNum(364)\nDorsalPhaser.SetIconPositionX(63.000000)\nDorsalPhaser.SetIconPositionY(25.000000)\nDorsalPhaser.SetIconAboveShip(1)\nDorsalPhaser.SetFireSound(\"Type X Phaser\")\nDorsalPhaser.SetMaxCharge(1.500000)\nDorsalPhaser.SetMaxDamage(1800.000000)\nDorsalPhaser.SetMaxDamageDistance(100.000000)\nDorsalPhaser.SetMinFiringCharge(1.000000)\nDorsalPhaser.SetNormalDischargeRate(1.000000)\nDorsalPhaser.SetRechargeRate(0.750000)\nDorsalPhaser.SetIndicatorIconNum(510)\nDorsalPhaser.SetIndicatorIconPositionX(57.000000)\nDorsalPhaser.SetIndicatorIconPositionY(20.000000)\nDorsalPhaserForward = 
App.TGPoint3()\nDorsalPhaserForward.SetXYZ(0.000000, 1.000000, 0.000000)\nDorsalPhaserUp = App.TGPoint3()\nDorsalPhaserUp.SetXYZ(0.000000, 0.000000, 1.000000)\nDorsalPhaser.SetOrientation(DorsalPhaserForward, DorsalPhaserUp)\nDorsalPhaser.SetWidth(1.670000)\nDorsalPhaser.SetLength(1.260000)\nDorsalPhaser.SetArcWidthAngles(-2.530728, 2.530728)\nDorsalPhaser.SetArcHeightAngles(-0.052360, 1.570796)\nDorsalPhaser.SetPhaserTextureStart(0)\nDorsalPhaser.SetPhaserTextureEnd(7)\nDorsalPhaser.SetPhaserWidth(0.300000)\nkColor = App.TGColorA()\nkColor.SetRGBA(0.639216, 0.000000, 0.000000, 1.000000)\nDorsalPhaser.SetOuterShellColor(kColor)\nkColor.SetRGBA(0.992157, 0.192157, 0.054902, 1.000000)\nDorsalPhaser.SetInnerShellColor(kColor)\nkColor.SetRGBA(0.592157, 0.592157, 0.000000, 1.000000)\nDorsalPhaser.SetOuterCoreColor(kColor)\nkColor.SetRGBA(0.803922, 0.803922, 0.000000, 1.000000)\nDorsalPhaser.SetInnerCoreColor(kColor)\nDorsalPhaser.SetNumSides(6)\nDorsalPhaser.SetMainRadius(0.070000)\nDorsalPhaser.SetTaperRadius(0.020000)\nDorsalPhaser.SetCoreScale(0.300000)\nDorsalPhaser.SetTaperRatio(0.250000)\nDorsalPhaser.SetTaperMinLength(30.000000)\nDorsalPhaser.SetTaperMaxLength(30.000000)\nDorsalPhaser.SetLengthTextureTilePerUnit(0.050000)\nDorsalPhaser.SetPerimeterTile(1.000000)\nDorsalPhaser.SetTextureSpeed(2.000000)\nDorsalPhaser.SetTextureName(\"data/Dur_Phaser.tga\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(DorsalPhaser)\n#################################################\nPhaserV1 = App.PhaserProperty_Create(\"Phaser V1\")\n\nPhaserV1.SetMaxCondition(1000.000000)\nPhaserV1.SetCritical(0)\nPhaserV1.SetTargetable(1)\nPhaserV1.SetPrimary(1)\nPhaserV1.SetPosition(0.000000, 0.830000, 0.240000)\nPhaserV1.SetPosition2D(18.000000, 40.000000)\nPhaserV1.SetRepairComplexity(3.000000)\nPhaserV1.SetDisabledPercentage(0.750000)\nPhaserV1.SetRadius(0.250000)\nPhaserV1.SetDumbfire(0)\nPhaserV1.SetWeaponID(1)\nPhaserV1.SetGroups(0)\nPhaserV1.SetDamageRadiusFactor(0.200000)\nPhaserV1.SetIconNum(364)\nPhaserV1.SetIconPositionX(63.000000)\nPhaserV1.SetIconPositionY(40.000000)\nPhaserV1.SetIconAboveShip(0)\nPhaserV1.SetFireSound(\"Type X Phaser\")\nPhaserV1.SetMaxCharge(1.500000)\nPhaserV1.SetMaxDamage(1800.000000)\nPhaserV1.SetMaxDamageDistance(100.000000)\nPhaserV1.SetMinFiringCharge(1.000000)\nPhaserV1.SetNormalDischargeRate(1.000000)\nPhaserV1.SetRechargeRate(0.750000)\nPhaserV1.SetIndicatorIconNum(510)\nPhaserV1.SetIndicatorIconPositionX(57.000000)\nPhaserV1.SetIndicatorIconPositionY(35.000000)\nPhaserV1Forward = App.TGPoint3()\nPhaserV1Forward.SetXYZ(0.000000, 0.999950, -0.009999)\nPhaserV1Up = App.TGPoint3()\nPhaserV1Up.SetXYZ(0.000000, 0.000000, 1.000000)\nPhaserV1.SetOrientation(PhaserV1Forward, PhaserV1Up)\nPhaserV1.SetWidth(1.440000)\nPhaserV1.SetLength(1.070000)\nPhaserV1.SetArcWidthAngles(-2.356195, 2.356195)\nPhaserV1.SetArcHeightAngles(-0.436332, 0.052360)\nPhaserV1.SetPhaserTextureStart(0)\nPhaserV1.SetPhaserTextureEnd(7)\nPhaserV1.SetPhaserWidth(0.300000)\nkColor = App.TGColorA()\nkColor.SetRGBA(0.639216, 0.000000, 0.000000, 1.000000)\nPhaserV1.SetOuterShellColor(kColor)\nkColor.SetRGBA(0.992157, 0.192157, 0.054902, 1.000000)\nPhaserV1.SetInnerShellColor(kColor)\nkColor.SetRGBA(0.592157, 0.592157, 0.000000, 1.000000)\nPhaserV1.SetOuterCoreColor(kColor)\nkColor.SetRGBA(0.803922, 0.803922, 0.000000, 
1.000000)\nPhaserV1.SetInnerCoreColor(kColor)\nPhaserV1.SetNumSides(6)\nPhaserV1.SetMainRadius(0.070000)\nPhaserV1.SetTaperRadius(0.020000)\nPhaserV1.SetCoreScale(0.300000)\nPhaserV1.SetTaperRatio(0.250000)\nPhaserV1.SetTaperMinLength(30.000000)\nPhaserV1.SetTaperMaxLength(30.000000)\nPhaserV1.SetLengthTextureTilePerUnit(0.050000)\nPhaserV1.SetPerimeterTile(1.000000)\nPhaserV1.SetTextureSpeed(2.000000)\nPhaserV1.SetTextureName(\"data/Dur_Phaser.tga\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(PhaserV1)\n#################################################\nHull = App.HullProperty_Create(\"Hull\")\n\nHull.SetMaxCondition(14000.000000)\nHull.SetCritical(1)\nHull.SetTargetable(0)\nHull.SetPrimary(1)\nHull.SetPosition(0.000000, 0.000000, 0.000000)\nHull.SetPosition2D(64.000000, 40.000000)\nHull.SetRepairComplexity(1.000000)\nHull.SetDisabledPercentage(0.000000)\nHull.SetRadius(3.000000)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(Hull)\n#################################################\nMedicalPod = App.HullProperty_Create(\"Medical Pod\")\n\nMedicalPod.SetMaxCondition(4000.000000)\nMedicalPod.SetCritical(0)\nMedicalPod.SetTargetable(1)\nMedicalPod.SetPrimary(0)\nMedicalPod.SetPosition(0.000000, -1.370000, 0.800000)\nMedicalPod.SetPosition2D(64.000000, 60.000000)\nMedicalPod.SetRepairComplexity(2.000000)\nMedicalPod.SetDisabledPercentage(0.000000)\nMedicalPod.SetRadius(0.350000)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(MedicalPod)\n#################################################\nSensorArray = App.SensorProperty_Create(\"Sensor Array\")\n\nSensorArray.SetMaxCondition(5000.000000)\nSensorArray.SetCritical(0)\nSensorArray.SetTargetable(1)\nSensorArray.SetPrimary(1)\nSensorArray.SetPosition(0.000000, 0.200000, -0.100000)\nSensorArray.SetPosition2D(64.000000, 10.000000)\nSensorArray.SetRepairComplexity(3.000000)\nSensorArray.SetDisabledPercentage(0.500000)\nSensorArray.SetRadius(0.300000)\nSensorArray.SetNormalPowerPerSecond(800.000000)\nSensorArray.SetBaseSensorRange(2000.000000)\nSensorArray.SetMaxProbes(50)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(SensorArray)\n#################################################\nShieldGenerator = App.ShieldProperty_Create(\"Shield Generator\")\n\nShieldGenerator.SetMaxCondition(9000.000000)\nShieldGenerator.SetCritical(0)\nShieldGenerator.SetTargetable(1)\nShieldGenerator.SetPrimary(1)\nShieldGenerator.SetPosition(0.000000, -0.650000, -0.275000)\nShieldGenerator.SetPosition2D(64.000000, 40.000000)\nShieldGenerator.SetRepairComplexity(3.000000)\nShieldGenerator.SetDisabledPercentage(0.500000)\nShieldGenerator.SetRadius(0.100000)\nShieldGenerator.SetNormalPowerPerSecond(1200.000000)\nShieldGeneratorShieldGlowColor = App.TGColorA()\nShieldGeneratorShieldGlowColor.SetRGBA(0.203922, 0.631373, 1.000000, 0.466667)\nShieldGenerator.SetShieldGlowColor(ShieldGeneratorShieldGlowColor)\nShieldGenerator.SetShieldGlowDecay(1.000000)\nShieldGenerator.SetMaxShields(ShieldGenerator.FRONT_SHIELDS, 7538.459961)\nShieldGenerator.SetMaxShields(ShieldGenerator.REAR_SHIELDS, 5384.620117)\nShieldGenerator.SetMaxShields(ShieldGenerator.TOP_SHIELDS, 7538.459961)\nShieldGenerator.SetMaxShields(ShieldGenerator.BOTTOM_SHIELDS, 7538.459961)\nShieldGenerator.SetMaxShields(ShieldGenerator.LEFT_SHIELDS, 5384.620117)\nShieldGenerator.SetMaxShields(ShieldGenerator.RIGHT_SHIELDS, 5384.620117)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.FRONT_SHIELDS, 12.564100)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.REAR_SHIELDS, 
8.974360)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.TOP_SHIELDS, 12.564100)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.BOTTOM_SHIELDS, 12.564100)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.LEFT_SHIELDS, 8.974360)\nShieldGenerator.SetShieldChargePerSecond(ShieldGenerator.RIGHT_SHIELDS, 8.974360)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ShieldGenerator)\n#################################################\nWarpCore = App.PowerProperty_Create(\"Warp Core\")\n\nWarpCore.SetMaxCondition(7000.000000)\nWarpCore.SetCritical(1)\nWarpCore.SetTargetable(1)\nWarpCore.SetPrimary(1)\nWarpCore.SetPosition(0.000000, -0.280000, -0.200000)\nWarpCore.SetPosition2D(64.000000, 85.000000)\nWarpCore.SetRepairComplexity(3.000000)\nWarpCore.SetDisabledPercentage(0.300000)\nWarpCore.SetRadius(0.200000)\nWarpCore.SetMainBatteryLimit(600000.000000)\nWarpCore.SetBackupBatteryLimit(200000.000000)\nWarpCore.SetMainConduitCapacity(6000.000000)\nWarpCore.SetBackupConduitCapacity(750.000000)\nWarpCore.SetPowerOutput(5000.000000)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(WarpCore)\n#################################################\nImpulseEngines = App.ImpulseEngineProperty_Create(\"Impulse Engines\")\n\nImpulseEngines.SetMaxCondition(14000.000000)\nImpulseEngines.SetCritical(0)\nImpulseEngines.SetTargetable(0)\nImpulseEngines.SetPrimary(1)\nImpulseEngines.SetPosition(0.000000, 0.000000, 0.000000)\nImpulseEngines.SetPosition2D(60.000000, 60.000000)\nImpulseEngines.SetRepairComplexity(3.000000)\nImpulseEngines.SetDisabledPercentage(0.500000)\nImpulseEngines.SetRadius(0.010000)\nImpulseEngines.SetNormalPowerPerSecond(1200.000000)\nImpulseEngines.SetMaxAccel(10.170746)\nImpulseEngines.SetMaxAngularAccel(0.500000)\nImpulseEngines.SetMaxAngularVelocity(0.500000)\nImpulseEngines.SetMaxSpeed(10.170746)\nImpulseEngines.SetEngineSound(\"Federation Engines\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(ImpulseEngines)\n#################################################\nPhaserArrays = App.WeaponSystemProperty_Create(\"Phaser Arrays\")\n\nPhaserArrays.SetMaxCondition(14000.000000)\nPhaserArrays.SetCritical(0)\nPhaserArrays.SetTargetable(0)\nPhaserArrays.SetPrimary(1)\nPhaserArrays.SetPosition(0.000000, 0.000000, 0.000000)\nPhaserArrays.SetPosition2D(64.000000, 94.000000)\nPhaserArrays.SetRepairComplexity(3.000000)\nPhaserArrays.SetDisabledPercentage(0.750000)\nPhaserArrays.SetRadius(0.010000)\nPhaserArrays.SetNormalPowerPerSecond(150.000000)\nPhaserArrays.SetWeaponSystemType(PhaserArrays.WST_PHASER)\nPhaserArrays.SetSingleFire(0)\nPhaserArrays.SetAimedWeapon(0)\nkFiringChainString = App.TGString()\nkFiringChainString.SetString(\"\")\nPhaserArrays.SetFiringChainString(kFiringChainString)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(PhaserArrays)\n#################################################\nDamageControl = App.RepairSubsystemProperty_Create(\"Repair\")\n\nDamageControl.SetMaxCondition(14000.000000)\nDamageControl.SetCritical(0)\nDamageControl.SetTargetable(0)\nDamageControl.SetPrimary(1)\nDamageControl.SetPosition(0.000000, 0.000000, 0.000000)\nDamageControl.SetPosition2D(64.000000, 
40.000000)\nDamageControl.SetRepairComplexity(2.000000)\nDamageControl.SetDisabledPercentage(0.100000)\nDamageControl.SetRadius(0.050000)\nDamageControl.SetNormalPowerPerSecond(1.000000)\nDamageControl.SetMaxRepairPoints(80.000000)\nDamageControl.SetNumRepairTeams(4)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(DamageControl)\n#################################################\nPortWarp = App.EngineProperty_Create(\"Port Warp\")\n\nPortWarp.SetMaxCondition(7000.000000)\nPortWarp.SetCritical(0)\nPortWarp.SetTargetable(1)\nPortWarp.SetPrimary(1)\nPortWarp.SetPosition(-1.250000, -0.600000, -0.250000)\nPortWarp.SetPosition2D(27.000000, 95.000000)\nPortWarp.SetRepairComplexity(3.000000)\nPortWarp.SetDisabledPercentage(0.500000)\nPortWarp.SetRadius(1.100000)\nPortWarp.SetEngineType(PortWarp.EP_WARP)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(PortWarp)\n#################################################\nStarWarp = App.EngineProperty_Create(\"Star Warp\")\n\nStarWarp.SetMaxCondition(7000.000000)\nStarWarp.SetCritical(0)\nStarWarp.SetTargetable(1)\nStarWarp.SetPrimary(1)\nStarWarp.SetPosition(1.250000, -0.600000, -0.250000)\nStarWarp.SetPosition2D(102.000000, 95.000000)\nStarWarp.SetRepairComplexity(3.000000)\nStarWarp.SetDisabledPercentage(0.500000)\nStarWarp.SetRadius(1.100000)\nStarWarp.SetEngineType(StarWarp.EP_WARP)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(StarWarp)\n#################################################\nWarpEngines = App.WarpEngineProperty_Create(\"Warp Engines\")\n\nWarpEngines.SetMaxCondition(14000.000000)\nWarpEngines.SetCritical(0)\nWarpEngines.SetTargetable(0)\nWarpEngines.SetPrimary(1)\nWarpEngines.SetPosition(0.000000, 0.000000, 0.000000)\nWarpEngines.SetPosition2D(69.000000, 60.000000)\nWarpEngines.SetRepairComplexity(3.000000)\nWarpEngines.SetDisabledPercentage(0.500000)\nWarpEngines.SetRadius(0.010000)\nWarpEngines.SetNormalPowerPerSecond(0.000000)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(WarpEngines)\n#################################################\nPortImpulse = App.EngineProperty_Create(\"Port Impulse\")\n\nPortImpulse.SetMaxCondition(4000.000000)\nPortImpulse.SetCritical(0)\nPortImpulse.SetTargetable(1)\nPortImpulse.SetPrimary(1)\nPortImpulse.SetPosition(-1.200000, -0.750000, 0.390000)\nPortImpulse.SetPosition2D(44.000000, 60.000000)\nPortImpulse.SetRepairComplexity(3.000000)\nPortImpulse.SetDisabledPercentage(0.500000)\nPortImpulse.SetRadius(0.150000)\nPortImpulse.SetEngineType(PortImpulse.EP_IMPULSE)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(PortImpulse)\n#################################################\nStarImpulse = App.EngineProperty_Create(\"Star Impulse\")\n\nStarImpulse.SetMaxCondition(4000.000000)\nStarImpulse.SetCritical(0)\nStarImpulse.SetTargetable(1)\nStarImpulse.SetPrimary(1)\nStarImpulse.SetPosition(1.200000, -0.750000, 0.390000)\nStarImpulse.SetPosition2D(85.000000, 60.000000)\nStarImpulse.SetRepairComplexity(3.000000)\nStarImpulse.SetDisabledPercentage(0.500000)\nStarImpulse.SetRadius(0.150000)\nStarImpulse.SetEngineType(StarImpulse.EP_IMPULSE)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(StarImpulse)\n#################################################\nBridge = App.HullProperty_Create(\"Bridge\")\n\nBridge.SetMaxCondition(6000.000000)\nBridge.SetCritical(1)\nBridge.SetTargetable(1)\nBridge.SetPrimary(0)\nBridge.SetPosition(0.000000, 0.590000, 0.722000)\nBridge.SetPosition2D(64.000000, 
25.000000)\nBridge.SetRepairComplexity(1.000000)\nBridge.SetDisabledPercentage(0.250000)\nBridge.SetRadius(0.100000)\nApp.g_kModelPropertyManager.RegisterLocalTemplate(Bridge)\n#################################################\nPhaserV2 = App.PhaserProperty_Create(\"Phaser V2\")\n\nPhaserV2.SetMaxCondition(1000.000000)\nPhaserV2.SetCritical(0)\nPhaserV2.SetTargetable(1)\nPhaserV2.SetPrimary(1)\nPhaserV2.SetPosition(0.000000, -0.075000, 0.125000)\nPhaserV2.SetPosition2D(66.000000, 88.000000)\nPhaserV2.SetRepairComplexity(3.000000)\nPhaserV2.SetDisabledPercentage(0.750000)\nPhaserV2.SetRadius(0.250000)\nPhaserV2.SetDumbfire(0)\nPhaserV2.SetWeaponID(0)\nPhaserV2.SetGroups(0)\nPhaserV2.SetDamageRadiusFactor(0.200000)\nPhaserV2.SetIconNum(363)\nPhaserV2.SetIconPositionX(63)\nPhaserV2.SetIconPositionY(65)\nPhaserV2.SetIconAboveShip(1)\nPhaserV2.SetFireSound(\"Type X Phaser\")\nPhaserV2.SetMaxCharge(1.500000)\nPhaserV2.SetMaxDamage(1800.000000)\nPhaserV2.SetMaxDamageDistance(100.000000)\nPhaserV2.SetMinFiringCharge(1.000000)\nPhaserV2.SetNormalDischargeRate(1.000000)\nPhaserV2.SetRechargeRate(0.500000)\nPhaserV2.SetIndicatorIconNum(511)\nPhaserV2.SetIndicatorIconPositionX(57)\nPhaserV2.SetIndicatorIconPositionY(60)\nPhaserV2Forward = App.TGPoint3()\nPhaserV2Forward.SetXYZ(0.000000, 0.000000, -1.000000)\nPhaserV2Up = App.TGPoint3()\nPhaserV2Up.SetXYZ(0.000000, -1.000000, 0.000000)\nPhaserV2.SetOrientation(PhaserV2Forward, PhaserV2Up)\nPhaserV2.SetWidth(0.560000)\nPhaserV2.SetLength(0.450000)\nPhaserV2.SetArcWidthAngles(-0.436332, 0.436332)\nPhaserV2.SetArcHeightAngles(-1.134464, 1.134464)\nPhaserV2.SetPhaserTextureStart(0)\nPhaserV2.SetPhaserTextureEnd(7)\nPhaserV2.SetPhaserWidth(0.300000)\nkColor = App.TGColorA()\nkColor.SetRGBA(0.639216, 0.000000, 0.000000, 1.000000)\nPhaserV2.SetOuterShellColor(kColor)\nkColor.SetRGBA(0.992157, 0.192157, 0.054902, 1.000000)\nPhaserV2.SetInnerShellColor(kColor)\nkColor.SetRGBA(0.592157, 0.592157, 0.000000, 1.000000)\nPhaserV2.SetOuterCoreColor(kColor)\nkColor.SetRGBA(0.803922, 0.803922, 0.000000, 1.000000)\nPhaserV2.SetInnerCoreColor(kColor)\nPhaserV2.SetNumSides(6)\nPhaserV2.SetMainRadius(0.070000)\nPhaserV2.SetTaperRadius(0.020000)\nPhaserV2.SetCoreScale(0.300000)\nPhaserV2.SetTaperRatio(0.250000)\nPhaserV2.SetTaperMinLength(30.000000)\nPhaserV2.SetTaperMaxLength(30.000000)\nPhaserV2.SetLengthTextureTilePerUnit(0.050000)\nPhaserV2.SetPerimeterTile(1.000000)\nPhaserV2.SetTextureSpeed(2.000000)\nPhaserV2.SetTextureName(\"data/Dur_Phaser.tga\")\nApp.g_kModelPropertyManager.RegisterLocalTemplate(PhaserV2)\n#################################################\n# Property load function.\ndef LoadPropertySet(pObj):\n\t\"Sets up the object's properties.\"\n\tprop = App.g_kModelPropertyManager.FindByName(\"Hull\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Bridge\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Warp Core\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Shield Generator\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Sensor Array\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != 
None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Medical Pod\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Impulse Engines\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Port Impulse\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Star Impulse\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Warp Engines\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Port Warp\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Star Warp\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Phaser Arrays\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Phaser D\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Phaser V1\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Phaser V2\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Repair\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Tractor Beams\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Tractor F\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Tractor A\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Shuttle Bay\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"Peregrine\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"ViewscreenForward\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"ViewscreenBack\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"ViewscreenLeft\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = 
App.g_kModelPropertyManager.FindByName(\"ViewscreenRight\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"ViewscreenUp\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"ViewscreenDown\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n\tprop = App.g_kModelPropertyManager.FindByName(\"FirstPersonCamera\", App.TGModelPropertyManager.LOCAL_TEMPLATES)\n\tif (prop != None):\n\t\tpObj.AddToSet(\"Scene Root\", prop)\n","sub_path":"scripts/ships/Hardpoints/E6M2NebulaVar4.py","file_name":"E6M2NebulaVar4.py","file_ext":"py","file_size_in_byte":34078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411520961","text":"import time\nimport mpi4py\nmpi4py.rc.initialize = False\nmpi4py.rc.finalize = False\nfrom mpi4py import MPI\nimport pandas\nimport sys\nsys.path.append(\"../../python_utilities\")\nfrom NN_architecture_MPI import *\nfrom NN_architecture_MPI_recycle import *\n\nMPI.Init()\n\nmpi_comm_size = MPI.COMM_WORLD.Get_size()\nmpi_rank = MPI.COMM_WORLD.Get_rank()\n\nif MPI.Is_initialized() and 0==mpi_rank :\n print(\"MPI is initialized with \", mpi_comm_size, \" processes\", flush=True)\n\ndf_array = []\n\nif 0 == mpi_rank:\n print(\"MPI rank: \", mpi_rank, \" - starting importing data\", flush=True)\n df = pandas.read_csv('../../data/forest_fires.txt')\n print(\"MPI rank: \", mpi_rank, \" - finished importing data\", flush = True)\n\n #### Convert a a pandas dataframe into a numpy array\n df_array = df.values \n\n for row in range(df_array.shape[0]):\n if df_array[row,2]=='jan':\n df_array[row,2] = 1\n elif df_array[row,2]=='feb':\n df_array[row,2] = 2\n elif df_array[row,2]=='mar':\n df_array[row,2] = 3\n elif df_array[row,2]=='apr':\n df_array[row,2] = 4\n elif df_array[row,2]=='may':\n df_array[row,2] = 5\n elif df_array[row,2]=='jun':\n df_array[row,2] = 6\n elif df_array[row,2]=='jul':\n df_array[row,2] = 7\n elif df_array[row,2]=='aug':\n df_array[row,2] = 8\n elif df_array[row,2]=='sep':\n df_array[row,2] = 9\n elif df_array[row,2]=='oct':\n df_array[row,2] = 10\n elif df_array[row,2]=='nov':\n df_array[row,2] = 11\n elif df_array[row,2]=='dec':\n df_array[row,2] = 12\n if df_array[row,3]=='mon':\n df_array[row,3] = 1\n elif df_array[row,3]=='tue':\n df_array[row,3] = 2\n elif df_array[row,3]=='wed':\n df_array[row,3] = 3\n elif df_array[row,3]=='thu':\n df_array[row,3] = 4\n elif df_array[row,3]=='fri':\n df_array[row,3] = 5\n elif df_array[row,3]=='sat':\n df_array[row,3] = 6\n elif df_array[row,3]=='sun':\n df_array[row,3] = 7\n df_array[row,12] = np.log(1+df_array[row,12])\n\nif 0 == mpi_rank:\n print(\"broadcast of data started\", flush=True)\n\ndf_array = MPI.COMM_WORLD.bcast(df_array, root=0) \n\nif 0 == mpi_rank:\n print(\"broadcast of data completed\", flush=True)\n\nX_sample = df_array[:,:-1]\ny_vals = df_array[:,-1]\ny_vals = np.reshape(y_vals, (len(y_vals),1))\n\nscore_threshold = 0.9\nmax_num_layers = 5\nmodel_evals = 1\n\n\"\"\"\nif 0 == mpi_rank:\n print(\"Model optimization without recycling\")\n\nmodel = model_adaptivity_MPI(X_sample, y_vals, score_threshold, model_evals, max_num_layers, early_stopping=True, model_type=\"regression\", probability_model=False, adjust_eval = False, verbose=False)\n\nif 0 == mpi_rank:\n print(\"Specifics of 
the best model selected without recycling\")\n model.summary()\n\nif 0 == mpi_rank:\n print(\"##################################################################\", flush=True)\n print(\"##################################################################\", flush=True)\n print(\"##################################################################\", flush=True)\n\"\"\"\n\nif 0 == mpi_rank:\n print(\"Model optimization with recycling\", flush=True)\n\nstart_adaptive = time.time()\n\nmodel_recycle = model_adaptivity_MPI_recycle(X_sample, y_vals, score_threshold, model_evals, max_num_layers, early_stopping=True, model_type=\"regression\", probability_model=False, coeff_det_type=2, verbose=False)\n\nend_adaptive = time.time()\n\nMPI.COMM_WORLD.Barrier()\n\ntime_adaptive_local = end_adaptive - start_adaptive\ntime_adaptive_global = 0.0\ntime_adaptive_global = MPI.COMM_WORLD.allreduce(time_adaptive_local, op=MPI.MAX)\n\nif 0 == mpi_rank:\n print(\"Specifics of the best model selected with recycling\", flush=True)\n model_recycle.summary()\n\nif 0 == mpi_rank:\n print(\"##################################################################\", flush=True)\n\nif 0 == mpi_rank:\n print(\"Time to perform adaptive model selection: \", time_adaptive_global, \" (s)\", flush=True)\n\n\nMPI.Finalize()\n\n","sub_path":"MC_NN_Massi/keras_exercises/MPI_experiments/forest_fires_MPI.py","file_name":"forest_fires_MPI.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"79816509","text":"import csv\n\ntorso_start_arr = ['forward', 'backward', 'neutral']\ntorso_start_deg_arr = ['small', 'medium', 'large']\ntorso_end_arr = ['forward', 'backward', 'neutral']\ntorso_end_deg_arr = ['small', 'medium', 'large']\ntorso_speed_arr = ['slow', 'medium', 'fast']\nleft_arm_start_arr = ['forward', 'sides', 'high', 'out']\nleft_arm_end_arr = ['forward', 'sides', 'high', 'out']\nleft_arm_speed_arr = ['slow', 'medium', 'fast']\nright_arm_start_arr = ['forward', 'sides', 'high', 'out']\nright_arm_end_arr = ['forward', 'sides', 'high', 'out']\nright_arm_speed_arr = ['slow', 'medium', 'fast']\n\nwith open('torso_arm_dof.csv', 'w', newline='') as csvfile:\n fieldnames = ['torso_start', 'torso_start_deg', 'torso_end', 'torso_end_deg', 'torso_speed', 'left_arm_start', 'left_arm_end', 'left_arm_speed', 'right_arm_start', 'right_arm_end', 'right_arm_speed']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n options = 0\n for torso_start in torso_start_arr:\n for torso_start_deg in torso_start_deg_arr:\n for torso_end in torso_end_arr:\n for torso_end_deg in torso_end_deg_arr:\n for torso_speed in torso_speed_arr:\n for left_arm_start in left_arm_start_arr:\n for left_arm_end in left_arm_end_arr:\n for left_arm_speed in left_arm_speed_arr:\n for right_arm_start in right_arm_start_arr:\n for right_arm_end in right_arm_end_arr:\n for right_arm_speed in right_arm_speed_arr:\n\n add_flag = True\n\n #Torso\n\n #Degree does not matter for neutral torso\n if (torso_start == 'neutral') and (not torso_start_deg == 'small'):\n add_flag = False\n if (torso_end == 'neutral') and (not torso_end_deg == 'small'):\n add_flag = False\n\n #Speed does not matter if start and end position are the same\n if (torso_start == torso_end) and (torso_start_deg == torso_end_deg) and (not torso_end_deg == 'small'):\n add_flag = False\n\n #The torso cannot start and end in the same direction (i.e. 
forward small to forward large)\n if (torso_start == torso_end):\n add_flag = False\n\n #Only two speeds of the torso (slow and fast)\n if (torso_speed == 'medium'):\n add_flag = False\n\n #Only two degrees of the torso (small and large)\n if (torso_start_deg == 'medium') or (torso_end_deg == 'medium'):\n add_flag = False\n\n #Can only start at a “neutral” position\n if (not torso_start == 'neutral'):\n add_flag = False\n\n #Arms\n\n #Speed does not matter for stationary arm\n if (left_arm_start == left_arm_end) and (not left_arm_speed == 'slow'):\n add_flag = False\n if (right_arm_start == right_arm_end) and (not right_arm_speed == 'slow'):\n add_flag = False\n\n #Arms are always symmetric in speed\n\n #Only two speeds (slow and fast)\n if (left_arm_speed == 'medium') or (right_arm_speed == 'medium'):\n add_flag = False\n\n #Eliminate all left arm alone actions (for symmetry)\n if (right_arm_start == right_arm_end) and (not left_arm_start == left_arm_end):\n add_flag = False\n\n #If both arms are moving, must be symmetric\n if (not right_arm_start == right_arm_end) and (not left_arm_start == left_arm_end):\n if (not right_arm_start == left_arm_start) or (not left_arm_end == right_arm_end) or (not right_arm_speed == left_arm_speed):\n add_flag = False\n\n #Eliminate the “out” position\n if (left_arm_start == 'out') or (right_arm_start == 'out') or (left_arm_end == 'out') or (right_arm_end == 'out'):\n add_flag = False\n\n #Do not start from a high position\n if (left_arm_start == 'high') or (right_arm_start == 'high'):\n add_flag = False\n\n if add_flag:\n options += 1\n writer.writerow({'torso_start': torso_start, 'torso_start_deg': torso_start_deg, 'torso_end': torso_end, 'torso_end_deg': torso_end_deg, 'torso_speed': torso_speed, 'left_arm_start': left_arm_start, 'left_arm_end': left_arm_end, 'left_arm_speed': left_arm_speed, 'right_arm_start': right_arm_start, 'right_arm_end': right_arm_end, 'right_arm_speed': right_arm_speed})\n # print({'left_arm_start': left_arm_start, 'left_arm_end': left_arm_end, 'left_arm_speed': left_arm_speed, 'right_arm_start': right_arm_start, 'right_arm_end': right_arm_end, 'right_arm_speed': right_arm_speed})\n\n\nprint(options)\n","sub_path":"dof_calc.py","file_name":"dof_calc.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611063450","text":"try:\n import requests\nexcept ModuleNotFoundError:\n os.system('pip3 install --user requests')\n\n import site\n from importlib import reload\n reload(site)\n\nfrom cli.utils import console\n\ncreate_events_mutation = \"\"\"\n mutation ($objects: [events_create_input!]!) {\n create_events(objects: $objects)\n }\n\"\"\"\n\ndef graphql_request(url, query, variables):\n return requests.post(url, json={\n 'query': query,\n 'variables': variables\n }, timeout=5)\n\ndef create_cli_event(url, config):\n try:\n objects = [{\n **config,\n 'data': {\n 'event_type': 'CLI_ACTION'\n }\n }]\n\n return graphql_request(url, create_events_mutation, {\n 'objects': objects\n })\n except:\n msg = 'Help us improve MLCraft! The cli collects anonymized usage stats which\\nallow us to keep improving MLCraft at warp speed. 
To opt-out or read more,\\nvisit https://github.com/mlcraft-io/mlcraft/tree/main/docs/telemetry\\n'\n        console.log(msg)\n","sub_path":"cli/telemetry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54910209","text":"'''\nCreated on 2018. 12. 17.\n\n@author: gdj-4\n'''\na,b,hap = 0,0,0\n\nwhile(True) : #the lines must be aligned properly.\n    print(\"Enter the first number\")\n    a = int(input())\n    if a==0 :\n        break\n    print(\"Enter the second number\")\n    b = int(input())\n    hap = a + b\n    print(\"%d + %d = %d\" % (a,b,hap))\nprint(\"Program terminated\")\n","sub_path":"java/Python/workspace/pythonex/whileex2.py","file_name":"whileex2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67564794","text":"import csv\nfrom . import kAnonymity\n\n\ndef master(dataWithMasking, file_name, lines, nums, kmin):\n    \"\"\" perform simple \"\"\"\n    \"\"\" import the column selection \"\"\"\n    anonymizedAndMaskedData = kAnonymity.master(dataWithMasking, file_name, lines, nums, kmin)\n    # write the info in a file because the Apriori is not updated #\n    with open(file_name, \"w\") as f1:\n        mywriter = csv.writer(f1, delimiter=',', quotechar='|')\n        for line in range(len(anonymizedAndMaskedData)):\n            mywriter.writerow(anonymizedAndMaskedData[line])\n    f1.close()\n\n    return\n\n\nif __name__ == '__main__':\n    master()\n","sub_path":"Anonym_methods/AnonymiseTheData.py","file_name":"AnonymiseTheData.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433745623","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\n@Date : Fri Nov 14 13:20:38 2014 \\n\n@Author : Erwan Ledoux \\n\\n\n\n\n\nThe Cloner\n\n\"\"\"\n\n#\nimport ShareYourSystem as SYS\nBaseModuleStr=\"ShareYourSystem.Objects.Caller\"\nDecorationModuleStr=\"ShareYourSystem.Classors.Classer\"\nSYS.setSubModule(globals())\n#\n\n#\nimport collections\nimport copy\n\n#\n\n#\nDoStrsList=[\"Cloner\",\"Clone\",\"Cloning\",\"Cloned\"]\n#\n\n#\nCloningIdStringsList=[]\n#\n\ndef getCopiedMutableVariableWithMutableVariable(_MutableVariable,**_KwargVariablesDict):\n\n\tif 'CloningIdsDict' not in _KwargVariablesDict:\n\t\t_KwargVariablesDict['CloningIdsDict']={}\n\tif 'CloningNotCopyKeyStringsList' not in _KwargVariablesDict:\n\t\t_KwargVariablesDict['CloningNotCopyKeyStringsList']=[]\n\n\t#get the type\n\tType=type(_MutableVariable)\n\n\t#Debug\n\tprint('_MutableVariable is ',_MutableVariable)\n\tprint('')\n\n\t#itemized variable case\n\tif Type in [dict,collections.OrderedDict]:\n\t\treturn Type(\n\t\t\t\t\tmap(\n\t\t\t\t\t\tlambda __ItemTuple:\n\t\t\t\t\t\t(\n\t\t\t\t\t\t\t__ItemTuple[0],\n\t\t\t\t\t\t\t__ItemTuple[1].clone(\n\t\t\t\t\t\t\t\t_KwargVariablesDict['CloningIdsDict'],\n\t\t\t\t\t\t\t\t_KwargVariablesDict['CloningNotCopyKeyStringsList']\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tif hasattr(__ItemTuple[1],'clone')\n\t\t\t\t\t\t\telse getCopiedMutableVariableWithMutableVariable(\n\t\t\t\t\t\t\t\t__ItemTuple[1],\n\t\t\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tif type(__ItemTuple[1]) in [list,tuple,set,dict,collections.OrderedDict]\n\t\t\t\t\t\t\telse __ItemTuple[1]\n\t\t\t\t\t\t),\n\t\t\t\t\t\t_MutableVariable.items()\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t#listed variable case\n\telif Type in [list,tuple,set]:\n\n\t\treturn Type(\n\t\t\tmap(\n\t\t\t\tlambda 
__ListedVariable:\n\t\t\t\t__ListedVariable.clone(\n\t\t\t\t\t\t\t\t_KwargVariablesDict['CloningIdsDict'],\n\t\t\t\t\t\t\t\t_KwargVariablesDict['CloningNotCopyKeyStringsList']\n\t\t\t\t\t\t\t)\n\t\t\t\tif hasattr(__ListedVariable,'clone')\n\t\t\t\telse getCopiedMutableVariableWithMutableVariable(\n\t\t\t\t\t__ListedVariable,\n\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t)\n\t\t\t\tif type(__ListedVariable) in [list,tuple,set,dict,collections.OrderedDict]\n\t\t\t\telse __ListedVariable,\n\t\t\t\t_MutableVariable\n\t\t\t)\n\t\t)\n\n\t#other\n\telse:\n\n\t\treturn _MutableVariable\n\t\t\n#\n@DecorationClass(**{'DoingGetBool':True})\nclass ClonerClass(BaseClass):\n\t\n\t#Definition\n\tRepresentingKeyStrsList=[\n\t\t\t\t\t\t\t\t'CloningIdsDict',\n\t\t\t\t\t\t\t\t'CloningNotCopyKeyStringsList',\n\t\t\t\t\t\t\t\t'CloningResetBool',\n\t\t\t\t\t\t\t\t'ClonedItemTuplesList',\n\t\t\t\t\t\t\t\t'ClonedCopyVariable'\n\t\t\t\t\t\t\t]\n\n\n\tdef default_init(self,\n\t\t\t\t\t\t_CloningIdsDict=None,\n\t\t\t\t\t\t_CloningNotCopyKeyStringsList=None,\n\t\t\t\t\t\t_CloningResetBool=False,\n\t\t\t\t\t\t_ClonedItemTuplesList=None,\n\t\t\t\t\t\t_ClonedCopyVariable=None,\n\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t):\n\n\t\t#Call the parent __init__ method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\t#@Argumenter.ArgumenterClass()\n\tdef do_clone(self):\n\n\t\t#filter\n\t\tself.ClonedItemTuplesList=SYS._filter(\n\t\t\t\tlambda __ItemTuple:\n\t\t\t\t__ItemTuple[0] not in self.CloningNotCopyKeyStringsList,\n\t\t\t\tself.__dict__.items()\n\t\t\t)\n\n\t\t\"\"\"\n\t\t#global\n\t\tglobal CloningIdStringsList\n\n\t\t#check\n\t\tif self.CloningResetBool:\n\t\t\tCloningIdStringsList=[]\n\t\t\tself.CloningIdsDict={}\n\n\t\t#if self.IdString in CloningIdStringsList:\n\n\t\t#return \n\n\t\t#append\n\t\t#CloningIdStringsList.append(id(self))\n\n\t\t\n\t\t\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t('self.',self,['ClonedItemTuplesList'])\n\t\t)\n\t\t'''\n\n\t\t#debug\n\t\tself.ClonedValueTuplesList=[]\n\t\tfor __ClonedItemTuple in self.ClonedItemTuplesList:\n\n\t\t\tprint('__ClonedItemTuple[0] is ',__ClonedItemTuple[0])\n\t\t\tprint('__ClonedItemTuple[1] is ',__ClonedItemTuple[1])\n\t\t\tprint('')\n\n\t\t\tClonedIdString=id(__ClonedItemTuple[1])\n\t\t\tif hasattr(\n\t\t\t\t\t\t__ClonedItemTuple[1],\n\t\t\t\t\t\t'clone'\n\t\t\t\t\t):\n\n\t\t\t\tprint('IdString in CloningIdStringsList is ',ClonedIdString in CloningIdStringsList)\n\t\t\t\tprint('')\n\n\t\t\t\t#Check\n\t\t\t\tif ClonedIdString in CloningIdStringsList:\n\t\n\t\t\t\t\t#\n\t\t\t\t\tself.ClonedValueTuplesList.append(self.CloningIdsDict[ClonedIdString])\n\n\t\t\t\telse:\n\n\t\t\t\t\t#append\n\t\t\t\t\tCloningIdStringsList.append(ClonedIdString)\n\n\t\t\t\t\t#set\n\t\t\t\t\tself.CloningIdsDict[ClonedIdString]=__ClonedItemTuple[1]\n\n\t\t\t\t\t#append\n\t\t\t\t\tself.ClonedValueTuplesList.append(\n\t\t\t\t\t\t__ClonedItemTuple[1].clone(\n\t\t\t\t\t\t\tself.CloningIdsDict,\n\t\t\t\t\t\t\tself.CloningNotCopyKeyStringsList\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\n\t\t\telse:\n\n\t\t\t\tself.ClonedValueTuplesList.append(\n\t\t\t\t\tgetCopiedMutableVariableWithMutableVariable(__ClonedItemTuple[1],\n\t\t\t\t\t\t**{\n\t\t\t\t\t\t\t'CloningIdsDict':self.CloningIdsDict,\n\t\t\t\t\t\t\t'CloningNotCopyKeyStringsList':self.CloningNotCopyKeyStringsList\n\t\t\t\t\t\t})\n\t\t\t\t)\n\n\t\t'''\n\t\t#copy or clone\n\t\tself.ClonedValueTuplesList=map(\n\t\t\tlambda __ClonedValueTuple:\n\t\t\t__ClonedValueTuple.clone() if 
hasattr(\n\t\t\t\t\t__ClonedValueTuple,\n\t\t\t\t\t'clone'\n\t\t\t\t)\n\t\t\telse getCopiedMutableVariableWithMutableVariable(__ClonedValueTuple),\n\t\t\tSYS.unzip(self.ClonedItemTuplesList,[1])\n\t\t)\n\t\t'''\n\t\t\t\n\t\t#update\n\t\tself.CloningIdsDict[self.IdString]=self.ClonedCopyVariable\n\t\t\n\n\t\t#instance\n\t\tself.ClonedCopyVariable=self.__class__(\n\t\t\t\t**dict(\t\n\t\t\t\tcopy.deepcopy(\n\t\t\t\t\t\tself.ClonedItemTuplesList\n\t\t\t\t\t\t#zip(\n\t\t\t\t\t\t#\tSYS.unzip(self.ClonedItemTuplesList,[0]),\n\t\t\t\t\t\t#\tself.ClonedValueTuplesList\n\t\t\t\t\t\t#\t)\n\t\t\t\t)\n\t\t\t)\n\t\t)\n\t\t\"\"\"\t\n\t\t\n\t\tfor __ClonedItemTuple in self.ClonedItemTuplesList:\n\t\t\tprint(__ClonedItemTuple)\n\t\t\tcopy.deepcopy(__ClonedItemTuple)\n\n\t\t#copy\n\t\tself.ClonedCopyVariable=copy.deepcopy(self)\n\n\n\t\t#return\n\t\treturn self.ClonedCopyVariable\n\n\t\t#Return self\n\t\t#return self\n\t\n#\n\n","sub_path":"Pythonlogy/draft/Cloner/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562207582","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\nimport datetime,os\nnow=datetime.datetime.now()\ndatenow=now.strftime('%Y%m%d_%H')\nsourcefilename='123.csv'\nlistuser=[]\nlistuser_id=[]\nusername=[]\nwith open(sourcefilename) as file4:\n for eachline in file4:\n username.append(\"'%s'\" % eachline.split(\",\")[1])\nuser_name=\",\".join(username)\nos.system('mysql -uhengcaimysql -pK2OFEWinzrBC5IrpmO5S8mqW24yE0x4D -h 10.1.0.20 passport -e \"select username,userid from users where username in( %s );\">user_id.xls' % user_name)\nwith open('user_id.xls') as file1:\n for eachline in file1:\n listuser.append(eachline.split())\n\nwith open (sourcefilename) as file2:\n for eachline in file2:\n record= eachline.split(',')\n for i in listuser:\n if record[1] == i[0]:\n record.insert(2,i[1])\n listuser_id.append(record)\nwith open ('%s_user_id.csv' % datenow,'wb') as file3:\n for i in listuser_id:\n file3.write(','.join(i))\nos.remove(sourcefilename)\nos.remove(\"user_id.xls\")\n\n\n\n","sub_path":"leajoy/numberexport.py","file_name":"numberexport.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638885463","text":"import os\nfrom multiprocessing import Pool\n\nfrom config import *\n\n\ndef run_lkh(lkh_par_file):\n os.system(\"{}LKH {}\".format(LKH_EXE, lkh_par_file))\n\n\ndef pool_func(current_elem):\n curr_DIR = DIR + current_elem\n os.chdir(curr_DIR)\n filename = current_elem\n run_lkh(filename + \".par\")\n run_lkh(filename + \"_sparse.par\")\n\ndirs = os.listdir(DIR)\n\npool = Pool(processes=5)\npool.map(pool_func, dirs)\n","sub_path":"lkhrunner.py","file_name":"lkhrunner.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525375934","text":"###############\n### IMPORTS ###\n\nimport pandas as pd\nimport numpy as np\nimport os, sys, site, math, time, pickle, gzip\nfrom glob import glob\nfrom tqdm import tqdm, trange\n\nimport shapely\nimport geopandas as gpd\nimport geopy, geopy.distance\n\nimport zephyr\n\n### Common paths\nprojpath = zephyr.settings.projpath\ndatapath = zephyr.settings.datapath\n\n#######################\n### ARGUMENT INPUTS ###\n\nimport argparse\nparser = argparse.ArgumentParser(description='transmission-site 
distance')\nparser.add_argument(\n '-z', '--zone', type=str, help='zone name (such as state abbreviation)',)\nparser.add_argument(\n '-s', '--zonesource', type=str, default='state',\n help='shapefile from which to read zones (located at in/Maps/{zonesource}/)',)\nparser.add_argument(\n '-r', '--resource', help=\"'wind' or 'pv'\", type=str, choices=['wind','pv'])\nparser.add_argument(\n '-w', '--overwrite', help='indicate whether to overwrite existing outputs',\n action='store_true')\nparser.add_argument(\n '-x', '--transcostmultiplier', default=1, type=int,\n help='multiplier for transmission costs')\n\n### Parse argument inputs\nargs = parser.parse_args()\nzone = args.zone\nzonesource = args.zonesource\nresource = args.resource\noverwrite = args.overwrite\ntranscostmultiplier = args.transcostmultiplier\n\n##############\n### INPUTS ###\n\n### Transmission characteristics\nvoltcutoff = 230\nurbanclasses = ['U'] ### or ['U','C']\n\n### Costs - from ReEDS documentation 2019\ninflator = zephyr.calc.inflate(2010,2017)\ncost = {\n 'spur': 3.667 * inflator / 1.60934, # 2010$/kW-mile --> 2017 $/kW-km\n 230: 3.667 * inflator / 1.60934, # 2010$/kW-mile --> 2017 $/kW-km\n 345: 2.333 * inflator / 1.60934, # 2010$/kW-mile --> 2017 $/kW-km\n 500: 1.347 * inflator / 1.60934, # 2010$/kW-mile --> 2017 $/kW-km\n 765: 1.400 * inflator / 1.60934, # 2010/kW-mile --> 2017$/kW-km\n}\n\n### Cover the in-between voltages\n### Set cost equal to cost of next-lowest voltage\nfor c in [236, 240, 245, 250, 287]:\n cost[c] = cost[230]\nfor c in [400,450]:\n cost[c] = cost[345]\nfor c in [1000]:\n cost[c] = cost[765]\n\n### Scale by transmission cost multiplier\nfor c in cost:\n cost[c] = cost[c] * transcostmultiplier\n\n### Financial assumptions\nwacc_gen = 0.042\nwacc_trans = 0.036\nlifetime_gen = 25\nlifetime_trans = 50\ncrf_gen = zephyr.cpm.crf(wacc=wacc_gen, lifetime=lifetime_gen)\ncrf_trans = zephyr.cpm.crf(wacc=wacc_trans, lifetime=lifetime_trans)\n\ndef cost_trans_annual(row):\n out = row['cost_trunk_upfront'] * crf_trans + row['cost_spur_upfront'] * crf_gen\n return out\n\n### Filepaths\noutpath = {\n 'pv': os.path.join(\n projpath,'io','cf-2007_2013','{}x','{}',\n 'pv','distance-station_urbanedge-mincost').format(transcostmultiplier, zonesource),\n 'wind': os.path.join(\n projpath,'io','cf-2007_2013','{}x','{}',\n 'wind','distance-station_urbanedge-mincost').format(transcostmultiplier, zonesource),\n}\n\n#####################################################\n### MORE INPUTS (adjust based on zones shapefile) ###\n\nneighborstates = {\n ### If using a zonesource other than \"state\", should add entries to this dict\n ### giving the list of states contained within and adjacent to each zone in zonesource.\n ### Otherwise we use urban areas and water bodies for the full US, which slows down\n ### the execution.\n 'AL': ['AL','MS','TN','GA','FL'],\n 'AK': ['AK',],\n 'AZ': ['AZ','CA','NV','UT','CO','NM'],\n 'AR': ['AR','TX','OK','MO','TN','MS','LA'],\n 'CA': ['CA','OR','NV','AZ'],\n 'CO': ['CO','AZ','UT','WY','NE','KS','OK','NM'],\n 'CT': ['CT','NJ','NY','MA','RI'],\n 'DE': ['DE','MD','DC','VA','PA','NJ'],\n 'FL': ['FL','AL','GA'],\n 'GA': ['GA','AL','TN','NC','SC','FL'],\n 'HI': ['HI',],\n 'ID': ['ID','NV','OR','WA','MT','WY','UT'],\n 'IL': ['IL','MO','IA','WI','IN','KY'],\n 'IN': ['IN','IL','MI','OH','KY'],\n 'IA': ['IA','NE','SD','MN','WI','IL','MO'],\n 'KS': ['KS','CO','NE','MO','OK'],\n 'KY': ['KY','MO','IL','IN','OH','WV','VA','TN'],\n 'LA': ['LA','TX','AR','MS'],\n 'ME': ['ME','NH','MA'],\n 'MD': 
['MD','VA','DC','WV','PA','NJ','DE'],\n 'MA': ['MA','RI','CT','NY','NJ','VT','NH'],\n 'MI': ['MI','IN','IL','WI','OH','KY'],\n 'MN': ['MN','IA','SD','ND','WI'],\n 'MS': ['MS','LA','AR','TN','AL'],\n 'MO': ['MO','AR','OK','KS','NE','IA','IL','KY','TN'],\n 'MT': ['MT','ID','WY','SD','ND'],\n 'NE': ['NE','KS','CO','WY','SD','IA','MO'],\n 'NV': ['NV','CA','OR','ID','UT','AZ'],\n 'NH': ['NH','MA','VT','ME'],\n 'NJ': ['NJ','DE','MD','DC','PA','NY','CT'],\n 'NM': ['NM','AZ','UT','CO','OK','TX'],\n 'NY': ['NY','NJ','PA','VT','MA','CT'],\n 'NC': ['NC','SC','GA','TN','VA','DC'],\n 'ND': ['ND','SD','MT','MN'],\n 'OH': ['OH','KY','IN','MI','PA','WV'],\n 'OK': ['OK','TX','NM','CO','KS','MO','AR'],\n 'OR': ['OR','NV','CA','WA','ID'],\n 'PA': ['PA','WV','OH','NY','NJ','DE','MD','DC','VA'],\n 'RI': ['RI','CT','NY','MA'],\n 'SC': ['SC','GA','NC'],\n 'SD': ['SD','NE','WY','MT','ND','MN','IA'],\n 'TN': ['TN','MS','AR','MO','KY','VA','NC','GA','AL',],\n 'TX': ['TX','NM','OK','AR','LA'],\n 'UT': ['UT','AZ','NV','ID','WY','CO','NM'],\n 'VT': ['VT','NY','NH','MA'],\n 'VA': ['VA','WV','MD','PA','DC','NC','TN','KY'],\n 'WA': ['WA','OR','ID'],\n 'WV': ['WV','KY','OH','PA','MD','DC','VA'],\n 'WI': ['WI','IL','IA','MN','MI'],\n 'WY': ['WY','UT','ID','MT','SD','NE','CO'],\n 'DC': ['DC','VA','WV','MD','PA','DC'],\n}\n\nlat = {'pv':'Latitude', 'wind':'latitude'}\nlon = {'pv':'Longitude', 'wind':'longitude'}\n\n#################\n### PROCEDURE ###\n\n###################\n### Load input data\n\n###### EIA transmission\n##### First download from \n##### https://hifld-geoplatform.opendata.arcgis.com/datasets/electric-power-transmission-lines/data\nfilesin = {\n 'Transmission': os.path.join(\n projpath,'in','Maps','HIFLD','Electric_Power_Transmission_Lines-shp'),\n}\ndftrans = gpd.read_file(filesin['Transmission']).to_crs({'init':'epsg:4326'})\n\ndftransvolt = dftrans.loc[dftrans.VOLTAGE>0].reset_index(drop=True).copy()\n\n### Cut PR\ndftransvolt = dftransvolt.loc[\n ~((dftransvolt.centroid.x >= -70) \n & (dftransvolt.centroid.y <= 20))\n].reset_index(drop=True).copy()\n\n### Get the transmission dataframe\ndftranshv = dftransvolt.loc[dftransvolt.VOLTAGE>=voltcutoff].reset_index(drop=True).copy()\n\n### Load urban shapefile\n### First download from https://www2.census.gov/geo/tiger/TIGER2019/UAC/\ndfurban_all = gpd.read_file(\n os.path.join(projpath,'in','Maps','Census','tl_2010_us_uac10'))\n\n### Create a list of endpoints of transmission segments\ntransends = []\nvolts = []\nids = []\nfor i in dftranshv.index:\n for foo in range(2):\n volts.append(dftranshv.loc[i,'VOLTAGE'])\n ids.append(dftranshv.loc[i,'OBJECTID'])\n line = dftranshv.loc[i,'geometry']\n if type(line) == shapely.geometry.linestring.LineString:\n ### First point\n transends.append((line.coords[0][0], line.coords[0][1]))\n ### Last point\n transends.append((line.coords[-1][0], line.coords[-1][1]))\n else:\n ### Merge the constituent lines\n outcoords = [list(i.coords) for i in line]\n outline = shapely.geometry.LineString([i for sublist in outcoords for i in sublist])\n ### First point\n transends.append((outline.coords[0][0], outline.coords[0][1]))\n ### Last point\n transends.append((outline.coords[-1][0], outline.coords[-1][1]))\ndfends = pd.DataFrame(transends, columns=['lon','lat'])\ndfends['voltage'] = volts\ndfends['objectid'] = ids\ndfends['geometry'] = [shapely.geometry.Point(i) for i in transends]\ndfends = gpd.GeoDataFrame(dfends)\n\n###### Designate filepaths\noutfile = {\n 'pv': 'nsrdb,icomesh9-urban{}-trans{}-{}_{}.csv'.format(\n 
''.join(urbanclasses), voltcutoff, zonesource, zone),\n 'wind': 'wtkhsds,every2,offset0,onshore-urban{}-trans{}-{}_{}.csv'.format(\n ''.join(urbanclasses), voltcutoff, zonesource, zone)\n}\n\nindex_coords = {'pv':'psm3id', 'wind':'rowf_colf'}\nweightsfile = {\n 'pv': os.path.join(\n projpath,'io','geo','developable-area','{}','nsrdb-icomesh9',\n 'sitearea-water,parks,native,mountains,urban-{}_{}.csv'\n ).format(zonesource, zonesource, zone),\n 'wind': os.path.join(\n projpath,'io','geo','developable-area','{}','wtk-hsds-every2',\n 'sitearea-water,parks,native,mountains,urban-{}_{}.csv'\n ).format(zonesource, zonesource, zone)\n}\n\n###################################################\n### Calculate interconnection distances + costs ###\n\n### Make output folder\nos.makedirs(outpath[resource], exist_ok=True)\n\n### Skip if it's done\nif os.path.exists(os.path.join(outpath[resource],outfile[resource])) and (overwrite == False):\n print('{} exists and overwrite == False, so quitting.'.format(\n os.path.join(outpath[resource],outfile[resource])))\n quit()\n\n###### Load site dataframes\nif resource == 'pv':\n dfcoords = pd.read_csv(os.path.join(\n projpath,'io','geo','icomesh-nsrdb-info-key-psmv3-eGRID-avert-ico9.csv'))\n\nelif resource == 'wind':\n ### Load HSDS points\n dfcoords = pd.read_csv(\n os.path.join(projpath,'io','geo','hsdscoords.gz')\n ).rename(columns={'row':'row_full','col':'col_full'})\n ### Make the lookup index\n dfcoords['rowf_colf'] = (\n dfcoords.row_full.astype(str)+'_'+dfcoords.col_full.astype(str))\n\n### Load site weights for the modeled zone, merging multiply-listed sites\npolyweights = pd.read_csv(\n weightsfile[resource]\n).groupby(index_coords[resource]).sum().reset_index()\n\n### Merge sites with calculated land area\ndfsites = dfcoords.merge(polyweights, on=index_coords[resource], how='inner')\n\n###### Transmission\n### Get the zones file\nzonefile = os.path.join(datapath,'Maps',zonesource)\ndfzones = gpd.read_file(zonefile).set_index(zonesource)\n\n### Get the zone site polygons\npolyzone = dfzones.loc[zone,'geometry']\n\n### Get the line endpoints within the modeled zone\ndfendszone = dfends.loc[dfends.within(polyzone)].copy()\n\n###### Urban areas\n### Get bounding box for region, add 0.5° buffer\nregionbounds = {\n 'longitude':[polyzone.bounds[0]-0.5, polyzone.bounds[2]+0.5],\n 'latitude':[polyzone.bounds[1]-0.5, polyzone.bounds[3]+0.5],\n}\n\n### Get region bounding box\nregionbox = shapely.geometry.Polygon([\n (regionbounds['longitude'][0], regionbounds['latitude'][0]),\n (regionbounds['longitude'][1], regionbounds['latitude'][0]),\n (regionbounds['longitude'][1], regionbounds['latitude'][1]),\n (regionbounds['longitude'][0], regionbounds['latitude'][1]),\n])\n\ndfurban_states = dfurban_all.loc[\n dfurban_all.NAME10.astype(str).map(lambda x: x[-2:] in neighborstates[zone])\n].copy()\n\n### Merge all the urban areas, since we don't need to know which is which\n### Filter to urban areas included in urbanclasses\n### 'U' is urban area of >50k people; 'C' is urban cluster of ≥2.5k and <50k\n### https://www2.census.gov/geo/pdfs/reference/ua/2010ua_faqs.pdf\ndfurban = dfurban_states.loc[dfurban_states.UATYP10.isin(urbanclasses)]\ndfurban = dfurban.dissolve('FUNCSTAT10')\n\n### Pull out the urban polygon\npolyurban = dfurban.loc['S','geometry'].buffer(0.)\npolyurban = polyurban.intersection(regionbox).buffer(0.)\n\n### Filter to urban areas within the state\ndfurbanzone = dfurban.copy()\ndfurbanzone['geometry'] = dfurbanzone.intersection(polyzone)\npolyurbanzone = 
polyurban.intersection(polyzone).buffer(0.)\n\n### Put it in a geodataframe\nmultipolyurbanzone = []\nfor x in dfurbanzone.iloc[0]['geometry']:\n if type(x) not in [shapely.geometry.linestring.LineString]:\n multipolyurbanzone.append(shapely.geometry.Polygon(x))\ndfurbanzone = gpd.GeoSeries(shapely.geometry.MultiPolygon(multipolyurbanzone))\n\n### Get urban centroid\nurban_centroid_lon, urban_centroid_lat = list(dfurbanzone.iloc[0].centroid.coords)[0]\n\n######### Calculate substation distances and costs\n###### Substation-urban edge distances and costs\n### Loop over substations\ndistances_trunk = []\nlat_urban_fromstation = []\nlon_urban_fromstation = []\n\nfor i in dfendszone.index:\n ### Get the site-urban distance in km\n stationpoint = shapely.geometry.Point(dfendszone.loc[i,'lon'], dfendszone.loc[i,'lat'])\n polypoint = shapely.ops.nearest_points(stationpoint, polyurbanzone)[1]\n km = geopy.distance.distance((stationpoint.y, stationpoint.x), (polypoint.y,polypoint.x)).km\n distances_trunk.append(km)\n lat_urban_fromstation.append(polypoint.y)\n lon_urban_fromstation.append(polypoint.x)\n\n### Store the trunk distance data\ndfendszone['km_urban_fromstation'] = distances_trunk\ndfendszone['lat_urban_fromstation'] = lat_urban_fromstation\ndfendszone['lon_urban_fromstation'] = lon_urban_fromstation\n### Calculate trunk costs\ndfendszone['cost_trunk_upfront'] = (\n dfendszone['voltage'].map(cost) * dfendszone['km_urban_fromstation'])\n\n###### Site-substation costs\n### Loop over sites\ndictout = {}\nfor i in tqdm(dfsites.index, desc='{},{}'.format(zonesource,zone)):\n sitepoint = shapely.geometry.Point(\n dfsites.loc[i,lon[resource]], dfsites.loc[i,lat[resource]])\n ### Get all distances\n dfendszone['km_site_spur'] = dfendszone.apply(\n lambda row: geopy.distance.distance((sitepoint.y, sitepoint.x),(row['lat'],row['lon'])).km,\n axis=1,\n )\n dfendszone['cost_spur_upfront'] = dfendszone['km_site_spur'] * cost[230]\n dfendszone['cost_trans_upfront'] = (\n dfendszone['cost_trunk_upfront'] + dfendszone['cost_spur_upfront'])\n ### Calculate annualized cost\n dfendszone['cost_trans_annual'] = dfendszone.apply(cost_trans_annual, axis=1)\n\n ### Store all the output information\n dictout[i] = (dfendszone.nsmallest(1,'cost_trans_annual')\n .drop(['geometry'],axis=1)\n .rename(columns={'lon':'lon_station','lat':'lat_station'}))\n\n### Save it\ndfout = pd.concat(dictout).reset_index(level=1,drop=True)\ndfsites = dfsites.merge(dfout, left_index=True, right_index=True, how='left')\ndfsites.to_csv(os.path.join(outpath[resource],outfile[resource]), index=False)\n","sub_path":"vresc_6-transmission-site-distance.py","file_name":"vresc_6-transmission-site-distance.py","file_ext":"py","file_size_in_byte":13973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"122010438","text":"import json\nimport random\nimport time\n\nimport scrapy\n\nfrom ..items import FinalitemItem\nfrom ..settings import IPS\n\n\n\nclass LagouSpider(scrapy.Spider):\n name = 'lg'\n\n def start_requests(self):\n # citys = ['%E5%8C%97%E4%BA%AC', '%E4%B8%8A%E6%B5%B7', '%E6%B7%B1%E5%9C%B3', '%E5%B9%BF%E5%B7%9E']\n jobs = ['python', 'python web', '爬虫', 'AI']\n # for city in citys:\n url = 'https://www.lagou.com/jobs/positionAjax.json?city=%E5%B9%BF%E5%B7%9E&needAddtionalResult=false'\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Cookie': '_ga=GA1.2.933299604.1542897027; _gid=GA1.2.788367443.1542897027; 
user_trace_token=20181122223028-28bc3167-ee63-11e8-8adb-5254005c3644; LGUID=20181122223028-28bc3542-ee63-11e8-8adb-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; JSESSIONID=ABAAABAAAGFABEF53DF3AB8571140915EE9E256D19A31DC; TG-TRACK-CODE=index_search; LGSID=20181123211848-4ff8f3d1-ef22-11e8-b78d-525400f775ce; PRE_UTM=; PRE_HOST=www.baidu.com; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D-uLgZFW3bZOsNEzCvHPiDW9ratUJLcloz4fo9hlsOPa%26ck%3D2610.1.60.231.156.226.144.253%26shh%3Dwww.baidu.com%26sht%3Dbaidu%26wd%3D%26eqid%3Da715324e0001fa30000000025bf7fe30; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; _gat=1; SEARCH_ID=b2dfac9ae4eb4bec8e486e8c4b6236b9; LGRID=20181123213048-fd6d1931-ef23-11e8-8b1c-5254005c3644',\n 'Referer': 'https://www.lagou.com/jobs/list_python%20web?city=%E5%B9%BF%E5%B7%9E&cl=false&fromSearch=true&labelWords=&suginput=',\n 'User-Agent': random.choice(IPS),\n 'X-Anit-Forge-Code': '0',\n 'X-Anit-Forge-Token': 'None',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n for job in jobs:\n i = 1\n while True:\n formdata = {\n 'first': 'false',\n 'pn': str(i),\n 'kd': job,\n }\n i += 1\n time.sleep(1)\n yield scrapy.FormRequest(url=url, formdata=formdata, headers=headers)\n\n def parse(self, response):\n data = json.loads(response.text)\n item = FinalitemItem()\n print(data['content']['positionResult']['result']) # 字典列表\n for detailinfo in data['content']['positionResult']['result']:\n # positionName = detailinfo['positionName'] # 职位\n # companyFullName = detailinfo['companyFullName'] # 公司名字\n # salary = detailinfo['salary'] # 薪水\n # jobNature = detailinfo['jobNature'] # 任职要求\n # workYear = detailinfo['workYear'] # 经验要求\n # education = detailinfo['education'] # 学历要求\n # city = detailinfo['city'] # 城市\n # positionAdvantage = detailinfo['positionAdvantage'] # 公司福利\n # companySize = detailinfo['companySize'] # 公司规模\n # district = detailinfo['district'] # 街区\n businessZones = ''\n try:\n for area in detailinfo['businessZones']:\n businessZones = businessZones + area\n except:\n businessZones = '还没做好呢'\n\n # print(locals())\n\n item['positionName'] = detailinfo['positionName']\n item['companyFullName'] = detailinfo['companyFullName']\n item['salary'] = detailinfo['salary']\n item['jobNature'] = detailinfo['jobNature']\n item['workYear'] = detailinfo['workYear']\n item['education'] = detailinfo['education']\n item['city'] = detailinfo['city']\n item['positionAdvantage'] = detailinfo['positionAdvantage']\n item['companySize'] = detailinfo['companySize']\n item['district'] = detailinfo['district']\n item['businessZones'] = detailinfo['businessZones']\n yield item\n","sub_path":"Pythonhomework/Scrapy/finalitem/finalitem/spiders/lagou.py","file_name":"lagou.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306454530","text":"import cv2\nimport numpy as np\nimport pygame\n\n# experiment with wxPython's wx.Slider\n# wx.Slider(parent, id, init_val, min_val, max_val, position_tuple, size_tuple, style)\n# position_tuple (x, y) of upper left corner, size_tuple (width, height)\n# (on my Windows XP the mouse-wheel controls the slider that has the focus)\n# tested with Python24 and wxPython26 vegaseat 17oct2005\nimport wx\nimport threading\nfrom ConfigParser import SafeConfigParser\n\nlower = np.array([10, 150, 100])\nupper = np.array([30, 255, 255])\nflag = True\ndef start():\n global lower, upper, flag\n cap = cv2.VideoCapture(0)\n while flag:\n _, frame = cap.read()\n #hue saturation value\n hsv = 
cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower, upper)\n\n kernal = np.ones((15, 15), np.float32)/225\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernal)\n cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n if len(cnts) > 0:\n c = max(cnts, key=cv2.contourArea)\n ((x,y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n\n if radius > 10:\n cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)\n cv2.circle(frame, (int(x), int(y)), 1, (0, 255, 255), -1)\n\n cv2.imshow('frame', frame)\n cv2.imshow('mask', mask)\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n cv2.destroyAllWindows()\n cap.release()\n\n\nclass MyPanel(wx.Panel):\n def __init__(self, parent, id):\n global upper, lower\n wx.Panel.__init__(self, parent, id)\n self.SetBackgroundColour(\"white\")\n try:\n config = SafeConfigParser()\n config.read('HSV_Config.ini')\n l1 = int(config.get('Lower Bound', 'H'))\n l2 = int(config.get('Lower Bound', 'S'))\n l3 = int(config.get('Lower Bound', 'V'))\n u1 = int(config.get('Upper Bound', 'H'))\n u2 = int(config.get('Upper Bound', 'S'))\n u3 = int(config.get('Upper Bound', 'V'))\n except:\n l1 = l2 = l3 = u1 = u2 = u3 = 50\n self.lower_l = wx.StaticText(self, -1, label=\"Lower range\", pos=(130, 20), name='lower')\n\n self.lower_h = wx.StaticText(self, -1, label=\"H\", pos=(330, 90), name='H')\n self.slider1 = wx.Slider(self, -1, l1, 0, 255, (10, 80), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n\n self.lower_ = wx.StaticText(self, -1, label=\"S\", pos=(330, 140), name='H')\n self.slider2 = wx.Slider(self, -1, l2, 0, 255, (10, 130), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n\n self.lower_v = wx.StaticText(self, -1, label=\"V\", pos=(330, 190), name='S')\n self.slider3 = wx.Slider(self, -1, l3, 0, 255, (10, 180), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n\n self.upper_l = wx.StaticText(self, -1, label=\"Upper range\", pos=(120, 250), name='upper')\n\n self.upper_l = wx.StaticText(self, -1, label=\"H\", pos=(330, 290), name='H')\n self.slider4 = wx.Slider(self, -1, u1, 0, 255, (10, 280), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n\n self.upper_l = wx.StaticText(self, -1, label=\"S\", pos=(330, 340), name='S')\n self.slider5 = wx.Slider(self, -1, u2, 0, 255, (10, 330), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n\n self.upper_l = wx.StaticText(self, -1, label=\"V\", pos=(330, 390), name='V')\n self.slider6 = wx.Slider(self, -1, u3, 0, 255, (10, 380), (300, 50), wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)\n lower = np.array([int(self.slider1.GetValue()), int(self.slider2.GetValue()), int(self.slider3.GetValue())])\n upper = np.array([int(self.slider4.GetValue()), int(self.slider5.GetValue()), int(self.slider6.GetValue())])\n self.save = wx.Button(self, id=wx.ID_ANY, label=\"Save\", pos=(260, 430))\n self.Bind(wx.EVT_BUTTON, self.save_config, self.save)\n # respond to changes in slider position ...\n self.Bind(wx.EVT_SLIDER, self.sliderUpdate)\n\n def sliderUpdate(self, event):\n global lower, upper\n lower = np.array([int(self.slider1.GetValue()), int(self.slider2.GetValue()), int(self.slider3.GetValue())])\n upper = np.array([int(self.slider4.GetValue()), int(self.slider5.GetValue()), int(self.slider6.GetValue())])\n\n def __del__(self):\n global flag\n self.Close()\n flag = False\n # wx.GetApp().ExitMainLoop()\n\n def save_config(self, event):\n try:\n config = SafeConfigParser()\n 
config.add_section('Lower Bound')\n config.set('Lower Bound', 'H', str(self.slider1.GetValue()))\n config.set('Lower Bound', 'S', str(self.slider2.GetValue()))\n config.set('Lower Bound', 'V', str(self.slider3.GetValue()))\n config.add_section('Upper Bound')\n config.set('Upper Bound', 'H', str(self.slider4.GetValue()))\n config.set('Upper Bound', 'S', str(self.slider5.GetValue()))\n config.set('Upper Bound', 'V', str(self.slider6.GetValue()))\n f = open('HSV_Config.ini', 'w')\n config.write(f)\n f.close()\n except Exception as e:\n print(e)\n\ndef window():\n global flag\n flag = True\n app = wx.App(False)\n frame = wx.Frame(None, -1, \"HSV\", size=(380, 500))\n MyPanel(frame, -1)\n frame.Show(True)\n threading.Thread(name='cap', target=start).start()\n app.MainLoop()\n","sub_path":"src/image_detection.py","file_name":"image_detection.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625416487","text":"class Empty (Exception):\r\n\t\"\"\" Error attempting to access an element from an empty container \"\"\"\r\n\tpass\r\n\r\n\r\nclass ArrayStack:\r\n\t\"\"\" LIFO stack implementation usinng Python list as underlying storage \"\"\"\r\n\r\n\tdef __init__ (self):\r\n\t\t\"\"\" Creat an empty stack \"\"\"\r\n\t\tself._data = []\r\n\r\n\tdef len (self):\r\n\t\t\"\"\" Return the number of elements in the stack \"\"\"\r\n\t\treturn len (self._data)\r\n\t\r\n\tdef is_empty (self):\r\n\t\t\"\"\" Return True if the stack is empty \"\"\"\r\n\t\treturn len(self._data) == 0\r\n\r\n\tdef push (self, e):\r\n\t\t\"\"\" Add element e to the top of the Stack \"\"\"\r\n\t\tself._data.append(e)\r\n\r\n\tdef top (self):\r\n\t\t\"\"\" Return (but do not remove) the element at the top of the stack \r\n\r\n\t\tRaise Empty exception if the Stack is empty\r\n\t\t\"\"\"\r\n\t\tif self.is_empty():\r\n\t\t\ttry:\r\n\t\t\t\traise Empty('Stack is empty')\r\n\t\t\texcept Empty as error:\r\n\t\t\t\tprint ('Hi no element')\r\n\t\telse:\r\n\t\t\treturn self._data[-1]\r\n\r\n\tdef pop (self):\r\n\t\t\"\"\" Remove and return the element from the top of the stack\r\n\t\tRaise Empty exception if the stack is empty.\r\n\t\t\"\"\"\r\n\t\tif self.is_empty():\r\n\t\t\ttry:\r\n\t\t\t\traise Empty('Stack is empty')\r\n\t\t\texcept Empty as error:\r\n\t\t\t\tprint ('Empty stack')\r\n\t\telse:\r\n\t\t\treturn self._data.pop()\r\n\r\n\r\n\r\ndef test_empty_stack():\r\n\tst = ArrayStack()\t\r\n\tprint (st.top())\r\n\r\n\r\ndef test_stack():\r\n\tst = ArrayStack()\t\r\n\tprint (st.top())\r\n\tst.push(10)\r\n\tst.push(20)\r\n\tst.pop()\r\n\tst.pop()\r\n\tst.pop()\r\n\t\r\n\tprint (st.len())\r\n\r\n\r\nif __name__ == '__main__':\r\n\ttest_empty_stack()\r\n","sub_path":"Array_stack-v1.py","file_name":"Array_stack-v1.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476182303","text":"#!/usr/bin/python3\nimport argparse\nfrom base.system.config import Config\nfrom base.system.database import Database\nfrom base.system.global_settings import GlobalSettings\nfrom base.model.operator import Operator\nfrom base.system.sign_cache import SignCache\nfrom base.system.connection_pool import ConnectionPool\n\nVERSION = '0.1b'\n\nif __name__ == '__main__':\n #parse console arguments\n parser = argparse.ArgumentParser(description='Historical Risk Engine (HRE) main cronjob')\n parser.add_argument('operator_ids', metavar='operator_id', type=int, nargs='*', \n 
help='Operators id')\n parser.add_argument('--config', dest='config_file', nargs=1, default='config/config.yml',\n help='Use custom configuration file')\n parser.add_argument('--debug', dest='debug_mode', action='store_const', const=True, default=False,\n help='Turn on DEBUG MODE')\n parser.add_argument('--noblock', dest='no_block', action='store_const', const=True, default=False,\n help='Turn off all auto blocking')\n parser.add_argument('--connections-max', dest='connections_max', nargs=1, type=int, default=5,\n help='Maximum number of connections')\n parser.add_argument('-v', '--version', action='version', version='HRE cronjob %s' % (VERSION))\n\n args = parser.parse_args()\n\n # init main configuration object\n config = Config(args.config_file, VERSION)\n config.debug_mode = args.debug_mode\n config.no_block = args.no_block\n\n main_db = Database(config.get('main_db', {}))\n\n config.global_settings = GlobalSettings(main_db, config)\n\n operators_ids = None\n if args.operator_ids:\n operators_ids = [{'id':tuple(args.operator_ids)}]\n operators = Operator.fetch_all(main_db, config, operators_ids)\n\n conn_pool = ConnectionPool(config.get('engine_db', {}), config, args.connections_max)\n\n for operator in operators:\n signs_cache = SignCache()\n activities = operator.get_activities()\n for activity in activities:\n activity.run_activity(operator.get_engine_id(), conn_pool, signs_cache)\n\n conn_pool.close()\n main_db.close()\n\n #activity_ids = [1, 2, 3]\n #for activity_id in activity_ids:\n # p = Process(target=run_activity, args=(activity_id,))\n # p.start()\n # p.join()\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496731803","text":"# Software Engineering Level 3 - Task 22\n# Compulsory Task 1 - Implementing the K-means clustering algorithm\n\nimport matplotlib.pyplot as plt\nimport math\nimport csv\nimport random\n\n# Firstly, defining the function to calculate the distance between two points on the graph.\ndef distance_between_points(point1, point2):\n return math.sqrt((point2[1] - point1[1]) ** 2 + (point2[0] - point1[0]) ** 2)\n\n# Then defining a function to take in a list of points and calculate the mean point to return e.g. [x, y].\ndef xy_mean(points):\n y_sum = 0\n x_sum = 0\n for p in points:\n x_sum += p[0]\n y_sum += p[1]\n return [\n x_sum / len(points),\n y_sum / len(points),\n ]\n\n# This function calculates the nearest center point for an individual point, i.e. 
compares one point 'point' to a list\n# of 'center_points' and once the smallest distance is found, it returns the index of the smallest distance.\n# The index of the smallest distance is important because it will reference the mean point with the smallest distance.\ndef pick_nearest_center_point_idx(center_points, point):\n closest_center_point_idx = 0\n distance_to_closest_center_point = distance_between_points(center_points[0], point)\n\n for center_point_idx, center_point in enumerate(center_points):\n this_distance = distance_between_points(center_point, point)\n if this_distance < distance_to_closest_center_point:\n distance_to_closest_center_point = this_distance\n closest_center_point_idx = center_point_idx\n\n return closest_center_point_idx\n\n\n# def build_clusters(center_points, points): cluster[]\n# given some center points and points - build and return clusters\n# This function allows for a list of dictionaries to be built, which have the correct number of clusters (i.e. center\n# points) as inputed by the user.\n# The points are then placed into the correct cluster with the nearest center point.\ndef build_clusters(center_points, points):\n clusters = [{'center_point': center_point, 'data_points': []} for center_point in center_points]\n # we have a list of clusters with only the center point set\n # [\n # {'center_point': [1, 2], 'data_points': []},\n # {'center_point': [3, 5], 'data_points': []},\n # ...\n # ]\n\n # place each point into the cluster with the nearest center point\n for point in points:\n nearest_center_point_idx = pick_nearest_center_point_idx(center_points, point)\n clusters[nearest_center_point_idx]['data_points'].append(point)\n\n return clusters\n\n# Starting the main execution of the program.\nif __name__ == '__main__':\n\n # Firstly, a list variable is used to store the x, y values from the given CSV file of country data.\n country_data = []\n\n # Opening csv file and storing the data into x and y values and thereafter appending each point as a separate list\n # to the 'country_data' list variable; this will become a list of lists with each point at a separate list item.\n with open('dataBoth.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n\n for row in readCSV:\n if row[0] != 'Countries':\n birth_int = float(row[1])\n life_int = float(row[2])\n\n country_data.append([birth_int, life_int])\n\n # Welcome message and getting input from the user concerning the number of clusters and iterations they wish to use.\n print('Welcome to the K-Means Algorithm Program.')\n no_clusters = int(input('\\nHow many clusters would you like to include? '))\n no_iterations = int(input('\\nHow many iterations would you like to run? '))\n\n # Setting a list variable to store the calculated clusters.\n built_clusters = []\n\n # Now to perform the clustering, this loop runs the number of times/ iterations specified by the user.\n # For the first loop, no mean has been set, therefore when 'i == 0', random points are drawn, depending on the\n # number of clusters specified by the user. 
They are then stored in the list 'cluster_center_points'.\n    for i in range(no_iterations):\n        # Variable set to display sum of squared distances for each iteration below.\n        iteration_sum_distances = 0.0\n\n        if i == 0:\n            # First iteration draws random mean points from the list of lists, 'country_data'.\n            cluster_center_points = random.sample(country_data, no_clusters)\n        else:\n            # Thereafter, new means are calculated within the clusters and added to the list.\n            cluster_center_points = [xy_mean(c['data_points']) for c in built_clusters]\n\n        # Setting a list with dictionaries, i.e. 'center_point': [[x, y], 'data_points': [x, y], [x, y]] etc.\n        # This will have the number of clusters as inputted and place the data points in the correct clusters.\n        built_clusters = build_clusters(cluster_center_points, country_data)\n\n        # This part was added in for compulsory task 2.\n        # This calculates the squared distances for each point and the mean for each iteration and displays it.\n        for cluster in built_clusters:\n            mean = cluster['center_point']\n\n            for point in cluster['data_points']:\n                iteration_sum_distances += distance_between_points(point, mean)\n\n\n        print(f\"\\nSum of squared distances for iteration {i + 1} is {iteration_sum_distances}.\")\n\n\n    # Creating the figure for the graph and setting appropriate headings.\n    figure = plt.figure()\n    plt.title('Birth Rates and Life Expectancies in Countries Across the World')\n    plt.xlabel('Birth Rates')\n    plt.ylabel('Life Expectancies')\n    # Setting variables to store the number of countries, cluster count and list of country names to display.\n    # Number of countries will also be used to calculate the means for x and y.\n    cluster_count = 1\n    country_list = []\n    # Now looping through the clusters to plot the points and print out various information regarding the results.\n    # Random colors are chosen for each cluster, depending on the number of clusters chosen.\n    for cluster in built_clusters:\n        r = random.random()\n        b = random.random()\n        g = random.random()\n\n        # Color list created with random choices.\n        color = [[r, g, b]]\n\n        no_countries = 0\n        # Initialising the sums outside the point loop so they accumulate over the whole cluster,\n        # rather than being reset for every point.\n        sum_x = 0\n        sum_y = 0\n\n        # Looping through each point in each cluster's list of points.\n        # Point values are separated to x and y, so as to calculate the sums and then average values for each cluster.\n        for point in cluster['data_points']:\n            x = point[0]\n            y = point[1]\n            sum_x += x\n            sum_y += y\n\n            # Scattering each point onto the graph with its cluster color.\n            plt.scatter(point[0], point[1], c=color)\n\n            # Incrementing country count for each point in a cluster.\n            no_countries += 1\n\n            # Opening the file to check which countries have been placed in each cluster.\n            # If the point matches the country listed in the file, the country name is added to 'country_list'.\n            with open('dataBoth.csv') as csvfile:\n                readCSV = csv.reader(csvfile, delimiter=',')\n\n                for row in readCSV:\n                    if row[0] != 'Countries' and x == float(row[1]) and y == float(row[2]):\n                        country_name = str(row[0])\n                        country_list.append(country_name)\n\n        # Displaying results related to the number of countries, list of country names and x and y means for each\n        # cluster.\n        print(f\"\\nCluster {cluster_count} contains {no_countries} countries.\")\n        print(f\"\\nThe list of countries in cluster {cluster_count} is {country_list}\")\n        print(f\"\\nCluster {cluster_count} birth rate = {sum_x / no_countries}\")\n        print(f\"\\nCluster {cluster_count} life expectancy = {sum_y / no_countries}\")\n\n        # Clearing the country_list of names to start afresh for the next 
iteration/ next cluster.\n # Also iterating the cluster count for reference.\n country_list.clear()\n cluster_count += 1\n\n\n # Displaying the plotted clusters.\n plt.show()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"kmeans_program.py","file_name":"kmeans_program.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238028885","text":"import unittest\n\nclass TestTopic(unittest.TestCase):\n \"\"\"\n Test all the general Topic cases\n \"\"\"\n\n def test_Empty( self ):\n\n from Products.CMFTopic.Topic import Topic\n topic = Topic('top')\n\n query = topic.buildQuery()\n self.assertEqual( len( query ), 0 )\n\n def test_Simple( self ):\n\n from Products.CMFTopic.Topic import Topic\n topic = Topic('top')\n topic.addCriterion( 'foo', 'String Criterion' )\n topic.getCriterion( 'foo' ).edit( 'bar' )\n\n query = topic.buildQuery()\n self.assertEqual( len(query), 1 )\n self.assertEqual( query['foo'], 'bar' )\n\n topic.addCriterion( 'baz', 'Integer Criterion' )\n topic.getCriterion( 'baz' ).edit( 43 )\n\n query = topic.buildQuery()\n self.assertEqual( len( query ), 2 )\n self.assertEqual( query[ 'foo' ], 'bar' )\n self.assertEqual( query[ 'baz' ], 43 )\n\n def test_Nested( self ):\n\n from Products.CMFTopic.Topic import Topic\n topic = Topic('top')\n\n topic.addCriterion( 'foo', 'String Criterion' )\n topic.getCriterion( 'foo' ).edit( 'bar' )\n\n topic.addSubtopic( 'qux' )\n subtopic = topic.qux\n\n subtopic.addCriterion( 'baz', 'String Criterion' )\n subtopic.getCriterion( 'baz' ).edit( 'bam' )\n\n query = subtopic.buildQuery()\n self.assertEqual( len( query ), 2 )\n self.assertEqual( query['foo'], 'bar' )\n self.assertEqual( query['baz'], 'bam' )\n\n subtopic.acquireCriteria = 0\n query = subtopic.buildQuery()\n self.assertEqual( len( query ), 1 )\n self.assertEqual( query['baz'], 'bam' )\n\ndef test_suite():\n return unittest.makeSuite(TestTopic)\n\nif __name__ == '__main__':\n unittest.TextTestRunner().run(test_suite())\n","sub_path":"CMF/tags/1.4.8/CMFTopic/tests/test_Topic.py","file_name":"test_Topic.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"75781393","text":"#!/usr/bin/env python3\n# Copyright (c) 2018-2019 The Copernicus developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test mining RPCs\n\n- getblocktemplate template mode\n- submitblock\"\"\"\n\nfrom binascii import b2a_hex\nimport copy\n\nfrom test_framework.blocktools import create_coinbase\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.mininode import CBlock\nfrom test_framework.util import *\n\n\ndef b2x(b):\n return b2a_hex(b).decode('ascii')\n\nclass MiningTest(BitcoinTestFramework):\n def set_test_params(self):\n self.num_nodes = 2\n self.setup_clean_chain = False\n\n def run_test(self):\n node = self.nodes[0]\n # Mine a block to leave initial block download\n node.generate(1)\n\n prevblk = node.getblock(node.getbestblockhash())\n\n tmpl = node.getblocktemplate()\n self.log.info(\"getblocktemplate: Test capability advertised\")\n\n assert 'rules' in tmpl\n assert 'vbavailable' in tmpl\n assert 'transactions' in tmpl\n assert 'coinbaseaux' in tmpl\n assert 'coinbasetxn' not in tmpl\n assert 'mutable' in tmpl\n assert isinstance(tmpl['version'], int)\n assert isinstance(tmpl['curtime'], int)\n assert 
isinstance(tmpl['vbrequired'], int)\n assert isinstance(tmpl['coinbasevalue'], int)\n assert_is_hex_string(tmpl['bits'])\n assert_is_hash_string(tmpl['target'])\n assert_is_hash_string(tmpl['previousblockhash'])\n assert_equal(tmpl['sizelimit'], 32000000)\n assert_equal(tmpl['sigoplimit'], 640000)\n assert_equal(tmpl['mintime'], prevblk['mediantime'] + 1)\n assert_equal(tmpl['height'], prevblk['height'] + 1)\n assert_equal(tmpl['noncerange'], \"00000000ffffffff\")\n\n coinbase_tx = create_coinbase(height=int(tmpl[\"height\"]) + 1)\n # sequence numbers must not be max for nLockTime to have effect\n coinbase_tx.vin[0].nSequence = 2 ** 32 - 2\n coinbase_tx.rehash()\n\n block = CBlock()\n block.nVersion = tmpl[\"version\"]\n block.hashPrevBlock = int(tmpl[\"previousblockhash\"], 16)\n block.nTime = tmpl[\"curtime\"]\n block.nBits = int(tmpl[\"bits\"], 16)\n block.nNonce = 0\n block.vtx = [coinbase_tx]\n\n self.log.info(\"getblocktemplate: Test valid block\")\n #assert_template(node, block, None)\n\n self.log.info(\"submitblock: Test block decode failure\")\n assert_raises_rpc_error(-22, \"Block decode failed\",\n node.submitblock, b2x(block.serialize()[:-15]))\n\n block.hashMerkleRoot = block.calc_merkle_root()\n node.submitblock(b2x(block.serialize()[:]))\n\nif __name__ == '__main__':\n MiningTest().main()\n","sub_path":"mining_template.py","file_name":"mining_template.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"187527493","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 24 08:31:30 2020\n\n@author: https://www.linkedin.com/in/gabrieltribeiro/\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport timeit\n\ndef const_O(x):\n x[0]**2\n \ndef linear_O(x):\n for item in range(len(x)):\n x[item]**2\n \ndef quadratic_O(x):\n for item_1 in range(len(x)):\n for item_2 in range(len(x)):\n x[item_1]*x[item_2]\n\ntimes_const = []\ntimes_linear = []\ntimes_quad = []\n\ndims = np.linspace(1, 10, 10)\nfor x_size in dims:\n \n start_time = timeit.default_timer()\n const_O(np.ones(int(x_size)))\n times_const.append(timeit.default_timer() - start_time)\n \n start_time = timeit.default_timer()\n linear_O(np.ones(int(x_size)))\n times_linear.append(timeit.default_timer() - start_time)\n \n start_time = timeit.default_timer()\n quadratic_O(np.ones(int(x_size)))\n times_quad.append(timeit.default_timer() - start_time)\n\nplt.figure(figsize=(10,5))\nplt.plot(dims, times_const)\nplt.plot(dims, times_linear)\nplt.plot(dims, times_quad)\nplt.legend(['O(c)', 'O(x)', 'O(x**2)'])\nplt.xlabel('Input dimension', fontsize='large')\nplt.ylabel('Execution time', fontsize='large')\nplt.xticks(ticks=dims)\nplt.grid()\n \n \n\n","sub_path":"code/bigO_demo.py","file_name":"bigO_demo.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"107829376","text":"import os\n\n\ndef readfilecontent(path):\n print(\"Reading file content from \" + path)\n\n if os.path.exists(path) == 0:\n print(\"The file does not exist\")\n else:\n file = open(path,\"r\")\n\n for line in file:\n print(line)\n\ndef readfoldercontent(path):\n print(\"Reading folder contents : \" + path)\n\n if os.path.exists(path) == 0:\n print(\"The folder does not exist\")\n else:\n d = os.listdir(path)\n\n for item in d:\n 
print(item)","sub_path":"playground/buddike/lab1/file_processor.py","file_name":"file_processor.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305978889","text":"# Copyright (c) 2014 Joseph Keshet, Morgan Sonderegger, Thea Knowles\n#\n# This file is part of Autovot, a package for automatic extraction of\n# voice onset time (VOT) from audio files.\n#\n# Autovot is free software: you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# Autovot is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with Autovot. If not, see\n# .\n#\n\nimport subprocess\nimport random\nimport logging\nimport wave\nimport tempfile\nimport os\n\n\ndef csv_append_row(tmp_preds, preds_filename, with_headers=True):\n\n if with_headers:\n skip_header = True\n\n all_lines = list()\n\n # check if the CSV file exists\n if os.path.isfile(preds_filename):\n # read it lines\n for line in open(preds_filename, 'r'):\n all_lines.append(line)\n else:\n # if the file does not exist it does not have headers and they should be copied\n skip_header = False\n\n # check if there is a header\n for line in open(tmp_preds, 'r'):\n if skip_header:\n skip_header = False\n else:\n all_lines.append(line)\n # now dump everything back\n with open(preds_filename, 'w') as f:\n for line in all_lines:\n f.write(line)\n\n\ndef generate_tmp_filename(extension=\"txt\"):\n return tempfile._get_default_tempdir() + \"/\" + next(tempfile._get_candidate_names()) + \".\" + extension\n\n\ndef logging_defaults(logging_level=\"INFO\"):\n logging.basicConfig(level=logging_level, format='%(asctime)s.%(msecs)d [%(filename)s] %(levelname)s: %(message)s',\n datefmt='%H:%M:%S')\n\ndef num_lines(filename):\n lines = 0\n for _ in open(filename, 'rU'):\n lines += 1\n return lines\n\n\ndef easy_call(command):\n try:\n logging.debug(command)\n return_code = subprocess.call(command, shell=True)\n if return_code == 127 or return_code < 0:\n logging.debug('Return code: %d' % return_code)\n exit(-1)\n except Exception as exception:\n logging.error('Could not execute the following:')\n logging.error(command)\n logging.error('%s - %s' % (type(exception), exception.args))\n exit(-1)\n\n\ndef random_shuffle_data(in_features_filename, in_labels_filename, out_features_filename, out_labels_filename):\n\n # open files\n in_features = open(in_features_filename, 'rU')\n in_labels = open(in_labels_filename, 'rU')\n\n # read infra text header\n header = in_labels.readline()\n dims = header.split()\n\n # read file lines\n lines = list()\n for x, y in zip(in_features, in_labels):\n lines.append((x, y))\n if len(lines) != int(dims[0]):\n logging.error(\"Either the feature file and the label file are not the same length of label file missing a \"\n \"header\")\n exit(-1)\n\n # close files\n in_features.close()\n in_labels.close()\n\n # random shuffle the instances\n random.shuffle(lines)\n\n # write back the result\n out_features = open(out_features_filename, 'w')\n out_labels = open(out_labels_filename, 'w')\n\n # write labels header\n header = \"%s 
%s\\n\" % (dims[0], dims[1])\n out_labels.write(header)\n\n # write data\n for x, y in lines:\n out_features.write(x)\n out_labels.write(y)\n\n # close files\n out_features.close()\n out_labels.close()\n\n return len(lines)\n\n\ndef extract_lines(input_filename, output_filename, lines_range, has_header=False):\n\n if lines_range[0] >= lines_range[1]:\n logging.error(\"Range should be causal.\")\n exit(-1)\n input_file = open(input_filename, 'rU')\n output_file = open(output_filename, 'w')\n if has_header:\n header = input_file.readline().strip().split()\n new_header = \"%d 2\\n\" % (lines_range[1]-lines_range[0]+1)\n output_file.write(new_header)\n for line_num, line in enumerate(input_file):\n if lines_range[0] <= line_num <= lines_range[1]:\n output_file.write(line)\n input_file.close()\n output_file.close()\n\n\ndef is_textgrid(filename):\n try:\n file = open(filename, 'rU')\n first_line = file.readline()\n except:\n return False\n if \"ooTextFile\" in first_line:\n return True\n return False\n\n\ndef is_valid_wav(filename):\n # check the sampling rate and number bits of the WAV\n try:\n wav_file = wave.Wave_read(filename)\n except:\n return False\n if wav_file.getframerate() != 16000 or wav_file.getsampwidth() != 2 or wav_file.getnchannels() != 1 \\\n or wav_file.getcomptype() != 'NONE':\n return False\n return True\n","sub_path":"utils/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573529257","text":"#Rewrite your pay computation with time-and-a-half for overtime and create a function called computepay which takes two parameters (hours and rate).\n\ntry:\n hours = float(input(\"How many hours did you work? \"))\n pay_rate = float(input(\"What is your rate of pay? 
\"))\nexcept:\n print(\"You must enter only numbers.\")\n quit()\n\ndef computepay(hrs, rt):\n if hrs > 40:\n print('Your pay, including over time, is $' + str( (rt * 1.5 *(hrs - 40)) + (40 * rt) ))\n else:\n print('Your pay is $' + str(rt * hrs))\ncomputepay(hours, pay_rate)","sub_path":"coursera/exercise-4.6.py","file_name":"exercise-4.6.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"629718608","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Neural_Network(object):\n\n def __init__(self, eta=0.01):\n self.input_size = 2\n self.hidden_size = 10\n self.output_size = 2\n self.eta = eta\n self.errors = []\n\n self.W1 = np.random.randn(self.input_size, self.hidden_size)\n self.W2 = np.random.randn(self.hidden_size, self.output_size)\n\n def sigmoid(self, x):\n return 1./(1 + np.exp(-x))\n\n def sigmoid_derivative(self, s):\n return s * (1 - s) \n\n def forward(self, x):\n self.y0 = np.array(x).copy()\n self.a1 = np.dot(self.y0, self.W1)\n self.y1 = self.sigmoid(self.a1)\n self.a2 = np.dot(self.y1, self.W2)\n self.y2 = self.sigmoid(self.a2)\n return self.y2\n\n def backward(self, output):\n self.epsilon_2 = output - self.y2\n self.delta_2 = self.epsilon_2 * self.sigmoid_derivative(self.y2)\n\n self.epsilon_1 = self.delta_2.dot(self.W2.T)\n self.delta_1 = self.epsilon_1 * self.sigmoid_derivative(self.y1)\n \n self.W2 += self.eta * self.y1.T.dot(self.delta_2)\n self.W1 += self.eta * self.y0.T.dot(self.delta_1)\n\n def train(self, x, y):\n self.forward(x)\n self.backward(y)\n self.errors.append(np.mean(np.square(y - self.y2)))","sub_path":"ex3/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"158491896","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom deep_dialog import tools\n\ndef standardize(arr):\n '''\n 这个函数真的是空有其表!\n :param arr: 输入特征向量\n :return: 直接输出特征向量!\n '''\n return arr\n\ndef calc_entropies(state, q, db):\n '''\n SL中计算熵的方式,跟RL中不一样!\n :param state:\n :param q: table probability, (N,)\n :param db: database\n :return: 每个slot的熵\n '''\n entropies = {}\n for s,c in state.iteritems():\n if s not in db.slots:\n entropies[s] = 0.\n else:\n p = (db.ids[s]*q).sum(axis=1)\n u = db.priors[s]*q[db.unks[s]].sum()\n c_tilde = p+u\n c_tilde = c_tilde/c_tilde.sum()\n entropies[s] = tools.entropy_p(c_tilde)\n return entropies\n","sub_path":"deep_dialog/agents/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"91899753","text":"from django.test import SimpleTestCase\n\nfrom people.models import Person\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\nclass TestPerson(SimpleTestCase):\n def setUp(self):\n self.person = Person()\n\n def test_get_user_returns_none_if_no_user_ref_and_no_user_id(self):\n self.assertIsNone(self.person.get_user())\n\n def test_get_user_returns_user_if_user_ref(self):\n user = User()\n self.person.user = user\n self.assertIs(user, self.person.get_user())\n\n def test_set_user_sets_user_ref_and_user_id(self):\n user = User()\n user.id = 1\n person = self.person\n person.set_user(user)\n self.assertEqual(person.user_id, 1)\n self.assertIs(person.user, 
user)","sub_path":"people/tests/unit/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553046625","text":"from datetime import *\nfrom pymongo import MongoClient\nimport uuid\nfrom config import var\n#import pprint\n\nimport sys\nif sys.version_info[0] < 3:\n from Tkinter import *\nelse:\n from tkinter import *\n\nclient = MongoClient()\nclient = MongoClient(var.link, 27017)\ndb = client['subconn']\ncollection = db['items']\np_list = []\n\nch = ''\n\n\ndef sel():\n global ch\n ch = str(var.get())\n\n\ndef item_generator(item_name, item_quantity, item_exp):\n nw = datetime.now()\n da = timedelta(days=item_exp)\n ex = (nw + da).__str__()[:10]\n nw = nw.__str__()[:10]\n #print nw, ex\n for _ in range(item_quantity):\n post = {\"name\": item_name, \"packaging_date\": str(nw), \"expiry_date\":\\\n str(ex), \"code\": item_name+\"|\"+str(uuid.uuid4())+\"|\"+str(ex), \"transaction_id\": \"None\", \"assigned_to\": \"Factory\"}\n p_list.append(post)\n\n\ndef is_number(s):\n try:\n int(s)\n return int(s)\n except ValueError:\n return 0\n\n\ndef show_answer():\n val = ''\n if ch == '1':\n val = 'Rice'\n exp = 2*365\n elif ch == '2':\n val = 'Wheat'\n exp = 6*30\n elif ch == '3':\n val = 'Sugar'\n exp = 2*365\n else:\n val = 'Oil'\n exp = 365\n quan = is_number(blank1.get())\n item_generator(val, quan, exp)\n blank1.delete(0, 'end')\n if len(p_list)>0:\n #pprint.pprint(p_list)\n db.items.insert_many(p_list)\n del p_list[:]\n\n\nmain = Tk()\nmain.resizable(0, 0)\nfnt = (None, 20)\n\nLabel(main, text=\"Choose Item\", font=fnt).grid(row=0)\nLabel(main, text=\"Enter Quantity\", font=fnt).grid(row=3)\n\nvar = IntVar()\nR1 = Radiobutton(main, text=\"Rice\", variable=var, value=1, font=fnt,\\\n command=sel)\nR2 = Radiobutton(main, text=\"Wheat\", variable=var, value=2, font=fnt,\\\n command=sel)\nR3 = Radiobutton(main, text=\"Sugar\", variable=var, value=3, font=fnt,\\\n command=sel)\nR4 = Radiobutton(main, text=\"Oil\", variable=var, value=4, font=fnt,\\\n command=sel)\n\nblank1 = Entry(main, font=fnt)\nblank1.grid(row=3, column=1)\nR1.grid(row=1, column=0)\nR2.grid(row=1, column=1)\nR3.grid(row=2, column=0)\nR4.grid(row=2, column=1)\n\nButton(main, text='Quit', bg='red', font=fnt, command=main.destroy).\\\n grid(row=4, column=0, sticky=W, pady=4)\nButton(main, text='Add records',bg='green', font=fnt, command=show_answer).\\\n grid(row=4, column=1, sticky=W, pady=4)\n\nmainloop()\n","sub_path":"subconn/Admin_tools/create_items.py","file_name":"create_items.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223406553","text":"\"\"\" The following is a module which takes implementations of\r\n sorting and searching algorithms from rosetta code,\r\n modifies them so that they count comaprisons, and prints the\r\n results \"\"\"\r\n\"\"\" note that this code is awful and all over the place, please see my\r\nerlang code for properly documented implementations of these algorithms\r\nwhich I wrote myself \"\"\"\r\n\r\nfrom heapq import merge\r\n\r\n\"\"\" an implementation of selection sort from\r\nhttps://rosettacode.org/wiki/Sorting_algorithms/Selection_sort#Python \"\"\"\r\ndef selection_sort(lst):\r\n count = 0\r\n for i, e in enumerate(lst):\r\n count += 1\r\n mn = min(range(i,len(lst)), key=lst.__getitem__)\r\n lst[i], lst[mn] = lst[mn], e\r\n return lst, count * 
len(lst)\r\n\r\n\"\"\" an implementation of insertion sort from https://rosettacode.org/wiki/Sorting_algorithms/Insertion_sort#Python \"\"\"\r\ndef insertion_sort(l):\r\n    count = 0\r\n    for i in range(1, len(l)):\r\n        j = i-1\r\n        key = l[i]\r\n        while (j >= 0) and (l[j] > key):\r\n            count += 2\r\n            l[j+1] = l[j]\r\n            j -= 1\r\n        l[j+1] = key\r\n\r\n    return count\r\n\r\ndef merge_sort_iterative(data):\r\n    \"\"\" sorts the data using bottom-up (iterative) merge sort and returns it with a comparison count.\r\n\r\n    reworked from the broken variant at: https://stackoverflow.com/a/24645442\r\n    \"\"\"\r\n    count = 0\r\n\r\n    width = 1\r\n    while width < len(data):\r\n        # merge adjacent runs of length `width`\r\n        for i in range(0, len(data), 2 * width):\r\n            left = data[i:i + width]\r\n            right = data[i + width:i + 2 * width]\r\n            merged = []\r\n            l = m = 0\r\n            while l < len(left) and m < len(right):\r\n                count += 1\r\n                if left[l] <= right[m]:\r\n                    merged.append(left[l])\r\n                    l += 1\r\n                else:\r\n                    merged.append(right[m])\r\n                    m += 1\r\n            merged.extend(left[l:])\r\n            merged.extend(right[m:])\r\n            data[i:i + 2 * width] = merged\r\n        width *= 2\r\n\r\n    return data, count\r\n\r\n\"\"\" a recursive implementation of quicksort from https://rosettacode.org/wiki/Sorting_algorithms/Quick_sort#Python \"\"\"\r\ndef quick_sort(arr):\r\n    count = 0\r\n    less = []\r\n    pivotList = []\r\n    more = []\r\n    if len(arr) <= 1:\r\n        count += 1\r\n        return arr, count\r\n    else:\r\n        pivot = arr[0]\r\n        for i in arr:\r\n            if i < pivot:\r\n                count += 1\r\n                less.append(i)\r\n            elif i > pivot:\r\n                count += 1\r\n                more.append(i)\r\n            else:\r\n                pivotList.append(i)\r\n        less, countLess = quick_sort(less)\r\n        more, countMore = quick_sort(more)\r\n        totalCount = countLess + countMore + count\r\n        return less + pivotList + more, totalCount\r\n\r\ndef readFile(fileName):\r\n    \"\"\"The readFile function reads a text file and returns an array of its lines\r\n\r\n    Args:\r\n        fileName (string): The name of the file the user wishes to read\r\n\r\n    Returns:\r\n        An array of the lines in the file\r\n\r\n    \"\"\"\r\n    with open(fileName) as file:\r\n        return [line for line in file]\r\n\r\ndef formatLine(line):\r\n    \"\"\"The formatLine function removes ALL whitespace, trailing special characters and shifts the line to lowercase\r\n\r\n    Note:\r\n        The function removes all whitespace in a string, not just leading and trailing spaces\r\n\r\n    Args:\r\n        line (string): The line to be formatted\r\n\r\n    Returns:\r\n        The formatted line\r\n\r\n    \"\"\"\r\n    return line.rstrip('\\n\\r').replace(' ', '').lower()\r\n\r\n\"\"\" the main routine for the module which handles all of the program logic\r\nincluding file io and console output \"\"\"\r\ndef main():\r\n    fileList = readFile('magicitems.txt')\r\n    fileArray = []\r\n    for line in fileList:\r\n        fileArray.append(formatLine(line))\r\n\r\n    # give each sorter its own unsorted copy so the comparison counts are comparable\r\n    insertionData = fileArray[:]\r\n    insertionCount = insertion_sort(insertionData)\r\n    _sorted, selectionCount = selection_sort(fileArray[:])\r\n    _sorted, mergeCount = merge_sort_iterative(fileArray[:])\r\n    _sorted, quickCount = quick_sort(fileArray[:])\r\n    print(\"Insertion Sort number of comparisons: \" + str(insertionCount))\r\n    print(\"Selection Sort number of comparisons: \" + str(selectionCount))\r\n    print(\"Merge Sort number of comparisons: \" + str(mergeCount))\r\n    print(\"Quick Sort number of comparisons: \" + str(quickCount))\r\n\r\nmain()\r\n","sub_path":"Assignments/Assignment2/code/python/assignment2Sorting.py","file_name":"assignment2Sorting.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"473283377","text":"from typing import Optional\n\nfrom darwin.future.core.client import Client\nfrom 
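Editor's note: a small sanity harness for the counting sorters above — each gets its own shuffled copy and is cross-checked against sorted() (the functions are assumed in scope from this module; remember insertion_sort sorts in place and returns only the count):

# Sketch: cross-check the counting sorters against sorted().
import random

data = [random.randint(0, 999) for _ in range(200)]

result, quick_count = quick_sort(list(data))
assert result == sorted(data)

result, selection_count = selection_sort(list(data))
assert result == sorted(data)

insertion_data = list(data)
insertion_count = insertion_sort(insertion_data)  # in place, returns the count
assert insertion_data == sorted(data)

print('quick: %d  selection: %d  insertion: %d comparisons'
      % (quick_count, selection_count, insertion_count))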
darwin.future.exceptions.core.datasets import DatasetNotFound\n\n\ndef remove_dataset(api_client: Client, id: int, team_slug: Optional[str] = None) -> int:\n    \"\"\"\n    Archives (i.e. removes) the dataset with the given id\n\n    Parameters\n    ----------\n    api_client : Client\n        The client to use to make the request\n    id : int\n        The id of the dataset to archive\n\n    Returns\n    -------\n    int\n        The id of the archived dataset\n    \"\"\"\n    if not team_slug:\n        team_slug = api_client.config.default_team\n\n    response = api_client.put(\n        f\"/datasets/{id}/archive\",\n        {\"team_slug\": team_slug},\n    )\n    assert isinstance(response, dict)\n\n    if \"id\" not in response:\n        raise DatasetNotFound(f\"Dataset with id {id} not found\")\n\n    return int(response[\"id\"])\n","sub_path":"darwin/future/core/datasets/remove_dataset.py","file_name":"remove_dataset.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"564422426","text":"from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply\nfrom keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\nimport os\nimport numpy as np\nimport sys\nimport cv2\n\n# py cuda epoch\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = sys.argv[1]\n\nT = int(sys.argv[2])\n\nnp.random.seed(126)\n\nclass CGAN():\n    def __init__(self):\n        # Input shape\n        self.img_rows = 64\n        self.img_cols = 64\n        self.channels = 3\n        self.img_shape = (self.img_rows, self.img_cols, self.channels)\n        self.num_classes = 120\n        self.latent_dim = 100\n\n        optimizer = Adam(0.0002, 0.5)\n\n        # Build and compile the discriminator\n\n        # Build the generator\n        self.generator = self.build_generator()\n\n\n    def build_generator(self):\n\n        generator=Sequential()\n        generator.add(Dense(50*16*16, input_dim=self.latent_dim, activation='relu'))\n        #generator.add(LeakyReLU())\n        generator.add(BatchNormalization(momentum=0.8))\n        generator.add(Reshape((16,16,50)))\n        generator.add(UpSampling2D())\n        generator.add(Conv2D(128, kernel_size=4, activation='relu'))\n        #generator.add(LeakyReLU())\n        generator.add(BatchNormalization(momentum=0.8))\n        generator.add(UpSampling2D())\n        generator.add(Conv2D(64, kernel_size=4, activation='relu'))\n        #generator.add(LeakyReLU())\n        generator.add(BatchNormalization(momentum=0.8))\n        generator.add(Conv2D(3, kernel_size=4, activation='relu'))\n        #generator.add(LeakyReLU())\n        generator.add(BatchNormalization(momentum=0.8))\n        generator.add(Flatten())\n        generator.add(Dense(64*64*3, activation='tanh'))\n        generator.add(Reshape((64,64,3)))\n\n        generator.summary()\n\n        noise = Input(shape=(self.latent_dim,))\n        label = Input(shape=(1,), dtype='int32')\n        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))\n\n        model_input = multiply([noise, label_embedding])\n        img = generator(model_input)\n\n        return Model([noise, label], img)\n\n\ndef save_imgs(generator):\n    import matplotlib.pyplot as plt\n\n\n    # 5 tags at the same time\n    r, c = 5, 5\n    noise = np.random.normal(0, 1, (r * c, 100))\n    label = [99,99,99,99,99, 66,66,66,66,66, 48,48,48,48,48, 4,4,4,4,4, 74,74,74,74,74]\n    label = np.array(label).reshape(-1,1)\n\n    # gen_imgs should be shape (25, 64, 64, 3)\n    gen_imgs = generator.predict([noise, label])\n    gen_imgs = (gen_imgs+1)/2\n    gen_imgs = gen_imgs.astype(float)\n\n    # bgr -> rgb\n    image_list=[]\n    for i in range(r*c):\n        
#image_list.append(cv2.cvtColor(gen_imgs[i], cv2.COLOR_BGR2RGB))\n image_list.append(gen_imgs[i,:,:,::-1])\n image_list = np.array(image_list)\n gen_imgs = image_list\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt,:,:,:])\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(\"output.png\")\n plt.close()\n\n\ngenerator = CGAN().generator\ngenerator.load_weights('model/cgan1_relu32_g_{}.h5'.format(T))\n\n\nsave_imgs(generator)\n","sub_path":"HW3/3-2/cgan1_generate.py","file_name":"cgan1_generate.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"408327642","text":"\r\nimport socket\r\nimport pygame\r\nfrom time import sleep\r\npygame.init()\r\ns = socket.socket()\r\ns.bind(('192.168.3.36',12345))\r\ns.listen()\r\nc,addr=s.accept()\r\nwinx=400\r\nwiny= 400\r\nwin = pygame.display.set_mode((winx,winy))\r\npygame.display.set_caption('dwada')\r\nplayerwid=50\r\nplayerhei=50\r\nnet_wid=None\r\nnet_hei=None\r\nx = 300\r\nh1 = False\r\ny = 200\r\nbullet = False\r\nxbul = x\r\nybul = None\r\nrun = True\r\nnet_x = None\r\nnet_y = None\r\njump = False\r\nwhile run:\r\n ##\r\n c.send(bytes(f't{xbul}', encoding='utf8'))\r\n ccwww = c.recv(1024).decode()\r\n c.send(bytes(f'g{y}', encoding='utf8'))\r\n cchhh = c.recv(1024).decode()\r\n ##\r\n #######################################\r\n c.send(bytes(f'w{playerwid}', encoding='utf8'))\r\n ccww = c.recv(1024).decode()\r\n c.send(bytes(f'h{playerhei}', encoding='utf8'))\r\n cchh = c.recv(1024).decode()\r\n #######################################\r\n c.send(bytes(f'x{x}', encoding='utf8'))\r\n ccx = c.recv(1024).decode()\r\n c.send(bytes(f'y{y}', encoding='utf8'))\r\n ccy = c.recv(1024).decode()\r\n ##########################################\r\n if ccww[0:1] == 'w':\r\n net_wid = int(ccww[1:])\r\n if cchh[0:1] == 'h':\r\n net_hei = int(cchh[1:])\r\n ###########################################\r\n if ccx[0:1] == 'x':\r\n net_x = int(ccx[1:])\r\n if ccy[0:1] == 'y':\r\n net_y = int(ccy[1:])\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run=False\r\n key = pygame.key.get_pressed()\r\n ##\r\n if ccwww[0:1] == 't':\r\n net_xx = int(ccwww[1:])\r\n if cchhh[0:1] == 'g':\r\n net_yy = int(cchhh[1:])\r\n ##\r\n if key[pygame.K_w] and y > 0:\r\n y -= 1\r\n if key[pygame.K_s] and y < winy-playerhei :\r\n y += 1\r\n if key[pygame.K_d] and x < winx-playerwid:\r\n x += 1\r\n if key[pygame.K_a] and x > 0:\r\n x -= 1\r\n\r\n #########################\r\n #########################\r\n if h1 == True:\r\n xbul = x\r\n if xbul > winx:\r\n print('dwad')\r\n xbul = x\r\n if key[pygame.K_e]:\r\n bullet = True\r\n h1 = True\r\n h1 = False\r\n if xbul > 450:\r\n bullet = False\r\n xbul = x\r\n if bullet == True:\r\n xbul += 1\r\n##################################\r\n win.fill((255,255,255))\r\n pygame.draw.rect(win,'green',[x,y,playerwid,playerhei])\r\n pygame.draw.rect(win, 'red', [net_x, net_y,net_wid,net_hei])\r\n ###############################################################\r\n pygame.draw.rect(win, 'black', [xbul , y, 8, 8])\r\n pygame.draw.rect(win, 'black', [net_xx , net_y, 8, 8])\r\n ################################################################\r\n 
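Editor's note: the socket exchange above frames every value as a one-letter tag followed by digits ('x300', 'y200', ...) and parses by slicing. The framing in isolation (sketch; it mirrors the ccx[0:1] / ccx[1:] pattern used on both peers):

# Sketch: the one-letter tag framing used by the game loop above.
def encode(tag, value):
    return bytes('{}{}'.format(tag, value), encoding='utf8')

def decode(payload):
    text = payload.decode()
    return text[0:1], int(text[1:])  # (tag, value)

assert decode(encode('x', 300)) == ('x', 300)
assert decode(encode('y', 200)) == ('y', 200)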
    pygame.display.update()\r\n\r\n\r\n","sub_path":"GameMultiPlayer/GameServer.py","file_name":"GameServer.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"298307714","text":"import os\n\n\ndef find_mac_adresses():\n    adresses = []\n    os.system(\"arp>arp.txt\")\n    file = open(\"arp.txt\", \"r\")\n    lines = file.readlines()\n    print(\"start\")\n    for line in lines:\n        if (line != lines[0]):  # not the first line\n            adress = \"\"\n            for i in range(33, 50):\n                adress += line[i]\n            print(adress)\n            adresses.append(adress)\n    return adresses\n\ndef check_if_safe():\n    adresses=find_mac_adresses()\n    flag = 0\n\n    # using set() + len()\n    # to check all unique list elements\n    flag = len(set(adresses)) == len(adresses)\n\n    # printing result\n    if (flag):\n        return True\n    else:\n        return False\n\ncheck_if_safe()\n","sub_path":"safe_checker.py","file_name":"safe_checker.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"104661979","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 5 10:51:03 2017\n\n@author: thuzhang\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport pywt  # Python wavelet transform package\n\ndef wt(index_list,wavefunc,lv,m,n):  # wrapped as a function for easy parameter tuning: lv is the decomposition depth; index_list is the series to denoise; wavefunc is the chosen wavelet; m, n pick the detail-coefficient levels to threshold\n\n    # decomposition\n    coeff = pywt.wavedec(index_list,wavefunc,mode='sym',level=lv)  # decompose into lv levels with pywt; cA holds the approximation coefficients, cD1..cDlv the detail coefficients\n\n    sgn = lambda x: 1 if x > 0 else -1 if x < 0 else 0  # sign function\n\n    # denoising\n    for i in range(m,n+1):  # threshold detail-coefficient levels m..n; the approximation coefficients are left untouched\n        cD = coeff[i]\n        for j in range(len(cD)):\n            Tr = np.sqrt(2*np.log(len(cD)))  # universal threshold\n            if abs(cD[j]) >= Tr:\n                coeff[i][j] = sgn(cD[j]) * (abs(cD[j]) - Tr)  # soft threshold: shrink toward zero\n            else:\n                coeff[i][j] = 0  # zero out below the threshold\n\n    # reconstruction\n    denoised_index = pywt.waverec(coeff,wavefunc)\n    plt.figure(figsize=(15,5))\n    plt.plot(index_list, 'blue')\n    plt.plot(denoised_index,'red')\n    plt.show()\n    return denoised_index\n\n","sub_path":"WaveLetMethodForPredictPower/WaveLetDenoise.py","file_name":"WaveLetDenoise.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"165381661","text":"import numpy as np\n\nfrom gym_pybullet_drones.utils.enums import DroneModel, Physics\nfrom gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ActionType, ObservationType, BaseSingleAgentAviary\n\nclass HoverAviary(BaseSingleAgentAviary):\n    \"\"\"Single agent RL problem: hover at position.\"\"\"\n\n    ################################################################################\n    \n    def __init__(self,\n                 drone_model: DroneModel=DroneModel.CF2X,\n                 initial_xyzs=None,\n                 initial_rpys=None,\n                 physics: Physics=Physics.PYB,\n                 pyb_freq: int = 240,\n                 ctrl_freq: int = 240,\n                 gui=False,\n                 record=False,\n                 obs: ObservationType=ObservationType.KIN,\n                 act: ActionType=ActionType.RPM\n                 ):\n        \"\"\"Initialization of a single agent RL environment.\n\n        Using the generic single agent RL superclass.\n\n        Parameters\n        ----------\n        drone_model : DroneModel, optional\n            The desired drone type (detailed in an .urdf file in folder `assets`).\n        initial_xyzs: ndarray | None, optional\n            (NUM_DRONES, 3)-shaped array containing the initial XYZ position of the drones.\n        initial_rpys: ndarray | None, optional\n            (NUM_DRONES, 3)-shaped array containing the initial orientations of the drones (in radians).\n        physics : 
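Editor's note: a usage sketch for wt() above — denoise a noisy sine with a db4 wavelet, thresholding detail levels 1 through 4. The wavelet choice and level counts are illustrative; wt() is assumed in scope, and it already pulls in pywt and matplotlib:

# Sketch: exercising wt() from WaveLetDenoise.py on synthetic data.
import numpy as np

t = np.linspace(0, 1, 1024)
clean = np.sin(2 * np.pi * 5 * t)
noisy = clean + 0.3 * np.random.randn(t.size)

denoised = wt(noisy, 'db4', lv=4, m=1, n=4)  # threshold detail levels 1..4
# waverec can pad by a sample, so compare on the original length
print('residual RMS: %.3f' % np.sqrt(np.mean((denoised[:t.size] - clean) ** 2)))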
Physics, optional\n The desired implementation of PyBullet physics/custom dynamics.\n pyb_freq : int, optional\n The frequency at which PyBullet steps (a multiple of ctrl_freq).\n ctrl_freq : int, optional\n The frequency at which the environment steps.\n gui : bool, optional\n Whether to use PyBullet's GUI.\n record : bool, optional\n Whether to save a video of the simulation in folder `files/videos/`.\n obs : ObservationType, optional\n The type of observation space (kinematic information or vision)\n act : ActionType, optional\n The type of action space (1 or 3D; RPMS, thurst and torques, or waypoint with PID control)\n\n \"\"\"\n super().__init__(drone_model=drone_model,\n initial_xyzs=initial_xyzs,\n initial_rpys=initial_rpys,\n physics=physics,\n pyb_freq=pyb_freq,\n ctrl_freq=ctrl_freq,\n gui=gui,\n record=record,\n obs=obs,\n act=act\n )\n\n ################################################################################\n \n def _computeReward(self):\n \"\"\"Computes the current reward value.\n\n Returns\n -------\n float\n The reward.\n\n \"\"\"\n state = self._getDroneStateVector(0)\n return -1 * np.linalg.norm(np.array([0, 0, 1])-state[0:3])**2\n\n ################################################################################\n \n def _computeTerminated(self):\n \"\"\"Computes the current done value.\n\n Returns\n -------\n bool\n Whether the current episode is done.\n\n \"\"\"\n if self.step_counter/self.PYB_FREQ > self.EPISODE_LEN_SEC:\n return True\n else:\n return False\n \n ################################################################################\n \n def _computeTruncated(self):\n \"\"\"Computes the current truncated value(s).\n\n Unused in this implementation.\n\n Returns\n -------\n bool\n Always false.\n\n \"\"\"\n return False\n\n ################################################################################\n \n def _computeInfo(self):\n \"\"\"Computes the current info dict(s).\n\n Unused.\n\n Returns\n -------\n dict[str, int]\n Dummy value.\n\n \"\"\"\n return {\"answer\": 42} #### Calculated by the Deep Thought supercomputer in 7.5M years\n\n ################################################################################\n \n def _clipAndNormalizeState(self,\n state\n ):\n \"\"\"Normalizes a drone's state to the [-1,1] range.\n\n Parameters\n ----------\n state : ndarray\n (20,)-shaped array of floats containing the non-normalized state of a single drone.\n\n Returns\n -------\n ndarray\n (20,)-shaped array of floats containing the normalized state of a single drone.\n\n \"\"\"\n MAX_LIN_VEL_XY = 3 \n MAX_LIN_VEL_Z = 1\n\n MAX_XY = MAX_LIN_VEL_XY*self.EPISODE_LEN_SEC\n MAX_Z = MAX_LIN_VEL_Z*self.EPISODE_LEN_SEC\n\n MAX_PITCH_ROLL = np.pi # Full range\n\n clipped_pos_xy = np.clip(state[0:2], -MAX_XY, MAX_XY)\n clipped_pos_z = np.clip(state[2], 0, MAX_Z)\n clipped_rp = np.clip(state[7:9], -MAX_PITCH_ROLL, MAX_PITCH_ROLL)\n clipped_vel_xy = np.clip(state[10:12], -MAX_LIN_VEL_XY, MAX_LIN_VEL_XY)\n clipped_vel_z = np.clip(state[12], -MAX_LIN_VEL_Z, MAX_LIN_VEL_Z)\n\n if self.GUI:\n self._clipAndNormalizeStateWarning(state,\n clipped_pos_xy,\n clipped_pos_z,\n clipped_rp,\n clipped_vel_xy,\n clipped_vel_z\n )\n\n normalized_pos_xy = clipped_pos_xy / MAX_XY\n normalized_pos_z = clipped_pos_z / MAX_Z\n normalized_rp = clipped_rp / MAX_PITCH_ROLL\n normalized_y = state[9] / np.pi # No reason to clip\n normalized_vel_xy = clipped_vel_xy / MAX_LIN_VEL_XY\n normalized_vel_z = clipped_vel_z / MAX_LIN_VEL_XY\n normalized_ang_vel = 
state[13:16]/np.linalg.norm(state[13:16]) if np.linalg.norm(state[13:16]) != 0 else state[13:16]\n\n        norm_and_clipped = np.hstack([normalized_pos_xy,\n                                      normalized_pos_z,\n                                      state[3:7],\n                                      normalized_rp,\n                                      normalized_y,\n                                      normalized_vel_xy,\n                                      normalized_vel_z,\n                                      normalized_ang_vel,\n                                      state[16:20]\n                                      ]).reshape(20,)\n\n        return norm_and_clipped\n    \n    ################################################################################\n    \n    def _clipAndNormalizeStateWarning(self,\n                                      state,\n                                      clipped_pos_xy,\n                                      clipped_pos_z,\n                                      clipped_rp,\n                                      clipped_vel_xy,\n                                      clipped_vel_z,\n                                      ):\n        \"\"\"Debugging printouts associated to `_clipAndNormalizeState`.\n\n        Print a warning if values in a state vector are out of the clipping range.\n        \n        \"\"\"\n        if not(clipped_pos_xy == np.array(state[0:2])).all():\n            print(\"[WARNING] it\", self.step_counter, \"in HoverAviary._clipAndNormalizeState(), clipped xy position [{:.2f} {:.2f}]\".format(state[0], state[1]))\n        if not(clipped_pos_z == np.array(state[2])).all():\n            print(\"[WARNING] it\", self.step_counter, \"in HoverAviary._clipAndNormalizeState(), clipped z position [{:.2f}]\".format(state[2]))\n        if not(clipped_rp == np.array(state[7:9])).all():\n            print(\"[WARNING] it\", self.step_counter, \"in HoverAviary._clipAndNormalizeState(), clipped roll/pitch [{:.2f} {:.2f}]\".format(state[7], state[8]))\n        if not(clipped_vel_xy == np.array(state[10:12])).all():\n            print(\"[WARNING] it\", self.step_counter, \"in HoverAviary._clipAndNormalizeState(), clipped xy velocity [{:.2f} {:.2f}]\".format(state[10], state[11]))\n        if not(clipped_vel_z == np.array(state[12])).all():\n            print(\"[WARNING] it\", self.step_counter, \"in HoverAviary._clipAndNormalizeState(), clipped z velocity [{:.2f}]\".format(state[12]))\n","sub_path":"gym_pybullet_drones/envs/single_agent_rl/HoverAviary.py","file_name":"HoverAviary.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"380058959","text":"import re\nimport os\nimport json\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nfrom Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\n\nfactory = StemmerFactory()\nfactoryStop = StopWordRemoverFactory()\nstemmer = factory.create_stemmer()\nstopword = factoryStop.create_stop_word_remover()\n\ndef get_data(name):\n    with open(dir_path+\"/\"+str(name), \"r\") as filename:\n        return json.load(filename)\ndef save_data(name, data):\n    with open(dir_path+\"/\"+str(name), \"w\") as filename:\n        json.dump(data, filename)\n\nlas_use = get_data(\"data_stemmer/last_use.json\")\nfail_stem = get_data(\"data_stemmer/fail_stem.json\")\nkata_dasar = get_data(\"data_stemmer/kata-dasar.json\")\n\ndef stemmer_kata(teks):\n    # print(len(last_use_k))\n    # print(len(last_use_r))\n    teks_s = str(teks).split()\n    for i, kt in enumerate(teks_s):\n        if kt in las_use:\n            teks_s[i] = las_use[kt]\n            # teks_s[i] = last_use_r[last_use_k.index(kt)]\n        elif kt in fail_stem or kt in kata_dasar:\n            continue\n        else:\n            teks_s[i] = stemmer.stem(kt)\n            # print(\"*\", end=\"\")\n            if teks_s[i] != kt:\n                las_use[kt] = teks_s[i]\n                # last_use_k.append(kt)\n                # last_use_r.append(teks_s[i])\n            else:\n                if kt not in fail_stem:\n                    # last_use_aneh.append(kt)\n                    fail_stem.insert(0,kt)\n    try:\n        save_data(name=\"data_stemmer/last_use.json\", data=las_use)\n        save_data(name=\"data_stemmer/fail_stem.json\", data=fail_stem)\n    except:\n        pass\n    return \" \".join(teks_s)\n\ndef 
stop_word(kata):\n kata = kata.split()\n n_kata = list()\n for i in kata:\n n_kata.append(stopword.remove(i))\n a = re.sub(' +', ' ',\" \".join(n_kata))\n a = a.lstrip()\n return a","sub_path":"Tanpa Seleksi fitur/modul/stemnstopword.py","file_name":"stemnstopword.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"348161284","text":"from bs4 import BeautifulSoup as soup \nfrom urllib.request import urlopen as uReq \n\n# Url to Scrape\npage_url = \"https://www.newegg.com/p/pl?d=ram\"\n\n# opens the connection and downloads html page from url\nuClient = uReq(page_url)\n\n# parses html into a soup data structure to traverse html\n# as if it were a json data type.\npage_soup = soup(uClient.read(), \"html.parser\")\nuClient.close()\n\n# finds each product\ncontainers = page_soup.findAll(\"div\", {\"class\": \"item-container\"})\n\n# name the output file to write to local disk\nout_filename = \"ram.csv\"\n# header of csv file to be written\nheaders = \"Brand, Item_name, Shipping_fee, Price \\n\"\n\n# opens file, and writes headers\nf = open(out_filename, \"w\")\nf.write(headers)\n\n# loops over each product and grabs attributes about\n# each product\nfor container in containers:\n div_with_info = container.find('div','item-info').div.select(\"a\")\n brand = div_with_info[0].img[\"title\"].title()\n title_container = container.find('a',\"item-title\")\n title = title_container.text\n price_container = container.findAll('li',{'class':'price-current'})\n price = price_container[0].strong.text.strip()\n shipping_container = container.findAll('li',{'class': 'price-ship'})\n shipping = shipping_container[0].text.strip()\n\n # Write to csv file\n f.write(brand + \", \" + title.replace(\",\",\"|\") + \",\" + shipping + \", $\" + price + \".99 \" + \"\\n\")\n\nf.close() # Close the file\n\n\n","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471992114","text":"import os\nimport settings\nimport pandas as pd\nimport numpy as np\n# fix random seed for reproducibility\nnp.random.seed(settings.SEED)\nfrom math import sqrt\nimport matplotlib\n# be able to save images on server\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.models import model_from_json\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\n# frame a sequence as a supervised learning problem\ndef timeseries_to_supervised(data, lag=1):\n\tdf = pd.DataFrame(data)\n\tcolumns = [df.shift(i) for i in range(1, lag+1)]\n\tcolumns.append(df)\n\tdf = pd.concat(columns, axis=1)\n\tdf.fillna(0, inplace=True)\n\treturn df\n\n# create a differenced series\ndef difference(dataset, interval=1):\n\tdiff = list()\n\tfor i in range(interval, len(dataset)):\n\t\tvalue = dataset[i] - dataset[i - interval]\n\t\tdiff.append(value)\n\treturn pd.Series(diff)\n \n# invert differenced value\ndef inverse_difference(history, yhat, interval=1):\n\treturn yhat + history[-interval]\n\n# scale train and test data to [-1, 1]\ndef scale(train, test):\n\t# fit scaler\n\tscaler = MinMaxScaler(feature_range=(-1, 1))\n\tscaler = scaler.fit(train)\n\t# transform train\n\ttrain = train.reshape(train.shape[0], train.shape[1])\n\ttrain_scaled = scaler.transform(train)\n\t# transform 
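Editor's note: difference() and inverse_difference() above are exact inverses when the history is replayed step by step — a tiny round-trip check (toy series; the functions are assumed in scope from this module):

# Sketch: difference()/inverse_difference() round trip on a toy series.
raw = [10, 12, 15, 11, 18, 20]
diff = difference(raw, 1)  # pandas Series, one element shorter than raw

restored = [raw[0]]
for d in diff:
    restored.append(inverse_difference(restored, d, 1))  # add back the last level
assert restored == raw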
test\n\ttest = test.reshape(test.shape[0], test.shape[1])\n\ttest_scaled = scaler.transform(test)\n\treturn scaler, train_scaled, test_scaled\n\n# inverse scaling for a forecasted value\ndef invert_scale(scaler, X, value):\n\t# combine input and output\n\tnew_row = [x for x in X] + [value]\n\tarray = np.array(new_row)\n\tarray = array.reshape(1, len(array))\n\tinverted = scaler.inverse_transform(array)\n\t#return output\n\treturn inverted[0, -1]\n\ndef fit_lstm(train, batch_size, epochs, neurons):\n\tX, y = train[:, 0:-1], train[:, -1]\n\tX = X.reshape(X.shape[0], 1, X.shape[1])\n\tmodel = Sequential()\n\tmodel.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))\n\tmodel.add(Dense(1))\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\tfor i in range(epochs):\n\t\tmodel.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n\t\tmodel.reset_states()\n\treturn model\n\n# make a one-step forecast\ndef forecast_lstm(model, batch_size, X):\n\tX = X.reshape(1, 1, len(X))\n\tyhat = model.predict(X, batch_size=batch_size)\n\treturn yhat[0,0]\n\n# Update LSTM model\ndef update_model(model, train, batch_size, epochs):\n\tX, y = train[:, 0:-1], train[:, -1]\n\tX = X.reshape(X.shape[0], 1, X.shape[1])\n\tfor i in range(epochs):\n\t\tmodel.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n\t\tmodel.reset_states()\n\n\ndef evaluate_model(scaler, train_scaled, test_scaled, raw_values, batch_size=1, epochs=2000, neurons=2, updates=0, seed=True):\n\t# fit the model\n\tlstm_model = fit_lstm(train_scaled, batch_size, epochs, neurons)\n\t# forecast the entire training dataset to build up state for forecasting\n\tif seed:\n\t\ttrain_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\n\t\tlstm_model.predict(train_reshaped, batch_size=batch_size)\n\t# walk-forward validation on validation set\n\ttrain_copy = np.copy(train_scaled)\n\tpredictions = list()\n\tfor i in range(len(test_scaled)):\n\t\t# update model\n\t\tif i > 0 and updates>0:\n\t\t\tupdate_model(lstm_model, train_copy, batch_size, updates)\n\t\t# make one-step forecast\n\t\tX, y = test_scaled[i, 0:-1], test_scaled[i, -1]\n\t\tyhat = forecast_lstm(lstm_model, 1, X)\n\t\t# invert scaling\n\t\tyhat = invert_scale(scaler, X, yhat)\n\t\t# invert differencing\n\t\tyhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)\n\t\t# store forecast\n\t\tpredictions.append(yhat)\n\t\t# add to training set\n\t\ttrain_copy = np.concatenate((train_copy, test_scaled[i,:].reshape(1, -1)))\n\t\t# print outputs\n\t\texpected = raw_values[len(train_scaled) + i + 1]\n\t\t#print('>Predicted=%f, Expected=%f' % (yhat, expected))\n\n\t# report performance\n\trmse = sqrt(mean_squared_error(raw_values[-len(test_scaled):], predictions))\n\t#print('RMSE: %.3f' % rmse)\n\n\treturn rmse, predictions\n\n\n'''\n# load data\nseries = pd.Series.from_csv(os.path.join(settings.PROCESSED_DIR, 'train.csv'))\n\n# transform data to be stationary\nraw_values = series.values\ndiff_values = difference(raw_values, 1)\n# transform data to be supervised learning\nsupervised = timeseries_to_supervised(diff_values, 1)\nsupervised_values = supervised.values\n# split data into train and validation-sets\ntrain, validation = supervised_values[0:-12], supervised_values[-12:]\n# transform the scale of the data\nscaler, train_scaled, validation_scaled = scale(train, validation)\n\n# fit the model\n#evaluate_model(scaler, train_scaled, validation_scaled, raw_values, batch_size=1, epochs=1000, neurons=4, 
updates=0, seed=True)\n\n# experiment\n# vary training update epochs\nepochs = [0, 2, 5, 10, 20]\nfor e in epochs:\n\trmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, updates=e)\n\tprint('>Updates=%d, RMSE=%.3f' % (e, rmse))\n\n\n# stateless vs Stateful \nrmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, seed=True, updates=0)\nprint('>with-seed, RMSE=%.3f' % (rmse))\nrmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, seed=False, updates=0)\nprint('>without-seed, RMSE=%.3f' % (rmse))\n\n\n# vary training epochs\nepochs = [500, 1000, 2000, 3000]\nfor e in epochs:\n\trmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, epochs=e)\n\tprint('>Epochs=%d, RMSE=%.3f' % (e, rmse))\n\n\n# vary training batches\nbatches = [1, 2, 4]\nfor e in batches:\n\trmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, batch_size=e)\n\tprint('>Batches=%d, RMSE=%.3f' % (e, rmse))\n\n\n\n# vary training neurons\nneurons = [1, 2, 3, 4, 5]\nfor e in neurons:\n\trmse, predictions = evaluate_model(scaler, train_scaled, validation_scaled, raw_values, neurons=e)\n\tprint('>Neurons=%d, RMSE=%.3f' % (e, rmse))\n'''\n\n\n# Finalize Model\n# load data\ntrain = pd.Series.from_csv(os.path.join(settings.PROCESSED_DIR, 'train.csv'))\n\n# transform data to be stationary\nraw_values = train.values\ndiff_values = difference(raw_values, 1)\n# transform data to be supervised learning\nsupervised = timeseries_to_supervised(diff_values, 1)\nsupervised_values = supervised.values\n# fit scaler\nscaler = MinMaxScaler(feature_range=(-1, 1))\nscaler = scaler.fit(supervised_values)\n# transform train\nreshaped_values = supervised_values.reshape(supervised_values.shape[0], supervised_values.shape[1])\ntrain_scaled = scaler.transform(reshaped_values)\n\n# Save final model (combine train and validation)\nlstm_model = fit_lstm(train_scaled, batch_size=1, epochs=2000, neurons=2)\n\n# save model to file\n# serialize model to JSON\nmodel_json = lstm_model.to_json()\nwith open(os.path.join(settings.OUTPUT_DIR, \"lstm_model.json\"), \"w\") as json_file:\n json_file.write(model_json)\n\n# serialize weights to HDF5\nlstm_model.save_weights(os.path.join(settings.OUTPUT_DIR, \"lstm_model.h5\"))\n\n\n\n\n# Validate model\n# load json and create model\njson_file = open(os.path.join(settings.OUTPUT_DIR, \"lstm_model.json\"), 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(os.path.join(settings.OUTPUT_DIR, \"lstm_model.h5\"))\n# evaluate loaded model on test data\nloaded_model.compile(loss='mean_squared_error', optimizer='adam')\n\n\n# load and prepare datasets\ntrain = pd.Series.from_csv(settings.PROCESSED_DIR + 'train.csv')\ntest = pd.Series.from_csv(settings.PROCESSED_DIR + 'test.csv')\nseries = pd.concat([train, test])\n# transform data to be stationary\nraw_values = series.values\ndiff_values = difference(raw_values, 1)\n# transform data to be supervised learning\nsupervised = timeseries_to_supervised(diff_values, 1)\nsupervised_values = supervised.values\n# split data into train and validation-sets\nsupervised_train, supervised_test = supervised_values[0:-len(test)], supervised_values[-len(test):]\n# transform the scale of the data\nscaler, train_scaled, test_scaled = scale(supervised_train, supervised_test)\n\n\nbatch_size=1\n#updates=2\n# forecast the 
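Editor's note: the JSON-plus-HDF5 round trip above persists only the architecture and weights — optimizer state is not stored, which is why the reloaded model is compile()d again before use. The pattern in isolation (sketch; the toy model and file names are illustrative):

# Sketch: Keras architecture + weights persistence, as used above.
from keras.models import Sequential, model_from_json
from keras.layers import Dense

model = Sequential([Dense(4, input_dim=2, activation='relu'), Dense(1)])
model.compile(loss='mean_squared_error', optimizer='adam')

with open('toy_model.json', 'w') as json_file:
    json_file.write(model.to_json())    # topology only
model.save_weights('toy_model.h5')      # weights only

with open('toy_model.json') as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights('toy_model.h5')
restored.compile(loss='mean_squared_error', optimizer='adam')  # fresh optimizer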
entire training dataset to build up state for forecasting\ntrain_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\nloaded_model.predict(train_reshaped, batch_size=batch_size)\n# walk-forward validation on validation set\n#train_copy = np.copy(train_scaled)\npredictions = list()\nfor i in range(len(test_scaled)):\n\t# update model\n\t#if i > 0:\n\t\t#update_model(loaded_model, train_copy, batch_size, updates)\n\t# make one-step forecast\n\tX, y = test_scaled[i, 0:-1], test_scaled[i, -1]\n\tyhat = forecast_lstm(loaded_model, 1, X)\n\t# invert scaling\n\tyhat = invert_scale(scaler, X, yhat)\n\t# invert differencing\n\tyhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)\n\t# store forecast\n\tpredictions.append(yhat)\n\t# add to training set\n\t#train_copy = np.concatenate((train_copy, test_scaled[i,:].reshape(1, -1)))\n\t# print outputs\n\texpected = raw_values[len(train_scaled) + i + 1]\n\tprint('>Predicted=%f, Expected=%f' % (yhat, expected))\n# report performance\nrmse = sqrt(mean_squared_error(raw_values[-len(test_scaled):], predictions))\nprint('RMSE: %.3f' % rmse)\n\nplt.plot(test.values)\nplt.plot(predictions, color='red')\nplt.title('LSTM Model for test set')\nplt.savefig(os.path.join(settings.OUTPUT_DIR, \"lstm_model.png\"))\nplt.close()\n\n","sub_path":"LSTM_model.py","file_name":"LSTM_model.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"221104571","text":"# -*- coding: utf-8 -*-\n\"\"\" Task set for the 'My Patients' list on mobile\"\"\"\nimport logging\nfrom random import randint\nfrom locust import task\nfrom tasks.mobile.patient_view import PatientView\nfrom liveobs_ui.page_object_models.mobile.list_page import ListPage\nfrom .list import List\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass PatientList(List):\n \"\"\" Task set for Patient List \"\"\"\n\n def on_start(self):\n \"\"\" Actions to carry out on start of the task set \"\"\"\n self.client.timed_event_for_locust(\n 'request',\n 'Load Patient List Page',\n self.load_patient_list\n )\n self.client.timed_event_for_locust(\n 'request',\n 'Select Patient in Patient List',\n self.select_patient_list_item\n )\n\n def load_patient_list(self):\n \"\"\"\n Load the patient list page\n \"\"\"\n patient_list_page = ListPage(self.client)\n patient_list_page.go_to_patient_list()\n\n def select_patient_list_item(self):\n \"\"\"\n Select a random patient list item\n \"\"\"\n patient_list_page = ListPage(self.client)\n patients = patient_list_page.get_list_items()\n patient_item = patients[randint(0, len(patients) - 1)]\n patient_list_page.open_item(patient_item)\n\n tasks = {PatientView: 9}\n\n @task(1)\n def stop(self):\n \"\"\" Exit patient list to task above this in Locust hierarchy \"\"\"\n self.client.timed_event_for_locust(\n 'request',\n 'Abandoning Patient List',\n self.client.get,\n '{}/mobile/patients'.format(self.locust.host)\n )\n _LOGGER.info(\"Stopping patient list.\")\n self.escape('MobileHome')\n","sub_path":"tasks/mobile/patient_list.py","file_name":"patient_list.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602546892","text":"import os\nimport shutil\nimport json\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, models, transforms\nimport torch\nimport catalyst\nfrom catalyst.dl import 
SupervisedRunner\nfrom catalyst.dl.callbacks import AccuracyCallback\nfrom transforms import *\n\nDEFALUT_CANDIDATES = [\n ShearXY,\n TranslateXY,\n Rotate,\n AutoContrast,\n Invert,\n Equalize,\n Solarize,\n Posterize,\n Contrast,\n Color,\n Brightness,\n Sharpness,\n Cutout,\n]\n\n\ndef get_criterion():\n return nn.CrossEntropyLoss()\n\n\ndef get_loaders(individual: list, batch_size: int = 16, num_workers: int = 4, is_mini: bool = False):\n class MiniCifer10Dataset(datasets.CIFAR10):\n def __init__(self, num_data: int, root: str, train: bool = True, **args: dict):\n super(MiniCifer10Dataset, self).__init__(root, train, **args)\n self.data = self.data[:num_data]\n self.targets = self.targets[:num_data]\n subpolicy = []\n for use, op in zip(individual, DEFALUT_CANDIDATES):\n if use:\n subpolicy.append(op(0.2, 0.5))\n\n transform = transforms.Compose([transforms.Resize(32), *subpolicy, transforms.ToTensor()])\n\n # Dataset\n args_dataset = dict(root='./data', download=True, transform=transform)\n trainset = MiniCifer10Dataset(num_data=1000, train=True, **args_dataset) if is_mini \\\n else datasets.CIFAR10(train=True, **args_dataset)\n testset = MiniCifer10Dataset(num_data=1000, train=False, **args_dataset) if is_mini \\\n else datasets.CIFAR10(train=False, **args_dataset)\n\n # Data Loader\n args_loader = dict(batch_size=batch_size, num_workers=num_workers)\n train_loader = DataLoader(trainset, shuffle=True, **args_loader)\n val_loader = DataLoader(testset, shuffle=False, **args_loader)\n return train_loader, val_loader\n\n\ndef get_model(num_class: int = 10):\n model = models.resnet18(pretrained=True)\n num_features = model.fc.in_features\n model.fc = nn.Linear(num_features, num_class)\n return model\n\n\ndef get_optimizer(model: torch.nn.Module, init_lr: float = 1e-3, epoch: int = 10):\n optimizer = optim.SGD(model.parameters(), lr=init_lr, momentum=0.9)\n lr_scheduler = optim.lr_scheduler.MultiStepLR(\n optimizer,\n milestones=[int(epoch*0.8), int(epoch*0.9)],\n gamma=0.1\n )\n return optimizer, lr_scheduler\n\n\ndef evaluate(individual: list):\n epochs = 1\n num_class = 10\n output_path = './output/train_cifer10'\n # output_path = None\n model = get_model()\n train_loader, val_loader = get_loaders(individual=individual, is_mini=True)\n loaders = {\"train\": train_loader, \"valid\": val_loader}\n\n optimizer, lr_scheduler = get_optimizer(model=model)\n criterion = get_criterion()\n\n runner = SupervisedRunner(device=catalyst.utils.get_device())\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=lr_scheduler,\n loaders=loaders,\n logdir=output_path,\n callbacks=[AccuracyCallback(num_classes=num_class, accuracy_args=[1])],\n num_epochs=epochs,\n main_metric=\"accuracy01\",\n minimize_metric=False,\n fp16=None,\n verbose=True\n )\n\n is_remove_events = True\n if is_remove_events:\n shutil.rmtree(os.path.join(output_path, \"_base_log\"))\n shutil.rmtree(os.path.join(output_path, \"train_log\"))\n shutil.rmtree(os.path.join(output_path, \"valid_log\"))\n os.remove(os.path.join(output_path, \"log.txt\"))\n\n with open(os.path.join(output_path, \"checkpoints\", \"_metrics.json\"), encoding=\"utf-8\") as f:\n dic = json.load(f)\n\n return dic[\"best\"][\"accuracy01\"],\n\n\nif __name__ == \"__main__\":\n individual = [0, 0.5, 1, 2, 0.5, 1]\n 
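Editor's note: in get_loaders() above, the GA individual acts as a per-op gate over DEFALUT_CANDIDATES — zip pairs each gene with a transform class and any truthy gene switches its op on. The selection step in isolation (sketch; string op names stand in for the transform classes):

# Sketch: how an `individual` gates the augmentation candidates in get_loaders().
candidates = ['ShearXY', 'TranslateXY', 'Rotate', 'AutoContrast', 'Invert', 'Equalize']
individual = [0, 0.5, 1, 2, 0.5, 1]  # same shape as the __main__ example above

subpolicy = [op for use, op in zip(individual, candidates) if use]
print(subpolicy)
# -> ['TranslateXY', 'Rotate', 'AutoContrast', 'Invert', 'Equalize']
# the 0 gene drops ShearXY; zip() also quietly truncates to the shorter list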
evaluate(individual)\n","sub_path":"workspace/train_cifer10.py","file_name":"train_cifer10.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"65128229","text":"import json\nimport os\nimport uuid\n\nimport pytest\nfrom alembic.command import upgrade as upgrade_command\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy_utils import create_database, drop_database\nfrom yarl import URL\n\nfrom citizens.utils.testing import get_alembic_config\n\n\nTESTS_DIR = os.path.dirname(__file__)\nDATABASE_URL = os.getenv('DATABASE_URL',\n                         'postgresql://me:hackme@0.0.0.0/citizens')\n\n\n@pytest.fixture\ndef citizens_data():\n    path = os.path.join(TESTS_DIR, 'data/citizens.json')\n    with open(path) as f:\n        return json.load(f)['citizens']\n\n\n@pytest.fixture\ndef temp_db() -> str:\n    tmp_db_name = '.'.join([uuid.uuid4().hex, 'pytest'])\n    tmp_db_url = str(URL(DATABASE_URL).with_path(tmp_db_name))\n    create_database(tmp_db_url)\n\n    try:\n        yield tmp_db_url\n    finally:\n        drop_database(tmp_db_url)\n\n\n@pytest.fixture\ndef temp_migrated_db(temp_db) -> str:\n    config = get_alembic_config(temp_db)\n    upgrade_command(config, 'head')\n    return temp_db\n\n\n@pytest.fixture()\ndef temp_migrated_db_engine(temp_migrated_db) -> Engine:\n    engine = create_engine(temp_migrated_db, echo=True)\n    try:\n        yield engine\n    finally:\n        engine.dispose()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"627514896","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\nfrom odoo.http import request\nimport json\n\nclass websitecartempty(http.Controller):\n\t@http.route('/shop/cartempty',type='http', auth='public', website=True)\n\tdef new_web(self, **kw):\n\t\tsale_order= request.website.sale_get_order(force_create=True)\n\t\tif sale_order:\n\t\t\tfor line in sale_order.website_order_line:\n\t\t\t\tline.unlink()\n\t\treturn []","sub_path":"website_clear_shopping_cart_app/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"298716078","text":"\"\"\"\n@Name: server.py\n@Author: lxy\n@Date: 2019/5/10 10:19\n@Description: server-side logic\n@Contact: lxy_jdsy@163.com\n\"\"\"\nimport sqlite3\nimport socket\nfrom threading import Timer\n\nfrom work import Work\nfrom database import Database\n\n\nclass Server(object):\n    def __init__(self):\n        # create a new socket\n        self.server = socket.socket()\n        print(\"Starting up...\")\n        # bind the host info; the address must be a tuple\n        self.host_port = (\"127.0.0.1\", 2333)\n        self.server.bind(self.host_port)\n        print(\"Current ip: {}, listening on port: {}\".format(self.host_port[0], self.host_port[1]))\n        # set the maximum connection backlog\n        self.server.listen(5)\n        print(\"Initialization complete\")\n        # all connected clients\n        self.clients = []\n        # all worker threads\n        self.threads = []\n        # connect to the sqlite database\n        self.db = Database(\"weiliao.db\")\n        # set the run-time limit\n        Timer(5, exit).start()\n\n    # update the info of a connected client\n    def update_clients(self, update_mode, username, status, client, del_thread=None):\n        # in \"update\" mode, refresh that client's username and status\n        if update_mode == \"update\":\n            for c in self.clients:\n                if c[\"client\"] is client:\n                    c[\"username\"] = username\n                    c[\"status\"] = status\n                    print(self.clients)\n                    break\n        # in \"delete\" mode, drop the client connection and its worker thread\n        elif update_mode == \"delete\":\n            for c in self.clients:\n                if c[\"client\"] is client:\n                    self.clients.remove(c)\n                    if 
del_thread:\n                        self.threads.remove(del_thread)\n        # push the updated client list to every worker thread\n        for thread in self.threads:\n            thread.update_clients(self.clients)\n        # send the online-user list to every client whose status is \"login\"\n        users = [c[\"username\"] for c in self.clients if c[\"status\"] == \"login\"]\n        d = {\"type\": \"users\", \"users\": users}\n        for client in self.clients:\n            if client[\"status\"] == \"login\":\n                client[\"client\"].send(bytes(str(d), encoding=\"utf-8\"))\n\n    # accept incoming connections\n    def accept(self):\n        # keep listening on the port\n        while True:\n            # get the newly connected client\n            client, address = self.server.accept()\n            print(\"New connection detected, ip: {}, port: {}\".format(address[0], address[1]))\n            # update the client list\n            self.clients.append({\"client\": client, \"username\": \"\", \"status\": \"connect\"})\n            # spawn a worker thread for this connection\n            t = Work(self.update_clients, client, self.db)\n            # add the thread to the thread list\n            self.threads.append(t)\n            # start the worker thread\n            t.start()\n","sub_path":"Python/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"519098855","text":"import random\nimport matplotlib.pyplot as plt\n\nclass Player:\n\n    def __init__(self,name):\n        self.name=name\n        self.points=0.0\n\n    def choose(self):\n        pass\n\n    def recieve_result(self,opponent,own,result):\n        if result == 1:\n            self.points += 1.0\n        elif result == 2:\n            self.points += 0.5\n\n\n    def get_name(self):\n        return self.name\n\n    def get_points(self):\n        return self.points\n\n\nclass RandomPlayer(Player):\n\n    def __init__(self):\n        name=\"Random\"\n        super().__init__(name)\n\n    def choose(self):\n        x = random.randint(0, 2)\n        return x\n\n\nclass SequentialPlayer(Player):\n\n    def __init__(self):\n        name = \"Sequential\"\n        self.ind = 0\n        super().__init__(name)\n\n    def choose(self):\n        x=self.ind\n        if self.ind < 2:\n            self.ind += 1\n        else:\n            self.ind = 0\n        return x\n\n\nclass MostCommonPlayer(Player):\n\n    def __init__(self):\n        name = \"Most Common\"\n        self.r = 0\n        self.p = 0\n        self.s = 0\n        super().__init__(name)\n\n    def choose(self):\n        if self.r-(self.s+self.p) > 0:\n            return 1\n        elif self.p-(self.s+self.r) > 0:\n            return 2\n        elif self.s-(self.r+self.p) > 0:\n            return 0\n        else:\n            return random.randint(0,2)\n\n    def recieve_result(self,opponent,own,result):\n        if opponent == 0:\n            self.r += 1\n        elif opponent == 1:\n            self.p += 1\n        elif opponent == 2:\n            self.s += 1\n        super().recieve_result(opponent,own,result)\n\n\nclass Historian(Player):\n\n    def __init__(self,husk):\n        name = \"Historian\"\n        self.husk = husk\n        self.history = []\n        super().__init__(name)\n\n    def choose(self):\n        if len(self.history) < self.husk+2:\n            return random.randint(0,2)\n        else:\n            matches = []\n            husk=self.history[-self.husk:]\n\n            for i in range(len(self.history)-len(husk)):\n                if self.history[i] == husk[0] and self.history[i:i+len(husk)] == husk:\n                    matches.append(self.history[i+len(husk)])\n            if len(matches)==0:\n                return random.randint(0,2)\n            expected = max(set(matches), key=matches.count)\n            if expected == 0:\n                return 1\n            elif expected == 1:\n                return 2\n            else:\n                return 0\n\n    def recieve_result(self,opponent,own,result):\n        self.history.append(opponent)\n        super().recieve_result(opponent,own,result)\n\n\nclass SimpleGame:\n\n    list = [\"Rock\", \"Paper\", \"Scissor\"]\n\n    def __init__(self, spiller1, spiller2):\n        self.p1 = spiller1\n        self.p2 = spiller2\n        self.choice1 = None\n        self.choice2 = None\n        self.result = None\n\n    def play(self):\n        self.choice1 = self.p1.choose()\n        self.choice2 = self.p2.choose()\n\n        if self.choice1 == self.choice2:\n            self.p1.recieve_result(self.choice2,self.choice1,2)\n            
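Editor's note: the compare() ladder that follows enumerates all nine rock/paper/scissors pairings by hand; with this file's 0/1/2 encoding (0 = Rock, 1 = Paper, 2 = Scissor) the same outcome falls out of modular arithmetic. A compact alternative sketch, using the result codes recieve_result() already expects (1 win, 0 loss, 2 draw):

# Sketch: RPS outcome via (a - b) % 3 with the same 0/1/2 encoding.
def outcome(a, b):
    if a == b:
        return 2                         # draw
    return 1 if (a - b) % 3 == 1 else 0  # a beats b exactly when it is 'one ahead'

assert outcome(1, 0) == 1  # Paper beats Rock
assert outcome(0, 2) == 1  # Rock beats Scissor
assert outcome(2, 0) == 0  # Scissor loses to Rock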
self.p2.recieve_result(self.choice1,self.choice2,2)\n self.result = \"Draw\"\n\n else:\n self.compare(self.choice1,self.choice2)\n print(self)\n\n def compare(self,s1,s2):\n if s1 == 0:\n if s2 == 1:\n self.p1.recieve_result(s2,s1,0)\n self.p2.recieve_result(s1,s2,1)\n self.result = self.p2.get_name() #p2 vant\n elif s2 == 2:\n self.p1.recieve_result(s2,s1,1)\n self.p2.recieve_result(s1,s2,0)\n self.result = self.p1.get_name() #p1 vant\n elif s1 == 1:\n if s2 == 0:\n self.p1.recieve_result(s2,s1,1)\n self.p2.recieve_result(s1,s2,0)\n self.result = self.p1.get_name() #p1 vant\n elif s2 == 2:\n self.p1.recieve_result(s2,s1,0)\n self.p2.recieve_result(s1,s2,1)\n self.result = self.p2.get_name() #p2 vant\n elif s1 == 2:\n if s2 == 0:\n self.p1.recieve_result(s2,s1,0)\n self.p2.recieve_result(s1,s2,1)\n self.result = self.p2.get_name() #p2 vant\n elif s2 == 1:\n self.p1.recieve_result(s2,s1,1)\n self.p2.recieve_result(s1,s2,0)\n self.result = self.p1.get_name() #p1 vant\n\n def __str__(self):\n return \"P1 - \"+self.p1.get_name()+\": \"+self.list[self.choice1]+\",\\t\"+\"P2 - \"+self.p2.get_name()+\": \"+self.list[self.choice2]+\"\\t-> winner: \"+self.result\n\n\nclass ManyGames(SimpleGame):\n\n def __init__(self,spiller1,spiller2,nr_games):\n super().__init__(spiller1,spiller2)\n self.games=nr_games\n self.p1_result=[]\n\n def single_game(self):\n self.play()\n\n def play_games(self):\n i=0\n while i= 1:\n\t\t\traise forms.ValidationError(\"This Username is already taken. Please enter another!\")\n\n\t\treturn self.cleaned_data['username']\n#####################################################\n######## Create a User\n@view_function\n#permission_required('homepage.add_users')\ndef create(request):\n\tuser = hmod.Users.objects.create_user(\n\t\tusername = 'Enter Username',\n\t\tpassword = '',\n\t\tfirst_name = '',\n\t\tlast_name = '',\n\t\taddress1 = '',\n\t\tcity = '',\n\t\tstate = '',\n\t\tzip = '',\n\t\temail = '',\n\t)\n\n\treturn HttpResponseRedirect('/homepage/users.edit/{}'.format(user.id))\n\n#####################################################\n######## Delete a User\n@view_function\ndef delete(request):\n\ttry:\n\t\tuser = hmod.Users.objects.get(id=request.urlparams[0])\n\texcept hmod.Users.DoesNotExist:\n\t\treturn HttpResponseRedirect('/homepage/users/')\n\t\n\tuser.delete()\n\n\treturn HttpResponseRedirect('/homepage/users/'.format(user.id))\n\n\n\n\n\n\n\n","sub_path":"colheritage/homepage/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164629531","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.font import Font\nfrom db import Database\nimport webbrowser as chrome\n\ndb = Database('bookmarks.db')\nselectedLink = ''\n\ndef openLink():\n print('selected chutiyap',selectedLink)\n chrome.open(selectedLink)\n\ndef populate_bookmarks():\n link_list.delete(0,END)\n space = \"-\"*10\n for row in db.fetch():\n link_list.insert(END, (row[0], row[1],space,row[2]))\n\ndef selectLink(event):\n index = link_list.curselection()[0]\n global selectedLink\n selectedLink = link_list.get(index)[3]\n selectedLabel.config(text=selectedLink)\n\ndef addLink():\n if(part_text.get()=='' or link_text.get()==''):\n messagebox.showerror('Requires Field', 'Please enter Title and Link')\n return \n db.insert(part_text.get(),link_text.get())\n link_list.delete(0,END)\n populate_bookmarks()\n\ndef removeLink():\n if(link_list.curselection()):\n index 
= link_list.curselection()[0]\n        db.remove(link_list.get(index)[0])\n        populate_bookmarks()\n    else:\n        messagebox.showerror('Requires Field', 'Please Select A Link')\n\napp = Tk()\napp.geometry('900x700')\napp.title('Link Manager')\napp.configure(bg=\"#151516\")\nhelv36 = Font(family=\"Adobe Garamond Pro\",size=12)\n\nframe1 = Frame(app, bg=\"#151516\")\n\npart_text = StringVar()\npart_label = Label(frame1, text=\"Title\", font=('bold',16), bg=\"#151516\", fg=\"white\", pady=20)\npart_label.grid(row=0, column=0, sticky=W)\npart_entry = Entry(frame1, textvariable=part_text, width=30)\npart_entry.grid(row=0, column=1)\n\nlink_text = StringVar()\nlink_label = Label(frame1, text=\"Link\", font=('bold',16), bg=\"#151516\", fg=\"white\")\nlink_label.grid(row=0, column=2, sticky=W)\nlink_entry = Entry(frame1, textvariable=link_text, width=50)\nlink_entry.grid(row=0, column=3,columnspan=2)\n\n\nlink_list = Listbox(frame1, height=25, width=60, bg=\"#151516\", fg=\"white\", bd=0, font=helv36, activestyle=\"none\", highlightthickness=0, selectbackground=\"#6495ed\")\nlink_list.grid(row=3, column=0, columnspan=3, rowspan=6, pady=20, padx=20)\n\nscrollbar = Scrollbar(frame1)\nscrollbar.grid(row=3, column=3)\n\nlink_list.configure(yscrollcommand=scrollbar.set)\nlink_list.bind('<<ListboxSelect>>', selectLink)\nscrollbar.configure(command=link_list.yview)\n\naddBtn = Button(frame1, text=\"Add New link\", command=addLink)\naddBtn.grid(row=2, column=0)\n\nremoveBtn = Button(frame1, text=\"Delete\", bg=\"red\", command=removeLink)\nremoveBtn.grid(row=2, column=1)\n\n\nselectedLabel = Label(frame1, text='', font=('bold',14), bg=\"#151516\", fg=\"white\")\nselectedLabel.grid(row=10, column=0, columnspan=3)\n\ngotoBtn = Button(frame1, text=\"Go\", bg=\"red\", command=openLink, width=10)\ngotoBtn.grid(row=10, column=4)\n\npopulate_bookmarks()\n\nframe1.pack(fill='both', expand=True)\n\napp.resizable(True,True)\napp.mainloop()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"185471756","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table\nfrom bisector import *\nfrom astropy.time import Time\nfrom ccf2rv import *\nfrom per_epoch_table import per_epoch_table\n\ndef sinusoidal(phase,dphase,amp,zp):\n    return np.sin( (phase+dphase))*amp+zp\n\n# do not *formally* exclude an order, but this is done later with the bandpass keyword\nexclude_orders = [-1]\n\nobject = 'TOI-1278'\n\n# number of median-absolute deviations within an epoch to consider a point discrepant\n\ntbl = get_object_rv(object,mask = 'gl846_neg',\n                    method = 'template',force = True,\n                    exclude_orders = exclude_orders,\n                    snr_min = 20.0, sanitize = False,\n                    dvmax_per_order = 3.0, bandpass = 'HK',\n                    doplot = True)\n\n# period for the sinusoidal curve\nperiod = 14.4\n\n# create the table with bis per epoch\ntbl_bin = per_epoch_table(tbl,nMAD_cut = 5)\n\n# get time stamps friendly for plotting\nt2 = Time(tbl_bin['MJDATE_MEAN'], format = 'mjd')\nt3 = Time(tbl['MJDATE'], format = 'mjd')\n\n# get phase for sine fitting\nphase_bin = 2*np.pi*tbl_bin['MJDATE_MEAN']/period\nphase = 2*np.pi*tbl['MJDATE']/period\n\n# fit sinusoid\nfit, pcov = curve_fit(sinusoidal, phase_bin, tbl_bin['RV'])\n\n# some plotting fiddling\ndt = np.max(tbl_bin['MJDATE_MEAN']) - np.min(tbl_bin['MJDATE_MEAN'])\ntime_plot = np.arange(np.min(tbl_bin['MJDATE_MEAN'])-dt/10,np.max(tbl_bin['MJDATE_MEAN'])+dt/10,dt/1000)\nphase_plot = 
2*np.pi*time_plot/period\n\nmodel_bin = sinusoidal(phase_bin,*fit)\nmodel= sinusoidal(phase,*fit)\nmodel_plot = sinusoidal(phase_plot,*fit)\n\nprint('Amplitude of the sinusoidal at {0} days: {1} m/s'.format(period,fit[1]))\nprint('Mean/Median per-epoch STDDEV {0}/{1} m/s'.format(np.mean(tbl_bin[\"ERROR_RV\"]),np.median(tbl_bin[\"ERROR_RV\"])))\n\nfig, ax = plt.subplots(nrows = 2, ncols = 1,sharex = True)\n\nfor i in range(len(t2)):\n    ax[0].plot_date(t2.plot_date,tbl_bin['RV'],'g.')\n    ax[0].plot_date([t2[i].plot_date,t2[i].plot_date],[tbl_bin['RV'][i]-tbl_bin['ERROR_RV'][i],\n                                                       tbl_bin['RV'][i]+tbl_bin['ERROR_RV'][i]],'g')\n\nax[0].plot_date(t3.plot_date,tbl['RV'],'r.',alpha = 0.5)\nax[1].errorbar(t3.plot_date,tbl['RV'] - model,yerr=tbl['ERROR_RV'], linestyle=\"None\",\n               fmt='o',color = 'green', alpha = 0.2)\n\nax[0].plot(Time(time_plot, format = 'mjd').plot_date,model_plot,'r:')\nax[0].set(ylabel = 'Velocity [km/s]',title = object)\n\n\nax[1].errorbar(t2.plot_date, tbl_bin['RV'] - model_bin, yerr=tbl_bin['ERROR_RV'], linestyle=\"None\", fmt='o',\n               alpha = 0.5, capsize = 2, color = 'black')\n\nax[1].plot(Time(time_plot, format = 'mjd').plot_date,np.zeros(len(time_plot)),'r:')\nax[1].set(xlabel = 'Date', ylabel = 'Residuals [km/s]')\nplt.tight_layout()\nplt.savefig(object+'.png')\nplt.show()\n\n","sub_path":"spirou/sandbox/ccf_tools/analyse_TOI1278.py","file_name":"analyse_TOI1278.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"236738901","text":"# List of packages to import\nfrom cobra.mit.session import LoginSession\nfrom cobra.mit.access import MoDirectory\n#\n# import cobra.mit.request\n# import cobra.model.fv\n# import cobra.model.pol\nimport xlsxwriter\nimport urllib3\nimport requests\nimport json\nimport aciCredentials\nfrom nodeList import nodeList\n# import getToken\n\nurllib3.disable_warnings()\n\n# declare variables\nurl = str(aciCredentials.url)\nuser = str(aciCredentials.user)\npassword = str(aciCredentials.pwd)\n\n\n# log into an APIC and create a directory object\nls = LoginSession(url, user, password)\nmd = MoDirectory(ls)\nmd.login()\n\n# search by Class\n# psu = md.lookupByClass(\"fabricNode\", parentDn='topology/pod-1')\n\n# print header\nprint(\"{:<10} {:<15} {:<15} {:<20} {:<15} {:<15} {:<15} {:<20} {:<20} {:<20} {:<20} {:<15}\"\\\n    .format('NodeID', 'Interface', 'Status', 'Status Quality', 'Speed', 'DOM TX', 'DOM RX', 'DOM Temp', 'Optic Vendor', 'Optic Type', 'Optic P/N', 'Optic S/N'))\n\n\ndef domStatus(nodes):\n\n    for nodeId in nodes:\n        for i in range(1, (nodeId[2] + 1)):\n            phy = '1/' + str(i)\n            speedDn = md.lookupByDn('topology/pod-1/node-' + str(nodeId[0]) + '/sys/phys-[eth' + str(phy) + ']/phys')\n\n            if speedDn.operSt == 'down':\n                print('{:<10} {:<15} {:<15} {:<20} {:<15}' \\\n                      .format(nodeId[0], str(phy), speedDn.operSt, speedDn.operStQual, speedDn.operSpeed))\n                continue\n\n            else:\n                opticDn = md.lookupByDn('topology/pod-1/node-' + str(nodeId[0]) + '/sys/phys-[eth' + str(phy) + ']/phys/fcot')\n\n                if opticDn.typeName in ('SFP-H10GB-CU1M', 'SFP-H10GB-CU3M'):\n                    print('{:<10} {:<15} {:<15} {:<20} {:<15} {:<15} {:<15} {:<20} {:<20} {:<20} {:<20} {:<15}' \\\n                          
.format(nodeId[0], str(phy), speedDn.operSt, speedDn.operStQual, speedDn.operSpeed, '', '', '',\n                          opticDn.guiName, opticDn.typeName, opticDn.guiPN, opticDn.guiSN))\n                    continue\n\n                elif opticDn.typeName == '1000base-T':\n                    print('{:<10} {:<15} {:<15} {:<20} {:<15} {:<15} {:<15} {:<20} {:<20} {:<20} {:<20} {:<15}' \\\n                          .format(nodeId[0], str(phy), speedDn.operSt, speedDn.operStQual, speedDn.operSpeed, '', '', '',\n                          opticDn.guiName, opticDn.typeName, opticDn.guiPN, opticDn.guiSN))\n                    continue\n\n                # search by DN\n                domTXDn = md.lookupByDn('topology/pod-1/node-' + str(nodeId[0]) + '/sys/phys-[eth' + str(phy) + ']/phys/domstats/txpower')\n                domRXDn = md.lookupByDn('topology/pod-1/node-' + str(nodeId[0]) + '/sys/phys-[eth' + str(phy) + ']/phys/domstats/rxpower')\n                domTempDn = md.lookupByDn('topology/pod-1/node-' + str(nodeId[0]) + '/sys/phys-[eth' + str(phy) + ']/phys/domstats/temperature')\n\n                print('{:<10} {:<15} {:<15} {:<20} {:<15} {:<15} {:<15} {:<20} {:<20} {:<20} {:<20} {:<15}' \\\n                      .format(nodeId[0], str(phy), speedDn.operSt, speedDn.operStQual, speedDn.operSpeed, domTXDn.value,\n                              domRXDn.value, domTempDn.value, opticDn.guiName, opticDn.typeName, opticDn.guiPN, opticDn.guiSN))\n\n\n# psuStatus(spineList)\ndomStatus(nodeList)\n\n# Use the connected moDir queries and configuration...\nmd.logout()\n","sub_path":"DOM.py","file_name":"DOM.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"242566691","text":"'''\nNotes\n* Use queue.PriorityQueue (put and get) for heap API\n* Linked list trick - use a dummy head to make initialization logic cleaner, throw away before returning linked list (return head.next)\n'''\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        from Queue import PriorityQueue\n        head = current_node = ListNode(0)\n\n        q = PriorityQueue()\n        for l in lists:\n            if l:\n                q.put((l.val, l))\n        while not q.empty():\n            val, node = q.get()\n            next_node = node.next\n            current_node.next = ListNode(val)\n            current_node = current_node.next\n            if next_node:\n                q.put((next_node.val, next_node))\n        return head.next\n","sub_path":"leet_code/merge_k_sorted_linked_lists.py","file_name":"merge_k_sorted_linked_lists.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"348494063","text":"#Muhammad Khalif Rizaldi Wibowo\r\n#L200180217\r\n\r\n#1\r\ndef cetakSiku(x):\r\n    for i in range (1,x+1):\r\n        print('*'*i)\r\n    \r\n#2\r\ndef gambarlahPersegiEmpat(a, b):\r\n    for i in range (a):\r\n        if i==0 or i==a-1:\r\n            print (b * '@')\r\n        else:\r\n            print ('@' + \" \" * (b-2) + '@') \r\n\r\n#3\r\ndef jumlahHurufVokal(huruf):\r\n    vokal = 'aiueoAIUEO'\r\n    a = 0\r\n    hasil = 0\r\n    for i in huruf :\r\n        if i in vokal:\r\n            a += len(i)\r\n        else:\r\n            a += 0\r\n    hasil = len(huruf), a\r\n    return hasil\r\n#3b\r\ndef jumlahHurufKonsonan(huruf):\r\n    konsonan = 'bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'\r\n    b = 0\r\n    hasil = 0\r\n    for i in huruf:\r\n        if i in konsonan:\r\n            b +=len(i)\r\n        else:\r\n            b +=0\r\n    hasil = len(huruf),b\r\n    return hasil\r\n#4\r\ndef rerata(b):\r\n    return sum(b)/len(b)\r\n\r\n#5\r\nfrom math import sqrt as sq\r\ndef apakahPrima(n):\r\n    n = int(n)\r\n    assert n>=0\r\n    primaKecil = [2,3,5,7,11]\r\n    bukanPrKecil = [0,1,4,6,8,9,10]\r\n    if n in primaKecil:\r\n        return True\r\n    elif n in bukanPrKecil:\r\n        return False\r\n    else:\r\n        for i in range(2,int(sq(n))+1):\r\n            if n%i==0:\r\n                return False\r\n        return True\r\n\r\n#6\r\ndef bilanganPrima(n):\r\n    for i in range(2,n):\r\n        prima = True\r\n        for j in range (2,i):\r\n            if(i%j==0):\r\n                
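# Editor's note: i has a divisor here, so it is composite; a 'break' after the assignment below would skip the redundant remaining checks (the original keeps scanning, behaviour unchanged).\r\n                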
prima = False\r\n        if (prima):\r\n            print(i)\r\n#7\r\ndef faktorPrima(x):\r\n    bilanganList = []\r\n    loop = 2\r\n    while loop <= x:\r\n        if x%loop == 0:\r\n            x/= loop\r\n            bilanganList.append(loop)\r\n        else:\r\n            loop += 1\r\n    return bilanganList\r\n#8\r\ndef apakahTerkandung(a, b):\r\n    x = True\r\n    for i in range(len(b)):\r\n        if a in b:\r\n            x = True\r\n        else:\r\n            x = False\r\n    return x\r\n#9\r\ndef kelipatan(x):\r\n    for i in range (x):\r\n        if(i<=0):\r\n            pass\r\n        elif(i%3==0 and i%5==0):\r\n            print ('Python UMS')\r\n        elif(i%3==0):\r\n            print ('Python')\r\n        elif(i%5==0):\r\n            print ('UMS')\r\n        else:\r\n            print(i)\r\n#10\r\nfrom math import sqrt as akar\r\ndef selesaikanABC(a,b,c):\r\n    a = float(a)\r\n    b = float(b)\r\n    c = float(c)\r\n    D = float(b**2 - 4*a*c)\r\n    if (D<0):\r\n        hasil = \"The discriminant is negative; the equation has no real roots.\"\r\n        return hasil\r\n    else:\r\n        x1 = (-b + akar(D))/(2*a)\r\n        x2 = (-b - akar(D))/(2*a)\r\n        hasil = (x1,x2)\r\n        return hasil\r\n\r\n#11\r\ndef apakahKabisat(tahun):\r\n    hasil = False\r\n    if(tahun%4==0 and tahun%100 !=0 and tahun%400 !=0):\r\n        hasil = True\r\n    elif(tahun%100==0 and tahun%400 !=0):\r\n        hasil = False\r\n    elif(tahun%400==0):\r\n        hasil = True\r\n    else:\r\n        hasil = False\r\n    return hasil\r\n#12\r\nimport random\r\ndef tebak():\r\n    max = 7\r\n    start = 1\r\n    x = random.randrange(1,100,1)\r\n    while (start <= max):\r\n        s = 'Enter guess no. ' +str(start)+ ':>'\r\n        i = int(input(s))\r\n        if(i == x):\r\n            print (\"Yes, you are right\")\r\n        elif(i > x):\r\n            print(\"That is too big\")\r\n        elif(i < x):\r\n            print(\"That is too small\")\r\n        start +=1\r\n","sub_path":"modul1.py","file_name":"modul1.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"447766316","text":"#!/usr/bin/env python3\n#\n# This file is part of LiteX-Boards.\n#\n# Copyright (c) 2021 Antmicro <www.antmicro.com>\n# SPDX-License-Identifier: BSD-2-Clause\n\nimport os\nimport argparse\n\nfrom migen import *\n\nfrom litex_boards.platforms import lpddr4_test_board\nfrom litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict\n\nfrom litex.soc.cores.clock import *\nfrom litex.soc.integration.soc_core import *\nfrom litex.soc.integration.soc import SoCRegion\nfrom litex.soc.integration.builder import *\nfrom litex.soc.cores.led import LedChaser\n\nfrom litedram.modules import MT53E256M16D1\nfrom litedram.phy import lpddr4\n\nfrom liteeth.phy import LiteEthS7PHYRGMII\nfrom litehyperbus.core.hyperbus import HyperRAM\n\n# CRG ----------------------------------------------------------------------------------------------\n\nclass _CRG(Module):\n    def __init__(self, platform, sys_clk_freq, iodelay_clk_freq):\n        self.clock_domains.cd_sys    = ClockDomain()\n        self.clock_domains.cd_sys2x  = ClockDomain(reset_less=True)\n        self.clock_domains.cd_sys8x  = ClockDomain(reset_less=True)\n        self.clock_domains.cd_idelay = ClockDomain()\n\n        # # #\n\n        self.submodules.pll = pll = S7PLL(speedgrade=-1)\n        pll.register_clkin(platform.request(\"clk100\"), 100e6)\n        pll.create_clkout(self.cd_sys, sys_clk_freq)\n        pll.create_clkout(self.cd_sys2x, 2 * sys_clk_freq)\n        pll.create_clkout(self.cd_sys8x, 8 * sys_clk_freq)\n        pll.create_clkout(self.cd_idelay, iodelay_clk_freq)\n\n        self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)\n\n# BaseSoC ------------------------------------------------------------------------------------------\n\nclass BaseSoC(SoCCore):\n    def __init__(self, *, sys_clk_freq=int(50e6), iodelay_clk_freq=200e6,\n        
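# Editor's note: the keyword-only arguments that follow simply toggle optional peripherals (Ethernet/Etherbone, HyperRAM, SD card, JTAGbone/UartBone, LED chaser) and identification options.\n        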
with_ethernet=False, with_etherbone=False, eth_ip=\"192.168.1.50\", eth_dynamic_ip=False,\n with_hyperram=False, with_sdcard=False, with_jtagbone=True, with_uartbone=False,\n with_led_chaser=True, ident_version=True, **kwargs):\n platform = lpddr4_test_board.Platform()\n\n # SoCCore ----------------------------------------------------------------------------------\n SoCCore.__init__(self, platform, sys_clk_freq,\n ident = \"LiteX SoC on LPDDR4 Test Board\",\n ident_version = ident_version,\n **kwargs)\n\n # CRG --------------------------------------------------------------------------------------\n self.submodules.crg = _CRG(platform, sys_clk_freq, iodelay_clk_freq=iodelay_clk_freq)\n\n # LDDR4 SDRAM ------------------------------------------------------------------------------\n if not self.integrated_main_ram_size:\n self.submodules.ddrphy = lpddr4.K7LPDDR4PHY(platform.request(\"lpddr4\"),\n iodelay_clk_freq = iodelay_clk_freq,\n sys_clk_freq = sys_clk_freq,\n )\n self.add_sdram(\"sdram\",\n phy = self.ddrphy,\n module = MT53E256M16D1(sys_clk_freq, \"1:8\"),\n l2_cache_size = kwargs.get(\"l2_size\", 8192),\n l2_cache_min_data_width = 256,\n )\n\n # HyperRAM ---------------------------------------------------------------------------------\n if with_hyperram:\n self.submodules.hyperram = HyperRAM(platform.request(\"hyperram\"))\n self.bus.add_slave(\"hyperram\", slave=self.hyperram.bus, region=SoCRegion(origin=0x20000000, size=8*1024*1024))\n\n # SD Card ----------------------------------------------------------------------------------\n if with_sdcard:\n self.add_sdcard()\n\n # Ethernet / Etherbone ---------------------------------------------------------------------\n if with_ethernet or with_etherbone:\n # Traces between PHY and FPGA introduce ignorable delays of ~0.165ns +/- 0.015ns.\n # PHY chip does not introduce delays on TX (FPGA->PHY), however it includes 1.2ns\n # delay for RX CLK so we only need 0.8ns to match the desired 2ns.\n self.submodules.ethphy = LiteEthS7PHYRGMII(\n clock_pads = self.platform.request(\"eth_clocks\"),\n pads = self.platform.request(\"eth\"),\n rx_delay = 0.8e-9,\n )\n if with_ethernet:\n self.add_ethernet(phy=self.ethphy, dynamic_ip=eth_dynamic_ip)\n if with_etherbone:\n self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)\n\n # Jtagbone ---------------------------------------------------------------------------------\n if with_jtagbone:\n self.add_jtagbone()\n\n # UartBone ---------------------------------------------------------------------------------\n if with_uartbone:\n self.add_uartbone(\"serial\", baudrate=1e6)\n\n # Leds -------------------------------------------------------------------------------------\n if with_led_chaser:\n self.submodules.leds = LedChaser(\n pads = platform.request_all(\"user_led\"),\n sys_clk_freq = sys_clk_freq)\n\n# Build --------------------------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"LiteX SoC on LPDDR4 Test Board\")\n target = parser.add_argument_group(title=\"Target options\")\n target.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream\")\n target.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream\")\n target.add_argument(\"--flash\", action=\"store_true\", help=\"Flash bitstream\")\n target.add_argument(\"--sys-clk-freq\", default=50e6, help=\"System clock frequency\")\n target.add_argument(\"--iodelay-clk-freq\", default=200e6, help=\"IODELAYCTRL frequency\")\n ethopts = 
target.add_mutually_exclusive_group()\n    ethopts.add_argument(\"--with-ethernet\",  action=\"store_true\", help=\"Add Ethernet\")\n    ethopts.add_argument(\"--with-etherbone\", action=\"store_true\", help=\"Add EtherBone\")\n    target.add_argument(\"--eth-ip\",          default=\"192.168.1.50\", help=\"Ethernet/Etherbone IP address\")\n    target.add_argument(\"--eth-dynamic-ip\",  action=\"store_true\", help=\"Enable dynamic Ethernet IP addresses setting\")\n    target.add_argument(\"--with-hyperram\",   action=\"store_true\", help=\"Add HyperRAM\")\n    target.add_argument(\"--with-sdcard\",     action=\"store_true\", help=\"Add SDCard\")\n    target.add_argument(\"--with-jtagbone\",   action=\"store_true\", help=\"Add JTAGBone\")\n    target.add_argument(\"--with-uartbone\",   action=\"store_true\", help=\"Add UartBone on 2nd serial\")\n    parser.add_argument(\"--no-ident-version\", action=\"store_false\", help=\"Disable build time output\")\n    builder_args(parser)\n    soc_core_args(parser)\n    vivado_build_args(parser)\n    args = parser.parse_args()\n\n    assert not (args.with_etherbone and args.eth_dynamic_ip)\n\n    soc = BaseSoC(\n        sys_clk_freq     = int(float(args.sys_clk_freq)),\n        iodelay_clk_freq = int(float(args.iodelay_clk_freq)),\n        with_ethernet    = args.with_ethernet,\n        with_etherbone   = args.with_etherbone,\n        eth_ip           = args.eth_ip,\n        eth_dynamic_ip   = args.eth_dynamic_ip,\n        with_hyperram    = args.with_hyperram,\n        with_sdcard      = args.with_sdcard,\n        with_jtagbone    = args.with_jtagbone,\n        with_uartbone    = args.with_uartbone,\n        ident_version    = args.no_ident_version,\n        **soc_core_argdict(args))\n    builder = Builder(soc, **builder_argdict(args))\n    vns = builder.build(**vivado_build_argdict(args), run=args.build)\n\n    if args.load:\n        prog = soc.platform.create_programmer()\n        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\"))\n\n    if args.flash:\n        prog = soc.platform.create_programmer()\n        prog.flash(0, os.path.join(builder.gateware_dir, soc.build_name + \".bin\"))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"litex_boards/targets/antmicro_lpddr4_test_board.py","file_name":"antmicro_lpddr4_test_board.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"608260802","text":"from __future__ import print_function, division, with_statement\nimport networkx as nx\n\n\ndef save_to_graph(article_name, concepts, relevance_scores, related_concepts, clear_prev=False):\n    if not clear_prev:\n        G = nx.read_gml('knowledge_graph.gml')\n    else:\n        G = nx.DiGraph()\n\n    print(type(G))\n\n    for index, concept in enumerate(concepts):\n        G.add_node(concept)\n        G.add_edge(article_name, concept, weight=relevance_scores[index])\n\n        for related_concept in related_concepts[index]:\n            G.add_edge(concept, related_concept)\n\n    nx.write_gml(G, 'knowledge_graph.gml')\n\n    return G\n\n\ndef search_concept(G, concept_name):\n    files = []\n    relevant_concepts = []\n\n    if concept_name in G:\n        inner_concepts = G.in_edges(concept_name)\n        outer_concepts = G.out_edges(concept_name)\n\n        files_temp = [inner_concepts[k][0] for k in range(0, len(inner_concepts))]\n        relevant_concepts += [outer_concepts[k][1] for k in range(0, len(outer_concepts))]\n\n        for filename in files_temp:\n            if '.pdf' in filename:\n                files.append(filename)\n            else:\n                relevant_concepts.append(filename)\n\n        relevances = [G.get_edge_data(filename, concept_name)['weight'] for filename in files]\n\n        print(\"Found in files: \" + ','.join(files))\n        print(\"Relevance scores: \" + ','.join(map(str, relevances)))\n        
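# Editor's inference: in this DiGraph, in-edges come from documents that cite the concept while out-edges point at related concepts, which is why files and relevant_concepts are split above.\n        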
print(\"Relevant Concepts: \" + ','.join(relevant_concepts))","sub_path":"knowledge_graph.py","file_name":"knowledge_graph.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536817547","text":"from policy_gradients.agent import Trainer\nimport git\nimport pickle\nimport numpy as np\nimport os\nimport argparse\nfrom policy_gradients import models\nimport sys\nimport json\nimport torch\nfrom cox.store import Store, schema_from_dict\n\n\n# Tee object allows for logging to both stdout and to file\nclass Tee(object):\n def __init__(self, file_path, stream_type, mode='a'):\n assert stream_type in ['stdout', 'stderr']\n\n self.file = open(file_path, mode)\n self.stream_type = stream_type\n self.errors = 'chill'\n\n if stream_type == 'stdout':\n self.stream = sys.stdout\n sys.stdout = self\n else:\n self.stream = sys.stderr\n sys.stderr = self\n\n def write(self, data):\n self.file.write(data)\n self.stream.write(data)\n\n def flush(self):\n self.file.flush()\n self.stream.flush()\n\ndef main(params):\n for k, v in zip(params.keys(), params.values()):\n assert v is not None, f\"Value for {k} is None\"\n\n # #\n # Setup logging\n # #\n metadata_schema = schema_from_dict(params)\n base_directory = params['out_dir']\n store = Store(base_directory)\n\n # redirect stderr, stdout to file\n \"\"\"\n def make_err_redirector(stream_name):\n tee = Tee(os.path.join(store.path, stream_name + '.txt'), stream_name)\n return tee\n\n stderr_tee = make_err_redirector('stderr')\n stdout_tee = make_err_redirector('stdout')\n \"\"\"\n\n # Store the experiment path and the git commit for this experiment\n metadata_schema.update({\n 'store_path':str,\n 'git_commit':str\n })\n\n repo = git.Repo(path=os.path.dirname(os.path.realpath(__file__)),\n search_parent_directories=True)\n\n metadata_table = store.add_table('metadata', metadata_schema)\n metadata_table.update_row(params)\n metadata_table.update_row({\n 'store_path':store.path,\n 'git_commit':repo.head.object.hexsha\n })\n\n metadata_table.flush_row()\n\n # Table for checkpointing models and envs\n\n if params['save_iters'] > 0:\n store.add_table('checkpoints', {\n 'val_model':store.PYTORCH_STATE,\n 'policy_model':store.PYTORCH_STATE,\n 'envs':store.PICKLE,\n 'policy_opt': store.PYTORCH_STATE,\n 'val_opt': store.PYTORCH_STATE,\n 'iteration':int\n })\n\n # The trainer object is in charge of sampling trajectories and\n # taking PPO/TRPO optimization steps\n\n p = Trainer.agent_from_params(params, store=store)\n if 'load_model' in params and params['load_model']:\n print('Loading pretrained model', params['load_model'])\n pretrained_models = torch.load(params['load_model'])\n p.policy_model.load_state_dict(pretrained_models['policy_model'])\n p.val_model.load_state_dict(pretrained_models['val_model'])\n # Load optimizer states. 
Note that \n # p.POLICY_ADAM.load_state_dict(pretrained_models['policy_opt'])\n # p.val_opt.load_state_dict(pretrained_models['val_opt'])\n # Restore environment parameters, like mean and std.\n p.envs = pretrained_models['envs']\n rewards = []\n\n # Table for final results\n final_table = store.add_table('final_results', {\n 'iteration':int,\n '5_rewards':float,\n 'terminated_early':bool,\n 'val_model':store.PYTORCH_STATE,\n 'policy_model':store.PYTORCH_STATE,\n 'envs':store.PICKLE,\n 'policy_opt': store.PYTORCH_STATE,\n 'val_opt': store.PYTORCH_STATE,\n 'iteration':int\n })\n\n\n def finalize_table(iteration, terminated_early, rewards):\n final_5_rewards = np.array(rewards)[-5:].mean()\n final_table.append_row({\n 'iteration':iteration,\n '5_rewards':final_5_rewards,\n 'terminated_early':terminated_early,\n 'iteration':iteration,\n 'val_model': p.val_model.state_dict(),\n 'policy_model': p.policy_model.state_dict(),\n 'policy_opt': p.POLICY_ADAM.state_dict(),\n 'val_opt': p.val_opt.state_dict(),\n 'envs':p.envs\n })\n\n # Try-except so that we save if the user interrupts the process\n try:\n for i in range(params['train_steps']):\n print('Step %d' % (i,))\n if params['save_iters'] > 0 and i % params['save_iters'] == 0:\n store['checkpoints'].append_row({\n 'iteration':i,\n 'val_model': p.val_model.state_dict(),\n 'policy_model': p.policy_model.state_dict(),\n 'policy_opt': p.POLICY_ADAM.state_dict(),\n 'val_opt': p.val_opt.state_dict(),\n 'envs':p.envs\n })\n \n mean_reward = p.train_step()\n rewards.append(mean_reward)\n\n finalize_table(i, False, rewards)\n except KeyboardInterrupt:\n torch.save(p.val_model, 'saved_experts/%s-expert-vf' % (params['game'],))\n torch.save(p.policy_model, 'saved_experts/%s-expert-pol' % (params['game'],))\n\n finalize_table(i, True, rewards)\n store.close()\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\ndef add_common_parser_opts(parser):\n # Basic setup\n parser.add_argument('--game', type=str, help='gym game')\n parser.add_argument('--mode', type=str, choices=['ppo', 'trpo', 'robust_ppo'],\n help='pg alg')\n parser.add_argument('--out-dir', type=str,\n help='out dir for store + logging')\n parser.add_argument('--advanced-logging', type=str2bool, const=True, nargs='?')\n parser.add_argument('--kl-approximation-iters', type=int,\n help='how often to do kl approx exps')\n parser.add_argument('--log-every', type=int)\n parser.add_argument('--policy-net-type', type=str,\n choices=models.POLICY_NETS.keys())\n parser.add_argument('--value-net-type', type=str,\n choices=models.VALUE_NETS.keys())\n parser.add_argument('--train-steps', type=int,\n help='num agent training steps')\n parser.add_argument('--cpu', type=str2bool, const=True, nargs='?')\n\n # Which value loss to use\n parser.add_argument('--value-calc', type=str,\n help='which value calculation to use')\n parser.add_argument('--initialization', type=str)\n\n # General Policy Gradient parameters\n parser.add_argument('--num-actors', type=int, help='num actors (serial)',\n choices=[1])\n parser.add_argument('--t', type=int,\n help='num timesteps to run each actor for')\n parser.add_argument('--gamma', type=float, help='discount on reward')\n parser.add_argument('--lambda', type=float, help='GAE hyperparameter')\n parser.add_argument('--val-lr', type=float, help='value fn learning rate')\n 
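# Editor's note: '--lambda' becomes the attribute name 'lambda', which collides with the Python keyword; it is only safe here because this script reads options through vars(args) rather than attribute access.\n    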
parser.add_argument('--val-epochs', type=int, help='value fn epochs')\n\n # PPO parameters\n parser.add_argument('--adam-eps', type=float, choices=[0, 1e-5], help='adam eps parameter')\n\n parser.add_argument('--num-minibatches',type=int,\n help='num minibatches in ppo per epoch')\n parser.add_argument('--ppo-epochs', type=int)\n parser.add_argument('--ppo-lr', type=float,\n help='if nonzero, use gradient descent w this lr')\n parser.add_argument('--ppo-lr-adam', type=float,\n help='if nonzero, use adam with this lr')\n parser.add_argument('--anneal-lr', type=str2bool,\n help='if we should anneal lr linearly from start to finish')\n parser.add_argument('--clip-eps', type=float, help='ppo clipping')\n parser.add_argument('--clip-val-eps', type=float, help='ppo clipping value')\n parser.add_argument('--entropy-coeff', type=float,\n help='entropy weight hyperparam')\n parser.add_argument('--value-clipping', type=str2bool,\n help='should clip values (w/ ppo eps)')\n parser.add_argument('--value-multiplier', type=float,\n help='coeff for value loss in combined step ppo loss')\n parser.add_argument('--share-weights', type=str2bool,\n help='share weights in valnet and polnet')\n parser.add_argument('--clip-grad-norm', type=float,\n help='gradient norm clipping (-1 for no clipping)')\n parser.add_argument('--policy-activation', type=str,\n help='activation function for countinous policy network')\n \n # TRPO parameters\n parser.add_argument('--max-kl', type=float, help='trpo max kl hparam')\n parser.add_argument('--max-kl-final', type=float, help='trpo max kl final')\n parser.add_argument('--fisher-frac-samples', type=float,\n help='frac samples to use in fisher vp estimate')\n parser.add_argument('--cg-steps', type=int,\n help='num cg steps in fisher vp estimate')\n parser.add_argument('--damping', type=float, help='damping to use in cg')\n parser.add_argument('--max-backtrack', type=int, help='max bt steps in fvp')\n parser.add_argument('--trpo-kl-reduce-func', type=str, help='reduce function for KL divergence used in line search. mean or max.')\n\n # Robust PPO parameters.\n parser.add_argument('--robust-ppo-eps', type=float, help='max eps for robust PPO training')\n parser.add_argument('--robust-ppo-method', type=str, choices=['convex-relax', 'sgld', 'pgd'], help='robustness regularization methods')\n parser.add_argument('--robust-ppo-pgd-steps', type=int, help='number of PGD optimization steps')\n parser.add_argument('--robust-ppo-detach-stdev', type=str2bool, help='detach gradient of standard deviation term')\n parser.add_argument('--robust-ppo-reg', type=float, help='robust PPO regularization')\n parser.add_argument('--robust-ppo-eps-scheduler-opts', type=str, help='options for epsilon scheduler for robust PPO training')\n parser.add_argument('--robust-ppo-beta', type=float, help='max beta (IBP mixing factor) for robust PPO training')\n parser.add_argument('--robust-ppo-beta-scheduler-opts', type=str, help='options for beta scheduler for robust PPO training')\n\n # Adversarial attack parameters.\n parser.add_argument('--attack-method', type=str, choices=[\"none\", \"critic\", \"random\", \"action\", \"sarsa\", \"sarsa+action\"], help='adversarial attack methods.')\n parser.add_argument('--attack-ratio', type=float, help='attack only a ratio of steps.')\n parser.add_argument('--attack-steps', type=int, help='number of PGD optimization steps.')\n parser.add_argument('--attack-eps', type=str, help='epsilon for attack. 
If set to \"same\", we will use value of robust-ppo-eps.')\n parser.add_argument('--attack-step-eps', type=str, help='step size for each iteration. If set to \"auto\", we will use attack-eps / attack-steps')\n parser.add_argument('--attack-sarsa-network', type=str, help='sarsa network to load for attack.')\n parser.add_argument('--attack-sarsa-action-ratio', type=float, help='When set to non-zero, enable sarsa-action attack.')\n\n # Normalization parameters\n parser.add_argument('--norm-rewards', type=str, help='type of rewards normalization', \n choices=['rewards', 'returns', 'none'])\n parser.add_argument('--norm-states', type=str2bool, help='should norm states')\n parser.add_argument('--clip-rewards', type=float, help='clip rews eps')\n parser.add_argument('--clip-observations', type=float, help='clips obs eps')\n\n # Saving\n parser.add_argument('--save-iters', type=int, help='how often to save model (0 = no saving)')\n\n # Visualization\n parser.add_argument('--show-env', type=str2bool, help='Show environment visualization')\n parser.add_argument('--save-frames', type=str2bool, help='Save environment frames')\n parser.add_argument('--save-frames-path', type=str, help='Path to save environment frames')\n\n # For grid searches only\n # parser.add_argument('--cox-experiment-path', type=str, default='')\n return parser\n\n\ndef override_json_params(params, json_params, excluding_params):\n # Override the JSON config with the argparse config\n missing_keys = []\n for key in json_params:\n if key not in params:\n missing_keys.append(key)\n assert not missing_keys, \"Following keys not in args: \" + str(missing_keys)\n\n missing_keys = []\n for key in params:\n if key not in json_params and key not in excluding_params:\n missing_keys.append(key)\n assert not missing_keys, \"Following keys not in JSON: \" + str(missing_keys)\n\n json_params.update({k: params[k] for k in params if params[k] is not None})\n return json_params\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate experiments to be run.')\n parser.add_argument('--config-path', type=str, required=True,\n help='json for this config')\n parser.add_argument('--out-dir-prefix', type=str, default=\"\", required=False,\n help='prefix for output log path')\n parser.add_argument('--load-model', type=str, default='', required=False, help='load pretrained model and optimizer states before training')\n parser = add_common_parser_opts(parser)\n \n args = parser.parse_args()\n\n params = vars(args)\n json_params = json.load(open(args.config_path))\n\n extra_params = ['config_path', 'out_dir_prefix', 'load_model']\n params = override_json_params(params, json_params, extra_params)\n\n # Append a prefix for output path.\n if args.out_dir_prefix:\n params['out_dir'] = os.path.join(args.out_dir_prefix, params['out_dir'])\n print(f\"setting output dir to {params['out_dir']}\")\n main(params)\n\n","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":14005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"232036548","text":"import random\r\nimport shutil, path\r\n\r\nfname = path.Path(__file__).stem\r\nrandom.seed(0)\r\nroot = path.Path(f'{fname}_cases')\r\nif not root.exists():\r\n root.mkdir()\r\n\r\nfor i in range(10):\r\n s = ''\r\n for _ in range(2):\r\n hour = random.randint(0, 23)\r\n minute = random.randint(0, 59)\r\n second = random.randint(0, 59)\r\n s += f'{hour} {minute} {second} '\r\n open(root / f'case_{i+1:02}.in', 
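# editor's note: mode 'w' truncates any case file left over from an earlier run\r\n        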
\"w\").write(s)\r\n\r\nimport subprocess\r\nfor i in range(10):\r\n fin = open(root / f'case_{i+1:02}.in', 'r')\r\n fout = open(root / f'case_{i+1:02}.out', 'w')\r\n exe = f\"{fname}.exe\"\r\n p = subprocess.Popen(exe, stdin=fin, stdout=fout)\r\n p.wait(timeout=1)\r\n\r\n# shutil.make_archive(fname, 'zip', base_dir=str(root))\r\n","sub_path":"tutorials/codes/lab08/时钟结构体/lab08_timer.py","file_name":"lab08_timer.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"542293590","text":"from veriloggen import *\n\nfrom fdam_cgra.make_omega import make_omega\n\n\ndef make_control_exec(cgra_id, num_pe, num_pe_io, net_radix):\n net = make_omega(num_pe * 2, 0, net_radix, 8)\n en_net_bits = net.get_ports().get('en').width\n en_pc_net_bits = net.get_ports().get('en_pc_net').width\n\n m = Module('cgra%d_control_exec' % cgra_id)\n clk = m.Input('clk')\n rst = m.Input('rst')\n\n start = m.Input('start')\n\n read_fifo_mask = m.Input('read_fifo_mask', num_pe_io)\n write_fifo_mask = m.Input('write_fifo_mask', num_pe_io)\n\n available_read = m.Input('available_read', num_pe_io)\n available_write = m.Input('available_write', num_pe_io)\n\n available_pop = m.Input('available_pop', num_pe_io)\n available_push = m.Input('available_push', num_pe_io)\n\n read_fifo_done = m.Input('read_fifo_done', num_pe_io)\n write_fifo_done = m.Input('write_fifo_done', num_pe_io)\n\n en_pe = m.OutputReg('en_pe', num_pe)\n en_net = m.OutputReg('en_net', en_net_bits)\n en_pc_net = m.OutputReg('en_pc_net', en_pc_net_bits)\n\n en_fecth_data = m.OutputReg('en_fecth_data', num_pe_io)\n\n done = m.OutputReg('done')\n\n FSM_IDLE = m.Localparam('FSM_IDLE', 0)\n FSM_PROCESS = m.Localparam('FSM_PROCESS', 1)\n FSM_DONE = m.Localparam('FSM_DONE', 3)\n FSM_WAIT_DATA = m.Localparam('FSM_WAIT_DATA', 2)\n m.EmbeddedCode('')\n\n fsm_state = m.Reg('fsm_state', 2)\n\n en_read = m.Reg('en_read', num_pe_io)\n en_write = m.Reg('en_write', num_pe_io)\n en_read1 = m.Reg('en_read1', num_pe_io)\n en_write1 = m.Reg('en_write1', num_pe_io)\n en_process = m.Reg('en_process', num_pe + en_net_bits + en_pc_net_bits)\n en_process1 = m.Reg('en_process1', num_pe + en_net_bits + en_pc_net_bits)\n available_pop_masked = m.Reg('available_pop_masked', num_pe_io)\n available_push_masked = m.Reg('available_push_masked', num_pe_io)\n available_queues = m.Reg('available_queues')\n\n m.EmbeddedCode('')\n i = m.Integer('i')\n m.Always(Posedge(clk))(\n If(rst)(\n available_pop_masked(0),\n available_push_masked(0),\n available_queues(0)\n ).Else(\n available_pop_masked(available_pop | Unot(read_fifo_mask)),\n available_push_masked(available_push | Unot(write_fifo_mask)),\n available_queues(Uand(available_pop_masked & available_push_masked))\n )\n )\n m.Always(Posedge(clk))(\n If(rst)(\n en_process(0),\n en_process1(0),\n en_read(0),\n en_write(0),\n en_read1(0),\n en_write1(0),\n en_pe(0),\n en_net(0),\n en_pc_net(0)\n ).Else(\n en_read(Or(available_read, read_fifo_done)),\n en_write(Or(available_write, write_fifo_done)),\n en_read1(And(en_read, read_fifo_mask)),\n en_write1(And(en_write, write_fifo_mask)),\n For(i(0), i < en_process.width, i.inc())(\n en_process[i](And(Uor(en_read1), Uor(en_write1))),\n en_process1[i](And(en_process[i], fsm_state[0]))\n ),\n For(i(0), i < en_pe.width, i.inc())(\n en_pe[i](en_process1[i])\n ),\n For(i(0), i < en_net.width, i.inc())(\n en_net[i](en_process1[i + en_pe.width])\n ),\n For(i(0), i < en_pc_net.width, i.inc())(\n 
en_pc_net[i](en_process1[i + en_pe.width + en_net.width])\n            )\n        )\n    )\n\n    m.Always(Posedge(clk))(\n        If(rst)(\n            fsm_state(FSM_IDLE),\n            en_fecth_data(Int(0, en_fecth_data.width, 16)),\n            done(0)\n        ).Else(\n            Case(fsm_state)(\n                When(FSM_IDLE)(\n                    If(start)(\n                        fsm_state(FSM_WAIT_DATA),\n                        en_fecth_data(Int((1 << en_fecth_data.width) - 1, en_fecth_data.width, 16)),\n                    )\n                ),\n                When(FSM_WAIT_DATA)(\n                    If(available_queues)(\n                        fsm_state(FSM_PROCESS)\n                    )\n                ),\n                When(FSM_PROCESS)(\n                    If(Uand(write_fifo_done | ~write_fifo_mask))(\n                        fsm_state(FSM_DONE)\n                    )\n                ),\n\n                When(FSM_DONE)(\n                    done(1)\n                )\n            )\n        )\n    )\n\n    return m\n","sub_path":"fdam-hw-generator/src/fdam_cgra/make_control_exec.py","file_name":"make_control_exec.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"450938717","text":"''' oxo_data is the data module for a tic-tac-toe (or OXO) game. \n    It saves and restores a game board. The functions are:\n         saveGame(game) -> None\n         restoreGame() -> game\n    Note that no limits are placed on the size of the data.\n    The game implementation is responsible for validating\n    all data in and out.'''\n\nimport os.path\ngame_file = \"oxogame.dat\"\n\ndef _getPath():\n    ''' getPath -> string\n    Returns a valid path for data file. \n    Tries to use the users home folder, defaults to cwd'''\n\n    try:\n        game_path = os.environ['HOMEPATH'] or os.environ['HOME']\n        if not os.path.exists(game_path):\n            game_path = os.getcwd()\n    except (KeyError, TypeError):\n        game_path = os.getcwd()\n    return game_path\n\ndef saveGame(game):\n    ''' saveGame(game) -> None\n\n    saves a game object in the data file in the users home folder.\n    No checking is done on the input which is expected to\n    be a list of characters'''\n    \n    path = os.path.join(_getPath(), game_file)\n    try:\n        with open(path, 'w') as gf:\n            gamestr = ''.join(game)\n            gf.write(gamestr)\n    except FileNotFoundError:\n        print(\"Failed to save file\")\n\ndef restoreGame():\n    ''' restoreGame() -> game\n\n    Restores a game from the data file.\n    The game object is a list of characters'''\n    \n    path = os.path.join(_getPath(), game_file) \n    with open(path) as gf:\n        gamestr = gf.read()\n    return list(gamestr)\n\ndef test():\n    print(\"Path = \", _getPath())\n    saveGame(list(\"XO XO XO \"))\n    print(restoreGame())\n\nif __name__ == \"__main__\": test()\n","sub_path":"source_code/python_projects/Chapter4/OXO/oxo_data.py","file_name":"oxo_data.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"394100362","text":"from CardPools import *\nfrom Academy import TransferStudent\nfrom SV_Basic import SVClasses\nimport copy\nfrom numpy.random import choice as npchoice\nfrom numpy.random import randint as nprandint\nfrom numpy.random import shuffle as npshuffle\nimport numpy as np\nfrom collections import Counter as cnt\n\nimport inspect\n\ndef extractfrom(target, listObj):\n\ttry: return listObj.pop(listObj.index(target))\n\texcept: return None\n\nclass Hand_Deck:\n\tdef __init__(self, Game, deck1=[], deck2=[]): # Load the two decks from deck lists\n\t\tself.Game = Game\n\t\tself.hands = {1: [], 2: []}\n\t\tself.decks = {1: [], 2: []}\n\t\tself.noCards = {1: 0, 2: 0}\n\t\tself.handUpperLimit = {1: 10, 2: 10}\n\t\tif self.Game.heroes[1].Class in SVClasses:\n\t\t\tself.handUpperLimit[1] = 9\n\t\tif self.Game.heroes[2].Class in SVClasses:\n\t\t\tself.handUpperLimit[2] = 9\n\t\tself.initialDecks = {1: deck1 if deck1 else Default1, 2: deck2 if deck2 else Default2}\n\t\t\n\tdef initialize(self):\n\t\tself.initializeDecks()\n\t\tself.initializeHands()\n\n\tdef initializeDecks(self):\n\t\tfor ID in range(1, 3):\n\t\t\tClass = self.Game.heroes[ID].Class # Hero's class\n\t\t\tfor obj in self.initialDecks[ID]:\n\t\t\t\tif obj.name == \"Transfer Student\": obj = self.Game.transferStudentType\n\t\t\t\tcard = obj(self.Game, ID)\n\t\t\t\tif \"Galakrond, \" in card.name:\n\t\t\t\t\t# While scanning, if there is no primary Galakrond yet, or the one found earlier does not match the player's class while this one does, make this one the primary Galakrond\n\t\t\t\t\tif self.Game.Counters.primaryGalakronds[ID] is None or (\n\t\t\t\t\t\t\tself.Game.Counters.primaryGalakronds[ID].Class != Class and card.Class == Class):\n\t\t\t\t\t\tself.Game.Counters.primaryGalakronds[ID] = card\n\t\t\t\tcard.inOrigDeck = True\n\t\t\t\tself.decks[ID].append(card)\n\t\t\tnpshuffle(self.decks[ID])\n\t\t\tfor i, card in enumerate(self.decks[ID]): #C'Thun never appears in the opening hand; it sinks to the bottom of the deck and waits for an effect-triggered shuffle\n\t\t\t\tif card.name == \"C'Thun, the Shattered\":\n\t\t\t\t\tself.decks[ID].insert(0, self.decks[ID].pop(i))\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\tdef initializeHands(self): # The cards offered for mulligan have already been moved from the deck into the mulligan list;\n\t\t# if the deck runs two legendary Quests, both go to the opening hand\n\t\tmainQuests = {1: [], 2: []}\n\t\tmulliganSize = {1: 3, 2: 4}\n\t\tif self.Game.heroes[2].Class in SVClasses:\n\t\t\tmulliganSize[2] = 3\n\t\tfor ID in range(1, 3):\n\t\t\tmainQuests[ID] = [card for card in self.decks[ID] if card.description.startswith(\"Quest\")]\n\t\t\tnumQueststoDraw = min(len(mainQuests[ID]), mulliganSize[ID])\n\t\t\tif numQueststoDraw > 0:\n\t\t\t\tqueststoDraw = npchoice(mainQuests[ID], numQueststoDraw, replace=False)\n\t\t\t\tfor quest in queststoDraw:\n\t\t\t\t\tself.Game.mulligans[ID].append(extractfrom(quest, self.decks[ID]))\n\t\t\tfor i in range(mulliganSize[ID] - numQueststoDraw):\n\t\t\t\tself.Game.mulligans[ID].append(self.decks[ID].pop())\n\n\tdef mulligan(self, indices1, indices2):\n\t\tindices = {1: indices1, 2: indices2} # indices are the list positions of the hand cards to replace, e.g. [1, 3]\n\t\tfor ID in range(1, 3):\n\t\t\tcardstoReplace = []\n\t\t\t# self.Game.mulligans is the cards currently in players' hands.\n\t\t\tif indices[ID]:\n\t\t\t\tfor num in range(1, len(indices[ID]) + 1):\n\t\t\t\t\t# Pop the cards to replace from the mulligans list from the highest index down, inserting a fresh card from the deck in each slot\n\t\t\t\t\tcardstoReplace.append(self.Game.mulligans[ID].pop(indices[ID][-num]))\n\t\t\t\t\tself.Game.mulligans[ID].insert(indices[ID][-num], self.decks[ID].pop())\n\t\t\tself.decks[ID] += cardstoReplace\n\t\t\tfor card in self.decks[ID]: card.entersDeck() # Cards in deck arm their possible trigDeck\n\t\t\tnpshuffle(self.decks[ID]) # Shuffle the deck after mulligan\n\t\t\t# Cards in hand and deck call entersHand and entersDeck to register their hand/deck triggers\n\t\t\tself.hands[ID] = [card.entersHand() for card in self.Game.mulligans[ID]]\n\t\t\tself.Game.mulligans[ID] = []\n\t\t\tfor card in self.hands[1] + self.hands[2]:\n\t\t\t\tcard.effectCanTrigger()\n\t\t\t\tcard.checkEvanescent()\n\n\t\tif self.Game.GUI: self.Game.GUI.update()\n\t\tif not self.Game.heroes[2].Class in SVClasses:\n\t\t\tself.addCardtoHand(TheCoin(self.Game, 2), 2)\n\t\tself.Game.Manas.calcMana_All()\n\t\tfor ID in range(1, 3):\n\t\t\tfor card in self.hands[ID] + self.decks[ID]:\n\t\t\t\tif \"Start of Game\" in card.index:\n\t\t\t\t\tcard.startofGame()\n\t\tself.drawCard(1)\n\t\tfor card in self.hands[1] + self.hands[2]:\n\t\t\tcard.effectCanTrigger()\n\t\t\tcard.checkEvanescent()\n\t\tif self.Game.GUI: self.Game.GUI.update()\n\n\t# In a two-player game each side handles its own mulligan; afterwards the two games copy each other's hand and deck info\n\tdef mulligan1Side(self, ID, indices):\n\t\tcardstoReplace = []\n\t\tif indices:\n\t\t\tfor num in range(1, len(indices) + 1):\n\t\t\t\tcardstoReplace.append(self.Game.mulligans[ID].pop(indices[-num]))\n\t\t\t\tself.Game.mulligans[ID].insert(indices[-num], self.decks[ID].pop())\n\t\tself.hands[ID] = self.Game.mulligans[ID]\n\t\tself.decks[ID] += cardstoReplace\n\t\tfor card in self.decks[ID]: card.entersDeck()\n\t\tnpshuffle(self.decks[ID])\n\n\t# After both sides have supplied their hand and deck info, register the cards and fire the start-of-game effects\n\tdef startGame(self): # This ID is the opponent's ID\n\t\tfor ID in range(1, 3): # Start directly from the mulligan results\n\t\t\tself.hands[ID] = [card.entersHand() for card in self.hands[ID]]\n\t\t\tfor card in self.decks[ID]: card.entersDeck()\n\t\t\tself.Game.mulligans[ID] = []\n\t\tfor ID in range(1, 3):\n\t\t\tfor card in self.hands[1] + self.hands[2]:\n\t\t\t\tcard.effectCanTrigger()\n\t\t\t\tcard.checkEvanescent()\n\t\tif not self.Game.heroes[2].Class in SVClasses:\n\t\t\tself.addCardtoHand(TheCoin(self.Game, 2), 2)\n\t\tself.Game.Manas.calcMana_All()\n\t\tfor ID in range(1, 3):\n\t\t\tfor card in self.hands[ID] + self.decks[ID]:\n\t\t\t\tif \"Start of Game\" in card.index: card.startofGame()\n\t\tself.drawCard(1)\n\t\tfor card in self.hands[1] + self.hands[2]:\n\t\t\tcard.effectCanTrigger()\n\t\t\tcard.checkEvanescent()\n\t\tif self.Game.GUI: self.Game.GUI.update()\n\n\tdef handNotFull(self, ID):\n\t\treturn len(self.hands[ID]) < self.handUpperLimit[ID]\n\n\tdef spaceinHand(self, ID):\n\t\treturn self.handUpperLimit[ID] - len(self.hands[ID])\n\n\tdef outcastcanTrigger(self, card):\n\t\tposinHand = self.hands[card.ID].index(card)\n\t\treturn posinHand == 0 or posinHand == len(self.hands[card.ID]) - 1\n\n\tdef noDuplicatesinDeck(self, ID):\n\t\t#typeCounter = cnt((type(card) for card in self.decks[ID]))\n\t\t#return all(typeCounter.values())\n\t\trecord = []\n\t\tfor card in self.decks[ID]:\n\t\t\tif type(card) not in record: record.append(type(card))\n\t\t\telse: return False\n\t\treturn True\n\t\t\n\tdef noMinionsinDeck(self, ID):\n\t\treturn not any(card.type == \"Minion\" for card in self.decks[ID])\n\t\t\n\tdef noMinionsinHand(self, ID, minion=None):\n\t\treturn not any(card.type == \"Minion\" and card is not minion for card in self.hands[ID])\n\t\t\n\tdef holdingDragon(self, ID, minion=None):\n\t\treturn any(card.type == \"Minion\" and \"Dragon\" in card.race and card is not minion \\\n\t\t\t\tfor card in self.hands[ID])\n\t\t\t\t\n\tdef holdingSpellwith5CostorMore(self, ID):\n\t\treturn any(card.type == \"Spell\" and card.mana >= 5 for card in self.hands[ID])\n\t\t\n\tdef holdingCardfromAnotherClass(self, ID, card=None):\n\t\tClass = self.Game.heroes[ID].Class\n\t\treturn any(Class not in cardinHand.Class and cardinHand.Class != \"Neutral\" and cardinHand is not card \\\n\t\t\t\t\t\tfor cardinHand in self.hands[ID])\n\t\t\t\t\t\t\n\t# Only one card can be drawn at a time; drawing several at once had to go, because that feature was only used to fetch effect-specified cards, and if one of those cards has an on-draw trigger it may immediately draw again and pull later cards in the list up early.\n\t# The rule now: to draw 2 spells in a row, scan the deck for spells twice, drawing one at random each time.\n\t# If that rule is right, then with a single Corrupted Blood from Hakkar the Soulflayer left in the deck, drawing that spell immediately draws again and then shuffles in two more Corrupted Bloods, so the second spell draw may hit a freshly shuffled copy.\n\t# Damage taken due to running out of card will keep increasing. Refilling the deck won't reset the damage you take next time you draw from empty deck\n\tdef drawCard(self, ID, card=None):\n\t\tgame, GUI = self.Game, self.Game.GUI\n\t\tif card is None: # Draw from top of the deck.\n\t\t\tif self.decks[ID]: # Still have cards left in deck.\n\t\t\t\tcard = self.decks[ID].pop()\n\t\t\t\tmana = card.mana\n\t\t\telse:\n\t\t\t\tif self.Game.heroes[ID].Class in SVClasses:\n\t\t\t\t\tif self.Game.heroes[ID].status[\"Draw to Win\"] > 0:\n\t\t\t\t\t\tself.Game.heroes[3 - ID].dead = True\n\t\t\t\t\t\tself.Game.gathertheDead(True)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.Game.heroes[ID].dead = True\n\t\t\t\t\t\tself.Game.gathertheDead(True)\n\t\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tself.noCards[ID] += 1 # If cards get shuffled into the deck while in fatigue, the fatigue counter does not drop; the next fatigue tick continues from the current nonzero value.\n\t\t\t\t\tdamage = self.noCards[ID]\n\t\t\t\t\tif GUI: GUI.fatigueAni(ID, damage)\n\t\t\t\t\tdmgTaker = game.scapegoat4(game.heroes[ID])\n\t\t\t\t\tdmgTaker.takesDamage(None, damage, damageType=\"Ability\") # Fatigue damage has no source\n\t\t\t\t\treturn None, -1 #Assume fatigue returns a negative value, so milling (which still returns the milled card's mana cost) can be told apart from fatigue\n\t\telse:\n\t\t\tif isinstance(card, (int, np.int32, np.int64)):\n\t\t\t\tcard = self.decks[ID].pop(card)\n\t\t\telse:\n\t\t\t\tcard = extractfrom(card, self.decks[ID])\n\t\t\tmana = card.mana\n\t\tcard.leavesDeck()\n\t\tif self.handNotFull(ID):\n\t\t\tif GUI: btn = GUI.drawCardAni_1(card)\n\t\t\tcardTracker = [card] # Wrap the card in a list so draw triggers can process it and pass it along to other draw triggers\n\t\t\tgame.sendSignal(\"CardDrawn\", ID, None, cardTracker, mana, \"\")\n\t\t\tself.Game.Counters.numCardsDrawnThisTurn[ID] += 1\n\t\t\tif cardTracker[0].type == \"Spell\" and \"Casts When Drawn\" in cardTracker[0].index:\n\t\t\t\tif GUI: btn.remove()\n\t\t\t\tcardTracker[0].whenEffective()\n\t\t\t\tgame.sendSignal(\"SpellCastWhenDrawn\", ID, None, cardTracker[0], mana, \"\")\n\t\t\t\t#A cast-when-drawn spell does not draw a replacement if it finds the player about to die. If the player is in a chain of draws, carry on with the next draw\n\t\t\t\tif game.heroes[ID].health > 0 and not game.heroes[ID].dead:\n\t\t\t\t\tself.drawCard(ID)\n\t\t\t\tcardTracker[0].afterDrawingCard()\n\t\t\telse: # The drawn card can go into the hand.\n\t\t\t\tif cardTracker[0].type == \"Minion\" or cardTracker[0].type == \"Amulet\":\n\t\t\t\t\tcardTracker[0].whenDrawn()\n\t\t\t\tcardTracker[0] = cardTracker[0].entersHand()\n\t\t\t\tself.hands[ID].append(cardTracker[0])\n\t\t\t\tif GUI: GUI.drawCardAni_2(btn, cardTracker[0])\n\t\t\t\tgame.sendSignal(\"CardEntersHand\", ID, None, cardTracker, mana, \"\")\n\t\t\t\tgame.Manas.calcMana_All()\n\t\t\treturn (cardTracker[0], mana)\n\t\telse:\n\t\t\tif GUI: GUI.millCardAni(card)\n\t\t\treturn (None, mana) #Assume a milled card still reports the mana cost of the card to be drawn, for Holy Wrath\n\t\t\t\n\t# Will force the ID of the card to change. 
obj can be an empty list/tuple\n\tdef addCardtoHand(self, obj, ID, comment=\"\", byDiscover=False, pos=-1, showAni=True):\n\t\tgame, GUI = self.Game, self.Game.GUI\n\t\tif not isinstance(obj, (list, np.ndarray, tuple)): # if the obj is not a list, turn it into a single-element list\n\t\t\tobj = [obj]\n\t\tmorethan3 = len(obj) > 2\n\t\tfor card in obj:\n\t\t\tif self.handNotFull(ID):\n\t\t\t\tif comment == \"type\": card = card(game, ID)\n\t\t\t\telif comment == \"index\": card = game.cardPool[card](game, ID)\n\t\t\t\tcard.ID = ID\n\t\t\t\tif showAni:\n\t\t\t\t\tif GUI: btn = GUI.cardEntersHandAni_1(card)\n\t\t\t\t\tself.hands[ID].insert(pos + 100 * (pos < 0), card)\n\t\t\t\t\tif GUI: GUI.cardEntersHandAni_2(btn, pos, steps=5 if morethan3 else 10)\n\t\t\t\telse:\n\t\t\t\t\tself.hands[ID].insert(pos + 100 * (pos < 0), card)\n\t\t\t\t\tif GUI: GUI.cardReplacedinHand_Refresh(ID)\n\t\t\t\tcard = card.entersHand()\n\t\t\t\tgame.sendSignal(\"CardEntersHand\", ID, None, [card], 0, comment)\n\t\t\t\tif byDiscover: game.sendSignal(\"PutinHandbyDiscover\", ID, None, obj, 0, '')\n\t\t\telse:\n\t\t\t\tself.Game.Counters.shadows[ID] += 1\n\t\tgame.Manas.calcMana_All()\n\t\t\n\tdef replaceCardDrawn(self, targetHolder, newCard):\n\t\tID = targetHolder[0].ID\n\t\tisPrimaryGalakrond = targetHolder[0] == self.Game.Counters.primaryGalakronds[ID]\n\t\ttargetHolder[0] = newCard\n\t\tif isPrimaryGalakrond: self.Game.Counters.primaryGalakronds[ID] = newCard\n\t\t\n\tdef replaceCardinHand(self, card, newCard):\n\t\tID = card.ID\n\t\ti = self.hands[ID].index(card)\n\t\tcard.leavesHand()\n\t\tself.hands[ID].pop(i)\n\t\tself.addCardtoHand(newCard, ID, \"\", byDiscover=False, pos=i, showAni=False)\n\t\t\n\tdef replaceCardinDeck(self, card, newCard):\n\t\tID = card.ID\n\t\ttry:\n\t\t\ti = self.decks[ID].index(card)\n\t\t\tcard.leavesDeck()\n\t\t\tself.decks[ID][i] = newCard\n\t\t\tself.Game.sendSignal(\"DeckCheck\", ID, None, None, 0, \"\")\n\t\texcept: pass\n\t\t\n\tdef replaceWholeDeck(self, ID, newCards):\n\t\tself.extractfromDeck(0, ID, all=True)\n\t\tself.decks[ID] = newCards\n\t\tfor card in newCards: card.entersDeck()\n\t\tself.Game.sendSignal(\"DeckCheck\", ID, None, None, 0, \"\")\n\t\t\n\tdef replacePartofDeck(self, ID, indices, newCards):\n\t\tfor card in newCards: card.leavesDeck()\n\t\tdeck = self.decks[ID]\n\t\tfor i, oldCard, newCard in zip(indices, deck, newCards):\n\t\t\toldCard.leavesDeck()\n\t\t\tdeck[i] = newCard\n\t\t\tnewCard.entersDeck()\n\t\tself.Game.sendSignal(\"DeckCheck\", ID, None, None, 0, \"\")\n\t\t\n\t# All the cards shuffled will be into the same deck. 
If necessary, invoke this function for each deck.\n\t# When Plot Twist shuffles the hand into the deck, buffed minions in hand come back without their buffs when drawn again.\n\t# Assume that shuffling a card into the deck resets it to its initial state\n\tdef shuffleCardintoDeck(self, obj, initiatorID, enemyCanSee=True, sendSig=True):\n\t\tif obj:\n\t\t\tcurGame = self.Game\n\t\t\tif curGame.GUI: curGame.GUI.shuffleCardintoDeckAni(obj, enemyCanSee)\n\t\t\tif isinstance(obj, (list, tuple, np.ndarray)):\n\t\t\t\tID = obj[0].ID\n\t\t\t\tnewDeck = self.decks[ID] + obj\n\t\t\t\tfor card in obj: card.entersDeck()\n\t\t\telse: # Shuffle a single card\n\t\t\t\tID = obj.ID\n\t\t\t\tnewDeck = self.decks[ID] + [obj]\n\t\t\t\tobj.entersDeck()\n\n\t\t\tif curGame.mode == 0:\n\t\t\t\tif curGame.guides:\n\t\t\t\t\torder = curGame.guides.pop(0)\n\t\t\t\telse:\n\t\t\t\t\torder = list(range(len(newDeck)))\n\t\t\t\t\tnpshuffle(order)\n\t\t\t\t\tcurGame.fixedGuides.append(tuple(order))\n\t\t\t\tself.decks[ID] = [newDeck[i] for i in order]\n\t\t\tif sendSig: curGame.sendSignal(\"CardShuffled\", initiatorID, None, obj, 0, \"\")\n\t\t\tcurGame.sendSignal(\"DeckCheck\", ID, None, None, 0, \"\")\n\t\n\t# Given the index in hand. Can't shuffle multiple cards except for the whole hand\n\tdef shufflefromHand2Deck(self, i, ID, initiatorID, all=True):\n\t\tif all:\n\t\t\thand = self.extractfromHand(None, ID, all, enemyCanSee=False)[0]\n\t\t\tfor card in hand: card.reset(ID)\n\t\t\tself.shuffleCardintoDeck(hand, initiatorID, enemyCanSee=False, sendSig=True)\n\t\telif i is not None: # a plain `elif i:` would wrongly skip hand index 0\n\t\t\tcard = self.extractfromHand(i, ID, all, enemyCanSee=False)[0]\n\t\t\tcard.reset(ID)\n\t\t\tself.shuffleCardintoDeck(card, initiatorID, enemyCanSee=False, sendSig=True)\n\t\t\t\n\tdef burialRite(self, ID, minions, noSignal=False):\n\t\tif not isinstance(minions, list):\n\t\t\tminions = [minions]\n\t\tfor minion in minions:\n\t\t\tself.Game.summonfromHand(minion, ID, -1, ID)\n\t\t\tminion.loseAbilityInhand()\n\t\tfor minion in minions:\n\t\t\tself.Game.killMinion(minion, minion)\n\t\tself.Game.gathertheDead()\n\t\tif not noSignal:\n\t\t\tfor minion in minions:\n\t\t\t\tself.Game.Counters.numBurialRiteThisGame[ID] += 1\n\t\t\t\tself.Game.sendSignal(\"BurialRite\", ID, None, minion, 0, \"\")\n\t\t\t\t\n\tdef discardAll(self, ID):\n\t\tif self.hands[ID]:\n\t\t\tcards, cost, isRightmostCardinHand = self.extractfromHand(None, ID=ID, all=True, enemyCanSee=True)\n\t\t\tn = len(cards)\n\t\t\tfor card in cards:\n\t\t\t\tcard.whenDiscarded()\n\t\t\t\tself.Game.Counters.cardsDiscardedThisGame[ID].append(card.index)\n\t\t\t\tself.Game.Counters.shadows[card.ID] += 1\n\t\t\t\tself.Game.sendSignal(\"PlayerDiscardsCard\", card.ID, None, card, -1, \"\")\n\t\t\tself.Game.sendSignal(\"PlayerDiscardsHand\", ID, None, None, n, \"\")\n\t\t\tself.Game.Manas.calcMana_All()\n\n\tdef discardCard(self, ID, card=None):\n\t\tif card is None: # Discard a random card.\n\t\t\tif self.hands[ID]:\n\t\t\t\tcard = npchoice(self.hands[ID])\n\t\t\t\tcard, cost, isRightmostCardinHand = self.extractfromHand(card, enemyCanSee=True)\n\t\t\t\tself.Game.sendSignal(\"PlayerDiscardsCard\", card.ID, None, card, 1, \"\")\n\t\t\t\tcard.whenDiscarded()\n\t\t\t\tself.Game.Manas.calcMana_All()\n\t\t\t\tself.Game.Counters.cardsDiscardedThisGame[ID].append(card.index)\n\t\t\t\tself.Game.Counters.shadows[card.ID] += 1\n\t\t\t\tself.Game.sendSignal(\"CardLeavesHand\", card.ID, None, card, 0, \"\")\n\t\telse: # Discard a chosen card.\n\t\t\ti = card if isinstance(card, (int, np.int32, np.int64)) else self.hands[ID].index(card)\n\t\t\tcard = self.hands[ID].pop(i)\n\t\t\tcard.leavesHand()\n\t\t\tif self.Game.GUI: 
self.Game.GUI.cardsLeaveHandAni(card, enemyCanSee=True)\n\t\t\tself.Game.sendSignal(\"PlayerDiscardsCard\", card.ID, None, card, 1, \"\")\n\t\t\tfor func in card.triggers[\"Discarded\"]: func()\n\t\t\tself.Game.Manas.calcMana_All()\n\t\t\tself.Game.Counters.cardsDiscardedThisGame[ID].append(card.index)\n\t\t\tself.Game.Counters.shadows[card.ID] += 1\n\t\t\tself.Game.sendSignal(\"CardLeavesHand\", card.ID, None, card, 0, \"\")\n\t\t\t\n\t# Can only extract either the entire hand or a single card, not several specified cards at once\n\tdef extractfromHand(self, card, ID=0, all=False, enemyCanSee=False):\n\t\tif all: # Extract the entire hand.\n\t\t\tcardsOut = self.hands[ID]\n\t\t\tif cardsOut:\n\t\t\t\tself.hands[ID] = []\n\t\t\t\tfor card in cardsOut:\n\t\t\t\t\tcard.leavesHand()\n\t\t\t\t\tself.Game.sendSignal(\"CardLeavesHand\", card.ID, None, card, 0, '')\n\t\t\t\t# Extracting the whole hand usually means shuffling it straight into the deck, so it is normally not revealed\n\t\t\t\tif self.Game.GUI: self.Game.GUI.cardsLeaveHandAni(cardsOut, False)\n\t\t\treturn cardsOut, 0, -2 # -2 means the posinHand doesn't have real meaning.\n\t\telse:\n\t\t\tif not isinstance(card, (int, np.int32, np.int64)):\n\t\t\t\t# Need to keep track of the card's location in hand.\n\t\t\t\tindex, cost = self.hands[card.ID].index(card), card.getMana()\n\t\t\t\tposinHand = index if index < len(self.hands[card.ID]) - 1 else -1\n\t\t\t\tcard = self.hands[card.ID].pop(index)\n\t\t\telse: # card is a number\n\t\t\t\tposinHand = card if card < len(self.hands[ID]) - 1 else -1\n\t\t\t\tcard = self.hands[ID].pop(card)\n\t\t\t\tcost = card.getMana()\n\t\t\tcard.leavesHand()\n\t\t\tif self.Game.GUI: self.Game.GUI.cardsLeaveHandAni(card, enemyCanSee)\n\t\t\tself.Game.sendSignal(\"CardLeavesHand\", card.ID, None, card, 0, '')\n\t\t\treturn card, cost, posinHand\n\n\t# Can only extract either the entire deck or a single card, not several specified cards at once\n\tdef extractfromDeck(self, card, ID=0, all=False, enemyCanSee=True):\n\t\tif all: # For replacing the entire deck or throwing it away.\n\t\t\tcardsOut = self.decks[ID]\n\t\t\tself.decks[ID] = []\n\t\t\tfor card in cardsOut: card.leavesDeck()\n\t\t\treturn cardsOut, 0, False\n\t\telse:\n\t\t\tif not isinstance(card, (int, np.int32, np.int64)):\n\t\t\t\tcard = extractfrom(card, self.decks[card.ID])\n\t\t\telse:\n\t\t\t\tif not self.decks[ID]:\n\t\t\t\t\treturn None, 0, False\n\t\t\t\tcard = self.decks[ID].pop(card)\n\t\t\tcard.leavesDeck()\n\t\t\tif self.Game.GUI: self.Game.GUI.cardLeavesDeckAni(card, enemyCanSee=enemyCanSee)\n\t\t\treturn card, 0, False\n\n\tdef removeDeckTopCard(self, ID, num=1):\n\t\tcards, i = [], 0\n\t\twhile i < num:\n\t\t\tcard = self.extractfromDeck(-1, ID)[0]\n\t\t\ti += 1\n\t\t\tif card: cards.append(card)\n\t\treturn cards\n\t\t\n\tdef createCopy(self, game):\n\t\tif self not in game.copiedObjs:\n\t\t\tCopy = type(self)(game)\n\t\t\tgame.copiedObjs[self] = Copy\n\t\t\tCopy.initialDecks = self.initialDecks\n\t\t\tCopy.hands, Copy.decks = {1: [], 2: []}, {1: [], 2: []}\n\t\t\tCopy.noCards, Copy.handUpperLimit = copy.deepcopy(self.noCards), copy.deepcopy(self.handUpperLimit)\n\t\t\tCopy.decks = {1: [card.createCopy(game) for card in self.decks[1]],\n\t\t\t\t\t\t 2: [card.createCopy(game) for card in self.decks[2]]}\n\t\t\tCopy.hands = {1: [card.createCopy(game) for card in self.hands[1]],\n\t\t\t\t\t\t 2: [card.createCopy(game) for card in self.hands[2]]}\n\t\t\treturn Copy\n\t\telse:\n\t\t\treturn game.copiedObjs[self]\n\n\nDefault1 = [OhMyYogg, Counterspell, PenFlinger, Frostbolt, RuneDagger, RuneDagger, DarkPharaohTekahn,\n\t\t\tRapidFire, HeadmasterKelThuzad, ZephrystheGreat, RaidLeader\n\t\t\t]\n\nDefault2 = [Khadgar, 
DwarvenSharpshooter, ProfessorSlate, OhMyYogg, Counterspell, ForbiddenWords, KirinTorMage, NatureStudies,\n\t\t\tPenFlinger, Frostbolt, ArcaneShot, RapidFire, HeadmasterKelThuzad, ZephrystheGreat, ZephrystheGreat\n\t\t\t]","sub_path":"Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":19908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58863170","text":"def gravitation(A):\n    B=zip(*A)\n    res=[]\n    for row in B:\n        if \"#\" not in row:\n            res.append(0)\n        else:\n            while row[0]==\".\":\n                row=row[1:]\n            res.append(sum([1 for x in row if x==\".\"]))\n    minr = min(res)\n    return [i for i,x in enumerate(res) if x==minr]","sub_path":"arcade/the-core/waterfall-of-integration/gravitation.py","file_name":"gravitation.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"616847002","text":"#\n# @lc app=leetcode id=1338 lang=python\n#\n# [1338] Reduce Array Size to The Half\n#\n\n# @lc code=start\n\nclass Solution:\n    def minSetSize(self, arr: List[int]) -> int:\n        freq = {}\n        for item in arr:\n            if item not in freq:\n                freq[item] = 0\n            freq[item] += 1\n        target = len(arr) // 2\n        new = reversed(sorted(freq, key=freq.get))\n        count = 0\n        for item in new:\n            count += 1\n            target -= freq[item]\n            if target <= 0:\n                return count\n        return count\n\n# @lc code=end\n","sub_path":"Leetcode/1338.reduce-array-size-to-the-half.py","file_name":"1338.reduce-array-size-to-the-half.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562138644","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/1/12 21:52\n# @Author  : Yajun Yin\n# @Note    :\n\nimport tensorflow as tf\nfrom tensorflow.contrib.lookup import index_table_from_file\nimport time\n\n\nclass BPRDynamicSampler(object):\n    \"\"\"Bayesian Personalized Ranking (BPR) provides a pair-wise criterion for matrix factorization training.\n\n    This class can generate a tf.data.dataset for a BPR dataset with dynamic negative sampling.\n    NOTE that the generated dataset avoids using py_func to ensure good parallelism (num_parallel_calls) on CPU.\n    Also, when negative sampling, each negative item is checked to be an item that the user never interacted with.\n    Lastly, ID-index mapping is inserted as long as user_list file and item_list file are provided.\n\n    In Alfredo Láinez Rodrigo and Luke de Oliveira, Distributed Bayesian Personalized Ranking in Spark:\n\n    BPR defines the training set as:\n    D:= {(u, i , j) | i in {I_u^+} and j in {I - I_u^+}},\n    which implies D is a set of all (user, item+, item-) triples.\n\n    Considering that we may have an enormous number of (user, items+), we need to sample\n    negative elements for every user to effectively learn its ranking.\n    (In the original BPR paper by Rendle, the proposed bootstrap sampling did not perform well in my tests)\n\n    Params:\n        user_list_file: file recording all user_IDs (no need to start from zero), one ID per line.\n        item_list_file: file recording all item_IDs (no need to start from zero), one ID per line.\n        user_interactive_items: dict, key is user_ID, value is list of item_IDs that the user interacted with.\n                                Like: {320480545: [9943255, 9992345, ...,98435453]}\n                                This is used to check whether a negative item is actually negative or not;\n                                you can get this dict through grouping data_file by user_ID.\n        data_file: csv file, triple of rating (user, item, 
score) in a line.\n                   THIS CSV FILE CAN BE USED FOR TRAINING WEIGHTED-ALS IN SPARK (as benchmark) DIRECTLY.\n                   Like: 42353425,6546745,3.0\n                         65474785,53455436,2.0\n\n    Usage:\n    >>> user_list_file, item_list_file=\"user_list.voc\", \"item_list.voc\"\n    >>> data_file = \"train.csv\"\n    >>> user_items = {53454: [342543, 7585454, 432543, 9867645], 435632: [423432]}\n    >>> sess = tf.InteractiveSession()\n    >>> sampler = BPRDynamicSampler(user_items, user_list_file, item_list_file, sess)\n    >>> bpr_dataset_iter = sampler.dataset(data_file, 128, True)\n    >>> for epoch in range(100):\n    >>>     sess.run(bpr_dataset_iter.initializer)\n    >>>     print(sess.run(bpr_dataset_iter.get_next()))\n    \"\"\"\n\n    def __init__(self, user_interactive_items, user_list_file, item_list_file, sess):\n        self.user_items = user_interactive_items\n        self.item_table = index_table_from_file(item_list_file)\n        self.user_table = index_table_from_file(user_list_file)\n        sess.run(tf.tables_initializer())\n\n        self.generate_sparse_tensor_table(sess)\n\n    def set_sparse_tensor_index_value(self, user, items):\n\n        user_index = self.user_table.lookup(user)\n        items_index = self.item_table.lookup(items)\n        for i in range(items.get_shape().as_list()[0]):\n            yield [user_index, tf.cast(tf.constant(i), tf.int64)], items_index[i]\n\n    @staticmethod\n    def lookup_sparse_tensor_by_index(sparse_tensor, index):\n        row, col = sparse_tensor.get_shape().as_list()\n        ret = tf.sparse_slice(sparse_tensor, [index, 0], [1, col]).values\n        ret = tf.cast(ret, tf.int64)\n        return ret\n\n    def generate_sparse_tensor_table(self, sess):\n        index = []\n        value = []\n        for user, items in self.user_items.items():\n            u = tf.constant(str(user))\n            i = tf.constant(list(map(str, items)))\n            d = self.set_sparse_tensor_index_value(u, i)\n            for k, v in d:\n                index.append(k)\n                value.append(v)\n\n        self.num_user = sess.run(self.user_table.size())\n        self.num_item = sess.run(self.item_table.size())\n        index, value = sess.run([index, value])\n        self.table = tf.SparseTensor(index, value, [self.num_user, self.num_item])\n\n    def dataset(self, data_file, batch_size, shuffle):\n        def parse(line):\n            _CSV_COLUMN_DEFAULTS = [[''], [''], [0.0]]\n            columns = tf.decode_csv(line, record_defaults=_CSV_COLUMN_DEFAULTS)\n            columns.pop()\n            u = columns[0]\n            u = self.user_table.lookup(u)\n            interactive_items = self.lookup_sparse_tensor_by_index(self.table, u)\n            item_i = columns[1]\n            item_i = self.item_table.lookup(item_i)\n            # sample uniformly over the full item index range (a hardcoded maxval=7 would only ever sample the first 7 items)\n            item_j = tf.random_uniform(shape=[], minval=0, maxval=self.num_item, dtype=tf.int64)\n\n            def cond(item_j, interactive_items):\n                return tf.reduce_any(tf.equal(interactive_items, item_j))\n\n            def body(item_j, interactive_items):\n                item_j = tf.random_uniform(shape=[], minval=0, maxval=self.num_item, dtype=tf.int64)\n                return item_j, interactive_items\n\n            j, _ = tf.while_loop(cond, body, [item_j, interactive_items])  # rejection sampling; see the plain-Python sketch below\n            columns.append(j)\n            features = {'u': u, 'i': item_i, 'j': j}\n            return features\n\n        dataset = tf.data.TextLineDataset(data_file)\n        if shuffle:\n            dataset = dataset.shuffle(buffer_size=20000)\n        dataset = dataset.map(parse, num_parallel_calls=8)\n        dataset = dataset.batch(batch_size)\n        dataset = dataset.prefetch(batch_size * 8)\n        iterator = dataset.make_initializable_iterator()\n        return iterator\n\n\nif __name__ == '__main__':\n    user_list_file, item_list_file = \"user_list.voc\", \"item_list.voc\"\n    data_file = \"train.csv\"\n    a = {54: [342, 756, 758], 32: [432, 342, 5341]}\n    sess = tf.InteractiveSession()\n    sampler = BPRDynamicSampler(a, user_list_file, item_list_file, sess)\n    bpr_dataset_iter = sampler.dataset(data_file, 3, False)\n\n    
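The tf.while_loop in parse above is rejection sampling: keep redrawing a candidate until it is not among the user's interacted items. The same idea in plain Python (a sketch for clarity only, assuming items are indexed 0..num_item-1):

import random

def sample_negative(interacted, num_item, rng=random):
    # redraw until the candidate is an item the user never interacted with
    j = rng.randrange(num_item)
    while j in interacted:
        j = rng.randrange(num_item)
    return j

# e.g. a user who interacted with items {0, 3, 7} out of 10:
# sample_negative({0, 3, 7}, 10) returns one of the remaining seven items.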
for epoch in range(20):\n        sess.run(bpr_dataset_iter.initializer)\n        print(sess.run(bpr_dataset_iter.get_next()))\n","sub_path":"dataset/BPR_dataset_dynamic_negative_sampling_tensorflow/bpr_sampler.py","file_name":"bpr_sampler.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"408713449","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#TASK 1: IMPORT LIBRARIES\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.stats import skew\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\nimport matplotlib.pyplot as plt\nplt.style.use(\"ggplot\")\nplt.rcParams['figure.figsize'] = (12, 8)\n\n\n# In[3]:\n\n\n#TASK 2: LOAD THE DATA - The advertising dataset captures sales revenue generated with respect to advertising spend across \n    #multiple channels like radio, tv and newspaper.\n\nadvert=pd.read_csv(\"Advertising.csv\")\nadvert.head()\n\n\n# In[4]:\n\n\nadvert.info()\n\n\n# In[5]:\n\n\n#TASK 3: RELATIONSHIP BETWEEN FEATURES AND RESPONSE\n\nsns.pairplot(advert, x_vars=['TV', 'radio', 'newspaper'], y_vars='sales', height=7, aspect=0.7)\n\n\n# In[6]:\n\n\n#TASK 4: MULTIPLE LINEAR REGRESSION - ESTIMATING COEFFICIENTS\n\nfrom sklearn.linear_model import LinearRegression\n\nx = advert[['TV', 'radio', 'newspaper']]\ny = advert.sales\n\nlm1=LinearRegression()\nlm1.fit(x,y)\n\nprint(lm1.intercept_)\nprint(lm1.coef_)\n\n\n# In[8]:\n\n\nlist(zip(['TV','radio', 'newspaper'], lm1.coef_))\n\n\n# In[10]:\n\n\nsns.heatmap(advert.corr(), annot=True)\n\n\n# In[14]:\n\n\n#TASK 5: FEATURE SELECTION - Here, model fit and the accuracy of the predictions will be evaluated using R² also known as r2_score\n\nfrom sklearn.metrics import r2_score\n\nlm2=LinearRegression().fit(x[['TV','radio']], y)\nlm2_pred=lm2.predict(x[['TV','radio']])\n\nprint(\"R^2 = \", r2_score(y,lm2_pred))\n\n\n# In[15]:\n\n\nlm3=LinearRegression().fit(x[['TV','radio','newspaper']], y)\nlm3_pred=lm3.predict(x[['TV','radio','newspaper']])\n\nprint(\"R^2 = \", r2_score(y,lm3_pred))\n\n\n# In[26]:\n\n\n#Task 6: Model Evaluation Using Train/Test Split and Metrics\n\n#Mean Absolute Error (MAE) is the mean of the absolute value of the errors:\n#Mean Squared Error (MSE) is the mean of the squared errors:\n#Root Mean Squared Error (RMSE) is the square root of the mean of the squared errors:\n\n#Here, let's use train/test split with RMSE to see whether newspaper should be kept in the model:\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nx=advert[['TV','radio','newspaper']]\ny=advert.sales\n\nx_train, x_test, y_train, y_test= train_test_split(x,y, random_state=1)\n\nlm4 = LinearRegression().fit(x_train,y_train)\nlm4_pred=lm4.predict(x_test)\n\nprint(\"RMSE = \", np.sqrt(mean_squared_error(y_test,lm4_pred)))\nprint(\"R^2 = \", r2_score(y_test,lm4_pred))\n\n\n# In[27]:\n\n\nx=advert[['TV','radio']]\ny=advert.sales\n\nx_train, x_test, y_train, y_test= train_test_split(x,y, random_state=1)\n\nlm5 = LinearRegression().fit(x_train,y_train)\nlm5_pred=lm5.predict(x_test)\n\nprint(\"RMSE = \", np.sqrt(mean_squared_error(y_test,lm5_pred)))\nprint(\"R^2 = \", r2_score(y_test,lm5_pred))\n\n\n# In[30]:\n\n\nfrom yellowbrick.regressor import PredictionError, ResidualsPlot\n\nvisualizer=PredictionError(lm5).fit(x_train, y_train)\nvisualizer.score(x_test, y_test)\nvisualizer.show()\n\n\n# In[32]:\n\n\n#TASK 7: INTERACTION EFFECT - SYNERGY\n\nadvert['interaction']= advert['TV'] * 
advert['radio']\n\nx=advert[['TV', 'radio', 'interaction']]\ny=advert.sales\n\nx_train, x_test, y_train, y_test= train_test_split(x,y, random_state=1)\n\nlm6 = LinearRegression().fit(x_train,y_train)\nlm6_pred=lm6.predict(x_test)\n\nprint(\"RMSE = \", np.sqrt(mean_squared_error(y_test,lm6_pred)))\nprint(\"R^2 = \", r2_score(y_test,lm6_pred))\n\n\n# In[34]:\n\n\nvisualizer=PredictionError(lm6).fit(x_train, y_train)\nvisualizer.score(x_test, y_test)\nvisualizer.show()\n\n\n# In[ ]:\n\n\n# CONCLUSION: The goal of this project was to identify trivial features. \n#The lower the RMSE value, the higher the accuracy. \n#Thus, in this model, newspaper seems to be a less important feature.\n\n","sub_path":"Predict Sales Revenue with multiple linear regression.py","file_name":"Predict Sales Revenue with multiple linear regression.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334686841","text":"# -*- coding: utf-8 -*-\n\nimport io\nimport sys\nimport csv\nimport time\nimport queue\nimport argparse\nimport threading\nfrom Bio import Entrez\nfrom metaparse import parse, get_experiment_xml_string\nfrom metatestxml import TEST_XML\n\n# ============================================================================ #\nUSAGE = \"\"\"Usage examples:\npython metadatatable.py -e myemail@ncbi.gov -ox raw.xml -ot parsed.tsv -t biomol_transcript[properties] OR study_type_transcriptome_analysis[properties] OR strategy_rna_seq[properties] OR strategy_FL_cDNA[properties]\npython metadatatable.py -e myemail@ncbi.gov -ox raw.xml -ot parsed.tsv -t (staphylococcus aureus[Title]) AND blood[Text Word]\npython metadatatable.py -e myemail@ncbi.gov -ox raw.xml -t (staphylococcus aureus[Title]) AND blood[Text Word]\npython metadatatable.py -e myemail@ncbi.gov -ot parsed.tsv -t strategy_rna_seq[properties]\npython metadatatable.py -e myemail@ncbi.gov -t strategy_rna_seq[properties]\npython metadatatable.py -e myemail@ncbi.gov -i myown.xml -ot parsed.tsv\npython metadatatable.py -e myemail@ncbi.gov -i myown.xml -ot parsed.tsv -f\npython metadatatable.py -e myemail@ncbi.gov -i myown.xml\n\"\"\"\n\nparser = argparse.ArgumentParser(\"MetadataTable\", description=USAGE,\n                                 formatter_class=argparse.RawTextHelpFormatter)\n\nparser.add_argument(\"-ox\", \"--output_xml\",\n                    type=str,\n                    help=\"Path for saving xml file\")\n\nparser.add_argument(\"-ot\", \"--output_tsv\",\n                    type=str,\n                    help=\"Path for saving parsed file\")\n\nparser.add_argument(\"-i\", \"--input_xml\",\n                    type=str,\n                    help=\"Path to input file\")\n\nparser.add_argument(\"-e\", \"--email\",\n                    type=str,\n                    help=\"Let NCBI know who you are (required if using -t)\")\n\nparser.add_argument(\"-t\", \"--term\",\n                    nargs=\"+\",\n                    type=str,\n                    help=\"Query terms\")\n\nparser.add_argument(\"-u\", \"--unlimited\",\n                    action=\"store_true\",\n                    help=\"Retrieve unlimited records\")\n\nparser.add_argument(\"-c\", \"--case\",\n                    type=str,\n                    default=\"rnaseq\",\n                    choices=(\"rnaseq\", \"source\"),\n                    help=\"Select which builtin case to use\")\n\nparser.add_argument(\"-f\", \"--full\",\n                    action=\"store_true\",\n                    help=\"Whether to output full table (Only for builtin template)\")\n\nparser.add_argument(\"-x\", \"--xpath\",\n                    type=str,\n                    help=\"Path to a csv file for xpath query\")\n\n# ============================================================================ #\nBUILTIN_XPATH = {\n    \"rnaseq\": [\n        (\"SRA run accession\", (\"//RUN_SET/RUN\", \"accession\"), True),\n        (\"SRA experiment accession\", 
(\"//EXPERIMENT\", \"accession\", \"accession\"), True),\n (\"Biosample accession (1-to-1 with SRA sample accession when both exist)\", (\"//IDENTIFIERS/EXTERNAL_ID[@namespace='BioSample']\", None), True),\n (\"Tissue\", (\"//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='tissue']/VALUE|//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='tissue source']/VALUE|//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='OrganismPart']/VALUE\", None), True),\n (\"Strain\", (\"//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='strain']/VALUE\", None), True),\n (\"Development Stage\", (\"//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='developmental stage']/VALUE|//SAMPLE/SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE[TAG='DevelopmentalStage']/VALUE\", None), True),\n (\"SRA project accession\", (\"//STUDY_REF\", \"accession\"), True),\n (\"Base count of run\", (\"//RUN\", \"total_bases\"), True),\n (\"Paired-end flag\", (\"//PAIRED\",\"*\"), True),\n (\"Spot count of run\", (\"//RUN\", \"total_spots\"), True),\n (\"Platform (eg Illumina)\", (\"//PLATFORM/*/*|//PLATFORM/*\", None), True),\n (\"SRA sample accession\", (\"//SAMPLE\",\"accession\"), False),\n # (\"Taxid\", (\"todo\",), False),\n # (\"Library source\", (\"todo\",), False),\n # (\"Cell line\", (\"todo\",), False),\n # (\"Sample title\", (\"todo\",), False),\n # (\"Source Provider\", (\"todo\",), False),\n # (\"Study description\", (\"todo\",), False),\n ],\n \"source\": [\n ],\n}\n\n# ============================================================================ #\nESEARCH_BATCH = 100000 # max=100,000\nEFETCH_BATCH = 1000 # max=10,000\nESEARCH_MAX = ESEARCH_BATCH\n\n\ndef Wait(lastTime): # Please do not post more than three URL requests per second.\n while time.time() - lastTime < 0.33:\n time.sleep(0.11)\n return time.time()\n\n\ndef GetIdList(**kwargs):\n idList = []\n # Get total count\n handle = Entrez.esearch(db=\"sra\", retmax=ESEARCH_BATCH, **kwargs)\n result = Entrez.read(handle)\n handle.close()\n idList.extend(result[\"IdList\"])\n total = int(result[\"Count\"])\n # Get all idList\n retstart = ESEARCH_BATCH\n lastTime = time.time()\n while retstart < total:\n handle = Entrez.esearch(db=\"sra\", retmax=ESEARCH_BATCH, retstart=retstart, **kwargs)\n result = Entrez.read(handle)\n handle.close()\n idList.extend(result[\"IdList\"])\n retstart += ESEARCH_BATCH\n lastTime = Wait(lastTime)\n return idList\n\n\ndef GetRecords(idList):\n total = len(idList)\n retstart = 0\n lastTime = time.time()\n while retstart < total:\n handle = Entrez.efetch(db=\"sra\", id=idList, retmax=EFETCH_BATCH, retstart=retstart)\n data = io.TextIOWrapper(handle.detach(), encoding=\"utf-8\")\n yield data.read()\n retstart += EFETCH_BATCH\n lastTime = Wait(lastTime)\n\n\n# ============================================================================ #\nclass TaskDone(object):\n pass\n\n\nclass BlackHole(object):\n def write(self, value):\n pass\n\n\ndef Process(q, output_xml, output_tsv, names, queries):\n fx = ft = BlackHole()\n if output_xml:\n fx = open(output_xml, \"w\", encoding=\"utf-8\")\n fx.write('\\n\\n')\n if output_tsv:\n ft = open(output_tsv, \"w\", encoding=\"utf-8\")\n ft.write(\"\\t\".join(names))\n ft.write(\"\\n\")\n if output_xml is None and output_tsv is None:\n ft = sys.stdout\n while True:\n item = q.get()\n if item is TaskDone:\n q.task_done()\n break\n fx.write(\"\".join(get_experiment_xml_string(item)))\n for row in parse(item, queries):\n ft.write(\"\\t\".join(row))\n ft.write(\"\\n\")\n ft.flush()\n q.task_done()\n if output_xml:\n fx.write(\"\\n\")\n 
fx.close()\n    if output_tsv:\n        ft.close()\n\n\n# ============================================================================ #\nif __name__ == \"__main__\":\n    P, _ = parser.parse_known_args()\n\n    # print(\"P.output_xml\", P.output_xml)\n    # print(\"P.output_tsv\", P.output_tsv)\n    # print(\"P.input_xml\", P.input_xml)\n    # print(\"P.email\", P.email)\n    # print(\"P.term\", P.term)\n    # print(\"P.unlimited\", P.unlimited)\n    # print(\"P.case\", P.case)\n    # print(\"P.full\", P.full)\n    # print(\"P.xpath\", P.xpath)\n    # exit()\n\n    # Only one input mode is allowed to avoid confusion\n    if P.input_xml and P.term:\n        sys.stderr.write(\"Please use either an xml file or search terms as input\")\n        exit(1)\n    if P.term and not P.email:\n        sys.stderr.write(\"Email is required for querying Entrez\")\n        exit(1)\n\n    # Set the field names and queries\n    if P.xpath:\n        NAMES = []\n        QUERIES = []\n        with open(P.xpath, newline=\"\", encoding=\"utf-8\") as csvfile:\n            for row in csv.reader(csvfile):\n                NAMES.append(row[0])\n                QUERIES.append((row[1], row[2] if len(row) == 3 and row[2] else None))\n        try:\n            for _ in parse(TEST_XML, QUERIES):\n                pass\n        except Exception as e:\n            sys.stderr.write(\"Xpath test failed. Please check the xpath file.\")\n            exit(1)\n    else:\n        fields = BUILTIN_XPATH[P.case]\n        if P.full:\n            NAMES = [row[0] for row in fields if row[2]]\n            QUERIES = [row[1] for row in fields if row[2]]\n        else:\n            NAMES = [row[0] for row in fields]\n            QUERIES = [row[1] for row in fields]\n\n    # Input Mode 1. Read and process xml file, save parsed data to a file or print out parsed data\n    if P.input_xml:\n        if P.output_tsv:\n            fo = open(P.output_tsv, \"w\", encoding=\"utf-8\")\n            fo.write(\"\\t\".join(NAMES))\n            fo.write(\"\\n\")\n        else:\n            fo = sys.stdout\n        with open(P.input_xml, \"r\", encoding=\"utf-8\") as fi:\n            for result in parse(fi, QUERIES):\n                fo.write(\"\\t\".join(result))\n                fo.write(\"\\n\")\n                fo.flush()\n        if P.output_tsv:\n            fo.close()\n\n    # Input Mode 2. Query Entrez and parse on the fly\n    elif P.term:\n        Entrez.email = P.email\n        Entrez.tool = \"MetadataTable\"\n\n        # Retrieve all ids\n        ids = GetIdList(term=\" \".join(P.term))\n        if len(ids) > ESEARCH_MAX:\n            sys.stderr.write(\"Query returned too many results (%s). 
Please consider refining your search or using the -u option\" % len(ids))\n            exit(1)\n\n        # Process the downloaded records in a separate thread\n        q = queue.Queue()\n        t = threading.Thread(target=Process, args=(q, P.output_xml, P.output_tsv, NAMES, QUERIES), daemon=True)\n        t.start()\n\n        # Retrieve all records and put them in the queue\n        for d in GetRecords(ids):\n            q.put(d)\n        q.put(TaskDone)\n\n        # Wait till all records are processed\n        q.join()\n\n    exit(0)\n","sub_path":"metadatatable.py","file_name":"metadatatable.py","file_ext":"py","file_size_in_byte":9762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73389244","text":"import pymysql\nimport data.sqlstringparser as ds\nfrom finspider.finspider.spiderurl import *\n\n\ndef test_sql_string():\n    _sql_conn_string = ds.mysql_conn_dict()\n    print(_sql_conn_string)\n\n\ndef test_conn():\n    sql_conn_string = ds.mysql_conn_dict()\n    db = pymysql.connect(**sql_conn_string)\n    cursor = db.cursor()\n    cursor.execute(\"SELECT VERSION()\")\n    data = cursor.fetchone()\n    print(data)\n    # print(db.open)\n    db.close()\n\ndef test_sina_url():\n    url_list = sina_fund_url()\n    # print(url_list)\n    print(url_list)\n\n\nif __name__ == \"__main__\":\n    test_sql_string()\n    # test_conn()\n    test_sina_url()\n\n\n\n\n","sub_path":"src/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"72787060","text":"__author__ = 'YJ'\nimport pymysql\n# -*- coding: UTF-8 -*-\n\nclass Mysql:\n    def __init__(self, host, user, passwd, database):\n        self.conn = pymysql.connect(host=host,\n                                    user=user,\n                                    passwd=passwd,\n                                    db=database\n                                    )\n        self.conn.set_charset('utf8')\n        # self.conn.set_character_set('utf8')\n\n    def __del__(self):\n        self.conn.close()\n\n    def clear_table(self, table):\n        \"\"\"\n        Truncate the given table\n        \"\"\"\n        with self.conn.cursor() as cursor:\n            # Create a new record\n            sql = \"truncate table %s\" % table\n            cursor.execute(sql)\n        self.conn.commit()\n\n    def save_catalog_page(self, page_id, page_link, page_content):\n        \"\"\"\n        Save a catalog page to the database\n        \"\"\"\n        try:\n            with self.conn.cursor() as cursor:\n                # Create a new record\n                sql = \"\"\"INSERT INTO catalog_pages(page_id, page_link, page_content)\n                VALUES (%s, %s, %s)\"\"\"\n                # print(page_id, page_link, page_content)\n                cursor.execute(sql, (page_id, page_link, page_content))\n        finally:\n            self.conn.commit()\n\n    def select_all_catalog_pages(self):\n        \"\"\"\n        Fetch the contents of all catalog pages\n        \"\"\"\n        with self.conn.cursor() as cursor:\n            sql = \"select page_content from catalog_pages\"\n            cursor.execute(sql)\n            result = cursor.fetchall()\n            return result\n\n    def save_detail_page(self, page_link, page_content):\n        \"\"\"\n        Save a detail page to the database\n        \"\"\"\n        try:\n            with self.conn.cursor() as cursor:\n                # Create a new record\n                sql = \"\"\"INSERT INTO detail_pages(page_link, page_content)\n                VALUES (%s, %s)\"\"\"\n                cursor.execute(sql, (page_link, page_content))\n        finally:\n            self.conn.commit()\n\n    def select_video_page(self, page_link):\n        \"\"\"\n        Check whether the detail page has already been downloaded\n        \"\"\"\n        with self.conn.cursor() as cursor:\n            check_repeat = \"select * from detail_pages where page_link LIKE '%s'\" % page_link\n            cursor.execute(check_repeat)\n            return cursor.fetchone()\n\n    def save_video(self, details):\n        \"\"\"\n        Save video details\n        \"\"\"\n        try:\n            with self.conn.cursor() as cursor:\n                # Create a new record\n                sql = \"INSERT INTO videos (title, link, actor, score, search_link, cover)\" \\\n                      \" VALUES (%s, %s, %s, %s, %s, %s)\"\n                cursor.execute(sql, (details['title'][:100], 
details['link'], details['actor'],\n details['score'], details['search'], details['cover']))\n finally:\n self.conn.commit()\n\n def select_video(self, video_link):\n with self.conn.cursor() as cursor:\n sql = \"select * from videos where link LIKE '%s'\" % video_link\n cursor.execute(sql)\n return cursor.fetchone()\n","sub_path":"MySQL.py","file_name":"MySQL.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576443809","text":"from sense_hat import SenseHat\n\nsense = SenseHat()\nsense.clear()\n\nwhile True:\n \n a = sense.get_accelerometer_raw()\n x = a[\"x\"]\n y = a[\"y\"]\n z = a[\"z\"]\n \n x = int(round(x,0))\n y = int(round(y,0))\n z = int(round(z,0))\n \n print(f\"X:{x} Y:{y} Z:{z}\")","sub_path":"raspberry_pi_unit/w4l1e2.py","file_name":"w4l1e2.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"466356650","text":"\nnum=1\nsum=0\nplist=[]\nmplist=[]\nfor x in input().split('+'):\n\tplist.append(x)\n\n\n\nfor j in range(len(plist)):\n\n\tmplist=plist[j].split('*')\n\n\tfor x in range(len(mplist)):\n\t\tnum = num * int(mplist[x])\n\t\tplist[j]=num\n\n\tnum=1\n\tsum+=int(plist[j])\nprint(sum)\n","sub_path":"자료구조_알고리즘설계해석/Python_String_Split_덧셈곱셈계산 2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"559310102","text":"# Add auto-completion and a stored history file of commands to your Python\n# interactive interpreter. Requires Python 2.0+, readline. Autocomplete is bound\n# to the Esc key by default (you can change it - see readline docs).\n#\n# Store the file in /pystartup, and set an environment\n# variable to point to it:\n# \"export PYTHONSTARTUP=/pystartup\" in bash.\n#\n\ntry:\n import atexit\n import os\n import readline\n\n history = os.path.expanduser('~/.pyhistory')\n if not os.path.isfile(history):\n with open(history, 'wb') as fh: fh.write('')\n readline.read_history_file(history)\n\n readline.parse_and_bind('tab: complete')\n atexit.register(readline.write_history_file, history)\nexcept:\n print(\"Unable to load python startup\")\n","sub_path":"Library/Python/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"9128355","text":"d1='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\nd2='BCDEFGHIJKLMNOPQRSTUVWXYZAbcdefghijklmnopqrstuvwxyza2345678901'\n\ndef encrypt(string1):\n ls1=[]\n for i in string1:\n if i in d1:\n ls1.append(d2[d1.index(i)])\n print(ls1)\n return ls1\n\ndef decrypt(string2):\n ls2=[]\n for i in string2:\n if i in d2:\n ls2.append(d1[d2.index(i)])\n print(ls2)\n return ls2\n\nwhile True:\n try:\n s1=input()\n s2=input()\n print(\"\".join(encrypt(s1)))\n print(\"\".join(decrypt(s2)))\n \n except:\n break\n","sub_path":"scripts01/scripts01/decrypt_encrypt.py","file_name":"decrypt_encrypt.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"445675435","text":"import os\nimport traceback\n\nimport IPython.display\nfrom IPython.display import HTML, Image, display\n\nimport radiopadre\nimport radiopadre.file\nfrom radiopadre.render import render_title, render_url, render_preamble\n\ndef 
_make_thumbnail(image, width):\n thumbdir = \"%s/radiopadre-thumbnails\" % os.path.dirname(image)\n thumb = os.path.join(thumbdir, \"%d.%s\" % (width, os.path.basename(image)))\n # does thumbdir need to be created?\n if not os.path.exists(thumbdir):\n if not os.access(os.path.dirname(thumbdir), os.W_OK):\n return None\n os.mkdir(thumbdir)\n # does thumb need to be updated?\n if not os.path.exists(thumb) or os.path.getmtime(thumb) < os.path.getmtime(image):\n # can't write? That's ok too\n if not os.access(thumbdir, os.W_OK) or os.path.exists(thumb) and not os.access(thumb, os.W_OK):\n return None\n if os.system(\"convert -thumbnail %d %s %s\" % (width, image, thumb)):\n raise RuntimeError(\"thumbnail convert failed, maybe imagemagick is not installed?\")\n return thumb\n\n\nclass ImageFile(radiopadre.file.FileBase):\n\n @staticmethod\n def _show_thumbs(images, width=None, ncol=None, maxwidth=None, mincol=None,\n external_thumbs=None,\n maxcol=None, title=None, **kw):\n\n if not images:\n return None\n nrow, ncol, width = radiopadre.file.compute_thumb_geometry(\n len(images), ncol, mincol, maxcol, width, maxwidth)\n npix = int(radiopadre.DPI * width)\n\n # make list of basename, filename tuples\n filelist = sorted(\n [(os.path.basename(img.fullpath), img.fullpath) for img in images])\n\n # keep track of thumbnail fails\n nfail = 0\n\n html = render_preamble() + render_title(title) + \\\n \"\"\"
<br>\n               <table style='border: 0px; text-align: left'>\\n\n               \"\"\"\n        for row in range(nrow):\n            html += \"\"\"<tr style='border: 0px; text-align: left'>\\n\"\"\"\n            filelist_row = filelist[row * ncol:(row + 1) * ncol]\n            for name, image in filelist_row:\n                html += \"\"\"<td style='border: 0px; text-align: center'>\\n\"\"\"\n                html += \"<a href=%s target='_blank'>%s</a>\" % (render_url(image), name)\n                html += \"</td>\\n\"\n            html += \"\"\"</tr><tr style='border: 0px; text-align: left'>\\n\"\"\"\n            for _, image in filelist_row:\n                if external_thumbs is False:\n                    thumb = None\n                # make thumbnail and record exceptions. Print the first one, as\n                # they really shouldn't happen\n                else:\n                    try:\n                        thumb = _make_thumbnail(image, npix)\n                        if not thumb and external_thumbs:\n                            nfail += 1\n                    except:\n                        if not nfail:\n                            traceback.print_exc()\n                        nfail += 1\n                        thumb = None\n                html += \"\"\"<td style='border: 0px; text-align: left'>\\n\"\"\"\n                if thumb:\n                    html += \"<a href=%s target='_blank'><img src=%s alt='?'></a>\" % (\n                        render_url(image), render_url(thumb))\n                else:\n                    html += \"<a href=%s target='_blank'><img src=%s width=%d alt='?'></a>\" % (\n                        render_url(image), render_url(image), npix)\n                html += \"</td>\\n\"\n            html += \"</tr>\\n\"\n        html += \"</table>\"\n\n        if nfail:\n            html += \"<b>(WARNING: %d thumbnails unexpectedly failed to generate, check console for errors)</b>
    \\n\" % nfail\n\n display(HTML(html))\n\n def show(self, width=None, **kw):\n display(Image(self.fullpath, width=width and width * 100))\n","sub_path":"radiopadre/imagefile.py","file_name":"imagefile.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583129262","text":"#\n# Copyright (c) 2011-2016, Hortonworks Inc. All rights reserved.\n#\n# Except as expressly permitted in a written agreement between your\n# company and Hortonworks, Inc, any use, reproduction, modification,\n# redistribution, sharing, lending or other exploitation of all or\n# any part of the contents of this file is strictly prohibited.\n#\n#\nfrom beaver.machine import Machine\nfrom beaver.config import Config\nfrom beaver.component.hadoop import Hadoop\nimport re\n\n\nclass Mahout:\n @classmethod\n def run(cls, cmd, cwd=None, env=None, logoutput=True):\n return cls.runas(None, cmd, cwd=cwd, env=env, logoutput=logoutput)\n\n @classmethod\n def runas(cls, user, cmd, cwd=None, env=None, logoutput=True, disableAuth=False):\n if not env:\n env = {}\n # if disable auth is requsted set the env\n # var to empty only for a secure cluster\n if disableAuth and Hadoop.isSecure():\n env['KRB5CCNAME'] = ''\n # if disableAuth is false and we are running a secure cluster get\n # the credentials\n elif Hadoop.isSecure():\n if user is None: user = Config.getEnv('USER')\n kerbTicket = Machine.getKerberosTicket(user)\n env['KRB5CCNAME'] = kerbTicket\n user = None\n\n mahout_cmd = Config.get('mahout', 'MAHOUT_CMD')\n mahout_cmd += \" \" + cmd\n osenv = {\"JAVA_HOME\": Config.get('machine', 'JAVA_HOME')}\n if env:\n for key, value in env.items():\n osenv[key] = value\n return Machine.runas(user, mahout_cmd, cwd=cwd, env=osenv, logoutput=logoutput)\n\n @classmethod\n def getVersion(cls):\n exit_code, output = cls.run(\"\", logoutput=False)\n if Machine.type() == \"Windows\":\n pattern = re.compile(\"MAHOUT_JOB:.*examples-(.*)-job.*\")\n else:\n pattern = re.compile(\"MAHOUT-JOB:.*examples-(.*)-job.*\")\n m = pattern.search(output)\n if m:\n return m.group(1)\n return \"\"\n","sub_path":"beaver/component/mahout.py","file_name":"mahout.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"157840762","text":"from typing import List\n\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n # dp[i]: if s[:i] can break by dict.\n dp = [False] * (len(s) + 1)\n dp[0] = True\n for i in range(len(s) + 1):\n for j in range(i-1, -1, -1):\n if s[j:i] in wordDict and dp[j]:\n dp[i] = True\n break\n return dp[-1]\n\n def wordBreakRecursive(self, s: str, wordDict: List[str]) -> bool:\n if not s:\n return True\n for word in wordDict:\n if s[:len(word)] == word and self.wordBreak(s[len(word):], wordDict):\n return True\n return False","sub_path":"python3/l0139_word_break.py","file_name":"l0139_word_break.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214728226","text":"from __future__ import unicode_literals\n\nimport hashlib\n\nfrom django.db import models\nfrom django.utils import timezone\nimport uuid\nimport json\nimport random\nfrom collections import OrderedDict\n\n\nclass Survey(models.Model):\n SURVEY_STATUS_CREATED = 1\n SURVEY_STATUS_SENT = 2\n SURVEY_STATUS_ANSWERED = 3\n\n SURVEY_STATUSES = (\n (SURVEY_STATUS_CREATED, 
'created'),\n        (SURVEY_STATUS_SENT, 'sent'),\n        (SURVEY_STATUS_ANSWERED, 'answered'),\n    )\n\n    survey_unique_value = models.SlugField(\n        max_length=100,\n        default='',\n    )\n    status = models.IntegerField(\n        default=SURVEY_STATUS_CREATED,\n        choices=SURVEY_STATUSES,\n    )\n    user_comment = models.TextField(\n        default='',\n    )\n    internal_comment = models.TextField(\n        default='',\n    )\n\n    created_at = models.DateTimeField(\n        default=timezone.now,\n    )\n\n    updated_at = models.DateTimeField(\n        default=timezone.now,\n    )\n\n    def __str__(self):\n        return 'Survey {0}, {1}'.format(self.id, self.created_at)\n\n    @classmethod\n    def get_random_string(cls):\n        return str(uuid.uuid1())\n\n\nclass Question(models.Model):\n    QUESTION_SINGLE_SELECT = 2\n    QUESTION_MULTIPLE_SELECT = 3\n    QUESTION_RATTING = 4\n\n    QUESTION_OPEN_END_COMMENT = 'What would you like to be changed?'\n\n    QUESTION_SINGLE_SELECT_ANSWER_YES = 'Yes'\n    QUESTION_SINGLE_SELECT_ANSWER_NO = 'No'\n\n    QUESTION_RATTING_OPTIONS = 5\n\n    QUESTION_STATUS_ACTIVE = 1\n    QUESTION_STATUS_INACTIVE = 0\n\n    QUESTION_TYPES = (\n        (QUESTION_SINGLE_SELECT, 'Yes / No'),\n        (QUESTION_RATTING, 'rating'),\n    )\n\n    QUESTION_STATUSES = (\n        (QUESTION_STATUS_ACTIVE, 'active'),\n        (QUESTION_STATUS_INACTIVE, 'inactive'),\n    )\n\n    question_text = models.TextField()\n    question_type = models.IntegerField(\n        default=QUESTION_RATTING,\n        choices=QUESTION_TYPES,\n    )\n    status = models.IntegerField(\n        default=QUESTION_STATUS_ACTIVE,\n        choices=QUESTION_STATUSES,\n    )\n    number = models.IntegerField(default=0)\n\n    def __unicode__(self):\n        return self.question_text\n\n    def save(self, *args, **kwargs):\n        super(Question, self).save(*args, **kwargs)\n\n        # compare against the question *type*, not the 'No' answer string,\n        # otherwise the Yes/No responses are never created\n        if self.question_type == Question.QUESTION_SINGLE_SELECT:\n            Responses.objects.create(\n                question=self,\n                response_text=Question.QUESTION_SINGLE_SELECT_ANSWER_YES,\n            )\n            Responses.objects.create(\n                question=self,\n                response_text=Question.QUESTION_SINGLE_SELECT_ANSWER_NO,\n            )\n        elif self.question_type == Question.QUESTION_RATTING:\n            responses = Question.QUESTION_RATTING_OPTIONS\n            question = Question.objects.get(id=self.id)\n            while responses > 0:\n                resp = Responses.objects.create(\n                    question=question,\n                    response_text=str(responses),\n                )\n                resp.save()\n                responses -= 1\n\n\nclass Responses(models.Model):\n    question = models.ForeignKey(\n        Question,\n        related_name='responses',\n        on_delete=models.CASCADE,\n    )\n    response_text = models.CharField(\n        default='',\n        max_length=255,\n    )\n\n    def __unicode__(self):\n        return '{0} response {1}'.format(self.question, self.response_text)\n\n\nclass Answers(models.Model):\n    answer = models.IntegerField()\n    survey = models.ForeignKey(\n        Survey,\n        related_name='answers',\n        on_delete=models.CASCADE,\n    )\n    question = models.ForeignKey(Question)\n\n    def __unicode__(self):\n        return 'Survey {0}, Question {1}, Answers: {2}'.format(\n            self.survey.id,\n            self.question.number,\n            self.answer,\n        )","sub_path":"delasport/survey/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461389247","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport logging\n\nfrom logging.handlers import RotatingFileHandler\nfrom werkzeug import import_string\n\nfrom flask import Flask\n\nfrom .extensions import db\nfrom .blueprints import DEFAULT_BLUEPRINTS\n\ndef create_app():\n    app = Flask(__name__)\n    # config\n    if os.getenv('FLASK') == 'dev':\n        app.config.from_pyfile('config/development.conf')\n        app.logger.info(\"Config: 
Development\")\n else:\n app.config.from_pyfile('config/production.conf')\n app.logger.info(\"Config: Production\")\n\n configure_extensions(app)\n configure_logging(app)\n\n # register module\n configure_blueprints(app, DEFAULT_BLUEPRINTS)\n\n return app\n\ndef configure_extensions(app):\n # configure extensions\n db.init_app(app)\n db_session = db.create_scoped_session()\n Base = db.make_declarative_base()\n Base.query = db_session.query_property()\n\n\ndef configure_blueprints(app, blueprints):\n blueprints_list = []\n packages_list = []\n\n for name in blueprints:\n blueprint = import_string(name)\n blueprints_list.append(blueprint)\n package = import_string(blueprint.import_name)\n packages_list.append(package)\n\n for package in list(set(packages_list)):\n __import__('%s.views' % package.__name__)\n\n for blueprint in list(set(blueprints_list)):\n app.register_blueprint(blueprint)\n\n\ndef configure_logging(app):\n\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n\n debug_log = os.path.join(app.root_path,\n app.config['DEBUG_LOG'])\n\n debug_file_handler = \\\n RotatingFileHandler(debug_log,\n maxBytes=100000,\n backupCount=10)\n\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n app.logger.addHandler(debug_file_handler)\n\n error_log = os.path.join(app.root_path,\n app.config['ERROR_LOG'])\n\n error_file_handler = \\\n RotatingFileHandler(error_log,\n maxBytes=100000,\n backupCount=10)\n\n error_file_handler.setLevel(logging.ERROR)\n error_file_handler.setFormatter(formatter)\n app.logger.addHandler(error_file_handler)\n","sub_path":"blog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210258406","text":"def delayy(n):\r\n i=0\r\n while i int:\n ver1=[int(v) for v in version1.split('.')]\n ver2=[int(v) for v in version2.split('.')]\n for i in range(max(len(version1),len(version2))):\n v1=ver1[i] if iv2:\n return 1\n elif v1