diff --git "a/2284.jsonl" "b/2284.jsonl" new file mode 100644--- /dev/null +++ "b/2284.jsonl" @@ -0,0 +1,628 @@ +{"seq_id":"385379754","text":"\"\"\"\n This program is a Flask web service which provides an HTTP interface to\n the passwd.db class.\n\n Under normal circumstances this program would be invoked with a WSGI\n wrapper for production use. Being this program is for demo purposes no\n WSGI wrapper program is provided.\n\n To run the demo execute:\n\n $: test/run.sh\n\n Call the service running on localhost:\n\n curl -s 'http://127.0.0.1:5151/users'\n\n curl -s 'http://127.0.0.1:5151/users/query?shell=/bin/false'\n\n curl -s 'http://127.0.0.1:5151/users/1001'\n\n curl -s 'http://127.0.0.1:5151/users/1002/groups'\n\n curl -s 'http://127.0.0.1:5151/groups'\n\n curl -s 'http://127.0.0.1:5151/groups/query?member=_analyticsd&member=_networkd'\n\n Author: Dylan Doxey \n Date: 07/20/2018\n\n\"\"\"\nimport os\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))\n\nfrom flask import Flask, request, jsonify\n\nimport passwd\n\napp = Flask(__name__)\ndb = passwd.db(os.environ.get('CONFIG', 'conf/config.ini'))\n\n\n@app.route('/')\ndef default():\n return 'It Works!'\n\n\n@app.route('/users')\ndef users():\n return jsonify(db.users())\n\n\n@app.route('/users/query')\ndef users_query():\n\n parameters = [\n 'name',\n 'uid',\n 'gid',\n 'comment',\n 'home',\n 'shell',\n ]\n query = {}\n\n for parameter in parameters:\n if parameter in request.args:\n query[parameter] = request.args.getlist(parameter)\n if len(query[parameter]) == 1:\n query[parameter] = query[parameter][0]\n\n return jsonify(db.users(query))\n\n\n@app.route('/users/')\ndef users_uid(uid):\n return jsonify(db.user({'uid': str(uid)}))\n\n\n@app.route('/users//groups')\ndef users_uid_groups(uid):\n user = db.user({'uid': str(uid)})\n if not user:\n return jsonify([])\n (gid, name) = (user['gid'], user['name'])\n groups = db.groups()\n chosen = []\n for group in groups:\n if gid == group['gid'] or name in group['members']:\n chosen.append(group)\n return jsonify(chosen)\n\n\n@app.route('/groups')\ndef groups():\n return jsonify(db.groups())\n\n\n@app.route('/groups/query')\ndef groups_query():\n\n parameters = [\n 'name',\n 'gid',\n 'member',\n 'members',\n ]\n query = {}\n\n for parameter in parameters:\n if parameter in request.args:\n query[parameter] = request.args.getlist(parameter)\n if len(query[parameter]) == 1:\n query[parameter] = query[parameter][0]\n\n if 'member' in query:\n if 'members' not in query:\n query['members'] = []\n query['members'].extend(query['member'])\n del query['member']\n\n return jsonify(db.groups(query))\n\n\n@app.route('/groups/')\ndef groups_gid(gid):\n return jsonify(db.group({'gid': str(gid)}))\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257522610","text":"from datetime import timedelta\nimport math\nimport random\nimport string\n\n# third-party\nimport redis\nimport twitter\n\n# django\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Case, When, BigIntegerField\nfrom django.template import loader\nfrom django.utils import timezone\nfrom django.views.generic import TemplateView\nfrom social_django.models import UserSocialAuth\n\n# application\nfrom .forms import TweetForm\nfrom 
.models import Tweet\n\n\nPOOL = redis.ConnectionPool(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)\nLIMIT = 20\n\nTAB_NEW = 0\nTAB_POP = 1\nTAB_HOT = 2\n\ndef index(request):\n template = loader.get_template('tweets/index.html')\n\n if not 'id' in request.session:\n request.session['id'] = ''.join(\n [random.choice(string.punctuation + string.ascii_letters + string.digits) \\\n for i in range(16)])\n\n tab = int(request.GET.get('tab', TAB_NEW))\n page = int(request.GET.get('page', 1))\n\n context = {\n 'user': request.user,\n 'page': page,\n 'tab': tab,\n }\n\n tweet_list = []\n daily_key, hot_key = get_redis_key()\n if tab == TAB_NEW:\n tweet_list = Tweet.objects.all().order_by('-create_at')[LIMIT*(page-1):LIMIT*page]\n count = Tweet.objects.all().count()\n elif tab == TAB_POP:\n tweet_list = get_ranking(daily_key, offset=page - 1)\n count = Tweet.objects.all().count()\n elif tab == TAB_HOT:\n tweet_list = get_ranking(hot_key, offset=page - 1)\n count = Tweet.objects.all().count()\n\n if len(tweet_list) == 0:\n return HttpResponse(template.render(context, request))\n context['tweet_list'] = tweet_list\n context['is_last'] = math.ceil(count/LIMIT) == page\n\n # ランキング用のキャッシュ作成\n conn = redis.StrictRedis(connection_pool=POOL)\n if not conn.exists(hot_key):\n tweet_id_list = []\n for tweet in tweet_list:\n tweet_id_list.append(0)\n tweet_id_list.append(tweet.tweet_id)\n conn.zadd(hot_key, *tweet_id_list)\n if not conn.exists(daily_key):\n conn.zadd(daily_key, *tweet_id_list)\n set_expired_time()\n\n return HttpResponse(template.render(context, request))\n\ndef how_to_use(request):\n template = loader.get_template('tweets/how_to_use.html')\n context = {\n 'user': request.user,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef detail(request, tweet_id):\n template = loader.get_template('tweets/detail.html')\n tweet = Tweet.objects.get(tweet_id=tweet_id)\n if tweet.user_id == 0:\n user = None\n else:\n user = UserSocialAuth.objects.get(user_id=tweet.user_id)\n context = {\n 'tweet': Tweet.objects.get(tweet_id=tweet_id),\n 'user': user,\n }\n return HttpResponse(template.render(context, request))\n\ndef like(request, tweet_id):\n url = '/tweets/{0}'.format(tweet_id)\n if not 'id' in request.session:\n return HttpResponseRedirect(url)\n conn = redis.StrictRedis(connection_pool=POOL)\n if 0 < conn.sadd('like:{0}'.format(tweet_id), request.session['id']):\n tweet = Tweet.objects.get(tweet_id=tweet_id)\n tweet.like += 1\n if 0 < conn.srem('dislike:{0}'.format(tweet_id), request.session['id']):\n tweet.dislike -= 1\n else:\n daily_key, hot_key = get_redis_key()\n conn.zincrby(hot_key, tweet_id)\n conn.zincrby(daily_key, tweet_id)\n set_expired_time()\n tweet.save()\n return HttpResponseRedirect(url)\n\ndef dislike(request, tweet_id):\n url = '/tweets/{0}'.format(tweet_id)\n if not 'id' in request.session:\n return HttpResponseRedirect(url)\n conn = redis.StrictRedis(connection_pool=POOL)\n if 0 < conn.sadd('dislike:{0}'.format(tweet_id), request.session['id']):\n tweet = Tweet.objects.get(tweet_id=tweet_id)\n tweet.dislike += 1\n if 0 < conn.srem('like:{0}'.format(tweet_id), request.session['id']):\n tweet.like -= 1\n daily_key, hot_key = get_redis_key()\n conn.zincrby(hot_key, tweet_id, -1)\n conn.zincrby(daily_key, tweet_id, -1)\n set_expired_time()\n tweet.save()\n return HttpResponseRedirect(url)\n\nclass TweetRegisterView(LoginRequiredMixin, TemplateView):\n login_url = '/'\n\n def get(self, request, *args, **kwargs):\n context = {\n 'user': request.user,\n 
'forms': TweetForm,\n }\n template = loader.get_template('tweets/register.html')\n return HttpResponse(template.render(context, request))\n\n def post(self, request, *args, **kwargs):\n\n form = TweetForm(request.POST)\n if not form.is_valid():\n return self.get(request, *args, **kwargs)\n\n tweet = form.save(commit=False)\n\n tweet.user_id = request.user.id\n\n # アイコン画像のURLを取得する\n api = twitter.Api(\n consumer_key=settings.TWITTER_CONSUMER_KEY,\n consumer_secret=settings.TWITTER_CONSUMER_SECRET,\n access_token_key=settings.TWITTER_ACCESS_TOKEN_KEY,\n access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET)\n tweet.profile_image_url = api.GetUser(screen_name=form.tweet_user_name).AsDict()['profile_image_url_https'].replace('_normal', '_400x400')\n tweet.save()\n\n return HttpResponseRedirect(self.login_url)\n\n# ランキング順でTweetオブジェクトを取得する\ndef get_redis_key():\n now = timezone.localtime(timezone.now())\n hot_key = 'hot_kusorep:{0}'.format(now.strftime('%Y%m%d%H'))\n daily_key = 'daily_ranking:{0}'.format(now.strftime('%Y%m%d'))\n return daily_key, hot_key\n\n\ndef get_ranking(key, limit=LIMIT, offset=0):\n conn = redis.StrictRedis(connection_pool=POOL)\n ranking_id_list = conn.zrange(key, limit*offset, limit*(offset+1)-1, desc=True, withscores=True)\n ranking_order = Case(*[When(tweet_id=tweet_id, then=int(score)) for tweet_id, score in ranking_id_list], output_field=BigIntegerField())\n ranking_list = Tweet.objects.filter(tweet_id__in=[tweet_id for tweet_id, _ in ranking_id_list])\\\n .annotate(ranking_order=ranking_order).order_by('-ranking_order', '-create_at')\n\n return ranking_list\n\ndef set_expired_time():\n now = timezone.localtime(timezone.now())\n\n conn = redis.StrictRedis(connection_pool=POOL)\n daily_key, hot_key = get_redis_key()\n\n base_time = now.replace(minute=0, second=0, microsecond=0)\n conn.expireat(hot_key, base_time+timedelta(hours=1))\n\n base_time = now.replace(hour=0)\n conn.expireat(daily_key, base_time+timedelta(days=1))\n\ndef get_user_dict(tweet_list):\n return {user.id: user for user in UserSocialAuth.objects.filter(user_id__in=set([tweet.user_id for tweet in tweet_list]))}","sub_path":"kusorep/tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"28993920","text":"class MiscMenu():\n\n from sys import exit as Exit\n\n def header(self):\n self.cls()\n print(self.strtable['00'])\n\n def menu1(self):\n self.header()\n print(self.strtable['60'])\n choice = self.getchoice(1, 6, '=> ')\n if (choice == 1):\n self.encryptmenu()\n if (choice == 2):\n self.decryptmenu()\n if (choice == 3):\n self.info()\n if (choice == 4):\n self.help()\n if (choice == 5):\n self.Exit()\n\n def info(self):\n self.header()\n print(self.strtable['31'])\n raw_input(self.strtable['62'])\n self.menu1()\n\n def help(self):\n self.header()\n print(self.strtable['41'])\n raw_input(self.strtable['62'])\n self.menu1()\n\n class stdRedirect():\n\n def maskget(self, prompt, sign):\n from sys import stdout\n from msvcrt import getch\n toreturn = ''\n a = 0\n stdout.write(prompt)\n while a != '\\n':\n a = getch()\n if ord(a) == 13:\n stdout.write('\\n')\n break\n if ord(a) == 8:\n if len(toreturn) != 0:\n stdout.write('\\b\\x00\\x00\\b')\n toreturn = toreturn[:-1]\n else:\n toreturn += a\n stdout.write(sign)\n return toreturn\n","sub_path":"Python/Python scripts/Encryption Projects/Encryption Project 
3/menus/miscmenu.py","file_name":"miscmenu.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"596519101","text":"from flask import Flask, session\n\nfrom data import db_session\nfrom data.users import User\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\n\ndef main():\n db_session.global_init(\"db/blogs.sqlite\")\n # user = User()\n # user.name = \"Пользователь 25\"\n # user.about = \"биография пользователя 5\"\n # user.email = \"email25@email.ru\"\n # user.set_password(\"dagdfagds\")\n\n # session = db_session.create_session()\n # session.add(user)\n # session.commit()\n app.run()\n\n for user in session.query(User).filter((User.id > 1) | (User.email.like(\"%4%\"))):\n print(user)\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"262991210","text":"__all__ = ('IntegrationDetail', )\n\nfrom ..core import ROLES\nfrom ..utils import timestamp_to_datetime, DISCORD_EPOCH_START\nfrom ..role import create_partial_role_from_id\n\nfrom .preinstanced import IntegrationExpireBehavior\n\nclass IntegrationDetail:\n \"\"\"\n Details about a non discord integration.\n \n Attributes\n ----------\n expire_behavior : ``IntegrationExpireBehavior``\n The behavior of expiring subscription.\n expire_grace_period : `int`\n The grace period in days for expiring subscribers. Can be `1`, `3`, `7`, `14` or `30`. If the integration is\n partial, or is not applicable for it, then is set as `-1`.\n role_id : `int`\n The role's identifier what the integration uses for subscribers.\n subscriber_count : `int`\n How many subscribers the integration has. 
Defaults to `0`.\n synced_at : `datetime`\n When the integration was last synced.\n syncing : `bool`\n Whether the integration syncing.\n \"\"\"\n __slots__ = ('expire_behavior', 'expire_grace_period', 'role_id', 'subscriber_count', 'synced_at', 'syncing', )\n \n def __init__(self, data):\n \"\"\"\n Fills up the integration detail from the respective integration's data.\n \n Parameters\n ----------\n data : `dict` of (`str`, `Any`) items\n Received integration data.\n \"\"\"\n self.syncing = data.get('syncing', False)\n \n role_id = data.get('role_id', None)\n if role_id is None:\n role_id = 0\n else:\n role_id = int(role_id)\n self.role_id = role_id\n \n self.expire_behavior = IntegrationExpireBehavior.get(data.get('expire_behavior', 0))\n \n self.expire_grace_period = data.get('expire_grace_period', -1)\n \n try:\n synced_at = data['synced_at']\n except KeyError:\n synced_at = DISCORD_EPOCH_START\n else:\n synced_at = timestamp_to_datetime(synced_at)\n self.synced_at = synced_at\n \n self.subscriber_count = data.get('subscriber_count', 0)\n \n @property\n def role(self):\n \"\"\"\n Returns the integration's role.\n \n Returns\n -------\n role : `None` or ``Role``\n \"\"\"\n role_id = self.role_id\n if role_id:\n return create_partial_role_from_id(role_id)\n \n \n @classmethod\n def from_role(cls, role):\n \"\"\"\n Creates a partial integration detail with the given role.\n \n Parameters\n ----------\n role : ``Role``\n The respective role.\n \n Returns\n -------\n self : ``IntegrationDetail``\n The created integration detail.\n \"\"\"\n self = object.__new__(cls)\n self.syncing = False\n self.role_id = role.id\n self.expire_behavior = IntegrationExpireBehavior.remove_role\n self.expire_grace_period = -1\n self.synced_at = DISCORD_EPOCH_START\n self.subscriber_count = 0\n return self\n \n def __repr__(self):\n \"\"\"Returns the integration detail's representation.\"\"\"\n repr_parts = [\n '<', self.__class__.__name__,\n ]\n \n role_id = self.role_id\n if role_id:\n try:\n role = ROLES[role_id]\n except KeyError:\n pass\n else:\n repr_parts.append(' role=')\n repr_parts.append(repr(role))\n \n repr_parts.append('>')\n return ''.join(repr_parts)\n","sub_path":"hata/discord/integration/integration_detail.py","file_name":"integration_detail.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"463686481","text":"from unittest import TestCase\n\nfrom utils.listnode import ListNode\nfrom week4.palindrome_linked_list import Solution\n\n\nclass TestSolution(TestCase):\n def test_is_palindrome(self):\n node1 = ListNode(1)\n node2 = ListNode(2)\n node3 = ListNode(1)\n\n node1.next = node2\n node2.next = node3\n\n s = Solution()\n s.isPalindrome(node1)","sub_path":"week4/test_palindrome_linked_list.py","file_name":"test_palindrome_linked_list.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349452921","text":"from __future__ import absolute_import\n\nfrom ..celery import celery, get_attribute_manager\n\nfrom eduid_userdb.testing import MongoTestCase\nfrom bson import ObjectId\n\nimport eduid_userdb\n\nfrom eduid_userdb.exceptions import UserDoesNotExist\n\n__author__ = 'leifj'\n\nclass AmTestUser(eduid_userdb.User):\n \"\"\"\n User class for the 'test' plugin below.\n \"\"\"\n def __init__(self, data):\n self.uid = data.pop('uid', None)\n\n eduid_userdb.User.__init__(self, data = data)\n\n def to_dict(self, 
old_userdb_format=False):\n res = eduid_userdb.User.to_dict(self, old_userdb_format=old_userdb_format)\n res['uid'] = self.uid\n return res\n\n\nclass AmTestUserDb(eduid_userdb.UserDB):\n \"\"\"\n UserDB for the 'test' plugin below.\n \"\"\"\n UserClass = AmTestUser\n\n\ndef plugin_attribute_fetcher(context, user_id):\n \"\"\"\n A small fake attribute manager plugin that reads a user and sets the 'eppn'\n attribute to one based on the users _id.\n\n :param context: User database\n :param user_id: Unique identifier\n :type context: AmTestUserDb\n :type user_id: ObjectId\n\n :return: update dict\n :rtype: dict\n \"\"\"\n assert isinstance(context, AmTestUserDb)\n db = context\n\n user = db.get_user_by_id(user_id)\n if user is None:\n raise UserDoesNotExist(\"No user matching _id={!r}\".format(user_id))\n\n # Transfer all attributes except `uid' from the test plugins database.\n # Transform eduPersonPrincipalName on the way to make it clear that the\n # update was done using this code.\n res = user.to_dict(old_userdb_format=True)\n res['eduPersonPrincipalName'] = \"{!s}@eduid.se\".format(user.uid)\n del res['uid']\n return res\n\n\nclass MessageTest(MongoTestCase):\n \"\"\"\n This testcase sets up an AttributeManager instance and sends a message to an internally defined plugin that\n transforms 'uid' to its urn:oid representation.\n \"\"\"\n def setUp(self):\n super(MessageTest, self).setUp(celery, get_attribute_manager)\n\n def testMessage(self):\n \"\"\"\n This simulates the 'test' application that keeps its own data in the 'user' collection in the 'test' DB\n and sends a message notifying the attribute manager instance (am) about a new entry in its dataset thereby\n calling the plugin (above) which is registered with the am in the test setup below.\n \"\"\"\n test_context = AmTestUserDb(db_uri = self.tmp_db.get_uri(''), db_name='eduid_am_test')\n\n # register fake AMP plugin named 'test'\n self.am.registry.attribute_fetcher['test'] = plugin_attribute_fetcher\n self.am.registry.context['test'] = test_context\n\n _id = ObjectId()\n userdoc = {'_id': _id,\n 'eduPersonPrincipalName': 'foo-bar',\n 'uid': 'vlindeman',\n 'passwords': [{'id': ObjectId('112345678901234567890123'),\n 'salt': '$NDNv1H1$9c81...545$32$32$',\n }],\n }\n test_user = AmTestUser(userdoc)\n # Save the user in the eduid_am_test database\n test_context.save(test_user)\n\n # It is important to not import eduid_am.tasks before the Celery config has been\n # set up (done in MongoTestCase.setUp()). 
Since Celery uses decorators, it will\n # have instantiated AttributeManagers without the right config if the import is\n # done prior to the Celery app configuration.\n from eduid_am.tasks import update_attributes\n update_attributes.delay(app_name='test', obj_id = _id)\n\n # verify the user has been propagated to the amdb\n am_user = self.amdb.get_user_by_id(_id)\n self.assertEqual(am_user.eppn, 'vlindeman@eduid.se')\n","sub_path":"eduid_am/tests/test_am.py","file_name":"test_am.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"186221616","text":"import sys\nread=lambda:sys.stdin.readline().rstrip()\nreadi=lambda:int(sys.stdin.readline())\nwriteln=lambda x:sys.stdout.write(str(x)+\"\\n\")\nwrite=lambda x:sys.stdout.write(x)\ncode = read(); lc = len(code)\ndp=[0]*(lc+1); dp[0]=1\nfor i in range(1, lc+1):\n x = int(code[i-1])\n if 1 <= x <= 9:\n dp[i] = (dp[i] + dp[i-1]) % 1000000\n if i == 1:\n continue\n if code[i-1] == '0':\n continue\n x = int(code[i-2])*10 + int(code[i-1])\n if 10 <= x <= 26:\n dp[i] = (dp[i] + dp[i-2]) % 1000000\nwriteln(dp[-1])\nprint(dp)","sub_path":"dp/cryptocode_2011.py","file_name":"cryptocode_2011.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"595780084","text":"import math\n\nfrom pyproj import Proj, transform\n\nclass CoordinateConverter(object):\n def __init__(self, zone=None):\n self.zone = None\n self.datumIn = 'WGS84'\n self.datumOut = 'WGS84'\n self.projectionLatLong = Proj(proj='latlong', datum=self.datumIn)\n self.setUtmProjection(zone)\n\n def convertLLToUtm(self, lat, lon):\n if self.projectionUtm is None:\n zone = self.determineUtmZone(lon)\n self.setUtmProjection(zone)\n\n x, y = transform(self.projectionLatLong, self.projectionUtm, lon, lat)\n return x, y, self.zone\n\n def convertUtmToLL(self, x, y, zone=None):\n if zone:\n self.setUtmProjection(zone)\n\n lon, lat = transform(self.projectionUtm, self.projectionLatLong, x, y)\n return lat, lon\n\n def setUtmProjection(self, zone=None):\n if zone:\n self.zone = zone\n if self.zone:\n self.projectionUtm = Proj(proj='utm', zone=self.zone, datum=self.datumOut)\n else:\n self.projectionUtm = None\n\n @staticmethod\n def determineUtmZone(longitude):\n zone = (math.floor((longitude + 180)/6) % 60) + 1\n return zone\n","sub_path":"coordinateConverter.py","file_name":"coordinateConverter.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"621194242","text":"from unittest.mock import MagicMock\nimport prefect\nfrom prefect import context\nfrom prefect.tasks.great_expectations import RunGreatExpectationsValidation\nfrom prefect.utilities.configuration import set_temporary_config\nimport pytest\nimport os\nimport shutil\nimport tempfile\n\n\nclass TestInitialization:\n def test_inits_with_no_args(self):\n t = RunGreatExpectationsValidation()\n assert t\n\n def test_kwargs_get_passed_to_task_init(self):\n t = RunGreatExpectationsValidation(\n checkpoint_name=\"checkpoint\",\n context=1234,\n assets_to_validate=[\"assets\"],\n batch_kwargs={\"kwargs\": \"here\"},\n expectation_suite_name=\"name\",\n context_root_dir=\"/path/to/somewhere\",\n runtime_environment={\n \"plugins_directory\": \"/path/to/plugins/somewhere/else\"\n },\n run_name=\"1234\",\n run_info_at_end=False,\n disable_markdown_artifact=True,\n 
evaluation_parameters=dict(prev_run_row_count=100),\n )\n assert t.checkpoint_name == \"checkpoint\"\n assert t.context == 1234\n assert t.assets_to_validate == [\"assets\"]\n assert t.batch_kwargs == {\"kwargs\": \"here\"}\n assert t.expectation_suite_name == \"name\"\n assert t.context_root_dir == \"/path/to/somewhere\"\n assert t.runtime_environment == {\n \"plugins_directory\": \"/path/to/plugins/somewhere/else\"\n }\n assert t.run_name == \"1234\"\n assert t.run_info_at_end == False\n assert t.disable_markdown_artifact == True\n assert t.evaluation_parameters == dict(prev_run_row_count=100)\n\n def test_raises_if_params_not_mutually_exclusive(self):\n task = RunGreatExpectationsValidation(context=\"test\")\n with pytest.raises(ValueError, match=\"Exactly\"):\n task.run()\n\n with pytest.raises(ValueError, match=\"Exactly\"):\n task.run(expectation_suite_name=\"name\")\n\n with pytest.raises(ValueError, match=\"Exactly\"):\n task.run(batch_kwargs={\"here\"})\n\n with pytest.raises(ValueError, match=\"Exactly\"):\n task.run(\n expectation_suite_name=\"name\",\n batch_kwargs={\"here\"},\n assets_to_validate=[\"val\"],\n )\n\n with pytest.raises(ValueError, match=\"Exactly\"):\n task.run(assets_to_validate=[\"val\"], checkpoint_name=\"name\")\n","sub_path":"tests/tasks/great_expectations/test_great_expectations_validation.py","file_name":"test_great_expectations_validation.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"6983040","text":"class Element():\n def __init__(self, name, symbol, number):\n self.name = name\n self.symbol = symbol\n self.number = number\n\ndic = {\"name\":\"Hydrogen\", \"symbol\":\"H\", \"number\":1}\n\nhydrogen = Element(dic['name'], dic['symbol'], dic['number'])\n\nprint(hydrogen.name)","sub_path":"6-5.py","file_name":"6-5.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"520421507","text":"\"\"\"\nGet Data from fitbit\n\"\"\"\n\nimport json\nimport time\nimport urllib\nimport urllib2\n\ncodes = json.loads(open('codes.json').read())\naccess_code = codes['access_token']\nuser_id = codes['user_id']\nclient_id = \"2288W8\"\n\ndata = {}\n\nheaders = {\n \"Authorization\": \"Bearer \"+access_code\n}\n\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/\"+user_id+\"/sleep/date/2017-04-21.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/-/heart/date/2017-04-20.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/\"+user_id+\"/activities/heart/date/today/1d.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/-/sleep/minutesAsleep/date/2017-04-11/7d.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/-/activities/steps/date/today/1m.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/-/activities.json\", urllib.urlencode(data), headers)\n\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/\"+user_id+\"/activities/heart/date/today/1m.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/\"+user_id+\"/activities/heart/date/today/1d/1sec/time/00:00/00:01.json\", urllib.urlencode(data), headers)\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/5MH8SH/profile.json\", urllib.urlencode(data), 
headers)\n\"\"\"for i in range(1, 10):\n req = urllib2.Request(\"https://api.fitbit.com/1/user/5MH8SH/heart/date/2017-05-0\"+str(i)+\".json\", urllib.urlencode(data), headers)\n try:\n response = urllib2.urlopen(req)\n print(response.read())\n except urllib2.HTTPError:\n pass\n time.sleep(1)\"\"\"\n\n#5MH8SH\n\n#req = urllib2.Request(\"https://api.fitbit.com/1/user/5MH8SH/heart/data/2017-04-20.json\", headers)\n#response = urllib2.urlopen(req)\n#print(response.read())\n\n##print(urllib2.urlopen(\"https://api.fitbit.com/1/user/\"+user_id+\"/activities/heart/date/today/1d.json\").read())\n\n\"\"\"\nSimluation stuff\n\"\"\"\nreq = urllib2.Request(\"http://localhost:49494/?\"+urllib.urlencode({\"PVC\": False, \"AFVF\": False, \"NOHR\": False}))\nresponse = urllib2.urlopen(req)\nprint(response.read())\n","sub_path":"fitbit.py","file_name":"fitbit.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"563574677","text":"from django.contrib import admin\nfrom home.models import Member, Activity, Message\n\n\nclass MemberAdmin(admin.ModelAdmin):\n\tlist_display = ('mem_full_name_en', 'mem_current_points', 'mem_weekly_added_points')\n\tlist_editable = ('mem_weekly_added_points',)\n\treadonly_fields = ('mem_last_points', 'mem_current_order_in_names', 'mem_last_order_in_names', 'mem_full_name_en', 'mem_full_name_ar',)\n\t\nclass ActivityAdmin(admin.ModelAdmin):\n\tlist_display = ('activity_week_number', 'activity_image', 'day_one_ar', 'day_two_ar', 'day_three_ar', 'day_four_ar', 'day_five_ar')\n\tlist_editable = ('activity_image', 'day_one_ar', 'day_two_ar', 'day_three_ar', 'day_four_ar', 'day_five_ar',)\n\n\nadmin.site.register(Member, MemberAdmin)\nadmin.site.register(Activity, ActivityAdmin)\nadmin.site.register(Message)\n","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"312731687","text":"import pygame as pg\nimport sys\nfrom settings import *\nfrom ship import *\n\nclass Game:\n def __init__(self):\n pg.init()\n pg.font.init()\n pg.mixer.init()\n\n self.screen = pg.display.set_mode((width, height))\n pg.display.set_caption(Title)\n self.clock = pg.time.Clock()\n pg.key.set_repeat(500, 100)\n self.load_data()\n self.running= True\n\n def load_data(self):\n pass\n\n def new(self):\n # initialize all variables and do all the setup for a new game\n self.all_sprites = pg.sprite.Group()\n self.wall = pg.sprite.Group()\n self.player = Player(self, 10, 10)\n self.boat = Boat(self, 11, 5)\n for x in range(5, 27):\n Wall(self, x, 1) and Wall(self, x, 22)\n for y in range(1, 22):\n Wall(self, 5, y) and Wall(self, 26, y)\n self.paused = False\n self.run()\n\n def run(self):\n # game loop - set self.playing = False to end the game\n self.playing = True\n while self.playing:\n self.dt = self.clock.tick(fps) / 1000\n self.events()\n if not self.paused:\n self.update()\n self.draw()\n\n def button(self, naam1, naam2, x, y, w, h):\n # image, image highlight, x pos, y pos, width, height\n mouse = pg.mouse.get_pos()\n\n # als x pos + width groter\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n self.screen.blit(naam1, (x, y))\n self.screen.blit(naam2, (x, y))\n else:\n self.screen.blit(naam1, (x, y))\n\n def quit(self):\n pg.quit()\n sys.exit()\n\n def update(self):\n # update portion of the game loop\n self.all_sprites.update()\n\n def instructions(self):\n 
self.screen.blit(instbg_image, (0, 0))\n\n\n pg.display.update()\n\n def draw_grid(self):\n for x in range(6 * tilesize, width - 5 * tilesize, tilesize):\n pg.draw.line(self.screen, light_grey, (x, 2 * tilesize), (x, height - 2 * tilesize))\n for y in range(2 * tilesize, height - 1 * tilesize, tilesize):\n pg.draw.line(self.screen, light_grey, (6 * tilesize, y), (width - 6 * tilesize, y))\n\n \"\"\"self.screen.blit(board_image, (0, 0))\n self.screen.blit(map_image, (width / 5, height / 10))\n\n self.button(menu1_image, menu2_image, 100, 100, 50, 50)\n self.screen.blit(kaartn_image, (20, 20))\n self.screen.blit(kaarts_image, (700, 20))\n\n mouse = pg.mouse.get_pos()\n if 700 + 80 > mouse[0] > 700 and 20 + 160 > mouse[1] > 20:\n self.screen.blit(kaart1, (600, 20))\n if 20 + 80 > mouse[0] > 20 and 20 + 160 > mouse[1] > 20:\n self.screen.blit(kaart2, (70, 20))\n\n for event in pg.event.get():\n if event.type == pg.MOUSEBUTTONDOWN:\n if 100 + 50 > mouse[0] > 100 and 100 + 50 > mouse[1] > 100:\n g.main_menu()\"\"\"\n\n def draw(self):\n self.screen.fill(dark_grey)\n self.draw_grid()\n self.all_sprites.draw(self.screen)\n if self.paused:\n self.screen.blit(pause_image, (0, 0))\n self.screen.blit(pauzet_image, (width / 5, width / 8))\n self.button(help1_image, help2_image, 600, 450, 150, 50)\n mouse = pg.mouse.get_pos()\n if 600 + 130 > mouse[0] > 600 and 450 + 50 > mouse[1] > 50:\n self.screen.blit(instbg_image, (0, 0))\n self.screen.blit(inst1_image, (100, 0))\n\n pg.display.flip()\n\n \"\"\"def options(self):\n self.screen.fill(aqua)\n TextSurf, TextRect = text_objects(\"options\", pg.font.Font('freesansbold.ttf', 60))\n TextRect.center = ((width / 2), (height/ 10))\n self.screen.blit(TextSurf, TextRect)\n\n self.button(\"resolution\", width / 24, height / 6, width / 6, height / 6, red,red, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"480p\", width / 8 * 2, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"720p\", width / 8 * 4, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"1080p\", width / 8 * 6, height / 6, width / 6, height / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))\n\n self.button(\"sound\", width / 24, height\n / 6 * 2.5, width / 6, height\n / 6, red,red, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"off\", width / 8 * 2, height\n / 6 * 2.5, width / 6, height\n / 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"50%\", width / 8 * 4, height\n / 6 * 2.5, width / 6, height\n / 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"100%\", width / 8 * 6, height\n / 6 * 2.5, width / 6, height\n / 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))\n\n self.button(\"window/full\", width / 8, height\n / 6 * 4, width / 4, height\n / 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"main menu\", width / 8 * 3, height\n / 6 * 4, width / 4, height\n / 6,silver, dark_silver, pg.font.Font('freesansbold.ttf', 20))\n self.button(\"exit\", width / 8 * 5, height\n / 6 * 4, width / 4, height\n / 6, silver,dark_silver, pg.font.Font('freesansbold.ttf', 20))\n\n mouse = pg.mouse.get_pos()\n pg.display.update()\n\n for event in pg.event.get():\n if event.type == pg.MOUSEBUTTONDOWN:\n if (width / 8 * 2) + width / 6 > mouse[0] > (width / 8 * 2) and height\\\n / 6 + height\\\n / 6 > mouse[1] > height\\\n / 6:\n pg.transform.scale(self.screen, (640, 480))\n 
pg.display.set_mode((640, 480))\n height = 480\n width = 640\n if (width / 8 * 4) + width / 6 > mouse[0] > (width / 8 * 4) and height / 6 + height / 6 > mouse[1] > height / 6:\n pg.display.set_mode((1280, 720))\n pg.transform.scale(self.screen, (1280, 720))\n width = 1280\n height = 720\n if (width / 8 * 6) + width / 6 > mouse[0] > (width / 8 * 6) and height / 6 + height\\\n / 6 > mouse[1] > height\\\n / 6:\n pg.display.set_mode((1920, 1080))\n pg.transform.scale(self.screen, (1920, 1080))\n width = 1920\n height\\\n = 1080\n if (width / 8 * 2) + width / 6 > mouse[0] > (width / 8 * 2) and height\\\n / 6 * 2.5 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 2.5:\n pass # Sounds options worden later toegevoegd\n if (width / 8 * 4) + width / 6 > mouse[0] > (width / 8 * 4) and height\\\n / 6 * 2.5 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 2.5:\n pass # Sounds options worden later toegevoegd\n if (width / 8 * 6) + width / 6 > mouse[0] > (width / 8 * 6) and height\\\n / 6 * 2.5 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 2.5:\n pass # Sounds options worden later toegevoegd\n if (width / 8) + width / 4 > mouse[0] > (width / 8) and height\\\n / 6 * 4 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 4:\n pg.display.set_mode(FULLSCREEN)\n if (width / 8 * 3) + width / 4 > mouse[0] > (width / 8 * 3) and height\\\n / 6 * 4 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 4:\n self.main_menu()\n if (width / 8 * 5) + width / 4 > mouse[0] > (width / 8 * 5) and height\\\n / 6 * 4 + height\\\n / 6 > mouse[1] > height\\\n / 6 * 4:\n pg.quit()\"\"\"\n\n def events(self):\n # catch all events here\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n self.quit()\n if event.key == pg.K_p:\n self.paused = not self.paused\n if event.key == pg.K_LEFT:\n self.player.move(dx=-1)\n if event.key == pg.K_RIGHT:\n self.player.move(dx=1)\n if event.key == pg.K_UP:\n self.player.move(dy=-1)\n if event.key == pg.K_DOWN:\n self.player.move(dy=1)\n\n def main_menu(self):\n in_main_menu = True\n while in_main_menu:\n self.screen.blit(bg_image, (0, 0))\n self.screen.blit(title_image, (width / 5, width / 8))\n\n self.button(start_image1, start_image2, 600, 400, 150, 50)\n self.button(score1_image, score2_image, 600, 500, 150, 50)\n self.button(help1_image, help2_image, 600, 450, 150, 50)\n\n mouse = pg.mouse.get_pos()\n\n pg.display.update()\n\n for event in pg.event.get():\n if event.type == pg.MOUSEBUTTONDOWN:\n if 600 + 150 > mouse[0] > 600 and 400 + 50 > mouse[1] > 150:\n g.new()\n if 600 + 150 > mouse[0] > 600 and 500 + 50 > mouse[1] > 150:\n pass # options\n if 600 + 150 > mouse[0] > 600 and 450 + 50 > mouse[1] > 150:\n g.instructions()\n if event.type == pg.QUIT:\n g.quit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n g.quit()\n\n def show_go_screen(self):\n pass\n\n def Score(self):\n pass\n\n# create the game object\ng = Game()\ng.main_menu()\nwhile g.running:\n g.new()\n g.run()\n g.main_menu\n\npg.quit()\n\n","sub_path":"v2/we made it.py","file_name":"we made it.py","file_ext":"py","file_size_in_byte":11015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"245509747","text":"import random\nimport numpy as np\nfrom show import *\n\n# from sklearn.model_selection import KFold\n\nnp.set_printoptions(precision=3) # 控制元素小数点后位数为3\n\n\nclass Train:\n\n def __init__(self, matrix, k=15, n=150):\n self.matrix = matrix\n self.user_type = matrix[:, 1]\n self.user_attr = 
matrix[:, 2:9]\n self.user_act = matrix[:, 9:]\n self.user_num = len(self.matrix) # 用户数量\n self.attr_len = 7 # 属性向量长度\n self.attr_weight = [0.055, 0.161, 0.133, 0.055, 0.161, 0.303, 0.133] # 属性权重\n # 性别,年龄,用户类型,账户类型,所属分公司,户主关系,地址\n self.act_len = 7 # 行为向量长度\n self.k = k # 最近邻数量n\n self.ks = np.arange(5, 105, 5)\n self.n = n # top-N推荐数目\n self.ns = np.arange(100, 500, 50) # top-N推荐数目\n\n self.sim_attr = np.zeros((self.user_num, self.user_num))\n self.sim_act = np.zeros((self.user_num, self.user_num))\n self.sim_user = np.zeros((self.user_num, self.user_num))\n\n self.score = np.zeros((self.user_num, 3), dtype=np.int)\n self.predict = np.zeros((self.user_num, 3))\n self.recommend = np.zeros((self.user_num, 3))\n self.recalls = np.zeros(3)\n self.precises = np.zeros(3)\n self.recall = 0\n self.precise = 0\n\n def attr_sim(self):\n print('***计算属性similarity***')\n for i in range(self.user_num):\n for j in range(i):\n self.sim_attr[i][j] = self.sim_attr[j][i]\n for j in range(i + 1, self.user_num):\n self.sim_attr[i][j] = self._attr_sim(self.user_attr[i], self.user_attr[j])\n # print(\"sim_attr[%s][%s]:\" % (i, j), self.sim_attr[i][j])\n # break\n # break\n np.savetxt('./data/users_attr_sim.csv', self.sim_attr, fmt='%0.3f')\n\n def _attr_sim(self, a, b):\n s = a == b\n # print(a)\n # print(b)\n # print(\"s:\", s)\n sim = s * self.attr_weight\n # print(\"weight:\", self.attr_weight)\n # print(\"sim:\", sim)\n return sim.sum()\n\n def act_sim(self):\n print('***计算行为similarity***')\n for i in range(self.user_num):\n for j in range(i):\n self.sim_act[i][j] = self.sim_act[j][i]\n for j in range(i + 1, self.user_num):\n self.sim_act[i][j] = self._act_sim(self.user_act[i], self.user_act[j])\n # print(\"sim_act[i][j]:\", self.sim_act[i][j])\n # break\n # break\n np.savetxt('./data/users_act_sim.csv', self.sim_act, fmt='%0.3f')\n\n def _act_sim(self, a, b):\n # print(a)\n # print(b)\n s = np.zeros(self.act_len)\n for k in range(self.act_len):\n s[k] = 2 * (1 - 1 / (1 + np.exp(-np.fabs(a[k] - b[k]))))\n # print(\"s:\", s)\n # print(\"sim:\", np.mean(s))\n return np.mean(s)\n\n def user_sim(self):\n print('***计算总的similarity***')\n for i in range(self.user_num):\n for j in range(i):\n self.sim_user[i][j] = self.sim_user[j][i]\n for j in range(i + 1, self.user_num):\n self.sim_user[i][j] = self._sim_user(self.sim_attr[i][j], self.sim_act[i][j], self.user_act[i],\n self.user_act[j])\n # print(\"sim_user[i][j]:\", self.sim_user[i][j])\n # break\n # break\n # np.savetxt('./data/users_sim.csv', self.sim_user, fmt='%0.3f')\n\n def _sim_user(self, s1, s2, a, b):\n c = 2 * (1 - 1 / (1 + np.exp(-(a.sum() + b.sum()) / 7)))\n # print(\"平滑系数:\", c)\n return s1 * c + s2 * (1 - c)\n\n def user_score(self):\n # print(self.user_type[:15])\n for i in range(self.user_num):\n self.score[i][self.user_type[i]] = 1\n # print(\"score:\", self.score[:15])\n # np.savetxt('./data/users_score.csv', self.score, fmt='%d')\n\n def user_predict(self):\n print('***计算预测分数***')\n for i in range(self.user_num):\n nb_top = np.argsort(self.sim_user[i])[-self.k:]\n # print(\"nb_top:\", nb_top)\n sim_top = self.sim_user[i][nb_top]\n score_top = self.score[nb_top]\n # print(\"sim_top:\", sim_top)\n # print(\"score_top:\", score_top)\n a = np.dot(sim_top, score_top)\n b = sim_top.sum()\n if b != 0:\n self.predict[i] = a / b\n # print(\"predict[i]:\", self.predict[i])\n # break\n # np.savetxt('./data/users_predict.csv', self.predict, fmt='%0.2f')\n\n def top_n(self):\n print('***计算topN用户列表,召回率,准确率***')\n for i in range(3):\n # 
print(\"predict[%s]:\" % i, self.predict[:, i])\n top = np.argsort(self.predict[:, i])[-self.n:]\n # top = np.nonzero(self.predict[:, i])\n # print(\"predict[%s]_top:\" % i, self.predict[:, i][top])\n for w in top:\n if self.predict[:, i][w] > 0:\n self.recommend[:, i][w] = 1\n # print(\"recommend[%s]_top:\" % i, self.recommend[:, i][top])\n a = (self.score[:, i] * self.recommend[:, i]).sum()\n b = self.score[:, i].sum()\n c = self.recommend[:, i].sum()\n # print(a, b, c)\n self.recalls[i] = a / b # 召回率\n self.precises[i] = a / c # 准确率\n print(\"召回率:\", self.recalls)\n print(\"准确率:\", self.precises)\n self.recall = np.mean(self.recalls)\n self.precise = np.mean(self.precises)\n print(\"召回率:\", self.recall)\n print(\"准确率:\", self.precise)\n\n def run(self):\n # self.attr_sim()\n # self.act_sim()\n # self.user_sim()\n self.user_score()\n self.sim_user = np.loadtxt('./data/users_sim.csv', dtype=np.float)\n self.user_predict()\n print(self.predict)\n print(len(self.predict[:, 2][np.nonzero(self.predict[:, 2])]))\n # self.top_n()\n\n def adjust_k(self):\n self.user_score()\n # self.sim_act = np.loadtxt('./data/users_act_sim.csv', dtype=np.float)\n # self.sim_attr = np.loadtxt('./data/users_attr_sim.csv', dtype=np.float)\n # self.user_sim()\n # self.sim_user = np.loadtxt('./data/users_sim.csv', dtype=np.float)\n # print(self.sim_user)\n x = self.ks\n y1 = np.zeros(len(self.ks))\n y2 = np.zeros(len(self.ks))\n for i in range(len(self.ks)):\n self.k = self.ks[i]\n print(\"K:\", self.k)\n self.predict = np.zeros((self.user_num, 3))\n self.recommend = np.zeros((self.user_num, 3))\n self.recalls = np.zeros(3)\n self.precises = np.zeros(3)\n self.recall = 0\n self.precise = 0\n\n self.user_predict()\n self.top_n()\n\n y1[i] = self.recall\n y2[i] = self.precise\n # break\n return x, y1, y2\n\n def adjust_n(self):\n self.user_score()\n # self.sim_user = np.loadtxt('./data/users_sim.csv', dtype=np.float)\n self.predict = np.zeros((self.user_num, 3))\n self.user_predict()\n x = self.ns\n y1 = np.zeros(len(self.ns))\n y2 = np.zeros(len(self.ns))\n for i in range(len(self.ns)):\n self.n = self.ns[i]\n print(\"N:\", self.n)\n self.recommend = np.zeros((self.user_num, 3))\n self.recalls = np.zeros(3)\n self.precises = np.zeros(3)\n self.recall = 0\n self.precise = 0\n\n self.top_n()\n\n y1[i] = self.recall\n y2[i] = self.precise\n return x, y1, y2\n\n def clod_start(self):\n a = np.zeros((2, len(self.ks)))\n b = np.zeros((2, len(self.ks)))\n self.sim_user = np.loadtxt('./data/users_attr_sim.csv', dtype=np.float)\n x, y1, y2 = self.adjust_k()\n a[0] = y1\n b[0] = y2\n # x = self.ks\n # self.user_score()\n r = p = 0\n for n in range(1000):\n self.predict = np.zeros((self.user_num, 3))\n self.recommend = np.zeros((self.user_num, 3))\n self.recalls = np.zeros(3)\n self.precises = np.zeros(3)\n self.recall = 0\n self.precise = 0\n for i in range(3):\n temp = int(np.ceil((self.score[:, i].sum() / self.user_num) * self.n))\n rand = random.sample(range(0, self.user_num), temp)\n # print(rand)\n self.predict[:, i][rand] = 1\n # break\n self.top_n()\n r += self.recall\n p += self.precise\n a[1] = r / 1000\n b[1] = p / 1000\n print(a[1], b[1])\n return x, a, b\n\n def compare(self):\n files = [\"users_sim\", \"users_attr_sim\", \"users_act_sim\"]\n a = np.zeros((3, len(self.ks)))\n b = np.zeros((3, len(self.ks)))\n for i in range(len(files)):\n self.sim_user = np.loadtxt('./data/%s.csv' % files[i], dtype=np.float)\n # print(self.sim_user)\n x, y1, y2 = self.adjust_k()\n a[i] = y1\n b[i] = y2\n print(a)\n print(b)\n 
return x, a, b\n\n\nif __name__ == '__main__':\n matrix = np.loadtxt('./data/users_filter_vector.csv', delimiter=',', skiprows=1, dtype=np.int)\n train = Train(matrix=matrix)\n # train.run()\n train.sim_user = np.loadtxt('./data/users_sim.csv', dtype=np.float)\n train.ks = np.arange(5, 100, 1)\n train.ns = np.arange(100, 2000, 100)\n\n x, y1, y2 = train.adjust_k()\n fig_show(x, y1, y2, \"近邻数目K\")\n # x, y1, y2 = train.adjust_n()\n # fig_show(x, y1, y2, \"TopN数目N\")\n\n x, y1, y2 = train.compare()\n fig_show_multi(x, y1, y2, \"近邻数目K\")\n\n x, y1, y2 = train.clod_start()\n fig_show_cold(x, y1, y2, \"近邻数目K\")\n","sub_path":"algorithm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16603006","text":"import os\nfrom bs4 import BeautifulSoup\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom collections import defaultdict\nimport pymongo\nimport timeout_decorator\nimport math\nimport string\nimport json\nimport urllib.parse\nimport webbrowser\n\n\n##Global constants\nstopwords_set = set(stopwords.words(\"english\"))\n\n\ndef invalid_folder(folder):\n return '.json' in folder or '.tsv' in folder or '.DS_Store' in folder\n\n\ndef is_english(word):\n try:\n word.encode(encoding = 'utf-8').decode('ascii')\n return True\n except UnicodeDecodeError:\n return False\n\n\ndef valid_token(word):\n return word not in stopwords_set and word[0] not in string.punctuation \\\n and is_english(word) and not word[0].isdigit() and len(word) > 2\n\n\n@timeout_decorator.timeout(5)\ndef update_dictionary(doc_number, dir, folder_num, dictionary):\n html = open(dir, 'r').read().lower()\n soup = BeautifulSoup(html, 'lxml').text\n tokens = word_tokenize(soup)\n\n for token in tokens:\n token = token.strip().lower()\n docID = str(folder_num + r'/' + doc_number)\n if valid_token(token):\n dictionary[token][docID] += 1\n\n\ndef create_dictionary(dictionary):\n dir = str(os.getcwd() + r'/WEBPAGES_RAW')\n folders = os.listdir(dir)\n docCount = 0\n folderCount = 0 #for debugging purposes\n\n for folder in folders:\n if invalid_folder(folder):\n continue\n print(\"folder: \" + str(folderCount) + \"/74\")\n folderCount += 1\n count = 0 #debugging purposes\n newDir = str(dir + r'/' + str(folder))\n files = os.listdir(newDir)\n\n for doc in files:\n #count += 1\n #if count == 3:\n # break\n docDir = str(newDir + r'/' + str(doc))\n try:\n update_dictionary(doc, docDir, folder, dictionary)\n docCount += 1\n except timeout_decorator.timeout_decorator.TimeoutError:\n continue\n return docCount\n\n\ndef print_dicionary(d):\n for token, values in d.items():\n print(token)\n for doc, count in values.items():\n s = '\\t' + str(doc) + ': ' + str(count)\n print(s)\n\n\ndef tfIdf_list(dictionary, docCount):\n collection_list = []\n for token, docs in dictionary.items():\n idf = math.log(docCount / len(token))\n result = []\n for docID, count in docs.items():\n tfidf = idf * count\n result.append({\"docID\": docID, \"count\": count, \"tf-idf\": tfidf})\n result = sorted(result, key = lambda x: -x['tf-idf']) #sorts the list by highest scores [champion list]\n collection_list.append({\"token\": token, \"data\": result})\n #[{'token': 'integrated', 'data': [{'docID': '61/135', 'count': 3, 'tfidf': 8.5}]}]\n return collection_list\n\n\n\ndef create_database(dataList, collection):\n collection.insert_many(dataList)\n\n\n\ndef find_docs(query, docs, collection):\n \"\"\"Find the docs 
relevant to the query and appends these docID's to the docs list\"\"\"\n try:\n cursorObj = collection.find({\"token\": query})[0]\n documents = cursorObj['data']\n for doc in documents:\n docs.append(doc['docID'])\n except IndexError:\n print(f\"No results found for {query}\")\n\n\n\ndef absolute(url):\n return urllib.parse.urljoin('http://', url)\n\n\n\ndef print_urls(docs, JSON, max):\n if len(docs) > max:\n for i in range(max):\n print(absolute(JSON[docs[i]]))\n else:\n for doc in docs:\n print(absolute(JSON[doc]))\n print()\n\n\n\ndef filter_result(docs):\n result = []\n for doc in docs:\n if docs.count(doc) > 1:\n result.append(doc)\n return result\n\n\n\ndef filter_queries(queries):\n result = []\n for i in queries:\n if valid_token(i):\n result.append(i)\n return result\n\n\n\ndef open_pages(docs, JSON, max):\n if max > len(docs):\n for doc in docs:\n url = absolute(JSON[doc])\n webbrowser.get('chrome').open_new_tab(url)\n else:\n for i in range(max):\n url = absolute(JSON[docs[i]])\n webbrowser.get('chrome').open_new_tab(url)\n\n\n\nif __name__ == \"__main__\":\n client = pymongo.MongoClient()\n db = client.searchEngine\n collection = db.index\n\n create_index = input(\"Would you like to create the index? [Enter yes or no]\\n\")\n if (create_index.lower() == 'yes'):\n frequencyDictionary = defaultdict(lambda: defaultdict(int))\n # what it looks like --> {'token': {'docID1: 5'}, {'doc2: 10'}}\n docCount = create_dictionary(frequencyDictionary) #total # of docs (N)\n tfIdf_list = tfIdf_list(frequencyDictionary, docCount)\n create_database(tfIdf_list, collection)\n\n\n directory = os.getcwd() + r'/WEBPAGES_RAW/bookkeeping.json'\n JSON = json.load(open(directory))\n\n while True:\n query = input(\"Enter your search query [or type quit() to exit]\\n\")\n\n if query == \"quit()\":\n break\n\n queries = query.strip().lower().split()\n queries = filter_queries(queries)\n docIDS = []\n\n for query in queries:\n find_docs(query, docIDS, collection)\n if len(queries) > 1:\n docIDS = filter_result(docIDS)\n print(\"\\nNumber of links: \" + str(len(docIDS)))\n print(\"First 20 links:\\n\")\n\n\n print_urls(docIDS, JSON, 20)\n\n\n if len(docIDS) > 1:\n open_doc = input(\"Enter how many pages you want to open or enter to continue\\n\")\n open_doc = open_doc.strip().lower()\n try:\n num_pages = int(open_doc)\n open_pages(docIDS, JSON, num_pages)\n except ValueError:\n pass\n\n\n\n client.close()\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"348615052","text":"import cv2\nimport numpy as np\n#import pytesseract\n\npic = cv2.imread('sample/Jeep.jpg')\nimg = cv2.imread('sample/Jeep.jpg')\n\nedges = cv2.Canny(img,200,200)\n\ncontours, heirarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nfor cnt in contours:\n area = int(cv2.contourArea(cnt))\n if(area > 1999 and area < 50000):\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(pic,(x,y),(x+w,y+h),(0,255,0),2)\n crop = img[y:y+h,x:x+w]\n\ncrop_grey = cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)\n\ncv2.imshow('edges',edges)\n#cv2.imwrite('test_data/6.jpg',crop_grey) Comment this out whe you want to write new data to 
file\ncv2.imshow('img',img)\ncv2.imshow('pic',pic)\ncv2.imshow('crop_grey',crop_grey)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"number_plate_2.py","file_name":"number_plate_2.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"601196413","text":"from kafka import *\nimport io\nimport avro.schema\nimport avro.datafile\nimport avro.io\nimport time\nimport sys\n\nkafka_client = KafkaClient('52.2.239.144:9092')\nproducer = KeyedProducer(kafka_client)\nschema_path = \"RuleMessage.avsc\"\nschema = avro.schema.parse(open(schema_path).read())\n\n\ndef message_serializer(station, model, iodata, value, connection):\n raw_bytes = None\n try:\n writer = avro.io.DatumWriter(schema)\n bytes_writer = io.BytesIO()\n encoder = avro.io.BinaryEncoder(bytes_writer)\n writer.write({\n \"station\": station,\n \"model\": model,\n \"io\": iodata,\n \"timestamp\": int(round(time.time() * 1000)),\n \"value\": value,\n \"connection\": connection\n },\n encoder)\n raw_bytes = bytes_writer.getvalue()\n except:\n print(\"Error serializer data\", sys.exc_info()[0])\n return raw_bytes\n\n\ndef send_message_producer(topic, raw_bytes, station):\n try:\n producer.send_messages(topic, station, raw_bytes)\n except:\n print(\"Error send message kafka\", sys.exc_info()[0])\n\n\ntopic = \"events\"\nmessageToSend = message_serializer(\"6\", \"d\", \"1d\", 1.0, True)\nraw_bytes = messageToSend\nif raw_bytes is not None:\n send_message_producer(topic, raw_bytes, \"idestacion\")\n","sub_path":"producerKafka.py","file_name":"producerKafka.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"348535094","text":"from ROOT import *\nfrom PlotTools import *\nfrom Utils import Bunch\nfrom Style import *\n\nSetLHCbStyle()\n\n\nf = TFile(\"/user2/sfarry/lhcb/DaVinciDev_v42r1/Top/options/tbvs_mc2016.root\")\ng = TFile(\"/user2/sfarry/lhcb/DaVinciDev_v42r1/Top/options/tbvs_mc2012.root\")\n\nplots = [\n Bunch(name='ghostprob', var = 'max(piplus_TRACK_GhostProb, piminus_TRACK_GhostProb)', bins = 50, lo = 0, hi = 0.6, xlabel = 'max ghostprob', ylabel = '[A.U.]'),\n Bunch(name='chi2ndof', var = 'KS0_ENDVERTEX_CHI2/KS0_ENDVERTEX_NDOF', bins = 50, lo = 0, hi = 10, xlabel = 'chi^{2}_{vtx.}/nDoF', ylabel = '[A.U.]'),\n Bunch(name='ipchi2', var='min(piplus_IPCHI2_TOPPV, piminus_IPCHI2_TOPPV)', bins = 50, lo = 0, hi = 10, xlabel = 'chi^{2}_{ip}', ylabel = '[A.U.]'),\n Bunch(name='eta', var='atanh( KS0_PZ / KS0_P)', bins = 50, lo = 2.0, hi = 4.5, xlabel = 'eta', ylabel = '[A.U.]'),\n Bunch(name='pt', var='KS0_PT', bins = 50, lo = 0, hi = 2000, xlabel = 'pt', ylabel = '[A.U.]'),\n Bunch(name='minpt', var='min(piplus_PT, piminus_PT)', bins = 50, lo = 0, hi = 2000, xlabel = 'min(pt)', ylabel = '[A.U.]')\n\n]\n\nevtplots = [\n Bunch(name='ntbvs', var = 'ntbvs', bins = 25, lo = 0, hi = 500, xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_full', var = 'ntbvs_full', bins = 26, lo = -1.0, hi = 51.0, xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_ip', var = 'ntbvs_ip', bins = 25, lo = 0, hi = 200, xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_looseip', var = 'ntbvs_looseip', bins = 25, lo = 0, hi = 500, xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_noip', var = 'ntbvs_noip', bins = 25, lo = 0, hi = 500, xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_gp', var = 'ntbvs_gp', bins = 25, lo = 0, hi = 500, 
xlabel = 'ntbvs', ylabel = '[A.U.]'),\n Bunch(name='ntbvs_trchi2', var = 'ntbvs_trchi2', bins = 25, lo = 0, hi = 500, xlabel = 'ntbvs', ylabel = '[A.U.]')\n\n]\n\nghostprob = TCut(\"max(piplus_TRACK_GhostProb, piminus_TRACK_GhostProb) < 0.4\")\n\nfrom Jawa import Template\n\ntbvs_mc2012 = Template(\"tbvs_mc2012\")\ntbvs_mc2012.SetSelCut(ghostprob)\ntbvs_mc2012.AddTree(g.Get(\"tbvs/DecayTree\"))\nfor b in plots:\n tbvs_mc2012.AddVar(b.name, b.var, b.bins, b.lo, b.hi)\ntbvs_mc2012.Run()\n\ntbvs_mc2016 = Template(\"tbvs_mc2016\")\ntbvs_mc2016.SetSelCut(ghostprob)\ntbvs_mc2016.AddTree(f.Get(\"tbvs/DecayTree\"))\nfor b in plots:\n tbvs_mc2016.AddVar(b.name, b.var, b.bins, b.lo, b.hi)\ntbvs_mc2016.Run()\n\ntbvs_mc2012_evt = Template(\"tbvs_mc2012_evt\")\ntbvs_mc2012_evt.AddTree(g.Get(\"tbvs_evt/EventTuple\"))\nfor b in evtplots:\n tbvs_mc2012_evt.AddVar(b.name, b.var, b.bins, b.lo, b.hi)\ntbvs_mc2012_evt.Run()\n\ntbvs_mc2016_evt = Template(\"tbvs_mc2016_evt\")\ntbvs_mc2016_evt.AddTree(f.Get(\"tbvs_evt/EventTuple\"))\nfor b in evtplots:\n tbvs_mc2016_evt.AddVar(b.name, b.var, b.bins, b.lo, b.hi)\ntbvs_mc2016_evt.Run()\n\n\n\nfor p in plots:\n d = Plot([tbvs_mc2012.GetVar(p.name).GetHist(), tbvs_mc2016.GetVar(p.name).GetHist()])\n d.AutoYlims()\n d.setProp('xlabel', p.xlabel)\n d.setProp('ylabel', p.ylabel)\n d.setProp('labels', ['mc2012', 'mc2016'])\n d.setProp('colors', ['red', 'blue'])\n d.setProp('location', '/user2/sfarry/workspaces/top/figures/')\n d.setProp('filename', 'tbvs_'+p.name)\n d.setProp('markerstyles', 20)\n d.setProp('leglims', [0.65, 0.7, 0.9, 0.9])\n d.setProp('normalised', True)\n if hasattr(p, 'shiftlegy'):\n d.ShiftLegY(p.shiftlegy)\n d.drawROOT()\n\nfor p in evtplots:\n d = Plot([tbvs_mc2012_evt.GetVar(p.name).GetHist(), tbvs_mc2016_evt.GetVar(p.name).GetHist()])\n d.AutoYlims()\n d.setProp('xlabel', p.xlabel)\n d.setProp('ylabel', p.ylabel)\n d.setProp('labels', ['mc2012', 'mc2016'])\n d.setProp('colors', ['red', 'blue'])\n d.setProp('location', '/user2/sfarry/workspaces/top/figures/')\n d.setProp('filename', 'tbvs_'+p.name)\n d.setProp('markerstyles', 20)\n d.setProp('leglims', [0.65, 0.7, 0.9, 0.9])\n d.setProp('normalised', True)\n if hasattr(p, 'shiftlegy'):\n d.ShiftLegY(p.shiftlegy)\n d.drawROOT()\n","sub_path":"top/python/draw_tbvs.py","file_name":"draw_tbvs.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"146515739","text":"import random\n\nstart_no = 1\nend_no = 1000\n\nlists = [i for i in random.sample(\n range(start_no, end_no + 1), 5) if i % 5 == 0 and i % 7 == 0]\n\nprint(lists)\n\n# work need\n","sub_path":"Training_Work/Training_WORKSPACE/p16.py","file_name":"p16.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"273962306","text":"import os, sys, time, random,torch\nimport models\nimport torch.backends.cudnn as cudnn\nimport argparse\nimport dataset\nimport compute_flops as flops\n# import mask,mask_modify\nimport mask as mask\nfrom utils import AverageMeter, \\\n RecorderMeter, time_string, \\\n convert_secs2time,print_log,\\\n accuracy,adjust_learning_rate,\\\n save_checkpoint\nimport original_train\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\nparser=argparse.ArgumentParser(description='filter pruning',\n 
formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--dataset', type=str, default='cifar10',\n                    choices=['cifar10', 'cifar100', 'imagenet'],\n                    help='Choose between Cifar10/100 and ImageNet.')\nparser.add_argument('--arch', metavar='ARCH', default='vgg16_cifar', choices=model_names,\n                    help='model architecture: ' + ' | '.join(model_names) + ' (default: resnext29_8_64)')\n# Optimization options\nparser.add_argument('--epochs', type=int, default=300, help='Number of epochs to train.')\nparser.add_argument('--batch_size', type=int, default=256, help='Batch size.')\nparser.add_argument('--learning_rate', type=float, default=0.1, help='The Learning Rate.')\nparser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\nparser.add_argument('--decay', type=float, default=0.0005, help='Weight decay (L2 penalty).')\nparser.add_argument('--schedule', type=int, nargs='+', default=[120, 225, 275],\n                    help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1, 0.5],\n                    help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')\n# Checkpoints\nparser.add_argument('--save_path', type=str, default='./', help='Folder to save checkpoints and log.')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\nparser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')\n# Acceleration\nparser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')\nparser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')\n# random seed\nparser.add_argument('--manualSeed', type=int, default=None, help='manual seed')\n# compress rate\nparser.add_argument('--rate', type=float, default=0.8, help='compress rate of model')\nparser.add_argument('--epoch_prune', type=int, default=1, help='prune the model once every this many epochs')\nparser.add_argument('--use_state_dict', dest='use_state_dict', action='store_true', help='use state dict or not')\nparser.add_argument('--description',type=str,default='')\nparser.add_argument('--last_index',type=int,default=0)\n\nargs=parser.parse_args()\nargs.use_cuda = args.ngpu > 0 and torch.cuda.is_available()\n\nif args.manualSeed is None:\n    args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif args.use_cuda:\n    torch.cuda.manual_seed_all(args.manualSeed)\ncudnn.benchmark = True\n# custom args\n# args.learning_rate=0.001 # for vgg\nargs.arch='resnet32'\nargs.dataset='cifar10'\nargs.method='my'\nargs.rate=0.8\nargs.description='{}_{}_{}_{}'.format(args.arch,args.dataset,args.method,args.rate)\nargs.save_path='./{}/'.format(args.description)\nargs.resume=args.save_path+'ckpt/'\nargs.resume=False\nargs.model_save=args.save_path+'model.pth'\nargs.last_index=90\nargs.original_train=False\n#\ndef main():\n    # Init logger\n    if not os.path.isdir(args.save_path):\n        os.makedirs(args.save_path)\n    if args.resume:\n        if not os.path.isdir(args.resume):\n            os.makedirs(args.resume)\n    log = open(os.path.join(args.save_path, '{}.txt'.format(args.description)), 'w')\n    print_log('save path : {}'.format(args.save_path), log)\n    state = {k: v for k, v in args._get_kwargs()}\n    print_log(state, log)\n    print_log(\"Random Seed: {}\".format(args.manualSeed), log)\n    
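# Log interpreter and framework versions below so the run can be reproduced exactly.
    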
print_log(\"use cuda: {}\".format(args.use_cuda), log)\n print_log(\"python version : {}\".format(sys.version.replace('\\n', ' ')), log)\n print_log(\"torch version : {}\".format(torch.__version__), log)\n print_log(\"cudnn version : {}\".format(torch.backends.cudnn.version()), log)\n print_log(\"Compress Rate: {}\".format(args.rate), log)\n print_log(\"Epoch prune: {}\".format(args.epoch_prune), log)\n print_log(\"description: {}\".format(args.description), log)\n\n # Init data loader\n if args.dataset=='cifar10':\n train_loader=dataset.cifar10DataLoader(True,args.batch_size,True,args.workers)\n test_loader=dataset.cifar10DataLoader(False,args.batch_size,False,args.workers)\n num_classes=10\n elif args.dataset=='cifar100':\n train_loader=dataset.cifar100DataLoader(True,args.batch_size,True,args.workers)\n test_loader=dataset.cifar100DataLoader(False,args.batch_size,False,args.workers)\n num_classes=100\n elif args.dataset=='imagenet':\n assert False,'Do not finish imagenet code'\n else:\n assert False,'Do not support dataset : {}'.format(args.dataset)\n\n # Init model\n if args.arch=='cifarvgg16':\n net=models.vgg16_cifar(True,num_classes)\n elif args.arch=='resnet32':\n net=models.resnet32(num_classes)\n elif args.arch=='resnet56':\n net=models.resnet56(num_classes)\n elif args.arch=='resnet110':\n net=models.resnet110(num_classes)\n else:\n assert False,'Not finished'\n\n\n print_log(\"=> network:\\n {}\".format(net),log)\n net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))\n # define loss function (criterion) and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],\n weight_decay=state['decay'], nesterov=True)\n if args.use_cuda:\n net.cuda()\n criterion.cuda()\n\n recorder = RecorderMeter(args.epochs)\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume+'checkpoint.pth.tar'):\n print_log(\"=> loading checkpoint '{}'\".format(args.resume+'checkpoint.pth.tar'), log)\n checkpoint = torch.load(args.resume+'checkpoint.pth.tar')\n recorder = checkpoint['recorder']\n args.start_epoch = checkpoint['epoch']\n if args.use_state_dict:\n net.load_state_dict(checkpoint['state_dict'])\n else:\n net = checkpoint['state_dict']\n optimizer.load_state_dict(checkpoint['optimizer'])\n print_log(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']), log)\n\n if args.evaluate:\n time1=time.time()\n validate(test_loader,net,criterion,args.use_cuda,log)\n time2=time.time()\n print('validate function took %0.3f ms' % ((time2 - time1) * 1000.0))\n return\n else:\n print_log(\"=> no checkpoint found at '{}'\".format(args.resume), log)\n else:\n print_log(\"=> not use any checkpoint for {} model\".format(args.description), log)\n\n if args.original_train:\n original_train.args.arch=args.arch\n original_train.args.dataset=args.dataset\n original_train.main()\n return\n\n comp_rate=args.rate\n m=mask.Mask(net,args.use_cuda)\n print(\"-\" * 10 + \"one epoch begin\" + \"-\" * 10)\n print(\"the compression rate now is %f\" % comp_rate)\n\n val_acc_1, val_los_1 = validate(test_loader, net, criterion, args.use_cuda,log)\n print(\" accu before is: %.3f %%\" % val_acc_1)\n\n m.model=net\n print('before pruning')\n m.init_mask(comp_rate,args.last_index)\n m.do_mask()\n print('after pruning')\n m.print_weights_zero()\n net=m.model#update net\n\n if args.use_cuda:\n net=net.cuda()\n val_acc_2, val_los_2 = validate(test_loader, net, criterion, 
args.use_cuda,log)\n print(\" accu after is: %.3f %%\" % val_acc_2)\n #\n\n start_time=time.time()\n epoch_time=AverageMeter()\n for epoch in range(args.start_epoch,args.epochs):\n current_learning_rate=adjust_learning_rate(args.learning_rate,optimizer,epoch,args.gammas,args.schedule)\n need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs - epoch))\n need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)\n print_log(\n '\\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs,\n need_time, current_learning_rate) \\\n + ' [Best : Accuracy={:.2f}]'.format(recorder.max_accuracy(False)), log)\n train_acc,train_los=train(train_loader,net,criterion,optimizer,epoch,args.use_cuda,log)\n validate(test_loader, net, criterion,args.use_cuda, log)\n if (epoch % args.epoch_prune == 0 or epoch == args.epochs - 1):\n m.model=net\n print('before pruning')\n m.print_weights_zero()\n m.init_mask(comp_rate,args.last_index)\n m.do_mask()\n print('after pruning')\n m.print_weights_zero()\n net=m.model\n if args.use_cuda:\n net=net.cuda()\n\n val_acc_2, val_los_2 = validate(test_loader, net, criterion,args.use_cuda,log)\n\n is_best = recorder.update(epoch, train_los, train_acc, val_los_2, val_acc_2)\n if args.resume:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': net,\n 'recorder': recorder,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args.resume, 'checkpoint.pth.tar')\n print('save ckpt done')\n\n epoch_time.update(time.time()-start_time)\n start_time=time.time()\n torch.save(net,args.model_save)\n # torch.save(net,args.save_path)\n flops.print_model_param_nums(net)\n flops.count_model_param_flops(net,32,False)\n log.close()\n\ndef train(train_loader,model,criterion,optimizer,epoch,use_cuda=False,log=None):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.train()\n\n end_time=time.time()\n for i, (input, label) in enumerate(train_loader):\n data_time.update(time.time() - end_time)\n if use_cuda:\n label = label.cuda()\n input = input.cuda()\n with torch.no_grad():\n input_var=torch.autograd.Variable(input)\n label_var=torch.autograd.Variable(label)\n output=model(input_var)\n loss=criterion(output,label_var)\n prec1, prec5 = accuracy(output.data, label, topk=(1, 5))\n # torch>=0.5\n losses.update(loss.data, input.size(0))\n top1.update(prec1, input.size(0))\n top5.update(prec5, input.size(0))\n # torch<0.5\n # losses.update(loss.data[0], input.size(0))\n # ...\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time()-end_time)\n end_time=time.time()\n\n print_log(' **Train** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5),log)\n return top1.avg, losses.avg\n\ndef validate(val_loader,model,criterion,use_cuda=False,log=None):\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n for i,(input,label) in enumerate(val_loader):\n if use_cuda:\n label=label.cuda()\n input=input.cuda()\n with torch.no_grad():\n input_var=torch.autograd.Variable(input)\n label_var=torch.autograd.Variable(label)\n output=model(input_var)\n loss=criterion(output,label_var)\n prec1, prec5 = accuracy(output.data, label, topk=(1, 5))\n # torch>=0.5\n losses.update(loss.data, input.size(0))\n top1.update(prec1, input.size(0))\n top5.update(prec5, input.size(0))\n # torch<0.5\n # losses.update(loss.data[0], input.size(0))\n # ...\n 
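# report the averaged top-1/top-5 precision over the whole validation set
    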
print_log(' **Test** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5),log)\n return top1.avg, losses.avg\n\nif __name__ == '__main__':\n for i in range(1):\n args.arch='resnet32'\n args.dataset='cifar10'\n args.method='sfp'\n args.rate=0.7\n args.description='{}_{}_{}_{}_{}'.format(args.arch,args.dataset,args.method,args.rate,i)\n args.save_path='./{}/'.format(args.description)\n args.resume=args.save_path+'ckpt/'\n args.resume=False\n args.model_save=args.save_path+'model.pth'\n args.last_index=90\n args.original_train=False\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"548021668","text":"#!/usr/bin/env python3\nimport sys, operator\n\nclass Node:\n def __init__(self, name):\n self.name = name;\n self.sent = 0\n self.notSent = 0\n self.peopleDict = {}\n self.notPeopleDict = {}\n def addReceiver(self, rname):\n if rname == self.name:\n return\n\n self.sent += 1\n\n if rname in self.peopleDict:\n self.peopleDict[rname] += 1\n else:\n self.peopleDict[rname] = 1\n\n def addNotSender(self, rname):\n if rname == self.name:\n return\n\n self.notSent += 1\n\n if rname in self.notPeopleDict:\n self.notPeopleDict[rname] += 1\n else:\n self.notPeopleDict[rname] = 1\n \n\n def probableFriends(self):\n probDict = {}\n for k,v in self.peopleDict.items():\n if k not in self.notPeopleDict:\n notVal = 0\n else:\n notVal = self.notPeopleDict[k]\n probDict[k] = abs((v/self.sent) - (notVal / self.notSent))\n return sorted(probDict.items(), key=operator.itemgetter(1), reverse=True)[:3]\n\nif __name__ == \"__main__\":\n lines = sys.stdin.readlines()\n\n Nodes = {}\n\n for i in range(0, 10):\n for j in range(0, 26):\n name = chr(j+97) + str(i)\n Nodes[name] = Node(name)\n\n\n for i in range(0, len(lines), 2):\n if not lines[i].strip():\n break\n\n senders = lines[i].strip(\"S[:]\\n\").replace(\"'\",\"\").replace(\" \", \"\").split(',')\n receivers = lines[i+1].strip(\"R[:]\\n\").replace(\"'\",\"\").replace(\" \", \"\").split(',')\n\n for k,v in Nodes.items():\n if v.name in senders:\n for rec in receivers:\n v.addReceiver(rec)\n else:\n for rec in receivers:\n v.addNotSender(rec)\n\n for k,node in Nodes.items():\n friends = node.probableFriends()\n print(\"%s,%s,%s,%s\" % (node.name, friends[0][0], friends[1][0], friends[2][0]))\n","sub_path":"Everything else/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"543746481","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom phones_app.models import Product, Category\nfrom django.contrib.auth.models import User\nimport datetime\n\n\nclass TestViews(TestCase):\n\n def setUp(self):\n test_category = Category.objects.create(\n name='Phones',\n slug='phones'\n )\n\n test_product = Product.objects.create(\n title='iPhone Xr',\n brand=test_category,\n image='some-image.png',\n price=666.66,\n slug='iphone-xr',\n description='dhawdawdw',\n full_description='dhuhagwiudgawk',\n time_added=datetime.datetime.now()\n )\n\n self.user = User.objects.create_user(\n 'john', 'lennon@thebeatles.com', 'johnpassword')\n\n self.client = Client()\n\n def test_base_view(self):\n response = self.client.get(reverse('base_view'))\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'index.html')\n\n def 
test_sign_up_view(self):\n        response = self.client.get(reverse('sign_up_view'))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'registration.html')\n\n    def test_sign_in_view(self):\n        response = self.client.get(reverse('sign_in_view'))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'login.html')\n\n    def test_detailed_product_view(self):\n        response = self.client.get(\n            reverse('detailed_product_view', args=['iphone-xr']))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'detailed_product.html')\n\n    def test_detailed_category_view(self):\n        response = self.client.get(\n            reverse('detailed_category_view', args=['phones']))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'detailed_category.html')\n\n    def test_account_view(self):\n        self.client.login(username='john', password='johnpassword')\n        response = self.client.get(\n            reverse('account_view', args=['alexpetul']))\n        self.assertEquals(response.status_code, 200)\n        self.assertTemplateUsed(response, 'account.html')\n","sub_path":"phones_app/tests/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"531096621","text":"import unittest\nfrom ..utils import isInSideRange, leftSideOutOfBound, rightSideOutOfBound\nfrom stubs import ImageStub\n\nclass TestIsInSideRange(unittest.TestCase):\n\n    def setUp(self):\n        #'csz': means container_side_size\n        #'esz': means element_side_size\n        self.csz = 100\n        self.esz = 20\n\n    def test_lowerOrigin(self):\n        side_origin = -1\n        self.assertFalse(isInSideRange(self.csz, self.esz,\n                                       side_origin),\n                         'lower origin value accepted')\n\n    def test_greaterExtrem(self):\n        side_origin = 90\n        self.assertFalse(isInSideRange(self.csz, self.esz,\n                                       side_origin),\n                         'greater extremity value accepted')\n\n    def test_isInContainerSideRange(self):\n        side_origin = 50\n        self.assertTrue(isInSideRange(self.csz, self.esz,\n                                      side_origin),\n                        \"\"\"element side in good range refused\"\"\")\n        self.assertTrue(isInSideRange(self.csz, self.csz, 0),\n                        \"\"\"element size and origin the same as\n                        container size and origin refused\"\"\")\n\nclass test_sideOutOfBound(unittest.TestCase):\n\n    def setUp(self):\n        self.page_size = (60, 100)\n\n    def test_rightSideOutOfBound(self):\n        comp_in_bound = ImageStub((15, 0), (40, 0))\n        self.assertFalse(rightSideOutOfBound(self.page_size,\n                                             comp_in_bound),\n                         'side in bound refused')\n        comp_out_bound = ImageStub((21, 0), (40, 0))\n        self.assertTrue(rightSideOutOfBound(self.page_size,\n                                            comp_out_bound),\n                        'side out of bound accepted')\n\n    def test_leftSideOutOfBound(self):\n        comp_in_bound = ImageStub((10, 20), (0, 0))\n        self.assertFalse(leftSideOutOfBound(self.page_size,\n                                            comp_in_bound),\n                         'side in bound refused')\n        comp_out_bound = ImageStub((10, 20), (-1, 0))\n        self.assertTrue(leftSideOutOfBound(self.page_size,\n                                           comp_out_bound),\n                        'side out of bound accepted')\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"app/pdfgen/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"47764925","text":"import json\n\nimport requests\n\nfrom modules import argbase as arg\n\n# define global variables\n\nbaseadvertiser = {\n    \"advertisers\": [\n\n    ]\n}\nbasecreative = {\n    \"materials\": [\n\n    ]\n}\nerrorcodes = {\n    1001: 
\"Authentication error (dsp-token error)\",\n 1002: \"Missing required parameter error\",\n 1003: \"Illegal parameters\",\n 1004: \"File format error\",\n 1005: \"File size error\",\n 1006: \"The file size is incorrect\",\n 1007: \"File get error\",\n 2001: \"Upload failed\",\n 2002: \"Data does not exist\",\n 2003: \"Database error\"\n}\n\ntrackingentry = {\n \"type\": \"\",\n \"id\": \"\",\n \"status\": 0,\n \"raw\": {}\n}\n\nbasejudge = {\n \"advId\": \"\",\n \"status\": \"\",\n \"reason\": \"\"\n}\n\nbaseheader = {'content-type': 'application/json', 'authorization': ''}\n\n# options as globals\nusagemsg = \"This program uses the judge api for the advertiser. It takes a single advertiser id.\"\nmsg = arg.MSG()\n\n\ndef main():\n \"\"\"main processing loop\"\"\"\n do = arg.MyArgs(usagemsg)\n do.processargs()\n if arg.Flags.test:\n msg.TEST(\"Running in test mode.\")\n baseurl = arg.Flags.configsettings['testurl']\n else:\n baseurl = arg.Flags.configsettings['serverurl']\n msg.DEBUG(do)\n baseheader['authorization'] = arg.Flags.configsettings['dsptoken']\n advId = arg.Flags.id\n status_code, rj = judgeadvertiser(baseurl, advId)\n if status_code == 200:\n # we don't know what to expect so just print out whatever you got\n # we assume it is json\n print(\"Received JSON as follows:\\n{}\".format(rj))\n\n\ndef judgeadvertiser(u: str, a):\n action_u_r_l = u + \"/v1/advertiser/judge\"\n msg.DEBUG(\"POST: {}\".format(action_u_r_l))\n basejudge[\"advId\"] = a\n try:\n r = requests.post(action_u_r_l, json=basejudge, headers=baseheader)\n msg.DEBUG(\"{}\\n\\t{}\".format(r.status_code, r.content.decode('utf-8')))\n except requests.exceptions.Timeout:\n # Maybe set up for a retry, or continue in a retry loop\n msg.ERROR(\"Connection timeout Error\")\n except requests.exceptions.RequestException as e:\n msg.ERROR(e)\n if r.status_code == 200:\n if arg.Flags.test:\n msg.TEST(\"full json is \\n\\t{}\".format(json.loads(r.content.decode('utf-8'))))\n msg.TEST(\"\\n\\tstatus: {}\\n\\theaders: {}\\n\\turl: {}\\n\\treason: {}\".format(r.status_code, r.headers,r.url, r.reason))\n return r.status_code, json.loads(r.content.decode('utf-8'))\n else:\n msg.ERROR(\"HTTP Response {}\\n{}\".format(r.status_code, r.content.decode('utf-8')))\n return None\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"deprecated/advjudge.py","file_name":"advjudge.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"443023507","text":"from node import Node\n\nwith open(\"license.txt\", \"r\") as input_file:\n license_numbers = map(lambda x: int(x), input_file.readlines()[0].split (\" \"))\n\nnode_num = 0\n\ndef create_node(start_index):\n # read header\n num_children = license_numbers[start_index]\n num_meta = license_numbers[start_index+1]\n\n new_node = Node(num_meta, num_children)\n end_index = start_index+2\n\n for i in range(num_children):\n end_index, new_child = create_node(end_index)\n new_node.add_child(new_child)\n\n for i in range(num_meta):\n new_node.add_meta(license_numbers[end_index])\n end_index += 1\n\n return (end_index, new_node)\n\n_, root_node = create_node(0)\n\nprint(root_node.sum_meta())","sub_path":"day_8/meta_sum/sum_metadata.py","file_name":"sum_metadata.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"184650303","text":"# https://leetcode.com/problems/reverse-integer/\n#\n# Program returns reversed 
input 32-bit integer or 0 if overflow occurs.\n#\n# Runtime: 36 ms, faster than 94.82% of Python3 online submissions for Reverse Integer.\n# Memory Usage: 13.3 MB, less than 44.18% of Python3 online submissions for Reverse Integer.\n# (5/27/2019)\n\nclass Solution:\n    def reverse(self, x: int) -> int:\n        \n        sign = -1 if x < 0 else 1\n        \n        reverse = int(str(x * sign)[::-1])\n        \n        return sign * reverse * (reverse < 2**31)\n","sub_path":"Python3/reverse-integer.py","file_name":"reverse-integer.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357071899","text":"'''\n\n@author: Viet Nam\n'''\n\nimport json \nfrom textwrap import indent \n\ndata = {}\ndata['user1'] = []\ndata['user1'].append(['duy','123'])\ndata['user2'] = []\ndata['user2'].append(['ronaldo','07'])\ndata['user3'] = []\ndata['user3'].append(['messi','10'])\ndata['user4'] = []\ndata['user4'].append(['salah','11'])\n\nf=open('users.json','w')\njson.dump(data,f,indent=4)\n\n\n","sub_path":"store application/user_json.py","file_name":"user_json.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"147676815","text":"from __future__ import print_function\n\nss = sorted(list(raw_input()))\nnums = map(int,filter(lambda x:ord(x)>=ord('0') and ord(x)<=ord('9'),ss))\nodd = filter(lambda x:x%2,nums)\neven = filter(lambda x:x%2==0,nums)\nupper = filter(lambda x:ord(x)>=ord('A') and ord(x)<=ord('Z'),ss)\nlower = filter(lambda x:ord(x)>=ord('a') and ord(x)<=ord('z'),ss)\nfinal = lower+upper+map(str,odd)+map(str,even)\n#map(lambda x:print(x,sep=''),final)\nprint(*final,sep='')\n\n# Someone's inline solution \n#print(*(sorted(input(), key=lambda x: (x.isdigit(), x.isdigit() and int(x)%2==0, x.isupper(), x.islower(), x))), sep='')\n","sub_path":"python/built-ins/ginorts.py","file_name":"ginorts.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"261150916","text":"from django.shortcuts import render\nfrom .models import Girl\nimport random\nfrom django.http import HttpResponseRedirect\n\n\ndef add_new(request):\n    if request.method == 'GET':\n        return render(request, 'comparing_app/add_new.html')\n    elif request.method == 'POST':\n        girl = Girl(\n            name=request.POST['name'],\n            description=request.POST['description'],\n            photo=request.FILES['picture']\n        )\n        girl.save()\n        return HttpResponseRedirect(\"/\" + str(girl.id))\n\n\ndef index(request):\n    if request.method == 'GET':\n        girls = Girl.objects.all()\n        first_girl = random.choice(girls)\n        second_girl = random.choice(girls)\n        while first_girl == second_girl:\n            second_girl = random.choice(girls)\n        return render(request, 'comparing_app/index.html', {'first': first_girl, 'second': second_girl})\n    elif request.method == 'POST':\n        winner_id = request.POST['win']\n        loser_id = request.POST['lose']\n        winner = Girl.objects.get(id=winner_id)\n        loser = Girl.objects.get(id=loser_id)\n        winner.rating += 1\n        loser.rating -= 1\n        winner.save()\n        loser.save()\n        return HttpResponseRedirect('/')\n# Create your views here.\n","sub_path":"comparing_system/comparing_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"159005832","text":"'''\nString to Integer (atoi)\nPlease implement an atoi 
function that converts a string into an integer.\n\nFirst, the function discards as many leading space characters as necessary until the first non-space character is found. The conversion rules are then:\n\nIf the first non-space character is a plus or minus sign, combine it with as many consecutive digit characters as possible to form a signed integer.\nIf the first non-space character is a digit, combine it directly with the consecutive digits that follow to form an integer.\nThe string may contain extra characters after the valid integer part; they can be ignored and have no effect on the result.\nIf the first non-space character is not a valid integer character, or the string is empty or contains only whitespace, no valid conversion can be performed.\n\nWhenever no valid conversion can be performed, return 0.\n\nNote:\n\nOnly the space character ' ' counts as whitespace in this problem.\nAssume the environment can only store 32-bit signed integers, whose value range is [-2^31, 2^31 - 1]. If the value is out of this range, return 2^31 - 1 or -2^31.\n\nExample 1:\n\nInput: \"42\"\nOutput: 42\nExample 2:\n\nInput: \"   -42\"\nOutput: -42\nExplanation: The first non-space character is '-', which is a minus sign.\n  We combine it with as many consecutive digits as possible, which gives -42.\nExample 3:\n\nInput: \"4193 with words\"\nOutput: 4193\nExplanation: Conversion stops at the digit '3', because the next character is not a digit.\nExample 4:\n\nInput: \"words and 987\"\nOutput: 0\nExplanation: The first non-space character is 'w', which is neither a digit nor a sign.\n Therefore no valid conversion can be performed.\nExample 5:\n\nInput: \"-91283472332\"\nOutput: -2147483648\nExplanation: The number \"-91283472332\" is out of the 32-bit signed integer range.\n  Therefore INT_MIN (-2^31) is returned.\n\nTopics: math, string\n'''\n\n\nclass Solution:\n    def myAtoi(self, str: str) -> int:\n        nums = ['-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n        res = []\n        str_list = list(str)\n        for i in range(len(str_list)):\n            if str_list[i] == ' ':\n                continue\n            elif str_list[i] in nums:\n                res.append(str_list[i])\n            else:\n                break\n        if not res:\n            return 0\n        res_1 = []\n        flag = 1\n        for each in res:\n            if each == '-':\n                flag = -1\n            if each == '-' or each == '+':\n                continue\n            res_1.append(each)\n        res_int = int(''.join(res_1))\n        if res_int * flag > (2 ** 31) - 1:\n            return (2 ** 31) - 1\n        if res_int * flag < -2 ** 31:\n            return -2 ** 31\n        return res_int * flag\n\n\nclass Solution:\n    def myAtoi(self, str: str) -> int:\n        i = 0\n        n = len(str)\n        while i < n and str[i] == ' ':\n            i = i + 1\n        if n == 0 or i == n:\n            return 0\n        flag = 1\n        if str[i] == '-':\n            flag = -1\n        if str[i] == '+' or str[i] == '-':\n            i = i + 1\n        INT_MAX = 2 ** 31 - 1\n        INT_MIN = -2 ** 31\n        ans = 0\n        while i < n and '0' <= str[i] <= '9':\n            ans = ans * 10 + int(str[i]) - int('0')\n            i += 1\n            if (ans - 1 > INT_MAX):\n                break\n\n        ans = ans * flag\n        if ans > INT_MAX:\n            return INT_MAX\n        return INT_MIN if ans < INT_MIN else ans\n","sub_path":"腾讯/数组与字符串/字符串转换整数 (atoi).py","file_name":"字符串转换整数 (atoi).py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462228958","text":"\"\"\"empty message\n\nRevision ID: 15d3e11dea8e\nRevises: 6b70a7ed2a00\nCreate Date: 2018-07-26 17:44:49.372723\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '15d3e11dea8e'\ndown_revision = '6b70a7ed2a00'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index('ix_images_id', table_name='images')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_index('ix_images_id', 'images', ['id'], unique=1)\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/15d3e11dea8e_.py","file_name":"15d3e11dea8e_.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"149050124","text":"import math\nimport numpy as np\n\ndef radian(angular):\n    return math.pi / 180 * angular\n\ndef to_XYZ(u, v, r):\n    return r*math.sin(radian(v))*math.cos(radian(u)), \\\n           r*math.sin(radian(v))*math.sin(radian(u)), \\\n           r*math.cos(radian(v))\n\ndef get_norm(u, v):\n    return to_XYZ(u, v, 1)\n\n# Equal-area projection\ndef get_tex_coor(u, v):\n    x = math.cos(radian(v))\n    y = math.sin(radian(v))\n    z = math.sqrt(math.pow(x, 2) + math.pow((1-y), 2))\n    if v < 90:\n        y = 0.5 + z / math.sqrt(2) / 2\n    else:\n        y = 0.5 - z / math.sqrt(2) / 2 \n    return 1-u/360, y\n\ndef to_data(t):\n    s = \"\"\n    for i in t:\n        s = s + str(i)\n        s +=\" \"\n    return s\n\n\nfile = open(\"sphere.dat\", \"w\")\nstrip = 10\nradius = 1\n\nfile.write(str(strip)+\" \"+str(strip)+\" \")\nfor v in np.arange(0, 180+strip, strip):\n    for u in np.arange(0, 360+strip, strip):\n        file.write(to_data(to_XYZ(u, v, radius)))\n        file.write(to_data(get_norm(u, v)))\n        file.write(to_data(get_tex_coor(u, v)))\nprint(np.arange(0, 180+strip, strip).size)\nprint(np.arange(0, 360+strip, strip).size)\n\nfile.close()\n\n\n\n\n","sub_path":"21851077 宋崇钧/21851077 宋崇钧 project02/src/sephere.py","file_name":"sephere.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"137642070","text":"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch.optim\n\nfrom . import FairseqOptimizer, register_optimizer\n\n\n@register_optimizer('adagrad')\nclass Adagrad(FairseqOptimizer):\n    def __init__(self, args, params):\n        super().__init__(args, params)\n        self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)\n\n    @staticmethod\n    def add_args(parser):\n        \"\"\"Add optimizer-specific arguments to the parser.\"\"\"\n        # fmt: off\n        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',\n                            help='weight decay')\n        # fmt: on\n\n    @property\n    def optimizer_config(self):\n        \"\"\"\n        Return a kwarg dictionary that will be used to override optimizer\n        args stored in checkpoints. 
This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n \"\"\"\n return {\n 'lr': self.args.lr[0],\n 'weight_decay': self.args.weight_decay,\n }\n","sub_path":"fairseq_master/fairseq/optim/adagrad.py","file_name":"adagrad.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"486736455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File name: eob_plot.py\n\"\"\"\nCreated on Wed Jan 10 16:32:09 2018\n\n@author: Neo(liuniu@smail.nju.edu.cn)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom read_eob import read_eob\n\n\n# ----------------------------- FUNCTIONS -----------------------------\ndef errorbarplot_res(x, y, err, lab, unit):\n '''\n '''\n\n plt.figure(figsize=(10, 4))\n # plt.plot(x, y, '.', markersize=0.1)\n # plt.xlabel('MJD')\n # plt.title(\"%s(%s)\" % (lab, unit))\n # plt.xlim([1979.0, 2018.0])\n\n fig, ax = plt.subplots(figsize=(10, 4))\n ax.errorbar(x, y, yerr=err,\n fmt='.', ms=1,\n ecolor='grey',\n elinewidth=0.1)\n\n ax.set_xlabel('MJD')\n ax.set_title(\"%s(%s)\" % (lab, unit))\n ax.set_xlim([1979.5, 2018.5])\n\n # plt.ylim([-30, 30])\n # plt.savefig(\"%s_residual30.eps\" % lab)\n # ax.set_ylim([-10, 10])\n # plt.savefig(\"%s_residual10.eps\" % lab)\n plt.ylim([-5, 5])\n plt.savefig(\"%s_residual05.eps\" % lab)\n plt.ylim([-1, 1])\n plt.savefig(\"%s_residual01.eps\" % lab)\n plt.close()\n\n\nEOB_file = \"/home/nliu/solutions/opa2018a/opa2018a.eob\"\n\n[dbname, obsnum, tag_eop, Xp, Xp_err, Yp, Yp_err, U, U_err,\n XR, XR_err, YR, YR_err, UR, UR_err,\n corXY, corXU, corYU, corXUR, corYUR, corUUR,\n tag_nut, dX, dX_err, dY, dY_err, cordXdY] = read_eob(EOB_file)\n\nepo = (tag_nut - 51544.5) / 365.25 + 2000.0\n\n# Plot\nerrorbarplot_res(epo, dX, dX_err, \"dX\", \"mas\")\nerrorbarplot_res(epo, dY, dY_err, \"dY\", \"mas\")\n\n# --------------------------------- END --------------------------------\n","sub_path":"eob_plot.py","file_name":"eob_plot.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244584028","text":"import sqlite3\nfrom pprint import pprint\nfrom random import shuffle\nfrom random import randint\nimport random\nfrom datetime import datetime\nimport math\n\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport torch\nimport matplotlib\nmatplotlib.use('Qt5Cairo')\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nimport settings\nfrom basket import StockBasket\n\n# random.seed(992312)\n\n\nclass DatasetView(Dataset):\n\n def __init__(self, dataset, mode):\n\n self.dataset = dataset\n self.mode = mode\n\n def __len__(self):\n return len(self.dataset.data[self.mode])\n\n def __getitem__(self, idx):\n\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n return self.dataset.data[self.mode][idx]\n\n\nclass TestDataset2:\n\n def __init__(self,\n num_stocks,\n split_ratio,\n forecast_len,\n hold_len):\n\n super().__init__()\n\n self.num_stocks = num_stocks\n self.split_ratio = split_ratio\n self.forecast_len = forecast_len\n\n basket = StockBasket(num_stocks)\n\n start_date_str = basket.start_date.strftime('%Y-%m-%d')\n end_date_str = basket.end_date.strftime('%Y-%m-%d')\n\n tickers, _, _, _, true_overall_rank = zip(*basket.get_list())\n\n with sqlite3.connect(settings.DATA_DIRECTORY / settings.DATABASE_NAME) as db:\n\n data = dict()\n for ticker in 
tickers:\n data[ticker] = db.execute('''\nSELECT date, adj_open, adj_high, adj_low, adj_close, adj_volume\nFROM qdl_eod_symbols_view\nWHERE symbol == ? AND date >= ? AND date <= ?\nORDER BY date;\n''', (ticker, start_date_str, end_date_str)).fetchall()\n\n dates = list()\n for _, rows in data.items():\n for row in rows:\n dates.append(row[0])\n\n dates = sorted(tuple(set(dates)))\n dates = {d: i for i, d in enumerate(dates)}\n\n tickers = {t: i for i, t in enumerate(tickers)}\n\n eod_df = np.ones((len(dates), num_stocks, 5)) * np.NaN\n\n self.t_dom = [datetime.strptime(d.split(' ')[0], '%Y-%m-%d').date()\n for d in dates.keys()]\n\n self.stock_labels = list()\n\n for ticker, rows in data.items():\n jdx = tickers[ticker]\n self.stock_labels.append(ticker)\n for row in rows:\n idx = dates[row[0]]\n\n eod_df[idx, jdx, :] = row[1:]\n\n # Detect and handle missing values.\n missing_values = np.argwhere(np.isnan(eod_df))\n self.num_missing_vals = len(missing_values)\n\n for t, j, i in missing_values:\n eod_df[t, j, i] = 1.0\n\n # Double check that all NaN have been filled.\n assert(len(np.argwhere(np.isnan(eod_df))) == 0)\n\n# missing = np.argwhere(np.isnan(eod_df))\n# if missing.shape[0] > 0:\n# print(missing)\n# exit()\n\n # Create features.\n# features_df = np.copy(eod_df)\n\n num_timesteps = len(eod_df) - 1\n features_df = np.zeros((num_timesteps, num_stocks, 5))\n\n for j in range(num_stocks):\n\n features_df[:, j, 0] = np.diff(np.log(eod_df[:, j, 0]), axis=0)\n features_df[:, j, 1] = np.diff(np.log(eod_df[:, j, 1]), axis=0)\n features_df[:, j, 2] = np.diff(np.log(eod_df[:, j, 2]), axis=0)\n features_df[:, j, 3] = np.diff(np.log(eod_df[:, j, 3]), axis=0)\n# features_df[:, j, 4] =\n # Generate splits.\n\n portion_dem = sum(split_ratio)\n portions = {'train': split_ratio[0] / portion_dem,\n 'validate': split_ratio[1] / portion_dem,\n 'test': split_ratio[2] / portion_dem\n }\n\n offsets = {'train': 0,\n 'validate': math.floor(portions['train'] * num_timesteps + 0.5),\n 'test': math.floor((portions['train'] + portions['validate']) * num_timesteps + 0.5)\n }\n\n MODES = ('train', 'validate', 'test')\n\n self.num_samples = {'train': offsets['validate'] - forecast_len - hold_len - 1,\n 'validate': offsets['test'] - offsets['validate'] - forecast_len - hold_len - 1,\n 'test': num_timesteps - offsets['test'] - forecast_len - hold_len - 1}\n\n self.data = {m: list() for m in MODES}\n\n # Construct samples for TRAIN, VALIDATE, and TEST.\n for mode in MODES:\n\n # The size of the sampling window constrains how many samples we can\n # get. 
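Each mode yields span - forecast_len - hold_len - 1 windows (see num_samples above). 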
Samples are generated by sliding a window through the particular\n # mode's timeseries.\n for idx in range(self.num_samples[mode]):\n\n # These are the offsets for the window.\n t0 = idx\n t1 = idx + forecast_len\n t2 = t1 + hold_len\n\n window = np.copy(features_df[t0:t2])\n\n for i_stock in range(window.shape[1]):\n\n # For now, perform log normalization on the each of the\n # 5 attributes.\n # window[:, i_stock, 0] = np.log(\n # window[:, i_stock, 0] / window[0, i_stock, 0])\n # window[:, i_stock, 1] = np.log(\n # window[:, i_stock, 1] / window[0, i_stock, 1])\n # window[:, i_stock, 2] = np.log(\n # window[:, i_stock, 2] / window[0, i_stock, 2])\n # window[:, i_stock, 3] = np.log(\n # window[:, i_stock, 3] / window[0, i_stock, 3])\n\n # Volume is unused; set it to 0 for now.\n window[:, i_stock, 4] = 0.0\n\n # The example is the sequence for the forecasting period.\n seq_x = window[0:forecast_len, :, :]\n\n # The label is the following timeseries, over which the security\n # is held before being sold.\n seq_y = window[forecast_len:forecast_len + hold_len, :, :]\n\n # Just predict closing (log-normed) price.\n target = np.sum(seq_y[:, :, 3], axis=0)\n# target = (seq_y[hold_len - 1, :, 0] -\n# seq_y[0, :, 0]) / seq_y[0, :, 0]\n\n seq_x = torch.FloatTensor(seq_x)\n target = torch.FloatTensor([target])\n\n self.data[mode].append((seq_x, target))\n\n def train_view(self, num_samples=None):\n \"\"\"\n Returns a view on dataset with the train portion.\n \"\"\"\n\n return DatasetView(self, 'train')\n\n def validate_view(self, num_samples=None):\n \"\"\"\n Returns a view on dataset with the validation portion.\n \"\"\"\n\n return DatasetView(self, 'validate')\n\n def test_view(self, num_samples=None):\n \"\"\"\n Returns a view on dataset with test portion.\n \"\"\"\n\n return DatasetView(self, 'test')\n\n def get_stock_labels(self):\n \"\"\"\n Gets the ticker label for the stocks in dataset.\n \"\"\"\n\n return self.stock_labels\n\n def time_domain(self):\n \"\"\"\n Gets the dates which form the time domain for the datset's datpoints.\n \"\"\"\n\n return self.t_dom\n\n\ndata = TestDataset2(5, (8, 1, 1), 200, 10)\n\ntrain = data.train_view()\n\nx, y = next(iter(train))\n\nsymbols = data.get_stock_labels()\n\npprint(x)\n\nfor i in range(5):\n plt.plot(x[:, i, 3], lw=0.4, label=symbols[i])\nplt.legend()\nplt.show()\n","sub_path":"src/datasets/test_dataset2.py","file_name":"test_dataset2.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"479275864","text":"import NRPy_param_funcs as par\n# The indexedexp module defines various functions for defining and managing indexed quantities like tensors and pseudotensors\nimport indexedexp as ixp\n# The grid module defines various parameters related to a numerical grid or the dimensionality of indexed expressions\n# For example, it declares the parameter DIM, which specifies the dimensionality of the indexed expression\nimport grid as gri\nimport finite_difference as fin\nfrom outputC import *\nimport sympy\nfrom sympy import symbols, IndexedBase, Indexed, Idx, preorder_traversal\nimport numpy as np\n\nfrom sympy.printing.cxxcode import *\nfrom sympy.printing.fcode import FCodePrinter\n\nclass CustomCXX17Printer(CXX17CodePrinter):\n def _print_Indexed(self, expr):\n return FCodePrinter._print_Indexed(self, expr)\n \nprinter = CustomCXX17Printer()\n\nNx, Ny, Nz, Nn= symbols('Nx Ny Nz Nn', integer=True)\ni = Idx('i', Nx)\nj = Idx('j', Ny)\nk = Idx('k', Nz)\nn = 
Idx('n', Nn)\ndx, dy, dz = symbols('dx dy dz')\n\ndirections = ['x','y','z']\n\ndef shift(E, idx_shift):\n # This function takes a generic Sympy expression and\n # returns a new Sympy expression where every Sympy Indexed\n # object in E has been shifted by idx_shift.\n # - idx_shift should be of length D, the dimension of E\n \n def shift_indexed(S, idx_shift):\n # This function returns a new IndexedBase object with shifted indices\n # - S should be a Sympy Indexed object\n # - idx_shift should be a tuple or list of index offsets to apply\n # - idx_shift should be of length D, the dimension of S\n base = S.base\n indices = [si + di for si, di in zip(S.indices, idx_shift)]\n return base[indices]\n\n return E.replace(lambda expr: type(expr) == Indexed, lambda expr: shift_indexed(expr, idx_shift))\n\ndef Diff1(E, difftype, order):\n fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl(difftype,FDORDER=order)\n if int(difftype[2]) == 0:\n delta = dx\n elif int(difftype[2]) == 1:\n delta = dy\n elif int(difftype[2]) == 2:\n delta = dz\n \n shiftE = 0\n for i in range(len(fdcoeffs)):\n shiftE += fdcoeffs[i]*shift(E,fdstencl[i])\n shiftE = shiftE/delta\n return shiftE\n\ndef Diff2(E, difftype, order):\n fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl(difftype,FDORDER=order)\n if int(difftype[3]) == 0:\n delta = dx\n elif int(difftype[3]) == 1:\n delta = dy\n elif int(difftype[3]) == 2:\n delta = dz\n if int(difftype[4]) == 0:\n delta *= dx\n elif int(difftype[4]) == 1:\n delta *= dy\n elif int(difftype[4]) == 2:\n delta *= dz\n \n shiftE = 0\n for i in range(len(fdcoeffs)):\n shiftE += fdcoeffs[i]*shift(E,fdstencl[i])\n shiftE = shiftE/delta\n return shiftE\n\ndef Diffup1(E, dir, order):\n fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl('dD'+str(dir),FDORDER=order)\n if dir == 0:\n delta = dx\n elif dir == 1:\n delta = dy\n elif dir == 2:\n delta = dz\n \n shiftE = 0\n for i in range(len(fdcoeffs)):\n shiftE += fdcoeffs[i]*shift(E,fdstencl[i])\n shiftE = shiftE/delta\n return shiftE\n\ndef Diffdn1(E, dir, order):\n fdcoeffs, fdstencl = fin.compute_fdcoeffs_fdstencl('ddnD'+str(dir),FDORDER=order)\n if dir == 0:\n delta = dx\n elif dir == 1:\n delta = dy\n elif dir == 2:\n delta = dz\n \n shiftE = 0\n for i in range(len(fdcoeffs)):\n shiftE += fdcoeffs[i]*shift(E,fdstencl[i])\n shiftE = shiftE/delta\n return shiftE\n\ndef KOdiss(E, dir, order, sigma=0.1):\n if dir == 0:\n delta = dx\n elif dir == 1:\n delta = dy\n elif dir == 2:\n delta = dz\n r = int((2+order)/2)\n for i in range(r):\n E = sp.simplify(Diffdn1(E,dir,1))\n for i in range(r):\n E = sp.simplify(Diffup1(E,dir,1)) \n E = (-1)**(r+1)/(2**(2*r))*delta**(2*r-1)*sigma*E\n return E\n\ndef Dc(E, direction):\n assert(direction == 'x' or direction == 'y' or direction == 'z')\n if direction == 'x':\n shift_hi = (1, 0, 0, 0)\n shift_lo = (-1, 0, 0, 0)\n delta = dx\n elif direction == 'y':\n shift_hi = (0, 1, 0, 0)\n shift_lo = (0, -1, 0, 0)\n delta = dy\n elif direction == 'z':\n shift_hi = (0, 0, 1, 0)\n shift_lo = (0, 0, -1, 0)\n delta = dz\n return (shift(E, shift_hi) - shift(E, shift_lo))/(2 * delta)\n\ndef Dc2(E, direction):\n assert(direction == 'x' or direction == 'y' or direction == 'z')\n if direction == 'x':\n shift_hi = (1, 0, 0, 0)\n shift_lo = (-1, 0, 0, 0)\n delta = dx*dx\n elif direction == 'y':\n shift_hi = (0, 1, 0, 0)\n shift_lo = (0, -1, 0, 0)\n delta = dy*dy\n elif direction == 'z':\n shift_hi = (0, 0, 1, 0)\n shift_lo = (0, 0, -1, 0)\n delta = dz*dz\n return (shift(E, shift_hi) - 2*E + shift(E, shift_lo))/(delta)\n\ndef 
DcTen(E, direction):\n retE = ixp.zerorank1(len(E))\n for itr in range(len(E)):\n retE[itr] = Dc(E[itr], direction)\n return retE\n\ndef Dc2Ten(E, direction):\n retE = ixp.zerorank1(len(E))\n for itr in range(len(E)):\n retE[itr] = Dc2(E[itr], direction)\n return retE\n\ndef grad(phi):\n retGradPhi = ixp.zerorank1(len(directions))\n for itr in range(len(directions)):\n retGradPhi[itr] = Dc(phi, directions[itr])\n return retGradPhi\n\ndef Lap(phi):\n return Dc2(phi,'x')+Dc2(phi,'y')+Dc2(phi,'z') \n \ndef div(E):\n div = 0\n for itr in range(len(E)):\n div += Dc(E[itr],directions[itr])\n return div\n\ndef LapTen(E):\n retLapE = ixp.zerorank1()\n for itr in range(len(directions)):\n retLapE += np.array(Dc2Ten(E,directions[itr]))\n return retLapE\n\ndef der(var,direction):\n dvar = symbols('d'+str(var)+str(direction))\n return dvar\n\ndef AMReXcode(expr, varnames= \"\", declare_rhs = False, rhsname = \"\", declare_state = False, statename = \"\"):\n str_expr = str(printer.doprint(expr))\n \n #str_expr = str_expr.replace(\"[\",\"(\").replace(\"]\",\")\")\n str_expr = str_expr.replace(\"dx\",\"dx[0]\")\n str_expr = str_expr.replace(\"dy\",\"dx[1]\")\n str_expr = str_expr.replace(\"dz\",\"dx[2]\")\n str_expr = str_expr.replace(\"dx[0]**2\",\"(dx[0]*dx[0])\")\n str_expr = str_expr.replace(\"dx[1]**2\",\"(dx[1]*dx[1])\")\n str_expr = str_expr.replace(\"dx[2]**2\",\"(dx[2]*dx[2])\")\n str_expr = str_expr.replace(\"pi\",\"M_PI\")\n str_expr = str_expr+\";\"\n for name in varnames:\n str_expr = str_expr.replace('state_fab'+name,'state_fab')\n for name in varnames:\n str_expr = str_expr.replace(name,\"Idx::\"+name)\n \n if declare_rhs == True:\n str_expr = \"rhs_fab(i, j, k, Idx::\"+rhsname+ \") = \" + str_expr\n \n if declare_state == True:\n str_expr = \"state_fab(i, j, k, Idx::\"+statename+ \") = \" + str_expr\n \n return str_expr\n \ndef createSETUP(name, varnames, diagnames, nghostcells):\n fileSETUP = open(name, \"w+\")\n fileSETUP.write(\"#ifndef ET_INTEGRATION_SETUP_K_H \\n\")\n fileSETUP.write(\"#define ET_INTEGRATION_SETUP_K_H \\n\\n\")\n\n fileSETUP.write(\"#include \\n\")\n fileSETUP.write(\"#include \\n\\n\")\n \n fileSETUP.write(\"namespace Idx { \\n\")\n fileSETUP.write(\" enum ETIndexes {\")\n \n Idx_string = \"\"\n for itr in varnames:\n Idx_string += itr+\", \"\n Idx_string += \"NumScalars\"\n \n fileSETUP.write(Idx_string)\n fileSETUP.write(\"}; \\n};\\n\\n\")\n \n fileSETUP.write(\"namespace Diag { \\n\")\n fileSETUP.write(\" enum DiagnosticIndexes {\")\n \n Idx_string = \"\"\n for itr in diagnames:\n Idx_string += itr+\", \"\n Idx_string += \"NumScalars\"\n \n fileSETUP.write(Idx_string)\n fileSETUP.write(\"}; \\n};\\n\\n\")\n \n fileSETUP.write(\"#define NUM_GHOST_CELLS \"+str(nghostcells)+\"\\n\\n\")\n fileSETUP.write(\"#endif\")\n\n fileSETUP.close()\n\ndef createVARIABLES(name,varnames):\n fileVARS = open(name,\"w+\")\n fileVARS.write(\"names = {\")\n for itr in range(len(varnames)-1):\n fileVARS.write(\"\\\"\"+varnames[itr]+\"\\\", \")\n fileVARS.write(\"\\\"\"+varnames[len(varnames)-1]+\"\\\"\")\n fileVARS.write(\"};\")\n fileVARS.close()\n \n\ndef createRHS(name):\n fileRHS = open(name, \"w+\")\n fileRHS.write(\"#ifndef ET_INTEGRATION_RHS_K_H \\n\")\n fileRHS.write(\"#define ET_INTEGRATION_RHS_K_H \\n\\n\")\n\n fileRHS.write(\"#include \\n\")\n fileRHS.write(\"#include \\n\")\n fileRHS.write(\"#include \\n\\n\")\n\n fileRHS.write(\"AMREX_GPU_DEVICE \\ninline \\nvoid \\n\")\n fileRHS.write(\"state_rhs(int i, int j, int k, \\n\")\n fileRHS.write(\" amrex::Array4 const& 
rhs_fab, \\n\")\n fileRHS.write(\" amrex::Array4 const& state_fab, \\n\")\n fileRHS.write(\" amrex::GpuArray const& dx) noexcept \\n{\\n\")\n fileRHS.close()\n\ndef addRHS(name,RHS):\n fileRHS = open(name,\"a+\")\n fileRHS.write(\" \"+RHS+\"\\n\\n\")\n fileRHS.close()\n \ndef finishRHS(name):\n fileRHS = open(name, \"a+\")\n fileRHS.write(\"}\\n\")\n fileRHS.write(\"#endif\")\n fileRHS.close()\n \n \ndef createINIT(name):\n fileINIT = open(name, \"w+\")\n fileINIT.write(\"#ifndef ET_INTEGRATION_INIT_K_H \\n\")\n fileINIT.write(\"#define ET_INTEGRATION_INIT_K_H \\n\\n\")\n\n fileINIT.write(\"#include \\n\")\n fileINIT.write(\"#include \\n\")\n fileINIT.write(\"#include \\n\\n\")\n\n fileINIT.write(\"AMREX_GPU_DEVICE \\ninline \\nvoid \\n\")\n fileINIT.write(\"state_init(int i, int j, int k, \\n\")\n fileINIT.write(\" amrex::Array4 const& state_fab, \\n\")\n fileINIT.write(\" amrex::Real time, const amrex::GeometryData& geom) noexcept \\n{\\n\")\n fileINIT.write(\" const auto domain_xlo = geom.ProbLo(); \\n\\n\")\n fileINIT.write(\" amrex::Real x = (i + 0.5)*geom.CellSize(0) + domain_xlo[0];\\n\")\n fileINIT.write(\" amrex::Real y = (j + 0.5)*geom.CellSize(1) + domain_xlo[1];\\n\")\n fileINIT.write(\" amrex::Real z = (k + 0.5)*geom.CellSize(2) + domain_xlo[2];\\n\\n\")\n fileINIT.close()\n\ndef addINIT(name,INIT):\n fileINIT = open(name,\"a+\")\n fileINIT.write(\" \"+INIT+\"\\n\\n\")\n fileINIT.close()\n \ndef finishINIT(name):\n fileINIT = open(name, \"a+\")\n fileINIT.write(\"}\\n\")\n fileINIT.write(\"#endif\")\n fileINIT.close()\n \n \n \n \n \n","sub_path":"AMReXCodeGen/FunctionSet.py","file_name":"FunctionSet.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"614116846","text":"import pypco\nimport os\nfrom plugins.pco import msg_attachment\nimport datetime\n\n\n\n# You need to put your Personal access token Application key and secret in your environment variables.\n# Get a Personal Access Key: https://api.planningcenteronline.com/oauth/applications\n# Application ID: WILL_PCO_APPLICATION_KEY environment variable\n# Secret: WILL_PCO_API_SECRET environment variable\n\npco = pypco.PCO(os.environ[\"WILL_PCO_APPLICATION_KEY\"], os.environ[\"WILL_PCO_API_SECRET\"])\n\n\ndef get_plan_item(service_type, plan, item):\n time_to_end = 0\n length = 0\n title = \"\"\n for p in pco.services.service_types.get(item_id=service_type).rel.plans.list():\n if p.id == plan:\n for items in p.rel.items.list():\n if items.id == item:\n length = datetime.timedelta(seconds=items.length)\n title = items.title\n time_to_end += items.length\n elif int(items.id) > int(item):\n time_to_end += items.length\n text = f\"Live Service Update:\\n\\nStarted: {title} - {length}\\n\" \\\n f\"Planned Time Left - {datetime.timedelta(seconds=time_to_end)}\"\n attachment = msg_attachment.\\\n SlackAttachment(fallback=text,\n pco='services',\n text=text,\n button_text=\"Open Live\",\n button_url=f\"https://services.planningcenteronline.com/live/{plan}\")\n return attachment\n return \"No Item Found\"\n\n\ndef parse_live_hook(data):\n # data = {'type': 'ItemTime', 'id': '109014325', 'attributes': {'exclude': False, 'length_offset': 0, 'live_end_at': None, 'live_start_at': '2019-04-30T17:31:07Z'}, 'relationships': {'item': {'data': {'type': 'Item', 'id': '568592902'}}, 'plan_time': {'data': {'type': 'PlanTime', 'id': '101382539'}}, 'plan': {'data': {'type': 'Plan', 'id': '41984194'}}}, 'links': {'self': 
'https://api.planningcenteronline.com/services/v2/service_types/793678/plans/41984194/live/current_item_time'}}\n meta_data = {}\n meta_data['plan_id'] = data['relationships']['plan']['data']['id']\n meta_data['service_type'] = data['links']['self'].split('/')[6]\n meta_data['item_id'] = data['relationships']['item']['data']['id']\n return meta_data\n\n\nif __name__ == '__main__':\n # attachment = get_plan_item(service_type=793678, plan='41984194', item='568592902')\n # print(attachment.slack())\n data = {'type': 'ItemTime', 'id': '109014325', 'attributes': {'exclude': False, 'length_offset': 0, 'live_end_at': None, 'live_start_at': '2019-04-30T17:31:07Z'}, 'relationships': {'item': {'data': {'type': 'Item', 'id': '568592902'}}, 'plan_time': {'data': {'type': 'PlanTime', 'id': '101382539'}}, 'plan': {'data': {'type': 'Plan', 'id': '41984194'}}}, 'links': {'self': 'https://api.planningcenteronline.com/services/v2/service_types/793678/plans/41984194/live/current_item_time'}}\n meta_data = parse_live_hook(data)\n attachment = get_plan_item(meta_data['service_type'], meta_data['plan_id'], meta_data['item_id'])\n print(attachment.slack())\n","sub_path":"plugins/pco/live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257132582","text":"import tensorflow as tf\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.framework import ops\nfrom tensorflow.contrib.layers.python.layers import utils as tf_utils\n\ndef spectral_norm(w, is_training, iteration=1):\n # https://github.com/ANIME305/Anime-GAN-tensorflow/blob/master/layers.py\n if iteration != 1:\n raise NotImplementedError\n \n w_shape = tf.shape(w)\n w = tf.reshape(w, [-1, w_shape[-1]]) # [N, output_filters] \n # N = kernel_size*kernel_size*input_filters\n\n u = tf.get_variable(\"u\", [1, w_shape[-1]], \n initializer=tf.truncated_normal_initializer(),\n trainable=False) # [1, output_filters]\n\n u_norm = u\n v_norm = None\n \n v_ = tf.matmul(u_norm, w, \n transpose_b=True) # [1, N]\n v_norm = l2_norm(v_)\n\n u_ = tf.matmul(v_norm, w) # [1, output_filters]\n u_norm = l2_norm(u_)\n\n # Au=λ1u u⊤Au=λ1u⊤u=λ1\n sigma = tf.matmul(tf.matmul(v_norm, w), u_norm, \n transpose_b=True) # [1,1]\n w_norm = w / sigma\n\n # Update estimated 1st singular vector while training\n with tf.control_dependencies([tf.cond(is_training,\n true_fn=lambda: u.assign(u_norm), \n false_fn=lambda: u.assign(u))]):\n w_norm = tf.reshape(w_norm, w_shape)\n\n return w_norm\n\n\ndef spectral_conv2d(x, filters, kernel_size, stride, is_training, padding='SAME', scope='conv2d'):\n with tf.variable_scope(scope):\n shape = tf.shape(x)\n w = tf.get_variable(\"w\",\n shape=[kernel_size, kernel_size, shape[-1], filters],\n initializer=weight_init,\n regularizer=weight_regularizer)\n x = tf.nn.conv2d(input=x,\n filter=spectral_norm(w, is_training),\n strides=[1, stride, stride, 1],\n padding=padding)\n \n bias = tf.get_variable(\"b\", [filters], initializer=tf.constant_initializer(0.0))\n x = tf.nn.bias_add(x, bias)\n return x\n","sub_path":"spectal_conv/spectral_conv.py","file_name":"spectral_conv.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200609544","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.datasets import mnist\nfrom 
tensorflow.keras.callbacks import History\nhistory = History()\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom statistics import mean\nfrom regression import run_regression\nfrom add_noise import addnoise\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n\nclass AutoEncoder:\n def __init__(self, layer_size, df):\n self.df = df\n self.create_model(layer_size) # specify hidden layer size\n\n def import_data(self):\n path = os.getcwd()+'/noisy_data.csv'\n df = pd.read_csv(path, index_col=0)\n return df\n\n def create_model(self, layer_size):\n train, test = train_test_split(self.df, test_size=0.2)\n input_size = self.df.shape[1]\n\n inputs = Input(shape=(input_size,))\n encoded = Dense(layer_size)(inputs)\n self.encoder = Model(inputs, encoded) # map input to small dimension\n\n encoded_input = Input(shape=(layer_size,))\n decoded = Dense(input_size)(encoded_input)\n self.decoder = Model(encoded_input, decoded)\n\n self.autoencoder = Model(inputs, self.decoder(self.encoder(inputs)))\n self.autoencoder.compile(optimizer='adam', loss='mse')\n\n # train\n history = self.autoencoder.fit(train, train, epochs=100, batch_size=32, shuffle=True, validation_data=(test, test), verbose=0)\n self.loss_values = history.history\n\n encoded_imgs = self.encoder.predict(train)\n decoded_imgs = self.decoder.predict(encoded_imgs)\n \n # min_max_scaler = preprocessing.MinMaxScaler()\n # x_scaled = min_max_scaler.fit_transform(x)\n \n #export as csv\n self.export_df = pd.DataFrame(decoded_imgs)\n self.export_df.to_csv(r'reconstructed.csv')\n\n # enc = pd.DataFrame(encoded_imgs)\n # enc.to_csv(r'x_encoded.csv')\n\n def get_loss(self):\n test_loss = list(self.loss_values['val_loss'])\n train_loss = list(self.loss_values['loss'])\n test_mean = mean(test_loss[-10:])\n train_mean = mean(train_loss[-10:])\n return test_mean, train_mean\n\n\nif __name__ == \"__main__\":\n # test = []\n # train = []\n # subtest = []\n # subtrain = []\n # bottleneck = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15, 17, 20, 30, 40, 50]\n # mse = []\n # mse_temp = []\n\n # for i in bottleneck:\n # print('Iteration:', i)\n # for x in range(5):\n # print('Sub-Iteration:', x)\n # ae = AutoEncoder(i)\n # mse_temp.append(run_regression())\n # # x, y = ae.get_loss()\n # # subtest.append(x)\n # # subtrain.append(y)\n # # # bottleneck.append(i)\n # mse.append(mean(mse_temp))\n # mse_temp.clear()\n # # train.append(mean(subtrain))\n # # subtest.clear()\n # # subtrain.clear()\n \n #noisy pipeline\n noisy_mse = []\n norm_mse = []\n \n # x = [0.1, 0.2, 0.3, 0.4, 0.5]\n\n for _ in range(2):\n # path = os.getcwd()+'/noisy_data.csv'\n # df_noisy = pd.read_csv(path, index_col=0)\n # ae = AutoEncoder(10, df_noisy)\n # noisy_mse.append(run_regression(ae.export_df))\n \n path = os.getcwd()+'/x_modified.csv'\n df_norm = pd.read_csv(path, index_col=0)\n\n noisy_mse.append(run_regression(df_norm))\n\n\n path = os.getcwd()+'/reconstructed.csv'\n df2 = pd.read_csv(path, index_col=0)\n \n ae2 = AutoEncoder(10, df2)\n norm_mse.append(run_regression(ae2.export_df))\n\n # run_regression(df_noisy)\n # run_regression(df_norm)\n \n x = [mean(noisy_mse), mean(norm_mse)]\n \n plt.bar(['original', 'decoded'], x)\n plt.show()\n","sub_path":"Archive/AE_2.py","file_name":"AE_2.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"344367842","text":"with open('IntegerArray.txt', 'r') as f:\n\tcontent = 
[int(line.rstrip('\\r\\n')) for line in f]\n\tprint(\"num of elements in document: \" + str(len(content)))\n\ninversion = 0\n\ndef merge_sort(unsorted_list):\n\tsize = len(unsorted_list)\n\tif size <= 1:\n\t\treturn unsorted_list #consider it sorted\n\n\tmid = int(size / 2)\n\n\tleft = unsorted_list[:mid]\n\tright = unsorted_list[mid:]\n\n\tleft = merge_sort(left)\n\tright = merge_sort(right)\n\n\treturn merge(left,right)\n\ndef merge (left,right):\n\tresult = []\n\twhile len(left) > 0 or len(right) > 0: #mientras alguno de los dos tiene elementos\n\t\tif len(left) > 0 and len(right) > 0:\n\t\t\tif left[0] <= right[0]:\n\t\t\t\tresult.append(left[0])\n\t\t\t\tleft.remove(left[0])\n\t\t\telse:\n\t\t\t\tglobal inversion\n\t\t\t\tinversion += len(left) #key instruction\n\t\t\t\tresult.append(right[0])\n\t\t\t\tright.remove(right[0])\n\t\telif len(left) > 0:\n\t\t\tresult += left\n\t\t\tleft = []\n\t\telif len(right) > 0:\n\t\t\tresult += right\n\t\t\tright = []\n\n\treturn result\n\n\n\ndata = merge_sort(content)\nprint(inversion)","sub_path":"findinversions.py","file_name":"findinversions.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"240656894","text":"from django.test import SimpleTestCase\nfrom eulxml.xpath import parse as parse_xpath\nfrom testil import eq\n\nfrom corehq.apps.case_search.xpath_functions.ancestor_functions import is_ancestor_comparison, \\\n _is_ancestor_path_expression\nfrom corehq.util.test_utils import generate_cases\n\n\nclass TestIsAncestorPath(SimpleTestCase):\n @generate_cases([\n (\"parent/name\", False),\n (\"parent/host/name\", False),\n (\"parent/host/@case_id\", False),\n (\"parent\", False),\n (\"parent = 'bob'\", False),\n (\"parent/name = 'bob'\", True),\n (\"parent/host/name = 'bob'\", True),\n ])\n def test_is_ancestor_query(self, expression, expected):\n node = parse_xpath(expression)\n eq(is_ancestor_comparison(node), expected)\n\n @generate_cases([\n (\"parent/name\", True),\n (\"parent/host/name\", True),\n (\"parent/host/parent/@case_id\", True),\n (\"parent\", False),\n (\"parent = 'bob'\", False),\n (\"parent/name = 'bob'\", False),\n (\"parent/host/name = 'bob'\", False),\n ])\n def test_is_ancestor_path_expression(self, expression, expected):\n node = parse_xpath(expression)\n eq(_is_ancestor_path_expression(node), expected)\n","sub_path":"corehq/apps/case_search/tests/test_ancestor_functions.py","file_name":"test_ancestor_functions.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"434215774","text":"import nltk\nfrom pprint import pprint\n\n# Download required nltk data\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('wordnet')\n\n# Tokenizing module\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n# Stopwords module\nfrom nltk.corpus import stopwords\nfrom string import punctuation\n# Bigrams module\nfrom nltk.collocations import *\n# Stemming module\nfrom nltk.stem.lancaster import LancasterStemmer\n# Disambiguation module\nfrom nltk.corpus import wordnet as wn\nfrom nltk.wsd import lesk\n\nclass nlbasics():\n\n # Tokenizing text\n def do_tokenize(self, sample):\n # text = input (\"Enter your text to tokenize here: \")\n\n sents = sent_tokenize(sample)\n # print(sents)\n\n words = [word_tokenize(sent) for sent in sents]\n return words\n\n # Removing stopwords\n def 
do_removestopwords(self):\n text = input (\"Enter your text here: \")\n\n customStopWords = set(stopwords.words('english')+list(punctuation))\n wordsWOStopwords = [word for word in word_tokenize(text) if word not in customStopWords]\n return wordsWOStopwords\n\n #Identifying bigrams\n def do_identifybigrams(self):\n text = input (\"Enter your text here: \")\n\n customStopWords = set(stopwords.words('english')+list(punctuation))\n wordsWOStopwords = [word for word in word_tokenize(text) if word not in customStopWords]\n\n bigram_measures = nltk.collocations.BigramAssocMeasures()\n finder = BigramCollocationFinder.from_words(wordsWOStopwords)\n return finder.ngram_fd.items()\n\n # Stemming\n def do_stemming(self):\n text = input (\"Enter your text here: \")\n\n st = LancasterStemmer()\n stemmedWords = [st.stem(word) for word in word_tokenize(text)]\n return stemmedWords\n\n # POS Tagging\n def do_POStagging(self):\n text = input (\"Enter your text here: \")\n\n tagList = nltk.pos_tag(word_tokenize(text))\n return tagList\n\n # Disambiguating word meanings\n def do_worddisambiguation(self):\n word = input (\"Enter your word here: \")\n for ss in wn.synsets(word):\n print(ss, ss.definition())\n\n text2 = input (\"Enter a sentence with that previous word: \")\n contextualsentence = lesk(word_tokenize(text2), word)\n return [contextualsentence, contextualsentence.definition()]\n\nif __name__ == '__main__':\n menu = \"\"\"\n 1: Tokenize text\n 2: Remove stopwords\n 3: Find bigrams\n 4: Word stemmer\n 5: Part of speech tagging\n 6: Word disambiguation (must enter single word)\n \"\"\"\n\n print(menu)\n\n mymenu = nlbasics()\n\n choice = input (\"Input your choice [1]: \" )\n\n if choice == \"2\":\n print(mymenu.do_removestopwords())\n elif choice == \"3\":\n print(mymenu.do_identifybigrams())\n elif choice == \"4\":\n print(mymenu.do_stemming())\n elif choice == \"5\":\n print(mymenu.do_POStagging())\n elif choice == \"6\":\n print(mymenu.do_worddisambiguation())\n else:\n # do_tokenize requires the text to tokenize as an argument\n print(mymenu.do_tokenize(input(\"Enter your text to tokenize here: \")))\n","sub_path":"textsummarizer/nlbasics.py","file_name":"nlbasics.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"137684148","text":"# Convert the png annotations to json file.\n# Refer: https://github.com/CSAILVision/placeschallenge\n# Created by Xu Ma.\n# Date: July 04 2020\n\nimport os\nimport glob\nimport argparse\nimport json\nimport numpy as np\nfrom pycocotools import mask as COCOmask\n\n# strict mapping class (kept for reference; the extended list below is the one used)\n# split_coco_id_24classes = [60, 1, 61, 57, 3, 72, 73, 62, 74, 14, 64, 9, 6, 8, 5,\n# 40, 70, 33, 69, 2, 63, 76, 10, 75 ]\nsplit_coco_id_24classes = [60, 1, 61, 57, 3, 72, 73, 62, 74, 14, 64, 9, 6, 8, 5,\n 40, 70, 33, 69, 2, 63, 76, 10, 75,58, 12, 25, 47, 48,\n 49, 50, 51, 52, 53, 54, 55, 56, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Evaluation demo')\n parser.add_argument('--ann_file', default='/Users/melody/Downloads/instances_val2017.json') # CHANGE ACCORDINGLY\n parser.add_argument('--output_overlap_json', default='/Users/melody/Downloads/instances_val2017_24classes.json')\n parser.add_argument('--output_rest__json', default='/Users/melody/Downloads/instances_val2017_76classes.json')\n # parser.add_argument('--parsing_2coco', action='store_true', help='Parsing ADE20K cat_id to COCO id.')\n args = parser.parse_args()\n return args\n\n\ndef convert(args):\n data_dict = 
json.load(open(args.ann_file, 'r'))\n images = data_dict['images']\n licenses = data_dict['licenses']\n info = data_dict['info']\n categories = data_dict['categories']\n annotations = data_dict['annotations']\n print('#Images: {}, # totally instances: {}'.format(len(images), len(annotations)))\n\n overlap_ann = []\n rest_ann = []\n for i in range(0,len(annotations)):\n if i % 100 == 0:\n print('#files processed: {}'.format(i))\n if annotations[i]['category_id']in split_coco_id_24classes:\n overlap_ann.append(annotations[i])\n else:\n rest_ann.append(annotations[i])\n\n overlap_out = {'licenses': licenses,\n 'categories': categories,\n 'images': images,\n 'annotations': overlap_ann,\n 'info': info\n }\n rest_out = {'licenses': licenses,\n 'categories': categories,\n 'images': images,\n 'annotations': rest_ann,\n 'info': info\n }\n print(\"{}: instance: {}\".format(args.output_overlap_json, len(overlap_ann)))\n with open(args.output_overlap_json, 'w') as f:\n json.dump(overlap_out, f)\n print(\"{}: instance: {}\".format(args.output_rest__json, len(rest_ann)))\n with open(args.output_rest__json, 'w') as f:\n json.dump(rest_out, f)\n\n\nif __name__ == '__main__':\n args = parse_args()\n convert(args)\n","sub_path":"mmdet/datasets/ADE20k_process/coco_split.py","file_name":"coco_split.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349819840","text":"from .__version__ import __version__\n\nimport pytest\n\n\ndef pytest_configure(config):\n \"\"\"Register the \"run\" marker.\"\"\"\n\n config_line = (\n 'blocker: specify a blocker test. '\n 'See also: http://pytest-blocker.readthedocs.org/'\n )\n config.addinivalue_line('markers', config_line)\n\n\ndef pytest_runtest_makereport(item, call, __multicall__):\n # get current report status from _pytest.runner.pytest_runtest_makereport\n report = __multicall__.execute()\n if report.failed and item.get_marker('blocker'):\n skip_reason = \"Blocker test {0} failed, skipping remaining tests.\".format(item.name)\n for test in item.session.items:\n if test.location[0] == item.location[0]:\n test.add_marker(pytest.mark.skipif(True, reason=skip_reason))\n return report\n","sub_path":"pytest_blocker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570683992","text":"#%% - такая конструкция нужна, чтобы запускать ячейки в VSCode\nimport datetime\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import GridSearchCV, KFold, cross_val_predict\nfrom sklearn.preprocessing import StandardScaler\n\npath = 'week7/' # папка, в которой лежит features.csv и features_test.csv\n\n### ПОДХОД №1 ###\n\n#%% Ссчитаем таблицу с признаками\nfeatures = pd.read_csv(path + 'features.csv', index_col='match_id')\n\n#%% Удалим стобцы с итогами матча, кроме целевой переменной\ncol_end_drop = ['duration', \n 'tower_status_radiant', \n 'tower_status_dire', \n 'barracks_status_radiant', \n 'barracks_status_dire']\nfeatures.drop(col_end_drop, axis = 1, inplace = True)\n\n#%% Посчитаем пропуски в столбцах\nempty_val_col = dict()\ncnt = features.count()\nfor i in range(features.shape[1]):\n if cnt[i] < features.shape[0]:\n empty_val_col[features.columns[i]] = features.shape[0] 
- cnt[i]\nprint(*empty_val_col)\n\n#%% Заполним пропуски нулями\nfeatures_fill = features.fillna(0)\n\n#%% Разделим признаки и целевую переменную\ny = features_fill['radiant_win']\nX = features_fill.iloc[:, :-1]\n\n#%% Оценим время и качество классификации GB по метрике roc_auc_score\nresults = pd.DataFrame(columns=['N_estimators', 'mean_quality', 'time'])\nnum_estiramors = [100]\nquality = []\ntimes = []\nfor num in num_estiramors:\n clf = GradientBoostingClassifier(n_estimators=num, random_state=1)\n kf = KFold(n_splits=5, shuffle=True, random_state=1)\n \n start_time = datetime.datetime.now()\n res = cross_val_predict(clf, X, y, cv=kf, method='predict_proba')\n elapsed_time = datetime.datetime.now() - start_time\n\n roc_sc = roc_auc_score(y, res[:, 1])\n \n quality.append(roc_sc)\n times.append(elapsed_time)\n\nfor i in range(len(num_estiramors)):\n results.loc[i] = [num_estiramors[i], quality[i], times[i]]\nprint(results)\n\n### ПОДХОД №2 ###\n\n#%% 1. Логистическая регрессия на отмасштабированных данных\nX = features_fill.iloc[:, :-1]\ny = features_fill.iloc[:, -1]\nsc = StandardScaler()\nX_scaled = sc.fit_transform(X)\n\nclf = LogisticRegression(penalty='l2')\nC = {'C': np.power(10., np.arange(-3, 1))}\nkf = KFold(n_splits=5, shuffle=True, random_state=1)\ngs = GridSearchCV(clf, C, scoring='roc_auc', cv=kf)\nstart_time = datetime.datetime.now()\ngs.fit(X_scaled, y)\n\nelapsed_time = datetime.datetime.now() - start_time\nbest_c = gs.best_params_['C']\nbest_score = gs.best_score_\n\nprint(best_c, elapsed_time, best_score, sep=' | ')\n#%% 2. ЛР на отмасштабированных данных, без категориальных признаков\nX = features_fill.iloc[:, :-1]\ny = features_fill.iloc[:, -1]\ndrop_cat_col = ['lobby_type',\n 'r1_hero', \n 'r2_hero', \n 'r3_hero', \n 'r4_hero', \n 'r5_hero', \n 'd1_hero', \n 'd2_hero', \n 'd3_hero', \n 'd4_hero', \n 'd5_hero']\nX.drop(columns=drop_cat_col, axis=1, inplace=True)\nsc = StandardScaler()\nX_scaled = sc.fit_transform(X)\n\nclf = LogisticRegression(penalty='l2')\nC = {'C': np.power(10., np.arange(-3, 1))}\nkf = KFold(n_splits=5, shuffle=True, random_state=1)\ngs = GridSearchCV(clf, C, scoring='roc_auc', cv=kf)\nstart_time = datetime.datetime.now()\ngs.fit(X_scaled, y)\n\nelapsed_time = datetime.datetime.now() - start_time\nbest_c = gs.best_params_['C']\nbest_score = gs.best_score_\n\nprint(best_c, elapsed_time, best_score, sep=' | ')\n\n#%% 3. Выясняем кол-во уникальных героев в игре\nX = features_fill.iloc[:, :-1]\ny = features_fill.iloc[:, -1]\n\nunique = set()\nfor i in drop_cat_col[1:]:\n unique.update(set(X[i]))\nprint('unique heroes in dataset:', len(unique), \"| unique heroes in game:\", max(unique))\n\n#%% 4. Мешок слов для кодирования информации о героях, матрица X_pick\nX_pick = np.zeros((X.shape[0], max(unique)))\n\nfor i, match_id in enumerate(X.index):\n for p in range(5):\n X_pick[i, X.loc[match_id, 'r%d_hero' % (p+1)]-1] = 1\n X_pick[i, X.loc[match_id, 'd%d_hero' % (p+1)]-1] = -1\n\n\nX.drop(columns=drop_cat_col, axis=1, inplace=True)\n\nX_with_hero = np.concatenate([X, X_pick], axis=1)\n\n#%% 5. 
Оценка алгоритма на преобразованных данных (матрица признаков + \"мешок слов\" по героям)\nsc = StandardScaler()\nX_scaled = sc.fit_transform(X_with_hero)\n\nclf = LogisticRegression(penalty='l2')\nC = {'C': np.power(10., np.arange(-3, 1))}\nkf = KFold(n_splits=5, shuffle=True, random_state=1)\ngs = GridSearchCV(clf, C, scoring='roc_auc', cv=kf)\nstart_time = datetime.datetime.now()\ngs.fit(X_scaled, y) \n\nelapsed_time = datetime.datetime.now() - start_time\nbest_c = gs.best_params_['C']\nbest_score = gs.best_score_\n\nprint(best_c, elapsed_time, best_score, sep=' | ')\n\n#%% 6. Построим предсказания для тестовой выборки\n# импортируем тестовую выборку\nX_test = pd.read_csv(path + 'features_test.csv', index_col='match_id')\n\nX_train = features_fill.iloc[:, :-1]\ny_train = features_fill.iloc[:, -1]\n# Заполним пропуски нулями в тестовой выборке\nX_test = X_test.fillna(0)\n\n# Для каждой выборки: из категориальных признаков \n# сделаем \"мешок слов\" и соединим с выборкой\n\nunique = set()\nfor i in drop_cat_col[1:]:\n unique.update(set(X_train[i]))\n unique.update(set(X_test[i]))\n\nX_pick = np.zeros((X_train.shape[0], max(unique)))\nfor i, match_id in enumerate(X_train.index):\n for p in range(5):\n X_pick[i, X_train.loc[match_id, 'r%d_hero' % (p+1)]-1] = 1\n X_pick[i, X_train.loc[match_id, 'd%d_hero' % (p+1)]-1] = -1\nX_train.drop(columns=drop_cat_col, axis=1, inplace=True)\nX_train_with_hero = np.concatenate([X_train, X_pick], axis=1)\n\nX_pick = np.zeros((X_test.shape[0], max(unique)))\nfor i, match_id in enumerate(X_test.index):\n for p in range(5):\n X_pick[i, X_test.loc[match_id, 'r%d_hero' % (p+1)]-1] = 1\n X_pick[i, X_test.loc[match_id, 'd%d_hero' % (p+1)]-1] = -1\nX_test.drop(columns=drop_cat_col, axis=1, inplace=True)\nX_test_with_hero = np.concatenate([X_test, X_pick], axis=1)\n\n# Отмасштабируем выборки\nsc = StandardScaler()\nX_train_scaled = sc.fit_transform(X_train_with_hero)\nX_test_scaled = sc.transform(X_test_with_hero)\n\n# Обучим классификатор\nclf = LogisticRegression(penalty='l2', C=best_c)\nclf.fit(X_train_scaled, y_train)\n# Получим прогноз для тестовой выборки\npredict = clf.predict_proba(X_test_scaled)[:, 1]\n# Выведем минимальное и максимальное значение прогноза для класса 1\nprint(min(predict), max(predict))\n","sub_path":"coursera-vvedenie-v-mashinnoe-obuchenie/week7/predict-the-winner.py","file_name":"predict-the-winner.py","file_ext":"py","file_size_in_byte":7383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"223122568","text":"\n\ndef findDiv(div):\n for i in divs:\n if str(i) in div:\n return i\n return -1\n\nif __name__ == '__main__':\n n = input()\n n = list(n)\n divs = [(8*(i)) for i in range(26)]\n can = -1\n\n #\n while (int(n[len(n)-1])%2 != 0 and len(n) > 2): n.pop()\n can = findDiv(\"\".join(n))\n if (int(\"\".join(n)) % 8 == 0):\n print(\"YES\\n{}\".format(\"\".join(n)))\n elif (can != -1):\n print(\"YES\\n{}\".format(can))\n else:\n aux = []\n num = len(n)-1\n while (num > 0):\n aux = n[:]\n aux.pop(num)\n if (int(\"\".join(aux)) % 8 == 0):\n can = \"\".join(aux)\n break\n else:\n can = findDiv(\"\".join(aux))\n if (can != -1): break\n num-=1\n if (can != -1):\n print(\"YES \\n{}\".format(can))\n else:\n print(\"NO\")\n","sub_path":"semanai/2/divisibility.py","file_name":"divisibility.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"578577551","text":" \nimport unittest\ndef shaiba(x, y, 
dx, dy):\n if x+dx < 100 and y+dy < 100:\n x=x+dx\n y=y+dy\n if x+dx > 100 and y+dy < 100:\n x = x + (-dx)\n y = y + dy \n if x+dx < 100 and y+dy > 100:\n x = x+dx\n y = y+(-dy)\n if x+dx > 100 and y+dy > 100: \n x = x+(-dx)\n y = y+(-dy)\n return x, y, dx, dy\n\nclass TestShaiba(unittest.TestCase):\n def test_next_shaiba_calculated_plus_speed(self): \n self.assertEqual(list(shaiba(81, 20, 5, 3)), [86, 23, 5, 3])\n self.assertEqual(list(shaiba(73, 94, 5, 10)), [78, 84, 5, 10])\n self.assertEqual(list(shaiba(91, 26, 15, 32)), [76, 58, 15, 32])\n self.assertEqual(list(shaiba(96, 87, 13, 25)), [83, 62, 13, 25])\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"onpython/sol_kat/hakaton.py","file_name":"hakaton.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"583800048","text":"\"\"\"\nThis code was used to optimize the partitioning and hierarchical clustering \nalgorithms and parameters for HW2. See code comments for details.\n\"\"\"\n\nimport numpy as np\nimport scipy.cluster.hierarchy as hac\nfrom hw2skeleton import *\nimport scipy.spatial.distance as ssd\nimport matplotlib.pyplot as plt\n\n# For silhouette\nfrom sklearn import metrics\n\nnp.set_printoptions(formatter={'float_kind':'{:f}'.format})\n\n# Read in active sites \nactive_sites = io.read_active_sites('data')\n\n# Compute similarity metric and build pairwise distance matrix\nactive_site_score_dict = {}\nactive_site_score_list = []\nactive_site_list = []\nmatrix = np.zeros([len(active_sites), len(active_sites)])\nfor i in range(len(active_sites)):\n for j in range(len(active_sites)):\n active_site_scores, matrix[i,j] = cluster.compute_similarity(active_sites[i],active_sites[j])\n active_site_score_dict.update(active_site_scores)\n \nfor key, value in active_site_score_dict.items():\n active_site_list.append(key)\n active_site_score_list.append(value)\n\n# From http://stackoverflow.com/questions/18952587/use-distance-matrix-in-scipy-cluster-hierarchy-linkage\n# This converts the n*n matrix to condense nC2 array for scipy\ndistArray = ssd.squareform(matrix)\n\n# Hierarchical clustering: try lots of different types of agglomerative clustering\nz_single = hac.linkage(distArray, method=\"single\")\nplt.figure(1)\nd_single = hac.dendrogram(z_single)\nplt.title(\"single linkage\")\nplt.savefig('single linkage')\n\nplt.figure(2)\nz_complete = hac.complete(distArray)\nd_complete = hac.dendrogram(z_complete)\nplt.title(\"complete linkage\")\nplt.savefig('complete linkage')\n\nplt.figure(3)\nz_centroid = hac.centroid(distArray)\nd_centroid = hac.dendrogram(z_centroid)\nplt.title(\"centroid linkage\")\nplt.savefig('centroid linkage')\n\nplt.figure(4)\nz_weighted = hac.weighted(distArray)\nd_weighted = hac.dendrogram(z_weighted)\nplt.title(\"weighted linkage\")\nplt.savefig('weighted linkage')\n\nplt.figure(5)\nz_ward = hac.ward(distArray)\nd_ward = hac.dendrogram(z_ward)\nplt.title(\"ward linkage\")\nplt.savefig('ward linkage')\n\nnames = [z_single,z_complete,z_centroid,z_weighted,z_ward]\nlabel_names = ['single linkage','complete linkage','centroid linkage','weighted linkage','ward linkage']\n# Go through each kind of agglomerative clustering, and calculate silhouette score at\n# various cutpoints in the dendrogram. 
The cutpoints define the number of clusters.\n# Plot each clustering method with its silhouette score over range of cutpoints to\n# determine the best performing one\nfor index,name in enumerate(names):\n sil_cuttree_list = []\n for num in range(2,20):\n sil_cuttree = 0\n cuttree = hac.cut_tree(name, n_clusters = num)\n labels_cuttree = []\n for value in cuttree:\n labels_cuttree.append(int(value))\n sil_cuttree = metrics.silhouette_score(matrix, labels_cuttree, metric='precomputed')\n sil_cuttree_list.append(sil_cuttree)\n # print number of clusters and max silhouette score for each type of hac for writeup\n print((sil_cuttree_list.index(max(sil_cuttree_list))+2),max(sil_cuttree_list))\n\n plt.figure(6)\n plt.plot(range(2,20), sil_cuttree_list, label=label_names[index]) \n plt.legend(loc='lower right')\n plt.xlabel('Number of clusters by cutting dendrogram')\n plt.ylabel('Silhouette score')\nplt.show()\n \n\n# Partition clustering: Kmediods. Run 100 times over a range of k values and calculate \n# mean and stdev silhouette scores at each k value to determine optimum k for ultimate\n# implementation\nsil_list = []\nmean_sil_list = []\nstd_sil_list = []\nfor n in range(2, 20):\n mean_sil=float\n std_sil = float\n for x in range(50):\n sil = 0\n M, C = cluster.kMedoids(matrix,n)\n # Convert dictionary output from kMedoids to list of labels indexed by active site\n labels = np.zeros(len(active_sites))\n for c, value in C.items():\n labels[value] = c\n # Compute silhoutte score\n sil = metrics.silhouette_score(matrix, labels, metric='precomputed')\n sil_list.append(sil)\n mean_sil = np.mean(sil_list)\n std_sil = np.std(sil_list)\n mean_sil_list.append(mean_sil)\n std_sil_list.append(std_sil)\n print(mean_sil, n) # print mean silhouette values so can find max for writeup\nplt.figure(7)\nplt.errorbar(range(2,20),mean_sil_list,std_sil_list) \nplt.ylim([0,1])\nplt.title(\"K_medoid silhouette plot\")\nplt.xlabel(\"Number of clusters (k)\")\nplt.ylabel(\"Silhouette score\")\nplt.savefig('K_medoid silhouette plot')\nplt.show()","sub_path":"optimize_algorithm_parameters.py","file_name":"optimize_algorithm_parameters.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"68861151","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/21 14:35\n# @Author : zgh\n# @Email : 849080458@qq.com\n# @File : teacher_work.py\n# @Software: PyCharm\n\n#获取测试数据\n#地址:D:\\python5\\class_unittest_html0412\\teacher\\test.txt\nimport requests\nimport copy\nimport unittest\n\nfrom class_ddt_0421.teacher_study.class_unittest_yuanLi import testHttpRequset\n\nclass siMida:\n dict={}\n list=[]\n def __init__(self,address):\n self.address=address#初始化函数\n def luJin(self):\n with open(self.address,'r') as file:#打开路径地址\n for i in file.readlines():#依次读取每一行内容记为:i\n for ii in i.strip (\"\\n\").split (','):#每次读取的内容删除换行符,再根据“,”进行切割后记为:ii\n self.dict[ii.split(':',1)[0]] = ii.split(':',1)[1]\n self.list.append(copy.deepcopy(self.dict))#每次从dict里面拷贝的值,插入list\n return self.list#返回值\nt=siMida('test.txt')\nprint(t.luJin())\nlist=t.luJin()\n\nfor i in range(len(list)):\n url=list[i]['url']\n list[i].pop('url')\n\n method=list[i]['method']\n# if method=='get':\n# methodName='test_get'\n# if method=='post':\n# methodName='test_post'\n list[i].pop('method')\n\n suite=unittest.TestSuite()\n suite.addTest(testHttpRequset(url,list[i],method))\n\n runner=unittest.TextTestRunner()\n 
runner.run(suite)\n\n\n\n\n\n","sub_path":"class_ddt_0421/teacher_study/teacher_work.py","file_name":"teacher_work.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19940711","text":"from scipy import *\nfrom scipy.integrate import odeint\nfrom pylab import *\n \n\"\"\"\nA spring-mass system with damping\n\"\"\"\n \ndef damped_osc(u,t): #defines the system of diff. eq.\n x, v = u\n return(v,-k*(x-L)/m-b*v) #the vector (dx/dt, dv/dt)\n \nt = arange(0,20.1,0.1)\nu0 = array([1,0]) #initial values of x, v\n \n#assume certain values of parameters b, k, L, m; these would be given in the problem\nb=0.4\nk=8.0\nL=0.5\nm=1.0\n \nu=odeint(damped_osc,u0,t) #solve ODE\n#plot x, v, phase, using matplotlib\n \nfigure(1)\nplot(t,u[:,0],t,u[:,1]) #u[:,0] is x; u[:,1] is v\nxlabel('Time')\ntitle('Damped Oscillator')\n \nfigure(2)\nplot(u[:,0],u[:,1])\ntitle('Phase-space')\nxlabel('Position')\nylabel('Velocity')\nshow()","sub_path":"gravity.py","file_name":"gravity.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"338829363","text":"import time\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom airsim_dataset import AirsimIntVarDataset\nfrom fem.goodpoint import GoodPoint\nimport os\nfrom fem.nonmaximum import MagicNMS\nfrom fem.eval_fem_airsim import loop\nfrom fem.nonmaximum import PoolingNms, MagicNMS\n\n\nPATH_SAVE = \"/tmp/village_00_320x240_day_night_SP_fem_my\"\n\nvillage = dict(dir_day = '/mnt/fileserver/shared/datasets/AirSim/village_00_320x240/00_day_light',\n dir_night ='/mnt/fileserver/shared/datasets/AirSim/village_00_320x240/00_night_light',\n poses_file ='/mnt/fileserver/shared/datasets/AirSim/village_00_320x240/village_00.json')\n\nfantasy_village = dict(dir_day = '/mnt/fileserver/shared/datasets/AirSim/fantasy_village_362x362/00_day_light/',\n dir_night ='/mnt/fileserver/shared/datasets/AirSim/fantasy_village_362x362/00_day_light_fog/',\n poses_file ='/mnt/fileserver/shared/datasets/AirSim/fantasy_village_362x362/00_day_light_fog/fantasy_village_00.json')\n\n\nPATH_SAVE_PTS = PATH_SAVE + '/data/'\n\nmagicleap_file = \"superpoint_magicleap/superpoint_v1.pth\"\nmagicleap_file = None\n\nPATH_WEIGHTS = None\n\n\nPATH_WEIGHTS = \"snapshots/super16000.pt\"\nPATH_WEIGHTS = \"./snapshots/super12300.pt\"\nPATH_WEIGHTS = \"./super6900.pt\"\n\n\n#PATH_WEIGHTS = \"./snapshots/TWD/tr_0.5_dr_0.4/from_scratch_super17.pt\"\n#PATH_WEIGHTS = \"./snapshots/TWD/tr_0.5_dr_0.4/super8.pt\"\n\n\nbatchnorm=True\nIMG_SIZE = [240, 320]\n\nframe_offset = 5\n\n\n\nbatch_size = 1\n\n\nconf_thresh= 0.020885\nconf_thresh= 0.0455591090510123100629\n\n\ndef run_all_snapshots():\n sp = GoodPoint(dustbin=0,\n activation=torch.nn.ReLU(),\n batchnorm=batchnorm,\n grid_size=8,\n nms=nms).eval()\n best_f1 = 0.0\n best_path = None\n for f in os.listdir('.'):\n if f.endswith('.pt'):\n current = loop(sp, weights=f)\n if best_f1 < current:\n print('new best: {0}, f1: {1} '.format(f, current))\n best_f1 = current\n best_path = f\n print(best_path)\n\n\ndef test_magicleap(loader, angle=0.0):\n\n from fem.wrapper import SuperPoint\n sp_path = '/home/noskill/projects/neuro-fem/fem/superpoint_magicleap/superpoint_v1.pth'\n\n sp = SuperPoint(nms).to(device)\n sp.load_state_dict(torch.load(sp_path))\n loop(sp, loader, thresh=0.015, print_res=False, draw=True, rotation_angle=angle)\n print('test superpoint 
completed')\n\ndef test_magicleap1(loader, angle=0.0):\n sp_path = '/home/noskill/projects/neuro-fem/fem/superpoint_magicleap/superpoint_v1.pth'\n\n from superpoint_magicleap.demo_superpoint import PointTracker, SuperPointFrontend\n fe = SuperPointFrontend(weights_path=sp_path,\n nms_dist=8,\n conf_thresh=0.015,\n nn_thresh=0.8,\n cuda=True)\n loop(loader=loader, sp=None, fe=fe, thresh=0.015, print_res=True, draw=False, rotation_angle=angle)\n\n\ndef test_distilled(loader):\n from superpoint_05 import SuperPointNet\n sp = SuperPointNet().eval()\n path = '/home/noskill/projects/neuro-fem/fem/airsim_realsense_gpnt_model_last.pth'\n sp.load_state_dict(torch.load(path, map_location=device))\n sp.nms = MagicNMS()\n loop(sp, loader, thresh=0.015, print_res=False, draw=False)\n print('test distilled completed')\n\n\ndef run_distilled(loader, angle=0.0):\n weight = \"./snapshots/distilled3400.pt\"\n weight = \"./distilled13800.pt\"\n from goodpoint_small import GoodPointSmall\n sp = GoodPointSmall(dustbin=0,\n activation=torch.nn.LeakyReLU(),\n batchnorm=True,\n grid_size=8,\n nms=nms,\n base1=32, base2=32, base3=64).eval()\n\n\n #sp_desc = GoodPoint(dustbin=0,\n # activation=torch.nn.LeakyReLU(),\n # batchnorm=True,\n # grid_size=8,\n # nms=nms).eval().cuda()\n\n #sp_desc.load_state_dict(torch.load('snapshots/super6300.pt', map_location=device)['superpoint'])\n sp.load_state_dict(torch.load(weight, map_location=device)['superpoint'])\n # sp.to(torch.bfloat16)\n # just in case\n torch.set_flush_denormal(True)\n loop(sp=sp, loader=loader, draw=False, print_res=False, thresh=0.0217075525,\n device=device, desc_model=None, rotation_angle=angle, N=100)\n print('test destilled {0} completed'.format(weight))\n\n\ndef run_good(loader, angle=0.0):\n weight = './snapshots/orbnet.d1.pt'\n weight = \"./snapshots/super3400.pt\"\n\n sp = GoodPoint(dustbin=0,\n activation=torch.nn.LeakyReLU(),\n batchnorm=True,\n grid_size=8,\n nms=nms).eval()\n\n\n #sp_desc = GoodPoint(dustbin=0,\n # activation=torch.nn.LeakyReLU(),\n # batchnorm=True,\n # grid_size=8,\n # nms=nms).eval().cuda()\n\n #sp_desc.load_state_dict(torch.load('snapshots/super6300.pt', map_location=device)['superpoint'])\n\n sp.load_state_dict(torch.load(weight, map_location=device)['superpoint'])\n loop(sp=sp, loader=loader, draw=False, print_res=False, thresh=0.021075525,\n desc_model=None, rotation_angle=angle, N=100)\n print('test goodpoint {0} completed'.format(weight))\n\n\ndef measure_performance(loader):\n device = 'cpu'\n weight = './snapshots/super3400.pt'\n sp = GoodPoint(dustbin=0,\n activation=torch.nn.LeakyReLU(),\n batchnorm=True,\n grid_size=8,\n nms=nms).eval()\n ipow = [i for i in range(11)]\n ipow = [x * -1 for x in reversed(ipow)][:-1] + ipow\n for t_pow in ipow:\n weights = torch.load(weight, map_location=device)['superpoint']\n for key, wt in list(weights.items()):\n weights[key] = wt * (2 ** t_pow)\n sp.load_state_dict(weights)\n sp = sp.to(device)\n perf = emtpy_loop(sp, loader, device, thresh=0.021075525)\n print('pow {0} fps {1}'.format(t_pow, perf))\n\n\ndef emtpy_loop(sp, loader, device, thresh):\n total = 0.0\n for i_batch, sample in enumerate(loader):\n img_1_batch = sample['img1'].numpy()\n img_2_batch = sample['img2'].numpy()\n img_1 = img_1_batch[0, :, :]\n img_2 = img_2_batch[0, :, :]\n timg1 = np.expand_dims(np.expand_dims(img_1.astype('float32'), axis=0), axis=0)\n timg2 = np.expand_dims(np.expand_dims(img_2.astype('float32'), axis=0), axis=0)\n timg1 = torch.from_numpy(timg1).to(device)\n timg2 = 
torch.from_numpy(timg2).to(device)\n\n start = time.time()\n with torch.no_grad():\n pts_2, desc_2 = sp.points_desc(timg1, threshold=thresh)\n pts_2, desc_2 = sp.points_desc(timg2, threshold=thresh)\n\n end = time.time()\n total += (end - start)\n\n if i_batch == 100:\n break\n\n\n perf = ((i_batch + 1) * 2) / total\n return perf\n\n\ndataset_village = AirsimIntVarDataset(**village, frame_offset=frame_offset)\ndataset_fantasy_village = AirsimIntVarDataset(**fantasy_village, frame_offset=frame_offset)\n\n\nvillage_loader = DataLoader(dataset_village, batch_size=batch_size, shuffle=False, num_workers=1)\nfantasy_loader = DataLoader(dataset_fantasy_village, batch_size=batch_size, shuffle=False, num_workers=1)\n\n\nif __name__ == '__main__':\n if PATH_WEIGHTS == \"snapshots/super.snap.4.pt\":\n conf_thresh = 0.15\n\n device = 'cpu'\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n print(\"using device {0}\".format(device))\n\n\n\n batchnorm = True\n # from superpoint_magicleap.demo_superpoint import SuperPointFrontend\n # sp_magic = SuperPointFrontend(weights_path=\"superpoint_magicleap/superpoint_v1.pth\",\n # nms_dist=8,conf_thresh=conf_thresh, nn_thresh=0.3)\n\n nms = MagicNMS()\n nms = PoolingNms(8)\n\n\n #run_all_snapshots()\n #test_distilled(fantasy_loader)\n #test_magicleap1(village_loader, angle=0.0)\n #test_magicleap(fantasy_loader, angle=5.0)\n\n # run_good(fantasy_loader, angle=0.0)\n run_distilled(fantasy_loader, angle=5.0)\n run_distilled(village_loader, angle=5.0)\n # run_good(village_loader, angle=5.0)\n print('village_loader')\n # print('fantasy_loader')\n\n #measure_performance(village_loader)\n","sub_path":"fem/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"576038941","text":"from boto import connect_sqs\nfrom boto.sqs.message import RawMessage\nfrom tilequeue.queue import MessageHandle\nfrom tilequeue.utils import grouper\n\n\nclass SqsQueue(object):\n\n def __init__(self, sqs_queue, read_size=10):\n self.sqs_queue = sqs_queue\n self.read_size = read_size\n\n def enqueue(self, payload):\n message = RawMessage()\n message.set_body(payload)\n self.sqs_queue.write(message)\n\n def enqueue_batch(self, payloads):\n # sqs can only send 10 messages at once\n for payloads_chunk in grouper(payloads, 10):\n msg_tuples = []\n for i, payload in enumerate(payloads_chunk):\n msg_tuples.append((str(i), payload, 0))\n self.sqs_queue.write_batch(msg_tuples)\n\n def read(self):\n msg_handles = []\n sqs_messages = self.sqs_queue.get_messages(\n num_messages=self.read_size, attributes=[\"SentTimestamp\"])\n for sqs_message in sqs_messages:\n payload = sqs_message.get_body()\n try:\n timestamp = float(sqs_message.attributes.get('SentTimestamp'))\n except (TypeError, ValueError):\n timestamp = None\n\n metadata = dict(\n queue_name=self.sqs_queue.name,\n timestamp=timestamp,\n )\n msg_handle = MessageHandle(sqs_message, payload, metadata)\n msg_handles.append(msg_handle)\n return msg_handles\n\n def job_done(self, msg_handle):\n self.sqs_queue.delete_message(msg_handle.handle)\n\n def clear(self):\n n = 0\n while True:\n msgs = self.sqs_queue.get_messages(self.read_size)\n if not msgs:\n break\n self.sqs_queue.delete_message_batch(msgs)\n n += len(msgs)\n return n\n\n def close(self):\n pass\n\n\ndef make_sqs_queue(\n queue_name, aws_access_key_id=None, aws_secret_access_key=None):\n conn = connect_sqs(aws_access_key_id, aws_secret_access_key)\n queue = 
conn.get_queue(queue_name)\n assert queue is not None, \\\n 'Could not get sqs queue with name: %s' % queue_name\n queue.set_message_class(RawMessage)\n read_size = 10\n return SqsQueue(queue, read_size)\n","sub_path":"tilequeue/queue/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"589267726","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport plotly.graph_objs as go\n\ndf3 = pd.read_csv('../Datasets/Olympic2016Rio.csv')\ndf4 = pd.read_csv('../Datasets/Weather2014-15.csv')\n\napp2 = dash.Dash()\n\n\n\n# Olympic Bar Chart Data\nobc_df = df3.sort_values(by='Total', ascending=[False]).head(20)\ndata_obc = [go.Bar(x=obc_df['NOC'], y=obc_df['Total'])]\n\n# Olympic Stack Bar Chart Data\nnosbc_df = df3.sort_values(by=['Total'], ascending=[False]).head(20)\ntrace1 = go.Bar(x=nosbc_df['NOC'], y=nosbc_df['Gold'], name='Gold',\nmarker={'color': '#FFD700'})\ntrace2 = go.Bar(x=nosbc_df['NOC'], y=nosbc_df['Silver'], name='Silver',\nmarker={'color': '#C0C0C0'})\ntrace3 = go.Bar(x=nosbc_df['NOC'], y=nosbc_df['Bronze'], name='Bronze',\nmarker={'color': '#8C7853'})\ndata = [trace1, trace2, trace3]\ndata_olympicstackbarchart = [trace1, trace2, trace3]\n\n# Temperature Line Graph\ndf4['date'] = pd.to_datetime(df4['date'])\nnew_df4 = df4.groupby(['month',]).agg({\"actual_max_temp\": 'max'}).reset_index()\ndata_templinechart = [go.Scatter(x=new_df4['month'], y=new_df4['actual_max_temp'], mode='lines', name='max_temp')]\n\n# Temperature Multi-Line Graph\ndf4['date'] = pd.to_datetime(df4['date'])\nnew_df = df4.groupby(['month',]).agg({\"actual_max_temp\": 'max', \"actual_min_temp\": 'min', \"actual_mean_temp\": 'mean'}).reset_index()\n# Preparing data\ntrace1_tempmultiline = go.Scatter(x=new_df['month'], y=new_df['actual_max_temp'], mode='lines', name='max')\ntrace2_tempmultiline = go.Scatter(x=new_df['month'], y=new_df['actual_min_temp'], mode='lines',\nname='min')\ntrace3_tempmultiline = go.Scatter(x=new_df['month'], y=new_df['actual_mean_temp'], mode='lines',\nname='mean')\ndata_tempmultiline = [trace1_tempmultiline,trace2_tempmultiline,trace3_tempmultiline]\n\n# Temperature Bubble Chart\nbubble_df = df4.groupby(['month']).agg({\"average_min_temp\": 'mean', \"average_max_temp\": 'mean'}).reset_index()\ndata_tempbubble = [\ngo.Scatter(x=bubble_df['average_min_temp'], y=bubble_df['average_max_temp'], text=bubble_df['month'], mode='markers',\nmarker=dict(size=bubble_df['average_min_temp'], color=bubble_df['average_min_temp'], showscale=True))\n]\n\n# Temperature Heatmap\ntempheatmap_df = df4.groupby(['month','day']).agg({'month': 'min', 'day': 'min', 'record_max_temp': 'max'})\ndata_tempheatmap = [go.Heatmap(x=tempheatmap_df['day'],\ny=tempheatmap_df['month'],\nz=tempheatmap_df['record_max_temp'].values.tolist(),\ncolorscale='Jet')]\n\napp2.layout = html.Div(children=[\n html.H1(children='Python Dash Weather / Olympics',\n style={\n 'textAlign': 'center',\n 'color': '#006400',\n\n }\n ),\n\n html.Div('Web dashboard for 2016 Rio Olympics and 2014-2015 Weather Data', style={'textAlign': 'center'}),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Hr(style={'color': '#7FDBFF'}),\n html.H3('Bar chart', style={'color': '#006400'}),\n html.Div('This bar chart represent the number of medals won by the top 20 countries in the 2016 Rio Olympics'),\n dcc.Graph(id='graph8',\n figure={\n 
'data': data_obc,\n 'layout': go.Layout(title='Most Medals Per Country Olympics 2016',\n xaxis={'title': 'Country'}, yaxis={'title': 'Medals'})\n\n }\n ),\nhtml.Hr(style={'color': '#7FDBFF'}),\n html.H3('Stack bar chart', style={'color': '#006400'}),\n html.Div(\n 'This stack bar chart represent the Types of medals won by each country and how many'),\n dcc.Graph(id='graph9',\n figure={\n 'data': data_olympicstackbarchart,\n 'layout': go.Layout(title='Medals won by top 20 countries 2016 Olympics Rio',\n xaxis={'title': 'Country'}, yaxis={'title': 'Medals'},\n barmode='stack')\n }\n ),\nhtml.Hr(style={'color': '#7FDBFF'}),\n html.H3('Line chart', style={'color': '#006400'}),\n html.Div('This line chart represent the Max Temperature for every month from 2014-2015'),\n dcc.Graph(id='graph10',\n figure={\n 'data': data_templinechart,\n 'layout': go.Layout(title='Max Temperatures every month',\n xaxis={'title': 'Month'}, yaxis={'title': 'Temperature'})\n }\n ),\nhtml.Hr(style={'color': '#7FDBFF'}),\n html.H3('Multi Line chart', style={'color': '#006400'}),\n html.Div(\n 'This line chart represent the Max, Min, and Mean Temperatures from 2014-2015'),\n dcc.Graph(id='graph11',\n figure={\n 'data': data_tempmultiline,\n 'layout': go.Layout(\n title='Max, Min, Mean of Temperatures every month for 2 years',\n xaxis={'title': 'Month'}, yaxis={'title': 'Temperature'})\n }\n ),\nhtml.Hr(style={'color': '#7FDBFF'}),\n html.H3('Bubble chart', style={'color': '#006400'}),\n html.Div(\n 'This bubble chart represents Max and Min Temperatures per month from 2014-2015 (hover over bubble for month)'),\n dcc.Graph(id='graph12',\n figure={\n 'data': data_tempbubble,\n 'layout': go.Layout(title='Max and Min Temperatures per Month',\n xaxis={'title': 'Min Temp'}, yaxis={'title': 'Max Temp'},\n hovermode='closest')\n }\n ),\n html.Hr(style={'color': '#7FDBFF'}),\n html.H3('Heat map', style={'color': '#006400'}),\n html.Div(\n 'This heat map represent the recorded max temperatures for days of the week in each month of the years 2014-2015.'),\n dcc.Graph(id='graph13',\n figure={\n 'data': data_tempheatmap,\n 'layout': go.Layout(title='recoreded max temperatures',\n xaxis={'title': 'Day of Week'}, yaxis={'title': 'Month of Year'})\n }\n )\n])\n\nif __name__ == '__main__':\n app2.run_server()\n\n","sub_path":"Plots/WeatherOlympicdash.py","file_name":"WeatherOlympicdash.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"56213640","text":"\"\"\"\nConstants with CSS selector mappings to easily keep track of things on the page\n\"\"\"\n\nfrom api.app.tests.util import UrlMappings as urls\n\n# General Game Links\nGAME_LINK_CLASS = \"game-view-link\"\nGAME_EDIT_LINK_CLASS = \"game-edit-link\"\nGAME_DELETE_LINK_CLASS = \"game-delete-link\"\n\n# Specific Game Links\nSIGNUP_GAME_LINK_ID = \"game-view-link-%s\" % urls.SIGNUP_GAME_ID\nWITHDRAW_GAME_LINK_ID = \"game-view-link-%s\" % urls.WITHDRAW_GAME_ID\nPROCESS_SIGNUPS_GAME_LINK_ID = \"game-view-link-%s\" % urls.PROCESS_SIGNUPS_GAME_ID\nPENDING_GAME_LINK = \"game-view-link-%s\" % urls.PENDING_GAME_ID\n\n# Game Elements\nGAME_STATUS_TEXT_CLASS = \"game-status\"\nGAME_SUBMIT_BUTTON_CLASS = \"game-submit-button\"\nGAME_CONFIRM_DELETE_CLASS = \"game-confirm-delete-button\"\nSIGNUP_BUTTON_CLASS = \"signup-button-signup\"\nWITHDRAW_BUTTON_CLASS = \"signup-button-withdraw\"\nACCEPT_BUTTON_CLASS = \"signup-button-accept\"\nREJECT_BUTTON_CLASS = 
\"signup-button-reject\"\nSIGNUP_STATUS_TEXT_CLASS = \"signup-status\"\n\n# Character Elements\nCHARACTER_LINK_CLASS = \"character-link\"\nCHARACTER_ATTRIBUTES_ID = \"character-attributes\"\nEDIT_CHARACTER_LINK_ID = \"character-edit-link\"\nCHARACTER_SUBMIT_BUTTON_CLASS = \"character-submit-button\"\nCHARACTER_REVIEW_BUTTON_ID = \"character-review-button\"\nCHARACTER_APPROVE_BUTTON_ID = \"character-approve-button\"\nCHARACTER_REJECT_BUTTON_ID = \"character-reject-button\"\n\n# Thread Elements\nTHREAD_CONTENT_ID = \"thread\"\nTHREAD_SUBMIT_BUTTON_CLASS = \"submit-button\"\nCOMMENT_BODY_CLASS = \"comment-body\"\nREPLY_LINK_ID = \"reply-link-\"\nREPLY_TEXT_FIELD_ID = \"comment-reply-text\"\nREPLY_SUBMIT_BUTTON_ID = \"comment-reply-submit\"\nPERMALINK_ID = \"permalink-\"\n","sub_path":"backend/api/app/tests/util/CssMappings.py","file_name":"CssMappings.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223368419","text":"n=int(input(\"Введите колчиество строк: \"))\nm=int(input(\"Введите колчиество столбцов: \"))\nfrom random import randint\na = [[randint(0,20) for j in range (m)] for i in range (n)]\ndel randint \nprint(\"Ваша сгенерированная матрица: \")\nfor row in a:\n print(' '.join([str(elem) for elem in row]), end=\"\\n\")\n\nmx=[[-1,0,0]]\nmn=[[21,0,0]]\n\nfor i in range (n):\n for j in range (m):\n elm = a[i][j]\n if elm > mx[-1][0]:\n mx.clear()\n mx.append([elm,i,j])\n elif elm == mx[-1][0]:\n mx.append([elm,i,j])\n elif elm < mn[-1][0]:\n mn.clear()\n mn.append([elm,i,j])\n elif elm == mn[-1][0]:\n mn.append([elm,i,j])\n\nif len(mx) == 1:\n print()\n print(\"Максимальный элемент: \", mx[0][0], \"( строка:\", mx[0][1],\", столбец:\", mx[0][2], \")\" )\nelse:\n print()\n print(\"Максимаьных элементов несколько: \")\n print(*[(str(elem[0])+\" ( строка: \" + str(elem[1]) + \" , столбец: \" + str(elem[2]) + \" )\") for elem in mx], sep=\"\\n\" )\n\n\nif len(mn) == 1:\n print()\n print(\"Минимальный элемент: \", mn[0][0], \"( строка:\", mn[0][1],\", столбец:\", mn[0][2], \")\" )\nelse:\n print()\n print(\"Минимальных элементов несколько: \")\n print(*[(str(elem[0])+\" ( строка: \"+str(elem[1])+ \" , столбец: \"+ str(elem[2]) + \" )\") for elem in mn],sep=\"\\n\")","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570359667","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom scrapy.selector import Selector\nfrom demo.items import DemoItem\nimport time\n\n\nclass MoiveSpider(CrawlSpider):\n name = 'moive'\n allowed_domains = ['moive.douban.com']\n start_urls = ['https://movie.douban.com/top250']\n\n # rules = (\n # Rule(LinkExtractor(allow=(r'https://movie.douban.com/top250\\?start=\\d+.*'))),\n # Rule(LinkExtractor(allow=(r'https://movie.douban.com/subject/\\d+')),\n # callback='parse_item'),\n # )\n\n def parse_item(self, response):\n print(response, 'response')\n # i['domain_id'] = response.xpath('//input[@id=\"sid\"]/@value').extract()\n # i['name'] = response.xpath('//div[@id=\"name\"]').extract()\n # i['description'] = response.xpath('//div[@id=\"description\"]').extract()\n time.sleep(3)\n li_list = response.xpath('//*[@id=\"content\"]/div/div[1]/ol/li')\n for li in li_list:\n item = DemoItem()\n item['title'] = li.xpath(\n 
'div/div[2]/div[1]/a/span[1]/text()').extract_first()\n item['score'] = li.xpath(\n 'div/div[2]/div[2]/div/span[2]/text()').extract_first()\n item['motto'] = li.xpath(\n 'div/div[2]/div[2]/p[2]/span/text()').extract_first()\n yield item\n","sub_path":"scrapy/demo/demo/spiders/moive.py","file_name":"moive.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113127428","text":"# https://www.memrise.com/course/131111/5000-most-common-french-words/1/\nimport os\nfrom urllib.request import urlopen\nimport ssl\n\nfrom bs4 import BeautifulSoup\n\n# This restores the same behavior as before.\ncontext = ssl._create_unverified_context()\n\nurl = 'http://www.le-francais.ru/bibliotheque/adopte'\nresponse = urlopen(url, context=context).read()\ndecoded_html = response.decode('utf-8')\n\nsoup = BeautifulSoup(decoded_html, \"html.parser\")\n\ndictionary = {}\ni = 'translation'\nready = False\nfor item in soup.find_all(\"td\"):\n if ready == True:\n if i == 'translation':\n trans = item\n i = 'original'\n elif i == 'original':\n orig = item\n i = 'explanation'\n else:\n dictionary[orig] = [trans, item]\n i = 'translation'\n # print(orig.text, trans.text, item.text)\n elif item.text == 'абажур ':\n trans = item\n i = 'original'\n ready = True\n\n\ntext = ''\n\nfor key, value in dictionary.items():\n line = key.text + ' --- ' + value[0].text + ' •Explication: ' + value[1].text\n text += line + \"\\n\"\n # print(line)\n\n\nwith open(\"results/Rus-Fr Dictionary.txt\", \"ab\") as f:\n f.write(text.encode(\"UTF-8\"))\n\n\n","sub_path":"Parce Rus-Fr Dictionary.py","file_name":"Parce Rus-Fr Dictionary.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"89196026","text":"import palette\nfrom model.helper_functions.message import message\nfrom game import Game\n\n\ndef monster_death(monster):\n # transform it into a nasty corpse! 
it doesn't block, can't be\n # attacked and doesn't move\n message(monster.name.capitalize() + ' is dead!', palette.orange)\n _mark_entity_as_dead(monster)\n\n Game.instance.xp_system.get(Game.instance.player).gain_xp(Game.instance.xp_system.get(monster).xp)\n Game.instance.fighter_system.remove(monster)\n Game.instance.ai_system.remove(monster)\n\n monster.original_ai = None\n\n\ndef player_death(player):\n # the game ended!\n message('You died!', palette.red)\n Game.instance.game_state = 'dead'\n Game.instance.keybinder.register_all_keybinds_and_events()\n\n # for added effect, transform the player into a corpse!\n player.char = '%'\n player.color = palette.gray_blue\n\n\ndef horse_death(horse):\n message('Stallion is dead!', palette.red)\n _mark_entity_as_dead(horse)\n\n Game.instance.fighter_system.remove(horse)\n Game.instance.ai_system.remove(horse)\n\n if Game.instance.player.mounted:\n Game.instance.player.unmount(Game.instance.stallion)\n\n\ndef _mark_entity_as_dead(entity):\n entity.char = '%'\n entity.color = palette.gray_blue\n entity.blocks = False\n entity.name = \"{} remains\".format(entity.name)\n entity.send_to_back()\n","sub_path":"model/helper_functions/death_functions.py","file_name":"death_functions.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"619722683","text":"import json\nimport configparser\n\nfrom models import DatabaseCfg, ExportCfg, WorkspaceCfg, TransformCfg\n\n\nclass IniConfig():\n def __init__(self, config):\n self.database = DatabaseCfg(config)\n self.workspace = WorkspaceCfg(config)\n self.export = ExportCfg(config)\n self.transform = TransformCfg(config)\n\ndef parse(fn):\n config = configparser.ConfigParser()\n config.read(fn)\n ini = IniConfig(config)\n return ini\n\nparse('ini_parser/etl_example.cfg')","sub_path":"ini_parser/ini_parser.py","file_name":"ini_parser.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"264316586","text":"import sys\nimport time\n\nfrom influxdb import InfluxDBClient\nfrom influxdb.exceptions import InfluxDBClientError, InfluxDBServerError\nfrom requests import ConnectTimeout, ConnectionError\n\nfrom ipfire_traffic.common import log\nfrom ipfire_traffic.config import config\n\nimport time\nimport os\nimport json\n# import datetime\n\n\n\nclass IPFireTrafficSpeed():\n def __init__(self):\n self.influx_client = self._get_influx_connection()\n\n self.time_current = time.time()\n self.time_last = time.time()\n self.rxb_current = 0\n self.rxb_last = 0\n self.txb_current = 0\n self.txb_last = 0\n self.rpkt_last = 0\n self.tpkt_last = 0\n self.rpkt_current = 0\n self.tpkt_current = 0\n \n self.first_try = True\n\n def _get_influx_connection(self):\n \"\"\"\n Create an InfluxDB connection and test to make sure it works.\n We test with the get all users command. If the address is bad it fails\n with a 404. 
If the user doesn't have permission it fails with 401\n :return:\n \"\"\"\n\n influx = InfluxDBClient(\n config.influx_address,\n config.influx_port,\n database=config.influx_database,\n ssl=config.influx_ssl,\n verify_ssl=config.influx_verify_ssl,\n username=config.influx_user,\n password=config.influx_password,\n timeout=5\n )\n try:\n log.debug('Testing connection to InfluxDb using provided credentials')\n influx.get_list_users() # TODO - Find better way to test connection and permissions\n log.debug('Successful connection to InfluxDb')\n except (ConnectTimeout, InfluxDBClientError, ConnectionError) as e:\n if isinstance(e, ConnectTimeout):\n log.critical('Unable to connect to InfluxDB at the provided address (%s)', config.influx_address)\n elif isinstance(e, ConnectionError):\n log.critical('Unable to connect to InfluxDB. Database server offline')\n elif e.response == 401:\n log.critical('Unable to connect to InfluxDB with provided credentials')\n else:\n log.critical('Failed to connect to InfluxDB for unknown reason')\n\n sys.exit(1)\n\n return influx\n\n def write_influx_data(self, json_data):\n \"\"\"\n Writes the provided JSON to the database\n :param json_data:\n :return: None\n \"\"\"\n log.debug(json_data)\n\n try:\n self.influx_client.write_points(json_data)\n except (InfluxDBClientError, ConnectionError, InfluxDBServerError) as e:\n if hasattr(e, 'code') and e.code == 404:\n log.error('Database %s Does Not Exist. Attempting To Create', config.influx_database)\n self.influx_client.create_database(config.influx_database)\n self.influx_client.write_points(json_data)\n return\n\n log.error('Failed To Write To InfluxDB')\n print(e)\n\n log.debug('Data written to InfluxDB')\n\n def send_results(self, results):\n \"\"\"\n Formats the payload to send to InfluxDB\n :rtype: None\n \"\"\"\n result_dict = results\n\n input_points = [\n {\n 'measurement': 'IPFireTraffic',\n 'fields': {\n 'download_bytes': result_dict['rx_bytes'],\n 'upload_bytes': result_dict['tx_bytes'],\n 'download_packets': result_dict['rpkt_count'],\n 'upload_packets': result_dict['tpkt_count']\n }\n }\n ]\n\n print(f'Sending data to influxdb: {input_points}')\n self.write_influx_data(input_points)\n\n\n def get_bytes(self):\n try:\n \n shell_cmd = f'ip -j -s link show {config.interface_name}'\n print(f'CMD: {shell_cmd}')\n # shell_cmd = '/Users/michal/Desktop/speed.cgi'\n std_out = os.popen(shell_cmd)\n output = std_out.read()\n json_output = json.loads(output)\n\n json_stats = json_output[0]['stats64']\n rx_bytes = json_stats['rx']['bytes']\n rx_packets = json_stats['rx']['packets']\n rx_errors = json_stats['rx']['errors']\n\n tx_bytes = json_stats['tx']['bytes']\n tx_packets = json_stats['tx']['packets']\n tx_errors = json_stats['tx']['errors']\n\n\n self.time_current = time.time()\n time_diff = self.time_current - self.time_last\n self.time_last = self.time_current\n\n \n\n print(json_output)\n \n # Receive\n self.rxb_current = rx_bytes\n rxb_diff = self.rxb_current - self.rxb_last\n self.rxb_last = self.rxb_current\n rx_final_bytes = rxb_diff / time_diff\n # Send\n self.txb_current = tx_bytes\n txb_diff = self.txb_current - self.txb_last\n self.txb_last = self.txb_current\n tx_final_bytes = txb_diff / time_diff\n\n # Packets Receive\n self.rpkt_current = rx_packets\n rpkt_diff = self.rpkt_current - self.rpkt_last\n self.rpkt_last = self.rpkt_current\n rpkt_final_count = rpkt_diff / time_diff\n # Packet Send\n self.tpkt_current = tx_packets\n tpkt_diff = self.tpkt_current - self.tpkt_last\n self.tpkt_last = 
self.tpkt_current\n tpkt_final_count = tpkt_diff / time_diff\n\n result_dict = {\n 'rx_bytes': round(rx_final_bytes, 2),\n 'tx_bytes': round(tx_final_bytes, 2),\n 'rpkt_count': round(rpkt_final_count, 2),\n 'tpkt_count': round(tpkt_final_count, 2),\n }\n print(f'Result dict: {result_dict}')\n return result_dict\n \n except Exception as err:\n print(f'Error occured here: {err}')\n \n def run(self):\n while True:\n print(\"Starting in run\")\n bytes_dict = self.get_bytes()\n if not self.first_try:\n try:\n self.send_results(bytes_dict)\n except Exception as e:\n print(f\"Error sending results: ${e}\")\n \n self.first_try = False\n\n print(bytes_dict)\n log.info('Waiting %s seconds until next test', config.delay)\n time.sleep(config.delay)\n","sub_path":"ipfire_traffic/IPFireTrafficSpeed.py","file_name":"IPFireTrafficSpeed.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"646304885","text":"import logging\nfrom datetime import datetime, timedelta\n\nfrom dateutil import parser\n\nfrom core import Feed\nfrom core.errors import ObservableValidationError\nfrom core.observables import Url\n\n\nclass UrlHaus(Feed):\n default_values = {\n \"frequency\": timedelta(minutes=20),\n \"name\": \"UrlHaus\",\n \"source\": \"https://urlhaus.abuse.ch/downloads/csv_recent/\",\n \"description\":\n \"URLhaus is a project from abuse.ch with the goal of sharing malicious URLs that are being used for malware distribution.\",\n }\n\n def update(self):\n since_last_run = datetime.utcnow() - self.frequency\n\n for line in self.update_csv(delimiter=',', quotechar='\"'):\n if not line or line[0].startswith(\"#\"):\n return\n\n first_seen = parser.parse(line[1])\n if self.last_run is not None:\n since_last_run = datetime.now() - self.frequency\n if since_last_run > first_seen:\n continue\n\n self.analyze(line)\n\n def analyze(self, line):\n\n id_feed, first_seen, url, url_status, threat, tags, urlhaus_link, source = line # pylint: disable=line-too-long\n\n context = {\n \"id_urlhaus\": id_feed,\n \"status\": url_status,\n \"source\": self.name,\n \"report\": urlhaus_link,\n \"threat\": threat,\n \"reporter\": source,\n }\n\n if url:\n try:\n url_obs = Url.get_or_create(value=url)\n url_obs.tag(tags.split(','))\n url_obs.add_context(context)\n url_obs.add_source(self.name)\n except ObservableValidationError as e:\n logging.error(e)\n","sub_path":"plugins/feeds/public/urlhaus.py","file_name":"urlhaus.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"100484275","text":"# -*- coding:utf-8 -*-\n# @Time : 2020/7/7 20:10\n# @Author : zengln\n# @File : 删除字符串中所有相邻重复项.py\n\n# 给出由小写字母组成的字符串 S,重复项删除操作会选择两个相邻且相同的字母,并删除它们。\n#\n# 在 S 上反复执行重复项删除操作,直到无法继续删除。\n#\n# 在完成所有重复项删除操作后返回最终的字符串。答案保证唯一。\n#\n#\n#\n# 示例:\n#\n# 输入:\"abbaca\"\n# 输出:\"ca\"\n# 解释:\n# 例如,在 \"abbaca\" 中,我们可以删除 \"bb\" 由于两字母相邻且相同,这是此时唯一可以执行删除操作的重复项。之后我们得到字符串 \"aaca\",其中又\n# 只有 \"aa\" 可以执行重复项删除操作,所以最后的字符串为 \"ca\"。\n#\n#\n#\n#\n# 提示:\n#\n#\n# 1 <= S.length <= 20000\n# S 仅由小写英文字母组成。\n\nclass Solution1:\n def removeDuplicates(self, S: str) -> str:\n stack = []\n for x in S:\n if not stack:\n stack.append(x)\n continue\n\n if stack[-1] == x:\n stack.pop()\n else:\n stack.append(x)\n\n return \"\".join(stack)\n\n\nclass Solution:\n def removeDuplicates(self, S: str) -> str:\n stack = []\n for x in S:\n if stack and stack[-1] == x:\n stack.pop()\n else:\n stack.append(x)\n\n return 
\"\".join(stack)\n\ns = Solution()\nprint(s.removeDuplicates(\"abbaca\"))","sub_path":"栈/删除字符串中所有相邻重复项.py","file_name":"删除字符串中所有相邻重复项.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"453263050","text":"import json\nfrom itertools import product\nfrom collections import Counter, defaultdict\t\n\n\nclass myPOSTagger:\n\n\tdef __init__(self, initial_prob=None, emission_prob=None, transition_prob=None):\n\n\t\tself.tags = ['PRON', 'X', '.', 'DET', 'ADP', 'NOUN', 'VERB', 'PRT', 'CONJ', 'ADV', 'NUM', 'ADJ']\n\n\t\tself.total_sents = 0\n\t\tself.init_counts = Counter()\n\t\tself.pair_counts = Counter()\n\t\tself.word_counts = Counter()\n\t\tself.tag_counts = Counter()\n\t\tself.bigram_tag_counts = Counter()\n\n\t\ttry:\n\t\t\twith open(initial_prob, 'r') as f:\n\t\t\t\tf = json.load(f)\n\t\t\t\tself.initial_prob = defaultdict(lambda: defaultdict(float),f)\n\t\texcept:\n\t\t\tself.initial_prob = defaultdict(lambda: defaultdict(float))\n\n\t\ttry:\n\t\t\twith open(emission_prob, 'r') as f:\n\t\t\t\tf = json.load(f)\n\t\t\t\tself.emission_prob = defaultdict(lambda: defaultdict(float),f)\n\t\texcept:\n\t\t\tself.emission_prob = defaultdict(lambda: defaultdict(float))\n\n\t\ttry:\n\t\t\twith open(transition_prob, 'r') as f:\n\t\t\t\tf = json.load(f)\n\t\t\t\tself.transition_prob = defaultdict(lambda: defaultdict(float),f)\n\t\texcept:\n\t\t\tself.transition_prob = defaultdict(lambda: defaultdict(float))\n\n\n\tdef __str__(self):\n\t\treturn \"My PoSTagger\"\n\n\n\tdef _initial_prob(self, corpus):\n\t\tfor sent in corpus:\n\t\t\tself.init_counts[sent[0][1]] += 1\n\t\t\n\t\tnum = float(self.total_sents)\n\t\tfor tag,count in self.init_counts.items():\n\t\t\tself.initial_prob[tag] = count / num\n\n\n\tdef _emission_prob(self, corpus):\n\t\tfor pair in corpus:\n\t\t\tword = pair[0]\n\t\t\ttag = pair[1]\n\n\t\t\tself.pair_counts[pair] += 1\n\t\t\tself.word_counts[word] += 1\n\t\t\tself.tag_counts[tag] += 1\n\n\t\tfor word,tag in product(self.word_counts.keys(), self.tag_counts.keys()):\n\t\t\tself.emission_prob[word][tag] = self.pair_counts[(word,tag)] / float(self.tag_counts[tag])\n\n\n\tdef _transition_prob(self, corpus):\n\t\ttag_list = [pair[1] for pair in corpus]\n\n\t\ttag_bigrams = zip(tag_list[:-1],tag_list[1:])\n\n\t\tfor bigram in tag_bigrams:\n\t\t\tself.bigram_tag_counts[bigram] += 1\n\n\t\tfor tag0,tag1 in product(self.tag_counts.keys(), repeat=2):\n\t\t\tself.transition_prob[tag0][tag1] = self.bigram_tag_counts[(tag0,tag1)] / float(self.tag_counts[tag0])\n\t\t\n\n\tdef train(self, corpus):\n\t\tself.total_sents += len(corpus)\n\t\tself._initial_prob(corpus)\n\n\t\tflat_corpus = [pair for sentence in corpus for pair in sentence]\n\t\tself._emission_prob(flat_corpus)\n\t\tself._transition_prob(flat_corpus)\n\n\t\tself.tags = self.tag_counts.keys()\n\n\n\tdef save(self):\n\t\twith open('initial_prob.json', 'w') as f:\n\t\t\tjson.dump(self.initial_prob, f, indent=True)\n\t\t\tf.close()\n\n\t\twith open('emission_prob.json', 'w') as f:\n\t\t\tjson.dump(self.emission_prob, f, indent=True)\n\t\t\tf.close()\n\n\t\twith open('transition_prob.json', 'w') as f:\n\t\t\tjson.dump(self.transition_prob, f, indent=True)\n\t\t\tf.close()\n\n\n\tdef tag(self, wordlist):\n\t\tprob_list = []\n\t\tresult = []\n\n\t\t# Initialize Base Case (i = 0)\n\t\tprob_dict = dict()\n\t\tif not self.emission_prob.get(wordlist[0]):\n\t\t\tfor tag in self.tags:\n\t\t\t\tprob_dict[tag] = 
1.\n\t\t\tresult.append((wordlist[0],'X'))\n\t\telse:\n\t\t\tfor tag in self.tags:\n\t\t\t\tprob_dict[tag] = (self.initial_prob[tag] * \n\t\t\t\t\t\t\t\t\tself.emission_prob[wordlist[0]][tag])\n\t\t\t(_,max_tag) = max((value,key) for key,value in prob_dict.items())\n\t\t\tresult.append((wordlist[0], max_tag))\n\t\tprob_list.append(prob_dict)\n\n\t\t# Viterbi Algorithm (i > 0)\n\t\t# Note: the backpointer (tag0) from each max is discarded, so this is a\n\t\t# greedy per-position decode over Viterbi path scores rather than a full\n\t\t# backtracking Viterbi.\n\t\tfor i,word in enumerate(wordlist[1:],1):\n\t\t\tprob_dict = dict()\n\n\t\t\tif not self.emission_prob.get(word):\n\t\t\t\tfor tag in self.tags:\n\t\t\t\t\tprob_dict[tag] = prob_list[i-1][tag]\n\t\t\t\tresult.append((word, 'X'))\n\t\t\telse:\n\t\t\t\tfor tag in self.tags:\n\t\t\t\t\t(prob,_) = max((prob_list[i-1][tag0] * \n\t\t\t\t\t\t\t\t\tself.transition_prob[tag0][tag] *\n\t\t\t\t\t\t\t\t\tself.emission_prob[word][tag], tag0) \n\t\t\t\t\t\t\t\t\tfor tag0 in self.tags)\n\t\t\t\t\tprob_dict[tag] = prob\n\t\t\t\t(_,max_tag) = max((prob_dict[tag],tag) for tag in self.tags)\n\t\t\t\tresult.append((word,max_tag))\n\n\t\t\tprob_list.append(prob_dict)\n\n\t\treturn result\n\n\nif __name__ == '__main__':\n\n\tpostagger = myPOSTagger('initial_prob.json',\n\t\t\t\t\t\t\t'emission_prob.json',\n\t\t\t\t\t\t\t'transition_prob.json')\n\n\tfrom nltk.tokenize import word_tokenize\n\n\twordlist = word_tokenize(\"Hello, my name is Richard Kim.\")\n\tprint(postagger.tag(wordlist))\n","sub_path":"postag.py","file_name":"postag.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"646381644","text":"import pandas as pd\n\ndf = pd.read_csv('elections.csv')\n\n# most frequent candidate name per (year, party)\ncandidate = df.groupby(['year', 'party']).candidate.agg(lambda x: x.value_counts().index[0]).reset_index()\ncandidate.to_csv('output/candidates.csv', index=False)\ndel candidate\n\ndf = df[['year', 'short_state', 'votes', 'party']]\n\ndf['party'] = df['party'].apply(lambda x: x if x in ['Democratic', 'Republican'] else 'Other')\n\n# aggfunc='sum': several minor parties can map to 'Other' within one state-year,\n# and pivot_table's default 'mean' would average their votes instead of adding them\ndf_reshape = df.pivot_table(index=['year', 'short_state'], columns='party', values='votes', aggfunc='sum')\ndf_reshape = df_reshape.reset_index().fillna(0)\ndf_reshape[['Democratic', 'Republican', 'Other']] = df_reshape[['Democratic', 'Republican', 'Other']].astype(int)\nprint(df_reshape)\n\ndf_reshape.rename(columns={\n 'short_state': 'State',\n 'Democratic': 'Democrat',\n 'year': 'Year'\n}, inplace=True)\n\nprint(df_reshape.columns)\n\ndf_reshape[['State', 'Democrat', 'Republican', 'Other', 'Year']].to_csv('output/election.csv', index=False)\n","sub_path":"02_to_long_form.py","file_name":"02_to_long_form.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"395193360","text":"def partition(a,low,high):\n i = low\n # swap the middle element to the end to use it as the pivot (Lomuto-style partition)\n a[(low+high)//2],a[high] = a[high],a[(low+high)//2]\n pivot = a[high]\n for j in range(low,high):\n if(a[j]